diff --git a/.gitignore b/.gitignore index a055c3d0d53..07c36f04002 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,6 @@ .DS_Store bin/ modules-dev/ -/pkg/ website/.vagrant website/.bundle website/build diff --git a/pkg/addrs/check.go b/pkg/addrs/check.go new file mode 100644 index 00000000000..d0dfee85af7 --- /dev/null +++ b/pkg/addrs/check.go @@ -0,0 +1,136 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import "fmt" + +// Check is the address of a check block within a module. +// +// For now, checks do not support meta arguments such as "count" or "for_each" +// so this address uniquely describes a single check within a module. +type Check struct { + referenceable + Name string +} + +func (c Check) String() string { + return fmt.Sprintf("check.%s", c.Name) +} + +// InModule returns a ConfigCheck from the receiver and the given module +// address. +func (c Check) InModule(modAddr Module) ConfigCheck { + return ConfigCheck{ + Module: modAddr, + Check: c, + } +} + +// Absolute returns an AbsCheck from the receiver and the given module instance +// address. +func (c Check) Absolute(modAddr ModuleInstance) AbsCheck { + return AbsCheck{ + Module: modAddr, + Check: c, + } +} + +func (c Check) Equal(o Check) bool { + return c.Name == o.Name +} + +func (c Check) UniqueKey() UniqueKey { + return c // A Check is its own UniqueKey +} + +func (c Check) uniqueKeySigil() {} + +// ConfigCheck is an address for a check block within a configuration. +// +// This contains a Check address and a Module address, meaning this describes +// a check block within the entire configuration. 
+type ConfigCheck struct { + Module Module + Check Check +} + +var _ ConfigCheckable = ConfigCheck{} + +func (c ConfigCheck) UniqueKey() UniqueKey { + return configCheckUniqueKey(c.String()) +} + +func (c ConfigCheck) configCheckableSigil() {} + +func (c ConfigCheck) CheckableKind() CheckableKind { + return CheckableCheck +} + +func (c ConfigCheck) String() string { + if len(c.Module) == 0 { + return c.Check.String() + } + return fmt.Sprintf("%s.%s", c.Module, c.Check) +} + +// AbsCheck is an absolute address for a check block under a given module path. +// +// This contains an actual ModuleInstance address (compared to the Module within +// a ConfigCheck), meaning this uniquely describes a check block within the +// entire configuration after any "count" or "foreach" meta arguments have been +// evaluated on the containing module. +type AbsCheck struct { + Module ModuleInstance + Check Check +} + +var _ Checkable = AbsCheck{} + +func (c AbsCheck) UniqueKey() UniqueKey { + return absCheckUniqueKey(c.String()) +} + +func (c AbsCheck) checkableSigil() {} + +// CheckRule returns an address for a given rule type within the check block. +// +// There will be at most one CheckDataResource rule within a check block (with +// an index of 0). There will be at least one, but potentially many, +// CheckAssertion rules within a check block. +func (c AbsCheck) CheckRule(typ CheckRuleType, i int) CheckRule { + return CheckRule{ + Container: c, + Type: typ, + Index: i, + } +} + +// ConfigCheckable returns the ConfigCheck address for this absolute reference. 
+func (c AbsCheck) ConfigCheckable() ConfigCheckable { + return ConfigCheck{ + Module: c.Module.Module(), + Check: c.Check, + } +} + +func (c AbsCheck) CheckableKind() CheckableKind { + return CheckableCheck +} + +func (c AbsCheck) String() string { + if len(c.Module) == 0 { + return c.Check.String() + } + return fmt.Sprintf("%s.%s", c.Module, c.Check) +} + +type configCheckUniqueKey string + +func (k configCheckUniqueKey) uniqueKeySigil() {} + +type absCheckUniqueKey string + +func (k absCheckUniqueKey) uniqueKeySigil() {} diff --git a/pkg/addrs/check_rule.go b/pkg/addrs/check_rule.go new file mode 100644 index 00000000000..41d1ecce99d --- /dev/null +++ b/pkg/addrs/check_rule.go @@ -0,0 +1,119 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" +) + +// CheckRule is the address of a check rule within a checkable object. +// +// This represents the check rule globally within a configuration, and is used +// during graph evaluation to identify a condition result object to update with +// the result of check rule evaluation. +// +// The check address is not distinct from resource traversals, and check rule +// values are not intended to be available to the language, so the address is +// not Referenceable. +// +// Note also that the check address is only relevant within the scope of a run, +// as reordering check blocks between runs will result in their addresses +// changing. CheckRule is therefore for internal use only and should not be +// exposed in durable artifacts such as state snapshots. 
+type CheckRule struct { + Container Checkable + Type CheckRuleType + Index int +} + +func NewCheckRule(container Checkable, typ CheckRuleType, index int) CheckRule { + return CheckRule{ + Container: container, + Type: typ, + Index: index, + } +} + +func (c CheckRule) String() string { + container := c.Container.String() + switch c.Type { + case ResourcePrecondition: + return fmt.Sprintf("%s.precondition[%d]", container, c.Index) + case ResourcePostcondition: + return fmt.Sprintf("%s.postcondition[%d]", container, c.Index) + case OutputPrecondition: + return fmt.Sprintf("%s.precondition[%d]", container, c.Index) + case CheckDataResource: + return fmt.Sprintf("%s.data[%d]", container, c.Index) + case CheckAssertion: + return fmt.Sprintf("%s.assert[%d]", container, c.Index) + case InputValidation: + return fmt.Sprintf("%s.validation[%d]", container, c.Index) + default: + // This should not happen + return fmt.Sprintf("%s.condition[%d]", container, c.Index) + } +} + +func (c CheckRule) UniqueKey() UniqueKey { + return checkRuleKey{ + ContainerKey: c.Container.UniqueKey(), + Type: c.Type, + Index: c.Index, + } +} + +type checkRuleKey struct { + ContainerKey UniqueKey + Type CheckRuleType + Index int +} + +func (k checkRuleKey) uniqueKeySigil() {} + +// CheckRuleType describes a category of check. We use this only to establish +// uniqueness for Check values, and do not expose this concept of "check types" +// (which is subject to change in future) in any durable artifacts such as +// state snapshots. +// +// (See [CheckableKind] for an enumeration that we _do_ use externally, to +// describe the type of object being checked rather than the type of the check +// itself.) 
+type CheckRuleType int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckRuleType check_rule.go + +const ( + InvalidCondition CheckRuleType = 0 + ResourcePrecondition CheckRuleType = 1 + ResourcePostcondition CheckRuleType = 2 + OutputPrecondition CheckRuleType = 3 + CheckDataResource CheckRuleType = 4 + CheckAssertion CheckRuleType = 5 + InputValidation CheckRuleType = 6 +) + +// Description returns a human-readable description of the check type. This is +// presented in the user interface through a diagnostic summary. +func (c CheckRuleType) Description() string { + switch c { + case ResourcePrecondition: + return "Resource precondition" + case ResourcePostcondition: + return "Resource postcondition" + case OutputPrecondition: + return "Module output value precondition" + case CheckDataResource: + return "Check block data resource" + case CheckAssertion: + return "Check block assertion" + case InputValidation: + return "Input variable validation" + default: + // This should not happen + return "Condition" + } +} diff --git a/pkg/addrs/check_rule_diagnostic.go b/pkg/addrs/check_rule_diagnostic.go new file mode 100644 index 00000000000..2de995684da --- /dev/null +++ b/pkg/addrs/check_rule_diagnostic.go @@ -0,0 +1,75 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import "github.com/kubegems/opentofu/pkg/tfdiags" + +// DiagnosticExtraCheckRule provides an interface for diagnostic ExtraInfo to +// retrieve an embedded CheckRule from within a tfdiags.Diagnostic. +type DiagnosticExtraCheckRule interface { + // DiagnosticOriginatesFromCheckRule returns the CheckRule that the + // surrounding diagnostic originated from. + DiagnosticOriginatesFromCheckRule() CheckRule +} + +// DiagnosticOriginatesFromCheckRule checks if the provided diagnostic contains +// a CheckRule as ExtraInfo and returns that CheckRule and true if it does. 
This +// function returns an empty CheckRule and false if the diagnostic does not +// contain a CheckRule. +func DiagnosticOriginatesFromCheckRule(diag tfdiags.Diagnostic) (CheckRule, bool) { + maybe := tfdiags.ExtraInfo[DiagnosticExtraCheckRule](diag) + if maybe == nil { + return CheckRule{}, false + } + return maybe.DiagnosticOriginatesFromCheckRule(), true +} + +// CheckRuleDiagnosticExtra is an object that can be attached to diagnostics +// that originate from check rules. +// +// It implements the DiagnosticExtraCheckRule interface for retrieving the +// concrete CheckRule that spawned the diagnostic. +// +// It also implements the tfdiags.DiagnosticExtraDoNotConsolidate interface, to +// stop diagnostics created by check blocks being consolidated. +// +// It also implements the tfdiags.DiagnosticExtraUnwrapper interface, as nested +// data blocks will attach this struct but do not want to lose any extra info +// embedded in the original diagnostic. +type CheckRuleDiagnosticExtra struct { + CheckRule CheckRule + + wrapped interface{} +} + +var ( + _ DiagnosticExtraCheckRule = (*CheckRuleDiagnosticExtra)(nil) + _ tfdiags.DiagnosticExtraDoNotConsolidate = (*CheckRuleDiagnosticExtra)(nil) + _ tfdiags.DiagnosticExtraUnwrapper = (*CheckRuleDiagnosticExtra)(nil) + _ tfdiags.DiagnosticExtraWrapper = (*CheckRuleDiagnosticExtra)(nil) +) + +func (c *CheckRuleDiagnosticExtra) UnwrapDiagnosticExtra() interface{} { + return c.wrapped +} + +func (c *CheckRuleDiagnosticExtra) WrapDiagnosticExtra(inner interface{}) { + if c.wrapped != nil { + // This is a logical inconsistency, the caller should know whether they + // have already wrapped an extra or not. + panic("Attempted to wrap a diagnostic extra into a CheckRuleDiagnosticExtra that is already wrapping a different extra. This is a bug in OpenTofu, please report it.") + } + c.wrapped = inner +} + +func (c *CheckRuleDiagnosticExtra) DoNotConsolidateDiagnostic() bool { + // Do not consolidate warnings from check blocks. 
+ return c.CheckRule.Container.CheckableKind() == CheckableCheck +} + +func (c *CheckRuleDiagnosticExtra) DiagnosticOriginatesFromCheckRule() CheckRule { + return c.CheckRule +} diff --git a/pkg/addrs/check_rule_diagnostic_test.go b/pkg/addrs/check_rule_diagnostic_test.go new file mode 100644 index 00000000000..0fc2147dbaa --- /dev/null +++ b/pkg/addrs/check_rule_diagnostic_test.go @@ -0,0 +1,113 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestCheckRuleDiagnosticExtra_WrapsExtra(t *testing.T) { + var originals tfdiags.Diagnostics + originals = originals.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: "this is an error", + Extra: "extra", + }) + + overridden := tfdiags.OverrideAll(originals, tfdiags.Warning, func() tfdiags.DiagnosticExtraWrapper { + return &CheckRuleDiagnosticExtra{} + }) + + if overridden[0].ExtraInfo().(*CheckRuleDiagnosticExtra).wrapped.(string) != "extra" { + t.Errorf("unexpected extra info: %v", overridden[0].ExtraInfo()) + } +} + +func TestCheckRuleDiagnosticExtra_Unwraps(t *testing.T) { + var originals tfdiags.Diagnostics + originals = originals.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: "this is an error", + Extra: "extra", + }) + + overridden := tfdiags.OverrideAll(originals, tfdiags.Warning, func() tfdiags.DiagnosticExtraWrapper { + return &CheckRuleDiagnosticExtra{} + }) + + result := tfdiags.ExtraInfo[string](overridden[0]) + if result != "extra" { + t.Errorf("unexpected extra info: %v", result) + } +} + +func TestCheckRuleDiagnosticExtra_DoNotConsolidate(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: 
"this is an error", + Extra: &CheckRuleDiagnosticExtra{ + CheckRule: NewCheckRule(AbsOutputValue{ + Module: RootModuleInstance, + OutputValue: OutputValue{ + Name: "output", + }, + }, OutputPrecondition, 0), + }, + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: "this is an error", + Extra: &CheckRuleDiagnosticExtra{ + CheckRule: NewCheckRule(AbsCheck{ + Module: RootModuleInstance, + Check: Check{ + Name: "check", + }, + }, CheckAssertion, 0), + }, + }) + + if tfdiags.DoNotConsolidateDiagnostic(diags[0]) { + t.Errorf("first diag should be consolidated but was not") + } + + if !tfdiags.DoNotConsolidateDiagnostic(diags[1]) { + t.Errorf("second diag should not be consolidated but was") + } + +} + +func TestDiagnosticOriginatesFromCheckRule_Passes(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: "this is an error", + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "original error", + Detail: "this is an error", + Extra: &CheckRuleDiagnosticExtra{}, + }) + + if _, ok := DiagnosticOriginatesFromCheckRule(diags[0]); ok { + t.Errorf("first diag did not originate from check rule but thinks it did") + } + + if _, ok := DiagnosticOriginatesFromCheckRule(diags[1]); !ok { + t.Errorf("second diag did originate from check rule but this it did not") + } +} diff --git a/pkg/addrs/checkable.go b/pkg/addrs/checkable.go new file mode 100644 index 00000000000..0054f9a2176 --- /dev/null +++ b/pkg/addrs/checkable.go @@ -0,0 +1,196 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Checkable is an interface implemented by all address types that can contain +// condition blocks. +type Checkable interface { + UniqueKeyer + + checkableSigil() + + // CheckRule returns the address of an individual check rule of a specified + // type and index within this checkable container. + CheckRule(CheckRuleType, int) CheckRule + + // ConfigCheckable returns the address of the configuration construct that + // this Checkable belongs to. + // + // Checkable objects can potentially be dynamically declared during a + // plan operation using constructs like resource for_each, and so + // ConfigCheckable gives us a way to talk about the static containers + // those dynamic objects belong to, in case we wish to group together + // dynamic checkable objects into their static checkable for reporting + // purposes. + ConfigCheckable() ConfigCheckable + + CheckableKind() CheckableKind + String() string +} + +var ( + _ Checkable = AbsResourceInstance{} + _ Checkable = AbsOutputValue{} +) + +// CheckableKind describes the different kinds of checkable objects. +type CheckableKind rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckableKind checkable.go + +const ( + CheckableKindInvalid CheckableKind = 0 + CheckableResource CheckableKind = 'R' + CheckableOutputValue CheckableKind = 'O' + CheckableCheck CheckableKind = 'C' + CheckableInputVariable CheckableKind = 'I' +) + +// ConfigCheckable is an interfaces implemented by address types that represent +// configuration constructs that can have Checkable addresses associated with +// them. 
+// +// This address type therefore in a sense represents a container for zero or +// more checkable objects all declared by the same configuration construct, +// so that we can talk about these groups of checkable objects before we're +// ready to decide how many checkable objects belong to each one. +type ConfigCheckable interface { + UniqueKeyer + + configCheckableSigil() + + CheckableKind() CheckableKind + String() string +} + +var ( + _ ConfigCheckable = ConfigResource{} + _ ConfigCheckable = ConfigOutputValue{} +) + +// ParseCheckableStr attempts to parse the given string as a Checkable address +// of the given kind. +// +// This should be the opposite of Checkable.String for any Checkable address +// type, as long as "kind" is set to the value returned by the address's +// CheckableKind method. +// +// We do not typically expect users to write out checkable addresses as input, +// but we use them as part of some of our wire formats for persisting check +// results between runs. +func ParseCheckableStr(kind CheckableKind, src string) (Checkable, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(src), "", hcl.InitialPos) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + if remain.IsRelative() { + // (relative means that there's either nothing left or what's next isn't an identifier) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid checkable address", + Detail: "Module path must be followed by either a resource instance address or an output value address.", + Subject: remain.SourceRange().Ptr(), + }) + return nil, diags + } + + getCheckableName := func(keyword string, descriptor string) (string, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var name string + + if len(remain) != 2 { + diags = 
diags.Append(hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid checkable address", + Detail: fmt.Sprintf("%s address must have only one attribute part after the keyword '%s', giving the name of the %s.", cases.Title(language.English, cases.NoLower).String(keyword), keyword, descriptor), + Subject: remain.SourceRange().Ptr(), + }) + } + + if remain.RootName() != keyword { + diags = diags.Append(hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid checkable address", + Detail: fmt.Sprintf("%s address must follow the module address with the keyword '%s'.", cases.Title(language.English, cases.NoLower).String(keyword), keyword), + Subject: remain.SourceRange().Ptr(), + }) + } + if step, ok := remain[1].(hcl.TraverseAttr); !ok { + diags = diags.Append(hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid checkable address", + Detail: fmt.Sprintf("%s address must have only one attribute part after the keyword '%s', giving the name of the %s.", cases.Title(language.English, cases.NoLower).String(keyword), keyword, descriptor), + Subject: remain.SourceRange().Ptr(), + }) + } else { + name = step.Name + } + + return name, diags + } + + // We use "kind" to disambiguate here because unfortunately we've + // historically never reserved "output" as a possible resource type name + // and so it is in principle possible -- albeit unlikely -- that there + // might be a resource whose type is literally "output". 
+ switch kind { + case CheckableResource: + riAddr, moreDiags := parseResourceInstanceUnderModule(path, remain) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + return riAddr, diags + + case CheckableOutputValue: + name, nameDiags := getCheckableName("output", "output value") + diags = diags.Append(nameDiags) + if diags.HasErrors() { + return nil, diags + } + return OutputValue{Name: name}.Absolute(path), diags + + case CheckableCheck: + name, nameDiags := getCheckableName("check", "check block") + diags = diags.Append(nameDiags) + if diags.HasErrors() { + return nil, diags + } + return Check{Name: name}.Absolute(path), diags + + case CheckableInputVariable: + name, nameDiags := getCheckableName("var", "variable value") + diags = diags.Append(nameDiags) + if diags.HasErrors() { + return nil, diags + } + return InputVariable{Name: name}.Absolute(path), diags + + default: + panic(fmt.Sprintf("unsupported CheckableKind %s", kind)) + } +} diff --git a/pkg/addrs/checkablekind_string.go b/pkg/addrs/checkablekind_string.go new file mode 100644 index 00000000000..80578d4b7e1 --- /dev/null +++ b/pkg/addrs/checkablekind_string.go @@ -0,0 +1,41 @@ +// Code generated by "stringer -type=CheckableKind checkable.go"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CheckableKindInvalid-0] + _ = x[CheckableResource-82] + _ = x[CheckableOutputValue-79] + _ = x[CheckableCheck-67] + _ = x[CheckableInputVariable-73] +} + +const ( + _CheckableKind_name_0 = "CheckableKindInvalid" + _CheckableKind_name_1 = "CheckableCheck" + _CheckableKind_name_2 = "CheckableInputVariable" + _CheckableKind_name_3 = "CheckableOutputValue" + _CheckableKind_name_4 = "CheckableResource" +) + +func (i CheckableKind) String() string { + switch { + case i == 0: + return _CheckableKind_name_0 + case i == 67: + return _CheckableKind_name_1 + case i == 73: + return _CheckableKind_name_2 + case i == 79: + return _CheckableKind_name_3 + case i == 82: + return _CheckableKind_name_4 + default: + return "CheckableKind(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/addrs/checkruletype_string.go b/pkg/addrs/checkruletype_string.go new file mode 100644 index 00000000000..18726e211a7 --- /dev/null +++ b/pkg/addrs/checkruletype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=CheckRuleType check_rule.go"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidCondition-0] + _ = x[ResourcePrecondition-1] + _ = x[ResourcePostcondition-2] + _ = x[OutputPrecondition-3] + _ = x[CheckDataResource-4] + _ = x[CheckAssertion-5] + _ = x[InputValidation-6] +} + +const _CheckRuleType_name = "InvalidConditionResourcePreconditionResourcePostconditionOutputPreconditionCheckDataResourceCheckAssertionInputValidation" + +var _CheckRuleType_index = [...]uint8{0, 16, 36, 57, 75, 92, 106, 121} + +func (i CheckRuleType) String() string { + if i < 0 || i >= CheckRuleType(len(_CheckRuleType_index)-1) { + return "CheckRuleType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _CheckRuleType_name[_CheckRuleType_index[i]:_CheckRuleType_index[i+1]] +} diff --git a/pkg/addrs/count_attr.go b/pkg/addrs/count_attr.go new file mode 100644 index 00000000000..6bd543c4e21 --- /dev/null +++ b/pkg/addrs/count_attr.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// CountAttr is the address of an attribute of the "count" object in +// the interpolation scope, like "count.index". +type CountAttr struct { + referenceable + Name string +} + +func (ca CountAttr) String() string { + return "count." + ca.Name +} + +func (ca CountAttr) UniqueKey() UniqueKey { + return ca // A CountAttr is its own UniqueKey +} + +func (ca CountAttr) uniqueKeySigil() {} diff --git a/pkg/addrs/doc.go b/pkg/addrs/doc.go new file mode 100644 index 00000000000..c663eeecd7f --- /dev/null +++ b/pkg/addrs/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a OpenTofu configuration or +// state. 
+// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within OpenTofu modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. +package addrs diff --git a/pkg/addrs/for_each_attr.go b/pkg/addrs/for_each_attr.go new file mode 100644 index 00000000000..7775cd5036b --- /dev/null +++ b/pkg/addrs/for_each_attr.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// ForEachAttr is the address of an attribute referencing the current "for_each" object in +// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value" +type ForEachAttr struct { + referenceable + Name string +} + +func (f ForEachAttr) String() string { + return "each." + f.Name +} + +func (f ForEachAttr) UniqueKey() UniqueKey { + return f // A ForEachAttr is its own UniqueKey +} + +func (f ForEachAttr) uniqueKeySigil() {} diff --git a/pkg/addrs/input_variable.go b/pkg/addrs/input_variable.go new file mode 100644 index 00000000000..f97bf8d60a4 --- /dev/null +++ b/pkg/addrs/input_variable.go @@ -0,0 +1,128 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" +) + +// InputVariable is the address of an input variable. +type InputVariable struct { + referenceable + Name string +} + +func (v InputVariable) String() string { + return "var." + v.Name +} + +func (v InputVariable) UniqueKey() UniqueKey { + return v // A InputVariable is its own UniqueKey +} + +func (v InputVariable) uniqueKeySigil() {} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v InputVariable) Absolute(m ModuleInstance) AbsInputVariableInstance { + return AbsInputVariableInstance{ + Module: m, + Variable: v, + } +} + +func (v InputVariable) InModule(module Module) ConfigInputVariable { + return ConfigInputVariable{ + Module: module, + Variable: v, + } +} + +// AbsInputVariableInstance is the address of an input variable within a +// particular module instance. +type AbsInputVariableInstance struct { + Module ModuleInstance + Variable InputVariable +} + +var _ Checkable = AbsInputVariableInstance{} + +// InputVariable returns the absolute address of the input variable of the +// given name inside the receiving module instance. 
+func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { + return AbsInputVariableInstance{ + Module: m, + Variable: InputVariable{ + Name: name, + }, + } +} + +func (v AbsInputVariableInstance) String() string { + if len(v.Module) == 0 { + return v.Variable.String() + } + + return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) +} + +func (v AbsInputVariableInstance) UniqueKey() UniqueKey { + return absInputVariableInstanceUniqueKey(v.String()) +} + +func (v AbsInputVariableInstance) checkableSigil() {} + +func (v AbsInputVariableInstance) CheckRule(typ CheckRuleType, i int) CheckRule { + return CheckRule{ + Container: v, + Type: typ, + Index: i, + } +} + +func (v AbsInputVariableInstance) ConfigCheckable() ConfigCheckable { + return ConfigInputVariable{ + Module: v.Module.Module(), + Variable: v.Variable, + } +} + +func (v AbsInputVariableInstance) CheckableKind() CheckableKind { + return CheckableInputVariable +} + +type ConfigInputVariable struct { + Module Module + Variable InputVariable +} + +var _ ConfigCheckable = ConfigInputVariable{} + +func (v ConfigInputVariable) UniqueKey() UniqueKey { + return configInputVariableUniqueKey(v.String()) +} + +func (v ConfigInputVariable) configCheckableSigil() {} + +func (v ConfigInputVariable) CheckableKind() CheckableKind { + return CheckableInputVariable +} + +func (v ConfigInputVariable) String() string { + if len(v.Module) == 0 { + return v.Variable.String() + } + + return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) +} + +type configInputVariableUniqueKey string + +func (k configInputVariableUniqueKey) uniqueKeySigil() {} + +type absInputVariableInstanceUniqueKey string + +func (k absInputVariableInstanceUniqueKey) uniqueKeySigil() {} diff --git a/pkg/addrs/instance_key.go b/pkg/addrs/instance_key.go new file mode 100644 index 00000000000..8bbb3fbee9a --- /dev/null +++ b/pkg/addrs/instance_key.go @@ -0,0 +1,196 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" + "unicode" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// InstanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// IntKey and StringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type InstanceKey interface { + instanceKeySigil() + String() string + + // Value returns the cty.Value of the appropriate type for the InstanceKey + // value. + Value() cty.Value +} + +// ParseInstanceKey returns the instance key corresponding to the given value, +// which must be known and non-null. +// +// If an unknown or null value is provided then this function will panic. This +// function is intended to deal with the values that would naturally be found +// in a hcl.TraverseIndex, which (when parsed from source, at least) can never +// contain unknown or null values. +func ParseInstanceKey(key cty.Value) (InstanceKey, error) { + switch key.Type() { + case cty.String: + return StringKey(key.AsString()), nil + case cty.Number: + var idx int + err := gocty.FromCtyValue(key, &idx) + return IntKey(idx), err + default: + return NoKey, fmt.Errorf("either a string or an integer is required") + } +} + +// NoKey represents the absence of an InstanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey InstanceKey + +// IntKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. 
+type IntKey int + +func (k IntKey) instanceKeySigil() { +} + +func (k IntKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +func (k IntKey) Value() cty.Value { + return cty.NumberIntVal(int64(k)) +} + +// StringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type StringKey string + +func (k StringKey) instanceKeySigil() { +} + +func (k StringKey) String() string { + // We use HCL's quoting syntax here so that we can in principle parse + // an address constructed by this package as if it were an HCL + // traversal, even if the string contains HCL's own metacharacters. + return fmt.Sprintf("[%s]", toHCLQuotedString(string(k))) +} + +func (k StringKey) Value() cty.Value { + return cty.StringVal(string(k)) +} + +// InstanceKeyLess returns true if the first given instance key i should sort +// before the second key j, and false otherwise. +func InstanceKeyLess(i, j InstanceKey) bool { + iTy := instanceKeyType(i) + jTy := instanceKeyType(j) + + switch { + case i == j: + return false + case i == NoKey: + return true + case j == NoKey: + return false + case iTy != jTy: + // The ordering here is arbitrary except that we want NoKeyType + // to sort before the others, so we'll just use the enum values + // of InstanceKeyType here (where NoKey is zero, sorting before + // any other). + return uint32(iTy) < uint32(jTy) + case iTy == IntKeyType: + return int(i.(IntKey)) < int(j.(IntKey)) + case iTy == StringKeyType: + return string(i.(StringKey)) < string(j.(StringKey)) + default: + // Shouldn't be possible to get down here in practice, since the + // above is exhaustive. 
+ return false + } +} + +func instanceKeyType(k InstanceKey) InstanceKeyType { + if _, ok := k.(StringKey); ok { + return StringKeyType + } + if _, ok := k.(IntKey); ok { + return IntKeyType + } + return NoKeyType +} + +// InstanceKeyType represents the different types of instance key that are +// supported. Usually it is sufficient to simply type-assert an InstanceKey +// value to either IntKey or StringKey, but this type and its values can be +// used to represent the types themselves, rather than specific values +// of those types. +type InstanceKeyType rune + +const ( + NoKeyType InstanceKeyType = 0 + IntKeyType InstanceKeyType = 'I' + StringKeyType InstanceKeyType = 'S' +) + +// toHCLQuotedString is a helper which formats the given string in a way that +// HCL's expression parser would treat as a quoted string template. +// +// This includes: +// - Adding quote marks at the start and the end. +// - Using backslash escapes as needed for characters that cannot be represented directly. +// - Escaping anything that would be treated as a template interpolation or control sequence. +func toHCLQuotedString(s string) string { + // This is an adaptation of a similar function inside the hclwrite package, + // inlined here because hclwrite's version generates HCL tokens but we + // only need normal strings. + if len(s) == 0 { + return `""` + } + var buf strings.Builder + buf.WriteByte('"') + for i, r := range s { + switch r { + case '\n': + buf.WriteString(`\n`) + case '\r': + buf.WriteString(`\r`) + case '\t': + buf.WriteString(`\t`) + case '"': + buf.WriteString(`\"`) + case '\\': + buf.WriteString(`\\`) + case '$', '%': + buf.WriteRune(r) + remain := s[i+1:] + if len(remain) > 0 && remain[0] == '{' { + // Double up our template introducer symbol to escape it. 
				buf.WriteRune(r)
			}
		default:
			// Anything unprintable is emitted as an HCL-style unicode
			// escape; printable runes pass through unchanged.
			if !unicode.IsPrint(r) {
				var fmted string
				if r < 65536 {
					// Code points in the Basic Multilingual Plane fit the
					// four-digit \uNNNN escape form.
					fmted = fmt.Sprintf("\\u%04x", r)
				} else {
					fmted = fmt.Sprintf("\\U%08x", r)
				}
				buf.WriteString(fmted)
			} else {
				buf.WriteRune(r)
			}
		}
	}
	buf.WriteByte('"')
	return buf.String()
}
diff --git a/pkg/addrs/instance_key_test.go b/pkg/addrs/instance_key_test.go
new file mode 100644
index 00000000000..efc6c262404
--- /dev/null
+++ b/pkg/addrs/instance_key_test.go
@@ -0,0 +1,80 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package addrs

import (
	"fmt"
	"testing"
)

// TestInstanceKeyString verifies the String renderings of IntKey and
// StringKey, including HCL escaping of special characters in string keys.
func TestInstanceKeyString(t *testing.T) {
	tests := []struct {
		Key  InstanceKey
		Want string
	}{
		{
			IntKey(0),
			`[0]`,
		},
		{
			IntKey(5),
			`[5]`,
		},
		{
			StringKey(""),
			`[""]`,
		},
		{
			StringKey("hi"),
			`["hi"]`,
		},
		{
			StringKey("0"),
			`["0"]`, // intentionally distinct from IntKey(0)
		},
		{
			// Quotes must be escaped
			StringKey(`"`),
			`["\""]`,
		},
		{
			// Escape sequences must themselves be escaped
			StringKey(`\r\n`),
			`["\\r\\n"]`,
		},
		{
			// Template interpolation sequences "${" must be escaped.
			StringKey(`${hello}`),
			`["$${hello}"]`,
		},
		{
			// Template control sequences "%{" must be escaped.
+ StringKey(`%{ for something in something }%{ endfor }`), + `["%%{ for something in something }%%{ endfor }"]`, + }, + { + // Dollar signs that aren't followed by { are not interpolation sequences + StringKey(`$hello`), + `["$hello"]`, + }, + { + // Percent signs that aren't followed by { are not control sequences + StringKey(`%hello`), + `["%hello"]`, + }, + } + + for _, test := range tests { + testName := fmt.Sprintf("%#v", test.Key) + t.Run(testName, func(t *testing.T) { + got := test.Key.String() + want := test.Want + if got != want { + t.Errorf("wrong result\nreciever: %s\ngot: %s\nwant: %s", testName, got, want) + } + }) + } +} diff --git a/pkg/addrs/local_value.go b/pkg/addrs/local_value.go new file mode 100644 index 00000000000..88ecfaf4e59 --- /dev/null +++ b/pkg/addrs/local_value.go @@ -0,0 +1,59 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" +) + +// LocalValue is the address of a local value. +type LocalValue struct { + referenceable + Name string +} + +func (v LocalValue) String() string { + return "local." + v.Name +} + +func (v LocalValue) UniqueKey() UniqueKey { + return v // A LocalValue is its own UniqueKey +} + +func (v LocalValue) uniqueKeySigil() {} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: v, + } +} + +// AbsLocalValue is the absolute address of a local value within a module instance. +type AbsLocalValue struct { + Module ModuleInstance + LocalValue LocalValue +} + +// LocalValue returns the absolute address of a local value of the given +// name within the receiving module instance. 
+func (m ModuleInstance) LocalValue(name string) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: LocalValue{ + Name: name, + }, + } +} + +func (v AbsLocalValue) String() string { + if len(v.Module) == 0 { + return v.LocalValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) +} diff --git a/pkg/addrs/map.go b/pkg/addrs/map.go new file mode 100644 index 00000000000..022a86f1ca9 --- /dev/null +++ b/pkg/addrs/map.go @@ -0,0 +1,133 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Map represents a mapping whose keys are address types that implement +// UniqueKeyer. +// +// Since not all address types are comparable in the Go language sense, this +// type cannot work with the typical Go map access syntax, and so instead has +// a method-based syntax. Use this type only for situations where the key +// type isn't guaranteed to always be a valid key for a standard Go map. +type Map[K UniqueKeyer, V any] struct { + // Elems is the internal data structure of the map. + // + // This is exported to allow for comparisons during tests and other similar + // careful read operations, but callers MUST NOT modify this map directly. + // Use only the methods of Map to modify the contents of this structure, + // to ensure that it remains correct and consistent. 
+ Elems map[UniqueKey]MapElem[K, V] +} + +type MapElem[K UniqueKeyer, V any] struct { + Key K + Value V +} + +func MakeMap[K UniqueKeyer, V any](initialElems ...MapElem[K, V]) Map[K, V] { + inner := make(map[UniqueKey]MapElem[K, V], len(initialElems)) + ret := Map[K, V]{inner} + for _, elem := range initialElems { + ret.Put(elem.Key, elem.Value) + } + return ret +} + +func MakeMapElem[K UniqueKeyer, V any](key K, value V) MapElem[K, V] { + return MapElem[K, V]{key, value} +} + +// Put inserts a new element into the map, or replaces an existing element +// which has an equivalent key. +func (m Map[K, V]) Put(key K, value V) { + realKey := key.UniqueKey() + m.Elems[realKey] = MapElem[K, V]{key, value} +} + +// PutElement is like Put but takes the key and value from the given MapElement +// structure instead of as individual arguments. +func (m Map[K, V]) PutElement(elem MapElem[K, V]) { + m.Put(elem.Key, elem.Value) +} + +// Remove deletes the element with the given key from the map, or does nothing +// if there is no such element. +func (m Map[K, V]) Remove(key K) { + realKey := key.UniqueKey() + delete(m.Elems, realKey) +} + +// Get returns the value of the element with the given key, or the zero value +// of V if there is no such element. +func (m Map[K, V]) Get(key K) V { + realKey := key.UniqueKey() + return m.Elems[realKey].Value +} + +// GetOk is like Get but additionally returns a flag for whether there was an +// element with the given key present in the map. +func (m Map[K, V]) GetOk(key K) (V, bool) { + realKey := key.UniqueKey() + elem, ok := m.Elems[realKey] + return elem.Value, ok +} + +// Has returns true if and only if there is an element in the map which has the +// given key. +func (m Map[K, V]) Has(key K) bool { + realKey := key.UniqueKey() + _, ok := m.Elems[realKey] + return ok +} + +// Len returns the number of elements in the map. 
+func (m Map[K, V]) Len() int { + return len(m.Elems) +} + +// Elements returns a slice containing a snapshot of the current elements of +// the map, in an unpredictable order. +func (m Map[K, V]) Elements() []MapElem[K, V] { + if len(m.Elems) == 0 { + return nil + } + ret := make([]MapElem[K, V], 0, len(m.Elems)) + for _, elem := range m.Elems { + ret = append(ret, elem) + } + return ret +} + +// Keys returns a Set[K] containing a snapshot of the current keys of elements +// of the map. +func (m Map[K, V]) Keys() Set[K] { + if len(m.Elems) == 0 { + return nil + } + ret := make(Set[K], len(m.Elems)) + + // We mess with the internals of Set here, rather than going through its + // public interface, because that means we can avoid re-calling UniqueKey + // on all of the elements when we know that our own Put method would have + // already done the same thing. + for realKey, elem := range m.Elems { + ret[realKey] = elem.Key + } + return ret +} + +// Values returns a slice containing a snapshot of the current values of +// elements of the map, in an unpredictable order. +func (m Map[K, V]) Values() []V { + if len(m.Elems) == 0 { + return nil + } + ret := make([]V, 0, len(m.Elems)) + for _, elem := range m.Elems { + ret = append(ret, elem.Value) + } + return ret +} diff --git a/pkg/addrs/map_test.go b/pkg/addrs/map_test.go new file mode 100644 index 00000000000..d32ebd63106 --- /dev/null +++ b/pkg/addrs/map_test.go @@ -0,0 +1,88 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" +) + +func TestMap(t *testing.T) { + variableName := InputVariable{Name: "name"} + localHello := LocalValue{Name: "hello"} + pathModule := PathAttr{Name: "module"} + moduleBeep := ModuleCall{Name: "beep"} + eachKey := ForEachAttr{Name: "key"} // intentionally not in the map + + m := MakeMap( + MakeMapElem[Referenceable](variableName, "Aisling"), + ) + + m.Put(localHello, "hello") + m.Put(pathModule, "boop") + m.Put(moduleBeep, "unrealistic") + + keySet := m.Keys() + if want := variableName; !m.Has(want) { + t.Errorf("map does not include %s", want) + } + if want := variableName; !keySet.Has(want) { + t.Errorf("key set does not include %s", want) + } + if want := localHello; !m.Has(want) { + t.Errorf("map does not include %s", want) + } + if want := localHello; !keySet.Has(want) { + t.Errorf("key set does not include %s", want) + } + if want := pathModule; !keySet.Has(want) { + t.Errorf("key set does not include %s", want) + } + if want := moduleBeep; !keySet.Has(want) { + t.Errorf("key set does not include %s", want) + } + if doNotWant := eachKey; m.Has(doNotWant) { + t.Errorf("map includes rogue element %s", doNotWant) + } + if doNotWant := eachKey; keySet.Has(doNotWant) { + t.Errorf("key set includes rogue element %s", doNotWant) + } + + if got, want := m.Get(variableName), "Aisling"; got != want { + t.Errorf("unexpected value %q for %s; want %q", got, variableName, want) + } + if got, want := m.Get(localHello), "hello"; got != want { + t.Errorf("unexpected value %q for %s; want %q", got, localHello, want) + } + if got, want := m.Get(pathModule), "boop"; got != want { + t.Errorf("unexpected value %q for %s; want %q", got, pathModule, want) + } + if got, want := m.Get(moduleBeep), "unrealistic"; got != want { + t.Errorf("unexpected value %q for %s; want %q", got, moduleBeep, want) + } + if got, want := m.Get(eachKey), ""; got != want { + // eachKey isn't in the map, so Get returns the 
zero value of string + t.Errorf("unexpected value %q for %s; want %q", got, eachKey, want) + } + + if v, ok := m.GetOk(variableName); v != "Aisling" || !ok { + t.Errorf("GetOk for %q returned incorrect result (%q, %#v)", variableName, v, ok) + } + if v, ok := m.GetOk(eachKey); v != "" || ok { + t.Errorf("GetOk for %q returned incorrect result (%q, %#v)", eachKey, v, ok) + } + + m.Remove(moduleBeep) + if doNotWant := moduleBeep; m.Has(doNotWant) { + t.Errorf("map still includes %s after removing it", doNotWant) + } + if want := moduleBeep; !keySet.Has(want) { + t.Errorf("key set no longer includes %s after removing it from the map; key set is supposed to be a snapshot at the time of call", want) + } + keySet = m.Keys() + if doNotWant := moduleBeep; keySet.Has(doNotWant) { + t.Errorf("key set still includes %s after a second call after removing it from the map", doNotWant) + } +} diff --git a/pkg/addrs/module.go b/pkg/addrs/module.go new file mode 100644 index 00000000000..74f726ff97d --- /dev/null +++ b/pkg/addrs/module.go @@ -0,0 +1,316 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. 
+type Module []string + +// RootModule is the module address representing the root of the static module +// call tree, which is also the zero value of Module. +// +// Note that this is not the root of the dynamic module tree, which is instead +// represented by RootModuleInstance. +var RootModule Module + +// IsRoot returns true if the receiver is the address of the root module, +// or false otherwise. +func (m Module) IsRoot() bool { + return len(m) == 0 +} + +func (m Module) String() string { + if len(m) == 0 { + return "" + } + // Calculate necessary space. + l := 0 + for _, step := range m { + l += len(step) + } + buf := strings.Builder{} + // 8 is len(".module.") which separates entries. + buf.Grow(l + len(m)*8) + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step) + sep = "." + } + return buf.String() +} + +func (m Module) Equal(other Module) bool { + if len(m) != len(other) { + return false + } + for i := range m { + if m[i] != other[i] { + return false + } + } + return true +} + +func (m Module) targetableSigil() { + // Module is targetable +} + +// TargetContains implements Targetable for Module by returning true if the given other +// address either matches the receiver, is a sub-module-instance of the +// receiver, or is a targetable absolute address within a module that +// is contained within the receiver. +func (m Module) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case Module: + if len(to) < len(m) { + // Can't be contained if the path is shorter + return false + } + // Other is contained if its steps match for the length of our own path. + for i, ourStep := range m { + otherStep := to[i] + if ourStep != otherStep { + return false + } + } + // If we fall out here then the prefixed matched, so it's contained. 
+ return true + + case ModuleInstance: + return m.TargetContains(to.Module()) + + case ConfigResource: + return m.TargetContains(to.Module) + + case AbsResource: + return m.TargetContains(to.Module) + + case AbsResourceInstance: + return m.TargetContains(to.Module) + + default: + return false + } +} + +func (m Module) AddrType() TargetableAddrType { + return ModuleAddrType +} + +// Child returns the address of a child call in the receiver, identified by the +// given name. +func (m Module) Child(name string) Module { + ret := make(Module, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, name) +} + +// Parent returns the address of the parent module of the receiver, or the +// receiver itself if there is no parent (if it's the root module address). +func (m Module) Parent() Module { + if len(m) == 0 { + return m + } + return m[:len(m)-1] +} + +// Call returns the module call address that corresponds to the given module +// instance, along with the address of the module that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCall and then returns a slice of the receiever that excludes that +// last part. This is just a convenience for situations where a call address +// is required, such as when dealing with *Reference and Referencable values. +func (m Module) Call() (Module, ModuleCall) { + if len(m) == 0 { + panic("cannot produce ModuleCall for root module") + } + + caller, callName := m[:len(m)-1], m[len(m)-1] + return caller, ModuleCall{ + Name: callName, + } +} + +// Ancestors returns a slice containing the receiver and all of its ancestor +// modules, all the way up to (and including) the root module. The result is +// ordered by depth, with the root module always first. 
+// +// Since the result always includes the root module, a caller may choose to +// ignore it by slicing the result with [1:]. +func (m Module) Ancestors() []Module { + ret := make([]Module, 0, len(m)+1) + for i := 0; i <= len(m); i++ { + ret = append(ret, m[:i]) + } + return ret +} + +func (m Module) configMoveableSigil() { + // ModuleInstance is moveable +} +func (m Module) configRemovableSigil() { + // Empty function so Module will fulfill the requirements of the removable interface +} + +// ParseModule parses a module address from the given traversal, +// which has to contain only the module address with no resource/data/variable/etc. +// This function only supports module addresses without instance keys (as the +// returned Module struct doesn't support instance keys) and will return an +// error if it encounters one. +func ParseModule(traversal hcl.Traversal) (Module, tfdiags.Diagnostics) { + mod, remain, diags := parseModulePrefix(traversal) + if !diags.HasErrors() && len(remain) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module address expected", + Detail: "It's not allowed to reference anything other than module here.", + Subject: remain[0].SourceRange().Ptr(), + }) + } + + return mod, diags +} + +// parseModulePrefix parses a module address from the given traversal, +// returning the module address and the remaining traversal. +// For example, if the input traversal is ["module","a","module","b", +// "null_resource", example_resource"], the output module will be ["a", "b"] +// and the output remaining traversal will be ["null_resource", +// "example_resource"]. +// This function only supports module addresses without instance keys (as the +// returned Module struct doesn't support instance keys) and will return an +// error if it encounters one. 
+func parseModulePrefix(traversal hcl.Traversal) (Module, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var module Module + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + moduleName, isModule, moduleNameDiags := getModuleName(remain) + diags = diags.Append(moduleNameDiags) + + if !isModule || diags.HasErrors() { + break + } + + // Because this is a valid module address, we can safely assume that + // the first two elements are "module" and the module name + remain = remain[2:] + + if len(remain) > 0 { + // We don't allow module instances as part of the module address + if _, ok := remain[0].(hcl.TraverseIndex); ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module instance address with keys is not allowed", + Detail: "Module address cannot be a module instance (e.g. \"module.a[0]\"), it must be a module instead (e.g. \"module.a\").", + Subject: remain[0].SourceRange().Ptr(), + }) + + return module, remain, diags + } + } + + module = append(module, moduleName) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. 
+ if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return module, retRemain, diags +} + +func getModuleName(remain hcl.Traversal) (moduleName string, isModule bool, diags tfdiags.Diagnostics) { + if len(remain) == 0 { + // If the address is empty, then we can't possibly have a module address + return moduleName, false, diags + } + + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Module address prefix must be followed by dot and then a name.", + Subject: remain[0].SourceRange().Ptr(), + }) + + return moduleName, false, diags + } + + if next != "module" { + return moduleName, false, diags + } + + kwRange := remain[0].SourceRange() + remain = remain[1:] + // If we have the prefix "module" then we should be followed by a + // module call name, as an attribute + if len(remain) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: &kwRange, + }) + + return moduleName, false, diags + } + + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: remain[0].SourceRange().Ptr(), + }) + return moduleName, false, diags + } + return moduleName, true, diags +} diff --git a/pkg/addrs/module_call.go b/pkg/addrs/module_call.go new file mode 100644 index 00000000000..8894f37d828 --- /dev/null +++ b/pkg/addrs/module_call.go @@ -0,0 +1,197 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright 
(c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" +) + +// ModuleCall is the address of a call from the current module to a child +// module. +type ModuleCall struct { + referenceable + Name string +} + +func (c ModuleCall) String() string { + return "module." + c.Name +} + +func (c ModuleCall) UniqueKey() UniqueKey { + return c // A ModuleCall is its own UniqueKey +} + +func (c ModuleCall) uniqueKeySigil() {} + +// Instance returns the address of an instance of the receiver identified by +// the given key. +func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance { + return ModuleCallInstance{ + Call: c, + Key: key, + } +} + +func (c ModuleCall) Absolute(moduleAddr ModuleInstance) AbsModuleCall { + return AbsModuleCall{ + Module: moduleAddr, + Call: c, + } +} + +func (c ModuleCall) Equal(other ModuleCall) bool { + return c.Name == other.Name +} + +// AbsModuleCall is the address of a "module" block relative to the root +// of the configuration. +// +// This is similar to ModuleInstance alone, but specifically represents +// the module block itself rather than any one of the instances that +// module block declares. +type AbsModuleCall struct { + Module ModuleInstance + Call ModuleCall +} + +func (c AbsModuleCall) absMoveableSigil() { + // AbsModuleCall is "moveable". +} + +func (c AbsModuleCall) String() string { + if len(c.Module) == 0 { + return "module." 
+ c.Call.Name + + } + return fmt.Sprintf("%s.module.%s", c.Module, c.Call.Name) +} + +func (c AbsModuleCall) Instance(key InstanceKey) ModuleInstance { + ret := make(ModuleInstance, len(c.Module), len(c.Module)+1) + copy(ret, c.Module) + ret = append(ret, ModuleInstanceStep{ + Name: c.Call.Name, + InstanceKey: key, + }) + return ret +} + +func (c AbsModuleCall) Equal(other AbsModuleCall) bool { + return c.Module.Equal(other.Module) && c.Call.Equal(other.Call) +} + +type absModuleCallInstanceKey string + +func (c AbsModuleCall) UniqueKey() UniqueKey { + return absModuleCallInstanceKey(c.String()) +} + +func (mk absModuleCallInstanceKey) uniqueKeySigil() {} + +// ModuleCallInstance is the address of one instance of a module created from +// a module call, which might create multiple instances using "count" or +// "for_each" arguments. +// +// There is no "Abs" version of ModuleCallInstance because an absolute module +// path is represented by ModuleInstance. +type ModuleCallInstance struct { + referenceable + Call ModuleCall + Key InstanceKey +} + +func (c ModuleCallInstance) String() string { + if c.Key == NoKey { + return c.Call.String() + } + return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) +} + +func (c ModuleCallInstance) UniqueKey() UniqueKey { + return c // A ModuleCallInstance is its own UniqueKey +} + +func (c ModuleCallInstance) uniqueKeySigil() {} + +func (c ModuleCallInstance) Absolute(moduleAddr ModuleInstance) ModuleInstance { + ret := make(ModuleInstance, len(moduleAddr), len(moduleAddr)+1) + copy(ret, moduleAddr) + ret = append(ret, ModuleInstanceStep{ + Name: c.Call.Name, + InstanceKey: c.Key, + }) + return ret +} + +// ModuleInstance returns the address of the module instance that corresponds +// to the receiving call instance when resolved in the given calling module. +// In other words, it returns the child module instance that the receving +// call instance creates. 
+func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { + return caller.Child(c.Call.Name, c.Key) +} + +// Output returns the absolute address of an output of the receiver identified by its +// name. +func (c ModuleCallInstance) Output(name string) ModuleCallInstanceOutput { + return ModuleCallInstanceOutput{ + Call: c, + Name: name, + } +} + +// ModuleCallOutput is the address of a named output and its associated +// ModuleCall, which may expand into multiple module instances +type ModuleCallOutput struct { + referenceable + Call ModuleCall + Name string +} + +func (m ModuleCallOutput) String() string { + return fmt.Sprintf("%s.%s", m.Call.String(), m.Name) +} + +func (m ModuleCallOutput) UniqueKey() UniqueKey { + return m // A ModuleCallOutput is its own UniqueKey +} + +func (m ModuleCallOutput) uniqueKeySigil() {} + +// ModuleCallInstanceOutput is the address of a particular named output produced by +// an instance of a module call. +type ModuleCallInstanceOutput struct { + referenceable + Call ModuleCallInstance + Name string +} + +// ModuleCallOutput returns the referenceable ModuleCallOutput for this +// particular instance. +func (co ModuleCallInstanceOutput) ModuleCallOutput() ModuleCallOutput { + return ModuleCallOutput{ + Call: co.Call.Call, + Name: co.Name, + } +} + +func (co ModuleCallInstanceOutput) String() string { + return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) +} + +func (co ModuleCallInstanceOutput) UniqueKey() UniqueKey { + return co // A ModuleCallInstanceOutput is its own UniqueKey +} + +func (co ModuleCallInstanceOutput) uniqueKeySigil() {} + +// AbsOutputValue returns the absolute output value address that corresponds +// to the receving module call output address, once resolved in the given +// calling module. 
func (co ModuleCallInstanceOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
	// Resolve the call instance against the caller, then address the named
	// output value within the resulting child module instance.
	moduleAddr := co.Call.ModuleInstance(caller)
	return moduleAddr.OutputValue(co.Name)
}
diff --git a/pkg/addrs/module_instance.go b/pkg/addrs/module_instance.go
new file mode 100644
index 00000000000..56b78d1cf8d
--- /dev/null
+++ b/pkg/addrs/module_instance.go
@@ -0,0 +1,513 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package addrs

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"

	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// ModuleInstance is an address for a particular module instance within the
// dynamic module tree. This is an extension of the static traversals
// represented by type Module that deals with the possibility of a single
// module call producing multiple instances via the "count" and "for_each"
// arguments.
//
// Although ModuleInstance is a slice, it should be treated as immutable after
// creation.
+type ModuleInstance []ModuleInstanceStep + +var ( + _ Targetable = ModuleInstance(nil) +) + +func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "A module instance address must begin with \"module.\".", + Subject: remain.SourceRange().Ptr(), + }) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "The module instance address is followed by additional invalid content.", + Subject: remain.SourceRange().Ptr(), + }) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseModuleInstance. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. 
func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(parseDiags)
	if parseDiags.HasErrors() {
		return nil, diags
	}

	addr, addrDiags := ParseModuleInstance(traversal)
	diags = diags.Append(addrDiags)
	return addr, diags
}

// parseModuleInstancePrefix parses a module instance address from the given
// traversal, returning the module instance address and the remaining
// traversal.
// This function supports module addresses with and without instance keys.
func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
	remain := traversal
	var mi ModuleInstance
	var diags tfdiags.Diagnostics

	for len(remain) > 0 {
		moduleName, isModule, moduleNameDiags := getModuleName(remain)
		diags = diags.Append(moduleNameDiags)

		if !isModule || diags.HasErrors() {
			break
		}

		// Because this is a valid module address, we can safely assume that
		// the first two elements are "module" and the module name
		remain = remain[2:]
		step := ModuleInstanceStep{
			Name: moduleName,
		}

		// Check for optional module instance key
		if len(remain) > 0 {
			if idx, ok := remain[0].(hcl.TraverseIndex); ok {
				remain = remain[1:]

				switch idx.Key.Type() {
				case cty.String:
					step.InstanceKey = StringKey(idx.Key.AsString())
				case cty.Number:
					var idxInt int
					err := gocty.FromCtyValue(idx.Key, &idxInt)
					if err == nil {
						step.InstanceKey = IntKey(idxInt)
					} else {
						diags = diags.Append(&hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary:  "Invalid address operator",
							Detail:   fmt.Sprintf("Invalid module index: %s.", err),
							Subject:  idx.SourceRange().Ptr(),
						})
					}
				default:
					// Should never happen, because no other types are allowed in traversal indices.
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Invalid module key: must be either a string or an integer.", + Subject: idx.SourceRange().Ptr(), + }) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that OpenTofu does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey InstanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. 
+var RootModuleInstance ModuleInstance + +// IsRoot returns true if the receiver is the address of the root module instance, +// or false otherwise. +func (m ModuleInstance) IsRoot() bool { + return len(m) == 0 +} + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// ChildCall returns the address of a module call within the receiver, +// identified by the given name. +func (m ModuleInstance) ChildCall(name string) AbsModuleCall { + return AbsModuleCall{ + Module: m, + Call: ModuleCall{Name: name}, + } +} + +// Parent returns the address of the parent module instance of the receiver, or +// the receiver itself if there is no parent (if it's the root module address). +func (m ModuleInstance) Parent() ModuleInstance { + if len(m) == 0 { + return m + } + return m[:len(m)-1] +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. +// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + if len(m) == 0 { + return "" + } + // Calculate minimal necessary space (no instance keys). + l := 0 + for _, step := range m { + l += len(step.Name) + } + buf := strings.Builder{} + // 8 is len(".module.") which separates entries. + buf.Grow(l + len(m)*8) + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." 
+	}
+	return buf.String()
+}
+
+type moduleInstanceKey string
+
+func (m ModuleInstance) UniqueKey() UniqueKey {
+	return moduleInstanceKey(m.String())
+}
+
+func (mk moduleInstanceKey) uniqueKeySigil() {}
+
+// Equal returns true if the receiver and the given other value
+// contain the exact same parts.
+func (m ModuleInstance) Equal(o ModuleInstance) bool {
+	if len(m) != len(o) {
+		return false
+	}
+
+	for i := range m {
+		if m[i] != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Less returns true if the receiver should sort before the given other value
+// in a sorted list of addresses.
+func (m ModuleInstance) Less(o ModuleInstance) bool {
+	if len(m) != len(o) {
+		// Shorter path sorts first.
+		return len(m) < len(o)
+	}
+
+	for i := range m {
+		mS, oS := m[i], o[i]
+		switch {
+		case mS.Name != oS.Name:
+			return mS.Name < oS.Name
+		case mS.InstanceKey != oS.InstanceKey:
+			return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
+		}
+	}
+
+	return false
+}
+
+// Ancestors returns a slice containing the receiver and all of its ancestor
+// module instances, all the way up to (and including) the root module.
+// The result is ordered by depth, with the root module always first.
+//
+// Since the result always includes the root module, a caller may choose to
+// ignore it by slicing the result with [1:].
+func (m ModuleInstance) Ancestors() []ModuleInstance {
+	ret := make([]ModuleInstance, 0, len(m)+1)
+	for i := 0; i <= len(m); i++ {
+		ret = append(ret, m[:i])
+	}
+	return ret
+}
+
+// IsAncestor returns true if the receiver is an ancestor of the given
+// other value.
+func (m ModuleInstance) IsAncestor(o ModuleInstance) bool {
+	// Longer or equal sized paths means the receiver cannot
+	// be an ancestor of the given module instance.
+	if len(m) >= len(o) {
+		return false
+	}
+
+	for i, ms := range m {
+		if ms.Name != o[i].Name {
+			return false
+		}
+		if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Call returns the module call address that corresponds to the given module
+// instance, along with the address of the module instance that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// A single module call can produce potentially many module instances, so the
+// result discards any instance key that might be present on the last step
+// of the instance. To retain this, use CallInstance instead.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCall and then returns a slice of the receiver that excludes that
+// last part. This is just a convenience for situations where a call address
+// is required, such as when dealing with *Reference and Referenceable values.
+func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCall for root module")
+	}
+
+	inst, lastStep := m[:len(m)-1], m[len(m)-1]
+	return inst, ModuleCall{
+		Name: lastStep.Name,
+	}
+}
+
+// CallInstance returns the module call instance address that corresponds to
+// the given module instance, along with the address of the module instance
+// that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCallInstance and then returns a slice of the receiver that excludes
+// that last part. This is just a convenience for situations where a call
+// address is required, such as when dealing with *Reference and Referenceable
+// values.
+func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { + if len(m) == 0 { + panic("cannot produce ModuleCallInstance for root module") + } + + inst, lastStep := m[:len(m)-1], m[len(m)-1] + return inst, ModuleCallInstance{ + Call: ModuleCall{ + Name: lastStep.Name, + }, + Key: lastStep.InstanceKey, + } +} + +// TargetContains implements Targetable by returning true if the given other +// address either matches the receiver, is a sub-module-instance of the +// receiver, or is a targetable absolute address within a module that +// is contained within the reciever. +func (m ModuleInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + case Module: + if len(to) < len(m) { + // Can't be contained if the path is shorter + return false + } + // Other is contained if its steps match for the length of our own path. + for i, ourStep := range m { + otherStep := to[i] + + // We can't contain an entire module if we have a specific instance + // key. The case of NoKey is OK because this address is either + // meant to address an unexpanded module, or a single instance of + // that module, and both of those are a covered in-full by the + // Module address. + if ourStep.InstanceKey != NoKey { + return false + } + + if ourStep.Name != otherStep { + return false + } + } + // If we fall out here then the prefixed matched, so it's contained. + return true + + case ModuleInstance: + if len(to) < len(m) { + return false + } + for i, ourStep := range m { + otherStep := to[i] + + if ourStep.Name != otherStep.Name { + return false + } + + // if this is our last step, because all targets are parsed as + // instances, this may be a ModuleInstance intended to be used as a + // Module. 
+ if i == len(m)-1 { + if ourStep.InstanceKey == NoKey { + // If the other step is a keyed instance, then we contain that + // step, and if it isn't it's a match, which is true either way + return true + } + } + + if ourStep.InstanceKey != otherStep.InstanceKey { + return false + } + + } + return true + + case ConfigResource: + return m.TargetContains(to.Module) + + case AbsResource: + return m.TargetContains(to.Module) + + case AbsResourceInstance: + return m.TargetContains(to.Module) + + default: + return false + } +} + +// Module returns the address of the module that this instance is an instance +// of. +func (m ModuleInstance) Module() Module { + if len(m) == 0 { + return nil + } + ret := make(Module, len(m)) + for i, step := range m { + ret[i] = step.Name + } + return ret +} + +func (m ModuleInstance) AddrType() TargetableAddrType { + return ModuleInstanceAddrType +} + +func (m ModuleInstance) targetableSigil() { + // ModuleInstance is targetable +} + +func (m ModuleInstance) absMoveableSigil() { + // ModuleInstance is moveable +} + +// IsDeclaredByCall returns true if the receiver is an instance of the given +// AbsModuleCall. +func (m ModuleInstance) IsDeclaredByCall(other AbsModuleCall) bool { + // Compare len(m) to len(other.Module+1) because the final module instance + // step in other is stored in the AbsModuleCall.Call + if len(m) > len(other.Module)+1 || len(m) == 0 && len(other.Module) == 0 { + return false + } + + // Verify that the other's ModuleInstance matches the receiver. + inst, lastStep := other.Module, other.Call + for i := range inst { + if inst[i] != m[i] { + return false + } + } + + // Now compare the final step of the received with the other Call, where + // only the name needs to match. 
+ return lastStep.Name == m[len(m)-1].Name +} + +func (s ModuleInstanceStep) String() string { + if s.InstanceKey != NoKey { + return s.Name + s.InstanceKey.String() + } + return s.Name +} diff --git a/pkg/addrs/module_instance_test.go b/pkg/addrs/module_instance_test.go new file mode 100644 index 00000000000..a8de77f8b67 --- /dev/null +++ b/pkg/addrs/module_instance_test.go @@ -0,0 +1,175 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" +) + +func TestModuleInstanceEqual_true(t *testing.T) { + addrs := []string{ + "module.foo", + "module.foo.module.bar", + "module.foo[1].module.bar", + `module.foo["a"].module.bar["b"]`, + `module.foo["a"].module.bar.module.baz[3]`, + } + for _, m := range addrs { + t.Run(m, func(t *testing.T) { + addr, diags := ParseModuleInstanceStr(m) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + if !addr.Equal(addr) { + t.Fatalf("expected %#v to be equal to itself", addr) + } + }) + } +} + +func TestModuleInstanceEqual_false(t *testing.T) { + testCases := []struct { + left string + right string + }{ + { + "module.foo", + "module.bar", + }, + { + "module.foo", + "module.foo.module.bar", + }, + { + "module.foo[1]", + "module.bar[1]", + }, + { + `module.foo[1]`, + `module.foo["1"]`, + }, + { + "module.foo.module.bar", + "module.foo[1].module.bar", + }, + { + `module.foo.module.bar`, + `module.foo["a"].module.bar`, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + left, diags := ParseModuleInstanceStr(tc.left) + if len(diags) > 0 { + t.Fatalf("unexpected diags parsing %s: %s", tc.left, diags.Err()) + } + right, diags := ParseModuleInstanceStr(tc.right) + if len(diags) > 0 { + t.Fatalf("unexpected diags parsing %s: %s", tc.right, diags.Err()) + } + + if left.Equal(right) { + t.Fatalf("expected %#v not 
to be equal to %#v", left, right) + } + + if right.Equal(left) { + t.Fatalf("expected %#v not to be equal to %#v", right, left) + } + }) + } +} + +func BenchmarkStringShort(b *testing.B) { + addr, _ := ParseModuleInstanceStr(`module.foo`) + for n := 0; n < b.N; n++ { + addr.String() + } +} + +func BenchmarkStringLong(b *testing.B) { + addr, _ := ParseModuleInstanceStr(`module.southamerica-brazil-region.module.user-regional-desktops.module.user-name`) + for n := 0; n < b.N; n++ { + addr.String() + } +} + +func TestModuleInstance_IsDeclaredByCall(t *testing.T) { + tests := []struct { + instance ModuleInstance + call AbsModuleCall + want bool + }{ + { + ModuleInstance{}, + AbsModuleCall{}, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{}, + false, + }, + { + ModuleInstance{}, + AbsModuleCall{ + RootModuleInstance, + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{ // module.child + RootModuleInstance, + ModuleCall{Name: "child"}, + }, + true, + }, + { + mustParseModuleInstanceStr(`module.child`), + AbsModuleCall{ // module.kinder.module.child + mustParseModuleInstanceStr("module.kinder"), + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.kinder"), + // module.kinder.module.child contains module.kinder, but is not itself an instance of module.kinder + AbsModuleCall{ + mustParseModuleInstanceStr("module.kinder"), + ModuleCall{Name: "child"}, + }, + false, + }, + { + mustParseModuleInstanceStr("module.child"), + AbsModuleCall{ + mustParseModuleInstanceStr(`module.kinder["a"]`), + ModuleCall{Name: "kinder"}, + }, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%q.IsCallInstance(%q)", test.instance, test.call.String()), func(t *testing.T) { + got := test.instance.IsDeclaredByCall(test.call) + if got != test.want { + t.Fatal("wrong result") + } + }) + } +} + +func mustParseModuleInstanceStr(str string) ModuleInstance 
{ + mi, diags := ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(diags.ErrWithWarnings()) + } + return mi +} diff --git a/pkg/addrs/module_package.go b/pkg/addrs/module_package.go new file mode 100644 index 00000000000..bdf59bacbd5 --- /dev/null +++ b/pkg/addrs/module_package.go @@ -0,0 +1,51 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + tfaddr "github.com/opentofu/registry-address" +) + +// A ModulePackage represents a physical location where OpenTofu can retrieve +// a module package, which is an archive, repository, or other similar +// container which delivers the source code for one or more OpenTofu modules. +// +// A ModulePackage is a string in go-getter's address syntax. By convention, +// we use ModulePackage-typed values only for the result of successfully +// running the go-getter "detectors", which produces an address string which +// includes an explicit installation method prefix along with an address +// string in the format expected by that installation method. +// +// Note that although the "detector" phase of go-getter does do some simple +// normalization in certain cases, it isn't generally possible to compare +// two ModulePackage values to decide if they refer to the same package. Two +// equal ModulePackage values represent the same package, but there might be +// other non-equal ModulePackage values that also refer to that package, and +// there is no reliable way to determine that. +// +// Don't convert a user-provided string directly to ModulePackage. Instead, +// use ParseModuleSource with a remote module address and then access the +// ModulePackage value from the result, making sure to also handle the +// selected subdirectory if any. You should convert directly to ModulePackage +// only for a string that is hard-coded into the program (e.g. 
in a unit test) +// where you've ensured that it's already in the expected syntax. +type ModulePackage string + +func (p ModulePackage) String() string { + return string(p) +} + +// A ModuleRegistryPackage is an extra indirection over a ModulePackage where +// we use a module registry to translate a more symbolic address (and +// associated version constraint given out of band) into a physical source +// location. +// +// ModuleRegistryPackage is distinct from ModulePackage because they have +// disjoint use-cases: registry package addresses are only used to query a +// registry in order to find a real module package address. These being +// distinct is intended to help future maintainers more easily follow the +// series of steps in the module installer, with the help of the type checker. +type ModuleRegistryPackage = tfaddr.ModulePackage diff --git a/pkg/addrs/module_source.go b/pkg/addrs/module_source.go new file mode 100644 index 00000000000..5bb87d4b778 --- /dev/null +++ b/pkg/addrs/module_source.go @@ -0,0 +1,370 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "path" + "strings" + + "github.com/kubegems/opentofu/pkg/getmodules" + tfaddr "github.com/opentofu/registry-address" +) + +// ModuleSource is the general type for all three of the possible module source +// address types. The concrete implementations of this are ModuleSourceLocal, +// ModuleSourceRegistry, and ModuleSourceRemote. +type ModuleSource interface { + // String returns a full representation of the address, including any + // additional components that are typically implied by omission in + // user-written addresses. + // + // We typically use this longer representation in error message, in case + // the inclusion of normally-omitted components is helpful in debugging + // unexpected behavior. 
+ String() string + + // ForDisplay is similar to String but instead returns a representation of + // the idiomatic way to write the address in configuration, omitting + // components that are commonly just implied in addresses written by + // users. + // + // We typically use this shorter representation in informational messages, + // such as the note that we're about to start downloading a package. + ForDisplay() string + + moduleSource() +} + +var _ ModuleSource = ModuleSourceLocal("") +var _ ModuleSource = ModuleSourceRegistry{} +var _ ModuleSource = ModuleSourceRemote{} + +var moduleSourceLocalPrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +// ParseModuleSource parses a module source address as given in the "source" +// argument inside a "module" block in the configuration. +// +// For historical reasons this syntax is a bit overloaded, supporting three +// different address types: +// - Local paths starting with either ./ or ../, which are special because +// OpenTofu considers them to belong to the same "package" as the caller. +// - Module registry addresses, given as either NAMESPACE/NAME/SYSTEM or +// HOST/NAMESPACE/NAME/SYSTEM, in which case the remote registry serves +// as an indirection over the third address type that follows. +// - Various URL-like and other heuristically-recognized strings which +// we currently delegate to the external library go-getter. +// +// There is some ambiguity between the module registry addresses and go-getter's +// very liberal heuristics and so this particular function will typically treat +// an invalid registry address as some other sort of remote source address +// rather than returning an error. If you know that you're expecting a +// registry address in particular, use ParseModuleSourceRegistry instead, which +// can therefore expose more detailed error messages about registry address +// parsing in particular. 
+func ParseModuleSource(raw string) (ModuleSource, error) { + if isModuleSourceLocal(raw) { + localAddr, err := parseModuleSourceLocal(raw) + if err != nil { + // This is to make sure we really return a nil ModuleSource in + // this case, rather than an interface containing the zero + // value of ModuleSourceLocal. + return nil, err + } + return localAddr, nil + } + + // For historical reasons, whether an address is a registry + // address is defined only by whether it can be successfully + // parsed as one, and anything else must fall through to be + // parsed as a direct remote source, where go-getter might + // then recognize it as a filesystem path. This is odd + // but matches behavior we've had since OpenTofu v0.10 which + // existing modules may be relying on. + // (Notice that this means that there's never any path where + // the registry source parse error gets returned to the caller, + // which is annoying but has been true for many releases + // without it posing a serious problem in practice.) + if ret, err := ParseModuleSourceRegistry(raw); err == nil { + return ret, nil + } + + // If we get down here then we treat everything else as a + // remote address. In practice there's very little that + // go-getter doesn't consider invalid input, so even invalid + // nonsense will probably interpreted as _something_ here + // and then fail during installation instead. We can't + // really improve this situation for historical reasons. + remoteAddr, err := parseModuleSourceRemote(raw) + if err != nil { + // This is to make sure we really return a nil ModuleSource in + // this case, rather than an interface containing the zero + // value of ModuleSourceRemote. + return nil, err + } + return remoteAddr, nil +} + +// ModuleSourceLocal is a ModuleSource representing a local path reference +// from the caller's directory to the callee's directory within the same +// module package. 
+//
+// A "module package" here means a set of modules distributed together in
+// the same archive, repository, or similar. That's a significant distinction
+// because we always download and cache entire module packages at once,
+// and then create relative references within the same directory in order
+// to ensure all modules in the package are looking at a consistent filesystem
+// layout. We also assume that modules within a package are maintained together,
+// which means that cross-cutting maintenance across all of them would be
+// possible.
+//
+// The actual value of a ModuleSourceLocal is a normalized relative path using
+// forward slashes, even on operating systems that have other conventions,
+// because we're representing traversal within the logical filesystem
+// represented by the containing package, not actually within the physical
+// filesystem we unpacked the package into. We should typically not construct
+// ModuleSourceLocal values directly, except in tests where we can ensure
+// the value meets our assumptions. Use ParseModuleSource instead if the
+// input string is not hard-coded in the program.
+type ModuleSourceLocal string
+
+func parseModuleSourceLocal(raw string) (ModuleSourceLocal, error) {
+	// As long as we have a suitable prefix (detected by ParseModuleSource)
+	// there is no failure case for local paths: we just use the "path"
+	// package's cleaning logic to remove any redundant "./" and "../"
+	// sequences and any duplicate slashes and accept whatever that
+	// produces.
+
+	// Although using backslashes (Windows-style) is non-idiomatic, we do
+	// allow it and just normalize it away, so the rest of OpenTofu will
+	// only see the forward-slash form.
+	if strings.Contains(raw, `\`) {
+		// Note: We use string replacement rather than filepath.ToSlash
+		// here because the filepath package behavior varies by current
+		// platform, but we want to interpret configured paths the same
+		// across all platforms: these are virtual paths within a module
+		// package, not physical filesystem paths.
+		raw = strings.ReplaceAll(raw, `\`, "/")
+	}
+
+	// Note that we could've historically blocked using "//" in a path here
+	// in order to avoid confusion with the subdir syntax in remote addresses,
+	// but we historically just treated that as the same as a single slash
+	// and so we continue to do that now for compatibility. Clean strips those
+	// out and reduces them to just a single slash.
+	clean := path.Clean(raw)
+
+	// However, we do need to keep a single "./" on the front if it isn't
+	// a "../" path, or else it would be ambiguous with the registry address
+	// syntax.
+	if !strings.HasPrefix(clean, "../") {
+		clean = "./" + clean
+	}
+
+	return ModuleSourceLocal(clean), nil
+}
+
+func isModuleSourceLocal(raw string) bool {
+	for _, prefix := range moduleSourceLocalPrefixes {
+		if strings.HasPrefix(raw, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+func (s ModuleSourceLocal) moduleSource() {}
+
+func (s ModuleSourceLocal) String() string {
+	// We assume that our underlying string was already normalized at
+	// construction, so we just return it verbatim.
+	return string(s)
+}
+
+func (s ModuleSourceLocal) ForDisplay() string {
+	return string(s)
+}
+
+// ModuleSourceRegistry is a ModuleSource representing a module listed in an
+// OpenTofu module registry.
+//
+// A registry source isn't a direct source location but rather an indirection
+// over a ModuleSourceRemote. The job of a registry is to translate the
+// combination of a ModuleSourceRegistry and a module version number into
+// a concrete ModuleSourceRemote that OpenTofu will then download and
+// install.
+type ModuleSourceRegistry tfaddr.Module + +// DefaultModuleRegistryHost is the hostname used for registry-based module +// source addresses that do not have an explicit hostname. +const DefaultModuleRegistryHost = tfaddr.DefaultModuleRegistryHost + +// ParseModuleSourceRegistry is a variant of ParseModuleSource which only +// accepts module registry addresses, and will reject any other address type. +// +// Use this instead of ParseModuleSource if you know from some other surrounding +// context that an address is intended to be a registry address rather than +// some other address type, which will then allow for better error reporting +// due to the additional information about user intent. +func ParseModuleSourceRegistry(raw string) (ModuleSource, error) { + // Before we delegate to the "real" function we'll just make sure this + // doesn't look like a local source address, so we can return a better + // error message for that situation. + if isModuleSourceLocal(raw) { + return ModuleSourceRegistry{}, fmt.Errorf("can't use local directory %q as a module registry address", raw) + } + + src, err := tfaddr.ParseModuleSource(raw) + if err != nil { + return nil, err + } + return ModuleSourceRegistry{ + Package: src.Package, + Subdir: src.Subdir, + }, nil +} + +func (s ModuleSourceRegistry) moduleSource() {} + +func (s ModuleSourceRegistry) String() string { + if s.Subdir != "" { + return s.Package.String() + "//" + s.Subdir + } + return s.Package.String() +} + +func (s ModuleSourceRegistry) ForDisplay() string { + if s.Subdir != "" { + return s.Package.ForDisplay() + "//" + s.Subdir + } + return s.Package.ForDisplay() +} + +// ModuleSourceRemote is a ModuleSource representing a remote location from +// which we can retrieve a module package. +// +// A ModuleSourceRemote can optionally include a "subdirectory" path, which +// means that it's selecting a sub-directory of the given package to use as +// the entry point into the package. 
+type ModuleSourceRemote struct { + // Package is the address of the remote package that the requested + // module belongs to. + Package ModulePackage + + // If Subdir is non-empty then it represents a sub-directory within the + // remote package which will serve as the entry-point for the package. + // + // Subdir uses a normalized forward-slash-based path syntax within the + // virtual filesystem represented by the final package. It will never + // include `../` or `./` sequences. + Subdir string +} + +func parseModuleSourceRemote(raw string) (ModuleSourceRemote, error) { + var subDir string + raw, subDir = getmodules.SplitPackageSubdir(raw) + if strings.HasPrefix(subDir, "../") { + return ModuleSourceRemote{}, fmt.Errorf("subdirectory path %q leads outside of the module package", subDir) + } + + // A remote source address is really just a go-getter address resulting + // from go-getter's "detect" phase, which adds on the prefix specifying + // which protocol it should use and possibly also adjusts the + // protocol-specific part into different syntax. + // + // Note that for historical reasons this can potentially do network + // requests in order to disambiguate certain address types, although + // that's a legacy thing that is only for some specific, less-commonly-used + // address types. Most just do local string manipulation. We should + // aim to remove the network requests over time, if possible. + norm, moreSubDir, err := getmodules.NormalizePackageAddress(raw) + if err != nil { + // We must pass through the returned error directly here because + // the getmodules package has some special error types it uses + // for certain cases where the UI layer might want to include a + // more helpful error message. + return ModuleSourceRemote{}, err + } + + if moreSubDir != "" { + switch { + case subDir != "": + // The detector's own subdir goes first, because the + // subdir we were given is conceptually relative to + // the subdirectory that we just detected. 
+ subDir = path.Join(moreSubDir, subDir) + default: + subDir = path.Clean(moreSubDir) + } + if strings.HasPrefix(subDir, "../") { + // This would suggest a bug in a go-getter detector, but + // we'll catch it anyway to avoid doing something confusing + // downstream. + return ModuleSourceRemote{}, fmt.Errorf("detected subdirectory path %q of %q leads outside of the module package", subDir, norm) + } + } + + return ModuleSourceRemote{ + Package: ModulePackage(norm), + Subdir: subDir, + }, nil +} + +func (s ModuleSourceRemote) moduleSource() {} + +func (s ModuleSourceRemote) String() string { + base := s.Package.String() + + if s.Subdir != "" { + // Address contains query string + if strings.Contains(base, "?") { + parts := strings.SplitN(base, "?", 2) + return parts[0] + "//" + s.Subdir + "?" + parts[1] + } + return base + "//" + s.Subdir + } + return base +} + +func (s ModuleSourceRemote) ForDisplay() string { + // The two string representations are identical for this address type. + // This isn't really entirely true to the idea of "ForDisplay" since + // it'll often include some additional components added in by the + // go-getter detectors, but we don't have any function to turn a + // "detected" string back into an idiomatic shorthand the user might've + // entered. + return s.String() +} + +// FromRegistry can be called on a remote source address that was returned +// from a module registry, passing in the original registry source address +// that the registry was asked about, in order to get the effective final +// remote source address. +// +// Specifically, this method handles the situations where one or both of +// the two addresses contain subdirectory paths, combining both when necessary +// in order to ensure that both the registry's given path and the user's +// given path are both respected. +// +// This will return nonsense if given a registry address other than the one +// that generated the reciever via a registry lookup. 
+func (s ModuleSourceRemote) FromRegistry(given ModuleSourceRegistry) ModuleSourceRemote { + ret := s // not a pointer, so this is a shallow copy + + switch { + case s.Subdir != "" && given.Subdir != "": + ret.Subdir = path.Join(s.Subdir, given.Subdir) + case given.Subdir != "": + ret.Subdir = given.Subdir + } + + return ret +} diff --git a/pkg/addrs/module_source_test.go b/pkg/addrs/module_source_test.go new file mode 100644 index 00000000000..86ad6697da9 --- /dev/null +++ b/pkg/addrs/module_source_test.go @@ -0,0 +1,659 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" +) + +func TestParseModuleSource(t *testing.T) { + + absolutePath, absolutePathModulePackage := testDataAbsolutePath() + absolutePathSubdir, absolutePathSubdirModulePackage := testDataAbsolutePathSubdir() + + tests := map[string]struct { + input string + want ModuleSource + wantErr string + }{ + // Local paths + "local in subdirectory": { + input: "./child", + want: ModuleSourceLocal("./child"), + }, + "local in subdirectory non-normalized": { + input: "./nope/../child", + want: ModuleSourceLocal("./child"), + }, + "local in sibling directory": { + input: "../sibling", + want: ModuleSourceLocal("../sibling"), + }, + "local in sibling directory non-normalized": { + input: "./nope/../../sibling", + want: ModuleSourceLocal("../sibling"), + }, + "Windows-style local in subdirectory": { + input: `.\child`, + want: ModuleSourceLocal("./child"), + }, + "Windows-style local in subdirectory non-normalized": { + input: `.\nope\..\child`, + want: ModuleSourceLocal("./child"), + }, + "Windows-style local in sibling directory": { + input: `..\sibling`, + want: ModuleSourceLocal("../sibling"), + }, + "Windows-style local in sibling directory non-normalized": { + input: 
`.\nope\..\..\sibling`, + want: ModuleSourceLocal("../sibling"), + }, + "an abominable mix of different slashes": { + input: `./nope\nope/why\./please\don't`, + want: ModuleSourceLocal("./nope/nope/why/please/don't"), + }, + + // Registry addresses + // (NOTE: There is another test function TestParseModuleSourceRegistry + // which tests this situation more exhaustively, so this is just a + // token set of cases to see that we are indeed calling into the + // registry address parser when appropriate.) + "main registry implied": { + input: "hashicorp/subnets/cidr", + want: ModuleSourceRegistry{ + Package: ModuleRegistryPackage{ + Host: svchost.Hostname("registry.opentofu.org"), + Namespace: "hashicorp", + Name: "subnets", + TargetSystem: "cidr", + }, + Subdir: "", + }, + }, + "main registry implied, subdir": { + input: "hashicorp/subnets/cidr//examples/foo", + want: ModuleSourceRegistry{ + Package: ModuleRegistryPackage{ + Host: svchost.Hostname("registry.opentofu.org"), + Namespace: "hashicorp", + Name: "subnets", + TargetSystem: "cidr", + }, + Subdir: "examples/foo", + }, + }, + "main registry implied, escaping subdir": { + input: "hashicorp/subnets/cidr//../nope", + // NOTE: This error is actually being caught by the _remote package_ + // address parser, because any registry parsing failure falls back + // to that but both of them have the same subdir validation. This + // case is here to make sure that stays true, so we keep reporting + // a suitable error when the user writes a registry-looking thing. 
+ wantErr: `subdirectory path "../nope" leads outside of the module package`, + }, + "custom registry": { + input: "example.com/awesomecorp/network/happycloud", + want: ModuleSourceRegistry{ + Package: ModuleRegistryPackage{ + Host: svchost.Hostname("example.com"), + Namespace: "awesomecorp", + Name: "network", + TargetSystem: "happycloud", + }, + Subdir: "", + }, + }, + "custom registry, subdir": { + input: "example.com/awesomecorp/network/happycloud//examples/foo", + want: ModuleSourceRegistry{ + Package: ModuleRegistryPackage{ + Host: svchost.Hostname("example.com"), + Namespace: "awesomecorp", + Name: "network", + TargetSystem: "happycloud", + }, + Subdir: "examples/foo", + }, + }, + + // Remote package addresses + "github.com shorthand": { + input: "github.com/hashicorp/terraform-cidr-subnets", + want: ModuleSourceRemote{ + Package: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git"), + }, + }, + "github.com shorthand, subdir": { + input: "github.com/hashicorp/terraform-cidr-subnets//example/foo", + want: ModuleSourceRemote{ + Package: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git"), + Subdir: "example/foo", + }, + }, + "git protocol, URL-style": { + input: "git://example.com/code/baz.git", + want: ModuleSourceRemote{ + Package: ModulePackage("git://example.com/code/baz.git"), + }, + }, + "git protocol, URL-style, subdir": { + input: "git://example.com/code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + Package: ModulePackage("git://example.com/code/baz.git"), + Subdir: "bleep/bloop", + }, + }, + "git over HTTPS, URL-style": { + input: "git::https://example.com/code/baz.git", + want: ModuleSourceRemote{ + Package: ModulePackage("git::https://example.com/code/baz.git"), + }, + }, + "git over HTTPS, URL-style, subdir": { + input: "git::https://example.com/code/baz.git//bleep/bloop", + want: ModuleSourceRemote{ + Package: ModulePackage("git::https://example.com/code/baz.git"), + Subdir: "bleep/bloop", 
+			},
+		},
+		"git over HTTPS, URL-style, subdir, query parameters": {
+			input: "git::https://example.com/code/baz.git//bleep/bloop?otherthing=blah",
+			want: ModuleSourceRemote{
+				Package: ModulePackage("git::https://example.com/code/baz.git?otherthing=blah"),
+				Subdir:  "bleep/bloop",
+			},
+		},
+		"git over SSH, URL-style": {
+			input: "git::ssh://git@example.com/code/baz.git",
+			want: ModuleSourceRemote{
+				Package: ModulePackage("git::ssh://git@example.com/code/baz.git"),
+			},
+		},
+		"git over SSH, URL-style, subdir": {
+			input: "git::ssh://git@example.com/code/baz.git//bleep/bloop",
+			want: ModuleSourceRemote{
+				Package: ModulePackage("git::ssh://git@example.com/code/baz.git"),
+				Subdir:  "bleep/bloop",
+			},
+		},
+		"git over SSH, scp-style": {
+			input: "git::git@example.com:code/baz.git",
+			want: ModuleSourceRemote{
+				// Normalized to URL-style
+				Package: ModulePackage("git::ssh://git@example.com/code/baz.git"),
+			},
+		},
+		"git over SSH, scp-style, subdir": {
+			input: "git::git@example.com:code/baz.git//bleep/bloop",
+			want: ModuleSourceRemote{
+				// Normalized to URL-style
+				Package: ModulePackage("git::ssh://git@example.com/code/baz.git"),
+				Subdir:  "bleep/bloop",
+			},
+		},
+
+		// NOTE: We intentionally don't test the bitbucket.org shorthands
+		// here, because that detector makes direct HTTP requests to the
+		// Bitbucket API and thus isn't appropriate for unit testing.
+ + "Google Cloud Storage bucket implied, path prefix": { + input: "www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE", + want: ModuleSourceRemote{ + Package: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE"), + }, + }, + "Google Cloud Storage bucket, path prefix": { + input: "gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE", + want: ModuleSourceRemote{ + Package: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH_TO_MODULE"), + }, + }, + "Google Cloud Storage bucket implied, archive object": { + input: "www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip", + want: ModuleSourceRemote{ + Package: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip"), + }, + }, + "Google Cloud Storage bucket, archive object": { + input: "gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip", + want: ModuleSourceRemote{ + Package: ModulePackage("gcs::https://www.googleapis.com/storage/v1/BUCKET_NAME/PATH/TO/module.zip"), + }, + }, + + "Amazon S3 bucket implied, archive object": { + input: "s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip", + want: ModuleSourceRemote{ + Package: ModulePackage("s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip"), + }, + }, + "Amazon S3 bucket, archive object": { + input: "s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip", + want: ModuleSourceRemote{ + Package: ModulePackage("s3::https://s3-eu-west-1.amazonaws.com/examplecorp-terraform-modules/vpc.zip"), + }, + }, + + "HTTP URL": { + input: "http://example.com/module", + want: ModuleSourceRemote{ + Package: ModulePackage("http://example.com/module"), + }, + }, + "HTTPS URL": { + input: "https://example.com/module", + want: ModuleSourceRemote{ + Package: ModulePackage("https://example.com/module"), + }, + }, + "HTTPS URL, archive file": { + input: "https://example.com/module.zip", 
+ want: ModuleSourceRemote{ + Package: ModulePackage("https://example.com/module.zip"), + }, + }, + "HTTPS URL, forced archive file": { + input: "https://example.com/module?archive=tar", + want: ModuleSourceRemote{ + Package: ModulePackage("https://example.com/module?archive=tar"), + }, + }, + "HTTPS URL, forced archive file and checksum": { + input: "https://example.com/module?archive=tar&checksum=blah", + want: ModuleSourceRemote{ + // The query string only actually gets processed when we finally + // do the get, so "checksum=blah" is accepted as valid up + // at this parsing layer. + Package: ModulePackage("https://example.com/module?archive=tar&checksum=blah"), + }, + }, + "absolute filesystem path": { + // Although a local directory isn't really "remote", we do + // treat it as such because we still need to do all of the same + // high-level steps to work with these, even though "downloading" + // is replaced by a deep filesystem copy instead. + input: absolutePath, + want: ModuleSourceRemote{ + Package: ModulePackage(absolutePathModulePackage), + }, + }, + "absolute filesystem path, subdir": { + // This is a funny situation where the user wants to use a + // directory elsewhere on their system as a package containing + // multiple modules, but the entry point is not at the root + // of that subtree, and so they can use the usual subdir + // syntax to move the package root higher in the real filesystem. 
input: absolutePathSubdir,
+			want: ModuleSourceRemote{
+				Package: ModulePackage(absolutePathSubdirModulePackage),
+				Subdir:  "example",
+			},
+		},
+
+		"subdir escaping out of package": {
+			// This is general logic for all subdir regardless of installation
+			// protocol, but we're using a filesystem path here just as an
+			// easy placeholder.
+			input:   "/tmp/foo//example/../../invalid",
+			wantErr: `subdirectory path "../invalid" leads outside of the module package`,
+		},
+
+		"relative path without the needed prefix": {
+			input: "boop/bloop",
+			// For this case we return a generic error message from the addrs
+			// layer, but using a specialized error type which our module
+			// installer checks for and produces an extra hint for users who
+			// were intending to write a local path which then got
+			// misinterpreted as a remote source due to the missing prefix.
+			// However, the main message is generic here because this is really
+			// just a general "this string doesn't match any of our source
+			// address patterns" situation, not _necessarily_ about relative
+			// local paths.
+			wantErr: `OpenTofu cannot detect a supported external module source type for boop/bloop`,
+		},
+
+		"go-getter will accept all sorts of garbage": {
+			input: "dfgdfgsd:dgfhdfghdfghdfg/dfghdfghdfg",
+			want: ModuleSourceRemote{
+				// Unfortunately go-getter doesn't actually reject a totally
+				// invalid address like this until getting time, as long as
+				// it looks somewhat like a URL.
+ Package: ModulePackage("dfgdfgsd:dgfhdfghdfghdfg/dfghdfghdfg"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + addr, err := ParseModuleSource(test.input) + + if test.wantErr != "" { + switch { + case err == nil: + t.Errorf("unexpected success\nwant error: %s", test.wantErr) + case err.Error() != test.wantErr: + t.Errorf("wrong error messages\ngot: %s\nwant: %s", err.Error(), test.wantErr) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if diff := cmp.Diff(addr, test.want); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } + +} + +func TestModuleSourceRemoteFromRegistry(t *testing.T) { + t.Run("both have subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + Package: ModulePackage("boop"), + Subdir: "foo", + } + registry := ModuleSourceRegistry{ + Subdir: "bar", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "foo" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "bar" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "foo/bar"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("only remote has subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + Package: ModulePackage("boop"), + Subdir: "foo", + } + registry := ModuleSourceRegistry{ + Subdir: "", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "foo" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "foo"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("only registry has subdir", func(t *testing.T) { + remote := ModuleSourceRemote{ + Package: ModulePackage("boop"), + 
Subdir: "", + } + registry := ModuleSourceRegistry{ + Subdir: "bar", + } + gotAddr := remote.FromRegistry(registry) + if remote.Subdir != "" { + t.Errorf("FromRegistry modified the reciever; should be pure function") + } + if registry.Subdir != "bar" { + t.Errorf("FromRegistry modified the given address; should be pure function") + } + if got, want := gotAddr.Subdir, "bar"; got != want { + t.Errorf("wrong resolved subdir\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestParseModuleSourceRemote(t *testing.T) { + + tests := map[string]struct { + input string + wantString string + wantForDisplay string + wantErr string + }{ + "git over HTTPS, URL-style, query parameters": { + // Query parameters should be correctly appended after the Package + input: `git::https://example.com/code/baz.git?otherthing=blah`, + wantString: `git::https://example.com/code/baz.git?otherthing=blah`, + wantForDisplay: `git::https://example.com/code/baz.git?otherthing=blah`, + }, + "git over HTTPS, URL-style, subdir, query parameters": { + // Query parameters should be correctly appended after the Package and Subdir + input: `git::https://example.com/code/baz.git//bleep/bloop?otherthing=blah`, + wantString: `git::https://example.com/code/baz.git//bleep/bloop?otherthing=blah`, + wantForDisplay: `git::https://example.com/code/baz.git//bleep/bloop?otherthing=blah`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + remote, err := parseModuleSourceRemote(test.input) + + if test.wantErr != "" { + switch { + case err == nil: + t.Errorf("unexpected success\nwant error: %s", test.wantErr) + case err.Error() != test.wantErr: + t.Errorf("wrong error messages\ngot: %s\nwant: %s", err.Error(), test.wantErr) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if got, want := remote.String(), test.wantString; got != want { + t.Errorf("wrong String() result\ngot: %s\nwant: %s", got, want) + } + if got, want := remote.ForDisplay(), 
test.wantForDisplay; got != want { + t.Errorf("wrong ForDisplay() result\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestParseModuleSourceRegistry(t *testing.T) { + // We test parseModuleSourceRegistry alone here, in addition to testing + // it indirectly as part of TestParseModuleSource, because general + // module parsing unfortunately eats all of the error situations from + // registry passing by falling back to trying for a direct remote package + // address. + + // Historical note: These test cases were originally derived from the + // ones in the old internal/registry/regsrc package that the + // ModuleSourceRegistry type is replacing. That package had the notion + // of "normalized" addresses as separate from the original user input, + // but this new implementation doesn't try to preserve the original + // user input at all, and so the main string output is always normalized. + // + // That package also had some behaviors to turn the namespace, name, and + // remote system portions into lowercase, but apparently we didn't + // actually make use of that in the end and were preserving the case + // the user provided in the input, and so for backward compatibility + // we're continuing to do that here, at the expense of now making the + // "ForDisplay" output case-preserving where its predecessor in the + // old package wasn't. The main OpenTofu Registry at registry.opentofu.org + // is itself case-insensitive anyway, so our case-preserving here is + // entirely for the benefit of existing third-party registry + // implementations that might be case-sensitive, which we must remain + // compatible with now. 
+ + tests := map[string]struct { + input string + wantString string + wantForDisplay string + wantForProtocol string + wantErr string + }{ + "public registry": { + input: `hashicorp/consul/aws`, + wantString: `registry.opentofu.org/hashicorp/consul/aws`, + wantForDisplay: `hashicorp/consul/aws`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry with subdir": { + input: `hashicorp/consul/aws//foo`, + wantString: `registry.opentofu.org/hashicorp/consul/aws//foo`, + wantForDisplay: `hashicorp/consul/aws//foo`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry using explicit hostname": { + input: `registry.opentofu.org/hashicorp/consul/aws`, + wantString: `registry.opentofu.org/hashicorp/consul/aws`, + wantForDisplay: `hashicorp/consul/aws`, + wantForProtocol: `hashicorp/consul/aws`, + }, + "public registry with mixed case names": { + input: `HashiCorp/Consul/aws`, + wantString: `registry.opentofu.org/HashiCorp/Consul/aws`, + wantForDisplay: `HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with non-standard port": { + input: `Example.com:1234/HashiCorp/Consul/aws`, + wantString: `example.com:1234/HashiCorp/Consul/aws`, + wantForDisplay: `example.com:1234/HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with IDN hostname": { + input: `Испытание.com/HashiCorp/Consul/aws`, + wantString: `испытание.com/HashiCorp/Consul/aws`, + wantForDisplay: `испытание.com/HashiCorp/Consul/aws`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "private registry with IDN hostname and non-standard port": { + input: `Испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantString: `испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantForDisplay: `испытание.com:1234/HashiCorp/Consul/aws//Foo`, + wantForProtocol: `HashiCorp/Consul/aws`, + }, + "invalid hostname": { + input: `---.com/HashiCorp/Consul/aws`, + wantErr: `invalid module registry hostname "---.com"; internationalized 
domain names must be given as direct unicode characters, not in punycode`, + }, + "hostname with only one label": { + // This was historically forbidden in our initial implementation, + // so we keep it forbidden to avoid newly interpreting such + // addresses as registry addresses rather than remote source + // addresses. + input: `foo/var/baz/qux`, + wantErr: `invalid module registry hostname: must contain at least one dot`, + }, + "invalid target system characters": { + input: `foo/var/no-no-no`, + wantErr: `invalid target system "no-no-no": must be between one and 64 ASCII letters or digits`, + }, + "invalid target system length": { + input: `foo/var/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah`, + wantErr: `invalid target system "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah": must be between one and 64 ASCII letters or digits`, + }, + "invalid namespace": { + input: `boop!/var/baz`, + wantErr: `invalid namespace "boop!": must be between one and 64 characters, including ASCII letters, digits, dashes, and underscores, where dashes and underscores may not be the prefix or suffix`, + }, + "missing part with explicit hostname": { + input: `foo.com/var/baz`, + wantErr: `source address must have three more components after the hostname: the namespace, the name, and the target system`, + }, + "errant query string": { + input: `foo/var/baz?otherthing`, + wantErr: `module registry addresses may not include a query string portion`, + }, + "github.com": { + // We don't allow using github.com like a module registry because + // that conflicts with the historically-supported shorthand for + // installing directly from GitHub-hosted git repositories. 
+ input: `github.com/HashiCorp/Consul/aws`, + wantErr: `can't use "github.com" as a module registry host, because it's reserved for installing directly from version control repositories`, + }, + "bitbucket.org": { + // We don't allow using bitbucket.org like a module registry because + // that conflicts with the historically-supported shorthand for + // installing directly from BitBucket-hosted git repositories. + input: `bitbucket.org/HashiCorp/Consul/aws`, + wantErr: `can't use "bitbucket.org" as a module registry host, because it's reserved for installing directly from version control repositories`, + }, + "local path from current dir": { + // Can't use a local path when we're specifically trying to parse + // a _registry_ source address. + input: `./boop`, + wantErr: `can't use local directory "./boop" as a module registry address`, + }, + "local path from parent dir": { + // Can't use a local path when we're specifically trying to parse + // a _registry_ source address. + input: `../boop`, + wantErr: `can't use local directory "../boop" as a module registry address`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + addrI, err := ParseModuleSourceRegistry(test.input) + + if test.wantErr != "" { + switch { + case err == nil: + t.Errorf("unexpected success\nwant error: %s", test.wantErr) + case err.Error() != test.wantErr: + t.Errorf("wrong error messages\ngot: %s\nwant: %s", err.Error(), test.wantErr) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + addr, ok := addrI.(ModuleSourceRegistry) + if !ok { + t.Fatalf("wrong address type %T; want %T", addrI, addr) + } + + if got, want := addr.String(), test.wantString; got != want { + t.Errorf("wrong String() result\ngot: %s\nwant: %s", got, want) + } + if got, want := addr.ForDisplay(), test.wantForDisplay; got != want { + t.Errorf("wrong ForDisplay() result\ngot: %s\nwant: %s", got, want) + } + if got, want := 
addr.Package.ForRegistryProtocol(), test.wantForProtocol; got != want { + t.Errorf("wrong ForRegistryProtocol() result\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func testDataAbsolutePath() (absolutePath string, modulePackage string) { + absolutePath = "/tmp/foo/example" + modulePackage = "file:///tmp/foo/example" + if runtime.GOOS == "windows" { + absolutePath = "C:\\tmp\\foo\\example" + modulePackage = "C:\\tmp\\foo\\example" + } + return +} + +func testDataAbsolutePathSubdir() (absolutePath string, modulePackage string) { + absolutePath = "/tmp/foo//example" + modulePackage = "file:///tmp/foo" + if runtime.GOOS == "windows" { + absolutePath = "C:\\tmp\\foo//example" + modulePackage = "C:\\tmp\\foo" + } + return +} diff --git a/pkg/addrs/module_test.go b/pkg/addrs/module_test.go new file mode 100644 index 00000000000..9ef6b6576d9 --- /dev/null +++ b/pkg/addrs/module_test.go @@ -0,0 +1,178 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestModuleEqual_true(t *testing.T) { + modules := []Module{ + RootModule, + {"a"}, + {"a", "b"}, + {"a", "b", "c"}, + } + for _, m := range modules { + t.Run(m.String(), func(t *testing.T) { + if !m.Equal(m) { + t.Fatalf("expected %#v to be equal to itself", m) + } + }) + } +} + +func TestModuleEqual_false(t *testing.T) { + testCases := []struct { + left Module + right Module + }{ + { + RootModule, + Module{"a"}, + }, + { + Module{"a"}, + Module{"b"}, + }, + { + Module{"a"}, + Module{"a", "a"}, + }, + { + Module{"a", "b"}, + Module{"a", "B"}, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestModuleString(t *testing.T) { + testCases := map[string]Module{ + "": {}, + "module.alpha": { + "alpha", + }, + "module.alpha.module.beta": { + "alpha", + "beta", + }, + "module.alpha.module.beta.module.charlie": { + "alpha", + "beta", + "charlie", + }, + } + for str, module := range testCases { + t.Run(str, func(t *testing.T) { + if got, want := module.String(), str; got != want { + t.Errorf("wrong result: got %q, want %q", got, want) + } + }) + } +} + +func BenchmarkModuleStringShort(b *testing.B) { + module := Module{"a", "b"} + for n := 0; n < b.N; n++ { + module.String() + } +} + +func BenchmarkModuleStringLong(b *testing.B) { + module := Module{"southamerica-brazil-region", "user-regional-desktop", "user-name"} + for n := 0; n < b.N; n++ { + module.String() + } +} + +func TestParseModule(t *testing.T) { + t.Parallel() + + tests := []struct { + Input string + WantModule Module + 
WantErr string + }{ + { + Input: "module.a", + WantModule: []string{"a"}, + }, + { + Input: "module.a.module.b", + WantModule: []string{"a", "b"}, + }, + { + Input: "module.a.module.b.c.d", + WantErr: "Module address expected: It's not allowed to reference anything other than module here.", + }, + { + Input: "a.b.c.d", + WantErr: "Module address expected: It's not allowed to reference anything other than module here.", + }, + { + Input: "module", + WantErr: `Invalid address operator: Prefix "module." must be followed by a module name.`, + }, + { + Input: "module.a[0]", + WantErr: `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + { + Input: `module.a["k"]`, + WantErr: `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.Input, func(t *testing.T) { + t.Parallel() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + t.Fatalf("Bug in tests: %s", hclDiags.Error()) + } + + mod, diags := ParseModule(traversal) + + switch { + case test.WantErr != "": + if !diags.HasErrors() { + t.Fatalf("Unexpected success, wanted error: %s", test.WantErr) + } + + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("Mismatched error\nGot: %s\nWant: %s", gotErr, test.WantErr) + } + default: + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err().Error()) + } + if diff := cmp.Diff(test.WantModule, mod); diff != "" { + t.Fatalf("Mismatched result:\n%s", diff) + } + } + }) + } +} diff --git a/pkg/addrs/move_endpoint.go b/pkg/addrs/move_endpoint.go new file mode 100644 index 00000000000..fb624cd6109 --- /dev/null +++ b/pkg/addrs/move_endpoint.go @@ -0,0 +1,301 @@ +// Copyright (c) The 
OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// MoveEndpoint is to AbsMoveable and ConfigMoveable what Target is to
+// Targetable: a wrapping struct that captures the result of decoding an HCL
+// traversal representing a relative path from the current module to
+// a moveable object.
+//
+// Its name reflects that its primary purpose is for the "from" and "to"
+// addresses in a "moved" statement in the configuration, but it's also
+// valid to use MoveEndpoint for other similar mechanisms that give
+// OpenTofu hints about historical configuration changes that might
+// prompt creating a different plan than OpenTofu would by default.
+//
+// To obtain a full address from a MoveEndpoint you must use
+// either the package function UnifyMoveEndpoints (to get an AbsMoveable) or
+// the method ConfigMoveable (to get a ConfigMoveable).
+type MoveEndpoint struct {
+	// SourceRange is the location of the physical endpoint address
+	// in configuration, if this MoveEndpoint was decoded from a
+	// configuration expression.
+	SourceRange tfdiags.SourceRange
+
+	// Internally we (ab)use AbsMoveable as the representation of our
+	// relative address, even though everywhere else in OpenTofu
+	// AbsMoveable always represents a fully-absolute address.
+	// In practice, due to the implementation of ParseMoveEndpoint,
+	// this is always either a ModuleInstance or an AbsResourceInstance,
+	// and we only consider the possibility of interpreting it as
+	// a AbsModuleCall or an AbsResource in UnifyMoveEndpoints.
+	// This is intentionally unexported to encapsulate this unusual
+	// meaning of AbsMoveable.
+	relSubject AbsMoveable
+}
+
+func (e *MoveEndpoint) ObjectKind() MoveEndpointKind {
+	return absMoveableEndpointKind(e.relSubject)
+}
+
+func (e *MoveEndpoint) String() string {
+	// Our internal pseudo-AbsMoveable representing the relative
+	// address (either ModuleInstance or AbsResourceInstance) is
+	// a good enough proxy for the relative move endpoint address
+	// serialization.
+	return e.relSubject.String()
+}
+
+func (e *MoveEndpoint) Equal(other *MoveEndpoint) bool {
+	switch {
+	case (e == nil) != (other == nil):
+		return false
+	case e == nil:
+		return true
+	default:
+		// Since we only use ModuleInstance and AbsResourceInstance in our
+		// string representation, we have no ambiguity between address types
+		// and can safely just compare the string representations to
+		// compare the relSubject values.
+		return e.String() == other.String() && e.SourceRange == other.SourceRange
+	}
+}
+
+// MightUnifyWith returns true if it is possible that a later call to
+// UnifyMoveEndpoints might succeed if given the receiver and the other
+// given endpoint.
+//
+// This is intended for early static validation of obviously-wrong situations,
+// although there are still various semantic errors that this cannot catch.
+func (e *MoveEndpoint) MightUnifyWith(other *MoveEndpoint) bool {
+	// For our purposes here we'll just do a unify without a base module
+	// address, because the rules for whether unify can succeed depend
+	// only on the relative part of the addresses, not on which module
+	// they were declared in.
+	from, to := UnifyMoveEndpoints(RootModule, e, other)
+	return from != nil && to != nil
+}
+
+// ConfigMoveable transforms the receiver into a ConfigMoveable by resolving it
+// relative to the given base module, which should be the module where
+// the MoveEndpoint expression was found.
+// +// The result is useful for finding the target object in the configuration, +// but it's not sufficient for fully interpreting a move statement because +// it lacks the specific module and resource instance keys. +func (e *MoveEndpoint) ConfigMoveable(baseModule Module) ConfigMoveable { + addr := e.relSubject + switch addr := addr.(type) { + case ModuleInstance: + ret := make(Module, 0, len(baseModule)+len(addr)) + ret = append(ret, baseModule...) + ret = append(ret, addr.Module()...) + return ret + case AbsResourceInstance: + moduleAddr := make(Module, 0, len(baseModule)+len(addr.Module)) + moduleAddr = append(moduleAddr, baseModule...) + moduleAddr = append(moduleAddr, addr.Module.Module()...) + return ConfigResource{ + Module: moduleAddr, + Resource: addr.Resource.Resource, + } + default: + // The above should be exhaustive for all of the types + // that ParseMoveEndpoint produces as our intermediate + // address representation. + panic(fmt.Sprintf("unsupported address type %T", addr)) + } + +} + +// ParseMoveEndpoint attempts to interpret the given traversal as a +// "move endpoint" address, which is a relative path from the module containing +// the traversal to a movable object in either the same module or in some +// child module. +// +// This deals only with the syntactic element of a move endpoint expression +// in configuration. Before the result will be useful you'll need to combine +// it with the address of the module where it was declared in order to get +// an absolute address relative to the root module. 
+func ParseMoveEndpoint(traversal hcl.Traversal) (*MoveEndpoint, tfdiags.Diagnostics) {
+	path, remain, diags := parseModuleInstancePrefix(traversal)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange())
+
+	if len(remain) == 0 {
+		return &MoveEndpoint{
+			relSubject:  path,
+			SourceRange: rng,
+		}, diags
+	}
+
+	riAddr, moreDiags := parseResourceInstanceUnderModule(path, remain)
+	diags = diags.Append(moreDiags)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	return &MoveEndpoint{
+		relSubject:  riAddr,
+		SourceRange: rng,
+	}, diags
+}
+
+// UnifyMoveEndpoints takes a pair of MoveEndpoint objects representing the
+// "from" and "to" addresses in a moved block, and returns a pair of
+// MoveEndpointInModule addresses guaranteed to be of the same dynamic type
+// that represent what the two MoveEndpoint addresses refer to.
+//
+// moduleAddr must be the address of the module where the move was declared.
+//
+// This function deals both with the conversion from relative to absolute
+// addresses and with resolving the ambiguity between no-key instance
+// addresses and whole-object addresses, returning the least specific
+// address type possible.
+//
+// Not all combinations of addresses are unifiable: the two addresses must
+// either both include resources or both just be modules. If the two
+// given addresses are incompatible then UnifyMoveEndpoints returns (nil, nil),
+// in which case the caller should typically report an error to the user
+// stating the unification constraints.
+func UnifyMoveEndpoints(moduleAddr Module, relFrom, relTo *MoveEndpoint) (modFrom, modTo *MoveEndpointInModule) {
+
+	// First we'll make a decision about which address type we're
+	// ultimately trying to unify to. For our internal purposes
+	// here we're going to borrow TargetableAddrType just as a
+	// convenient way to talk about our address types, even though
+	// targetable address types are not 100% aligned with moveable
+	// address types.
+	fromType := relFrom.internalAddrType()
+	toType := relTo.internalAddrType()
+	var wantType TargetableAddrType
+
+	// Our goal here is to choose the whole-resource or whole-module-call
+	// addresses if both agree on it, but to use specific instance addresses
+	// otherwise. This is a somewhat-arbitrary way to resolve syntactic
+	// ambiguity between the two situations which allows both for renaming
+	// whole resources and for switching from a single-instance object to
+	// a multi-instance object.
+	switch {
+	case fromType == AbsResourceInstanceAddrType || toType == AbsResourceInstanceAddrType:
+		wantType = AbsResourceInstanceAddrType
+	case fromType == AbsResourceAddrType || toType == AbsResourceAddrType:
+		wantType = AbsResourceAddrType
+	case fromType == ModuleInstanceAddrType || toType == ModuleInstanceAddrType:
+		wantType = ModuleInstanceAddrType
+	case fromType == ModuleAddrType || toType == ModuleAddrType:
+		// NOTE: We're fudging a little here and using
+		// ModuleAddrType to represent AbsModuleCall rather
+		// than Module.
+		wantType = ModuleAddrType
+	default:
+		panic("unhandled move address types")
+	}
+
+	modFrom = relFrom.prepareMoveEndpointInModule(moduleAddr, wantType)
+	modTo = relTo.prepareMoveEndpointInModule(moduleAddr, wantType)
+	if modFrom == nil || modTo == nil {
+		// if either of them failed then they both failed, to make the
+		// caller's life a little easier.
+		return nil, nil
+	}
+	return modFrom, modTo
+}
+
+func (e *MoveEndpoint) prepareMoveEndpointInModule(moduleAddr Module, wantType TargetableAddrType) *MoveEndpointInModule {
+	// relAddr can only be either AbsResourceInstance or ModuleInstance, the
+	// internal intermediate representation produced by ParseMoveEndpoint.
+	relAddr := e.relSubject
+
+	switch relAddr := relAddr.(type) {
+	case ModuleInstance:
+		switch wantType {
+		case ModuleInstanceAddrType:
+			// Since our internal representation is already a module instance,
+			// we can just rewrap this one.
+			return &MoveEndpointInModule{
+				SourceRange: e.SourceRange,
+				module:      moduleAddr,
+				relSubject:  relAddr,
+			}
+		case ModuleAddrType:
+			// NOTE: We're fudging a little here and using
+			// ModuleAddrType to represent AbsModuleCall rather
+			// than Module.
+			callerAddr, callAddr := relAddr.Call()
+			absCallAddr := AbsModuleCall{
+				Module: callerAddr,
+				Call:   callAddr,
+			}
+			return &MoveEndpointInModule{
+				SourceRange: e.SourceRange,
+				module:      moduleAddr,
+				relSubject:  absCallAddr,
+			}
+		default:
+			return nil // can't make any other types from a ModuleInstance
+		}
+	case AbsResourceInstance:
+		switch wantType {
+		case AbsResourceInstanceAddrType:
+			return &MoveEndpointInModule{
+				SourceRange: e.SourceRange,
+				module:      moduleAddr,
+				relSubject:  relAddr,
+			}
+		case AbsResourceAddrType:
+			return &MoveEndpointInModule{
+				SourceRange: e.SourceRange,
+				module:      moduleAddr,
+				relSubject:  relAddr.ContainingResource(),
+			}
+		default:
+			return nil // can't make any other types from an AbsResourceInstance
+		}
+	default:
+		panic(fmt.Sprintf("unhandled address type %T", relAddr))
+	}
+}
+
+// internalAddrType helps facilitate our slight abuse of TargetableAddrType
+// as a way to talk about our different possible result address types in
+// UnifyMoveEndpoints.
+//
+// It's not really correct to use TargetableAddrType in this way, because
+// it's for Targetable rather than for AbsMoveable, but as long as the two
+// remain aligned enough it saves introducing yet another enumeration with
+// similar members that would be for internal use only anyway.
+func (e *MoveEndpoint) internalAddrType() TargetableAddrType {
+	switch addr := e.relSubject.(type) {
+	case ModuleInstance:
+		if !addr.IsRoot() && addr[len(addr)-1].InstanceKey == NoKey {
+			// NOTE: We're fudging a little here and using
+			// ModuleAddrType to represent AbsModuleCall rather
+			// than Module.
+			return ModuleAddrType
+		}
+		return ModuleInstanceAddrType
+	case AbsResourceInstance:
+		if addr.Resource.Key == NoKey {
+			return AbsResourceAddrType
+		}
+		return AbsResourceInstanceAddrType
+	default:
+		// The above should cover all of the address types produced
+		// by ParseMoveEndpoint.
+		panic(fmt.Sprintf("unsupported address type %T", addr))
+	}
+}
diff --git a/pkg/addrs/move_endpoint_kind.go b/pkg/addrs/move_endpoint_kind.go
new file mode 100644
index 00000000000..e29acfc7f56
--- /dev/null
+++ b/pkg/addrs/move_endpoint_kind.go
@@ -0,0 +1,38 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import "fmt"
+
+// MoveEndpointKind represents the different kinds of object that a movable
+// address can refer to.
+type MoveEndpointKind rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type MoveEndpointKind
+
+const (
+	// MoveEndpointModule indicates that a move endpoint either refers to
+	// an individual module instance or to all instances of a particular
+	// module call.
+	MoveEndpointModule MoveEndpointKind = 'M'
+
+	// MoveEndpointResource indicates that a move endpoint either refers to
+	// an individual resource instance or to all instances of a particular
+	// resource.
+	MoveEndpointResource MoveEndpointKind = 'R'
+)
+
+func absMoveableEndpointKind(addr AbsMoveable) MoveEndpointKind {
+	switch addr := addr.(type) {
+	case ModuleInstance, AbsModuleCall:
+		return MoveEndpointModule
+	case AbsResourceInstance, AbsResource:
+		return MoveEndpointResource
+	default:
+		// The above should be exhaustive for all AbsMoveable types.
+		panic(fmt.Sprintf("unsupported address type %T", addr))
+	}
+}
diff --git a/pkg/addrs/move_endpoint_module.go b/pkg/addrs/move_endpoint_module.go
new file mode 100644
index 00000000000..b65a4db60cc
--- /dev/null
+++ b/pkg/addrs/move_endpoint_module.go
@@ -0,0 +1,745 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// anyKeyImpl is the InstanceKey representation indicating a wildcard, which
+// matches all possible keys. This is only used internally for matching
+// combinations of address types, where only portions of the path contain key
+// information.
+type anyKeyImpl rune
+
+func (k anyKeyImpl) instanceKeySigil() {
+}
+
+func (k anyKeyImpl) String() string {
+	return fmt.Sprintf("[%s]", string(k))
+}
+
+func (k anyKeyImpl) Value() cty.Value {
+	return cty.StringVal(string(k))
+}
+
+// anyKey is the only valid value of anyKeyImpl.
+var anyKey = anyKeyImpl('*')
+
+// MoveEndpointInModule annotates a MoveEndpoint with the address of the
+// module where it was declared, which is the form we use for resolving
+// whether move statements chain from or are nested within other move
+// statements.
+type MoveEndpointInModule struct {
+	// SourceRange is the location of the physical endpoint address
+	// in configuration, if this MoveEndpoint was decoded from a
+	// configuration expression.
+	SourceRange tfdiags.SourceRange
+
+	// The internals are unexported here because, as with MoveEndpoint,
+	// we're somewhat abusing AbsMoveable here to represent an address
+	// relative to the module, rather than as an absolute address.
+	// Conceptually, the following two fields represent a matching pattern
+	// for AbsMoveables where the elements of "module" behave as
+	// ModuleInstanceStep values with a wildcard instance key, because
+	// a moved block in a module affects all instances of that module.
+	// Unlike MoveEndpoint, relSubject in this case can be any of the
+	// address types that implement AbsMoveable.
+	module     Module
+	relSubject AbsMoveable
+}
+
+// ImpliedMoveStatementEndpoint is a special constructor for MoveEndpointInModule
+// which is suitable only for constructing "implied" move statements, which
+// means that we inferred the statement automatically rather than building it
+// from an explicit block in the configuration.
+//
+// Implied move endpoints, just as for the statements they are embedded in,
+// have somewhat-related-but-imprecise source ranges, typically referring to
+// some general configuration construct that implied the statement, because
+// by definition there is no explicit move endpoint expression in this case.
+func ImpliedMoveStatementEndpoint(addr AbsResourceInstance, rng tfdiags.SourceRange) *MoveEndpointInModule {
+	// implied move endpoints always belong to the root module, because each
+	// one refers to a single resource instance inside a specific module
+	// instance, rather than all instances of the module where the resource
+	// was declared.
+	return &MoveEndpointInModule{
+		SourceRange: rng,
+		module:      RootModule,
+		relSubject:  addr,
+	}
+}
+
+func (e *MoveEndpointInModule) ObjectKind() MoveEndpointKind {
+	return absMoveableEndpointKind(e.relSubject)
+}
+
+// String produces a string representation of the object matching pattern
+// represented by the receiver.
+// +// Since there is no direct syntax for representing such an object matching +// pattern, this function uses a splat-operator-like representation to stand +// in for the wildcard instance keys. +func (e *MoveEndpointInModule) String() string { + if e == nil { + return "" + } + var buf strings.Builder + for _, name := range e.module { + buf.WriteString("module.") + buf.WriteString(name) + buf.WriteString("[*].") + } + buf.WriteString(e.relSubject.String()) + + // For consistency we'll also use the splat-like wildcard syntax to + // represent the final step being either a resource or module call + // rather than an instance, so we can more easily distinguish the two + // in the string representation. + switch e.relSubject.(type) { + case AbsModuleCall, AbsResource: + buf.WriteString("[*]") + } + + return buf.String() +} + +// Equal returns true if the reciever represents the same matching pattern +// as the other given endpoint, ignoring the source location information. +// +// This is not an optimized function and is here primarily to help with +// writing concise assertions in test code. +func (e *MoveEndpointInModule) Equal(other *MoveEndpointInModule) bool { + if (e == nil) != (other == nil) { + return false + } + if !e.module.Equal(other.module) { + return false + } + // This assumes that all of our possible "movables" are trivially + // comparable with reflect, which is true for all of them at the time + // of writing. + return reflect.DeepEqual(e.relSubject, other.relSubject) +} + +// Module returns the address of the module where the receiving address was +// declared. +func (e *MoveEndpointInModule) Module() Module { + return e.module +} + +// InModuleInstance returns an AbsMoveable address which concatenates the +// given module instance address with the receiver's relative object selection +// to produce one example of an instance that might be affected by this +// move statement. 
+//
+// The result is meaningful only if the given module instance is an instance
+// of the same module returned by the method Module. InModuleInstance doesn't
+// fully verify that (aside from some cheap/easy checks), but it will produce
+// meaningless garbage if not.
+func (e *MoveEndpointInModule) InModuleInstance(modInst ModuleInstance) AbsMoveable {
+	if len(modInst) != len(e.module) {
+		// We don't check all of the steps to make sure that their names match,
+		// because it would be expensive to do that repeatedly for every
+		// instance of a module, but if the lengths don't match then that's
+		// _obviously_ wrong.
+		panic("given instance address does not match module address")
+	}
+	switch relSubject := e.relSubject.(type) {
+	case ModuleInstance:
+		ret := make(ModuleInstance, 0, len(modInst)+len(relSubject))
+		ret = append(ret, modInst...)
+		ret = append(ret, relSubject...)
+		return ret
+	case AbsModuleCall:
+		retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module))
+		retModAddr = append(retModAddr, modInst...)
+		retModAddr = append(retModAddr, relSubject.Module...)
+		return relSubject.Call.Absolute(retModAddr)
+	case AbsResourceInstance:
+		retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module))
+		retModAddr = append(retModAddr, modInst...)
+		retModAddr = append(retModAddr, relSubject.Module...)
+		return relSubject.Resource.Absolute(retModAddr)
+	case AbsResource:
+		retModAddr := make(ModuleInstance, 0, len(modInst)+len(relSubject.Module))
+		retModAddr = append(retModAddr, modInst...)
+		retModAddr = append(retModAddr, relSubject.Module...)
+		return relSubject.Resource.Absolute(retModAddr)
+	default:
+		panic(fmt.Sprintf("unexpected move subject type %T", relSubject))
+	}
+}
+
+// ModuleCallTraversals returns both the address of the module where the
+// receiver was declared and any other module calls it traverses through
+// while selecting a particular object to move.
+//
+// This is a rather special-purpose function here mainly to support our
+// validation rule that a module can only traverse down into child modules.
+func (e *MoveEndpointInModule) ModuleCallTraversals() (Module, []ModuleCall) {
+	// We're returning []ModuleCall rather than Module here to make it clearer
+	// that this is a relative sequence of calls rather than an absolute
+	// module path.
+
+	var steps []ModuleInstanceStep
+	switch relSubject := e.relSubject.(type) {
+	case ModuleInstance:
+		// We want all of the steps except the last one here, because the
+		// last one is always selecting something declared in the same module
+		// even though our address structure doesn't capture that.
+		steps = []ModuleInstanceStep(relSubject[:len(relSubject)-1])
+	case AbsModuleCall:
+		steps = []ModuleInstanceStep(relSubject.Module)
+	case AbsResourceInstance:
+		steps = []ModuleInstanceStep(relSubject.Module)
+	case AbsResource:
+		steps = []ModuleInstanceStep(relSubject.Module)
+	default:
+		panic(fmt.Sprintf("unexpected move subject type %T", relSubject))
+	}
+
+	ret := make([]ModuleCall, len(steps))
+	for i, step := range steps {
+		ret[i] = ModuleCall{Name: step.Name}
+	}
+	return e.module, ret
+}
+
+// synthModuleInstance constructs a module instance out of the module path and
+// any module portion of the relSubject, substituting Module and Call segments
+// with ModuleInstanceStep using the anyKey value.
+// This is only used internally for comparison of these complete paths, but
+// does not represent how the individual parts are handled elsewhere in the
+// code.
+func (e *MoveEndpointInModule) synthModuleInstance() ModuleInstance {
+	var inst ModuleInstance
+
+	for _, mod := range e.module {
+		inst = append(inst, ModuleInstanceStep{Name: mod, InstanceKey: anyKey})
+	}
+
+	switch sub := e.relSubject.(type) {
+	case ModuleInstance:
+		inst = append(inst, sub...)
+	case AbsModuleCall:
+		inst = append(inst, sub.Module...)
+		inst = append(inst, ModuleInstanceStep{Name: sub.Call.Name, InstanceKey: anyKey})
+	case AbsResource:
+		inst = append(inst, sub.Module...)
+	case AbsResourceInstance:
+		inst = append(inst, sub.Module...)
+	default:
+		panic(fmt.Sprintf("unhandled relative address type %T", sub))
+	}
+
+	return inst
+}
+
+// SelectsModule returns true if the receiver directly selects either
+// the given module or a resource nested directly inside that module.
+//
+// This is a good function to use to decide which modules in a state
+// to consider when processing a particular move statement. For a
+// module move the given module itself is what will move, while a
+// resource move indicates that we should search each of the resources in
+// the given module to see if they match.
+func (e *MoveEndpointInModule) SelectsModule(addr ModuleInstance) bool {
+	synthInst := e.synthModuleInstance()
+
+	// In order to match the given module instance, our combined path must be
+	// equal in length.
+	if len(synthInst) != len(addr) {
+		return false
+	}
+
+	for i, step := range synthInst {
+		switch step.InstanceKey {
+		case anyKey:
+			// we can match any key as long as the name matches
+			if step.Name != addr[i].Name {
+				return false
+			}
+		default:
+			if step != addr[i] {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// SelectsResource returns true if the receiver directly selects either
+// the given resource or one of its instances.
+func (e *MoveEndpointInModule) SelectsResource(addr AbsResource) bool {
+	// Only a subset of subject types can possibly select a resource, so
+	// we'll take care of those quickly before we do anything more expensive.
+	switch e.relSubject.(type) {
+	case AbsResource, AbsResourceInstance:
+		// okay
+	default:
+		return false // can't possibly match
+	}
+
+	if !e.SelectsModule(addr.Module) {
+		return false
+	}
+
+	// If we get here then we know the module part matches, so we only need
+	// to worry about the relative resource part.
+	switch relSubject := e.relSubject.(type) {
+	case AbsResource:
+		return addr.Resource.Equal(relSubject.Resource)
+	case AbsResourceInstance:
+		// We intentionally ignore the instance key, because we consider
+		// instances to be part of the resource they belong to.
+		return addr.Resource.Equal(relSubject.Resource.Resource)
+	default:
+		// We should've filtered out all other types above
+		panic(fmt.Sprintf("unsupported relSubject type %T", relSubject))
+	}
+}
+
+// moduleInstanceCanMatch indicates that modA can match modB taking into
+// account steps with an anyKey InstanceKey as wildcards. The comparison of
+// wildcard steps is done symmetrically, because varying portions of either
+// instance's path could have been derived from configuration vs evaluation.
+// The length of modA must be equal or shorter than the length of modB.
+func moduleInstanceCanMatch(modA, modB ModuleInstance) bool {
+	for i, step := range modA {
+		switch {
+		case step.InstanceKey == anyKey || modB[i].InstanceKey == anyKey:
+			// we can match any key as long as the names match
+			if step.Name != modB[i].Name {
+				return false
+			}
+		default:
+			if step != modB[i] {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// CanChainFrom returns true if the receiver describes an address that could
+// potentially select an object that the other given address could select.
+//
+// In other words, this decides whether the move chaining rule applies, if
+// the receiver is the "to" from one statement and the other given address
+// is the "from" of another statement.
+func (e *MoveEndpointInModule) CanChainFrom(other *MoveEndpointInModule) bool {
+	eMod := e.synthModuleInstance()
+	oMod := other.synthModuleInstance()
+
+	// if the complete paths are different lengths, these cannot refer to the
+	// same value.
+	if len(eMod) != len(oMod) {
+		return false
+	}
+	if !moduleInstanceCanMatch(oMod, eMod) {
+		return false
+	}
+
+	eSub := e.relSubject
+	oSub := other.relSubject
+
+	switch oSub := oSub.(type) {
+	case AbsModuleCall, ModuleInstance:
+		switch eSub.(type) {
+		case AbsModuleCall, ModuleInstance:
+			// we already know the complete module path including any final
+			// module call name is equal.
+			return true
+		}
+
+	case AbsResource:
+		switch eSub := eSub.(type) {
+		case AbsResource:
+			return eSub.Resource.Equal(oSub.Resource)
+		}
+
+	case AbsResourceInstance:
+		switch eSub := eSub.(type) {
+		case AbsResourceInstance:
+			return eSub.Resource.Equal(oSub.Resource)
+		}
+	}
+
+	return false
+}
+
+// NestedWithin returns true if the receiver describes an address that is
+// contained within one of the objects that the given other address could
+// select.
+func (e *MoveEndpointInModule) NestedWithin(other *MoveEndpointInModule) bool {
+	eMod := e.synthModuleInstance()
+	oMod := other.synthModuleInstance()
+
+	// In order to be nested within the given endpoint, the module path must be
+	// shorter or equal.
+	if len(oMod) > len(eMod) {
+		return false
+	}
+
+	if !moduleInstanceCanMatch(oMod, eMod) {
+		return false
+	}
+
+	eSub := e.relSubject
+	oSub := other.relSubject
+
+	switch oSub := oSub.(type) {
+	case AbsModuleCall:
+		switch eSub.(type) {
+		case AbsModuleCall:
+			// we know the other endpoint selects our module, but if we are
+			// also a module call our path must be longer to be nested.
+			return len(eMod) > len(oMod)
+		}
+
+		return true
+
+	case ModuleInstance:
+		switch eSub.(type) {
+		case ModuleInstance, AbsModuleCall:
+			// a nested module must have a longer path
+			return len(eMod) > len(oMod)
+		}
+
+		return true
+
+	case AbsResource:
+		if len(eMod) != len(oMod) {
+			// these resources are from different modules
+			return false
+		}
+
+		// A resource can only contain a resource instance.
+		switch eSub := eSub.(type) {
+		case AbsResourceInstance:
+			return eSub.Resource.Resource.Equal(oSub.Resource)
+		}
+	}
+
+	return false
+}
+
+// matchModuleInstancePrefix is an internal helper to decide whether the given
+// module instance address refers to either the module where the move endpoint
+// was declared or some descendant of that module.
+//
+// If so, it will split the given address into two parts: the "prefix" part
+// which corresponds with the module where the statement was declared, and
+// the "relative" part which is the remainder that the relSubject of the
+// statement might match against.
+//
+// The second return value is another example of our light abuse of
+// ModuleInstance to represent _relative_ module references rather than
+// absolute: it's a module instance address relative to the first return value.
+// Because the exported idea of ModuleInstance represents only _absolute_
+// module instance addresses, we mustn't expose that value through any exported
+// API.
+func (e *MoveEndpointInModule) matchModuleInstancePrefix(instAddr ModuleInstance) (ModuleInstance, ModuleInstance, bool) {
+	if len(e.module) > len(instAddr) {
+		return nil, nil, false // too short to possibly match
+	}
+	for i := range e.module {
+		if e.module[i] != instAddr[i].Name {
+			return nil, nil, false
+		}
+	}
+	// If we get here then we have a match, so we'll slice up the input
+	// to produce the prefix and match segments.
+	return instAddr[:len(e.module)], instAddr[len(e.module):], true
+}
+
+// MoveDestination considers an address representing a module
+// instance in the context of source and destination move endpoints and then,
+// if the module address matches the from endpoint, returns the corresponding
+// new module address that the object should move to.
+//
+// MoveDestination will return false in its second return value if the receiver
+// doesn't match fromMatch, indicating that the given move statement doesn't
+// apply to this object.
+//
+// Both of the given endpoints must be from the same move statement and thus
+// must have matching object types. If not, MoveDestination will panic.
+func (m ModuleInstance) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (ModuleInstance, bool) {
+	// NOTE: This implementation assumes the invariant that fromMatch and
+	// toMatch both belong to the same configuration statement, and thus they
+	// will both have the same address type and the same declaration module.
+
+	// The root module instance is not itself moveable.
+	if m.IsRoot() {
+		return nil, false
+	}
+
+	// The two endpoints must either be module call or module instance
+	// addresses, or else this statement can never match.
+	if fromMatch.ObjectKind() != MoveEndpointModule {
+		return nil, false
+	}
+
+	// The rest of our work will be against the part of the receiver that's
+	// relative to the declaration module. mRel is a weird abuse of
+	// ModuleInstance that represents a relative module address, similar to
+	// what we do for MoveEndpointInModule.relSubject.
+	mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(m)
+	if !match {
+		return nil, false
+	}
+
+	// Our next goal is to split mRel into two parts: the match (if any) and
+	// the suffix. Our result will then replace the match with the replacement
+	// in toMatch while preserving the prefix and suffix.
+	var mSuffix, mNewMatch ModuleInstance
+
+	switch relSubject := fromMatch.relSubject.(type) {
+	case ModuleInstance:
+		if len(relSubject) > len(mRel) {
+			return nil, false // too short to possibly match
+		}
+		for i := range relSubject {
+			if relSubject[i] != mRel[i] {
+				return nil, false // this step doesn't match
+			}
+		}
+		// If we get to here then we've found a match. Since the statement
+		// addresses are already themselves ModuleInstance fragments we can
+		// just slice out the relevant parts.
+		mNewMatch = toMatch.relSubject.(ModuleInstance)
+		mSuffix = mRel[len(relSubject):]
+	case AbsModuleCall:
+		// The module instance part of relSubject must be a prefix of
+		// mRel, and mRel must be at least one step longer to account for
+		// the call step itself.
+		if len(relSubject.Module) > len(mRel)-1 {
+			return nil, false
+		}
+		for i := range relSubject.Module {
+			if relSubject.Module[i] != mRel[i] {
+				return nil, false // this step doesn't match
+			}
+		}
+		// The call name must also match the next step of mRel, after
+		// the relSubject.Module prefix.
+		callStep := mRel[len(relSubject.Module)]
+		if callStep.Name != relSubject.Call.Name {
+			return nil, false
+		}
+		// If we get to here then we've found a match. We need to construct
+		// a new mNewMatch that's an instance of the "new" relSubject with
+		// the same key as our call.
+		mNewMatch = toMatch.relSubject.(AbsModuleCall).Instance(callStep.InstanceKey)
+		mSuffix = mRel[len(relSubject.Module)+1:]
+	default:
+		panic("invalid address type for module-kind move endpoint")
+	}
+
+	ret := make(ModuleInstance, 0, len(mPrefix)+len(mNewMatch)+len(mSuffix))
+	ret = append(ret, mPrefix...)
+	ret = append(ret, mNewMatch...)
+	ret = append(ret, mSuffix...)
+	return ret, true
+}
+
+// MoveDestination considers an address representing a resource
+// in the context of source and destination move endpoints and then,
+// if the resource address matches the from endpoint, returns the corresponding
+// new resource address that the object should move to.
+//
+// MoveDestination will return false in its second return value if the receiver
+// doesn't match fromMatch, indicating that the given move statement doesn't
+// apply to this object.
+//
+// Both of the given endpoints must be from the same move statement and thus
+// must have matching object types. If not, MoveDestination will panic.
+func (r AbsResource) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (AbsResource, bool) {
+	switch fromMatch.ObjectKind() {
+	case MoveEndpointModule:
+		// If we're moving a module then any resource inside that module
+		// moves too.
+		fromMod := r.Module
+		toMod, match := fromMod.MoveDestination(fromMatch, toMatch)
+		if !match {
+			return AbsResource{}, false
+		}
+		return r.Resource.Absolute(toMod), true
+
+	case MoveEndpointResource:
+		fromRelSubject, ok := fromMatch.relSubject.(AbsResource)
+		if !ok {
+			// The only other possible type for a resource move is
+			// AbsResourceInstance, and that can never match an AbsResource.
+			return AbsResource{}, false
+		}
+
+		// fromMatch can only possibly match the receiver if the resource
+		// portions are identical, regardless of the module paths.
+		if fromRelSubject.Resource != r.Resource {
+			return AbsResource{}, false
+		}
+
+		// The module path portion of relSubject must have a prefix that
+		// matches the module where our endpoints were declared.
+		mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(r.Module)
+		if !match {
+			return AbsResource{}, false
+		}
+
+		// The remaining steps of the module path must _exactly_ match
+		// the relative module path in the "fromMatch" address.
+		if len(mRel) != len(fromRelSubject.Module) {
+			return AbsResource{}, false // can't match if lengths are different
+		}
+		for i := range mRel {
+			if mRel[i] != fromRelSubject.Module[i] {
+				return AbsResource{}, false // all of the steps must match
+			}
+		}
+
+		// If we got here then we have a match, and so our result is the
+		// module instance where the statement was declared (mPrefix) followed
+		// by the "to" relative address in toMatch.
+		toRelSubject := toMatch.relSubject.(AbsResource)
+		var mNew ModuleInstance
+		if len(mPrefix) > 0 || len(toRelSubject.Module) > 0 {
+			mNew = make(ModuleInstance, 0, len(mPrefix)+len(toRelSubject.Module))
+			mNew = append(mNew, mPrefix...)
+			mNew = append(mNew, toRelSubject.Module...)
+		}
+		ret := toRelSubject.Resource.Absolute(mNew)
+		return ret, true
+
+	default:
+		panic("unexpected object kind")
+	}
+}
+
+// MoveDestination considers an address representing a resource
+// instance in the context of source and destination move endpoints and then,
+// if the instance address matches the from endpoint, returns the corresponding
+// new instance address that the object should move to.
+//
+// MoveDestination will return false in its second return value if the receiver
+// doesn't match fromMatch, indicating that the given move statement doesn't
+// apply to this object.
+//
+// Both of the given endpoints must be from the same move statement and thus
+// must have matching object types. If not, MoveDestination will panic.
+func (r AbsResourceInstance) MoveDestination(fromMatch, toMatch *MoveEndpointInModule) (AbsResourceInstance, bool) {
+	switch fromMatch.ObjectKind() {
+	case MoveEndpointModule:
+		// If we're moving a module then any resource inside that module
+		// moves too.
+		fromMod := r.Module
+		toMod, match := fromMod.MoveDestination(fromMatch, toMatch)
+		if !match {
+			return AbsResourceInstance{}, false
+		}
+		return r.Resource.Absolute(toMod), true
+
+	case MoveEndpointResource:
+		switch fromMatch.relSubject.(type) {
+		case AbsResource:
+			oldResource := r.ContainingResource()
+			newResource, match := oldResource.MoveDestination(fromMatch, toMatch)
+			if !match {
+				return AbsResourceInstance{}, false
+			}
+			return newResource.Instance(r.Resource.Key), true
+		case AbsResourceInstance:
+			fromRelSubject, ok := fromMatch.relSubject.(AbsResourceInstance)
+			if !ok {
+				// Can't actually fail: the enclosing switch case already
+				// established that relSubject is an AbsResourceInstance.
+				return AbsResourceInstance{}, false
+			}
+
+			// fromMatch can only possibly match the receiver if the resource
+			// portions are identical, regardless of the module paths.
+			if fromRelSubject.Resource != r.Resource {
+				return AbsResourceInstance{}, false
+			}
+
+			// The module path portion of relSubject must have a prefix that
+			// matches the module where our endpoints were declared.
+			mPrefix, mRel, match := fromMatch.matchModuleInstancePrefix(r.Module)
+			if !match {
+				return AbsResourceInstance{}, false
+			}
+
+			// The remaining steps of the module path must _exactly_ match
+			// the relative module path in the "fromMatch" address.
+			if len(mRel) != len(fromRelSubject.Module) {
+				return AbsResourceInstance{}, false // can't match if lengths are different
+			}
+			for i := range mRel {
+				if mRel[i] != fromRelSubject.Module[i] {
+					return AbsResourceInstance{}, false // all of the steps must match
+				}
+			}
+
+			// If we got here then we have a match, and so our result is the
+			// module instance where the statement was declared (mPrefix) followed
+			// by the "to" relative address in toMatch.
+			toRelSubject := toMatch.relSubject.(AbsResourceInstance)
+			var mNew ModuleInstance
+			if len(mPrefix) > 0 || len(toRelSubject.Module) > 0 {
+				mNew = make(ModuleInstance, 0, len(mPrefix)+len(toRelSubject.Module))
+				mNew = append(mNew, mPrefix...)
+				mNew = append(mNew, toRelSubject.Module...)
+			}
+			ret := toRelSubject.Resource.Absolute(mNew)
+			return ret, true
+		default:
+			panic("invalid address type for resource-kind move endpoint")
+		}
+	default:
+		panic("unexpected object kind")
+	}
+}
+
+// IsModuleReIndex takes the From and To endpoints from a single move
+// statement, and returns true if the only changes are to module indexes, and
+// all non-absolute paths remain the same.
+func (from *MoveEndpointInModule) IsModuleReIndex(to *MoveEndpointInModule) bool {
+	// The statements must originate from the same module.
+	if !from.module.Equal(to.module) {
+		panic("cannot compare move expressions from different modules")
+	}
+
+	switch f := from.relSubject.(type) {
+	case AbsModuleCall:
+		switch t := to.relSubject.(type) {
+		case ModuleInstance:
+			// Generate a synthetic module to represent the full address of
+			// the module call. We're not actually comparing indexes, so the
+			// instance doesn't matter.
+			callAddr := f.Instance(NoKey).Module()
+			return callAddr.Equal(t.Module())
+		}
+
+	case ModuleInstance:
+		switch t := to.relSubject.(type) {
+		case AbsModuleCall:
+			callAddr := t.Instance(NoKey).Module()
+			return callAddr.Equal(f.Module())
+
+		case ModuleInstance:
+			return t.Module().Equal(f.Module())
+		}
+	}
+
+	return false
+}
diff --git a/pkg/addrs/move_endpoint_module_test.go b/pkg/addrs/move_endpoint_module_test.go
new file mode 100644
index 00000000000..31d9d18c180
--- /dev/null
+++ b/pkg/addrs/move_endpoint_module_test.go
@@ -0,0 +1,1750 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestModuleInstanceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + WantMatch bool + WantResult string + }{ + { + ``, + `module.foo`, + `module.bar`, + `module.foo`, + true, + `module.bar`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1]`, + true, + `module.bar[1]`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"]`, + true, + `module.bar["a"]`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo`, + true, + `module.bar.module.foo`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar`, + true, + `module.bar`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1]`, + true, + `module.foo[2]`, + }, + { + ``, + `module.foo[1]`, + `module.foo`, + `module.foo[1]`, + true, + `module.foo`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo`, + true, + `module.foo[1]`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar`, + true, + `module.foo[1].module.bar`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0]`, + true, + `module.foo[1].module.bar[0]`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0]`, + true, + `module.bar.module.foo[0]`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0]`, + true, + `module.bar[0]`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar`, + true, + `module.foo.module.baz`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar`, + true, + `module.foo[1].module.baz`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.foo[1].module.bar`, + true, + 
`module.foo[1].module.bar[1]`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo`, + false, // the receiver has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2]`, + false, // the receiver is already the "to" address + ``, + }, + { + ``, + `module.foo`, + `module.bar`, + ``, + false, // the root module can never be moved + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0]`, + false, // the receiver is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo`, + false, // a resource address can never match a module instance + ``, + }, + } + + for _, test := range tests { + t.Run( + fmt.Sprintf( + "%s: %s to %s with %s", + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + 
traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + receiverAddr := RootModuleInstance + if test.Receiver != "" { + var diags tfdiags.Diagnostics + receiverAddr, diags = ParseModuleInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + } + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s\nfrom: %s\nto: %s\nresult: %s", test.Receiver, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Errorf("unexpected non-match\nreceiver: %s\nfrom: %s\nto: %s", test.Receiver, fromEP, toEP) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestAbsResourceInstanceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + WantMatch bool + WantResult string + }{ + { + ``, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + true, + `test_object.boop`, + }, + { + ``, + `test_object.beep`, + 
`test_object.beep[2]`, + `test_object.beep`, + true, + `test_object.beep[2]`, + }, + { + ``, + `test_object.beep`, + `module.foo.test_object.beep`, + `test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `test_object.beep[2]`, + `module.foo.test_object.beep["a"]`, + `test_object.beep[2]`, + true, + `module.foo.test_object.beep["a"]`, + }, + { + ``, + `test_object.beep`, + `module.foo[0].test_object.beep`, + `test_object.beep`, + true, + `module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.test_object.beep`, + `test_object.beep`, + `module.foo.test_object.beep`, + true, + `test_object.beep`, + }, + { + ``, + `module.foo[0].test_object.beep`, + `test_object.beep`, + `module.foo[0].test_object.beep`, + true, + `test_object.beep`, + }, + { + `foo`, + `test_object.beep`, + `test_object.boop`, + `module.foo[0].test_object.beep`, + true, + `module.foo[0].test_object.boop`, + }, + { + `foo`, + `test_object.beep`, + `test_object.beep[1]`, + `module.foo[0].test_object.beep`, + true, + `module.foo[0].test_object.beep[1]`, + }, + { + ``, + `test_object.beep`, + `test_object.boop`, + `test_object.boop`, + false, // the reciever is already the "to" address + ``, + }, + { + ``, + `test_object.beep[1]`, + `test_object.beep[2]`, + `test_object.beep[5]`, + false, // the receiver has a non-matching instance key + ``, + }, + { + `foo`, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + false, // the receiver is not inside an instance of module "foo" + ``, + }, + { + `foo.bar`, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + false, // the receiver is not inside an instance of module "foo.bar" + ``, + }, + { + ``, + `module.foo[0].test_object.beep`, + `test_object.beep`, + `module.foo[1].test_object.beep`, + false, // receiver is in a different instance of module.foo + ``, + }, + + // Moving a module also moves all of the resources declared within it. + // The following tests all cover variations of that rule. 
+ { + ``, + `module.foo`, + `module.bar`, + `module.foo.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1].test_object.beep`, + true, + `module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"].test_object.beep`, + true, + `module.bar["a"].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo.test_object.beep`, + true, + `module.bar.module.foo.test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1].test_object.beep`, + true, + `module.foo[2].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo`, + `module.foo[1].test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.test_object.beep`, + true, + `module.foo[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo[1].module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.foo[1].module.bar[0].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0].test_object.beep`, + true, + `module.bar.module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.bar[0].test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo.module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, 
+ `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo.test_object.beep`, + false, // the receiver module has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2].test_object.beep`, + false, // the receiver is already at the "to" address + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0].test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo.test_object.beep`, + false, // the resource address is unrelated to the move statements + ``, + }, + } + + for _, test := range tests { + 
t.Run( + fmt.Sprintf( + "%s: %s to %s with %s", + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + receiverAddr, diags := ParseAbsResourceInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s\nfrom: %s\nto: %s\nresult: %s", test.Receiver, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Fatalf("unexpected non-match\nreceiver: %s (%T)\nfrom: %s\nto: %s\ngot: (no match)\nwant: %s", test.Receiver, receiverAddr, fromEP, toEP, test.WantResult) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestAbsResourceMoveDestination(t *testing.T) { + tests := []struct { + DeclModule string + StmtFrom, StmtTo string + Receiver string + 
WantMatch bool + WantResult string + }{ + { + ``, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + true, + `test_object.boop`, + }, + { + ``, + `test_object.beep`, + `module.foo.test_object.beep`, + `test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `test_object.beep`, + `module.foo[0].test_object.beep`, + `test_object.beep`, + true, + `module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.test_object.beep`, + `test_object.beep`, + `module.foo.test_object.beep`, + true, + `test_object.beep`, + }, + { + ``, + `module.foo[0].test_object.beep`, + `test_object.beep`, + `module.foo[0].test_object.beep`, + true, + `test_object.beep`, + }, + { + `foo`, + `test_object.beep`, + `test_object.boop`, + `module.foo[0].test_object.beep`, + true, + `module.foo[0].test_object.boop`, + }, + { + ``, + `test_object.beep`, + `test_object.boop`, + `test_object.boop`, + false, // the reciever is already the "to" address + ``, + }, + { + `foo`, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + false, // the receiver is not inside an instance of module "foo" + ``, + }, + { + `foo.bar`, + `test_object.beep`, + `test_object.boop`, + `test_object.beep`, + false, // the receiver is not inside an instance of module "foo.bar" + ``, + }, + { + ``, + `module.foo[0].test_object.beep`, + `test_object.beep`, + `module.foo[1].test_object.beep`, + false, // receiver is in a different instance of module.foo + ``, + }, + + // Moving a module also moves all of the resources declared within it. + // The following tests all cover variations of that rule. 
+ { + ``, + `module.foo`, + `module.bar`, + `module.foo.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo[1].test_object.beep`, + true, + `module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar`, + `module.foo["a"].test_object.beep`, + true, + `module.bar["a"].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo.test_object.beep`, + true, + `module.bar.module.foo.test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar.test_object.beep`, + true, + `module.bar.test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[1].test_object.beep`, + true, + `module.foo[2].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo`, + `module.foo[1].test_object.beep`, + true, + `module.foo.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.test_object.beep`, + true, + `module.foo[1].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo[1].module.bar.test_object.beep`, + }, + { + ``, + `module.foo`, + `module.foo[1]`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.foo[1].module.bar[0].test_object.beep`, + }, + { + ``, + `module.foo`, + `module.bar.module.foo`, + `module.foo[0].test_object.beep`, + true, + `module.bar.module.foo[0].test_object.beep`, + }, + { + ``, + `module.foo.module.bar`, + `module.bar`, + `module.foo.module.bar[0].test_object.beep`, + true, + `module.bar[0].test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo.module.bar.test_object.beep`, + true, + `module.foo.module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.baz`, + `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.baz.test_object.beep`, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, 
+ `module.foo[1].module.bar.test_object.beep`, + true, + `module.foo[1].module.bar[1].test_object.beep`, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo.test_object.beep`, + false, // the receiver module has a non-matching instance key (NoKey) + ``, + }, + { + ``, + `module.foo[1]`, + `module.foo[2]`, + `module.foo[2].test_object.beep`, + false, // the receiver is already at the "to" address + ``, + }, + { + `foo`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.bar`, + `module.bar[1]`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + `foo.bar`, + `module.a`, + `module.b`, + `module.boz.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.c.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.a1.module.b2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2`, + `module.b1.module.b2`, + `module.b1.module.a2.test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `module.a1.module.a2[0]`, + `module.b1.module.b2[1]`, + `module.a1.module.b2[0].test_object.beep`, + false, // the receiver module is outside the declaration module + ``, + }, + { + ``, + `foo_instance.bar`, + `foo_instance.baz`, + `module.foo.test_object.beep`, + false, // the resource address is unrelated to the move statements + ``, + }, + } + + for i, test := range tests { + 
t.Run( + fmt.Sprintf( + "[%02d] %s: %s to %s with %s", + i, + test.DeclModule, + test.StmtFrom, test.StmtTo, + test.Receiver, + ), + func(t *testing.T) { + + parseStmtEP := func(t *testing.T, input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEPLocal := parseStmtEP(t, test.StmtFrom) + toEPLocal := parseStmtEP(t, test.StmtTo) + + declModule := RootModule + if test.DeclModule != "" { + declModule = strings.Split(test.DeclModule, ".") + } + fromEP, toEP := UnifyMoveEndpoints(declModule, fromEPLocal, toEPLocal) + if fromEP == nil || toEP == nil { + t.Fatalf("invalid test case: non-unifyable endpoints\nfrom: %s\nto: %s", fromEPLocal, toEPLocal) + } + + // We only have an AbsResourceInstance parser, not an + // AbsResourceParser, and so we'll just cheat and parse this + // as a resource instance but fail if it includes an instance + // key. 
+ receiverInstanceAddr, diags := ParseAbsResourceInstanceStr(test.Receiver) + if diags.HasErrors() { + t.Fatalf("invalid reciever address: %s", diags.Err().Error()) + } + if receiverInstanceAddr.Resource.Key != NoKey { + t.Fatalf("invalid reciever address: must be a resource, not a resource instance") + } + receiverAddr := receiverInstanceAddr.ContainingResource() + gotAddr, gotMatch := receiverAddr.MoveDestination(fromEP, toEP) + if !test.WantMatch { + if gotMatch { + t.Errorf("unexpected match\nreceiver: %s (%T)\nfrom: %s\nto: %s\nresult: %s", test.Receiver, receiverAddr, fromEP, toEP, gotAddr) + } + return + } + + if !gotMatch { + t.Fatalf("unexpected non-match\nreceiver: %s (%T)\nfrom: %s\nto: %s\ngot: no match\nwant: %s", test.Receiver, receiverAddr, fromEP, toEP, test.WantResult) + } + + if gotStr, wantStr := gotAddr.String(), test.WantResult; gotStr != wantStr { + t.Errorf("wrong result\ngot: %s\nwant: %s", gotStr, wantStr) + } + }, + ) + } +} + +func TestMoveEndpointChainAndNested(t *testing.T) { + tests := []struct { + Endpoint, Other AbsMoveable + EndpointMod, OtherMod Module + CanChainFrom, NestedWithin bool + }{ + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2].module.bar[2]"), + Other: AbsModuleCall{ + Module: RootModuleInstance, + Call: ModuleCall{Name: "foo"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar.resource.baz").ContainingResource(), + 
Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar[3].resource.baz[2]"), + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].module.bar.resource.baz"), + Other: mustParseModuleInstanceStr("module.foo[2]"), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: false, + NestedWithin: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: true, + NestedWithin: false, + }, + + { + Endpoint: 
mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz[2]").ContainingResource(), + CanChainFrom: false, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2]"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: false, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("resource.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("resource.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + CanChainFrom: true, + }, + + { + Endpoint: mustParseModuleInstanceStr("module.foo[2].module.baz"), + Other: mustParseModuleInstanceStr("module.baz"), + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { 
+ Endpoint: AbsModuleCall{ + Call: ModuleCall{Name: "bing"}, + }, + EndpointMod: Module{"foo", "baz"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + OtherMod: Module{"foo"}, + CanChainFrom: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz").ContainingResource(), + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("module.foo[2].resource.baz"), + Other: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("resource.baz"), + EndpointMod: Module{"foo"}, + Other: mustParseAbsResourceInstanceStr("resource.baz").ContainingResource(), + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: mustParseAbsResourceInstanceStr("ressurce.baz").ContainingResource(), + EndpointMod: Module{"foo"}, + Other: mustParseModuleInstanceStr("module.foo[2]"), + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Call: ModuleCall{Name: "bang"}, + }, + EndpointMod: Module{"foo", "baz", "bing"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + OtherMod: Module{"foo"}, + NestedWithin: true, + }, + + { + Endpoint: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bing"), + Call: ModuleCall{Name: "bang"}, + }, + EndpointMod: Module{"foo", "baz"}, + Other: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo.module.baz"), + Call: ModuleCall{Name: "bing"}, + }, + NestedWithin: true, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s.CanChainFrom(%s)", i, test.Endpoint, test.Other), + func(t *testing.T) { + endpoint := &MoveEndpointInModule{ + relSubject: test.Endpoint, + module: test.EndpointMod, + } + + other := 
&MoveEndpointInModule{ + relSubject: test.Other, + module: test.OtherMod, + } + + if endpoint.CanChainFrom(other) != test.CanChainFrom { + t.Errorf("expected %s CanChainFrom %s == %t", endpoint, other, test.CanChainFrom) + } + + if endpoint.NestedWithin(other) != test.NestedWithin { + t.Errorf("expected %s NestedWithin %s == %t", endpoint, other, test.NestedWithin) + } + }, + ) + } +} + +func TestSelectsModule(t *testing.T) { + tests := []struct { + Endpoint *MoveEndpointInModule + Addr ModuleInstance + Selects bool + }{ + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[1]"), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar[2]"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[2].module.baz"), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar[2]"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.foo[2].module.bar[1].module.baz"), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.bar"), + Call: ModuleCall{Name: "baz"}, + }, + }, + Addr: mustParseModuleInstanceStr("module.bar[1].module.baz"), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.foo").Module(), + relSubject: mustParseAbsResourceInstanceStr(`module.bar.resource.name["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.foo[1].module.bar`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ 
+ relSubject: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseAbsResourceInstanceStr(`module.bar.module.baz["key"].resource.name`).ContainingResource(), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: true, + }, + { + Endpoint: &MoveEndpointInModule{ + module: mustParseModuleInstanceStr("module.nope").Module(), + relSubject: mustParseAbsResourceInstanceStr(`module.bar.resource.name["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.foo[1].module.bar`), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["nope"]`), + Selects: false, + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseAbsResourceInstanceStr(`module.nope.module.baz["key"].resource.name`).ContainingResource(), + }, + Addr: mustParseModuleInstanceStr(`module.bar.module.baz["key"]`), + Selects: false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s.SelectsModule(%s)", i, test.Endpoint, test.Addr), + func(t *testing.T) { + if test.Endpoint.SelectsModule(test.Addr) != test.Selects { + t.Errorf("expected %s SelectsModule %s == %t", test.Endpoint, test.Addr, test.Selects) + } + }, + ) + } +} + +func TestSelectsResource(t *testing.T) { + matchingResource := Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "matching", + } + unmatchingResource := Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "unmatching", + } + childMod := Module{ + "child", + } + childModMatchingInst := ModuleInstance{ + ModuleInstanceStep{Name: "child", InstanceKey: StringKey("matching")}, + } + childModUnmatchingInst := ModuleInstance{ + ModuleInstanceStep{Name: "child", InstanceKey: StringKey("unmatching")}, + } + + tests 
:= []struct { + Endpoint *MoveEndpointInModule + Addr AbsResource + Selects bool + }{ + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // exact match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: unmatchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: false, // wrong resource name + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: unmatchingResource.Instance(IntKey(1)).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: false, // wrong resource name + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(NoKey).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(IntKey(0)).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(StringKey("a")).Absolute(nil), + }, + Addr: matchingResource.Absolute(nil), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + module: childMod, + relSubject: matchingResource.Absolute(nil), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // in one of the instances of the module where the statement was written + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // exact match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: matchingResource.Instance(IntKey(2)).Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModMatchingInst), + Selects: true, // matches one instance + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: 
matchingResource.Absolute(childModMatchingInst), + }, + Addr: matchingResource.Absolute(childModUnmatchingInst), + Selects: false, // the containing module instance doesn't match + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: AbsModuleCall{ + Module: mustParseModuleInstanceStr("module.foo[2]"), + Call: ModuleCall{Name: "bar"}, + }, + }, + Addr: matchingResource.Absolute(mustParseModuleInstanceStr("module.foo[2]")), + Selects: false, // a module call can't match a resource + }, + { + Endpoint: &MoveEndpointInModule{ + relSubject: mustParseModuleInstanceStr("module.foo[2]"), + }, + Addr: matchingResource.Absolute(mustParseModuleInstanceStr("module.foo[2]")), + Selects: false, // a module instance can't match a resource + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]%s SelectsResource(%s)", i, test.Endpoint, test.Addr), + func(t *testing.T) { + if got, want := test.Endpoint.SelectsResource(test.Addr), test.Selects; got != want { + t.Errorf("wrong result\nReceiver: %s\nArgument: %s\ngot: %t\nwant: %t", test.Endpoint, test.Addr, got, want) + } + }, + ) + } +} + +func TestIsModuleMoveReIndex(t *testing.T) { + tests := []struct { + from, to AbsMoveable + expect bool + }{ + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.bar`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.bar[0]`), + expect: true, + }, + { + from: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + to: mustParseModuleInstanceStr(`module.bar[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar["a"]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.foo`), + to: mustParseModuleInstanceStr(`module.bar`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar`), + to: mustParseModuleInstanceStr(`module.foo[0]`), + expect: false, + }, + { + 
from: AbsModuleCall{ + Call: ModuleCall{Name: "bar"}, + }, + to: mustParseModuleInstanceStr(`module.foo[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar["a"]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "foo"}, + }, + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.baz.module.baz`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.baz.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz`), + to: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + { + from: mustParseModuleInstanceStr(`module.bar[0].module.baz`), + to: mustParseModuleInstanceStr(`module.bar[1].module.baz[0]`), + expect: true, + }, + { + from: AbsModuleCall{ + Call: ModuleCall{Name: "baz"}, + }, + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: AbsModuleCall{ + Call: ModuleCall{Name: "baz"}, + }, + expect: false, + }, + + { + from: AbsModuleCall{ + Module: mustParseModuleInstanceStr(`module.bar[0]`), + Call: ModuleCall{Name: "baz"}, + }, + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: true, + }, + + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: AbsModuleCall{ + Module: mustParseModuleInstanceStr(`module.bar[0]`), + Call: ModuleCall{Name: "baz"}, + }, 
+ expect: true, + }, + + { + from: mustParseModuleInstanceStr(`module.baz`), + to: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + expect: false, + }, + { + from: mustParseModuleInstanceStr(`module.bar.module.baz[0]`), + to: mustParseModuleInstanceStr(`module.baz`), + expect: false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("[%02d]IsModuleMoveReIndex(%s, %s)", i, test.from, test.to), + func(t *testing.T) { + from := &MoveEndpointInModule{ + relSubject: test.from, + } + + to := &MoveEndpointInModule{ + relSubject: test.to, + } + + if got := from.IsModuleReIndex(to); got != test.expect { + t.Errorf("expected %t, got %t", test.expect, got) + } + }, + ) + } +} + +func mustParseAbsResourceInstanceStr(s string) AbsResourceInstance { + r, diags := ParseAbsResourceInstanceStr(s) + if diags.HasErrors() { + panic(diags.ErrWithWarnings().Error()) + } + return r +} diff --git a/pkg/addrs/move_endpoint_test.go b/pkg/addrs/move_endpoint_test.go new file mode 100644 index 00000000000..29fa7ce56e4 --- /dev/null +++ b/pkg/addrs/move_endpoint_test.go @@ -0,0 +1,637 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestParseMoveEndpoint(t *testing.T) { + tests := []struct { + Input string + WantRel AbsMoveable // funny intermediate subset of AbsMoveable + WantErr string + }{ + { + `foo.bar`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `foo.bar[0]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `foo.bar["a"]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.boop.foo.bar`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `module.boop.foo.bar[0]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `module.boop.foo.bar["a"]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `data.foo.bar`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + 
Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `data.foo.bar[0]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `data.foo.bar["a"]`, + AbsResourceInstance{ + Module: RootModuleInstance, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.boop.data.foo.bar`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: NoKey, + }, + }, + ``, + }, + { + `module.boop.data.foo.bar[0]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(0), + }, + }, + ``, + }, + { + `module.boop.data.foo.bar["a"]`, + AbsResourceInstance{ + Module: ModuleInstance{ + ModuleInstanceStep{Name: "boop"}, + }, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "foo", + Name: "bar", + }, + Key: StringKey("a"), + }, + }, + ``, + }, + { + `module.foo`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + }, + ``, + }, + { + `module.foo[0]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(0)}, + }, + ``, + }, + { + `module.foo["a"]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: StringKey("a")}, + }, + ``, + }, + { + `module.foo.module.bar`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + ModuleInstanceStep{Name: "bar"}, + }, + ``, + }, + { + `module.foo[1].module.bar`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(1)}, + 
ModuleInstanceStep{Name: "bar"}, + }, + ``, + }, + { + `module.foo.module.bar[1]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo"}, + ModuleInstanceStep{Name: "bar", InstanceKey: IntKey(1)}, + }, + ``, + }, + { + `module.foo[0].module.bar[1]`, + ModuleInstance{ + ModuleInstanceStep{Name: "foo", InstanceKey: IntKey(0)}, + ModuleInstanceStep{Name: "bar", InstanceKey: IntKey(1)}, + }, + ``, + }, + { + `module`, + nil, + `Invalid address operator: Prefix "module." must be followed by a module name.`, + }, + { + `module[0]`, + nil, + `Invalid address operator: Prefix "module." must be followed by a module name.`, + }, + { + `module.foo.data`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data[0]`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + { + `module.foo.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. 
+ t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + + switch { + case test.WantErr != "": + if !diags.HasErrors() { + t.Fatalf("unexpected success\nwant error: %s", test.WantErr) + } + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("wrong error\ngot: %s\nwant: %s", gotErr, test.WantErr) + } + default: + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + if diff := cmp.Diff(test.WantRel, moveEp.relSubject); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + } + }) + } +} + +func TestUnifyMoveEndpoints(t *testing.T) { + tests := []struct { + InputFrom, InputTo string + Module Module + WantFrom, WantTo string + }{ + { + InputFrom: `foo.bar`, + InputTo: `foo.baz`, + Module: RootModule, + WantFrom: `foo.bar[*]`, + WantTo: `foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `foo.baz`, + Module: RootModule.Child("a"), + WantFrom: `module.a[*].foo.bar[*]`, + WantTo: `module.a[*].foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `module.b[0].foo.baz`, + Module: RootModule.Child("a"), + WantFrom: `module.a[*].foo.bar[*]`, + WantTo: `module.a[*].module.b[0].foo.baz[*]`, + }, + { + InputFrom: `foo.bar`, + InputTo: `foo.bar["thing"]`, + Module: RootModule, + WantFrom: `foo.bar`, + WantTo: `foo.bar["thing"]`, + }, + { + InputFrom: `foo.bar["thing"]`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: `foo.bar["thing"]`, + WantTo: `foo.bar`, + }, + { + InputFrom: `foo.bar["a"]`, + InputTo: `foo.bar["b"]`, + Module: RootModule, + WantFrom: `foo.bar["a"]`, + WantTo: `foo.bar["b"]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar`, + Module: RootModule, + WantFrom: `module.foo[*]`, + WantTo: `module.bar[*]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar.module.baz`, + Module: RootModule, + WantFrom: `module.foo[*]`, + WantTo: `module.bar.module.baz[*]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.bar.module.baz`, + 
Module: RootModule.Child("bloop"), + WantFrom: `module.bloop[*].module.foo[*]`, + WantTo: `module.bloop[*].module.bar.module.baz[*]`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo["a"]`, + Module: RootModule, + WantFrom: `module.foo[0]`, + WantTo: `module.foo["a"]`, + }, + { + InputFrom: `module.foo`, + InputTo: `module.foo["a"]`, + Module: RootModule, + WantFrom: `module.foo`, + WantTo: `module.foo["a"]`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo`, + Module: RootModule, + WantFrom: `module.foo[0]`, + WantTo: `module.foo`, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `module.foo`, + Module: RootModule.Child("bloop"), + WantFrom: `module.bloop[*].module.foo[0]`, + WantTo: `module.bloop[*].module.foo`, + }, + { + InputFrom: `module.foo`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: ``, // Can't unify module call with resource + WantTo: ``, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `foo.bar`, + Module: RootModule, + WantFrom: ``, // Can't unify module instance with resource + WantTo: ``, + }, + { + InputFrom: `module.foo`, + InputTo: `foo.bar[0]`, + Module: RootModule, + WantFrom: ``, // Can't unify module call with resource instance + WantTo: ``, + }, + { + InputFrom: `module.foo[0]`, + InputTo: `foo.bar[0]`, + Module: RootModule, + WantFrom: ``, // Can't unify module instance with resource instance + WantTo: ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s to %s in %s", test.InputFrom, test.InputTo, test.Module), func(t *testing.T) { + parseInput := func(input string) *MoveEndpoint { + t.Helper() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. 
+ t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + return moveEp + } + + fromEp := parseInput(test.InputFrom) + toEp := parseInput(test.InputTo) + + gotFrom, gotTo := UnifyMoveEndpoints(test.Module, fromEp, toEp) + if got, want := gotFrom.String(), test.WantFrom; got != want { + t.Errorf("wrong 'from' result\ngot: %s\nwant: %s", got, want) + } + if got, want := gotTo.String(), test.WantTo; got != want { + t.Errorf("wrong 'to' result\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestMoveEndpointConfigMoveable(t *testing.T) { + tests := []struct { + Input string + Module Module + Want ConfigMoveable + }{ + { + `foo.bar`, + RootModule, + ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `foo.bar[0]`, + RootModule, + ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.foo.bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: "baz", + }, + }, + }, + { + `module.foo[0].bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: "baz", + }, + }, + }, + { + `foo.bar`, + Module{"boop"}, + ConfigResource{ + Module: Module{"boop"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.bloop.foo.bar`, + Module{"bleep"}, + ConfigResource{ + Module: Module{"bleep", "bloop"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + }, + { + `module.foo.bar.baz`, + RootModule, + ConfigResource{ + Module: Module{"foo"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "bar", + Name: 
"baz", + }, + }, + }, + { + `module.foo`, + RootModule, + Module{"foo"}, + }, + { + `module.foo[0]`, + RootModule, + Module{"foo"}, + }, + { + `module.bloop`, + Module{"bleep"}, + Module{"bleep", "bloop"}, + }, + { + `module.bloop[0]`, + Module{"bleep"}, + Module{"bleep", "bloop"}, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s in %s", test.Input, test.Module), func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + + got := moveEp.ConfigMoveable(test.Module) + if diff := cmp.Diff(test.Want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/pkg/addrs/moveable.go b/pkg/addrs/moveable.go new file mode 100644 index 00000000000..c7274e86594 --- /dev/null +++ b/pkg/addrs/moveable.go @@ -0,0 +1,62 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// AbsMoveable is an interface implemented by address types that can be either +// the source or destination of a "moved" statement in configuration, along +// with any other similar cross-module state refactoring statements we might +// allow. +// +// Note that AbsMoveable represents an absolute address relative to the root +// of the configuration, which is different than the direct representation +// of these in configuration where the author gives an address relative to +// the current module where the address is defined. 
The type MoveEndpoint +// represents the relative form given directly in configuration. +type AbsMoveable interface { + absMoveableSigil() + UniqueKeyer + + String() string +} + +// The following are all of the possible AbsMoveable address types: +var ( + _ AbsMoveable = AbsResource{} + _ AbsMoveable = AbsResourceInstance{} + _ AbsMoveable = ModuleInstance(nil) + _ AbsMoveable = AbsModuleCall{} +) + +// AbsMoveableResource is an AbsMoveable that is either a resource or a resource +// instance. +type AbsMoveableResource interface { + AbsMoveable + AffectedAbsResource() AbsResource +} + +// The following are all of the possible AbsMoveableResource types: +var ( + _ AbsMoveableResource = AbsResource{} + _ AbsMoveableResource = AbsResourceInstance{} +) + +// ConfigMoveable is similar to AbsMoveable but represents a static object in +// the configuration, rather than an instance of that object created by +// module expansion. +// +// Note that ConfigMoveable represents an absolute address relative to the root +// of the configuration, which is different than the direct representation +// of these in configuration where the author gives an address relative to +// the current module where the address is defined. The type MoveEndpoint +// represents the relative form given directly in configuration. +type ConfigMoveable interface { + configMoveableSigil() +} + +// The following are all of the possible ConfigMoveable address types: +var ( + _ ConfigMoveable = ConfigResource{} + _ ConfigMoveable = Module(nil) +) diff --git a/pkg/addrs/moveendpointkind_string.go b/pkg/addrs/moveendpointkind_string.go new file mode 100644 index 00000000000..f706fb9cae1 --- /dev/null +++ b/pkg/addrs/moveendpointkind_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type MoveEndpointKind"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[MoveEndpointModule-77] + _ = x[MoveEndpointResource-82] +} + +const ( + _MoveEndpointKind_name_0 = "MoveEndpointModule" + _MoveEndpointKind_name_1 = "MoveEndpointResource" +) + +func (i MoveEndpointKind) String() string { + switch { + case i == 77: + return _MoveEndpointKind_name_0 + case i == 82: + return _MoveEndpointKind_name_1 + default: + return "MoveEndpointKind(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/addrs/output_value.go b/pkg/addrs/output_value.go new file mode 100644 index 00000000000..5ed1758997d --- /dev/null +++ b/pkg/addrs/output_value.go @@ -0,0 +1,240 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// OutputValue is the address of an output value, in the context of the module +// that is defining it. +// +// This is related to but separate from ModuleCallOutput, which represents +// a module output from the perspective of its parent module. Outputs are +// referencable from the testing scope, in general tofu operation users +// will be referencing ModuleCallOutput. +type OutputValue struct { + referenceable + Name string +} + +func (v OutputValue) String() string { + return "output." + v.Name +} + +func (v OutputValue) Equal(o OutputValue) bool { + return v.Name == o.Name +} + +func (v OutputValue) UniqueKey() UniqueKey { + return v // An OutputValue is its own UniqueKey +} + +func (v OutputValue) uniqueKeySigil() {} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: v, + } +} + +// InModule converts the receiver into a config address within the given +// module. 
+func (v OutputValue) InModule(m Module) ConfigOutputValue { + return ConfigOutputValue{ + Module: m, + OutputValue: v, + } +} + +// AbsOutputValue is the absolute address of an output value within a module instance. +// +// This represents an output globally within the namespace of a particular +// configuration. It is related to but separate from ModuleCallOutput, which +// represents a module output from the perspective of its parent module. +type AbsOutputValue struct { + Module ModuleInstance + OutputValue OutputValue +} + +// OutputValue returns the absolute address of an output value of the given +// name within the receiving module instance. +func (m ModuleInstance) OutputValue(name string) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: OutputValue{ + Name: name, + }, + } +} + +func (v AbsOutputValue) CheckRule(t CheckRuleType, i int) CheckRule { + return CheckRule{ + Container: v, + Type: t, + Index: i, + } +} + +func (v AbsOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +func (v AbsOutputValue) Equal(o AbsOutputValue) bool { + return v.OutputValue.Equal(o.OutputValue) && v.Module.Equal(o.Module) +} + +func (v AbsOutputValue) ConfigOutputValue() ConfigOutputValue { + return ConfigOutputValue{ + Module: v.Module.Module(), + OutputValue: v.OutputValue, + } +} + +func (v AbsOutputValue) checkableSigil() { + // Output values are checkable +} + +func (v AbsOutputValue) ConfigCheckable() ConfigCheckable { + // Output values are declared by "output" blocks in the configuration, + // represented as ConfigOutputValue. 
+ return v.ConfigOutputValue() +} + +func (v AbsOutputValue) CheckableKind() CheckableKind { + return CheckableOutputValue +} + +func (v AbsOutputValue) UniqueKey() UniqueKey { + return absOutputValueUniqueKey(v.String()) +} + +type absOutputValueUniqueKey string + +func (k absOutputValueUniqueKey) uniqueKeySigil() {} + +func ParseAbsOutputValue(traversal hcl.Traversal) (AbsOutputValue, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return AbsOutputValue{}, diags + } + + if len(remain) != 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + if remain.RootName() != "output" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Output address must start with \"output.\".", + Subject: remain[0].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + var name string + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "An output name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return AbsOutputValue{}, diags + } + + return AbsOutputValue{ + Module: path, + OutputValue: OutputValue{ + Name: name, + }, + }, diags +} + +func ParseAbsOutputValueStr(str string) (AbsOutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsOutputValue{}, diags + } + + addr, addrDiags := ParseAbsOutputValue(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ModuleCallOutput converts an AbsModuleOutput into a 
ModuleCallOutput, +// returning also the module instance that the ModuleCallOutput is relative +// to. +// +// The root module does not have a call, and so this method cannot be used +// with outputs in the root module, and will panic in that case. +func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallInstanceOutput) { + if v.Module.IsRoot() { + panic("ReferenceFromCall used with root module output") + } + + caller, call := v.Module.CallInstance() + return caller, ModuleCallInstanceOutput{ + Call: call, + Name: v.OutputValue.Name, + } +} + +// ConfigOutputValue represents a particular "output" block in the +// configuration, which might have many AbsOutputValue addresses associated +// with it at runtime if it belongs to a module that was called using +// "count" or "for_each". +type ConfigOutputValue struct { + Module Module + OutputValue OutputValue +} + +func (v ConfigOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +func (v ConfigOutputValue) configCheckableSigil() { + // ConfigOutputValue is the ConfigCheckable for AbsOutputValue. +} + +func (v ConfigOutputValue) CheckableKind() CheckableKind { + return CheckableOutputValue +} + +func (v ConfigOutputValue) UniqueKey() UniqueKey { + return configOutputValueUniqueKey(v.String()) +} + +type configOutputValueUniqueKey string + +func (k configOutputValueUniqueKey) uniqueKeySigil() {} diff --git a/pkg/addrs/output_value_test.go b/pkg/addrs/output_value_test.go new file mode 100644 index 00000000000..8f629f73748 --- /dev/null +++ b/pkg/addrs/output_value_test.go @@ -0,0 +1,136 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" + "testing" + + "github.com/go-test/deep" +) + +func TestAbsOutputValueInstanceEqual_true(t *testing.T) { + foo, diags := ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + foobar, diags := ParseModuleInstanceStr("module.foo[1].module.bar") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + + ovs := []AbsOutputValue{ + foo.OutputValue("a"), + foobar.OutputValue("b"), + } + for _, r := range ovs { + t.Run(r.String(), func(t *testing.T) { + if !r.Equal(r) { + t.Fatalf("expected %#v to be equal to itself", r) + } + }) + } +} + +func TestAbsOutputValueInstanceEqual_false(t *testing.T) { + foo, diags := ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + foobar, diags := ParseModuleInstanceStr("module.foo[1].module.bar") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + + testCases := []struct { + left AbsOutputValue + right AbsOutputValue + }{ + { + foo.OutputValue("a"), + foo.OutputValue("b"), + }, + { + foo.OutputValue("a"), + foobar.OutputValue("a"), + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestParseAbsOutputValueStr(t *testing.T) { + tests := map[string]struct { + want AbsOutputValue + wantErr string + }{ + "module.foo": { + wantErr: "An output name is required", + }, + "module.foo.output": { + wantErr: "An output name is required", + }, + "module.foo.boop.beep": { + wantErr: "Output address must start with \"output.\"", + }, + "module.foo.output[0]": { + wantErr: "An output name is required", + }, + "output": { 
+ wantErr: "An output name is required", + }, + "output[0]": { + wantErr: "An output name is required", + }, + "output.boop": { + want: AbsOutputValue{ + Module: RootModuleInstance, + OutputValue: OutputValue{ + Name: "boop", + }, + }, + }, + "module.foo.output.beep": { + want: AbsOutputValue{ + Module: mustParseModuleInstanceStr("module.foo"), + OutputValue: OutputValue{ + Name: "beep", + }, + }, + }, + } + + for input, tc := range tests { + t.Run(input, func(t *testing.T) { + got, diags := ParseAbsOutputValueStr(input) + for _, problem := range deep.Equal(got, tc.want) { + t.Errorf(problem) + } + if len(diags) > 0 { + gotErr := diags.Err().Error() + if tc.wantErr == "" { + t.Errorf("got error, expected success: %s", gotErr) + } else if !strings.Contains(gotErr, tc.wantErr) { + t.Errorf("unexpected error\n got: %s\nwant: %s", gotErr, tc.wantErr) + } + } else { + if tc.wantErr != "" { + t.Errorf("got success, expected error: %s", tc.wantErr) + } + } + }) + } +} diff --git a/pkg/addrs/parse_ref.go b/pkg/addrs/parse_ref.go new file mode 100644 index 00000000000..4f7dd4e1c19 --- /dev/null +++ b/pkg/addrs/parse_ref.go @@ -0,0 +1,503 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Reference describes a reference to an address with source location +// information. +type Reference struct { + Subject Referenceable + SourceRange tfdiags.SourceRange + Remaining hcl.Traversal +} + +// DisplayString returns a string that approximates the subject and remaining +// traversal of the reciever in a way that resembles the OpenTofu language +// syntax that could've produced it. 
+// +// It's not guaranteed to actually be a valid OpenTofu language expression, +// since the intended use here is primarily for UI messages such as +// diagnostics. +func (r *Reference) DisplayString() string { + if len(r.Remaining) == 0 { + // Easy case: we can just return the subject's string. + return r.Subject.String() + } + + var ret strings.Builder + ret.WriteString(r.Subject.String()) + for _, step := range r.Remaining { + switch tStep := step.(type) { + case hcl.TraverseRoot: + ret.WriteString(tStep.Name) + case hcl.TraverseAttr: + ret.WriteByte('.') + ret.WriteString(tStep.Name) + case hcl.TraverseIndex: + ret.WriteByte('[') + switch tStep.Key.Type() { + case cty.String: + ret.WriteString(fmt.Sprintf("%q", tStep.Key.AsString())) + case cty.Number: + bf := tStep.Key.AsBigFloat() + ret.WriteString(bf.Text('g', 10)) + } + ret.WriteByte(']') + } + } + return ret.String() +} + +// ParseRef attempts to extract a referencable address from the prefix of the +// given traversal, which must be an absolute traversal or this function +// will panic. +// +// If no error diagnostics are returned, the returned reference includes the +// address that was extracted, the source range it was extracted from, and any +// remaining relative traversal that was not consumed as part of the +// reference. +// +// If error diagnostics are returned then the Reference value is invalid and +// must not be used. +func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + ref, diags := parseRef(traversal) + + // Normalize a little to make life easier for callers. + if ref != nil { + if len(ref.Remaining) == 0 { + ref.Remaining = nil + } + } + + return ref, diags +} + +// ParseRefFromTestingScope adds check blocks and outputs into the available +// references returned by ParseRef. +// +// The testing files and functionality have a slightly expanded referencing +// scope and so should use this function to retrieve references. 
+func ParseRefFromTestingScope(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + root := traversal.RootName() + + var diags tfdiags.Diagnostics + var reference *Reference + + switch root { + case "output": + name, rng, remain, outputDiags := parseSingleAttrRef(traversal) + reference = &Reference{ + Subject: OutputValue{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + } + diags = outputDiags + case "check": + name, rng, remain, checkDiags := parseSingleAttrRef(traversal) + reference = &Reference{ + Subject: Check{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + } + diags = checkDiags + } + + if reference != nil { + if len(reference.Remaining) == 0 { + reference.Remaining = nil + } + return reference, diags + } + + // If it's not an output or a check block, then just parse it as normal. + return ParseRef(traversal) +} + +// ParseRefStr is a helper wrapper around ParseRef that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseRef. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned reference may be nil or incomplete. 
+func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + ref, targetDiags := ParseRef(traversal) + diags = diags.Append(targetDiags) + return ref, diags +} + +// ParseRefStrFromTestingScope matches ParseRefStr except it supports the +// references supported by ParseRefFromTestingScope. +func ParseRefStrFromTestingScope(str string) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + ref, targetDiags := ParseRefFromTestingScope(traversal) + diags = diags.Append(targetDiags) + return ref, diags +} + +func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + switch root { + + case "count": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: CountAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "each": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: ForEachAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "data": + if len(traversal) < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, + Subject: traversal.SourceRange().Ptr(), + }) + return nil, diags + } + remain := traversal[1:] // trim off "data" so we can use our shared resource reference 
parser + return parseResourceRef(DataResourceMode, rootRange, remain) + + case "resource": + // This is an alias for the normal case of just using a managed resource + // type as a top-level symbol, which will serve as an escape mechanism + // if a later edition of the OpenTofu language introduces a new + // reference prefix that conflicts with a resource type name in an + // existing provider. In that case, the edition upgrade tool can + // rewrite foo.bar into resource.foo.bar to ensure that "foo" remains + // interpreted as a resource type name rather than as the new reserved + // word. + if len(traversal) < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "resource" object must be followed by two attribute names: the resource type and the resource name.`, + Subject: traversal.SourceRange().Ptr(), + }) + return nil, diags + } + remain := traversal[1:] // trim off "resource" so we can use our shared resource reference parser + return parseResourceRef(ManagedResourceMode, rootRange, remain) + + case "local": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: LocalValue{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "module": + callName, callRange, remain, diags := parseSingleAttrRef(traversal) + if diags.HasErrors() { + return nil, diags + } + + // A traversal starting with "module" can either be a reference to an + // entire module, or to a single output from a module instance, + // depending on what we find after this introducer. + callInstance := ModuleCallInstance{ + Call: ModuleCall{ + Name: callName, + }, + Key: NoKey, + } + + if len(remain) == 0 { + // Reference to an entire module. Might alternatively be a + // reference to a single instance of a particular module, but the + // caller will need to deal with that ambiguity since we don't have + // enough context here. 
+ return &Reference{ + Subject: callInstance.Call, + SourceRange: tfdiags.SourceRangeFromHCL(callRange), + Remaining: remain, + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + callInstance.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + + if len(remain) == 0 { + // Also a reference to an entire module instance, but we have a key + // now. + return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), + Remaining: remain, + }, diags + } + } + + if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { + remain = remain[1:] + return &Reference{ + Subject: ModuleCallInstanceOutput{ + Name: attrTrav.Name, + Call: callInstance, + }, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), + Remaining: remain, + }, diags + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: "Module instance objects do not support this operation.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + + case "path": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: PathAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "self": + return &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRangeFromHCL(rootRange), + Remaining: traversal[1:], + }, diags + + case "terraform": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: NewTerraformAttr(IdentTerraform, name), + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "tofu": + name, rng, 
remain, parsedDiags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: NewTerraformAttr(IdentTofu, name), + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, parsedDiags + + case "var": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: InputVariable{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + case "template", "lazy", "arg": + // These names are all pre-emptively reserved in the hope of landing + // some version of "template values" or "lazy expressions" feature + // before the next opt-in language edition, but don't yet do anything. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved symbol name", + Detail: fmt.Sprintf("The symbol name %q is reserved for use in a future OpenTofu version. If you are using a provider that already uses this as a resource type name, add the prefix \"resource.\" to force interpretation as a resource type name.", root), + Subject: rootRange.Ptr(), + }) + return nil, diags + + default: + function := ParseFunction(root) + if function.IsNamespace(FunctionNamespaceProvider) { + pf, err := function.AsProviderFunction() + if err != nil { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to parse provider function", + Detail: err.Error(), + Subject: rootRange.Ptr(), + }) + } + return &Reference{ + Subject: pf, + SourceRange: tfdiags.SourceRangeFromHCL(rootRange), + }, diags + } + return parseResourceRef(ManagedResourceMode, rootRange, traversal) + } +} + +func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the 
resource name.`, + Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + // If it isn't a TraverseRoot then it must be a "data" reference. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object does not support this operation.`, + Subject: traversal[0].SourceRange().Ptr(), + }) + return nil, diags + } + + attrTrav, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + var what string + switch mode { + case DataResourceMode: + what = "data source" + default: + what = "resource type" + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), + Subject: traversal[1].SourceRange().Ptr(), + }) + return nil, diags + } + name = attrTrav.Name + rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) + remain := traversal[2:] + + resourceAddr := Resource{ + Mode: mode, + Type: typeName, + Name: name, + } + resourceInstAddr := ResourceInstance{ + Resource: resourceAddr, + Key: NoKey, + } + + if len(remain) == 0 { + // This might actually be a reference to the collection of all instances + // of the resource, but we don't have enough context here to decide + // so we'll let the caller resolve that ambiguity. 
+ return &Reference{ + Subject: resourceAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + rng = hcl.RangeBetween(rng, idxTrav.SrcRange) + } + + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags +} + +func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root), + Subject: &rootRange, + }) + return "", hcl.Range{}, nil, diags + } + if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { + return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object does not support this operation.", root), + Subject: traversal[1].SourceRange().Ptr(), + }) + return "", hcl.Range{}, nil, diags +} diff --git a/pkg/addrs/parse_ref_test.go b/pkg/addrs/parse_ref_test.go new file mode 100644 index 00000000000..2ee88ceac9e --- /dev/null +++ b/pkg/addrs/parse_ref_test.go @@ -0,0 +1,938 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseRefInTestingScope(t *testing.T) { + tests := []struct { + Input string + Want *Reference + WantErr string + }{ + { + `output.value`, + &Reference{ + Subject: OutputValue{ + Name: "value", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 13, Byte: 12}, + }, + }, + ``, + }, + { + `output`, + nil, + `The "output" object cannot be accessed directly. Instead, access one of its attributes.`, + }, + { + `output["foo"]`, + nil, + `The "output" object does not support this operation.`, + }, + + { + `check.health`, + &Reference{ + Subject: Check{ + Name: "health", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 13, Byte: 12}, + }, + }, + ``, + }, + { + `check`, + nil, + `The "check" object cannot be accessed directly. Instead, access one of its attributes.`, + }, + { + `check["foo"]`, + nil, + `The "check" object does not support this operation.`, + }, + + // Sanity check at least one of the others works to verify it does + // fall through to the core function. 
+ { + `count.index`, + &Reference{ + Subject: CountAttr{ + Name: "index", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ``, + }, + } + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) + if travDiags.HasErrors() { + t.Fatal(travDiags.Error()) + } + + got, diags := ParseRefFromTestingScope(traversal) + + switch len(diags) { + case 0: + if test.WantErr != "" { + t.Fatalf("succeeded; want error: %s", test.WantErr) + } + case 1: + if test.WantErr == "" { + t.Fatalf("unexpected diagnostics: %s", diags.Err()) + } + if got, want := diags[0].Description().Detail, test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatalf("too many diagnostics: %s", diags.Err()) + } + + if diags.HasErrors() { + return + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Errorf(problem) + } + }) + } +} + +func TestParseRef(t *testing.T) { + tests := []struct { + Input string + Want *Reference + WantErr string + }{ + + // count + { + `count.index`, + &Reference{ + Subject: CountAttr{ + Name: "index", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ``, + }, + { + `count.index.blah`, + &Reference{ + Subject: CountAttr{ + Name: "index", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 12, Byte: 11}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + End: hcl.Pos{Line: 1, Column: 17, Byte: 16}, + }, + }, + }, + }, + ``, // valid at this layer, but will fail during eval because "index" is a 
number + }, + { + `count`, + nil, + `The "count" object cannot be accessed directly. Instead, access one of its attributes.`, + }, + { + `count["hello"]`, + nil, + `The "count" object does not support this operation.`, + }, + + // each + { + `each.key`, + &Reference{ + Subject: ForEachAttr{ + Name: "key", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 9, Byte: 8}, + }, + }, + ``, + }, + { + `each.value.blah`, + &Reference{ + Subject: ForEachAttr{ + Name: "value", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 11, Byte: 10}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 11, Byte: 10}, + End: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + }, + }, + }, + }, + ``, + }, + { + `each`, + nil, + `The "each" object cannot be accessed directly. 
Instead, access one of its attributes.`, + }, + { + `each["hello"]`, + nil, + `The "each" object does not support this operation.`, + }, + // data + { + `data.external.foo`, + &Reference{ + Subject: Resource{ + Mode: DataResourceMode, + Type: "external", + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 18, Byte: 17}, + }, + }, + ``, + }, + { + `data.external.foo.bar`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "external", + Name: "foo", + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 18, Byte: 17}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "bar", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 18, Byte: 17}, + End: hcl.Pos{Line: 1, Column: 22, Byte: 21}, + }, + }, + }, + }, + ``, + }, + { + `data.external.foo["baz"].bar`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "external", + Name: "foo", + }, + Key: StringKey("baz"), + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "bar", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 25, Byte: 24}, + End: hcl.Pos{Line: 1, Column: 29, Byte: 28}, + }, + }, + }, + }, + ``, + }, + { + `data.external.foo["baz"]`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "external", + Name: "foo", + }, + Key: StringKey("baz"), + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + }, + ``, + }, + { + `data`, + nil, + `The "data" object must be followed by two attribute names: 
the data source type and the resource name.`, + }, + { + `data.external`, + nil, + `The "data" object must be followed by two attribute names: the data source type and the resource name.`, + }, + + // local + { + `local.foo`, + &Reference{ + Subject: LocalValue{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 10, Byte: 9}, + }, + }, + ``, + }, + { + `local.foo.blah`, + &Reference{ + Subject: LocalValue{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 10, Byte: 9}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + End: hcl.Pos{Line: 1, Column: 15, Byte: 14}, + }, + }, + }, + }, + ``, + }, + { + `local.foo["blah"]`, + &Reference{ + Subject: LocalValue{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 10, Byte: 9}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseIndex{ + Key: cty.StringVal("blah"), + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + End: hcl.Pos{Line: 1, Column: 18, Byte: 17}, + }, + }, + }, + }, + ``, + }, + { + `local`, + nil, + `The "local" object cannot be accessed directly. 
Instead, access one of its attributes.`, + }, + { + `local["foo"]`, + nil, + `The "local" object does not support this operation.`, + }, + + // module + { + `module.foo`, + &Reference{ + Subject: ModuleCall{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 11, Byte: 10}, + }, + }, + ``, + }, + { + `module.foo.bar`, + &Reference{ + Subject: ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{ + Name: "foo", + }, + }, + Name: "bar", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 15, Byte: 14}, + }, + }, + ``, + }, + { + `module.foo.bar.baz`, + &Reference{ + Subject: ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{ + Name: "foo", + }, + }, + Name: "bar", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 15, Byte: 14}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "baz", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 15, Byte: 14}, + End: hcl.Pos{Line: 1, Column: 19, Byte: 18}, + }, + }, + }, + }, + ``, + }, + { + `module.foo["baz"]`, + &Reference{ + Subject: ModuleCallInstance{ + Call: ModuleCall{ + Name: "foo", + }, + Key: StringKey("baz"), + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 18, Byte: 17}, + }, + }, + ``, + }, + { + `module.foo["baz"].bar`, + &Reference{ + Subject: ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{ + Name: "foo", + }, + Key: StringKey("baz"), + }, + Name: "bar", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 22, Byte: 21}, + }, + }, + ``, + }, + { + `module.foo["baz"].bar.boop`, + 
&Reference{ + Subject: ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{ + Name: "foo", + }, + Key: StringKey("baz"), + }, + Name: "bar", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 22, Byte: 21}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "boop", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 22, Byte: 21}, + End: hcl.Pos{Line: 1, Column: 27, Byte: 26}, + }, + }, + }, + }, + ``, + }, + { + `module`, + nil, + `The "module" object cannot be accessed directly. Instead, access one of its attributes.`, + }, + { + `module["foo"]`, + nil, + `The "module" object does not support this operation.`, + }, + + // path + { + `path.module`, + &Reference{ + Subject: PathAttr{ + Name: "module", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ``, + }, + { + `path.module.blah`, + &Reference{ + Subject: PathAttr{ + Name: "module", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 12, Byte: 11}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + End: hcl.Pos{Line: 1, Column: 17, Byte: 16}, + }, + }, + }, + }, + ``, // valid at this layer, but will fail during eval because "module" is a string + }, + { + `path`, + nil, + `The "path" object cannot be accessed directly. 
Instead, access one of its attributes.`, + }, + { + `path["module"]`, + nil, + `The "path" object does not support this operation.`, + }, + + // self + { + `self`, + &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 5, Byte: 4}, + }, + }, + ``, + }, + { + `self.blah`, + &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 5, Byte: 4}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 5, Byte: 4}, + End: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + }, + }, + }, + }, + ``, + }, + + // terraform + { + `terraform.workspace`, + &Reference{ + Subject: NewTerraformAttr("terraform", "workspace"), + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 20, Byte: 19}, + }, + }, + ``, + }, + { + `terraform.workspace.blah`, + &Reference{ + Subject: NewTerraformAttr("terraform", "workspace"), + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 20, Byte: 19}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 20, Byte: 19}, + End: hcl.Pos{Line: 1, Column: 25, Byte: 24}, + }, + }, + }, + }, + ``, // valid at this layer, but will fail during eval because "workspace" is a string + }, + { + `terraform`, + nil, + `The "terraform" object cannot be accessed directly. 
Instead, access one of its attributes.`, + }, + { + `terraform["workspace"]`, + nil, + `The "terraform" object does not support this operation.`, + }, + + // tofu + { + `tofu.workspace`, + &Reference{ + Subject: NewTerraformAttr("tofu", "workspace"), + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 15, Byte: 14}, + }, + }, + ``, + }, + { + `tofu.workspace.blah`, + &Reference{ + Subject: NewTerraformAttr("tofu", "workspace"), + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 15, Byte: 14}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 15, Byte: 14}, + End: hcl.Pos{Line: 1, Column: 20, Byte: 19}, + }, + }, + }, + }, + ``, // valid at this layer, but will fail during eval because "workspace" is a string + }, + { + `tofu`, + nil, + `The "tofu" object cannot be accessed directly. 
Instead, access one of its attributes.`, + }, + { + `tofu["workspace"]`, + nil, + `The "tofu" object does not support this operation.`, + }, + + // var + { + `var.foo`, + &Reference{ + Subject: InputVariable{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 8, Byte: 7}, + }, + }, + ``, + }, + { + `var.foo.blah`, + &Reference{ + Subject: InputVariable{ + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 8, Byte: 7}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "blah", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 8, Byte: 7}, + End: hcl.Pos{Line: 1, Column: 13, Byte: 12}, + }, + }, + }, + }, + ``, // valid at this layer, but will fail during eval because "module" is a string + }, + { + `var`, + nil, + `The "var" object cannot be accessed directly. Instead, access one of its attributes.`, + }, + { + `var["foo"]`, + nil, + `The "var" object does not support this operation.`, + }, + + // the "resource" prefix forces interpreting the next name as a + // resource type name. This is an alias for just using a resource + // type name at the top level, to be used only if a later edition + // of the OpenTofu language introduces a new reserved word that + // overlaps with a resource type name. + { + `resource.boop_instance.foo`, + &Reference{ + Subject: Resource{ + Mode: ManagedResourceMode, + Type: "boop_instance", + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 27, Byte: 26}, + }, + }, + ``, + }, + + // We have some names reserved which might be used by a + // still-under-discussion proposal for template values or lazy + // expressions. 
+ { + `template.foo`, + nil, + `The symbol name "template" is reserved for use in a future OpenTofu version. If you are using a provider that already uses this as a resource type name, add the prefix "resource." to force interpretation as a resource type name.`, + }, + { + `lazy.foo`, + nil, + `The symbol name "lazy" is reserved for use in a future OpenTofu version. If you are using a provider that already uses this as a resource type name, add the prefix "resource." to force interpretation as a resource type name.`, + }, + { + `arg.foo`, + nil, + `The symbol name "arg" is reserved for use in a future OpenTofu version. If you are using a provider that already uses this as a resource type name, add the prefix "resource." to force interpretation as a resource type name.`, + }, + + // anything else, interpreted as a managed resource reference + { + `boop_instance.foo`, + &Reference{ + Subject: Resource{ + Mode: ManagedResourceMode, + Type: "boop_instance", + Name: "foo", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 18, Byte: 17}, + }, + }, + ``, + }, + { + `boop_instance.foo.bar`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "boop_instance", + Name: "foo", + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 18, Byte: 17}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "bar", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 18, Byte: 17}, + End: hcl.Pos{Line: 1, Column: 22, Byte: 21}, + }, + }, + }, + }, + ``, + }, + { + `boop_instance.foo["baz"].bar`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "boop_instance", + Name: "foo", + }, + Key: StringKey("baz"), + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 
0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + Remaining: hcl.Traversal{ + hcl.TraverseAttr{ + Name: "bar", + SrcRange: hcl.Range{ + Start: hcl.Pos{Line: 1, Column: 25, Byte: 24}, + End: hcl.Pos{Line: 1, Column: 29, Byte: 28}, + }, + }, + }, + }, + ``, + }, + { + `boop_instance.foo["baz"]`, + &Reference{ + Subject: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "boop_instance", + Name: "foo", + }, + Key: StringKey("baz"), + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + }, + ``, + }, + { + `boop_instance`, + nil, + `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, + }, + + // Should interpret checks and outputs as resource types. + { + `output.value`, + &Reference{ + Subject: Resource{ + Mode: ManagedResourceMode, + Type: "output", + Name: "value", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 13, Byte: 12}, + }, + }, + ``, + }, + { + `check.health`, + &Reference{ + Subject: Resource{ + Mode: ManagedResourceMode, + Type: "check", + Name: "health", + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 13, Byte: 12}, + }, + }, + ``, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) + if travDiags.HasErrors() { + t.Fatal(travDiags.Error()) + } + + got, diags := ParseRef(traversal) + + switch len(diags) { + case 0: + if test.WantErr != "" { + t.Fatalf("succeeded; want error: %s", test.WantErr) + } + case 1: + if test.WantErr == "" { + t.Fatalf("unexpected diagnostics: %s", diags.Err()) + } + if got, want := 
diags[0].Description().Detail, test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatalf("too many diagnostics: %s", diags.Err()) + } + + if diags.HasErrors() { + return + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Errorf(problem) + } + }) + } +} diff --git a/pkg/addrs/parse_target.go b/pkg/addrs/parse_target.go new file mode 100644 index 00000000000..70903af26d0 --- /dev/null +++ b/pkg/addrs/parse_target.go @@ -0,0 +1,431 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Target describes a targeted address with source location information. +type Target struct { + Subject Targetable + SourceRange tfdiags.SourceRange +} + +// ParseTarget attempts to interpret the given traversal as a targetable +// address. The given traversal must be absolute, or this function will +// panic. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the Target value is invalid and +// must not be used. 
+func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &Target{ + Subject: path, + SourceRange: rng, + }, diags + } + + riAddr, moreDiags := parseResourceInstanceUnderModule(path, remain) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + + var subject Targetable + switch { + case riAddr.Resource.Key == NoKey: + // We always assume that a no-key instance is meant to + // be referring to the whole resource, because the distinction + // doesn't really matter for targets anyway. + subject = riAddr.ContainingResource() + default: + subject = riAddr + } + + return &Target{ + Subject: subject, + SourceRange: rng, + }, diags +} + +func parseResourceInstanceUnderModule(moduleAddr ModuleInstance, remain hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { + // Note that this helper is used as part of multiple public functions + // so its error messages should be generic enough to suit all the situations. 
+ + var diags tfdiags.Diagnostics + + mode := ManagedResourceMode + if remain.RootName() == "data" { + mode = DataResourceMode + remain = remain[1:] + } + + typeName, name, diags := parseResourceTypeAndName(remain, mode) + if diags.HasErrors() { + return AbsResourceInstance{}, diags + } + + remain = remain[2:] + switch len(remain) { + case 0: + return moduleAddr.ResourceInstance(mode, typeName, name, NoKey), diags + case 1: + if tt, ok := remain[0].(hcl.TraverseIndex); ok { + key, err := ParseInstanceKey(tt.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), + Subject: remain[0].SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + } + + return moduleAddr.ResourceInstance(mode, typeName, name, key), diags + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource instance key must be given in square brackets.", + Subject: remain[0].SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Unexpected extra operators after address.", + Subject: remain[1].SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + } +} + +// parseResourceUnderModule is a helper function that parses a traversal, which +// is an address (or a part of an address) that describes a resource (e.g. +// ["null_resource," "boop"] or ["data", "null_data_source," "bip"]), under a +// module. It returns the ConfigResource that represents the resource address. +// It does not support addresses of resources with instance keys, and will +// return an error if it encounters one (unlike +// parseResourceInstanceUnderModule). 
+// This function does not expect to encounter a module prefix in the traversal, +// as it should be processed by parseModulePrefix first. +func parseResourceUnderModule(moduleAddr Module, remain hcl.Traversal) (ConfigResource, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + mode := ManagedResourceMode + if remain.RootName() == "data" { + mode = DataResourceMode + remain = remain[1:] + } + + typeName, name, diags := parseResourceTypeAndName(remain, mode) + if diags.HasErrors() { + return ConfigResource{}, diags + } + + remain = remain[2:] + switch len(remain) { + case 0: + return moduleAddr.Resource(mode, typeName, name), diags + case 1: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Resource instance address with keys is not allowed", + Detail: "Resource address cannot be a resource instance (e.g. \"null_resource.a[0]\"), it must be a resource instead (e.g. \"null_resource.a\").", + Subject: remain[0].SourceRange().Ptr(), + }) + return ConfigResource{}, diags + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Unexpected extra operators after address.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ConfigResource{}, diags + } +} + +// parseResourceTypeAndName is a helper function that parses a traversal, which +// is an address (or a part of an address) that describes a resource (e.g. +// ["null_resource," "boop"]) and returns its type and name. +// It is used in parseResourceUnderModule and parseResourceInstanceUnderModule, +// and does not expect to encounter a module prefix in the traversal. 
+func parseResourceTypeAndName(remain hcl.Traversal, mode ResourceMode) (typeName, name string, diags tfdiags.Diagnostics) { + if len(remain) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource specification must include a resource type and name.", + Subject: remain.SourceRange().Ptr(), + }) + return typeName, name, diags + } + + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + switch mode { + case ManagedResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource type name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + case DataResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A data source name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + default: + panic("unknown mode") + } + return typeName, name, diags + } + + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return typeName, name, diags + } + + return typeName, name, diags +} + +// ParseTargetStr is a helper wrapper around ParseTarget that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a target string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseTarget. 
+// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned target may be nil or incomplete. +func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + target, targetDiags := ParseTarget(traversal) + diags = diags.Append(targetDiags) + return target, diags +} + +// ParseAbsResource attempts to interpret the given traversal as an absolute +// resource address, using the same syntax as expected by ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResource{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt, diags + + case AbsResourceInstance: // Catch likely user error with specialized message + // Assume that the last element of the traversal must be the index, + // since that's required for a valid resource instance address. + indexStep := traversal[len(traversal)-1] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required. 
This instance key identifies a specific resource instance, which is not expected here.", + Subject: indexStep.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here. The module path must be followed by a resource specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResource{}, diags + + } +} + +// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a +// string and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. +// +// Since this function has no context about the source of the given string, +// any returned diagnostics will not have meaningful source location +// information. 
+func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResource{}, diags + } + + addr, addrDiags := ParseAbsResource(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ParseAbsResourceInstance attempts to interpret the given traversal as an +// absolute resource instance address, using the same syntax as expected by +// ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResourceInstance{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt.Instance(NoKey), diags + + case AbsResourceInstance: + return tt, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource instance address is required here. 
The module path must be followed by a resource instance specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + } +} + +// ParseAbsResourceInstanceStr is a helper wrapper around +// ParseAbsResourceInstance that takes a string and parses it with the HCL +// native syntax traversal parser before interpreting it. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. +// +// Since this function has no context about the source of the given string, +// any returned diagnostics will not have meaningful source location +// information. +func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResourceInstance{}, diags + } + + addr, addrDiags := ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ModuleAddr returns the module address portion of the subject of +// the recieving target. +// +// Regardless of specific address type, all targets always include +// a module address. They might also include something in that +// module, which this method always discards if so. 
+func (t *Target) ModuleAddr() ModuleInstance { + switch addr := t.Subject.(type) { + case ModuleInstance: + return addr + case Module: + // We assume that a module address is really + // referring to a module path containing only + // single-instance modules. + return addr.UnkeyedInstanceShim() + case AbsResourceInstance: + return addr.Module + case AbsResource: + return addr.Module + default: + // The above cases should be exhaustive for all + // implementations of Targetable. + panic(fmt.Sprintf("unsupported target address type %T", addr)) + } +} diff --git a/pkg/addrs/parse_target_test.go b/pkg/addrs/parse_target_test.go new file mode 100644 index 00000000000..d85198f5c06 --- /dev/null +++ b/pkg/addrs/parse_target_test.go @@ -0,0 +1,393 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseTarget(t *testing.T) { + tests := []struct { + Input string + Want *Target + WantErr string + }{ + { + `module.foo`, + &Target{ + Subject: ModuleInstance{ + { + Name: "foo", + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 11, Byte: 10}, + }, + }, + ``, + }, + { + `module.foo[2]`, + &Target{ + Subject: ModuleInstance{ + { + Name: "foo", + InstanceKey: IntKey(2), + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 14, Byte: 13}, + }, + }, + ``, + }, + { + `module.foo[2].module.bar`, + &Target{ + Subject: ModuleInstance{ + { + Name: "foo", + InstanceKey: IntKey(2), + }, + { + Name: "bar", + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 
0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + }, + ``, + }, + { + `aws_instance.foo`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: RootModuleInstance, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 17, Byte: 16}, + }, + }, + ``, + }, + { + `aws_instance.foo[1]`, + &Target{ + Subject: AbsResourceInstance{ + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Key: IntKey(1), + }, + Module: RootModuleInstance, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 20, Byte: 19}, + }, + }, + ``, + }, + { + `data.aws_instance.foo`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: RootModuleInstance, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 22, Byte: 21}, + }, + }, + ``, + }, + { + `data.aws_instance.foo[1]`, + &Target{ + Subject: AbsResourceInstance{ + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Key: IntKey(1), + }, + Module: RootModuleInstance, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 25, Byte: 24}, + }, + }, + ``, + }, + { + `module.foo.aws_instance.bar`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }, + Module: ModuleInstance{ + {Name: "foo"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: 
tfdiags.SourcePos{Line: 1, Column: 28, Byte: 27}, + }, + }, + ``, + }, + { + `module.foo.module.bar.aws_instance.baz`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Module: ModuleInstance{ + {Name: "foo"}, + {Name: "bar"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 39, Byte: 38}, + }, + }, + ``, + }, + { + `module.foo.module.bar.aws_instance.baz["hello"]`, + &Target{ + Subject: AbsResourceInstance{ + Resource: ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Key: StringKey("hello"), + }, + Module: ModuleInstance{ + {Name: "foo"}, + {Name: "bar"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 48, Byte: 47}, + }, + }, + ``, + }, + { + `module.foo.data.aws_instance.bar`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "bar", + }, + Module: ModuleInstance{ + {Name: "foo"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 33, Byte: 32}, + }, + }, + ``, + }, + { + `module.foo.module.bar.data.aws_instance.baz`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Module: ModuleInstance{ + {Name: "foo"}, + {Name: "bar"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 44, Byte: 43}, + }, + }, + ``, + }, + { + `module.foo.module.bar[0].data.aws_instance.baz`, + &Target{ + Subject: AbsResource{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Module: 
ModuleInstance{ + {Name: "foo", InstanceKey: NoKey}, + {Name: "bar", InstanceKey: IntKey(0)}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 47, Byte: 46}, + }, + }, + ``, + }, + { + `module.foo.module.bar["a"].data.aws_instance.baz["hello"]`, + &Target{ + Subject: AbsResourceInstance{ + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Key: StringKey("hello"), + }, + Module: ModuleInstance{ + {Name: "foo", InstanceKey: NoKey}, + {Name: "bar", InstanceKey: StringKey("a")}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 58, Byte: 57}, + }, + }, + ``, + }, + { + `module.foo.module.bar.data.aws_instance.baz["hello"]`, + &Target{ + Subject: AbsResourceInstance{ + Resource: ResourceInstance{ + Resource: Resource{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "baz", + }, + Key: StringKey("hello"), + }, + Module: ModuleInstance{ + {Name: "foo"}, + {Name: "bar"}, + }, + }, + SourceRange: tfdiags.SourceRange{ + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 53, Byte: 52}, + }, + }, + ``, + }, + + { + `aws_instance`, + nil, + `Resource specification must include a resource type and name.`, + }, + { + `module`, + nil, + `Prefix "module." must be followed by a module name.`, + }, + { + `module["baz"]`, + nil, + `Prefix "module." 
must be followed by a module name.`, + }, + { + `module.baz.bar`, + nil, + `Resource specification must include a resource type and name.`, + }, + { + `aws_instance.foo.bar`, + nil, + `Resource instance key must be given in square brackets.`, + }, + { + `aws_instance.foo[1].baz`, + nil, + `Unexpected extra operators after address.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) + if travDiags.HasErrors() { + t.Fatal(travDiags.Error()) + } + + got, diags := ParseTarget(traversal) + + switch len(diags) { + case 0: + if test.WantErr != "" { + t.Fatalf("succeeded; want error: %s", test.WantErr) + } + case 1: + if test.WantErr == "" { + t.Fatalf("unexpected diagnostics: %s", diags.Err()) + } + if got, want := diags[0].Description().Detail, test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatalf("too many diagnostics: %s", diags.Err()) + } + + if diags.HasErrors() { + return + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Errorf(problem) + } + }) + } +} diff --git a/pkg/addrs/path_attr.go b/pkg/addrs/path_attr.go new file mode 100644 index 00000000000..8c7b0df85b0 --- /dev/null +++ b/pkg/addrs/path_attr.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// PathAttr is the address of an attribute of the "path" object in +// the interpolation scope, like "path.module". +type PathAttr struct { + referenceable + Name string +} + +func (pa PathAttr) String() string { + return "path." 
+ pa.Name +} + +func (pa PathAttr) UniqueKey() UniqueKey { + return pa // A PathAttr is its own UniqueKey +} + +func (pa PathAttr) uniqueKeySigil() {} diff --git a/pkg/addrs/provider.go b/pkg/addrs/provider.go new file mode 100644 index 00000000000..a43c09fd775 --- /dev/null +++ b/pkg/addrs/provider.go @@ -0,0 +1,210 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "github.com/hashicorp/hcl/v2" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfaddr "github.com/opentofu/registry-address" +) + +// Provider encapsulates a single provider type. In the future this will be +// extended to include additional fields including Namespace and SourceHost +type Provider = tfaddr.Provider + +// DefaultProviderRegistryHost is the hostname used for provider addresses that do +// not have an explicit hostname. +const DefaultProviderRegistryHost = tfaddr.DefaultProviderRegistryHost + +// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider +// namespace. Built-in provider addresses must also have their namespace set +// to BuiltInProviderNamespace in order to be considered as built-in. +const BuiltInProviderHost = tfaddr.BuiltInProviderHost + +// BuiltInProviderNamespace is the provider namespace used for "built-in" +// providers. Built-in provider addresses must also have their hostname +// set to BuiltInProviderHost in order to be considered as built-in. +// +// The this namespace is literally named "builtin", in the hope that users +// who see FQNs containing this will be able to infer the way in which they are +// special, even if they haven't encountered the concept formally yet. 
+const BuiltInProviderNamespace = tfaddr.BuiltInProviderNamespace + +// LegacyProviderNamespace is the special string used in the Namespace field +// of type Provider to mark a legacy provider address. This special namespace +// value would normally be invalid, and can be used only when the hostname is +// DefaultRegistryHost because that host owns the mapping from legacy name to +// FQN. +const LegacyProviderNamespace = tfaddr.LegacyProviderNamespace + +func IsDefaultProvider(addr Provider) bool { + return addr.Hostname == DefaultProviderRegistryHost && addr.Namespace == "hashicorp" +} + +// NewProvider constructs a provider address from its parts, and normalizes +// the namespace and type parts to lowercase using unicode case folding rules +// so that resulting addrs.Provider values can be compared using standard +// Go equality rules (==). +// +// The hostname is given as a svchost.Hostname, which is required by the +// contract of that type to have already been normalized for equality testing. +// +// This function will panic if the given namespace or type name are not valid. +// When accepting namespace or type values from outside the program, use +// ParseProviderPart first to check that the given value is valid. +func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider { + return tfaddr.NewProvider(hostname, namespace, typeName) +} + +// ImpliedProviderForUnqualifiedType represents the rules for inferring what +// provider FQN a user intended when only a naked type name is available. +// +// For all except the type name "terraform" this returns a so-called "default" +// provider, which is under the registry.terraform.io/hashicorp/ namespace. +// +// As a special case, the string "terraform" maps to +// "terraform.io/builtin/terraform" because that is the more likely user +// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform" +// which remains only for compatibility with older OpenTofu versions. 
+func ImpliedProviderForUnqualifiedType(typeName string) Provider { + switch typeName { + case "terraform": + // Note for future maintainers: any additional strings we add here + // as implied to be builtin must never also be use as provider names + // in the registry.terraform.io/hashicorp/... namespace, because + // otherwise older versions of OpenTofu could implicitly select + // the registry name instead of the internal one. + return NewBuiltInProvider(typeName) + default: + return NewDefaultProvider(typeName) + } +} + +// NewDefaultProvider returns the default address of a HashiCorp-maintained, +// Registry-hosted provider. +func NewDefaultProvider(name string) Provider { + return tfaddr.Provider{ + Type: MustParseProviderPart(name), + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + } +} + +// NewBuiltInProvider returns the address of a "built-in" provider. See +// the docs for Provider.IsBuiltIn for more information. +func NewBuiltInProvider(name string) Provider { + return tfaddr.Provider{ + Type: MustParseProviderPart(name), + Namespace: BuiltInProviderNamespace, + Hostname: BuiltInProviderHost, + } +} + +// NewLegacyProvider returns a mock address for a provider. +// This will be removed when ProviderType is fully integrated. +func NewLegacyProvider(name string) Provider { + return Provider{ + // We intentionally don't normalize and validate the legacy names, + // because existing code expects legacy provider names to pass through + // verbatim, even if not compliant with our new naming rules. + Type: name, + Namespace: LegacyProviderNamespace, + Hostname: DefaultProviderRegistryHost, + } +} + +// ParseProviderSourceString parses a value of the form expected in the "source" +// argument of a required_providers entry and returns the corresponding +// fully-qualified provider address. This is intended primarily to parse the +// FQN-like strings returned by terraform-config-inspect. 
+// +// The following are valid source string formats: +// +// - name +// - namespace/name +// - hostname/namespace/name +func ParseProviderSourceString(str string) (tfaddr.Provider, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + ret, err := tfaddr.ParseProviderSource(str) + if pe, ok := err.(*tfaddr.ParserError); ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: pe.Summary, + Detail: pe.Detail, + }) + return ret, diags + } + + if !ret.HasKnownNamespace() { + ret.Namespace = "hashicorp" + } + + return ret, nil +} + +// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if +// it returns an error. +func MustParseProviderSourceString(str string) Provider { + result, diags := ParseProviderSourceString(str) + if diags.HasErrors() { + panic(diags.Err().Error()) + } + return result +} + +// ParseProviderPart processes an addrs.Provider namespace or type string +// provided by an end-user, producing a normalized version if possible or +// an error if the string contains invalid characters. +// +// A provider part is processed in the same way as an individual label in a DNS +// domain name: it is transformed to lowercase per the usual DNS case mapping +// and normalization rules and may contain only letters, digits, and dashes. +// Additionally, dashes may not appear at the start or end of the string. +// +// These restrictions are intended to allow these names to appear in fussy +// contexts such as directory/file names on case-insensitive filesystems, +// repository names on GitHub, etc. We're using the DNS rules in particular, +// rather than some similar rules defined locally, because the hostname part +// of an addrs.Provider is already a hostname and it's ideal to use exactly +// the same case folding and normalization rules for all of the parts. +// +// In practice a provider type string conventionally does not contain dashes +// either. 
Such names are permitted, but providers with such type names will be +// hard to use because their resource type names will not be able to contain +// the provider type name and thus each resource will need an explicit provider +// address specified. (A real-world example of such a provider is the +// "google-beta" variant of the GCP provider, which has resource types that +// start with the "google_" prefix instead.) +// +// It's valid to pass the result of this function as the argument to a +// subsequent call, in which case the result will be identical. +func ParseProviderPart(given string) (string, error) { + return tfaddr.ParseProviderPart(given) +} + +// MustParseProviderPart is a wrapper around ParseProviderPart that panics if +// it returns an error. +func MustParseProviderPart(given string) string { + result, err := ParseProviderPart(given) + if err != nil { + panic(err.Error()) + } + return result +} + +// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string) +func IsProviderPartNormalized(str string) (bool, error) { + normalized, err := ParseProviderPart(str) + if err != nil { + return false, err + } + if str == normalized { + return true, nil + } + return false, nil +} diff --git a/pkg/addrs/provider_config.go b/pkg/addrs/provider_config.go new file mode 100644 index 00000000000..a1e1998ea29 --- /dev/null +++ b/pkg/addrs/provider_config.go @@ -0,0 +1,415 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// ProviderConfig is an interface type whose dynamic type can be either +// LocalProviderConfig or AbsProviderConfig, in order to represent situations +// where a value might either be module-local or absolute but the decision +// cannot be made until runtime. +// +// Where possible, use either LocalProviderConfig or AbsProviderConfig directly +// instead, to make intent more clear. ProviderConfig can be used only in +// situations where the recipient of the value has some out-of-band way to +// determine a "current module" to use if the value turns out to be +// a LocalProviderConfig. +// +// Recipients of non-nil ProviderConfig values that actually need +// AbsProviderConfig values should call ResolveAbsProviderAddr on the +// *configs.Config value representing the root module configuration, which +// handles the translation from local to fully-qualified using mapping tables +// defined in the configuration. +// +// Recipients of a ProviderConfig value can assume it can contain only a +// LocalProviderConfig value, an AbsProviderConfigValue, or nil to represent +// the absense of a provider config in situations where that is meaningful. +type ProviderConfig interface { + providerConfig() +} + +// LocalProviderConfig is the address of a provider configuration from the +// perspective of references in a particular module. +// +// Finding the corresponding AbsProviderConfig will require looking up the +// LocalName in the providers table in the module's configuration; there is +// no syntax-only translation between these types. +type LocalProviderConfig struct { + LocalName string + + // If not empty, Alias identifies which non-default (aliased) provider + // configuration this address refers to. 
+ Alias string +} + +var _ ProviderConfig = LocalProviderConfig{} + +// NewDefaultLocalProviderConfig returns the address of the default (un-aliased) +// configuration for the provider with the given local type name. +func NewDefaultLocalProviderConfig(LocalNameName string) LocalProviderConfig { + return LocalProviderConfig{ + LocalName: LocalNameName, + } +} + +// providerConfig Implements addrs.ProviderConfig. +func (pc LocalProviderConfig) providerConfig() {} + +func (pc LocalProviderConfig) String() string { + if pc.LocalName == "" { + // Should never happen; always indicates a bug + return "provider." + } + + if pc.Alias != "" { + return fmt.Sprintf("provider.%s.%s", pc.LocalName, pc.Alias) + } + + return "provider." + pc.LocalName +} + +// StringCompact is an alternative to String that returns the form that can +// be parsed by ParseProviderConfigCompact, without the "provider." prefix. +func (pc LocalProviderConfig) StringCompact() string { + if pc.Alias != "" { + return fmt.Sprintf("%s.%s", pc.LocalName, pc.Alias) + } + return pc.LocalName +} + +// AbsProviderConfig is the absolute address of a provider configuration +// within a particular module instance. +type AbsProviderConfig struct { + Module Module + Provider Provider + Alias string +} + +var _ ProviderConfig = AbsProviderConfig{} + +// ParseAbsProviderConfig parses the given traversal as an absolute provider +// configuration address. The following are examples of traversals that can be +// successfully parsed as absolute provider configuration addresses: +// +// - provider["registry.opentofu.org/hashicorp/aws"] +// - provider["registry.opentofu.org/hashicorp/aws"].foo +// - module.bar.provider["registry.opentofu.org/hashicorp/aws"] +// - module.bar.module.baz.provider["registry.opentofu.org/hashicorp/aws"].foo +// +// This type of address is used, for example, to record the relationships +// between resources and provider configurations in the state structure. 
+// This type of address is typically not used prominently in the UI, except in +// error messages that refer to provider configurations. +func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { + modInst, remain, diags := parseModuleInstancePrefix(traversal) + var ret AbsProviderConfig + + // Providers cannot resolve within module instances, so verify that there + // are no instance keys in the module path before converting to a Module. + for _, step := range modInst { + if step.InstanceKey != NoKey { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address cannot contain module indexes", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + } + ret.Module = modInst.Module() + + if len(remain) < 2 || remain.RootName() != "provider" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + if len(remain) > 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous operators after provider configuration alias.", + Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), + }) + return ret, diags + } + + if tt, ok := remain[1].(hcl.TraverseIndex); ok { + if !tt.Key.Type().Equals(cty.String) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The prefix \"provider.\" must be followed by a provider type name.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ret, diags + } + p, sourceDiags := ParseProviderSourceString(tt.Key.AsString()) + ret.Provider = p + if sourceDiags.HasErrors() { + diags = diags.Append(sourceDiags) + return 
ret, diags + } + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The prefix \"provider.\" must be followed by a provider type name.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ret, diags + } + + if len(remain) == 3 { + if tt, ok := remain[2].(hcl.TraverseAttr); ok { + ret.Alias = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider type name must be followed by a configuration alias name.", + Subject: remain[2].SourceRange().Ptr(), + }) + return ret, diags + } + } + + return ret, diags +} + +// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseAbsProviderConfig. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address is invalid. 
+func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsProviderConfig{}, diags + } + addr, addrDiags := ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +func ParseLegacyAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsProviderConfig{}, diags + } + + addr, addrDiags := ParseLegacyAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ParseLegacyAbsProviderConfig parses the given traversal as an absolute +// provider address in the legacy form used by OpenTofu v0.12 and earlier. +// The following are examples of traversals that can be successfully parsed as +// legacy absolute provider configuration addresses: +// +// - provider.aws +// - provider.aws.foo +// - module.bar.provider.aws +// - module.bar.module.baz.provider.aws.foo +// +// We can encounter this kind of address in a historical state snapshot that +// hasn't yet been upgraded by refreshing or applying a plan with +// OpenTofu v0.13. Later versions of OpenTofu reject state snapshots using +// this format, and so users must follow the OpenTofu v0.13 upgrade guide +// in that case. +// +// We will not use this address form for any new file formats. 
+func ParseLegacyAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { + modInst, remain, diags := parseModuleInstancePrefix(traversal) + var ret AbsProviderConfig + + // Providers cannot resolve within module instances, so verify that there + // are no instance keys in the module path before converting to a Module. + for _, step := range modInst { + if step.InstanceKey != NoKey { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address cannot contain module indexes", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + } + ret.Module = modInst.Module() + + if len(remain) < 2 || remain.RootName() != "provider" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + if len(remain) > 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous operators after provider configuration alias.", + Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), + }) + return ret, diags + } + + // We always assume legacy-style providers in legacy state ... + if tt, ok := remain[1].(hcl.TraverseAttr); ok { + // ... unless it's the builtin "terraform" provider, a special case. 
+ if tt.Name == "terraform" { + ret.Provider = NewBuiltInProvider(tt.Name) + } else { + ret.Provider = NewLegacyProvider(tt.Name) + } + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The prefix \"provider.\" must be followed by a provider type name.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ret, diags + } + + if len(remain) == 3 { + if tt, ok := remain[2].(hcl.TraverseAttr); ok { + ret.Alias = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider type name must be followed by a configuration alias name.", + Subject: remain[2].SourceRange().Ptr(), + }) + return ret, diags + } + } + + return ret, diags +} + +// ProviderConfigDefault returns the address of the default provider config of +// the given type inside the recieving module instance. +func (m ModuleInstance) ProviderConfigDefault(provider Provider) AbsProviderConfig { + return AbsProviderConfig{ + Module: m.Module(), + Provider: provider, + } +} + +// ProviderConfigAliased returns the address of an aliased provider config of +// the given type and alias inside the recieving module instance. +func (m ModuleInstance) ProviderConfigAliased(provider Provider, alias string) AbsProviderConfig { + return AbsProviderConfig{ + Module: m.Module(), + Provider: provider, + Alias: alias, + } +} + +// providerConfig Implements addrs.ProviderConfig. +func (pc AbsProviderConfig) providerConfig() {} + +// Inherited returns an address that the receiving configuration address might +// inherit from in a parent module. The second bool return value indicates if +// such inheritance is possible, and thus whether the returned address is valid. +// +// Inheritance is possible only for default (un-aliased) providers in modules +// other than the root module. 
Even if a valid address is returned, inheritence +// may not be performed for other reasons, such as if the calling module +// provided explicit provider configurations within the call for this module. +// The ProviderTransformer graph transform in the main tofu module has the +// authoritative logic for provider inheritance, and this method is here mainly +// just for its benefit. +func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { + // Can't inherit if we're already in the root. + if len(pc.Module) == 0 { + return AbsProviderConfig{}, false + } + + // Can't inherit if we have an alias. + if pc.Alias != "" { + return AbsProviderConfig{}, false + } + + // Otherwise, we might inherit from a configuration with the same + // provider type in the parent module instance. + parentMod := pc.Module.Parent() + return AbsProviderConfig{ + Module: parentMod, + Provider: pc.Provider, + }, true + +} + +// LegacyString() returns a legacy-style AbsProviderConfig string and should only be used for legacy state shimming. 
+func (pc AbsProviderConfig) LegacyString() string { + if pc.Alias != "" { + if len(pc.Module) == 0 { + return fmt.Sprintf("%s.%s.%s", "provider", pc.Provider.LegacyString(), pc.Alias) + } else { + return fmt.Sprintf("%s.%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString(), pc.Alias) + } + } + if len(pc.Module) == 0 { + return fmt.Sprintf("%s.%s", "provider", pc.Provider.LegacyString()) + } + return fmt.Sprintf("%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString()) +} + +// String() returns a string representation of an AbsProviderConfig in a format like the following examples: +// +// - provider["example.com/namespace/name"] +// - provider["example.com/namespace/name"].alias +// - module.module-name.provider["example.com/namespace/name"] +// - module.module-name.provider["example.com/namespace/name"].alias +func (pc AbsProviderConfig) String() string { + var parts []string + if len(pc.Module) > 0 { + parts = append(parts, pc.Module.String()) + } + + parts = append(parts, fmt.Sprintf("provider[%q]", pc.Provider)) + + if pc.Alias != "" { + parts = append(parts, pc.Alias) + } + + return strings.Join(parts, ".") +} diff --git a/pkg/addrs/provider_config_test.go b/pkg/addrs/provider_config_test.go new file mode 100644 index 00000000000..da93a1645b5 --- /dev/null +++ b/pkg/addrs/provider_config_test.go @@ -0,0 +1,285 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "reflect" + "testing" + + "github.com/go-test/deep" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestParseAbsProviderConfig(t *testing.T) { + tests := []struct { + Input string + Want AbsProviderConfig + WantDiag string + }{ + { + `provider["registry.opentofu.org/hashicorp/aws"]`, + AbsProviderConfig{ + Module: RootModule, + Provider: Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: "registry.opentofu.org", + }, + }, + ``, + }, + { + `provider["registry.opentofu.org/hashicorp/aws"].foo`, + AbsProviderConfig{ + Module: RootModule, + Provider: Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: "registry.opentofu.org", + }, + Alias: "foo", + }, + ``, + }, + { + `module.baz.provider["registry.opentofu.org/hashicorp/aws"]`, + AbsProviderConfig{ + Module: Module{"baz"}, + Provider: Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: "registry.opentofu.org", + }, + }, + ``, + }, + { + `module.baz.provider["registry.opentofu.org/hashicorp/aws"].foo`, + AbsProviderConfig{ + Module: Module{"baz"}, + Provider: Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: "registry.opentofu.org", + }, + Alias: "foo", + }, + ``, + }, + { + `module.baz["foo"].provider["registry.opentofu.org/hashicorp/aws"]`, + AbsProviderConfig{}, + `Provider address cannot contain module indexes`, + }, + { + `module.baz[1].provider["registry.opentofu.org/hashicorp/aws"]`, + AbsProviderConfig{}, + `Provider address cannot contain module indexes`, + }, + { + `module.baz[1].module.bar.provider["registry.opentofu.org/hashicorp/aws"]`, + AbsProviderConfig{}, + `Provider address cannot contain module indexes`, + }, + { + `aws`, + AbsProviderConfig{}, + `Provider address must begin with "provider.", followed by a provider type name.`, + }, + { + `aws.foo`, + AbsProviderConfig{}, + `Provider address must begin with "provider.", followed by a provider type 
name.`, + }, + { + `provider`, + AbsProviderConfig{}, + `Provider address must begin with "provider.", followed by a provider type name.`, + }, + { + `provider.aws.foo.bar`, + AbsProviderConfig{}, + `Extraneous operators after provider configuration alias.`, + }, + { + `provider["aws"]["foo"]`, + AbsProviderConfig{}, + `Provider type name must be followed by a configuration alias name.`, + }, + { + `module.foo`, + AbsProviderConfig{}, + `Provider address must begin with "provider.", followed by a provider type name.`, + }, + { + `provider[0]`, + AbsProviderConfig{}, + `The prefix "provider." must be followed by a provider type name.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Logf("- %s", diag) + } + return + } + + got, diags := ParseAbsProviderConfig(traversal) + + if test.WantDiag != "" { + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want 1", len(diags)) + } + gotDetail := diags[0].Description().Detail + if gotDetail != test.WantDiag { + t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) + } + return + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want 0", len(diags)) + } + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} + +func TestAbsProviderConfigString(t *testing.T) { + tests := []struct { + Config AbsProviderConfig + Want string + }{ + { + AbsProviderConfig{ + Module: RootModule, + Provider: NewLegacyProvider("foo"), + }, + `provider["registry.opentofu.org/-/foo"]`, + }, + { + AbsProviderConfig{ + Module: RootModule.Child("child_module"), + Provider: NewDefaultProvider("foo"), + }, + `module.child_module.provider["registry.opentofu.org/hashicorp/foo"]`, + }, + { + AbsProviderConfig{ + Module: RootModule, + Alias: 
"bar", + Provider: NewDefaultProvider("foo"), + }, + `provider["registry.opentofu.org/hashicorp/foo"].bar`, + }, + { + AbsProviderConfig{ + Module: RootModule.Child("child_module"), + Alias: "bar", + Provider: NewDefaultProvider("foo"), + }, + `module.child_module.provider["registry.opentofu.org/hashicorp/foo"].bar`, + }, + } + + for _, test := range tests { + got := test.Config.String() + if got != test.Want { + t.Errorf("wrong result. Got %s, want %s\n", got, test.Want) + } + } +} + +func TestAbsProviderConfigLegacyString(t *testing.T) { + tests := []struct { + Config AbsProviderConfig + Want string + }{ + { + AbsProviderConfig{ + Module: RootModule, + Provider: NewLegacyProvider("foo"), + }, + `provider.foo`, + }, + { + AbsProviderConfig{ + Module: RootModule.Child("child_module"), + Provider: NewLegacyProvider("foo"), + }, + `module.child_module.provider.foo`, + }, + { + AbsProviderConfig{ + Module: RootModule, + Alias: "bar", + Provider: NewLegacyProvider("foo"), + }, + `provider.foo.bar`, + }, + { + AbsProviderConfig{ + Module: RootModule.Child("child_module"), + Alias: "bar", + Provider: NewLegacyProvider("foo"), + }, + `module.child_module.provider.foo.bar`, + }, + } + + for _, test := range tests { + got := test.Config.LegacyString() + if got != test.Want { + t.Errorf("wrong result. 
Got %s, want %s\n", got, test.Want) + } + } +} + +func TestParseLegacyAbsProviderConfigStr(t *testing.T) { + tests := []struct { + Config string + Want AbsProviderConfig + }{ + { + `provider.foo`, + AbsProviderConfig{ + Module: RootModule, + Provider: NewLegacyProvider("foo"), + }, + }, + { + `module.child_module.provider.foo`, + AbsProviderConfig{ + Module: RootModule.Child("child_module"), + Provider: NewLegacyProvider("foo"), + }, + }, + { + `provider.terraform`, + AbsProviderConfig{ + Module: RootModule, + Provider: NewBuiltInProvider("terraform"), + }, + }, + } + + for _, test := range tests { + got, _ := ParseLegacyAbsProviderConfigStr(test.Config) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result. Got %s, want %s\n", got, test.Want) + } + } +} diff --git a/pkg/addrs/provider_function.go b/pkg/addrs/provider_function.go new file mode 100644 index 00000000000..619f3b766a3 --- /dev/null +++ b/pkg/addrs/provider_function.go @@ -0,0 +1,83 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "strings" +) + +// ProviderFunction is the address of a provider defined function. 
+type ProviderFunction struct { + referenceable + ProviderName string + ProviderAlias string + Function string +} + +func (v ProviderFunction) String() string { + if v.ProviderAlias != "" { + return fmt.Sprintf("provider::%s::%s::%s", v.ProviderName, v.ProviderAlias, v.Function) + } + return fmt.Sprintf("provider::%s::%s", v.ProviderName, v.Function) +} + +func (v ProviderFunction) UniqueKey() UniqueKey { + return v // A ProviderFunction is its own UniqueKey +} + +func (v ProviderFunction) uniqueKeySigil() {} + +type Function struct { + Namespaces []string + Name string +} + +const ( + FunctionNamespaceProvider = "provider" + FunctionNamespaceCore = "core" +) + +var FunctionNamespaces = []string{ + FunctionNamespaceProvider, + FunctionNamespaceCore, +} + +func ParseFunction(input string) Function { + parts := strings.Split(input, "::") + return Function{ + Name: parts[len(parts)-1], + Namespaces: parts[:len(parts)-1], + } +} + +func (f Function) String() string { + return strings.Join(append(f.Namespaces, f.Name), "::") +} + +func (f Function) IsNamespace(namespace string) bool { + return len(f.Namespaces) > 0 && f.Namespaces[0] == namespace +} + +func (f Function) AsProviderFunction() (pf ProviderFunction, err error) { + if !f.IsNamespace(FunctionNamespaceProvider) { + // Should always be checked ahead of time! 
+ panic("BUG: non-provider function " + f.String()) + } + + if len(f.Namespaces) == 2 { + // provider:::: + pf.ProviderName = f.Namespaces[1] + } else if len(f.Namespaces) == 3 { + // provider:::::: + pf.ProviderName = f.Namespaces[1] + pf.ProviderAlias = f.Namespaces[2] + } else { + return pf, fmt.Errorf("invalid provider function %q: expected provider:::: or provider::::::", f) + } + pf.Function = f.Name + return pf, nil +} diff --git a/pkg/addrs/provider_test.go b/pkg/addrs/provider_test.go new file mode 100644 index 00000000000..ae43c100909 --- /dev/null +++ b/pkg/addrs/provider_test.go @@ -0,0 +1,565 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" + + "github.com/go-test/deep" + svchost "github.com/hashicorp/terraform-svchost" +) + +func TestProviderString(t *testing.T) { + tests := []struct { + Input Provider + Want string + }{ + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + NewDefaultProvider("test").String(), + }, + { + Provider{ + Type: "test-beta", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + NewDefaultProvider("test-beta").String(), + }, + { + Provider{ + Type: "test", + Hostname: "registry.terraform.com", + Namespace: "hashicorp", + }, + "registry.terraform.com/hashicorp/test", + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "othercorp", + }, + DefaultProviderRegistryHost.ForDisplay() + "/othercorp/test", + }, + } + + for _, test := range tests { + got := test.Input.String() + if got != test.Want { + t.Errorf("wrong result for %s\n", test.Input.String()) + } + } +} + +func TestProviderLegacyString(t *testing.T) { + tests := []struct { + Input Provider + Want string + }{ + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: LegacyProviderNamespace, + 
}, + "test", + }, + { + Provider{ + Type: "terraform", + Hostname: BuiltInProviderHost, + Namespace: BuiltInProviderNamespace, + }, + "terraform", + }, + } + + for _, test := range tests { + got := test.Input.LegacyString() + if got != test.Want { + t.Errorf("wrong result for %s\ngot: %s\nwant: %s", test.Input.String(), got, test.Want) + } + } +} + +func TestProviderDisplay(t *testing.T) { + tests := []struct { + Input Provider + Want string + }{ + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + "hashicorp/test", + }, + { + Provider{ + Type: "test", + Hostname: "registry.terraform.com", + Namespace: "hashicorp", + }, + "registry.terraform.com/hashicorp/test", + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "othercorp", + }, + "othercorp/test", + }, + } + + for _, test := range tests { + got := test.Input.ForDisplay() + if got != test.Want { + t.Errorf("wrong result for %s\n", test.Input.String()) + } + } +} + +func TestProviderIsDefaultProvider(t *testing.T) { + tests := []struct { + Input Provider + Want bool + }{ + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + true, + }, + { + Provider{ + Type: "test", + Hostname: "registry.terraform.com", + Namespace: "hashicorp", + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "othercorp", + }, + false, + }, + } + + for _, test := range tests { + got := IsDefaultProvider(test.Input) + if got != test.Want { + t.Errorf("wrong result for %s\n", test.Input.String()) + } + } +} + +func TestProviderIsBuiltIn(t *testing.T) { + tests := []struct { + Input Provider + Want bool + }{ + { + Provider{ + Type: "test", + Hostname: BuiltInProviderHost, + Namespace: BuiltInProviderNamespace, + }, + true, + }, + { + Provider{ + Type: "terraform", + Hostname: BuiltInProviderHost, + Namespace: BuiltInProviderNamespace, + }, + true, + }, 
+ { + Provider{ + Type: "test", + Hostname: BuiltInProviderHost, + Namespace: "boop", + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: BuiltInProviderNamespace, + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: "registry.terraform.com", + Namespace: "hashicorp", + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "othercorp", + }, + false, + }, + } + + for _, test := range tests { + got := test.Input.IsBuiltIn() + if got != test.Want { + t.Errorf("wrong result for %s\ngot: %#v\nwant: %#v", test.Input.String(), got, test.Want) + } + } +} + +func TestProviderIsLegacy(t *testing.T) { + tests := []struct { + Input Provider + Want bool + }{ + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: LegacyProviderNamespace, + }, + true, + }, + { + Provider{ + Type: "test", + Hostname: "registry.terraform.com", + Namespace: LegacyProviderNamespace, + }, + false, + }, + { + Provider{ + Type: "test", + Hostname: DefaultProviderRegistryHost, + Namespace: "hashicorp", + }, + false, + }, + } + + for _, test := range tests { + got := test.Input.IsLegacy() + if got != test.Want { + t.Errorf("wrong result for %s\n", test.Input.String()) + } + } +} + +func TestParseProviderSourceStr(t *testing.T) { + tests := map[string]struct { + Want Provider + Err bool + }{ + "registry.opentofu.org/hashicorp/aws": { + Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "registry.opentofu.org/HashiCorp/AWS": { + Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "hashicorp/aws": { + Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "HashiCorp/AWS": { 
+ Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "aws": { + Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "AWS": { + Provider{ + Type: "aws", + Namespace: "hashicorp", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "example.com/foo-bar/baz-boop": { + Provider{ + Type: "baz-boop", + Namespace: "foo-bar", + Hostname: svchost.Hostname("example.com"), + }, + false, + }, + "foo-bar/baz-boop": { + Provider{ + Type: "baz-boop", + Namespace: "foo-bar", + Hostname: DefaultProviderRegistryHost, + }, + false, + }, + "localhost:8080/foo/bar": { + Provider{ + Type: "bar", + Namespace: "foo", + Hostname: svchost.Hostname("localhost:8080"), + }, + false, + }, + "example.com/too/many/parts/here": { + Provider{}, + true, + }, + "/too///many//slashes": { + Provider{}, + true, + }, + "///": { + Provider{}, + true, + }, + "/ / /": { // empty strings + Provider{}, + true, + }, + "badhost!/hashicorp/aws": { + Provider{}, + true, + }, + "example.com/badnamespace!/aws": { + Provider{}, + true, + }, + "example.com/bad--namespace/aws": { + Provider{}, + true, + }, + "example.com/-badnamespace/aws": { + Provider{}, + true, + }, + "example.com/badnamespace-/aws": { + Provider{}, + true, + }, + "example.com/bad.namespace/aws": { + Provider{}, + true, + }, + "example.com/hashicorp/badtype!": { + Provider{}, + true, + }, + "example.com/hashicorp/bad--type": { + Provider{}, + true, + }, + "example.com/hashicorp/-badtype": { + Provider{}, + true, + }, + "example.com/hashicorp/badtype-": { + Provider{}, + true, + }, + "example.com/hashicorp/bad.type": { + Provider{}, + true, + }, + + // We forbid the terraform- prefix both because it's redundant to + // include "terraform" in a Terraform provider name and because we use + // the longer prefix terraform-provider- to hint for users who might be + // accidentally using the git repository name or executable 
file name + // instead of the provider type. + "example.com/hashicorp/terraform-provider-bad": { + Provider{}, + true, + }, + "example.com/hashicorp/terraform-bad": { + Provider{}, + true, + }, + } + + for name, test := range tests { + got, diags := ParseProviderSourceString(name) + for _, problem := range deep.Equal(got, test.Want) { + t.Errorf(problem) + } + if len(diags) > 0 { + if test.Err == false { + t.Errorf("got error, expected success") + } + } else { + if test.Err { + t.Errorf("got success, expected error") + } + } + } +} + +func TestParseProviderPart(t *testing.T) { + tests := map[string]struct { + Want string + Error string + }{ + `foo`: { + `foo`, + ``, + }, + `FOO`: { + `foo`, + ``, + }, + `Foo`: { + `foo`, + ``, + }, + `abc-123`: { + `abc-123`, + ``, + }, + `Испытание`: { + `испытание`, + ``, + }, + `münchen`: { // this is a precomposed u with diaeresis + `münchen`, // this is a precomposed u with diaeresis + ``, + }, + `münchen`: { // this is a separate u and combining diaeresis + `münchen`, // this is a precomposed u with diaeresis + ``, + }, + `abc--123`: { + ``, + `cannot use multiple consecutive dashes`, + }, + `xn--80akhbyknj4f`: { // this is the punycode form of "испытание", but we don't accept punycode here + ``, + `cannot use multiple consecutive dashes`, + }, + `abc.123`: { + ``, + `dots are not allowed`, + }, + `-abc123`: { + ``, + `must contain only letters, digits, and dashes, and may not use leading or trailing dashes`, + }, + `abc123-`: { + ``, + `must contain only letters, digits, and dashes, and may not use leading or trailing dashes`, + }, + ``: { + ``, + `must have at least one character`, + }, + } + + for given, test := range tests { + t.Run(given, func(t *testing.T) { + got, err := ParseProviderPart(given) + if test.Error != "" { + if err == nil { + t.Errorf("unexpected success\ngot: %s\nwant: %s", err, test.Error) + } else if got := err.Error(); got != test.Error { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, test.Error) + 
} + } else { + if err != nil { + t.Errorf("unexpected error\ngot: %s\nwant: ", err) + } else if got != test.Want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, test.Want) + } + } + }) + } +} + +func TestProviderEquals(t *testing.T) { + tests := []struct { + InputP Provider + OtherP Provider + Want bool + }{ + { + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + true, + }, + { + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "bar", "test"), + false, + }, + { + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider(DefaultProviderRegistryHost, "foo", "my-test"), + false, + }, + { + NewProvider(DefaultProviderRegistryHost, "foo", "test"), + NewProvider("example.com", "foo", "test"), + false, + }, + } + for _, test := range tests { + t.Run(test.InputP.String(), func(t *testing.T) { + got := test.InputP.Equals(test.OtherP) + if got != test.Want { + t.Errorf("wrong result\ngot: %v\nwant: %v", got, test.Want) + } + }) + } +} diff --git a/pkg/addrs/referenceable.go b/pkg/addrs/referenceable.go new file mode 100644 index 00000000000..a7337bf82c8 --- /dev/null +++ b/pkg/addrs/referenceable.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Referenceable is an interface implemented by all address types that can +// appear as references in configuration language expressions. +type Referenceable interface { + // All implementations of this interface must be covered by the type switch + // in lang.Scope.buildEvalContext. + referenceableSigil() + + // All Referenceable address types must have unique keys. + UniqueKeyer + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseRef to produce an identical + // result. 
+	String() string
+}
+
+type referenceable struct {
+}
+
+func (r referenceable) referenceableSigil() {
+}
diff --git a/pkg/addrs/removable.go b/pkg/addrs/removable.go
new file mode 100644
index 00000000000..7ab0d9bfe04
--- /dev/null
+++ b/pkg/addrs/removable.go
@@ -0,0 +1,26 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+// ConfigRemovable is an interface implemented by address types that represent
+// the destination of a "removed" statement in configuration.
+//
+// Note that ConfigRemovable might represent:
+// 1. An absolute address relative to the root of the configuration.
+// 2. A direct representation of these in configuration where the author gives an
+// address relative to the current module where the address is defined.
+type ConfigRemovable interface {
+	Targetable
+	configRemovableSigil()
+
+	String() string
+}
+
+// The following are all the possible ConfigRemovable address types:
+var (
+	_ ConfigRemovable = ConfigResource{}
+	_ ConfigRemovable = Module(nil)
+)
diff --git a/pkg/addrs/remove_endpoint.go b/pkg/addrs/remove_endpoint.go
new file mode 100644
index 00000000000..90f47e860e7
--- /dev/null
+++ b/pkg/addrs/remove_endpoint.go
@@ -0,0 +1,78 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import (
+	"github.com/hashicorp/hcl/v2"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// RemoveEndpoint is to ConfigRemovable what Target is to Targetable:
+// a wrapping struct that captures the result of decoding an HCL
+// traversal representing a relative path from the current module to
+// a removable object. It is very similar to MoveEndpoint.
+//
+// Its purpose is to represent the "from" address in a "removed" block
+// in the configuration.
+// +// To obtain a full address from a RemoveEndpoint we need to combine it +// with any ancestor modules in the configuration +type RemoveEndpoint struct { + // SourceRange is the location of the physical endpoint address + // in configuration, if this RemoveEndpoint was decoded from a + // configuration expression. + SourceRange tfdiags.SourceRange + + // the representation of our relative address as a ConfigRemovable + RelSubject ConfigRemovable +} + +// ParseRemoveEndpoint attempts to interpret the given traversal as a +// "remove endpoint" address, which is a relative path from the module containing +// the traversal to a removable object in either the same module or in some +// child module. +// +// This deals only with the syntactic element of a remove endpoint expression +// in configuration. Before the result will be useful you'll need to combine +// it with the address of the module where it was declared in order to get +// an absolute address relative to the root module. +func ParseRemoveEndpoint(traversal hcl.Traversal) (*RemoveEndpoint, tfdiags.Diagnostics) { + path, remain, diags := parseModulePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &RemoveEndpoint{ + RelSubject: path, + SourceRange: rng, + }, diags + } + + riAddr, moreDiags := parseResourceUnderModule(path, remain) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return nil, diags + } + + if riAddr.Resource.Mode == DataResourceMode { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Data source address is not allowed", + Detail: "Data sources cannot be destroyed, and therefore, 'removed' blocks are not allowed to target them. 
To remove data sources from the state, you should remove the data source block from the configuration.", + Subject: traversal.SourceRange().Ptr(), + }) + + return nil, diags + } + + return &RemoveEndpoint{ + RelSubject: riAddr, + SourceRange: rng, + }, diags +} diff --git a/pkg/addrs/remove_endpoint_test.go b/pkg/addrs/remove_endpoint_test.go new file mode 100644 index 00000000000..f2f68bd31cc --- /dev/null +++ b/pkg/addrs/remove_endpoint_test.go @@ -0,0 +1,213 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestParseRemoveEndpoint(t *testing.T) { + tests := []struct { + Input string + WantRel ConfigRemovable + WantErr string + }{ + { + `foo.bar`, + ConfigResource{ + Module: RootModule, + Resource: Resource{ + + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + ``, + }, + { + `module.boop`, + Module{"boop"}, + ``, + }, + { + `module.boop.foo.bar`, + ConfigResource{ + Module: Module{"boop"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + ``, + }, + { + `module.foo.module.bar`, + Module{"foo", "bar"}, + ``, + }, + { + `module.boop.module.bip.foo.bar`, + ConfigResource{ + Module: Module{"boop", "bip"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + }, + ``, + }, + { + `foo.bar[0]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `foo.bar["a"]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. 
"null_resource.a").`, + }, + { + `module.boop.foo.bar[0]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `module.boop.foo.bar["a"]`, + nil, + + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `data.foo.bar`, + nil, + `Data source address is not allowed: Data sources cannot be destroyed, and therefore, 'removed' blocks are not allowed to target them. To remove data sources from the state, you should remove the data source block from the configuration.`, + }, + { + `data.foo.bar[0]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `data.foo.bar["a"]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `module.boop.data.foo.bar[0]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `module.boop.data.foo.bar["a"]`, + nil, + `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + `module.foo[0]`, + nil, + `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. 
"module.a").`, + }, + { + `module.foo["a"]`, + nil, + `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + { + `module.foo[1].module.bar`, + nil, + `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + { + `module.foo.module.bar[1]`, + nil, + `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + { + `module.foo[0].module.bar[1]`, + nil, + `Module instance address with keys is not allowed: Module address cannot be a module instance (e.g. "module.a[0]"), it must be a module instead (e.g. "module.a").`, + }, + { + `module`, + nil, + `Invalid address operator: Prefix "module." must be followed by a module name.`, + }, + { + `module[0]`, + nil, + `Invalid address operator: Prefix "module." 
must be followed by a module name.`, + }, + { + `module.foo.data`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data[0]`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.data.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + { + `module.foo.bar`, + nil, + `Invalid address: Resource specification must include a resource type and name.`, + }, + { + `module.foo.bar[0]`, + nil, + `Invalid address: A resource name is required.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + // We're not trying to test the HCL parser here, so any + // failures at this point are likely to be bugs in the + // test case itself. + t.Fatalf("syntax error: %s", hclDiags.Error()) + } + + moveEp, diags := ParseRemoveEndpoint(traversal) + + switch { + case test.WantErr != "": + if !diags.HasErrors() { + t.Fatalf("unexpected success\nwant error: %s", test.WantErr) + } + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("wrong error\ngot: %s\nwant: %s", gotErr, test.WantErr) + } + default: + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err().Error()) + } + if diff := cmp.Diff(test.WantRel, moveEp.RelSubject); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + } + }) + } +} diff --git a/pkg/addrs/resource.go b/pkg/addrs/resource.go new file mode 100644 index 00000000000..71ac0ca495f --- /dev/null +++ b/pkg/addrs/resource.go @@ -0,0 +1,514 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package addrs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// Resource is an address for a resource block within configuration, which
+// contains potentially-multiple resource instances if that configuration
+// block uses "count" or "for_each".
+type Resource struct {
+	referenceable
+	Mode ResourceMode
+	Type string
+	Name string
+}
+
+func (r Resource) String() string {
+	switch r.Mode {
+	case ManagedResourceMode:
+		return fmt.Sprintf("%s.%s", r.Type, r.Name)
+	case DataResourceMode:
+		return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+	default:
+		// Should never happen, but we'll return a string here rather than
+		// crashing just in case it does.
+		return fmt.Sprintf(".%s.%s", r.Type, r.Name)
+	}
+}
+
+func (r Resource) Equal(o Resource) bool {
+	return r.Mode == o.Mode && r.Name == o.Name && r.Type == o.Type
+}
+
+func (r Resource) Less(o Resource) bool {
+	switch {
+	case r.Mode != o.Mode:
+		return r.Mode == DataResourceMode
+
+	case r.Type != o.Type:
+		return r.Type < o.Type
+
+	case r.Name != o.Name:
+		return r.Name < o.Name
+
+	default:
+		return false
+	}
+}
+
+func (r Resource) UniqueKey() UniqueKey {
+	return r // A Resource is its own UniqueKey
+}
+
+func (r Resource) uniqueKeySigil() {}
+
+// Instance produces the address for a specific instance of the receiver
+// that is identified by the given key.
+func (r Resource) Instance(key InstanceKey) ResourceInstance {
+	return ResourceInstance{
+		Resource: r,
+		Key:      key,
+	}
+}
+
+// Absolute returns an AbsResource from the receiver and the given module
+// instance address.
+func (r Resource) Absolute(module ModuleInstance) AbsResource {
+	return AbsResource{
+		Module:   module,
+		Resource: r,
+	}
+}
+
+// InModule returns a ConfigResource from the receiver and the given module
+// address.
+func (r Resource) InModule(module Module) ConfigResource {
+	return ConfigResource{
+		Module:   module,
+		Resource: r,
+	}
+}
+
+// ImpliedProvider returns the implied provider type name, for example the "aws" in
+// "aws_instance"
+func (r Resource) ImpliedProvider() string {
+	typeName := r.Type
+	if under := strings.Index(typeName, "_"); under != -1 {
+		typeName = typeName[:under]
+	}
+
+	return typeName
+}
+
+// ResourceInstance is an address for a specific instance of a resource.
+// When a resource is defined in configuration with "count" or "for_each" it
+// produces zero or more instances, which can be addressed using this type.
+type ResourceInstance struct {
+	referenceable
+	Resource Resource
+	Key      InstanceKey
+}
+
+func (r ResourceInstance) ContainingResource() Resource {
+	return r.Resource
+}
+
+func (r ResourceInstance) String() string {
+	if r.Key == NoKey {
+		return r.Resource.String()
+	}
+	return r.Resource.String() + r.Key.String()
+}
+
+func (r ResourceInstance) Equal(o ResourceInstance) bool {
+	return r.Key == o.Key && r.Resource.Equal(o.Resource)
+}
+
+func (r ResourceInstance) Less(o ResourceInstance) bool {
+	if !r.Resource.Equal(o.Resource) {
+		return r.Resource.Less(o.Resource)
+	}
+
+	if r.Key != o.Key {
+		return InstanceKeyLess(r.Key, o.Key)
+	}
+
+	return false
+}
+
+func (r ResourceInstance) UniqueKey() UniqueKey {
+	return r // A ResourceInstance is its own UniqueKey
+}
+
+func (r ResourceInstance) uniqueKeySigil() {}
+
+// Absolute returns an AbsResourceInstance from the receiver and the given module
+// instance address.
+func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance {
+	return AbsResourceInstance{
+		Module:   module,
+		Resource: r,
+	}
+}
+
+// AbsResource is an absolute address for a resource under a given module path.
+type AbsResource struct {
+	targetable
+	Module   ModuleInstance
+	Resource Resource
+}
+
+// Resource returns the address of a particular resource within the receiver.
+func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource {
+	return AbsResource{
+		Module: m,
+		Resource: Resource{
+			Mode: mode,
+			Type: typeName,
+			Name: name,
+		},
+	}
+}
+
+// Instance produces the address for a specific instance of the receiver
+// that is identified by the given key.
+func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
+	return AbsResourceInstance{
+		Module:   r.Module,
+		Resource: r.Resource.Instance(key),
+	}
+}
+
+// Config returns the unexpanded ConfigResource for this AbsResource.
+func (r AbsResource) Config() ConfigResource {
+	return ConfigResource{
+		Module:   r.Module.Module(),
+		Resource: r.Resource,
+	}
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address is either equal to the receiver or is an instance of the
+// receiver.
+func (r AbsResource) TargetContains(other Targetable) bool {
+	switch to := other.(type) {
+
+	case AbsResource:
+		// We'll use our stringification as a cheat-ish way to test for equality.
+		return to.String() == r.String()
+
+	case ConfigResource:
+		// if an absolute resource from parsing a target address contains a
+		// ConfigResource, the string representation will match
+		return to.String() == r.String()
+
+	case AbsResourceInstance:
+		return r.TargetContains(to.ContainingResource())
+
+	default:
+		return false
+
+	}
+}
+
+func (r AbsResource) AddrType() TargetableAddrType {
+	return AbsResourceAddrType
+}
+
+func (r AbsResource) String() string {
+	if len(r.Module) == 0 {
+		return r.Resource.String()
+	}
+	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
+}
+
+// AffectedAbsResource returns the AbsResource.
+func (r AbsResource) AffectedAbsResource() AbsResource {
+	return r
+}
+
+func (r AbsResource) Equal(o AbsResource) bool {
+	return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource)
+}
+
+func (r AbsResource) Less(o AbsResource) bool {
+	if !r.Module.Equal(o.Module) {
+		return r.Module.Less(o.Module)
+	}
+
+	if !r.Resource.Equal(o.Resource) {
+		return r.Resource.Less(o.Resource)
+	}
+
+	return false
+}
+
+func (r AbsResource) absMoveableSigil() {
+	// AbsResource is moveable
+}
+
+type absResourceKey string
+
+func (r absResourceKey) uniqueKeySigil() {}
+
+func (r AbsResource) UniqueKey() UniqueKey {
+	return absResourceKey(r.String())
+}
+
+// AbsResourceInstance is an absolute address for a resource instance under a
+// given module path.
+type AbsResourceInstance struct {
+	targetable
+	Module   ModuleInstance
+	Resource ResourceInstance
+}
+
+// ResourceInstance returns the address of a particular resource instance within the receiver.
+func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance {
+	return AbsResourceInstance{
+		Module: m,
+		Resource: ResourceInstance{
+			Resource: Resource{
+				Mode: mode,
+				Type: typeName,
+				Name: name,
+			},
+			Key: key,
+		},
+	}
+}
+
+// ContainingResource returns the address of the resource that contains the
+// receiving resource instance. In other words, it discards the key portion
+// of the address to produce an AbsResource value.
+func (r AbsResourceInstance) ContainingResource() AbsResource {
+	return AbsResource{
+		Module:   r.Module,
+		Resource: r.Resource.ContainingResource(),
+	}
+}
+
+// ConfigResource returns the address of the configuration block that declared
+// this instance.
+func (r AbsResourceInstance) ConfigResource() ConfigResource {
+	return ConfigResource{
+		Module:   r.Module.Module(),
+		Resource: r.Resource.Resource,
+	}
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address is equal to the receiver.
+func (r AbsResourceInstance) TargetContains(other Targetable) bool {
+	switch to := other.(type) {
+
+	// while we currently don't start with an AbsResourceInstance as a target
+	// address, check all resource types for consistency.
+	case AbsResourceInstance:
+		// We'll use our stringification as a cheat-ish way to test for equality.
+		return to.String() == r.String()
+	case ConfigResource:
+		return to.String() == r.String()
+	case AbsResource:
+		return to.String() == r.String()
+
+	default:
+		return false
+
+	}
+}
+
+func (r AbsResourceInstance) AddrType() TargetableAddrType {
+	return AbsResourceInstanceAddrType
+}
+
+func (r AbsResourceInstance) String() string {
+	if len(r.Module) == 0 {
+		return r.Resource.String()
+	}
+	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
+}
+
+// AffectedAbsResource returns the AbsResource for the instance.
+func (r AbsResourceInstance) AffectedAbsResource() AbsResource {
+	return AbsResource{
+		Module:   r.Module,
+		Resource: r.Resource.Resource,
+	}
+}
+
+func (r AbsResourceInstance) CheckRule(t CheckRuleType, i int) CheckRule {
+	return CheckRule{
+		Container: r,
+		Type:      t,
+		Index:     i,
+	}
+}
+
+// CheckableKind reports that an AbsResourceInstance is a checkable resource.
+func (r AbsResourceInstance) CheckableKind() CheckableKind {
+	return CheckableResource
+}
+
+func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool {
+	return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource)
+}
+
+// Less returns true if the receiver should sort before the given other value
+// in a sorted list of addresses.
+func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
+	if !r.Module.Equal(o.Module) {
+		return r.Module.Less(o.Module)
+	}
+
+	if !r.Resource.Equal(o.Resource) {
+		return r.Resource.Less(o.Resource)
+	}
+
+	return false
+}
+
+// AbsResourceInstance is a Checkable
+func (r AbsResourceInstance) checkableSigil() {}
+
+func (r AbsResourceInstance) ConfigCheckable() ConfigCheckable {
+	// The ConfigCheckable for an AbsResourceInstance is its ConfigResource.
+	return r.ConfigResource()
+}
+
+type absResourceInstanceKey string
+
+func (r AbsResourceInstance) UniqueKey() UniqueKey {
+	return absResourceInstanceKey(r.String())
+}
+
+func (r absResourceInstanceKey) uniqueKeySigil() {}
+
+func (r AbsResourceInstance) absMoveableSigil() {
+	// AbsResourceInstance is moveable
+}
+
+// ConfigResource is an address for a resource within a configuration.
+type ConfigResource struct {
+	targetable
+	Module   Module
+	Resource Resource
+}
+
+// ParseConfigResource parses the module address from the given traversal
+// and then parses the resource address from the leftover. The returned ConfigResource
+// contains both module and resource addresses. ParseConfigResource doesn't support
+// instance keys and will return an error if it encounters one.
+func ParseConfigResource(traversal hcl.Traversal) (ConfigResource, tfdiags.Diagnostics) {
+	modulePath, remainTraversal, diags := parseModulePrefix(traversal)
+	if diags.HasErrors() {
+		return ConfigResource{}, diags
+	}
+
+	if len(remainTraversal) == 0 {
+		return ConfigResource{}, diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Module address is not allowed",
+			Detail:   "Expected reference to either resource or data block. Provided reference appears to be a module.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+	}
+
+	configRes, moreDiags := parseResourceUnderModule(modulePath, remainTraversal)
+	return configRes, diags.Append(moreDiags)
+}
+
+// Resource returns the address of a particular resource within the module.
+func (m Module) Resource(mode ResourceMode, typeName string, name string) ConfigResource {
+	return ConfigResource{
+		Module: m,
+		Resource: Resource{
+			Mode: mode,
+			Type: typeName,
+			Name: name,
+		},
+	}
+}
+
+// Absolute produces the address for the receiver within a specific module instance.
+func (r ConfigResource) Absolute(module ModuleInstance) AbsResource {
+	return AbsResource{
+		Module:   module,
+		Resource: r.Resource,
+	}
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address is either equal to the receiver or is an instance of the
+// receiver.
+func (r ConfigResource) TargetContains(other Targetable) bool {
+	switch to := other.(type) {
+	case ConfigResource:
+		// We'll use our stringification as a cheat-ish way to test for equality.
+		return to.String() == r.String()
+	case AbsResource:
+		return r.TargetContains(to.Config())
+	case AbsResourceInstance:
+		return r.TargetContains(to.ContainingResource())
+	default:
+		return false
+	}
+}
+
+func (r ConfigResource) AddrType() TargetableAddrType {
+	return ConfigResourceAddrType
+}
+
+func (r ConfigResource) String() string {
+	if len(r.Module) == 0 {
+		return r.Resource.String()
+	}
+	return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
+}
+
+func (r ConfigResource) Equal(o ConfigResource) bool {
+	return r.Module.Equal(o.Module) && r.Resource.Equal(o.Resource)
+}
+
+func (r ConfigResource) UniqueKey() UniqueKey {
+	return configResourceKey(r.String())
+}
+
+func (r ConfigResource) configMoveableSigil() {
+	// ConfigResource is moveable
+}
+
+func (r ConfigResource) configCheckableSigil() {
+	// ConfigResource represents a configuration object that declares checkable objects
+}
+
+// CheckableKind reports that a ConfigResource declares checkable resources.
+func (r ConfigResource) CheckableKind() CheckableKind {
+	return CheckableResource
+}
+
+func (r ConfigResource) configRemovableSigil() {
+	// Empty function so ConfigResource will fulfill the requirements of the removable interface
+}
+
+type configResourceKey string
+
+func (k configResourceKey) uniqueKeySigil() {}
+
+// ResourceMode defines which lifecycle applies to a given resource. Each
+// resource lifecycle has a slightly different address format.
+type ResourceMode rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode
+
+const (
+	// InvalidResourceMode is the zero value of ResourceMode and is not
+	// a valid resource mode.
+	InvalidResourceMode ResourceMode = 0
+
+	// ManagedResourceMode indicates a managed resource, as defined by
+	// "resource" blocks in configuration.
+	ManagedResourceMode ResourceMode = 'M'
+
+	// DataResourceMode indicates a data resource, as defined by
+	// "data" blocks in configuration.
+ DataResourceMode ResourceMode = 'D' +) diff --git a/pkg/addrs/resource_phase.go b/pkg/addrs/resource_phase.go new file mode 100644 index 00000000000..d266ae08700 --- /dev/null +++ b/pkg/addrs/resource_phase.go @@ -0,0 +1,122 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import "fmt" + +// ResourceInstancePhase is a special kind of reference used only internally +// during graph building to represent resource instances that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via an instance phase +// or can declare that they reference an instance phase in order to accomodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourceInstancePhase struct { + referenceable + ResourceInstance ResourceInstance + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourceInstancePhase{} + +// Phase returns a special "phase address" for the receving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { + return ResourceInstancePhase{ + ResourceInstance: r, + Phase: rpt, + } +} + +// ContainingResource returns an address for the same phase of the resource +// that this instance belongs to. +func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { + return rp.ResourceInstance.Resource.Phase(rp.Phase) +} + +func (rp ResourceInstancePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. 
This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) +} + +func (rp ResourceInstancePhase) UniqueKey() UniqueKey { + return rp // A ResourceInstancePhase is its own UniqueKey +} + +func (rp ResourceInstancePhase) uniqueKeySigil() {} + +// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. +type ResourceInstancePhaseType string + +const ( + // ResourceInstancePhaseDestroy represents the "destroy" phase of a + // resource instance. + ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" + + // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy + // but is used for resources that have "create_before_destroy" set, thus + // requiring a different dependency ordering. + ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" +) + +func (rpt ResourceInstancePhaseType) String() string { + return string(rpt) +} + +// ResourcePhase is a special kind of reference used only internally +// during graph building to represent resources that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via a resource phase +// or can declare that they reference a resource phase in order to accomodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// Since resources (as opposed to instances) aren't actually phased, this +// address type is used only as an approximation during initial construction +// of the resource-oriented plan graph, under the assumption that resource +// instances with ResourceInstancePhase addresses will be created in dynamic +// subgraphs during the graph walk. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. 
+type ResourcePhase struct { + referenceable + Resource Resource + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourcePhase{} + +// Phase returns a special "phase address" for the receving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { + return ResourcePhase{ + Resource: r, + Phase: rpt, + } +} + +func (rp ResourcePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) +} + +func (rp ResourcePhase) UniqueKey() UniqueKey { + return rp // A ResourcePhase is its own UniqueKey +} + +func (rp ResourcePhase) uniqueKeySigil() {} diff --git a/pkg/addrs/resource_test.go b/pkg/addrs/resource_test.go new file mode 100644 index 00000000000..9ecfb7bfd46 --- /dev/null +++ b/pkg/addrs/resource_test.go @@ -0,0 +1,487 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestResourceEqual_true(t *testing.T) { + resources := []Resource{ + { + Mode: ManagedResourceMode, + Type: "a", + Name: "b", + }, + { + Mode: DataResourceMode, + Type: "a", + Name: "b", + }, + } + for _, r := range resources { + t.Run(r.String(), func(t *testing.T) { + if !r.Equal(r) { + t.Fatalf("expected %#v to be equal to itself", r) + } + }) + } +} + +func TestResourceEqual_false(t *testing.T) { + testCases := []struct { + left Resource + right Resource + }{ + { + Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + }, + { + Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Resource{Mode: ManagedResourceMode, Type: "b", Name: "b"}, + }, + { + Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Resource{Mode: ManagedResourceMode, Type: "a", Name: "c"}, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestResourceInstanceEqual_true(t *testing.T) { + resources := []ResourceInstance{ + { + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b", + }, + Key: IntKey(0), + }, + { + Resource: Resource{ + Mode: DataResourceMode, + Type: "a", + Name: "b", + }, + Key: StringKey("x"), + }, + } + for _, r := range resources { + t.Run(r.String(), func(t *testing.T) { + if !r.Equal(r) { + t.Fatalf("expected %#v to be equal to itself", r) + } + }) + } +} + +func TestResourceInstanceEqual_false(t *testing.T) { + testCases := []struct { + left ResourceInstance + right ResourceInstance 
+ }{ + { + ResourceInstance{ + Resource: Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Key: IntKey(0), + }, + ResourceInstance{ + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Key: IntKey(0), + }, + }, + { + ResourceInstance{ + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Key: IntKey(0), + }, + ResourceInstance{ + Resource: Resource{Mode: ManagedResourceMode, Type: "b", Name: "b"}, + Key: IntKey(0), + }, + }, + { + ResourceInstance{ + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Key: IntKey(0), + }, + ResourceInstance{ + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "c"}, + Key: IntKey(0), + }, + }, + { + ResourceInstance{ + Resource: Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Key: IntKey(0), + }, + ResourceInstance{ + Resource: Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Key: StringKey("0"), + }, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestAbsResourceInstanceEqual_true(t *testing.T) { + managed := Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"} + data := Resource{Mode: DataResourceMode, Type: "a", Name: "b"} + + foo, diags := ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + foobar, diags := ParseModuleInstanceStr("module.foo[1].module.bar") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + + instances := []AbsResourceInstance{ + managed.Instance(IntKey(0)).Absolute(foo), + data.Instance(IntKey(0)).Absolute(foo), + managed.Instance(StringKey("a")).Absolute(foobar), + } + for _, r := range instances { + t.Run(r.String(), func(t 
*testing.T) { + if !r.Equal(r) { + t.Fatalf("expected %#v to be equal to itself", r) + } + }) + } +} + +func TestAbsResourceInstanceEqual_false(t *testing.T) { + managed := Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"} + data := Resource{Mode: DataResourceMode, Type: "a", Name: "b"} + + foo, diags := ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + foobar, diags := ParseModuleInstanceStr("module.foo[1].module.bar") + if len(diags) > 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + + testCases := []struct { + left AbsResourceInstance + right AbsResourceInstance + }{ + { + managed.Instance(IntKey(0)).Absolute(foo), + data.Instance(IntKey(0)).Absolute(foo), + }, + { + managed.Instance(IntKey(0)).Absolute(foo), + managed.Instance(IntKey(0)).Absolute(foobar), + }, + { + managed.Instance(IntKey(0)).Absolute(foo), + managed.Instance(StringKey("0")).Absolute(foo), + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t *testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestAbsResourceUniqueKey(t *testing.T) { + resourceAddr1 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b1", + }.Absolute(RootModuleInstance) + resourceAddr2 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b2", + }.Absolute(RootModuleInstance) + resourceAddr3 := Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "in_module", + }.Absolute(RootModuleInstance.Child("boop", NoKey)) + + tests := []struct { + Reciever AbsResource + Other UniqueKeyer + WantEqual bool + }{ + { + resourceAddr1, + resourceAddr1, + true, + }, + { + resourceAddr1, + resourceAddr2, + false, + }, + { + resourceAddr1, + resourceAddr3, + false, + }, + { + resourceAddr3, + 
resourceAddr3, + true, + }, + { + resourceAddr1, + resourceAddr1.Instance(NoKey), + false, // no-key instance key is distinct from its resource even though they have the same String result + }, + { + resourceAddr1, + resourceAddr1.Instance(IntKey(1)), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s matches %T %s?", test.Reciever, test.Other, test.Other), func(t *testing.T) { + rKey := test.Reciever.UniqueKey() + oKey := test.Other.UniqueKey() + + gotEqual := rKey == oKey + if gotEqual != test.WantEqual { + t.Errorf( + "wrong result\nreceiver: %s\nother: %s (%T)\ngot: %t\nwant: %t", + test.Reciever, test.Other, test.Other, + gotEqual, test.WantEqual, + ) + } + }) + } +} + +func TestConfigResourceEqual_true(t *testing.T) { + resources := []ConfigResource{ + { + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Module: RootModule, + }, + { + Resource: Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Module: RootModule, + }, + { + Resource: Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"}, + Module: Module{"foo"}, + }, + { + Resource: Resource{Mode: DataResourceMode, Type: "a", Name: "b"}, + Module: Module{"foo"}, + }, + } + for _, r := range resources { + t.Run(r.String(), func(t *testing.T) { + if !r.Equal(r) { + t.Fatalf("expected %#v to be equal to itself", r) + } + }) + } +} + +func TestConfigResourceEqual_false(t *testing.T) { + managed := Resource{Mode: ManagedResourceMode, Type: "a", Name: "b"} + data := Resource{Mode: DataResourceMode, Type: "a", Name: "b"} + + foo := Module{"foo"} + foobar := Module{"foobar"} + testCases := []struct { + left ConfigResource + right ConfigResource + }{ + { + ConfigResource{Resource: managed, Module: foo}, + ConfigResource{Resource: data, Module: foo}, + }, + { + ConfigResource{Resource: managed, Module: foo}, + ConfigResource{Resource: managed, Module: foobar}, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s = %s", tc.left, tc.right), func(t 
*testing.T) { + if tc.left.Equal(tc.right) { + t.Fatalf("expected %#v not to be equal to %#v", tc.left, tc.right) + } + + if tc.right.Equal(tc.left) { + t.Fatalf("expected %#v not to be equal to %#v", tc.right, tc.left) + } + }) + } +} + +func TestParseConfigResource(t *testing.T) { + t.Parallel() + + tests := []struct { + Input string + WantConfigResource ConfigResource + WantErr string + }{ + { + Input: "a.b", + WantConfigResource: ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "a", + Name: "b", + }, + }, + }, + { + Input: "data.a.b", + WantConfigResource: ConfigResource{ + Module: RootModule, + Resource: Resource{ + Mode: DataResourceMode, + Type: "a", + Name: "b", + }, + }, + }, + { + Input: "module.a.b.c", + WantConfigResource: ConfigResource{ + Module: []string{"a"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "b", + Name: "c", + }, + }, + }, + { + Input: "module.a.data.b.c", + WantConfigResource: ConfigResource{ + Module: []string{"a"}, + Resource: Resource{ + Mode: DataResourceMode, + Type: "b", + Name: "c", + }, + }, + }, + { + Input: "module.a.module.b.c.d", + WantConfigResource: ConfigResource{ + Module: []string{"a", "b"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "c", + Name: "d", + }, + }, + }, + { + Input: "module.a.module.b.data.c.d", + WantConfigResource: ConfigResource{ + Module: []string{"a", "b"}, + Resource: Resource{ + Mode: DataResourceMode, + Type: "c", + Name: "d", + }, + }, + }, + { + Input: "module.a.module.b", + WantErr: "Module address is not allowed: Expected reference to either resource or data block. Provided reference appears to be a module.", + }, + { + Input: "module", + WantErr: `Invalid address operator: Prefix "module." 
must be followed by a module name.`, + }, + { + Input: "module.a.module.b.c", + WantErr: "Invalid address: Resource specification must include a resource type and name.", + }, + { + Input: "module.a.module.b.c.d[0]", + WantErr: `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + { + Input: "module.a.module.b.data.c.d[0]", + WantErr: `Resource instance address with keys is not allowed: Resource address cannot be a resource instance (e.g. "null_resource.a[0]"), it must be a resource instead (e.g. "null_resource.a").`, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.Input, func(t *testing.T) { + t.Parallel() + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.InitialPos) + if hclDiags.HasErrors() { + t.Fatalf("Bug in tests: %s", hclDiags.Error()) + } + + configRes, diags := ParseConfigResource(traversal) + + switch { + case test.WantErr != "": + if !diags.HasErrors() { + t.Fatalf("Unexpected success, wanted error: %s", test.WantErr) + } + + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("Mismatched error\nGot: %s\nWant: %s", gotErr, test.WantErr) + } + default: + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err().Error()) + } + if diff := cmp.Diff(test.WantConfigResource, configRes); diff != "" { + t.Fatalf("Mismatched result:\n%s", diff) + } + } + }) + } +} diff --git a/pkg/addrs/resourcemode_string.go b/pkg/addrs/resourcemode_string.go new file mode 100644 index 00000000000..0b5c33f8ee2 --- /dev/null +++ b/pkg/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _ResourceMode_name_0 = "InvalidResourceMode" + _ResourceMode_name_1 = "DataResourceMode" + _ResourceMode_name_2 = "ManagedResourceMode" +) + +func (i ResourceMode) String() string { + switch { + case i == 0: + return _ResourceMode_name_0 + case i == 68: + return _ResourceMode_name_1 + case i == 77: + return _ResourceMode_name_2 + default: + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/addrs/self.go b/pkg/addrs/self.go new file mode 100644 index 00000000000..772b9ce87bd --- /dev/null +++ b/pkg/addrs/self.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Self is the address of the special object "self" that behaves as an alias +// for a containing object currently in scope. +const Self selfT = 0 + +type selfT int + +func (s selfT) referenceableSigil() { +} + +func (s selfT) String() string { + return "self" +} + +func (s selfT) UniqueKey() UniqueKey { + return Self // Self is its own UniqueKey +} + +func (s selfT) uniqueKeySigil() {} diff --git a/pkg/addrs/set.go b/pkg/addrs/set.go new file mode 100644 index 00000000000..8ad995fd20e --- /dev/null +++ b/pkg/addrs/set.go @@ -0,0 +1,72 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Set represents a set of addresses of types that implement UniqueKeyer. +// +// Modify the set only by the methods on this type. 
This type exposes its +// internals for convenience during reading, such as iterating over set elements +// by ranging over the map values, but making direct modifications could +// potentially make the set data invalid or inconsistent, leading to undefined +// behavior elsewhere. +type Set[T UniqueKeyer] map[UniqueKey]T + +func MakeSet[T UniqueKeyer](elems ...T) Set[T] { + ret := Set[T](make(map[UniqueKey]T, len(elems))) + for _, elem := range elems { + ret.Add(elem) + } + return ret +} + +// Has returns true if and only if the set includes the given address. +func (s Set[T]) Has(addr T) bool { + _, exists := s[addr.UniqueKey()] + return exists +} + +// Add inserts the given address into the set, if not already present. If +// an equivalent address is already in the set, this replaces that address +// with the new value. +func (s Set[T]) Add(addr T) { + s[addr.UniqueKey()] = addr +} + +// Remove deletes the given address from the set, if present. If not present, +// this is a no-op. +func (s Set[T]) Remove(addr T) { + delete(s, addr.UniqueKey()) +} + +// Union returns a new set which contains the union of all of the elements +// of both the reciever and the given other set. +func (s Set[T]) Union(other Set[T]) Set[T] { + ret := make(Set[T]) + for k, addr := range s { + ret[k] = addr + } + for k, addr := range other { + ret[k] = addr + } + return ret +} + +// Intersection returns a new set which contains the intersection of all of the +// elements of both the reciever and the given other set. 
+func (s Set[T]) Intersection(other Set[T]) Set[T] { + ret := make(Set[T]) + for k, addr := range s { + if _, exists := other[k]; exists { + ret[k] = addr + } + } + for k, addr := range other { + if _, exists := s[k]; exists { + ret[k] = addr + } + } + return ret +} diff --git a/pkg/addrs/target_test.go b/pkg/addrs/target_test.go new file mode 100644 index 00000000000..8ecd7f26a15 --- /dev/null +++ b/pkg/addrs/target_test.go @@ -0,0 +1,236 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" +) + +func TestTargetContains(t *testing.T) { + for _, test := range []struct { + addr, other Targetable + expect bool + }{ + { + mustParseTarget("module.foo"), + mustParseTarget("module.bar"), + false, + }, + { + mustParseTarget("module.foo"), + mustParseTarget("module.foo"), + true, + }, + { + RootModuleInstance, + mustParseTarget("module.foo"), + true, + }, + { + mustParseTarget("module.foo"), + RootModuleInstance, + false, + }, + { + mustParseTarget("module.foo"), + mustParseTarget("module.foo.module.bar[0]"), + true, + }, + { + mustParseTarget("module.foo"), + mustParseTarget("module.foo.module.bar[0]"), + true, + }, + { + mustParseTarget("module.foo[2]"), + mustParseTarget("module.foo[2].module.bar[0]"), + true, + }, + { + mustParseTarget("module.foo"), + mustParseTarget("module.foo.test_resource.bar"), + true, + }, + { + mustParseTarget("module.foo"), + mustParseTarget("module.foo.test_resource.bar[0]"), + true, + }, + + // Resources + { + mustParseTarget("test_resource.foo"), + mustParseTarget("test_resource.foo[\"bar\"]"), + true, + }, + { + mustParseTarget(`test_resource.foo["bar"]`), + mustParseTarget(`test_resource.foo["bar"]`), + true, + }, + { + mustParseTarget("test_resource.foo"), + mustParseTarget("test_resource.foo[2]"), + true, + }, + { + mustParseTarget("test_resource.foo"), + 
mustParseTarget("module.bar.test_resource.foo[2]"), + false, + }, + { + mustParseTarget("module.bar.test_resource.foo"), + mustParseTarget("module.bar.test_resource.foo[2]"), + true, + }, + { + mustParseTarget("module.bar.test_resource.foo"), + mustParseTarget("module.bar[0].test_resource.foo[2]"), + false, + }, + { + mustParseTarget("module.bar.test_resource.foo"), + mustParseTarget("module.bar.test_resource.foo[0]"), + true, + }, + { + mustParseTarget("module.bax"), + mustParseTarget("module.bax[0].test_resource.foo[0]"), + true, + }, + + // Config paths, while never returned from parsing a target, must still + // be targetable + { + ConfigResource{ + Module: []string{"bar"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }, + }, + mustParseTarget("module.bar.test_resource.foo[2]"), + true, + }, + { + mustParseTarget("module.bar"), + ConfigResource{ + Module: []string{"bar"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }, + }, + true, + }, + { + mustParseTarget("module.bar.test_resource.foo"), + ConfigResource{ + Module: []string{"bar"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }, + }, + true, + }, + { + ConfigResource{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }, + }, + mustParseTarget("module.bar.test_resource.foo[2]"), + false, + }, + { + ConfigResource{ + Module: []string{"bar"}, + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }, + }, + mustParseTarget("module.bar[0].test_resource.foo"), + true, + }, + + // Modules are also never the result of parsing a target, but also need + // to be targetable + { + Module{"bar"}, + Module{"bar", "baz"}, + true, + }, + { + Module{"bar"}, + mustParseTarget("module.bar[0]"), + true, + }, + { + // Parsing an ambiguous module path needs to ensure the + // ModuleInstance could contain the 
Module. This is safe because if + // the module could be expanded, it must have an index, meaning no + // index indicates that the module instance and module are + // functionally equivalent. + mustParseTarget("module.bar"), + Module{"bar"}, + true, + }, + { + // A specific ModuleInstance cannot contain a module + mustParseTarget("module.bar[0]"), + Module{"bar"}, + false, + }, + { + Module{"bar", "baz"}, + mustParseTarget("module.bar[0].module.baz.test_resource.foo[1]"), + true, + }, + { + mustParseTarget("module.bar[0].module.baz"), + Module{"bar", "baz"}, + false, + }, + } { + t.Run(fmt.Sprintf("%s-in-%s", test.other, test.addr), func(t *testing.T) { + got := test.addr.TargetContains(test.other) + if got != test.expect { + t.Fatalf("expected %q.TargetContains(%q) == %t", test.addr, test.other, test.expect) + } + }) + } +} + +func TestResourceContains(t *testing.T) { + for _, test := range []struct { + in, other Targetable + expect bool + }{} { + t.Run(fmt.Sprintf("%s-in-%s", test.other, test.in), func(t *testing.T) { + got := test.in.TargetContains(test.other) + if got != test.expect { + t.Fatalf("expected %q.TargetContains(%q) == %t", test.in, test.other, test.expect) + } + }) + } +} + +func mustParseTarget(str string) Targetable { + t, diags := ParseTargetStr(str) + if diags != nil { + panic(fmt.Sprintf("%s: %s", str, diags.ErrWithWarnings())) + } + return t.Subject +} diff --git a/pkg/addrs/targetable.go b/pkg/addrs/targetable.go new file mode 100644 index 00000000000..1884ddca884 --- /dev/null +++ b/pkg/addrs/targetable.go @@ -0,0 +1,45 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// Targetable is an interface implemented by all address types that can be +// used as "targets" for selecting sub-graphs of a graph. 
+type Targetable interface { + targetableSigil() + + // TargetContains returns true if the receiver is considered to contain + // the given other address. Containment, for the purpose of targeting, + // means that if a container address is targeted then all of the + // addresses within it are also implicitly targeted. + // + // A targetable address always contains at least itself. + TargetContains(other Targetable) bool + + // AddrType returns the address type for comparison with other Targetable + // addresses. + AddrType() TargetableAddrType + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseTarget to produce an + // identical result. + String() string +} + +type targetable struct { +} + +func (r targetable) targetableSigil() { +} + +type TargetableAddrType int + +const ( + ConfigResourceAddrType TargetableAddrType = iota + AbsResourceInstanceAddrType + AbsResourceAddrType + ModuleAddrType + ModuleInstanceAddrType +) diff --git a/pkg/addrs/tf_attr.go b/pkg/addrs/tf_attr.go new file mode 100644 index 00000000000..c1bcc58e424 --- /dev/null +++ b/pkg/addrs/tf_attr.go @@ -0,0 +1,36 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +const ( + IdentTerraform = "terraform" + IdentTofu = "tofu" +) + +func NewTerraformAttr(alias, name string) TerraformAttr { + return TerraformAttr{ + Name: name, + Alias: alias, + } +} + +// TerraformAttr is the address of an attribute of the "terraform" and "tofu" object in +// the interpolation scope, like "terraform.workspace" and "tofu.workspace". +type TerraformAttr struct { + referenceable + Name string + Alias string +} + +func (ta TerraformAttr) String() string { + return ta.Alias + "." 
+ ta.Name +} + +func (ta TerraformAttr) UniqueKey() UniqueKey { + return ta // A TerraformAttr is its own UniqueKey +} + +func (ta TerraformAttr) uniqueKeySigil() {} diff --git a/pkg/addrs/unique_key.go b/pkg/addrs/unique_key.go new file mode 100644 index 00000000000..18a20b44273 --- /dev/null +++ b/pkg/addrs/unique_key.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package addrs + +// UniqueKey is an interface implemented by values that serve as unique map +// keys for particular addresses. +// +// All implementations of UniqueKey are comparable and can thus be used as +// map keys. Unique keys generated from different address types are always +// distinct. All functionally-equivalent keys for the same address type +// always compare equal, and likewise functionally-different values do not. +type UniqueKey interface { + uniqueKeySigil() +} + +// UniqueKeyer is an interface implemented by types that can be represented +// by a unique key. +// +// Some address types naturally comply with the expectations of a UniqueKey +// and may thus be their own unique key type. However, address types that +// are not naturally comparable can implement this interface by returning +// proxy values. +type UniqueKeyer interface { + UniqueKey() UniqueKey +} + +func Equivalent[T UniqueKeyer](a, b T) bool { + return a.UniqueKey() == b.UniqueKey() +} diff --git a/pkg/addrs/unique_key_test.go b/pkg/addrs/unique_key_test.go new file mode 100644 index 00000000000..df3b8a96e9a --- /dev/null +++ b/pkg/addrs/unique_key_test.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package addrs + +import ( + "fmt" + "testing" +) + +// TestUniqueKeyer aims to ensure that all of the types that have unique keys +// will continue to meet the UniqueKeyer contract under future changes. +// +// If you add a new implementation of UniqueKey, consider adding a test case +// for it here. +func TestUniqueKeyer(t *testing.T) { + tests := []UniqueKeyer{ + CountAttr{Name: "index"}, + ForEachAttr{Name: "key"}, + TerraformAttr{Name: "workspace"}, + PathAttr{Name: "module"}, + InputVariable{Name: "foo"}, + ModuleCall{Name: "foo"}, + ModuleCallInstance{ + Call: ModuleCall{Name: "foo"}, + Key: StringKey("a"), + }, + ModuleCallOutput{ + Call: ModuleCall{Name: "foo"}, + Name: "bar", + }, + ModuleCallInstanceOutput{ + Call: ModuleCallInstance{ + Call: ModuleCall{Name: "foo"}, + Key: StringKey("a"), + }, + Name: "bar", + }, + Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + ResourceInstance{ + Resource: Resource{ + Mode: ManagedResourceMode, + Type: "foo", + Name: "bar", + }, + Key: IntKey(1), + }, + RootModuleInstance, + RootModuleInstance.Child("foo", NoKey), + RootModuleInstance.ResourceInstance( + DataResourceMode, + "boop", + "beep", + NoKey, + ), + Self, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s", test), func(t *testing.T) { + a := test.UniqueKey() + b := test.UniqueKey() + + // The following comparison will panic if the unique key is not + // of a comparable type. + if a != b { + t.Fatalf("the two unique keys are not equal\na: %#v\b: %#v", a, b) + } + }) + } +} diff --git a/pkg/backend/backend.go b/pkg/backend/backend.go new file mode 100644 index 00000000000..95782cc4209 --- /dev/null +++ b/pkg/backend/backend.go @@ -0,0 +1,441 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package backend provides interfaces that the CLI uses to interact with +// OpenTofu. 
A backend provides the abstraction that allows the same CLI +// to simultaneously support both local and remote operations for seamlessly +// using OpenTofu in a team environment. +package backend + +import ( + "context" + "errors" + "log" + "os" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" +) + +// DefaultStateName is the name of the default, initial state that every +// backend must have. This state cannot be deleted. +const DefaultStateName = "default" + +var ( + // ErrDefaultWorkspaceNotSupported is returned when an operation does not + // support using the default workspace, but requires a named workspace to + // be selected. + ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" + + "You can create a new workspace with the \"workspace new\" command.") + + // ErrWorkspacesNotSupported is an error returned when a caller attempts + // to perform an operation on a workspace other than "default" for a + // backend that doesn't support multiple workspaces. + // + // The caller can detect this to do special fallback behavior or produce + // a specific, helpful error message. + ErrWorkspacesNotSupported = errors.New("workspaces not supported") +) + +// InitFn is used to initialize a new backend. 
+type InitFn func(encryption.StateEncryption) Backend + +// Backend is the minimal interface that must be implemented to enable OpenTofu. +type Backend interface { + // ConfigSchema returns a description of the expected configuration + // structure for the receiving backend. + // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. + ConfigSchema() *configschema.Block + + // PrepareConfig checks the validity of the values in the given + // configuration, and inserts any missing defaults, assuming that its + // structure has already been validated per the schema returned by + // ConfigSchema. + // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. It also does not consult any + // external data such as environment variables, disk files, etc. Validation + // that requires such external data should be deferred until the + // Configure call. + // + // If error diagnostics are returned then the configuration is not valid + // and must not subsequently be passed to the Configure method. + // + // This method may return configuration-contextual diagnostics such + // as tfdiags.AttributeValue, and so the caller should provide the + // necessary context via the diags.InConfigBody method before returning + // diagnostics to the user. + PrepareConfig(cty.Value) (cty.Value, tfdiags.Diagnostics) + + // Configure uses the provided configuration to set configuration fields + // within the backend. + // + // The given configuration is assumed to have already been validated + // against the schema returned by ConfigSchema and passed validation + // via PrepareConfig. + // + // This method may be called only once per backend instance, and must be + // called before all other methods except where otherwise stated. + // + // If error diagnostics are returned, the internal state of the instance + // is undefined and no other methods may be called. 
+ Configure(cty.Value) tfdiags.Diagnostics + + // StateMgr returns the state manager for the given workspace name. + // + // If the returned state manager also implements statemgr.Locker then + // it's the caller's responsibility to call Lock and Unlock as appropriate. + // + // If the named workspace doesn't exist, or if it has no state, it will + // be created either immediately on this call or the first time + // PersistState is called, depending on the state manager implementation. + StateMgr(workspace string) (statemgr.Full, error) + + // DeleteWorkspace removes the workspace with the given name if it exists. + // + // DeleteWorkspace cannot prevent deleting a state that is in use. It is + // the responsibility of the caller to hold a Lock for the state manager + // belonging to this workspace before calling this method. + DeleteWorkspace(name string, force bool) error + + // States returns a list of the names of all of the workspaces that exist + // in this backend. + Workspaces() ([]string, error) +} + +// HostAlias describes a list of aliases that should be used when initializing an +// Enhanced Backend +type HostAlias struct { + From svchost.Hostname + To svchost.Hostname +} + +// Enhanced implements additional behavior on top of a normal backend. +// +// 'Enhanced' backends are an implementation detail only, and are no longer reflected as an external +// 'feature' of backends. In other words, backends refer to plugins for remote state snapshot +// storage only, and the Enhanced interface here is a necessary vestige of the 'local' and +// remote/cloud backends only. +type Enhanced interface { + Backend + + // Operation performs a OpenTofu operation such as refresh, plan, apply. + // It is up to the implementation to determine what "performing" means. + // This DOES NOT BLOCK. The context returned as part of RunningOperation + // should be used to block for completion. 
+ // If the state used in the operation can be locked, it is the + // responsibility of the Backend to lock the state for the duration of the + // running operation. + Operation(context.Context, *Operation) (*RunningOperation, error) + + // ServiceDiscoveryAliases returns a mapping of Alias -> Target hosts to + // configure. + ServiceDiscoveryAliases() ([]HostAlias, error) +} + +// Local implements additional behavior on a Backend that allows local +// operations in addition to remote operations. +// +// This enables more behaviors of OpenTofu that require more data such +// as `console`, `import`, `graph`. These require direct access to +// configurations, variables, and more. Not all backends may support this +// so we separate it out into its own optional interface. +type Local interface { + // LocalRun uses information in the Operation to prepare a set of objects + // needed to start running that operation. + // + // The operation doesn't need a Type set, but it needs various other + // options set. This is a rather odd API that tries to treat all + // operations as the same when they really aren't; see the local and remote + // backend's implementations of this to understand what this actually + // does, because this operation has no well-defined contract aside from + // "whatever it already does". + LocalRun(*Operation) (*LocalRun, statemgr.Full, tfdiags.Diagnostics) +} + +// LocalRun represents the assortment of objects that we can collect or +// calculate from an Operation object, which we can then use for local +// operations. +// +// The operation methods on tofu.Context (Plan, Apply, Import, etc) each +// generate new artifacts which supersede parts of the LocalRun object that +// started the operation, so callers should be careful to use those subsequent +// artifacts instead of the fields of LocalRun where appropriate. The LocalRun +// data intentionally doesn't update as a result of calling methods on Context, +// in order to make data flow explicit. 
+// +// This type is a weird architectural wart resulting from the overly-general +// way our backend API models operations, whereby we behave as if all +// OpenTofu operations have the same inputs and outputs even though they +// are actually all rather different. The exact meaning of the fields in +// this type therefore vary depending on which OperationType was passed to +// Local.Context in order to create an object of this type. +type LocalRun struct { + // Core is an already-initialized OpenTofu Core context, ready to be + // used to run operations such as Plan and Apply. + Core *tofu.Context + + // Config is the configuration we're working with, which typically comes + // from either config files directly on local disk (when we're creating + // a plan, or similar) or from a snapshot embedded in a plan file + // (when we're applying a saved plan). + Config *configs.Config + + // InputState is the state that should be used for whatever is the first + // method call to a context created with CoreOpts. When creating a plan + // this will be the previous run state, but when applying a saved plan + // this will be the prior state recorded in that plan. + InputState *states.State + + // PlanOpts are options to pass to a Plan or Plan-like operation. + // + // This is nil when we're applying a saved plan, because the plan itself + // contains enough information about its options to apply it. + PlanOpts *tofu.PlanOpts + + // Plan is a plan loaded from a saved plan file, if our operation is to + // apply that saved plan. + // + // This is nil when we're not applying a saved plan. + Plan *plans.Plan +} + +// An operation represents an operation for OpenTofu to execute. +// +// Note that not all fields are supported by all backends and can result +// in an error if set. All backend implementations should show user-friendly +// errors explaining any incorrectly set values. For example, the local +// backend doesn't support a PlanId being set. 
+// +// The operation options are purposely designed to have maximal compatibility +// between OpenTofu and Terraform Servers (a commercial product offered by +// HashiCorp). Therefore, it isn't expected that other implementation support +// every possible option. The struct here is generalized in order to allow +// even partial implementations to exist in the open, without walling off +// remote functionality 100% behind a commercial wall. Anyone can implement +// against this interface and have OpenTofu interact with it just as it +// would with HashiCorp-provided Terraform Servers. +type Operation struct { + // Type is the operation to perform. + Type OperationType + + // Encryption is used by enhanced backends for planning and tofu.Context initialization + Encryption encryption.Encryption + + // PlanId is an opaque value that backends can use to execute a specific + // plan for an apply operation. + // + // PlanOutBackend is the backend to store with the plan. This is the + // backend that will be used when applying the plan. + PlanId string + PlanRefresh bool // PlanRefresh will do a refresh before a plan + PlanOutPath string // PlanOutPath is the path to save the plan + PlanOutBackend *plans.Backend + + // ConfigDir is the path to the directory containing the configuration's + // root module. + ConfigDir string + + // ConfigLoader is a configuration loader that can be used to load + // configuration from ConfigDir. + ConfigLoader *configload.Loader + + // DependencyLocks represents the locked dependencies associated with + // the configuration directory given in ConfigDir. + // + // Note that if field PlanFile is set then the plan file should contain + // its own dependency locks. The backend is responsible for correctly + // selecting between these two sets of locks depending on whether it + // will be using ConfigDir or PlanFile to get the configuration for + // this operation. 
+ DependencyLocks *depsfile.Locks + + // Hooks can be used to perform actions triggered by various events during + // the operation's lifecycle. + Hooks []tofu.Hook + + // Plan is a plan that was passed as an argument. This is valid for + // plan and apply arguments but may not work for all backends. + PlanFile *planfile.WrappedPlanFile + + // The options below are more self-explanatory and affect the runtime + // behavior of the operation. + PlanMode plans.Mode + AutoApprove bool + Targets []addrs.Targetable + ForceReplace []addrs.AbsResourceInstance + // Injected by the command creating the operation (plan/apply/refresh/etc...) + Variables map[string]UnparsedVariableValue + RootCall configs.StaticModuleCall + + // Some operations use root module variables only opportunistically or + // don't need them at all. If this flag is set, the backend must treat + // all variables as optional and provide an unknown value for any required + // variables that aren't set in order to allow partial evaluation against + // the resulting incomplete context. + // + // This flag is honored only if PlanFile isn't set. If PlanFile is set then + // the variables set in the plan are used instead, and they must be valid. + AllowUnsetVariables bool + + // View implements the logic for all UI interactions. + View views.Operation + + // Input/output/control options. + UIIn tofu.UIInput + UIOut tofu.UIOutput + + // StateLocker is used to lock the state while providing UI feedback to the + // user. This will be replaced by the Backend to update the context. + // + // If state locking is not necessary, this should be set to a no-op + // implementation of clistate.Locker. + StateLocker clistate.Locker + + // Workspace is the name of the workspace that this operation should run + // in, which controls which named state is used. 
+ Workspace string + + // GenerateConfigOut tells the operation both that it should generate config + // for unmatched import targets and where any generated config should be + // written to. + GenerateConfigOut string +} + +// HasConfig returns true if and only if the operation has a ConfigDir value +// that refers to a directory containing at least one OpenTofu configuration +// file. +func (o *Operation) HasConfig() bool { + return o.ConfigLoader.IsConfigDir(o.ConfigDir) +} + +// ReportResult is a helper for the common chore of setting the status of +// a running operation and showing any diagnostics produced during that +// operation. +// +// If the given diagnostics contains errors then the operation's result +// will be set to backend.OperationFailure. It will be set to +// backend.OperationSuccess otherwise. It will then use o.View.Diagnostics +// to show the given diagnostics before returning. +// +// Callers should feel free to do each of these operations separately in +// more complex cases where e.g. diagnostics are interleaved with other +// output, but terminating immediately after reporting error diagnostics is +// common and can be expressed concisely via this method. +func (o *Operation) ReportResult(op *RunningOperation, diags tfdiags.Diagnostics) { + if diags.HasErrors() { + op.Result = OperationFailure + } else { + op.Result = OperationSuccess + } + if o.View != nil { + o.View.Diagnostics(diags) + } else { + // Shouldn't generally happen, but if it does then we'll at least + // make some noise in the logs to help us spot it. + if len(diags) != 0 { + log.Printf( + "[ERROR] Backend needs to report diagnostics but View is not set:\n%s", + diags.ErrWithWarnings(), + ) + } + } +} + +// RunningOperation is the result of starting an operation. +type RunningOperation struct { + // For implementers of a backend, this context should not wrap the + // passed in context. 
Otherwise, cancelling the parent context will + // immediately mark this context as "done" but those aren't the semantics + // we want: we want this context to be done only when the operation itself + // is fully done. + context.Context + + // Stop requests the operation to complete early, by calling Stop on all + // the plugins. If the process needs to terminate immediately, call Cancel. + Stop context.CancelFunc + + // Cancel is the context.CancelFunc associated with the embedded context, + // and can be called to terminate the operation early. + // Once Cancel is called, the operation should return as soon as possible + // to avoid running operations during process exit. + Cancel context.CancelFunc + + // Result is the exit status of the operation, populated only after the + // operation has completed. + Result OperationResult + + // PlanEmpty is populated after a Plan operation completes to note whether + // a plan is empty or has changes. This is only used in the CLI to determine + // the exit status because the plan value is not available at that point. + PlanEmpty bool + + // State is the final state after the operation completed. Persisting + // this state is managed by the backend. This should only be read + // after the operation completes to avoid read/write races. + State *states.State +} + +// OperationResult describes the result status of an operation. +type OperationResult int + +const ( + // OperationSuccess indicates that the operation completed as expected. + OperationSuccess OperationResult = 0 + + // OperationFailure indicates that the operation encountered some sort + // of error, and thus may have been only partially performed or not + // performed at all. + OperationFailure OperationResult = 1 +) + +func (r OperationResult) ExitStatus() int { + return int(r) +} + +// If the argument is a path, ReadPathOrContents loads it and returns the contents, +// otherwise the argument is assumed to be the desired contents and is simply +// returned. 
+func ReadPathOrContents(poc string) (string, error) { + if len(poc) == 0 { + return poc, nil + } + + path := poc + if path[0] == '~' { + var err error + path, err = homedir.Expand(path) + if err != nil { + return path, err + } + } + + if _, err := os.Stat(path); err == nil { + contents, err := os.ReadFile(path) + if err != nil { + return string(contents), err + } + return string(contents), nil + } + + return poc, nil +} diff --git a/pkg/backend/backend_test.go b/pkg/backend/backend_test.go new file mode 100644 index 00000000000..0c62f92fe4b --- /dev/null +++ b/pkg/backend/backend_test.go @@ -0,0 +1,133 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backend + +import ( + "io" + "os" + "os/user" + "strings" + "testing" + + "github.com/mitchellh/go-homedir" +) + +func TestReadPathOrContents_Path(t *testing.T) { + f, cleanup := testTempFile(t) + defer cleanup() + + if _, err := io.WriteString(f, "foobar"); err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + + contents, err := ReadPathOrContents(f.Name()) + + if err != nil { + t.Fatalf("err: %s", err) + } + if contents != "foobar" { + t.Fatalf("expected contents %s, got %s", "foobar", contents) + } +} + +func TestReadPathOrContents_TildePath(t *testing.T) { + home, err := homedir.Dir() + if err != nil { + t.Fatalf("err: %s", err) + } + f, cleanup := testTempFile(t, home) + defer cleanup() + + if _, err := io.WriteString(f, "foobar"); err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + + r := strings.NewReplacer(home, "~") + homePath := r.Replace(f.Name()) + contents, err := ReadPathOrContents(homePath) + + if err != nil { + t.Fatalf("err: %s", err) + } + if contents != "foobar" { + t.Fatalf("expected contents %s, got %s", "foobar", contents) + } +} + +func TestRead_PathNoPermission(t *testing.T) { + // This skip condition is intended to get this test out of the way of users + // who are 
building and testing OpenTofu from within a Linux-based Docker + // container, where it is common for processes to be running as effectively + // root within the container. + if u, err := user.Current(); err == nil && u.Uid == "0" { + t.Skip("This test is invalid when running as root, since root can read every file") + } + + f, cleanup := testTempFile(t) + defer cleanup() + + if _, err := io.WriteString(f, "foobar"); err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + + if err := os.Chmod(f.Name(), 0); err != nil { + t.Fatalf("err: %s", err) + } + + contents, err := ReadPathOrContents(f.Name()) + + if err == nil { + t.Fatal("Expected error, got none!") + } + if contents != "" { + t.Fatalf("expected contents %s, got %s", "", contents) + } +} + +func TestReadPathOrContents_Contents(t *testing.T) { + input := "hello" + + contents, err := ReadPathOrContents(input) + + if err != nil { + t.Fatalf("err: %s", err) + } + if contents != input { + t.Fatalf("expected contents %s, got %s", input, contents) + } +} + +func TestReadPathOrContents_TildeContents(t *testing.T) { + input := "~/hello/notafile" + + contents, err := ReadPathOrContents(input) + + if err != nil { + t.Fatalf("err: %s", err) + } + if contents != input { + t.Fatalf("expected contents %s, got %s", input, contents) + } +} + +// Returns an open tempfile based at baseDir and a function to clean it up. +func testTempFile(t *testing.T, baseDir ...string) (*os.File, func()) { + base := "" + if len(baseDir) == 1 { + base = baseDir[0] + } + f, err := os.CreateTemp(base, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + + return f, func() { + os.Remove(f.Name()) + } +} diff --git a/pkg/backend/cli.go b/pkg/backend/cli.go new file mode 100644 index 00000000000..aef92e624e2 --- /dev/null +++ b/pkg/backend/cli.go @@ -0,0 +1,96 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package backend + +import ( + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// CLI is an optional interface that can be implemented to be initialized +// with information from the OpenTofu CLI. If this is implemented, this +// initialization function will be called with data to help interact better +// with a CLI. +// +// This interface was created to improve backend interaction with the +// official OpenTofu CLI while making it optional for API users to have +// to provide full CLI interaction to every backend. +// +// If you're implementing a Backend, it is acceptable to require CLI +// initialization. In this case, your backend should be coded to error +// on other methods (such as State, Operation) if CLI initialization was not +// done with all required fields. +type CLI interface { + Backend + + // CLIInit is called once with options. The options passed to this + // function may not be modified after calling this since they can be + // read/written at any time by the Backend implementation. + // + // This may be called before or after Configure is called, so if settings + // here affect configurable settings, care should be taken to handle + // whether they should be overwritten or not. + CLIInit(*CLIOpts) error +} + +// CLIOpts are the options passed into CLIInit for the CLI interface. +// +// These options represent the functionality the CLI exposes and often +// maps to meta-flags available on every CLI (such as -input). +// +// When implementing a backend, it isn't expected that every option applies. +// Your backend should be documented clearly to explain to end users what +// options have an effect and what won't. In some cases, it may even make sense +// to error in your backend when an option is set so that users don't make +// a critically incorrect assumption about behavior. 
+type CLIOpts struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // Streams describes the low-level streams for Stdout, Stderr and Stdin, + // including some metadata about whether they are terminals. Most output + // should go via the object in field CLI above, but Streams can be useful + // for tailoring the output to fit the attached terminal, for example. + Streams *terminal.Streams + + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. + // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. + // If this is empty, no backup will be taken. + StatePath string + StateOutPath string + StateBackupPath string + + // ContextOpts are the base context options to set when initializing a + // OpenTofu context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *tofu.ContextOpts + + // Input will ask for necessary input prior to performing any operations. + // + // Validation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate. + Input bool + Validation bool + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because OpenTofu is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. 
+ RunningInAutomation bool +} diff --git a/pkg/backend/init/deprecate_test.go b/pkg/backend/init/deprecate_test.go new file mode 100644 index 00000000000..e5a75358322 --- /dev/null +++ b/pkg/backend/init/deprecate_test.go @@ -0,0 +1,36 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package init + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/zclconf/go-cty/cty" +) + +func TestDeprecateBackend(t *testing.T) { + deprecateMessage := "deprecated backend" + deprecatedBackend := deprecateBackend( + inmem.New(encryption.StateEncryptionDisabled()), + deprecateMessage, + ) + + _, diags := deprecatedBackend.PrepareConfig(cty.EmptyObjectVal) + if len(diags) != 1 { + t.Errorf("got %d diagnostics; want 1", len(diags)) + for _, diag := range diags { + t.Errorf("- %s", diag) + } + return + } + + desc := diags[0].Description() + if desc.Summary != deprecateMessage { + t.Fatalf("wrong message %q; want %q", desc.Summary, deprecateMessage) + } +} diff --git a/pkg/backend/init/init.go b/pkg/backend/init/init.go new file mode 100644 index 00000000000..89d6d3b6805 --- /dev/null +++ b/pkg/backend/init/init.go @@ -0,0 +1,149 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package init contains the list of backends that can be initialized and +// basic helper functions for initializing those backends. 
+package init + +import ( + "sync" + + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" + backendRemote "github.com/kubegems/opentofu/pkg/backend/remote" + backendAzure "github.com/kubegems/opentofu/pkg/backend/remote-state/azure" + backendConsul "github.com/kubegems/opentofu/pkg/backend/remote-state/consul" + backendCos "github.com/kubegems/opentofu/pkg/backend/remote-state/cos" + backendGCS "github.com/kubegems/opentofu/pkg/backend/remote-state/gcs" + backendHTTP "github.com/kubegems/opentofu/pkg/backend/remote-state/http" + backendInmem "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" + backendKubernetes "github.com/kubegems/opentofu/pkg/backend/remote-state/kubernetes" + backendOSS "github.com/kubegems/opentofu/pkg/backend/remote-state/oss" + backendPg "github.com/kubegems/opentofu/pkg/backend/remote-state/pg" + backendS3 "github.com/kubegems/opentofu/pkg/backend/remote-state/s3" + backendCloud "github.com/kubegems/opentofu/pkg/cloud" +) + +// backends is the list of available backends. This is a global variable +// because backends are currently hardcoded into OpenTofu and can't be +// modified without recompilation. +// +// To read an available backend, use the Backend function. This ensures +// safe concurrent read access to the list of built-in backends. +// +// Backends are hardcoded into OpenTofu because the API for backends uses +// complex structures and supporting that over the plugin system is currently +// prohibitively difficult. For those wanting to implement a custom backend, +// they can do so with recompilation. +var backends map[string]backend.InitFn +var backendsLock sync.Mutex + +// RemovedBackends is a record of previously supported backends which have +// since been deprecated and removed. 
+var RemovedBackends map[string]string + +// Init initializes the backends map with all our hardcoded backends. +func Init(services *disco.Disco) { + backendsLock.Lock() + defer backendsLock.Unlock() + + backends = map[string]backend.InitFn{ + "local": func(enc encryption.StateEncryption) backend.Backend { return backendLocal.New(enc) }, + "remote": func(enc encryption.StateEncryption) backend.Backend { return backendRemote.New(services, enc) }, + + // Remote State backends. + "azurerm": func(enc encryption.StateEncryption) backend.Backend { return backendAzure.New(enc) }, + "consul": func(enc encryption.StateEncryption) backend.Backend { return backendConsul.New(enc) }, + "cos": func(enc encryption.StateEncryption) backend.Backend { return backendCos.New(enc) }, + "gcs": func(enc encryption.StateEncryption) backend.Backend { return backendGCS.New(enc) }, + "http": func(enc encryption.StateEncryption) backend.Backend { return backendHTTP.New(enc) }, + "inmem": func(enc encryption.StateEncryption) backend.Backend { return backendInmem.New(enc) }, + "kubernetes": func(enc encryption.StateEncryption) backend.Backend { return backendKubernetes.New(enc) }, + "oss": func(enc encryption.StateEncryption) backend.Backend { return backendOSS.New(enc) }, + "pg": func(enc encryption.StateEncryption) backend.Backend { return backendPg.New(enc) }, + "s3": func(enc encryption.StateEncryption) backend.Backend { return backendS3.New(enc) }, + + // Terraform Cloud 'backend' + // This is an implementation detail only, used for the cloud package + "cloud": func(enc encryption.StateEncryption) backend.Backend { return backendCloud.New(services, enc) }, + } + + RemovedBackends = map[string]string{ + "artifactory": `The "artifactory" backend is not supported in OpenTofu v1.3 or later.`, + "azure": `The "azure" backend name has been removed, please use "azurerm".`, + "etcd": `The "etcd" backend is not supported in OpenTofu v1.3 or later.`, + "etcdv3": `The "etcdv3" backend is not 
supported in OpenTofu v1.3 or later.`, + "manta": `The "manta" backend is not supported in OpenTofu v1.3 or later.`, + "swift": `The "swift" backend is not supported in OpenTofu v1.3 or later.`, + } +} + +// Backend returns the initialization factory for the given backend, or +// nil if none exists. +func Backend(name string) backend.InitFn { + backendsLock.Lock() + defer backendsLock.Unlock() + return backends[name] +} + +// Set sets a new backend in the list of backends. If f is nil then the +// backend will be removed from the map. If this backend already exists +// then it will be overwritten. +// +// This method sets this backend globally and care should be taken to do +// this only before OpenTofu is executing to prevent odd behavior of backends +// changing mid-execution. +func Set(name string, f backend.InitFn) { + backendsLock.Lock() + defer backendsLock.Unlock() + + if f == nil { + delete(backends, name) + return + } + + backends[name] = f +} + +// deprecatedBackendShim is used to wrap a backend and inject a deprecation +// warning into the Validate method. +type deprecatedBackendShim struct { + backend.Backend + Message string +} + +// PrepareConfig delegates to the wrapped backend to validate its config +// and then appends shim's deprecation warning. +func (b deprecatedBackendShim) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + newObj, diags := b.Backend.PrepareConfig(obj) + return newObj, diags.Append(tfdiags.SimpleWarning(b.Message)) +} + +// DeprecateBackend can be used to wrap a backend to return a deprecation +// warning during validation. +func deprecateBackend(b backend.Backend, message string) backend.Backend { + // Since a Backend wrapped by deprecatedBackendShim can no longer be + // asserted as an Enhanced or Local backend, disallow those types here + // entirely. If something other than a basic backend.Backend needs to be + // deprecated, we can add that functionality to schema.Backend or the + // backend itself. 
+ if _, ok := b.(backend.Enhanced); ok { + panic("cannot use DeprecateBackend on an Enhanced Backend") + } + + if _, ok := b.(backend.Local); ok { + panic("cannot use DeprecateBackend on a Local Backend") + } + + return deprecatedBackendShim{ + Backend: b, + Message: message, + } +} diff --git a/pkg/backend/init/init_test.go b/pkg/backend/init/init_test.go new file mode 100644 index 00000000000..07e8a19fb84 --- /dev/null +++ b/pkg/backend/init/init_test.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package init + +import ( + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption" +) + +func TestInit_backend(t *testing.T) { + // Initialize the backends map + Init(nil) + + backends := []struct { + Name string + Type string + }{ + {"local", "*local.Local"}, + {"remote", "*remote.Remote"}, + {"azurerm", "*azure.Backend"}, + {"consul", "*consul.Backend"}, + {"cos", "*cos.Backend"}, + {"gcs", "*gcs.Backend"}, + {"inmem", "*inmem.Backend"}, + {"pg", "*pg.Backend"}, + {"s3", "*s3.Backend"}, + } + + // Make sure we get the requested backend + for _, b := range backends { + t.Run(b.Name, func(t *testing.T) { + f := Backend(b.Name) + if f == nil { + t.Fatalf("backend %q is not present; should be", b.Name) + } + bType := reflect.TypeOf(f(encryption.StateEncryptionDisabled())).String() + if bType != b.Type { + t.Fatalf("expected backend %q to be %q, got: %q", b.Name, b.Type, bType) + } + }) + } +} diff --git a/pkg/backend/local/backend.go b/pkg/backend/local/backend.go new file mode 100644 index 00000000000..1caa3ab3385 --- /dev/null +++ b/pkg/backend/local/backend.go @@ -0,0 +1,505 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +const ( + DefaultWorkspaceDir = "terraform.tfstate.d" + DefaultWorkspaceFile = "environment" + DefaultStateFilename = "terraform.tfstate" + DefaultBackupExtension = ".backup" +) + +// Local is an implementation of EnhancedBackend that performs all operations +// locally. This is the "default" backend and implements normal OpenTofu +// behavior as it is well known. +type Local struct { + // The State* paths are set from the backend config, and may be left blank + // to use the defaults. If the actual paths for the local backend state are + // needed, use the StatePaths method. + // + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. + // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. + // Set this to "-" to disable state backup. + // + // StateWorkspaceDir is the path to the folder containing data for + // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. + StatePath string + StateOutPath string + StateBackupPath string + StateWorkspaceDir string + + // The OverrideState* paths are set based on per-operation CLI arguments + // and will override what'd be built from the State* fields if non-empty. 
+ // While the interpretation of the State* fields depends on the active + // workspace, the OverrideState* fields are always used literally. + OverrideStatePath string + OverrideStateOutPath string + OverrideStateBackupPath string + + // We only want to create a single instance of a local state, so store them + // here as they're loaded. + states map[string]statemgr.Full + + // OpenTofu context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *tofu.ContextOpts + + // OpInput will ask for necessary input prior to performing any operations. + // + // OpValidation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate. + OpInput bool + OpValidation bool + + // Backend, if non-nil, will use this backend for non-enhanced behavior. + // This allows local behavior with remote state storage. It is a way to + // "upgrade" a non-enhanced backend to an enhanced backend with typical + // behavior. + // + // If this is nil, local performs normal state loading and storage. + Backend backend.Backend + + // opLock locks operations + opLock sync.Mutex + + encryption encryption.StateEncryption +} + +var _ backend.Backend = (*Local)(nil) + +// New returns a new initialized local backend. +func New(enc encryption.StateEncryption) *Local { + return &Local{ + encryption: enc, + } +} + +// NewWithBackend returns a new local backend initialized with a +// dedicated backend for non-enhanced behavior. 
func NewWithBackend(backend backend.Backend, enc encryption.StateEncryption) *Local {
	// NOTE(review): the "backend" parameter shadows the imported backend
	// package within this function; rename it if the package is ever needed
	// here.
	return &Local{
		Backend:    backend,
		encryption: enc,
	}
}

// ConfigSchema returns the configuration schema for the local backend, or
// delegates to the wrapped backend when one is configured.
func (b *Local) ConfigSchema() *configschema.Block {
	if b.Backend != nil {
		return b.Backend.ConfigSchema()
	}
	return &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"path": {
				Type:     cty.String,
				Optional: true,
			},
			"workspace_dir": {
				Type:     cty.String,
				Optional: true,
			},
		},
	}
}

// PrepareConfig validates the given configuration object, rejecting empty
// "path" and "workspace_dir" values, and returns the object unchanged along
// with any diagnostics.
func (b *Local) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
	if b.Backend != nil {
		return b.Backend.PrepareConfig(obj)
	}

	var diags tfdiags.Diagnostics

	if val := obj.GetAttr("path"); !val.IsNull() {
		p := val.AsString()
		if p == "" {
			diags = diags.Append(tfdiags.AttributeValue(
				tfdiags.Error,
				"Invalid local state file path",
				`The "path" attribute value must not be empty.`,
				cty.Path{cty.GetAttrStep{Name: "path"}},
			))
		}
	}

	if val := obj.GetAttr("workspace_dir"); !val.IsNull() {
		p := val.AsString()
		if p == "" {
			diags = diags.Append(tfdiags.AttributeValue(
				tfdiags.Error,
				"Invalid local workspace directory path",
				`The "workspace_dir" attribute value must not be empty.`,
				cty.Path{cty.GetAttrStep{Name: "workspace_dir"}},
			))
		}
	}

	return obj, diags
}

// Configure stores the validated configuration on the receiver, applying
// DefaultStateFilename and DefaultWorkspaceDir for any unset attributes.
func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics {
	if b.Backend != nil {
		return b.Backend.Configure(obj)
	}

	var diags tfdiags.Diagnostics

	if val := obj.GetAttr("path"); !val.IsNull() {
		p := val.AsString()
		// A configured path is used for both reading and writing state.
		b.StatePath = p
		b.StateOutPath = p
	} else {
		b.StatePath = DefaultStateFilename
		b.StateOutPath = DefaultStateFilename
	}

	if val := obj.GetAttr("workspace_dir"); !val.IsNull() {
		p := val.AsString()
		b.StateWorkspaceDir = p
	} else {
		b.StateWorkspaceDir = DefaultWorkspaceDir
	}

	return diags
}

// ServiceDiscoveryAliases returns no aliases; the local backend does not use
// remote service discovery.
func (b *Local) ServiceDiscoveryAliases() ([]backend.HostAlias, error) {
	return []backend.HostAlias{}, nil
}

func (b *Local)
Workspaces() ([]string, error) {
	// If we have a backend handling state, defer to that.
	if b.Backend != nil {
		return b.Backend.Workspaces()
	}

	// The listing always starts with "default", which exists implicitly
	// even when no directory has been created for it.
	envs := []string{backend.DefaultStateName}

	entries, err := os.ReadDir(b.stateWorkspaceDir())
	// no error if there's no envs configured
	if os.IsNotExist(err) {
		return envs, nil
	}
	if err != nil {
		return nil, err
	}

	// Each subdirectory of the workspace dir is a workspace; plain files
	// are ignored.
	var listed []string
	for _, entry := range entries {
		if entry.IsDir() {
			// NOTE(review): entry.Name() from os.ReadDir is already a base
			// name, so this filepath.Base looks redundant — confirm before
			// removing.
			listed = append(listed, filepath.Base(entry.Name()))
		}
	}

	// Sort the discovered workspaces; "default" stays first regardless.
	sort.Strings(listed)
	envs = append(envs, listed...)

	return envs, nil
}

// DeleteWorkspace removes a workspace.
//
// The "default" workspace cannot be removed.
func (b *Local) DeleteWorkspace(name string, force bool) error {
	// If we have a backend handling state, defer to that.
	if b.Backend != nil {
		return b.Backend.DeleteWorkspace(name, force)
	}

	// NOTE(review): "force" is only passed through to the delegated backend
	// above; the purely-local path below ignores it.
	if name == "" {
		return errors.New("empty state name")
	}

	if name == backend.DefaultStateName {
		return errors.New("cannot delete default state")
	}

	// Drop any cached state manager before removing the files on disk.
	delete(b.states, name)
	return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name))
}

// StateMgr returns the state manager for the given workspace, delegating to
// the wrapped backend when one is configured.
func (b *Local) StateMgr(name string) (statemgr.Full, error) {
	// If we have a backend handling state, delegate to that.
	if b.Backend != nil {
		return b.Backend.StateMgr(name)
	}

	// Return the cached manager if one was already built for this workspace,
	// so each workspace gets at most one Filesystem state manager.
	if s, ok := b.states[name]; ok {
		return s, nil
	}

	// Make sure the workspace's directory exists before pointing a state
	// manager at it.
	if err := b.createState(name); err != nil {
		return nil, err
	}

	statePath, stateOutPath, backupPath := b.StatePaths(name)
	log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath)

	s := statemgr.NewFilesystemBetweenPaths(statePath, stateOutPath, b.encryption)
	if backupPath != "" {
		s.SetBackupPath(backupPath)
	}

	// Lazily initialize the cache map; it is nil until the first manager is
	// created.
	if b.states == nil {
		b.states = map[string]statemgr.Full{}
	}
	b.states[name] = s
	return s, nil
}

// Operation implements backend.Enhanced
//
// This will initialize an in-memory tofu.Context to perform the
// operation within this process.
//
// The given operation parameter will be merged with the ContextOpts on
// the structure with the following rules. If a rule isn't specified and the
// name conflicts, assume that the field is overwritten if set.
func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
	if op.View == nil {
		panic("Operation called with nil View")
	}

	// Determine the function to call for our operation
	var f func(context.Context, context.Context, *backend.Operation, *backend.RunningOperation)
	switch op.Type {
	case backend.OperationTypeRefresh:
		f = b.opRefresh
	case backend.OperationTypePlan:
		f = b.opPlan
	case backend.OperationTypeApply:
		f = b.opApply
	default:
		return nil, fmt.Errorf(
			"unsupported operation type: %s\n\n"+
				"This is a bug in OpenTofu and should be reported. The local backend\n"+
				"is built-in to OpenTofu and should always support all operations.",
			op.Type)
	}

	// Hold the operation lock for the duration of the operation; it is
	// released by the goroutine launched below when the operation completes.
	b.opLock.Lock()

	// Build our running operation
	// the runningCtx is only used to block until the operation returns.
+ runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + op.StateLocker = op.StateLocker.WithContext(stopCtx) + + panicHandler := logging.PanicHandlerWithTraceFn() + + // Do it + go func() { + defer panicHandler() + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + f(stopCtx, cancelCtx, op, runningOp) + }() + + // Return + return runningOp, nil +} + +// opWait waits for the operation to complete, and a stop signal or a +// cancelation signal. +func (b *Local) opWait( + doneCh <-chan struct{}, + stopCtx context.Context, + cancelCtx context.Context, + tfCtx *tofu.Context, + opStateMgr statemgr.Persister, + view views.Operation) (canceled bool) { + // Wait for the operation to finish or for us to be interrupted so + // we can handle it properly. + select { + case <-stopCtx.Done(): + view.Stopping() + + // try to force a PersistState just in case the process is terminated + // before we can complete. + if err := opStateMgr.PersistState(nil); err != nil { + // We can't error out from here, but warn the user if there was an error. + // If this isn't transient, we will catch it again below, and + // attempt to save the state another way. 
+ var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error saving current state", + fmt.Sprintf(earlyStateWriteErrorFmt, err), + )) + view.Diagnostics(diags) + } + + // Stop execution + log.Println("[TRACE] backend/local: waiting for the running operation to stop") + go tfCtx.Stop() + + select { + case <-cancelCtx.Done(): + log.Println("[WARN] running operation was forcefully canceled") + // if the operation was canceled, we need to return immediately + canceled = true + case <-doneCh: + log.Println("[TRACE] backend/local: graceful stop has completed") + } + case <-cancelCtx.Done(): + // this should not be called without first attempting to stop the + // operation + log.Println("[ERROR] running operation canceled without Stop") + canceled = true + case <-doneCh: + } + return +} + +// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as +// configured from the CLI. +func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) { + statePath := b.OverrideStatePath + stateOutPath := b.OverrideStateOutPath + backupPath := b.OverrideStateBackupPath + + isDefault := name == backend.DefaultStateName || name == "" + + baseDir := "" + if !isDefault { + baseDir = filepath.Join(b.stateWorkspaceDir(), name) + } + + if statePath == "" { + if isDefault { + statePath = b.StatePath // s.StatePath applies only to the default workspace, since StateWorkspaceDir is used otherwise + } + if statePath == "" { + statePath = filepath.Join(baseDir, DefaultStateFilename) + } + } + if stateOutPath == "" { + stateOutPath = statePath + } + if backupPath == "" { + backupPath = b.StateBackupPath + } + switch backupPath { + case "-": + backupPath = "" + case "": + backupPath = stateOutPath + DefaultBackupExtension + } + + return statePath, stateOutPath, backupPath +} + +// PathsConflictWith returns true if any state path used by a workspace in +// the receiver is the same as any state path used by the other given +// local 
backend instance. +// +// This should be used when "migrating" from one local backend configuration to +// another in order to avoid deleting the "old" state snapshots if they are +// in the same files as the "new" state snapshots. +func (b *Local) PathsConflictWith(other *Local) bool { + otherPaths := map[string]struct{}{} + otherWorkspaces, err := other.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain. + return true + } + for _, name := range otherWorkspaces { + p, _, _ := other.StatePaths(name) + otherPaths[p] = struct{}{} + } + + ourWorkspaces, err := other.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain. + return true + } + + for _, name := range ourWorkspaces { + p, _, _ := b.StatePaths(name) + if _, exists := otherPaths[p]; exists { + return true + } + } + return false +} + +// this only ensures that the named directory exists +func (b *Local) createState(name string) error { + if name == backend.DefaultStateName { + return nil + } + + stateDir := filepath.Join(b.stateWorkspaceDir(), name) + s, err := os.Stat(stateDir) + if err == nil && s.IsDir() { + // no need to check for os.IsNotExist, since that is covered by os.MkdirAll + // which will catch the other possible errors as well. + return nil + } + + err = os.MkdirAll(stateDir, 0755) + if err != nil { + return err + } + + return nil +} + +// stateWorkspaceDir returns the directory where state environments are stored. +func (b *Local) stateWorkspaceDir() string { + if b.StateWorkspaceDir != "" { + return b.StateWorkspaceDir + } + + return DefaultWorkspaceDir +} + +const earlyStateWriteErrorFmt = `Error: %s + +OpenTofu encountered an error attempting to save the state before cancelling the current operation. 
Once the operation is complete another attempt will be made to save the final state.` diff --git a/pkg/backend/local/backend_apply.go b/pkg/backend/local/backend_apply.go new file mode 100644 index 00000000000..c850f185f2d --- /dev/null +++ b/pkg/backend/local/backend_apply.go @@ -0,0 +1,386 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// test hook called between plan+apply during opApply +var testHookStopPlanApply func() + +const ( + defaultPersistInterval = 20 // arbitrary interval that's hopefully a sweet spot + persistIntervalEnvironmentVariableName = "TF_STATE_PERSIST_INTERVAL" +) + +func getEnvAsInt(envName string, defaultValue int) int { + if val, exists := os.LookupEnv(envName); exists { + parsedVal, err := strconv.Atoi(val) + if err == nil { + return parsedVal + } + panic(fmt.Sprintf("Can't parse value '%s' of environment variable '%s'", val, envName)) + } + return defaultValue +} + +func (b *Local) opApply( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + log.Printf("[INFO] backend/local: starting Apply operation") + + var diags, moreDiags tfdiags.Diagnostics + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. 
+ if op.PlanFile == nil && op.PlanMode != plans.DestroyMode && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Apply requires configuration to be present. Applying without a configuration "+ + "would mark everything for destruction, which is normally not what is desired. "+ + "If you would like to destroy everything, run 'tofu destroy' instead.", + )) + op.ReportResult(runningOp, diags) + return + } + + stateHook := new(StateHook) + op.Hooks = append(op.Hooks, stateHook) + + // Get our context + lr, _, opState, contextDiags := b.localRun(op) + diags = diags.Append(contextDiags) + if contextDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + // the state was locked during successful context creation; unlock the state + // when the operation completes + defer func() { + diags := op.StateLocker.Unlock() + if diags.HasErrors() { + op.View.Diagnostics(diags) + runningOp.Result = backend.OperationFailure + } + }() + + // We'll start off with our result being the input state, and replace it + // with the result state only if we eventually complete the apply + // operation. + runningOp.State = lr.InputState + + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + // stateHook uses schemas for when it periodically persists state to the + // persistent storage backend. 
+ stateHook.Schemas = schemas + persistInterval := getEnvAsInt(persistIntervalEnvironmentVariableName, defaultPersistInterval) + if persistInterval < defaultPersistInterval { + panic(fmt.Sprintf("Can't use value lower than %d for env variable %s, got %d", + defaultPersistInterval, persistIntervalEnvironmentVariableName, persistInterval)) + } + stateHook.PersistInterval = time.Duration(persistInterval) * time.Second + + var plan *plans.Plan + // If we weren't given a plan, then we refresh/plan + if op.PlanFile == nil { + // Perform the plan + log.Printf("[INFO] backend/local: apply calling Plan") + plan, moreDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + // If OpenTofu Core generated a partial plan despite the errors + // then we'll make the best effort to render it. OpenTofu Core + // promises that if it returns a non-nil plan along with errors + // then the plan won't necessarily contain all the needed + // actions but that any it does include will be properly-formed. + // plan.Errored will be true in this case, which our plan + // renderer can rely on to tailor its messaging. + if plan != nil && (len(plan.Changes.Resources) != 0 || len(plan.Changes.Outputs) != 0) { + op.View.Plan(plan, schemas) + } + op.ReportResult(runningOp, diags) + return + } + + trivialPlan := !plan.CanApply() + hasUI := op.UIOut != nil && op.UIIn != nil + mustConfirm := hasUI && !op.AutoApprove && !trivialPlan + op.View.Plan(plan, schemas) + + if testHookStopPlanApply != nil { + testHookStopPlanApply() + } + + // Check if we've been stopped before going through confirmation, or + // skipping confirmation in the case of -auto-approve. + // This can currently happen if a single stop request was received + // during the final batch of resource plan calls, so no operations were + // forced to abort, and no errors were returned from Plan. 
+ if stopCtx.Err() != nil { + diags = diags.Append(errors.New("execution halted")) + runningOp.Result = backend.OperationFailure + op.ReportResult(runningOp, diags) + return + } + + if mustConfirm { + var desc, query string + switch op.PlanMode { + case plans.DestroyMode: + if op.Workspace != "default" { + query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you really want to destroy all resources?" + } + desc = "OpenTofu will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + case plans.RefreshOnlyMode: + if op.Workspace != "default" { + query = "Would you like to update the OpenTofu state for \"" + op.Workspace + "\" to reflect these detected changes?" + } else { + query = "Would you like to update the OpenTofu state to reflect these detected changes?" + } + desc = "OpenTofu will write these changes to the state without modifying any real infrastructure.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + default: + if op.Workspace != "default" { + query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you want to perform these actions?" + } + desc = "OpenTofu will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + // We'll show any accumulated warnings before we display the prompt, + // so the user can consider them when deciding how to answer. 
+ if len(diags) > 0 { + op.View.Diagnostics(diags) + diags = nil // reset so we won't show the same diagnostics again later + } + + v, err := op.UIIn.Input(stopCtx, &tofu.InputOpts{ + Id: "approve", + Query: "\n" + query, + Description: desc, + }) + if err != nil { + diags = diags.Append(fmt.Errorf("error asking for approval: %w", err)) + op.ReportResult(runningOp, diags) + return + } + if v != "yes" { + op.View.Cancelled(op.PlanMode) + runningOp.Result = backend.OperationFailure + return + } + } else { + // If we didn't ask for confirmation from the user, and they have + // included any failing checks in their configuration, then they + // will see a very confusing output after the apply operation + // completes. This is because all the diagnostics from the plan + // operation will now be shown alongside the diagnostics from the + // apply operation. For check diagnostics, the plan output is + // irrelevant and simple noise after the same set of checks have + // been executed again during the apply stage. As such, we are going + // to remove all diagnostics marked as check diagnostics at this + // stage, so we will only show the user the check results from the + // apply operation. + // + // Note, if we did ask for approval then we would have displayed the + // plan check results at that point which is useful as the user can + // use them to make a decision about whether to apply the changes. + // It's just that if we didn't ask for approval then showing the + // user the checks from the plan alongside the checks from the apply + // is needlessly confusing. 
+ var filteredDiags tfdiags.Diagnostics + for _, diag := range diags { + if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok && rule.Container.CheckableKind() == addrs.CheckableCheck { + continue + } + filteredDiags = filteredDiags.Append(diag) + } + diags = filteredDiags + } + } else { + plan = lr.Plan + if plan.Errored { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot apply incomplete plan", + "OpenTofu encountered an error when generating this plan, so it cannot be applied.", + )) + op.ReportResult(runningOp, diags) + return + } + for _, change := range plan.Changes.Resources { + if change.Action != plans.NoOp { + op.View.PlannedChange(change) + } + } + } + + // Set up our hook for continuous state updates + stateHook.StateMgr = opState + + // Start to apply in a goroutine so that we can be interrupted. + var applyState *states.State + var applyDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer close(doneCh) + log.Printf("[INFO] backend/local: apply calling Apply") + applyState, applyDiags = lr.Core.Apply(plan, lr.Config) + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { + return + } + diags = diags.Append(applyDiags) + + // Even on error with an empty state, the state value should not be nil. + // Return early here to prevent corrupting any existing state. + if diags.HasErrors() && applyState == nil { + log.Printf("[ERROR] backend/local: apply returned nil state") + op.ReportResult(runningOp, diags) + return + } + + // Store the final state + runningOp.State = applyState + err := statemgr.WriteAndPersist(opState, applyState, schemas) + if err != nil { + // Export the state file from the state manager and assign the new + // state. This is needed to preserve the existing serial and lineage. 
+ stateFile := statemgr.Export(opState) + if stateFile == nil { + stateFile = &statefile.File{} + } + stateFile.State = applyState + + diags = diags.Append(b.backupStateForError(stateFile, err, op.View)) + op.ReportResult(runningOp, diags) + return + } + + if applyDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + // If we've accumulated any warnings along the way then we'll show them + // here just before we show the summary and next steps. If we encountered + // errors then we would've returned early at some other point above. + op.View.Diagnostics(diags) +} + +// backupStateForError is called in a scenario where we're unable to persist the +// state for some reason, and will attempt to save a backup copy of the state +// to local disk to help the user recover. This is a "last ditch effort" sort +// of thing, so we really don't want to end up in this codepath; we should do +// everything we possibly can to get the state saved _somewhere_. +func (b *Local) backupStateForError(stateFile *statefile.File, err error, view views.Operation) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to save state", + fmt.Sprintf("Error saving state: %s", err), + )) + + local := statemgr.NewFilesystem("errored.tfstate", b.encryption) + writeErr := local.WriteStateForMigration(stateFile, true) + if writeErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local state file", + fmt.Sprintf("Error creating local state file for recovery: %s", writeErr), + )) + + // To avoid leaving the user with no state at all, our last resort + // is to print the JSON state out onto the terminal. This is an awful + // UX, so we should definitely avoid doing this if at all possible, + // but at least the user has _some_ path to recover if we end up + // here for some reason. 
+ if dumpErr := view.EmergencyDumpState(stateFile, b.encryption); dumpErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr), + )) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to persist state to backend", + stateWriteConsoleFallbackError, + )) + return diags + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to persist state to backend", + stateWriteBackedUpError, + )) + + return diags +} + +const stateWriteBackedUpError = `The error shown above has prevented OpenTofu from writing the updated state to the configured backend. To allow for recovery, the state has been written to the file "errored.tfstate" in the current working directory. + +Running "tofu apply" again at this point will create a forked state, making it harder to recover. + +To retry writing this state, use the following command: + tofu state push errored.tfstate +` + +const stateWriteConsoleFallbackError = `The errors shown above prevented OpenTofu from writing the updated state to +the configured backend and from creating a local backup file. As a fallback, +the raw state data is printed above as a JSON object. + +To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file called errored.tfstate, then run the following command: + tofu state push errored.tfstate +` + +const stateWriteFatalErrorFmt = `Failed to save state after apply. + +Error serializing state: %s + +A catastrophic error has prevented OpenTofu from persisting the state file or creating a backup. Unfortunately this means that the record of any resources created during this apply has been lost, and such resources may exist outside of OpenTofu's management. + +For resources that support import, it is possible to recover by manually importing each resource using its id from the target system. 
+ +This is a serious bug in OpenTofu and should be reported. +` diff --git a/pkg/backend/local/backend_apply_test.go b/pkg/backend/local/backend_apply_test.go new file mode 100644 index 00000000000..a8c6fb35055 --- /dev/null +++ b/pkg/backend/local/backend_apply_test.go @@ -0,0 +1,440 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "errors" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestLocal_applyBasic(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + "ami": cty.StringVal("bar"), + })} + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") 
+ } + + if !p.PlanResourceChangeCalled { + t.Fatal("diff should be called") + } + + if !p.ApplyResourceChangeCalled { + t.Fatal("apply should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] + ami = bar +`) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} +func TestLocal_applyCheck(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + "ami": cty.StringVal("bar"), + })} + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-check") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + if !p.PlanResourceChangeCalled { + t.Fatal("diff should be called") + } + + if !p.ApplyResourceChangeCalled { + t.Fatal("apply should be called") + } + + d := done(t) + if errOutput := d.Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } + + if stdOutput := d.Stdout(); strings.Contains(stdOutput, "Check block assertion known after apply") { + // As we are running an auto approved plan the warning that was + // generated during the plan should have been hidden. 
+ t.Fatalf("std output contained unexpected check output:\n%s", stdOutput) + } +} + +func TestLocal_applyEmptyDir(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", providers.ProviderSchema{}) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})} + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want error") + } + + if p.ApplyResourceChangeCalled { + t.Fatal("apply should not be called") + } + + if _, err := os.Stat(b.StateOutPath); err == nil { + t.Fatal("should not exist") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: No configuration files"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_applyEmptyDirDestroy(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", providers.ProviderSchema{}) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + op.PlanMode = plans.DestroyMode + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("apply operation failed") + } + + if p.ApplyResourceChangeCalled { + t.Fatal("apply should not be called") + } + + checkState(t, b.StateOutPath, ``) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_applyError(t *testing.T) { + b := TestLocal(t) + + schema := 
providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p := TestLocalProvider(t, b, "test", schema) + + var lock sync.Mutex + errored := false + p.ApplyResourceChangeFn = func( + r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + + lock.Lock() + defer lock.Unlock() + var diags tfdiags.Diagnostics + + ami := r.Config.GetAttr("ami").AsString() + if !errored && ami == "error" { + errored = true + diags = diags.Append(errors.New("ami error")) + return providers.ApplyResourceChangeResponse{ + Diagnostics: diags, + } + } + return providers.ApplyResourceChangeResponse{ + Diagnostics: diags, + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "ami": cty.StringVal("bar"), + }), + } + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-error") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want failure") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + ami = bar + `) + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: ami error"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_applyBackendFail(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + "ami": 
cty.StringVal("bar"), + }), + Diagnostics: tfdiags.Diagnostics.Append(nil, errors.New("error before backend failure")), + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get current working directory") + } + err = os.Chdir(filepath.Dir(b.StatePath)) + if err != nil { + t.Fatalf("failed to set temporary working directory") + } + defer os.Chdir(wd) + + op, configCleanup, done := testOperationApply(t, wd+"/testdata/apply") + defer configCleanup() + + b.Backend = &backendWithFailingState{} + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if run.Result == backend.OperationSuccess { + t.Fatalf("apply succeeded; want error") + } + + diagErr := output.Stderr() + + if !strings.Contains(diagErr, "Error saving state: fake failure") { + t.Fatalf("missing \"fake failure\" message in diags:\n%s", diagErr) + } + + if !strings.Contains(diagErr, "error before backend failure") { + t.Fatalf("missing 'error before backend failure' diagnostic from apply") + } + + // The fallback behavior should've created a file errored.tfstate in the + // current working directory. 
	checkState(t, "errored.tfstate", `
test_instance.foo: (tainted)
  ID = yes
  provider = provider["registry.opentofu.org/hashicorp/test"]
  ami = bar
	`)

	// the backend should be unlocked after a run
	assertBackendStateUnlocked(t, b)
}

// TestLocal_applyRefreshFalse verifies that when refreshing is skipped, the
// provider's ReadResource is never invoked even though prior state exists.
func TestLocal_applyRefreshFalse(t *testing.T) {
	b := TestLocal(t)

	p := TestLocalProvider(t, b, "test", planFixtureSchema())
	testStateFile(t, b.StatePath, testPlanState())

	op, configCleanup, done := testOperationApply(t, "./testdata/plan")
	defer configCleanup()

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("bad: %s", err)
	}
	<-run.Done()
	if run.Result != backend.OperationSuccess {
		t.Fatalf("plan operation failed")
	}

	if p.ReadResourceCalled {
		t.Fatal("ReadResource should not be called")
	}

	if errOutput := done(t).Stderr(); errOutput != "" {
		t.Fatalf("unexpected error output:\n%s", errOutput)
	}
}

// backendWithFailingState embeds Local but returns a state manager whose
// WriteState always fails, for exercising the errored-state fallback path.
type backendWithFailingState struct {
	Local
}

func (b *backendWithFailingState) StateMgr(name string) (statemgr.Full, error) {
	return &failingState{
		statemgr.NewFilesystem("failing-state.tfstate", encryption.StateEncryptionDisabled()),
	}, nil
}

// failingState is a filesystem state manager whose writes always fail.
type failingState struct {
	*statemgr.Filesystem
}

func (s failingState) WriteState(state *states.State) error {
	return errors.New("fake failure")
}

// testOperationApply builds an apply backend.Operation for the given config
// directory, returning the operation, a config cleanup func, and a func that
// finalizes and returns the captured terminal output.
func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) {
	t.Helper()

	_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests")

	streams, done := terminal.StreamsForTesting(t)
	view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams))

	// Many of our tests use an overridden "test" provider that's just in-memory
	// inside the test process, not a separate plugin on disk.
	depLocks := depsfile.NewLocks()
	depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/test"))

	return &backend.Operation{
		Type:            backend.OperationTypeApply,
		Encryption:      encryption.Disabled(),
		ConfigDir:       configDir,
		ConfigLoader:    configLoader,
		StateLocker:     clistate.NewNoopLocker(),
		View:            view,
		DependencyLocks: depLocks,
	}, configCleanup, done
}

// applyFixtureSchema returns a schema suitable for processing the
// configuration in testdata/apply . This schema should be
// assigned to a mock provider named "test".
func applyFixtureSchema() providers.ProviderSchema {
	return providers.ProviderSchema{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"ami": {Type: cty.String, Optional: true},
						"id":  {Type: cty.String, Computed: true},
					},
				},
			},
		},
	}
}

// TestApply_applyCanceledAutoApprove verifies that canceling the context of an
// auto-approved apply halts execution and fails the operation, reporting
// "execution halted" on stderr.
func TestApply_applyCanceledAutoApprove(t *testing.T) {
	b := TestLocal(t)

	TestLocalProvider(t, b, "test", applyFixtureSchema())

	op, configCleanup, done := testOperationApply(t, "./testdata/apply")
	op.AutoApprove = true
	defer configCleanup()
	defer func() {
		output := done(t)
		if !strings.Contains(output.Stderr(), "execution halted") {
			t.Fatal("expected 'execution halted', got:\n", output.All())
		}
	}()

	// Cancel the operation via the test hook as soon as plan/apply starts.
	ctx, cancel := context.WithCancel(context.Background())
	testHookStopPlanApply = cancel
	defer func() {
		testHookStopPlanApply = nil
	}()

	run, err := b.Operation(ctx, op)
	if err != nil {
		t.Fatalf("error starting operation: %v", err)
	}

	<-run.Done()
	if run.Result == backend.OperationSuccess {
		t.Fatal("expected apply operation to fail")
	}

}
diff --git a/pkg/backend/local/backend_local.go b/pkg/backend/local/backend_local.go
new file mode 100644
index 00000000000..5e7eed971ce
--- /dev/null
+++ b/pkg/backend/local/backend_local.go
@@ -0,0 +1,548 @@
// Copyright (c) The OpenTofu Authors
// 
SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package local

import (
	"context"
	"fmt"
	"log"
	"sort"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/configs/configload"
	"github.com/kubegems/opentofu/pkg/plans/planfile"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/kubegems/opentofu/pkg/tofu"
	"github.com/kubegems/opentofu/pkg/tofumigrate"
)

// backend.Local implementation.
//
// LocalRun prepares a backend.LocalRun for the given operation without
// running input gathering or validation (op.Type is forced to Invalid, which
// the shared localRun uses as the signal to skip those steps). On success the
// returned state manager is still holding the workspace lock; the caller is
// responsible for unlocking it.
func (b *Local) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) {
	// Make sure the type is invalid. We use this as a way to know not
	// to ask for input/validate. We're modifying this through a pointer,
	// so we're mutating an object that belongs to the caller here, which
	// seems bad but we're preserving it for now until we have time to
	// properly design this API, vs. just preserving whatever it currently
	// happens to do.
	op.Type = backend.OperationTypeInvalid

	op.StateLocker = op.StateLocker.WithContext(context.Background())

	// The config snapshot returned by localRun is not needed here.
	lr, _, stateMgr, diags := b.localRun(op)
	return lr, stateMgr, diags
}

// localRun is the shared implementation behind LocalRun and the operation
// entry points: it acquires and refreshes the workspace state (unlocking
// again if it returns with errors), then populates a backend.LocalRun either
// from a saved plan file or directly from the working directory.
func (b *Local) localRun(op *backend.Operation) (*backend.LocalRun, *configload.Snapshot, statemgr.Full, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Get the latest state.
+ log.Printf("[TRACE] backend/local: requesting state manager for workspace %q", op.Workspace) + s, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, nil, diags + } + log.Printf("[TRACE] backend/local: requesting state lock for workspace %q", op.Workspace) + if diags := op.StateLocker.Lock(s, op.Type.String()); diags.HasErrors() { + return nil, nil, nil, diags + } + + defer func() { + // If we're returning with errors, and thus not producing a valid + // context, we'll want to avoid leaving the workspace locked. + if diags.HasErrors() { + diags = diags.Append(op.StateLocker.Unlock()) + } + }() + + log.Printf("[TRACE] backend/local: reading remote state for workspace %q", op.Workspace) + if err := s.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, nil, diags + } + + ret := &backend.LocalRun{} + + // Initialize our context options + var coreOpts tofu.ContextOpts + if v := b.ContextOpts; v != nil { + coreOpts = *v + } + coreOpts.UIInput = op.UIIn + coreOpts.Hooks = op.Hooks + coreOpts.Encryption = op.Encryption + + var ctxDiags tfdiags.Diagnostics + var configSnap *configload.Snapshot + if op.PlanFile.IsCloud() { + diags = diags.Append(fmt.Errorf("error: using a saved cloud plan when executing OpenTofu locally is not supported")) + return nil, nil, nil, diags + } + + if lp, ok := op.PlanFile.Local(); ok { + var stateMeta *statemgr.SnapshotMeta + // If the statemgr implements our optional PersistentMeta interface then we'll + // additionally verify that the state snapshot in the plan file has + // consistent metadata, as an additional safety check. 
+ if sm, ok := s.(statemgr.PersistentMeta); ok { + m := sm.StateSnapshotMeta() + stateMeta = &m + } + log.Printf("[TRACE] backend/local: populating backend.LocalRun from plan file") + ret, configSnap, ctxDiags = b.localRunForPlanFile(op, lp, ret, &coreOpts, stateMeta) + if ctxDiags.HasErrors() { + diags = diags.Append(ctxDiags) + return nil, nil, nil, diags + } + + // Write sources into the cache of the main loader so that they are + // available if we need to generate diagnostic message snippets. + op.ConfigLoader.ImportSourcesFromSnapshot(configSnap) + } else { + log.Printf("[TRACE] backend/local: populating backend.LocalRun for current working directory") + ret, configSnap, ctxDiags = b.localRunDirect(op, ret, &coreOpts, s) + } + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + return nil, nil, nil, diags + } + + // If we have an operation, then we automatically do the input/validate + // here since every option requires this. + if op.Type != backend.OperationTypeInvalid { + // If input asking is enabled, then do that + if op.PlanFile == nil && b.OpInput { + mode := tofu.InputModeProvider + + log.Printf("[TRACE] backend/local: requesting interactive input, if necessary") + inputDiags := ret.Core.Input(ret.Config, mode) + diags = diags.Append(inputDiags) + if inputDiags.HasErrors() { + return nil, nil, nil, diags + } + } + + // If validation is enabled, validate + if b.OpValidation { + log.Printf("[TRACE] backend/local: running validation operation") + validateDiags := ret.Core.Validate(ret.Config) + diags = diags.Append(validateDiags) + } + } + + return ret, configSnap, s, diags +} + +func (b *Local) localRunDirect(op *backend.Operation, run *backend.LocalRun, coreOpts *tofu.ContextOpts, s statemgr.Full) (*backend.LocalRun, *configload.Snapshot, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Load the configuration using the caller-provided configuration loader. 
+ config, configSnap, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir, op.RootCall) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + run.Config = config + + if errs := config.VerifyDependencySelections(op.DependencyLocks); len(errs) > 0 { + var buf strings.Builder + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", err.Error()) + } + var suggestion string + switch { + case op.DependencyLocks == nil: + // If we get here then it suggests that there's a caller that we + // didn't yet update to populate DependencyLocks, which is a bug. + suggestion = "This run has no dependency lock information provided at all, which is a bug in OpenTofu; please report it!" + case op.DependencyLocks.Empty(): + suggestion = "To make the initial dependency selections that will initialize the dependency lock file, run:\n tofu init" + default: + suggestion = "To update the locked dependency selections to match a changed configuration, run:\n tofu init -upgrade" + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Inconsistent dependency lock file", + fmt.Sprintf( + "The following dependency selections recorded in the lock file are inconsistent with the current configuration:%s\n\n%s", + buf.String(), suggestion, + ), + )) + } + + var rawVariables map[string]backend.UnparsedVariableValue + if op.AllowUnsetVariables { + // Rather than prompting for input, we'll just stub out the required + // but unset variables with unknown values to represent that they are + // placeholders for values the user would need to provide for other + // operations. + rawVariables = b.stubUnsetRequiredVariables(op.Variables, config.Module.Variables) + } else { + // If interactive input is enabled, we might gather some more variable + // values through interactive prompts. + // TODO: Need to route the operation context through into here, so that + // the interactive prompts can be sensitive to its timeouts/etc. 
+ rawVariables = b.interactiveCollectVariables(context.TODO(), op.Variables, config.Module.Variables, op.UIIn) + } + + variables, varDiags := backend.ParseVariableValues(rawVariables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } + + planOpts := &tofu.PlanOpts{ + Mode: op.PlanMode, + Targets: op.Targets, + ForceReplace: op.ForceReplace, + SetVariables: variables, + SkipRefresh: op.Type != backend.OperationTypeRefresh && !op.PlanRefresh, + GenerateConfigPath: op.GenerateConfigOut, + } + run.PlanOpts = planOpts + + // For a "direct" local run, the input state is the most recently stored + // snapshot, from the previous run. + state := s.State() + if state != nil { + migratedState, migrateDiags := tofumigrate.MigrateStateProviderAddresses(config, state) + diags = diags.Append(migrateDiags) + if migrateDiags.HasErrors() { + return nil, nil, diags + } + state = migratedState + } + run.InputState = state + + tfCtx, moreDiags := tofu.NewContext(coreOpts) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, nil, diags + } + run.Core = tfCtx + return run, configSnap, diags +} + +func (b *Local) localRunForPlanFile(op *backend.Operation, pf *planfile.Reader, run *backend.LocalRun, coreOpts *tofu.ContextOpts, currentStateMeta *statemgr.SnapshotMeta) (*backend.LocalRun, *configload.Snapshot, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + const errSummary = "Invalid plan file" + + // A plan file has a snapshot of configuration embedded inside it, which + // is used instead of whatever configuration might be already present + // in the filesystem. 
+ snap, err := pf.ReadConfigSnapshot() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + errSummary, + fmt.Sprintf("Failed to read configuration snapshot from plan file: %s.", err), + )) + return nil, snap, diags + } + + plan, err := pf.ReadPlan() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + errSummary, + fmt.Sprintf("Failed to read plan from plan file: %s.", err), + )) + return nil, snap, diags + } + // When we're applying a saved plan, we populate Plan instead of PlanOpts, + // because a plan object incorporates the subset of data from PlanOps that + // we need to apply the plan. + run.Plan = plan + + subCall := op.RootCall.WithVariables(func(variable *configs.Variable) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + name := variable.Name + v, ok := plan.VariableValues[name] + if !ok { + if variable.Required() { + // This should not happen... + return cty.DynamicVal, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing plan variable " + variable.Name, + }) + } + return variable.Default, nil + } + + parsed, parsedErr := v.Decode(cty.DynamicPseudoType) + if parsedErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: parsedErr.Error(), + }) + } + return parsed, diags + }) + + loader := configload.NewLoaderFromSnapshot(snap) + config, configDiags := loader.LoadConfig(snap.Modules[""].Dir, subCall) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, snap, diags + } + run.Config = config + + // NOTE: We're intentionally comparing the current locks with the + // configuration snapshot, rather than the lock snapshot in the plan file, + // because it's the current locks which dictate our plugin selections + // in coreOpts below. However, we'll also separately check that the + // plan file has identical locked plugins below, and thus we're effectively + // checking consistency with both here. 
+ if errs := config.VerifyDependencySelections(op.DependencyLocks); len(errs) > 0 { + var buf strings.Builder + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", err.Error()) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Inconsistent dependency lock file", + fmt.Sprintf( + "The following dependency selections recorded in the lock file are inconsistent with the configuration in the saved plan:%s\n\nA saved plan can be applied only to the same configuration it was created from. Create a new plan from the updated configuration.", + buf.String(), + ), + )) + } + + // This check is an important complement to the check above: the locked + // dependencies in the configuration must match the configuration, and + // the locked dependencies in the plan must match the locked dependencies + // in the configuration, and so transitively we ensure that the locked + // dependencies in the plan match the configuration too. However, this + // additionally catches any inconsistency between the two sets of locks + // even if they both happen to be valid per the current configuration, + // which is one of several ways we try to catch the mistake of applying + // a saved plan file in a different place than where we created it. + depLocksFromPlan, moreDiags := pf.ReadDependencyLocks() + diags = diags.Append(moreDiags) + if depLocksFromPlan != nil && !op.DependencyLocks.Equal(depLocksFromPlan) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Inconsistent dependency lock file", + "The given plan file was created with a different set of external dependency selections than the current configuration. A saved plan can be applied only to the same configuration it was created from.\n\nCreate a new plan from the updated configuration.", + )) + } + + // A plan file also contains a snapshot of the prior state the changes + // are intended to apply to. 
+ priorStateFile, err := pf.ReadStateFile() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + errSummary, + fmt.Sprintf("Failed to read prior state snapshot from plan file: %s.", err), + )) + return nil, snap, diags + } + + if currentStateMeta != nil { + // If the caller sets this, we require that the stored prior state + // has the same metadata, which is an extra safety check that nothing + // has changed since the plan was created. (All of the "real-world" + // state manager implementations support this, but simpler test backends + // may not.) + + // Because the plan always contains a state, even if it is empty, the + // first plan to be applied will have empty snapshot metadata. In this + // case we compare only the serial in order to provide a more correct + // error. + firstPlan := priorStateFile.Lineage == "" && priorStateFile.Serial == 0 + + switch { + case !firstPlan && priorStateFile.Lineage != currentStateMeta.Lineage: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan does not match the given state", + "The given plan file can not be applied because it was created from a different state lineage.", + )) + + case priorStateFile.Serial != currentStateMeta.Serial: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan is stale", + "The given plan file can no longer be applied because the state was changed by another operation after the plan was created.", + )) + } + } + // When we're applying a saved plan, the input state is the "prior state" + // recorded in the plan, which incorporates the result of all of the + // refreshing we did while building the plan. 
+ run.InputState = priorStateFile.State + + tfCtx, moreDiags := tofu.NewContext(coreOpts) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, nil, diags + } + run.Core = tfCtx + return run, snap, diags +} + +// interactiveCollectVariables attempts to complete the given existing +// map of variables by interactively prompting for any variables that are +// declared as required but not yet present. +// +// If interactive input is disabled for this backend instance then this is +// a no-op. If input is enabled but fails for some reason, the resulting +// map will be incomplete. For these reasons, the caller must still validate +// that the result is complete and valid. +// +// This function does not modify the map given in "existing", but may return +// it unchanged if no modifications are required. If modifications are required, +// the result is a new map with all the elements from "existing" plus +// additional elements as appropriate. +// +// Interactive prompting is a "best effort" thing for first-time user UX and +// not something we expect folks to be relying on for routine use. OpenTofu +// is primarily a non-interactive tool, and so we prefer to report in error +// messages that variables are not set rather than reporting that input failed: +// the primary resolution to missing variables is to provide them by some other +// means. 
+func (b *Local) interactiveCollectVariables(ctx context.Context, existing map[string]backend.UnparsedVariableValue, vcs map[string]*configs.Variable, uiInput tofu.UIInput) map[string]backend.UnparsedVariableValue { + var needed []string + if b.OpInput && uiInput != nil { + for name, vc := range vcs { + if !vc.Required() { + continue // We only prompt for required variables + } + if _, exists := existing[name]; !exists { + needed = append(needed, name) + } + } + } else { + log.Print("[DEBUG] backend/local: Skipping interactive prompts for variables because input is disabled") + } + if len(needed) == 0 { + return existing + } + + log.Printf("[DEBUG] backend/local: will prompt for input of unset required variables %s", needed) + + // If we get here then we're planning to prompt for at least one additional + // variable's value. + sort.Strings(needed) // prompt in lexical order + ret := make(map[string]backend.UnparsedVariableValue, len(vcs)) + for k, v := range existing { + ret[k] = v + } + for _, name := range needed { + vc := vcs[name] + rawValue, err := uiInput.Input(ctx, &tofu.InputOpts{ + Id: fmt.Sprintf("var.%s", name), + Query: fmt.Sprintf("var.%s", name), + Description: vc.Description, + Secret: vc.Sensitive, + }) + if err != nil { + // Since interactive prompts are best-effort, we'll just continue + // here and let subsequent validation report this as a variable + // not specified. + log.Printf("[WARN] backend/local: Failed to request user input for variable %q: %s", name, err) + continue + } + ret[name] = unparsedInteractiveVariableValue{Name: name, RawValue: rawValue} + } + return ret +} + +// stubUnsetVariables ensures that all required variables defined in the +// configuration exist in the resulting map, by adding new elements as necessary. 
//
// The stubbed value of any additions will be an unknown variable conforming
// to the variable's configured type constraint, meaning that no particular
// value is known and that one must be provided by the user in order to get
// a complete result.
//
// Unset optional attributes (those with default values) will not be populated
// by this function, under the assumption that a later step will handle those.
// In this sense, stubUnsetRequiredVariables is essentially a non-interactive,
// non-error-producing variant of interactiveCollectVariables that creates
// placeholders for values the user would be prompted for interactively on
// other operations.
//
// This function should be used only in situations where variable values
// will not be directly used and the variables map is being constructed only
// to produce a complete OpenTofu context for some ancillary functionality
// like "tofu console", "tofu state ...", etc.
//
// This function is guaranteed not to modify the given map, but it may return
// the given map unchanged if no additions are required. If additions are
// required then the result will be a new map containing everything in the
// given map plus additional elements.
func (b *Local) stubUnsetRequiredVariables(existing map[string]backend.UnparsedVariableValue, vcs map[string]*configs.Variable) map[string]backend.UnparsedVariableValue {
	// First pass: detect whether any required variable is absent, so the
	// common all-present case can return the caller's map unmodified.
	var missing bool // Do we need to add anything?
	for name, vc := range vcs {
		if !vc.Required() {
			continue // We only stub required variables
		}
		if _, exists := existing[name]; !exists {
			missing = true
		}
	}
	if !missing {
		return existing
	}

	// If we get down here then there's at least one variable value to add.
	ret := make(map[string]backend.UnparsedVariableValue, len(vcs))
	for k, v := range existing {
		ret[k] = v
	}
	// Second pass: stub each absent required variable with an unknown value
	// of its declared type.
	for name, vc := range vcs {
		if !vc.Required() {
			continue
		}
		if _, exists := existing[name]; !exists {
			ret[name] = unparsedUnknownVariableValue{Name: name, WantType: vc.Type}
		}
	}
	return ret
}

// unparsedInteractiveVariableValue is a raw string captured from an
// interactive prompt, parsed on demand using the variable's parsing mode.
type unparsedInteractiveVariableValue struct {
	Name, RawValue string
}

var _ backend.UnparsedVariableValue = unparsedInteractiveVariableValue{}

func (v unparsedInteractiveVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	val, valDiags := mode.Parse(v.Name, v.RawValue)
	diags = diags.Append(valDiags)
	if diags.HasErrors() {
		return nil, diags
	}
	return &tofu.InputValue{
		Value:      val,
		SourceType: tofu.ValueFromInput,
	}, diags
}

// unparsedUnknownVariableValue is a placeholder for a required variable that
// was not provided; it always parses to an unknown value of WantType.
type unparsedUnknownVariableValue struct {
	Name     string
	WantType cty.Type
}

var _ backend.UnparsedVariableValue = unparsedUnknownVariableValue{}

func (v unparsedUnknownVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) {
	return &tofu.InputValue{
		Value:      cty.UnknownVal(v.WantType),
		SourceType: tofu.ValueFromInput,
	}, nil
}
diff --git a/pkg/backend/local/backend_local_test.go b/pkg/backend/local/backend_local_test.go
new file mode 100644
index 00000000000..168b1723eee
--- /dev/null
+++ b/pkg/backend/local/backend_local_test.go
@@ -0,0 +1,280 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package local

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/command/arguments"
	"github.com/kubegems/opentofu/pkg/command/clistate"
	"github.com/kubegems/opentofu/pkg/command/views"
	"github.com/kubegems/opentofu/pkg/configs/configload"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/encryption"
	"github.com/kubegems/opentofu/pkg/initwd"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/plans/planfile"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/states/statefile"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
	"github.com/kubegems/opentofu/pkg/terminal"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/kubegems/opentofu/pkg/tofu"
)

// TestLocalRun verifies the happy path: LocalRun succeeds against an empty
// configuration and leaves the workspace state locked for the caller.
func TestLocalRun(t *testing.T) {
	configDir := "./testdata/empty"
	b := TestLocal(t)

	_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests")
	defer configCleanup()

	streams, _ := terminal.StreamsForTesting(t)
	view := views.NewView(streams)
	stateLocker := clistate.NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view))

	op := &backend.Operation{
		ConfigDir:    configDir,
		ConfigLoader: configLoader,
		Workspace:    backend.DefaultStateName,
		StateLocker:  stateLocker,
	}

	_, _, diags := b.LocalRun(op)
	if diags.HasErrors() {
		t.Fatalf("unexpected error: %s", diags.Err().Error())
	}

	// LocalRun() retains a lock on success
	assertBackendStateLocked(t, b)
}

// TestLocalRun_error verifies that when state refresh fails, LocalRun returns
// with errors and releases the workspace lock.
func TestLocalRun_error(t *testing.T) {
	configDir := "./testdata/invalid"
	b := TestLocal(t)

	// This backend will return an error when asked to RefreshState, which
	// should then cause LocalRun to return with the state unlocked.
+ b.Backend = backendWithStateStorageThatFailsRefresh{} + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLocker := clistate.NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + Workspace: backend.DefaultStateName, + StateLocker: stateLocker, + } + + _, _, diags := b.LocalRun(op) + if !diags.HasErrors() { + t.Fatal("unexpected success") + } + + // LocalRun() unlocks the state on failure + assertBackendStateUnlocked(t, b) +} + +func TestLocalRun_cloudPlan(t *testing.T) { + configDir := "./testdata/apply" + b := TestLocal(t) + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + planPath := "./testdata/plan-bookmark/bookmark.json" + + planFile, err := planfile.OpenWrapped(planPath, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error reading planfile: %s", err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLocker := clistate.NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanFile: planFile, + Workspace: backend.DefaultStateName, + StateLocker: stateLocker, + } + + _, _, diags := b.LocalRun(op) + if !diags.HasErrors() { + t.Fatal("unexpected success") + } + + // LocalRun() unlocks the state on failure + assertBackendStateUnlocked(t, b) +} + +func TestLocalRun_stalePlan(t *testing.T) { + configDir := "./testdata/apply" + b := TestLocal(t) + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + // Write an empty state file with serial 3 + sf, err := os.Create(b.StatePath) + if err != nil { + t.Fatalf("unexpected error creating 
state file %s: %s", b.StatePath, err) + } + if err := statefile.Write(statefile.New(states.NewState(), "boop", 3), sf, encryption.StateEncryptionDisabled()); err != nil { + t.Fatalf("unexpected error writing state file: %s", err) + } + + // Refresh the state + sm, err := b.StateMgr("") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := sm.RefreshState(); err != nil { + t.Fatalf("unexpected error refreshing state: %s", err) + } + + // Create a minimal plan which also has state file serial 2, so is stale + backendConfig := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NullVal(cty.String), + "workspace_dir": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type()) + if err != nil { + t.Fatal(err) + } + plan := &plans.Plan{ + UIMode: plans.NormalMode, + Changes: plans.NewChanges(), + Backend: plans.Backend{ + Type: "local", + Config: backendConfigRaw, + }, + PrevRunState: states.NewState(), + PriorState: states.NewState(), + } + prevStateFile := statefile.New(plan.PrevRunState, "boop", 1) + stateFile := statefile.New(plan.PriorState, "boop", 2) + + // Roundtrip through serialization as expected by the operation + outDir := t.TempDir() + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + planfileArgs := planfile.CreateArgs{ + ConfigSnapshot: configload.NewEmptySnapshot(), + PreviousRunStateFile: prevStateFile, + StateFile: stateFile, + Plan: plan, + } + if err := planfile.Create(planPath, planfileArgs, encryption.PlanEncryptionDisabled()); err != nil { + t.Fatalf("unexpected error writing planfile: %s", err) + } + planFile, err := planfile.OpenWrapped(planPath, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error reading planfile: %s", err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLocker := clistate.NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) + + op := 
&backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanFile: planFile, + Workspace: backend.DefaultStateName, + StateLocker: stateLocker, + } + + _, _, diags := b.LocalRun(op) + if !diags.HasErrors() { + t.Fatal("unexpected success") + } + + // LocalRun() unlocks the state on failure + assertBackendStateUnlocked(t, b) +} + +type backendWithStateStorageThatFailsRefresh struct { +} + +var _ backend.Backend = backendWithStateStorageThatFailsRefresh{} + +func (b backendWithStateStorageThatFailsRefresh) StateMgr(workspace string) (statemgr.Full, error) { + return &stateStorageThatFailsRefresh{}, nil +} + +func (b backendWithStateStorageThatFailsRefresh) ConfigSchema() *configschema.Block { + return &configschema.Block{} +} + +func (b backendWithStateStorageThatFailsRefresh) PrepareConfig(in cty.Value) (cty.Value, tfdiags.Diagnostics) { + return in, nil +} + +func (b backendWithStateStorageThatFailsRefresh) Configure(cty.Value) tfdiags.Diagnostics { + return nil +} + +func (b backendWithStateStorageThatFailsRefresh) DeleteWorkspace(name string, force bool) error { + return fmt.Errorf("unimplemented") +} + +func (b backendWithStateStorageThatFailsRefresh) Workspaces() ([]string, error) { + return []string{"default"}, nil +} + +type stateStorageThatFailsRefresh struct { + locked bool +} + +func (s *stateStorageThatFailsRefresh) Lock(info *statemgr.LockInfo) (string, error) { + if s.locked { + return "", fmt.Errorf("already locked") + } + s.locked = true + return "locked", nil +} + +func (s *stateStorageThatFailsRefresh) Unlock(id string) error { + if !s.locked { + return fmt.Errorf("not locked") + } + s.locked = false + return nil +} + +func (s *stateStorageThatFailsRefresh) State() *states.State { + return nil +} + +func (s *stateStorageThatFailsRefresh) GetRootOutputValues() (map[string]*states.OutputValue, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (s *stateStorageThatFailsRefresh) WriteState(*states.State) error { + return 
fmt.Errorf("unimplemented") +} + +func (s *stateStorageThatFailsRefresh) RefreshState() error { + return fmt.Errorf("intentionally failing for testing purposes") +} + +func (s *stateStorageThatFailsRefresh) PersistState(schemas *tofu.Schemas) error { + return fmt.Errorf("unimplemented") +} diff --git a/pkg/backend/local/backend_plan.go b/pkg/backend/local/backend_plan.go new file mode 100644 index 00000000000..b1b31cb77ee --- /dev/null +++ b/pkg/backend/local/backend_plan.go @@ -0,0 +1,256 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "fmt" + "io" + "log" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/genconfig" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func (b *Local) opPlan( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + + log.Printf("[INFO] backend/local: starting Plan operation") + + var diags tfdiags.Diagnostics + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't re-plan a saved plan", + "The plan command was given a saved plan file as its input. This command generates "+ + "a new plan, and so it requires a configuration directory as its argument.", + )) + op.ReportResult(runningOp, diags) + return + } + + // Local planning requires a config, unless we're planning to destroy. 
+ if op.PlanMode != plans.DestroyMode && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Plan requires configuration to be present. Planning without a configuration would "+ + "mark everything for destruction, which is normally not what is desired. If you "+ + "would like to destroy everything, run plan with the -destroy option. Otherwise, "+ + "create a OpenTofu configuration file (.tf file) and try again.", + )) + op.ReportResult(runningOp, diags) + return + } + + if len(op.GenerateConfigOut) > 0 { + if op.PlanMode != plans.NormalMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid generate-config-out flag", + "Config can only be generated during a normal plan operation, and not during a refresh-only or destroy plan.")) + op.ReportResult(runningOp, diags) + return + } + + diags = diags.Append(genconfig.ValidateTargetFile(op.GenerateConfigOut)) + if diags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + } + + if b.ContextOpts == nil { + b.ContextOpts = new(tofu.ContextOpts) + } + + // Get our context + lr, configSnap, opState, ctxDiags := b.localRun(op) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + // the state was locked during successful context creation; unlock the state + // when the operation completes + defer func() { + diags := op.StateLocker.Unlock() + if diags.HasErrors() { + op.View.Diagnostics(diags) + runningOp.Result = backend.OperationFailure + } + }() + + // Since planning doesn't immediately change the persisted state, the + // resulting state is always just the input state. 
+ runningOp.State = lr.InputState + + // Perform the plan in a goroutine so we can be interrupted + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer close(doneCh) + log.Printf("[INFO] backend/local: plan calling Plan") + plan, planDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts) + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) { + // If we get in here then the operation was cancelled, which is always + // considered to be a failure. + log.Printf("[INFO] backend/local: plan operation was force-cancelled by interrupt") + runningOp.Result = backend.OperationFailure + return + } + log.Printf("[INFO] backend/local: plan operation completed") + + // NOTE: We intentionally don't stop here on errors because we always want + // to try to present a partial plan report and, if the user chose to, + // generate a partial saved plan file for external analysis. + diags = diags.Append(planDiags) + + // Even if there are errors we need to handle anything that may be + // contained within the plan, so only exit if there is no data at all. + if plan == nil { + runningOp.PlanEmpty = true + op.ReportResult(runningOp, diags) + return + } + + // Record whether this plan includes any side-effects that could be applied. + runningOp.PlanEmpty = !plan.CanApply() + + // Save the plan to disk + if path := op.PlanOutPath; path != "" { + if op.PlanOutBackend == nil { + // This is always a bug in the operation caller; it's not valid + // to set PlanOutPath without also setting PlanOutBackend. 
+ diags = diags.Append(fmt.Errorf( + "PlanOutPath set without also setting PlanOutBackend (this is a bug in OpenTofu)"), + ) + op.ReportResult(runningOp, diags) + return + } + plan.Backend = *op.PlanOutBackend + + // We may have updated the state in the refresh step above, but we + // will freeze that updated state in the plan file for now and + // only write it if this plan is subsequently applied. + plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.PriorState) + + // We also include a file containing the state as it existed before + // we took any action at all, but this one isn't intended to ever + // be saved to the backend (an equivalent snapshot should already be + // there) and so we just use a stub state file header in this case. + // NOTE: This won't be exactly identical to the latest state snapshot + // in the backend because it's still been subject to state upgrading + // to make it consumable by the current OpenTofu version, and + // intentionally doesn't preserve the header info. + prevStateFile := &statefile.File{ + State: plan.PrevRunState, + } + + log.Printf("[INFO] backend/local: writing plan output to: %s", path) + err := planfile.Create(path, planfile.CreateArgs{ + ConfigSnapshot: configSnap, + PreviousRunStateFile: prevStateFile, + StateFile: plannedStateFile, + Plan: plan, + DependencyLocks: op.DependencyLocks, + }, op.Encryption.Plan()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write plan file", + fmt.Sprintf("The plan file could not be written: %s.", err), + )) + op.ReportResult(runningOp, diags) + return + } + } + + // Render the plan, if we produced one. + // (This might potentially be a partial plan with Errored set to true) + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + // Write out any generated config, before we render the plan. 
+ wroteConfig, moreDiags := maybeWriteGeneratedConfig(plan, op.GenerateConfigOut) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + op.View.Plan(plan, schemas) + + // If we've accumulated any diagnostics along the way then we'll show them + // here just before we show the summary and next steps. This can potentially + // include errors, because we intentionally try to show a partial plan + // above even if OpenTofu Core encountered an error partway through + // creating it. + op.ReportResult(runningOp, diags) + + if !runningOp.PlanEmpty { + if wroteConfig { + op.View.PlanNextStep(op.PlanOutPath, op.GenerateConfigOut) + } else { + op.View.PlanNextStep(op.PlanOutPath, "") + } + } +} + +func maybeWriteGeneratedConfig(plan *plans.Plan, out string) (wroteConfig bool, diags tfdiags.Diagnostics) { + if genconfig.ShouldWriteConfig(out) { + diags := genconfig.ValidateTargetFile(out) + if diags.HasErrors() { + return false, diags + } + + var writer io.Writer + for _, c := range plan.Changes.Resources { + change := genconfig.Change{ + Addr: c.Addr.String(), + GeneratedConfig: c.GeneratedConfig, + } + if c.Importing != nil { + change.ImportID = c.Importing.ID + } + + var moreDiags tfdiags.Diagnostics + writer, wroteConfig, moreDiags = change.MaybeWriteConfig(writer, out) + if moreDiags.HasErrors() { + return false, diags.Append(moreDiags) + } + } + } + + if wroteConfig { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Config generation is experimental", + "Generating configuration during import is currently experimental, and the generated configuration format may change in future versions.")) + } + + return wroteConfig, diags +} diff --git a/pkg/backend/local/backend_plan_test.go b/pkg/backend/local/backend_plan_test.go new file mode 100644 index 00000000000..4bf7f3cf016 --- /dev/null +++ b/pkg/backend/local/backend_plan_test.go @@ -0,0 +1,918 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestLocal_planBasic(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if !p.PlanResourceChangeCalled { + t.Fatal("PlanResourceChange should be called") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planInAutomation(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + + const msg = `You didn't use the -out option` + + // When we're "in automation" we omit certain text from the plan output. 
+ // However, the responsibility for this omission is in the view, so here we + // test for its presence while the "in automation" setting is false, to + // validate that we are calling the correct view method. + // + // Ideally this test would be replaced by a call-logging mock view, but + // that's future work. + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if output := done(t).Stdout(); !strings.Contains(output, msg) { + t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output) + } +} + +func TestLocal_planNoConfig(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", providers.ProviderSchema{}) + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if run.Result == backend.OperationSuccess { + t.Fatal("plan operation succeeded; want failure") + } + + if stderr := output.Stderr(); !strings.Contains(stderr, "No configuration files") { + t.Fatalf("bad: %s", stderr) + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +// This test validates the state lacking behavior when the inner call to +// Context() fails +func TestLocal_plan_context_error(t *testing.T) { + b := TestLocal(t) + + // This is an intentionally-invalid value to make tofu.NewContext fail + // when b.Operation calls it. + // NOTE: This test was originally using a provider initialization failure + // as its forced error condition, but tofu.NewContext is no longer + // responsible for checking that. 
Invalid parallelism is the last situation + // where tofu.NewContext can return error diagnostics, and arguably + // we should be validating this argument at the UI layer anyway, so perhaps + // in future we'll make tofu.NewContext never return errors and then + // this test will become redundant, because its purpose is specifically + // to test that we properly unlock the state if tofu.NewContext + // returns an error. + if b.ContextOpts == nil { + b.ContextOpts = &tofu.ContextOpts{} + } + b.ContextOpts.Parallelism = -1 + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + // we coerce a failure in Context() by omitting the provider schema + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationFailure { + t.Fatalf("plan operation succeeded") + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) + + if got, want := done(t).Stderr(), "Error: Invalid parallelism value"; !strings.Contains(got, want) { + t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want) + } +} + +func TestLocal_planOutputsChanged(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "changed"}, + }, cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "sensitive_before"}, + }, cty.StringVal("before"), true) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "sensitive_after"}, + }, cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "removed"}, // not present in the config fixture + }, 
cty.StringVal("before"), false) + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "unchanged"}, + }, cty.StringVal("before"), false) + // NOTE: This isn't currently testing the situation where the new + // value of an output is unknown, because to do that requires there to + // be at least one managed resource Create action in the plan and that + // would defeat the point of this test, which is to ensure that a + // plan containing only output changes is considered "non-empty". + // For now we're not too worried about testing the "new value is + // unknown" situation because that's already common for printing out + // resource changes and we already have many tests for that. + })) + outDir := t.TempDir() + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-outputs-changed") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if run.PlanEmpty { + t.Error("plan should not be empty") + } + + expectedOutput := strings.TrimSpace(` +Changes to Outputs: + + added = "after" + ~ changed = "before" -> "after" + - removed = "before" -> null + ~ sensitive_after = (sensitive value) + ~ sensitive_before = (sensitive value) + +You can apply this plan to save these new output values to the OpenTofu +state, without changing any real infrastructure. 
+`) + + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Errorf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) + } +} + +// Module outputs should not cause the plan to be rendered +func TestLocal_planModuleOutputsChanged(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey), + OutputValue: addrs.OutputValue{Name: "changed"}, + }, cty.StringVal("before"), false) + })) + outDir := t.TempDir() + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-module-outputs-changed") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !run.PlanEmpty { + t.Fatal("plan should be empty") + } + + expectedOutput := strings.TrimSpace(` +No changes. Your infrastructure matches the configuration. 
+`) + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) + } +} + +func TestLocal_planTainted(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_tainted()) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + expectedOutput := `OpenTofu used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: +-/+ destroy and then create replacement + +OpenTofu will perform the following actions: + + # test_instance.foo is tainted, so it must be replaced +-/+ resource "test_instance" "foo" { + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput) + } +} + +func TestLocal_planDeposedOnly(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + states.DeposedKey("00000000"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + })) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should've been called to refresh the deposed object") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + // The deposed object and the current object are distinct, so our + // plan includes separate actions for each of them. This strange situation + // is not common: it should arise only if OpenTofu fails during + // a create-before-destroy when the "create" hasn't completed yet but + // in a severe way that prevents the previous object from being restored + // as "current". + // + // However, that situation was more common in some earlier OpenTofu + // versions where deposed objects were not managed properly, so this + // can arise when upgrading from an older version with deposed objects + // already in the state. + // + // This is one of the few cases where we expose the idea of "deposed" in + // the UI, including the user-unfriendly "deposed key" (00000000 in this + // case) just so that users can correlate this with what they might + // see in `tofu show` and in the subsequent apply output, because + // it's also possible for there to be _multiple_ deposed objects, in the + // unlikely event that create_before_destroy _keeps_ crashing across + // subsequent runs. + expectedOutput := `OpenTofu used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: + + create + - destroy + +OpenTofu will perform the following actions: + + # test_instance.foo will be created + + resource "test_instance" "foo" { + + ami = "bar" + + + network_interface { + + description = "Main network interface" + + device_index = 0 + } + } + + # test_instance.foo (deposed object 00000000) will be destroyed + # (left over from a partially-failed replacement of this instance) + - resource "test_instance" "foo" { + - ami = "bar" -> null + + - network_interface { + - description = "Main network interface" -> null + - device_index = 0 -> null + } + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput) + } +} + +func TestLocal_planTainted_createBeforeDestroy(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_tainted()) + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cbd") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + expectedOutput := `OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: ++/- create replacement and then destroy + +OpenTofu will perform the following actions: + + # test_instance.foo is tainted, so it must be replaced ++/- resource "test_instance" "foo" { + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + } + +Plan: 1 to add, 0 to change, 1 to destroy.` + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput) + } +} + +func TestLocal_planRefreshFalse(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + if !run.PlanEmpty { + t.Fatal("plan should be empty") + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planDestroy(t *testing.T) { + b := TestLocal(t) + + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") 
+ + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanMode = plans.DestroyMode + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. + Type: "local", + Config: cfgRaw, + } + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + plan := testReadPlan(t, planPath) + for _, r := range plan.Changes.Resources { + if r.Action.String() != "Delete" { + t.Fatalf("bad: %#v", r.Action.String()) + } + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func TestLocal_planDestroy_withDataSources(t *testing.T) { + b := TestLocal(t) + + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState_withDataSource()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + + op, configCleanup, done := testOperationPlan(t, "./testdata/destroy-with-ds") + defer configCleanup() + op.PlanMode = plans.DestroyMode + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if run.PlanEmpty { + t.Fatal("plan should not be empty") + } + + // Data source should still exist in the plan file + plan := testReadPlan(t, planPath) + if len(plan.Changes.Resources) != 2 { + t.Fatalf("Expected exactly 1 resource for destruction, %d given: %q", + len(plan.Changes.Resources), getAddrs(plan.Changes.Resources)) + } + + // Data source should not be rendered in the output + expectedOutput := `OpenTofu will perform the following actions: + + # test_instance.foo[0] will be destroyed + - resource "test_instance" "foo" { + - ami = "bar" -> null + + - network_interface { + - description = "Main network interface" -> null + - device_index = 0 -> null + } + } + +Plan: 0 to add, 0 to change, 1 to destroy.` + + if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s", output) + } +} + +func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string { + addrs := make([]string, len(resources)) + for i, r := range resources { + addrs[i] = r.Addr.String() + } + return addrs +} + +func TestLocal_planOutPathNoChange(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + outDir := t.TempDir() + planPath := filepath.Join(outDir, "plan.tfplan") + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + // Just a placeholder so that we can generate a valid plan file. 
+ Type: "local", + Config: cfgRaw, + } + op.PlanRefresh = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + plan := testReadPlan(t, planPath) + + if !plan.Changes.Empty() { + t.Fatalf("expected empty plan to be written") + } + + if errOutput := done(t).Stderr(); errOutput != "" { + t.Fatalf("unexpected error output:\n%s", errOutput) + } +} + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + + // Many of our tests use an overridden "test" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/test")) + + return &backend.Operation{ + Type: backend.OperationTypePlan, + Encryption: encryption.Disabled(), + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewNoopLocker(), + View: view, + DependencyLocks: depLocks, + }, configCleanup, done +} + +// testPlanState is just a common state that we use for testing plan. 
+func testPlanState() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testPlanState_withDataSource() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "ami": "bar", + "network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_ds", + Name: "bar", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "filter": "foo" + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testPlanState_tainted() *states.State { + state := states.NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{ + "ami": "bar", + 
"network_interface": [{ + "device_index": 0, + "description": "Main network interface" + }] + }`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func testReadPlan(t *testing.T, path string) *plans.Plan { + t.Helper() + + p, err := planfile.Open(path, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + + plan, err := p.ReadPlan() + if err != nil { + t.Fatalf("err: %s", err) + } + + return plan +} + +// planFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan . This schema should be +// assigned to a mock provider named "test". +func planFixtureSchema() providers.ProviderSchema { + return providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.Number, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_ds": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "filter": {Type: cty.String, Required: true}, + }, + }, + }, + }, + } +} + +func TestLocal_invalidOptions(t *testing.T) { + b := TestLocal(t) + TestLocalProvider(t, b, "test", planFixtureSchema()) + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + op.PlanRefresh = true + op.PlanMode = plans.RefreshOnlyMode + op.ForceReplace = []addrs.AbsResourceInstance{mustResourceInstanceAddr("test_instance.foo")} + + run, err := b.Operation(context.Background(), op) + if err != nil { + 
t.Fatalf("unexpected error: %s", err)
+	}
+	<-run.Done()
+	if run.Result == backend.OperationSuccess {
+		t.Fatalf("expected plan operation to fail")
+	}
+
+	if errOutput := done(t).Stderr(); errOutput == "" {
+		t.Fatal("expected error output")
+	}
+}
diff --git a/pkg/backend/local/backend_refresh.go b/pkg/backend/local/backend_refresh.go
new file mode 100644
index 00000000000..0d2eeae6a09
--- /dev/null
+++ b/pkg/backend/local/backend_refresh.go
@@ -0,0 +1,122 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package local
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/logging"
+	"github.com/kubegems/opentofu/pkg/states"
+	"github.com/kubegems/opentofu/pkg/states/statemgr"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+func (b *Local) opRefresh(
+	stopCtx context.Context,
+	cancelCtx context.Context,
+	op *backend.Operation,
+	runningOp *backend.RunningOperation) {
+
+	var diags tfdiags.Diagnostics
+
+	// Check if our state exists if we're performing a refresh operation. We
+	// only do this if we're managing state with this backend.
+ if b.Backend == nil { + if _, err := os.Stat(b.StatePath); err != nil { + if os.IsNotExist(err) { + err = nil + } + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot read state file", + fmt.Sprintf("Failed to read %s: %s", b.StatePath, err), + )) + op.ReportResult(runningOp, diags) + return + } + } + } + + // Refresh now happens via a plan, so we need to ensure this is enabled + op.PlanRefresh = true + + // Get our context + lr, _, opState, contextDiags := b.localRun(op) + diags = diags.Append(contextDiags) + if contextDiags.HasErrors() { + op.ReportResult(runningOp, diags) + return + } + + // the state was locked during successful context creation; unlock the state + // when the operation completes + defer func() { + diags := op.StateLocker.Unlock() + if diags.HasErrors() { + op.View.Diagnostics(diags) + runningOp.Result = backend.OperationFailure + } + }() + + // If we succeed then we'll overwrite this with the resulting state below, + // but otherwise the resulting state is just the input state. 
+	runningOp.State = lr.InputState
+	if !runningOp.State.HasManagedResourceInstanceObjects() {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Warning,
+			"Empty or non-existent state",
+			"There are currently no remote objects tracked in the state, so there is nothing to refresh.",
+		))
+	}
+
+	// get schemas before writing state
+	schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		op.ReportResult(runningOp, diags)
+		return
+	}
+
+	// Perform the refresh in a goroutine so we can be interrupted
+	var newState *states.State
+	var refreshDiags tfdiags.Diagnostics
+	doneCh := make(chan struct{})
+	panicHandler := logging.PanicHandlerWithTraceFn()
+	go func() {
+		defer panicHandler()
+		defer close(doneCh)
+		log.Printf("[INFO] backend/local: refresh calling Refresh")
+		newState, refreshDiags = lr.Core.Refresh(lr.Config, lr.InputState, lr.PlanOpts)
+	}()
+
+	if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) {
+		return
+	}
+
+	// Write the resulting state to the running op
+	runningOp.State = newState
+	diags = diags.Append(refreshDiags)
+	if refreshDiags.HasErrors() {
+		op.ReportResult(runningOp, diags)
+		return
+	}
+
+	err := statemgr.WriteAndPersist(opState, newState, schemas)
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("failed to write state: %w", err))
+		op.ReportResult(runningOp, diags)
+		return
+	}
+
+	// Show any remaining warnings before exiting
+	op.ReportResult(runningOp, diags)
+}
diff --git a/pkg/backend/local/backend_refresh_test.go b/pkg/backend/local/backend_refresh_test.go
new file mode 100644
index 00000000000..b225ec01c8f
--- /dev/null
+++ b/pkg/backend/local/backend_refresh_test.go
@@ -0,0 +1,323 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + + "github.com/zclconf/go-cty/cty" +) + +func TestLocal_refresh(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, testRefreshState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] + `) + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +func TestLocal_refreshInput(t *testing.T) { + b := TestLocal(t) + + schema := providers.ProviderSchema{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + p := TestLocalProvider(t, b, "test", schema) + testStateFile(t, b.StatePath, testRefreshState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() || val.AsString() != "bar" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect value %#v", val)) + } + + return + } + + // Enable input asking since it is normally disabled by default + b.OpInput = true + b.ContextOpts.UIInput = &tofu.MockUIInput{InputReturnString: "bar"} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-var-unset") + defer configCleanup() + defer done(t) + op.UIIn = b.ContextOpts.UIInput + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] + `) +} + +func TestLocal_refreshValidate(t *testing.T) { + b := TestLocal(t) + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, testRefreshState()) + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + // Enable validation + b.OpValidation = true + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err 
!= nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] + `) +} + +func TestLocal_refreshValidateProviderConfigured(t *testing.T) { + b := TestLocal(t) + + schema := providers.ProviderSchema{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + p := TestLocalProvider(t, b, "test", schema) + testStateFile(t, b.StatePath, testRefreshState()) + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + // Enable validation + b.OpValidation = true + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh-provider-config") + defer configCleanup() + defer done(t) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + if !p.ValidateProviderConfigCalled { + t.Fatal("Validate provider config should be called") + } + + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] + `) +} + +// This test validates the state lacking behavior when the inner call to +// Context() fails +func TestLocal_refresh_context_error(t *testing.T) { + b := TestLocal(t) + testStateFile(t, b.StatePath, testRefreshState()) + op, configCleanup, done := testOperationRefresh(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + // we coerce a failure in Context() by omitting the provider schema + + run, err := 
b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("operation succeeded; want failure") + } + assertBackendStateUnlocked(t, b) +} + +func TestLocal_refreshEmptyState(t *testing.T) { + b := TestLocal(t) + + p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) + testStateFile(t, b.StatePath, states.NewState()) + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + + output := done(t) + + if stderr := output.Stderr(); stderr != "" { + t.Fatalf("expected only warning diags, got errors: %s", stderr) + } + if got, want := output.Stdout(), "Warning: Empty or non-existent state"; !strings.Contains(got, want) { + t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) + } + + // the backend should be unlocked after a run + assertBackendStateUnlocked(t, b) +} + +func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + + // Many of our tests use an overridden "test" provider that's just in-memory + // inside the test process, not a separate plugin on disk. 
+ depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/test")) + + return &backend.Operation{ + Type: backend.OperationTypeRefresh, + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewNoopLocker(), + View: view, + DependencyLocks: depLocks, + }, configCleanup, done +} + +// testRefreshState is just a common state that we use for testing refresh. +func testRefreshState() *states.State { + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + return state +} + +// refreshFixtureSchema returns a schema suitable for processing the +// configuration in testdata/refresh . This schema should be +// assigned to a mock provider named "test". +func refreshFixtureSchema() providers.ProviderSchema { + return providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } +} diff --git a/pkg/backend/local/backend_test.go b/pkg/backend/local/backend_test.go new file mode 100644 index 00000000000..385d75b908e --- /dev/null +++ b/pkg/backend/local/backend_test.go @@ -0,0 +1,252 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "errors" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func TestLocal_impl(t *testing.T) { + var _ backend.Enhanced = New(encryption.StateEncryptionDisabled()) + var _ backend.Local = New(encryption.StateEncryptionDisabled()) + var _ backend.CLI = New(encryption.StateEncryptionDisabled()) +} + +func TestLocal_backend(t *testing.T) { + testTmpDir(t) + b := New(encryption.StateEncryptionDisabled()) + backend.TestBackendStates(t, b) + backend.TestBackendStateLocks(t, b, b) +} + +func checkState(t *testing.T, path, expected string) { + t.Helper() + // Read the state + f, err := os.Open(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + state, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := state.State.String() + expected = strings.TrimSpace(expected) + if actual != expected { + t.Fatalf("state does not match! 
actual:\n%s\n\nexpected:\n%s", actual, expected) + } +} + +func TestLocal_StatePaths(t *testing.T) { + b := New(encryption.StateEncryptionDisabled()) + + // Test the defaults + path, out, back := b.StatePaths("") + + if path != DefaultStateFilename { + t.Fatalf("expected %q, got %q", DefaultStateFilename, path) + } + + if out != DefaultStateFilename { + t.Fatalf("expected %q, got %q", DefaultStateFilename, out) + } + + dfltBackup := DefaultStateFilename + DefaultBackupExtension + if back != dfltBackup { + t.Fatalf("expected %q, got %q", dfltBackup, back) + } + + // check with env + testEnv := "test_env" + path, out, back = b.StatePaths(testEnv) + + expectedPath := filepath.Join(DefaultWorkspaceDir, testEnv, DefaultStateFilename) + expectedOut := expectedPath + expectedBackup := expectedPath + DefaultBackupExtension + + if path != expectedPath { + t.Fatalf("expected %q, got %q", expectedPath, path) + } + + if out != expectedOut { + t.Fatalf("expected %q, got %q", expectedOut, out) + } + + if back != expectedBackup { + t.Fatalf("expected %q, got %q", expectedBackup, back) + } + +} + +func TestLocal_addAndRemoveStates(t *testing.T) { + testTmpDir(t) + dflt := backend.DefaultStateName + expectedStates := []string{dflt} + + b := New(encryption.StateEncryptionDisabled()) + states, err := b.Workspaces() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected []string{%q}, got %q", dflt, states) + } + + expectedA := "test_A" + if _, err := b.StateMgr(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = append(expectedStates, expectedA) + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + expectedB := "test_B" + if _, err := b.StateMgr(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = 
append(expectedStates, expectedB) + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(expectedA, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = []string{dflt, expectedB} + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(expectedB, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedStates = []string{dflt} + if !reflect.DeepEqual(states, expectedStates) { + t.Fatalf("expected %q, got %q", expectedStates, states) + } + + if err := b.DeleteWorkspace(dflt, true); err == nil { + t.Fatal("expected error deleting default state") + } +} + +// a local backend which returns sentinel errors for NamedState methods to +// verify it's being called. +type testDelegateBackend struct { + *Local + + // return a sentinel error on these calls + stateErr bool + statesErr bool + deleteErr bool +} + +var errTestDelegateState = errors.New("state called") +var errTestDelegateStates = errors.New("states called") +var errTestDelegateDeleteState = errors.New("delete called") + +func (b *testDelegateBackend) StateMgr(name string) (statemgr.Full, error) { + if b.stateErr { + return nil, errTestDelegateState + } + s := statemgr.NewFilesystem("terraform.tfstate", encryption.StateEncryptionDisabled()) + return s, nil +} + +func (b *testDelegateBackend) Workspaces() ([]string, error) { + if b.statesErr { + return nil, errTestDelegateStates + } + return []string{"default"}, nil +} + +func (b *testDelegateBackend) DeleteWorkspace(name string, force bool) error { + if b.deleteErr { + return errTestDelegateDeleteState + } + return nil +} + +// verify that the MultiState methods are dispatched to the correct Backend. 
+func TestLocal_multiStateBackend(t *testing.T) { + // assign a separate backend where we can read the state + b := NewWithBackend(&testDelegateBackend{ + stateErr: true, + statesErr: true, + deleteErr: true, + }, nil) + + if _, err := b.StateMgr("test"); err != errTestDelegateState { + t.Fatal("expected errTestDelegateState, got:", err) + } + + if _, err := b.Workspaces(); err != errTestDelegateStates { + t.Fatal("expected errTestDelegateStates, got:", err) + } + + if err := b.DeleteWorkspace("test", true); err != errTestDelegateDeleteState { + t.Fatal("expected errTestDelegateDeleteState, got:", err) + } +} + +// testTmpDir changes into a tmp dir and change back automatically when the test +// and all its subtests complete. +func testTmpDir(t *testing.T) { + tmp := t.TempDir() + + old, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + if err := os.Chdir(tmp); err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + // ignore errors and try to clean up + os.Chdir(old) + }) +} diff --git a/pkg/backend/local/cli.go b/pkg/backend/local/cli.go new file mode 100644 index 00000000000..ed25fad5323 --- /dev/null +++ b/pkg/backend/local/cli.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/backend" +) + +// backend.CLI impl. 
+func (b *Local) CLIInit(opts *backend.CLIOpts) error { + b.ContextOpts = opts.ContextOpts + b.OpInput = opts.Input + b.OpValidation = opts.Validation + + // configure any new cli options + if opts.StatePath != "" { + log.Printf("[TRACE] backend/local: CLI option -state is overriding state path to %s", opts.StatePath) + b.OverrideStatePath = opts.StatePath + } + + if opts.StateOutPath != "" { + log.Printf("[TRACE] backend/local: CLI option -state-out is overriding state output path to %s", opts.StateOutPath) + b.OverrideStateOutPath = opts.StateOutPath + } + + if opts.StateBackupPath != "" { + log.Printf("[TRACE] backend/local: CLI option -backup is overriding state backup path to %s", opts.StateBackupPath) + b.OverrideStateBackupPath = opts.StateBackupPath + } + + return nil +} diff --git a/pkg/backend/local/hook_state.go b/pkg/backend/local/hook_state.go new file mode 100644 index 00000000000..3d2c3d1e00e --- /dev/null +++ b/pkg/backend/local/hook_state.go @@ -0,0 +1,177 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "log" + "sync" + "time" + + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StateHook is a hook that continuously updates the state by calling +// WriteState on a statemgr.Full. +type StateHook struct { + tofu.NilHook + sync.Mutex + + StateMgr statemgr.Writer + + // If PersistInterval is nonzero then for any new state update after + // the duration has elapsed we'll try to persist a state snapshot + // to the persistent backend too. + // That's only possible if field Schemas is valid, because the + // StateMgr.PersistState function for some backends needs schemas. + PersistInterval time.Duration + + // Schemas are the schemas to use when persisting state due to + // PersistInterval. 
This is ignored if PersistInterval is zero, + // and PersistInterval is ignored if this is nil. + Schemas *tofu.Schemas + + intermediatePersist IntermediateStatePersistInfo +} + +type IntermediateStatePersistInfo struct { + // RequestedPersistInterval is the persist interval requested by whatever + // instantiated the StateHook. + // + // Implementations of [IntermediateStateConditionalPersister] should ideally + // respect this, but may ignore it if they use something other than the + // passage of time to make their decision. + RequestedPersistInterval time.Duration + + // LastPersist is the time when the last intermediate state snapshot was + // persisted, or the time of the first report for OpenTofu Core if there + // hasn't yet been a persisted snapshot. + LastPersist time.Time + + // ForcePersist is true when OpenTofu CLI has received an interrupt + // signal and is therefore trying to create snapshots more aggressively + // in anticipation of possibly being terminated ungracefully. + // [IntermediateStateConditionalPersister] implementations should ideally + // persist every snapshot they get when this flag is set, unless they have + // some external information that implies this shouldn't be necessary. + ForcePersist bool +} + +var _ tofu.Hook = (*StateHook)(nil) + +func (h *StateHook) PostStateUpdate(new *states.State) (tofu.HookAction, error) { + h.Lock() + defer h.Unlock() + + h.intermediatePersist.RequestedPersistInterval = h.PersistInterval + + if h.intermediatePersist.LastPersist.IsZero() { + // The first PostStateUpdate starts the clock for intermediate + // calls to PersistState. 
+ h.intermediatePersist.LastPersist = time.Now() + } + + if h.StateMgr != nil { + if err := h.StateMgr.WriteState(new); err != nil { + return tofu.HookActionHalt, err + } + if mgrPersist, ok := h.StateMgr.(statemgr.Persister); ok && h.PersistInterval != 0 && h.Schemas != nil { + if h.shouldPersist() { + err := mgrPersist.PersistState(h.Schemas) + if err != nil { + return tofu.HookActionHalt, err + } + h.intermediatePersist.LastPersist = time.Now() + } else { + log.Printf("[DEBUG] State storage %T declined to persist a state snapshot", h.StateMgr) + } + } + } + + return tofu.HookActionContinue, nil +} + +func (h *StateHook) Stopping() { + h.Lock() + defer h.Unlock() + + // If OpenTofu has been asked to stop then that might mean that a hard + // kill signal will follow shortly in case OpenTofu doesn't stop + // quickly enough, and so we'll try to persist the latest state + // snapshot in the hope that it'll give the user less recovery work to + // do if they _do_ subsequently hard-kill OpenTofu during an apply. + + if mgrPersist, ok := h.StateMgr.(statemgr.Persister); ok && h.Schemas != nil { + // While we're in the stopping phase we'll try to persist every + // new state update to maximize every opportunity we get to avoid + // losing track of objects that have been created or updated. + // OpenTofu Core won't start any new operations after it's been + // stopped, so at most we should see one more PostStateUpdate + // call per already-active request. + h.intermediatePersist.ForcePersist = true + + if h.shouldPersist() { + err := mgrPersist.PersistState(h.Schemas) + if err != nil { + // This hook can't affect OpenTofu Core's ongoing behavior, + // but it's a best effort thing anyway, so we'll just emit a + // log to aid with debugging. 
+ log.Printf("[ERROR] Failed to persist state after interruption: %s", err) + } + } else { + log.Printf("[DEBUG] State storage %T declined to persist a state snapshot", h.StateMgr) + } + } + +} + +func (h *StateHook) shouldPersist() bool { + if m, ok := h.StateMgr.(IntermediateStateConditionalPersister); ok { + return m.ShouldPersistIntermediateState(&h.intermediatePersist) + } + return DefaultIntermediateStatePersistRule(&h.intermediatePersist) +} + +// DefaultIntermediateStatePersistRule is the default implementation of +// [IntermediateStateConditionalPersister.ShouldPersistIntermediateState] used +// when the selected state manager doesn't implement that interface. +// +// Implementers of that interface can optionally wrap a call to this function +// if they want to combine the default behavior with some logic of their own. +func DefaultIntermediateStatePersistRule(info *IntermediateStatePersistInfo) bool { + return info.ForcePersist || time.Since(info.LastPersist) >= info.RequestedPersistInterval +} + +// IntermediateStateConditionalPersister is an optional extension of +// [statemgr.Persister] that allows an implementation to tailor the rules for +// whether to create intermediate state snapshots when OpenTofu Core emits +// events reporting that the state might have changed. +// +// For state managers that don't implement this interface, [StateHook] uses +// a default set of rules that aim to be a good compromise between how long +// a state change can be active before it gets committed as a snapshot vs. +// how many intermediate snapshots will get created. That compromise is subject +// to change over time, but a state manager can implement this interface to +// exert full control over those rules. +type IntermediateStateConditionalPersister interface { + // ShouldPersistIntermediateState will be called each time OpenTofu Core + // emits an intermediate state event that is potentially eligible to be + // persisted. 
+ // + // The implementation should return true to signal that the state snapshot + // most recently provided to the object's WriteState should be persisted, + // or false if it should not be persisted. If this function returns true + // then the receiver will see a subsequent call to + // [statemgr.Persister.PersistState] to request persistence. + // + // The implementation must not modify anything reachable through the + // arguments, and must not retain pointers to anything reachable through + // them after the function returns. However, implementers can assume that + // nothing will write to anything reachable through the arguments while + // this function is active. + ShouldPersistIntermediateState(info *IntermediateStatePersistInfo) bool +} diff --git a/pkg/backend/local/hook_state_test.go b/pkg/backend/local/hook_state_test.go new file mode 100644 index 00000000000..53ac7fdaa92 --- /dev/null +++ b/pkg/backend/local/hook_state_test.go @@ -0,0 +1,299 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestStateHook_impl(t *testing.T) { + var _ tofu.Hook = new(StateHook) +} + +func TestStateHook(t *testing.T) { + is := statemgr.NewTransientInMemory(nil) + var hook tofu.Hook = &StateHook{StateMgr: is} + + s := statemgr.TestFullInitialState() + action, err := hook.PostStateUpdate(s) + if err != nil { + t.Fatalf("err: %s", err) + } + if action != tofu.HookActionContinue { + t.Fatalf("bad: %v", action) + } + if !is.State().Equal(s) { + t.Fatalf("bad state: %#v", is.State()) + } +} + +func TestStateHookStopping(t *testing.T) { + is := &testPersistentState{} + hook := &StateHook{ + StateMgr: is, + Schemas: &tofu.Schemas{}, + PersistInterval: 4 * time.Hour, + intermediatePersist: IntermediateStatePersistInfo{ + LastPersist: time.Now(), + }, + } + + s := statemgr.TestFullInitialState() + action, err := hook.PostStateUpdate(s) + if err != nil { + t.Fatalf("unexpected error from PostStateUpdate: %s", err) + } + if got, want := action, tofu.HookActionContinue; got != want { + t.Fatalf("wrong hookaction %#v; want %#v", got, want) + } + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted != nil { + t.Fatalf("persisted too soon") + } + + // We'll now force lastPersist to be long enough ago that persisting + // should be due on the next call. 
+ hook.intermediatePersist.LastPersist = time.Now().Add(-5 * time.Hour) + hook.PostStateUpdate(s) + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + hook.PostStateUpdate(s) + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + + gotLog := is.CallLog + wantLog := []string{ + // Initial call before we reset lastPersist + "WriteState", + + // Write and then persist after we reset lastPersist + "WriteState", + "PersistState", + + // Final call when persisting wasn't due yet. + "WriteState", + } + if diff := cmp.Diff(wantLog, gotLog); diff != "" { + t.Fatalf("wrong call log so far\n%s", diff) + } + + // We'll reset the log now before we try seeing what happens after + // we use "Stopped". + is.CallLog = is.CallLog[:0] + is.Persisted = nil + + hook.Stopping() + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + + is.Persisted = nil + hook.PostStateUpdate(s) + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + is.Persisted = nil + hook.PostStateUpdate(s) + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + + gotLog = is.CallLog + wantLog = []string{ + // "Stopping" immediately persisted + "PersistState", + + // PostStateUpdate then writes and persists on every call, + // on the assumption that we're now bailing out after + // being cancelled and trying to save as much state as we can. 
+ "WriteState", + "PersistState", + "WriteState", + "PersistState", + } + if diff := cmp.Diff(wantLog, gotLog); diff != "" { + t.Fatalf("wrong call log once in stopping mode\n%s", diff) + } +} + +func TestStateHookCustomPersistRule(t *testing.T) { + is := &testPersistentStateThatRefusesToPersist{} + hook := &StateHook{ + StateMgr: is, + Schemas: &tofu.Schemas{}, + PersistInterval: 4 * time.Hour, + intermediatePersist: IntermediateStatePersistInfo{ + LastPersist: time.Now(), + }, + } + + s := statemgr.TestFullInitialState() + action, err := hook.PostStateUpdate(s) + if err != nil { + t.Fatalf("unexpected error from PostStateUpdate: %s", err) + } + if got, want := action, tofu.HookActionContinue; got != want { + t.Fatalf("wrong hookaction %#v; want %#v", got, want) + } + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted != nil { + t.Fatalf("persisted too soon") + } + + // We'll now force lastPersist to be long enough ago that persisting + // should be due on the next call. + hook.intermediatePersist.LastPersist = time.Now().Add(-5 * time.Hour) + hook.PostStateUpdate(s) + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted != nil { + t.Fatalf("has a persisted state, but shouldn't") + } + hook.PostStateUpdate(s) + if is.Written == nil || !is.Written.Equal(s) { + t.Fatalf("mismatching state written") + } + if is.Persisted != nil { + t.Fatalf("has a persisted state, but shouldn't") + } + + gotLog := is.CallLog + wantLog := []string{ + // Initial call before we reset lastPersist + "WriteState", + "ShouldPersistIntermediateState", + // Previous call should return false, preventing a "PersistState" call + + // Write and then decline to persist + "WriteState", + "ShouldPersistIntermediateState", + // Previous call should return false, preventing a "PersistState" call + + // Final call before we start "stopping". 
+ "WriteState", + "ShouldPersistIntermediateState", + // Previous call should return false, preventing a "PersistState" call + } + if diff := cmp.Diff(wantLog, gotLog); diff != "" { + t.Fatalf("wrong call log so far\n%s", diff) + } + + // We'll reset the log now before we try seeing what happens after + // we use "Stopped". + is.CallLog = is.CallLog[:0] + is.Persisted = nil + + hook.Stopping() + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + + is.Persisted = nil + hook.PostStateUpdate(s) + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + is.Persisted = nil + hook.PostStateUpdate(s) + if is.Persisted == nil || !is.Persisted.Equal(s) { + t.Fatalf("mismatching state persisted") + } + + gotLog = is.CallLog + wantLog = []string{ + "ShouldPersistIntermediateState", + // Previous call should return true, allowing the following "PersistState" call + "PersistState", + "WriteState", + "ShouldPersistIntermediateState", + // Previous call should return true, allowing the following "PersistState" call + "PersistState", + "WriteState", + "ShouldPersistIntermediateState", + // Previous call should return true, allowing the following "PersistState" call + "PersistState", + } + if diff := cmp.Diff(wantLog, gotLog); diff != "" { + t.Fatalf("wrong call log once in stopping mode\n%s", diff) + } +} + +type testPersistentState struct { + CallLog []string + + Written *states.State + Persisted *states.State +} + +var _ statemgr.Writer = (*testPersistentState)(nil) +var _ statemgr.Persister = (*testPersistentState)(nil) + +func (sm *testPersistentState) WriteState(state *states.State) error { + sm.CallLog = append(sm.CallLog, "WriteState") + sm.Written = state + return nil +} + +func (sm *testPersistentState) PersistState(schemas *tofu.Schemas) error { + if schemas == nil { + return fmt.Errorf("no schemas") + } + sm.CallLog = append(sm.CallLog, "PersistState") + sm.Persisted = sm.Written + 
return nil +} + +type testPersistentStateThatRefusesToPersist struct { + CallLog []string + + Written *states.State + Persisted *states.State +} + +var _ statemgr.Writer = (*testPersistentStateThatRefusesToPersist)(nil) +var _ statemgr.Persister = (*testPersistentStateThatRefusesToPersist)(nil) +var _ IntermediateStateConditionalPersister = (*testPersistentStateThatRefusesToPersist)(nil) + +func (sm *testPersistentStateThatRefusesToPersist) WriteState(state *states.State) error { + sm.CallLog = append(sm.CallLog, "WriteState") + sm.Written = state + return nil +} + +func (sm *testPersistentStateThatRefusesToPersist) PersistState(schemas *tofu.Schemas) error { + if schemas == nil { + return fmt.Errorf("no schemas") + } + sm.CallLog = append(sm.CallLog, "PersistState") + sm.Persisted = sm.Written + return nil +} + +// ShouldPersistIntermediateState implements IntermediateStateConditionalPersister +func (sm *testPersistentStateThatRefusesToPersist) ShouldPersistIntermediateState(info *IntermediateStatePersistInfo) bool { + sm.CallLog = append(sm.CallLog, "ShouldPersistIntermediateState") + return info.ForcePersist +} diff --git a/pkg/backend/local/local_test.go b/pkg/backend/local/local_test.go new file mode 100644 index 00000000000..753f93a065f --- /dev/null +++ b/pkg/backend/local/local_test.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "flag" + "os" + "testing" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} diff --git a/pkg/backend/local/testdata/apply-check/main.tf b/pkg/backend/local/testdata/apply-check/main.tf new file mode 100644 index 00000000000..5782be8f473 --- /dev/null +++ b/pkg/backend/local/testdata/apply-check/main.tf @@ -0,0 +1,10 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +check "test_instance_exists" { + assert { + condition = test_instance.foo.id != null + error_message = "value should have been computed" + } +} diff --git a/pkg/backend/local/testdata/apply-empty/hello.txt b/pkg/backend/local/testdata/apply-empty/hello.txt new file mode 100644 index 00000000000..7dcfcbb4a16 --- /dev/null +++ b/pkg/backend/local/testdata/apply-empty/hello.txt @@ -0,0 +1 @@ +This is an empty dir diff --git a/pkg/backend/local/testdata/apply-error/main.tf b/pkg/backend/local/testdata/apply-error/main.tf new file mode 100644 index 00000000000..532c52f944b --- /dev/null +++ b/pkg/backend/local/testdata/apply-error/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +resource "test_instance" "bar" { + ami = "error" +} diff --git a/pkg/backend/local/testdata/apply/main.tf b/pkg/backend/local/testdata/apply/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/backend/local/testdata/apply/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/backend/local/testdata/destroy-with-ds/main.tf b/pkg/backend/local/testdata/destroy-with-ds/main.tf new file mode 100644 index 00000000000..7062d896b48 --- /dev/null +++ b/pkg/backend/local/testdata/destroy-with-ds/main.tf @@ -0,0 +1,8 @@ +resource "test_instance" "foo" { + count = 1 + ami = "bar" +} + +data "test_ds" "bar" { + filter = "foo" +} diff --git a/pkg/backend/local/testdata/empty/.gitignore 
b/pkg/backend/local/testdata/empty/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/backend/local/testdata/invalid/invalid.tf b/pkg/backend/local/testdata/invalid/invalid.tf new file mode 100644 index 00000000000..7f2d0723d3b --- /dev/null +++ b/pkg/backend/local/testdata/invalid/invalid.tf @@ -0,0 +1,6 @@ +# This configuration is intended to be loadable (valid syntax, etc) but to +# fail terraform.Context.Validate. + +locals { + a = local.nonexist +} diff --git a/pkg/backend/local/testdata/plan-bookmark/bookmark.json b/pkg/backend/local/testdata/plan-bookmark/bookmark.json new file mode 100644 index 00000000000..0a1c73302a2 --- /dev/null +++ b/pkg/backend/local/testdata/plan-bookmark/bookmark.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "app.terraform.io" +} diff --git a/pkg/backend/local/testdata/plan-cbd/main.tf b/pkg/backend/local/testdata/plan-cbd/main.tf new file mode 100644 index 00000000000..1a7ae843379 --- /dev/null +++ b/pkg/backend/local/testdata/plan-cbd/main.tf @@ -0,0 +1,13 @@ +resource "test_instance" "foo" { + ami = "bar" + + # This is here because at some point it caused a test failure + network_interface { + device_index = 0 + description = "Main network interface" + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/backend/local/testdata/plan-module-outputs-changed/main.tf b/pkg/backend/local/testdata/plan-module-outputs-changed/main.tf new file mode 100644 index 00000000000..ba846846994 --- /dev/null +++ b/pkg/backend/local/testdata/plan-module-outputs-changed/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/pkg/backend/local/testdata/plan-module-outputs-changed/mod/main.tf b/pkg/backend/local/testdata/plan-module-outputs-changed/mod/main.tf new file mode 100644 index 00000000000..cee14bd9cab --- /dev/null +++ b/pkg/backend/local/testdata/plan-module-outputs-changed/mod/main.tf @@ -0,0 +1,3 @@ +output "changed" 
{ + value = "after" +} diff --git a/pkg/backend/local/testdata/plan-outputs-changed/main.tf b/pkg/backend/local/testdata/plan-outputs-changed/main.tf new file mode 100644 index 00000000000..1df236ff435 --- /dev/null +++ b/pkg/backend/local/testdata/plan-outputs-changed/main.tf @@ -0,0 +1,28 @@ +module "submodule" { + source = "./submodule" +} + +output "changed" { + value = "after" +} + +output "sensitive_before" { + value = "after" + # no sensitive = true here, but the prior state is marked as sensitive in the test code +} + +output "sensitive_after" { + value = "after" + + # This one is _not_ sensitive in the prior state, but is transitioning to + # being sensitive in our new plan. + sensitive = true +} + +output "added" { // not present in the prior state + value = "after" +} + +output "unchanged" { + value = "before" +} diff --git a/pkg/backend/local/testdata/plan-outputs-changed/submodule/main.tf b/pkg/backend/local/testdata/plan-outputs-changed/submodule/main.tf new file mode 100644 index 00000000000..ae32f8aa13b --- /dev/null +++ b/pkg/backend/local/testdata/plan-outputs-changed/submodule/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "bar" +} diff --git a/pkg/backend/local/testdata/plan/main.tf b/pkg/backend/local/testdata/plan/main.tf new file mode 100644 index 00000000000..fd9da13e004 --- /dev/null +++ b/pkg/backend/local/testdata/plan/main.tf @@ -0,0 +1,9 @@ +resource "test_instance" "foo" { + ami = "bar" + + # This is here because at some point it caused a test failure + network_interface { + device_index = 0 + description = "Main network interface" + } +} diff --git a/pkg/backend/local/testdata/refresh-provider-config/main.tf b/pkg/backend/local/testdata/refresh-provider-config/main.tf new file mode 100644 index 00000000000..f3a3ebb85a3 --- /dev/null +++ b/pkg/backend/local/testdata/refresh-provider-config/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +provider "test" { + value = "foo" +} diff --git 
a/pkg/backend/local/testdata/refresh-var-unset/main.tf b/pkg/backend/local/testdata/refresh-var-unset/main.tf new file mode 100644 index 00000000000..8e6b73d0da1 --- /dev/null +++ b/pkg/backend/local/testdata/refresh-var-unset/main.tf @@ -0,0 +1,9 @@ +variable "should_ask" {} + +provider "test" { + value = var.should_ask +} + +resource "test_instance" "foo" { + foo = "bar" +} diff --git a/pkg/backend/local/testdata/refresh/main.tf b/pkg/backend/local/testdata/refresh/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/backend/local/testdata/refresh/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/backend/local/testing.go b/pkg/backend/local/testing.go new file mode 100644 index 00000000000..1e1863232ed --- /dev/null +++ b/pkg/backend/local/testing.go @@ -0,0 +1,235 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package local + +import ( + "path/filepath" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// TestLocal returns a configured Local struct with temporary paths and +// in-memory ContextOpts. +// +// No operations will be called on the returned value, so you can still set +// public fields without any locks. 
+func TestLocal(t *testing.T) *Local { + t.Helper() + tempDir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + local := New(encryption.StateEncryptionDisabled()) + local.StatePath = filepath.Join(tempDir, "state.tfstate") + local.StateOutPath = filepath.Join(tempDir, "state.tfstate") + local.StateBackupPath = filepath.Join(tempDir, "state.tfstate.bak") + local.StateWorkspaceDir = filepath.Join(tempDir, "state.tfstate.d") + local.ContextOpts = &tofu.ContextOpts{} + + return local +} + +// TestLocalProvider modifies the ContextOpts of the *Local parameter to +// have a provider with the given name. +func TestLocalProvider(t *testing.T, b *Local, name string, schema providers.ProviderSchema) *tofu.MockProvider { + // Build a mock resource provider for in-memory operations + p := new(tofu.MockProvider) + + p.GetProviderSchemaResponse = &schema + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan, + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName) + if rSchema == nil { + rSchema = &configschema.Block{} // default schema is empty + } + plannedVals := map[string]cty.Value{} + for name, attrS := range rSchema.Attributes { + val := req.ProposedNewState.GetAttr(name) + if attrS.Computed && val.IsNull() { + val = cty.UnknownVal(attrS.Type) + } + plannedVals[name] = val + } + for name := range rSchema.BlockTypes { + // For simplicity's sake we just copy the block attributes over + // verbatim, since this package's mock providers are all relatively + // simple -- we're testing the backend, not esoteric provider features. 
+ plannedVals[name] = req.ProposedNewState.GetAttr(name) + } + + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(plannedVals), + PlannedPrivate: req.PriorPrivate, + } + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{State: req.Config} + } + + // Initialize the opts + if b.ContextOpts == nil { + b.ContextOpts = &tofu.ContextOpts{} + } + + // Set up our provider + b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider(name): providers.FactoryFixed(p), + } + + return p + +} + +// TestLocalSingleState is a backend implementation that wraps Local +// and modifies it to only support single states (returns +// ErrWorkspacesNotSupported for multi-state operations). +// +// This isn't an actual use case, this is exported just to provide a +// easy way to test that behavior. +type TestLocalSingleState struct { + *Local +} + +// TestNewLocalSingle is a factory for creating a TestLocalSingleState. +// This function matches the signature required for backend/init. 
+func TestNewLocalSingle(enc encryption.StateEncryption) backend.Backend { + return &TestLocalSingleState{Local: New(encryption.StateEncryptionDisabled())} +} + +func (b *TestLocalSingleState) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) DeleteWorkspace(string, bool) error { + return backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + return b.Local.StateMgr(name) +} + +// TestLocalNoDefaultState is a backend implementation that wraps +// Local and modifies it to support named states, but not the +// default state. It returns ErrDefaultWorkspaceNotSupported when +// the DefaultStateName is used. +type TestLocalNoDefaultState struct { + *Local +} + +// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState. +// This function matches the signature required for backend/init. 
+func TestNewLocalNoDefault(enc encryption.StateEncryption) backend.Backend { + return &TestLocalNoDefaultState{Local: New(encryption.StateEncryptionDisabled())} +} + +func (b *TestLocalNoDefaultState) Workspaces() ([]string, error) { + workspaces, err := b.Local.Workspaces() + if err != nil { + return nil, err + } + + filtered := workspaces[:0] + for _, name := range workspaces { + if name != backend.DefaultStateName { + filtered = append(filtered, name) + } + } + + return filtered, nil +} + +func (b *TestLocalNoDefaultState) DeleteWorkspace(name string, force bool) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.DeleteWorkspace(name, force) +} + +func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, error) { + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.StateMgr(name) +} + +func testStateFile(t *testing.T, path string, s *states.State) { + t.Helper() + + if err := statemgr.WriteAndPersist(statemgr.NewFilesystem(path, encryption.StateEncryptionDisabled()), s, nil); err != nil { + t.Fatal(err) + } +} + +func mustProviderConfig(s string) addrs.AbsProviderConfig { + p, diags := addrs.ParseAbsProviderConfigStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return p +} + +func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +// assertBackendStateUnlocked attempts to lock the backend state for a test. +// Failure indicates that the state was locked and false is returned. +// True is returned if a lock was obtained. 
+func assertBackendStateUnlocked(t *testing.T, b *Local) bool { + t.Helper() + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Errorf("state is already locked: %s", err.Error()) + // lock was obtained + return false + } + // lock was not obtained + return true +} + +// assertBackendStateLocked attempts to lock the backend state for a test. +// Failure indicates that the state was not locked and false is returned. +// True is returned if a lock was not obtained. +func assertBackendStateLocked(t *testing.T, b *Local) bool { + t.Helper() + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + // lock was not obtained + return true + } + t.Error("unexpected success locking state") + // lock was obtained + return false +} diff --git a/pkg/backend/operation_type.go b/pkg/backend/operation_type.go new file mode 100644 index 00000000000..61e398efb31 --- /dev/null +++ b/pkg/backend/operation_type.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backend + +//go:generate go run golang.org/x/tools/cmd/stringer -type=OperationType operation_type.go + +// OperationType is an enum used with Operation to specify the operation +// type to perform for OpenTofu. +type OperationType uint + +const ( + OperationTypeInvalid OperationType = iota + OperationTypeRefresh + OperationTypePlan + OperationTypeApply +) diff --git a/pkg/backend/operationtype_string.go b/pkg/backend/operationtype_string.go new file mode 100644 index 00000000000..fe84d848ddb --- /dev/null +++ b/pkg/backend/operationtype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT. 
+ +package backend + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OperationTypeInvalid-0] + _ = x[OperationTypeRefresh-1] + _ = x[OperationTypePlan-2] + _ = x[OperationTypeApply-3] +} + +const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply" + +var _OperationType_index = [...]uint8{0, 20, 40, 57, 75} + +func (i OperationType) String() string { + if i >= OperationType(len(_OperationType_index)-1) { + return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] +} diff --git a/pkg/backend/remote-state/azure/README.md b/pkg/backend/remote-state/azure/README.md new file mode 100644 index 00000000000..c9a1939260f --- /dev/null +++ b/pkg/backend/remote-state/azure/README.md @@ -0,0 +1,158 @@ +# Azure State Backend + +This README serves as a guide for developing the Azure State Backend. + +## Running Integration Tests + +The package contains multiple integration tests which need to be run with a live Azure account. This guide assumes you are using a fresh and empty Azure account/subscription. This way you'll be able to wipe it clean at the end without needing to worry about lingering resources. + +You'll also need the azure CLI installed and configured with `az login`. 
+ +First, you'll need to configure the CLI to use the right subscription, in case your account has multiple subscriptions: + +```bash +~> az account set --subscription +``` + +You'll also need to create a service account, via +```bash +~> az ad sp create-for-rbac --role="Owner" --scopes="/subscriptions/" +{ + "appId": "{APP_ID}", + "displayName": "{DISPLAY_NAME}", + "password": "{PASSWORD}", + "tenant": "{TENANT}" +} +``` +We'll also need a certificate for the service account, as there are tests which check certificate authentication. +```bash +# Generating key+cert pair. +~> openssl req -subj '/CN=myclientcertificate/O=MyCompany, Inc./ST=CA/C=US' \ + -new -newkey rsa:4096 -sha256 -days 3 -nodes -x509 -keyout client.key -out client.crt +# Creating a pfx bundle with the format required by the state backend. +~> openssl pkcs12 -certpbe PBE-SHA1-3DES -keypbe PBE-SHA1-3DES -export -macalg sha1 -password "pass:" -out client.pfx -inkey client.key -in client.crt +``` + +You will now have to **use the UI** to add this certificate. Go to `App Registrations` in the Azure Portal, pick the app with the previously generated `{DISPLAY_NAME}`, there you go into `Certificates & secrets`, Certificates tab, and `Upload certificate` with the `client.crt` file. + +You'll now want to compile the tests. We'll be running them later on a VM (so that tests checking IMDS authentication work). Go to the `internal/backend/remote-state/azure` directory and run: +```bash +~> GOOS=linux GOARCH=amd64 go test -c . +``` + +Create a resource group for your Azure VM: +```bash +~> az group create --name myResourceGroup --location eastus +``` + +Now, let's create the Azure VM. 
+```bash +~> az vm create --resource-group myResourceGroup --name myVM --image Ubuntu2204 --generate-ssh-keys --admin-username azureuser --admin-password +{ + "fqdns": "", + "id": "...", + "location": "eastus", + "macAddress": "...", + "powerState": "VM running", + "privateIpAddress": "...", + "publicIpAddress": "{PUBLIC_IP_ADDRESS}", + "resourceGroup": "myResourceGroup", + "zones": "" +} +``` +Assign an identity to the VM: +```bash +~> az vm identity assign --resource-group myResourceGroup --name myVM +{ + "systemAssignedIdentity": "{IDENTITY}", + "userAssignedIdentities": {} +} +``` + +and a role to that identity: +```bash +~> az role assignment create --assignee "{IDENTITY}" --role Owner --scope "/subscriptions/" +``` + +You'll now want to copy the compiled tests and certificate to the vm: +```bash +# This might hang for a bit, while the VM is booting up. +~> scp azure.test client.pfx azureuser@{PUBLIC_IP_ADDRESS}:~/ +~> ssh azureuser@{PUBLIC_IP_ADDRESS} +``` + +Now, on the Azure VM bash session we'll have to set up the environment variables for the tests: +```bash +export TF_AZURE_TEST=1 +export TF_RUNNING_IN_AZURE=1 +export ARM_SUBSCRIPTION_ID= +export ARM_LOCATION=eastus +export ARM_ENVIRONMENT=public +export ARM_TENANT_ID={TENANT} +export ARM_CLIENT_ID={APP_ID} +export ARM_CLIENT_SECRET={PASSWORD} +export ARM_CLIENT_CERTIFICATE_PATH=/home/azureuser/client.pfx +``` + +Finally, we can run the tests! +```bash +~> ./azure.test -test.v -test.timeout 99999s +``` +The tests should run for around 30 minutes. Enjoy your coffee! + +### Cleanup + +Now it's time to get rid of everything we've created. + +List all resource groups in your subscription: +```bash +~> az group list --subscription --query "[].name" +[ + "myResourceGroup", + "acctestRG-backend-23112414590786-k3nx", + "..." 
+] +``` + +For each of these, run: +```bash +~> az group delete --subscription --name --yes --no-wait --force-deletion-types "Microsoft.Compute/virtualMachines" +``` + +You'll also want to delete the service account: +```bash +~> az ad sp delete --id {APP_ID} +``` + +List ServicePrincipal role assignments in the subscription: +```bash +~> az role assignment list --subscription --query "[?principalType=='ServicePrincipal']" +[ + { + "canDelegate": null, + "condition": null, + "conditionVersion": null, + "description": null, + "id": "{ASSIGNMENT_ID}", + "name": "...", + "principalId": "...", + "principalType": "ServicePrincipal", + "resourceGroup": "", + "roleDefinitionId": "/subscriptions//providers/Microsoft.Authorization/roleDefinitions/...", + "scope": "/subscriptions/", + "type": "Microsoft.Authorization/roleAssignments" + }, + ... +] +``` + +and for each of those, delete it: +```bash +~> az role assignment delete --subscription --id {ASSIGNMENT_ID} +``` + +At this point, double-check that all resource groups are gone: +```bash +~> az group list --subscription --query "[].name" +[] +``` diff --git a/pkg/backend/remote-state/azure/arm_client.go b/pkg/backend/remote-state/azure/arm_client.go new file mode 100644 index 00000000000..0426ad928e7 --- /dev/null +++ b/pkg/backend/remote-state/azure/arm_client.go @@ -0,0 +1,251 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources" + armStorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/authentication" + "github.com/hashicorp/go-azure-helpers/sender" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/version" + "github.com/manicminer/hamilton/environments" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers" +) + +type ArmClient struct { + // These Clients are only initialized if an Access Key isn't provided + groupsClient *resources.GroupsClient + storageAccountsClient *armStorage.AccountsClient + containersClient *containers.Client + blobsClient *blobs.Client + + // azureAdStorageAuth is only here if we're using AzureAD Authentication but is an Authorizer for Storage + azureAdStorageAuth *autorest.Authorizer + + accessKey string + environment azure.Environment + resourceGroupName string + storageAccountName string + sasToken string +} + +func buildArmClient(ctx context.Context, config BackendConfig) (*ArmClient, error) { + env, err := authentication.AzureEnvironmentByNameFromEndpoint(ctx, config.MetadataHost, config.Environment) + if err != nil { + return nil, err + } + + client := ArmClient{ + environment: *env, + resourceGroupName: config.ResourceGroupName, + storageAccountName: config.StorageAccountName, + } + + // if we have an Access Key - we don't need the other clients + if config.AccessKey != "" { + client.accessKey = config.AccessKey + return &client, nil + } + + // likewise with a SAS token + if config.SasToken != "" { + client.sasToken = config.SasToken + return &client, nil + } + + builder := 
authentication.Builder{ + ClientID: config.ClientID, + SubscriptionID: config.SubscriptionID, + TenantID: config.TenantID, + CustomResourceManagerEndpoint: config.CustomResourceManagerEndpoint, + MetadataHost: config.MetadataHost, + Environment: config.Environment, + ClientSecretDocsLink: "https://registry.opentofu.org/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret", + + // Service Principal (Client Certificate) + ClientCertPassword: config.ClientCertificatePassword, + ClientCertPath: config.ClientCertificatePath, + + // Service Principal (Client Secret) + ClientSecret: config.ClientSecret, + + // Managed Service Identity + MsiEndpoint: config.MsiEndpoint, + + // OIDC + IDToken: config.OIDCToken, + IDTokenFilePath: config.OIDCTokenFilePath, + IDTokenRequestURL: config.OIDCRequestURL, + IDTokenRequestToken: config.OIDCRequestToken, + + // Feature Toggles + SupportsAzureCliToken: true, + SupportsClientCertAuth: true, + SupportsClientSecretAuth: true, + SupportsManagedServiceIdentity: config.UseMsi, + SupportsOIDCAuth: config.UseOIDC, + UseMicrosoftGraph: true, + } + armConfig, err := builder.Build() + if err != nil { + return nil, fmt.Errorf("Error building ARM Config: %w", err) + } + + oauthConfig, err := armConfig.BuildOAuthConfig(env.ActiveDirectoryEndpoint) + if err != nil { + return nil, err + } + + hamiltonEnv, err := environments.EnvironmentFromString(config.Environment) + if err != nil { + return nil, err + } + + sender := sender.BuildSender("backend/remote-state/azure") + log.Printf("[DEBUG] Obtaining an MSAL / Microsoft Graph token for Resource Manager..") + auth, err := armConfig.GetMSALToken(ctx, hamiltonEnv.ResourceManager, sender, oauthConfig, env.TokenAudience) + if err != nil { + return nil, err + } + + if config.UseAzureADAuthentication { + log.Printf("[DEBUG] Obtaining an MSAL / Microsoft Graph token for Storage..") + storageAuth, err := armConfig.GetMSALToken(ctx, hamiltonEnv.Storage, sender, oauthConfig, 
env.ResourceIdentifiers.Storage) + if err != nil { + return nil, err + } + client.azureAdStorageAuth = &storageAuth + } + + accountsClient := armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, armConfig.SubscriptionID) + client.configureClient(&accountsClient.Client, auth) + client.storageAccountsClient = &accountsClient + + groupsClient := resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, armConfig.SubscriptionID) + client.configureClient(&groupsClient.Client, auth) + client.groupsClient = &groupsClient + + return &client, nil +} + +func (c ArmClient) getBlobClient(ctx context.Context) (*blobs.Client, error) { + if c.sasToken != "" { + log.Printf("[DEBUG] Building the Blob Client from a SAS Token") + storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken) + if err != nil { + return nil, fmt.Errorf("Error building SAS Token Authorizer: %w", err) + } + + blobsClient := blobs.NewWithEnvironment(c.environment) + c.configureClient(&blobsClient.Client, storageAuth) + return &blobsClient, nil + } + + if c.azureAdStorageAuth != nil { + blobsClient := blobs.NewWithEnvironment(c.environment) + c.configureClient(&blobsClient.Client, *c.azureAdStorageAuth) + return &blobsClient, nil + } + + accessKey := c.accessKey + if accessKey == "" { + log.Printf("[DEBUG] Building the Blob Client from an Access Token (using user credentials)") + keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName, "") + if err != nil { + return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %w", c.storageAccountName, err) + } + + if keys.Keys == nil { + return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName) + } + + accessKeys := *keys.Keys + accessKey = *accessKeys[0].Value + } + + storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey) + if err != nil { + return nil, fmt.Errorf("Error building Shared Key Authorizer: %w", err) + 
} + + blobsClient := blobs.NewWithEnvironment(c.environment) + c.configureClient(&blobsClient.Client, storageAuth) + return &blobsClient, nil +} + +func (c ArmClient) getContainersClient(ctx context.Context) (*containers.Client, error) { + if c.sasToken != "" { + log.Printf("[DEBUG] Building the Container Client from a SAS Token") + storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken) + if err != nil { + return nil, fmt.Errorf("Error building SAS Token Authorizer: %w", err) + } + + containersClient := containers.NewWithEnvironment(c.environment) + c.configureClient(&containersClient.Client, storageAuth) + return &containersClient, nil + } + + if c.azureAdStorageAuth != nil { + containersClient := containers.NewWithEnvironment(c.environment) + c.configureClient(&containersClient.Client, *c.azureAdStorageAuth) + return &containersClient, nil + } + + accessKey := c.accessKey + if accessKey == "" { + log.Printf("[DEBUG] Building the Container Client from an Access Token (using user credentials)") + keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName, "") + if err != nil { + return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %w", c.storageAccountName, err) + } + + if keys.Keys == nil { + return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName) + } + + accessKeys := *keys.Keys + accessKey = *accessKeys[0].Value + } + + storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey) + if err != nil { + return nil, fmt.Errorf("Error building Shared Key Authorizer: %w", err) + } + + containersClient := containers.NewWithEnvironment(c.environment) + c.configureClient(&containersClient.Client, storageAuth) + return &containersClient, nil +} + +func (c *ArmClient) configureClient(client *autorest.Client, auth autorest.Authorizer) { + client.UserAgent = buildUserAgent() + client.Authorizer = auth + client.Sender = buildSender() + 
client.SkipResourceProviderRegistration = false + client.PollingDuration = 60 * time.Minute +} + +func buildUserAgent() string { + userAgent := httpclient.OpenTofuUserAgent(version.Version) + + // append the CloudShell version to the user agent if it exists + if azureAgent := os.Getenv("AZURE_HTTP_USER_AGENT"); azureAgent != "" { + userAgent = fmt.Sprintf("%s %s", userAgent, azureAgent) + } + + return userAgent +} diff --git a/pkg/backend/remote-state/azure/backend.go b/pkg/backend/remote-state/azure/backend.go new file mode 100644 index 00000000000..0f4ff0c58c1 --- /dev/null +++ b/pkg/backend/remote-state/azure/backend.go @@ -0,0 +1,278 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "fmt" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" +) + +// New creates a new backend for Azure remote state. 
+func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "storage_account_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the storage account.", + }, + + "container_name": { + Type: schema.TypeString, + Required: true, + Description: "The container name.", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The blob key.", + }, + + "metadata_host": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOST", ""), + Description: "The Metadata URL which will be used to obtain the Cloud Environment.", + }, + + "environment": { + Type: schema.TypeString, + Optional: true, + Description: "The Azure cloud environment.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"), + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "The access key.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), + }, + + "sas_token": { + Type: schema.TypeString, + Optional: true, + Description: "A SAS Token used to interact with the Blob Storage Account.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SAS_TOKEN", ""), + }, + + "snapshot": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable/Disable automatic blob snapshotting", + DefaultFunc: schema.EnvDefaultFunc("ARM_SNAPSHOT", false), + }, + + "resource_group_name": { + Type: schema.TypeString, + Optional: true, + Description: "The resource group name.", + }, + + "client_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom Endpoint used to access the Azure Resource Manager API's.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""), + }, + + "subscription_id": { + Type: schema.TypeString, + Optional: true, + 
Description: "The Subscription ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), + }, + + "tenant_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), + }, + + // Service Principal (Client Certificate) specific + "client_certificate_password": { + Type: schema.TypeString, + Optional: true, + Description: "The password associated with the Client Certificate specified in `client_certificate_path`", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""), + }, + "client_certificate_path": { + Type: schema.TypeString, + Optional: true, + Description: "The path to the PFX file used as the Client Certificate when authenticating as a Service Principal", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""), + }, + + // Service Principal (Client Secret) specific + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), + }, + + // Managed Service Identity specific + "use_msi": { + Type: schema.TypeBool, + Optional: true, + Description: "Should Managed Service Identity be used?", + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false), + }, + "msi_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "The Managed Service Identity Endpoint.", + DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""), + }, + + // OIDC auth specific fields + "use_oidc": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false), + Description: "Allow OIDC to be used for authentication", + }, + "oidc_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""), + Description: "A generic JWT token that can be used for OIDC authentication. 
Should not be used in conjunction with `oidc_request_token`.", + }, + "oidc_token_file_path": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""), + Description: "Path to file containing a generic JWT token that can be used for OIDC authentication. Should not be used in conjunction with `oidc_request_token`.", + }, + "oidc_request_url": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""), + Description: "The URL of the OIDC provider from which to request an ID token. Needs to be used in conjunction with `oidc_request_token`. This is meant to be used for Github Actions.", + }, + "oidc_request_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""), + Description: "The bearer token to use for the request to the OIDC providers `oidc_request_url` URL to fetch an ID token. Needs to be used in conjunction with `oidc_request_url`. 
This is meant to be used for Github Actions.", + }, + + // Feature Flags + "use_azuread_auth": { + Type: schema.TypeBool, + Optional: true, + Description: "Should OpenTofu use AzureAD Authentication to access the Blob?", + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_AZUREAD", false), + }, + }, + } + + result := &Backend{Backend: s, encryption: enc} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + // The fields below are set from configure + armClient *ArmClient + containerName string + keyName string + accountName string + snapshot bool +} + +type BackendConfig struct { + // Required + StorageAccountName string + + // Optional + AccessKey string + ClientID string + ClientCertificatePassword string + ClientCertificatePath string + ClientSecret string + CustomResourceManagerEndpoint string + MetadataHost string + Environment string + MsiEndpoint string + OIDCToken string + OIDCTokenFilePath string + OIDCRequestURL string + OIDCRequestToken string + ResourceGroupName string + SasToken string + SubscriptionID string + TenantID string + UseMsi bool + UseOIDC bool + UseAzureADAuthentication bool +} + +func (b *Backend) configure(ctx context.Context) error { + if b.containerName != "" { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + b.containerName = data.Get("container_name").(string) + b.accountName = data.Get("storage_account_name").(string) + b.keyName = data.Get("key").(string) + b.snapshot = data.Get("snapshot").(bool) + + config := BackendConfig{ + AccessKey: data.Get("access_key").(string), + ClientID: data.Get("client_id").(string), + ClientCertificatePassword: data.Get("client_certificate_password").(string), + ClientCertificatePath: data.Get("client_certificate_path").(string), + ClientSecret: data.Get("client_secret").(string), + CustomResourceManagerEndpoint: data.Get("endpoint").(string), + MetadataHost: 
data.Get("metadata_host").(string),
+		Environment:                   data.Get("environment").(string),
+		MsiEndpoint:                   data.Get("msi_endpoint").(string),
+		OIDCToken:                     data.Get("oidc_token").(string),
+		OIDCTokenFilePath:             data.Get("oidc_token_file_path").(string),
+		OIDCRequestURL:                data.Get("oidc_request_url").(string),
+		OIDCRequestToken:              data.Get("oidc_request_token").(string),
+		ResourceGroupName:             data.Get("resource_group_name").(string),
+		SasToken:                      data.Get("sas_token").(string),
+		StorageAccountName:            data.Get("storage_account_name").(string),
+		SubscriptionID:                data.Get("subscription_id").(string),
+		TenantID:                      data.Get("tenant_id").(string),
+		UseMsi:                        data.Get("use_msi").(bool),
+		UseOIDC:                       data.Get("use_oidc").(bool),
+		UseAzureADAuthentication:      data.Get("use_azuread_auth").(bool),
+	}
+
+	thingsNeededToLookupAccessKeySpecified := config.AccessKey == "" && config.SasToken == "" && config.ResourceGroupName == ""
+	if thingsNeededToLookupAccessKeySpecified && !config.UseAzureADAuthentication { // validate before any auth round-trips
+		return fmt.Errorf("Either an Access Key / SAS Token or the Resource Group for the Storage Account must be specified - or Azure AD Authentication must be enabled")
+	}
+
+	armClient, err := buildArmClient(ctx, config) // honor the caller's context (was context.TODO())
+	if err != nil {
+		return err
+	}
+
+	b.armClient = armClient
+	return nil
+}
diff --git a/pkg/backend/remote-state/azure/backend_state.go b/pkg/backend/remote-state/azure/backend_state.go
new file mode 100644
index 00000000000..c1ab2bdc4a5
--- /dev/null
+++ b/pkg/backend/remote-state/azure/backend_state.go
@@ -0,0 +1,172 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers" +) + +const ( + // This will be used as directory name, the odd looking colon is simply to + // reduce the chance of name conflicts with existing objects. + keyEnvPrefix = "env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + prefix := b.keyName + keyEnvPrefix + params := containers.ListBlobsInput{ + Prefix: &prefix, + } + + ctx := context.TODO() + client, err := b.armClient.getContainersClient(ctx) + if err != nil { + return nil, err + } + resp, err := client.ListBlobs(ctx, b.armClient.storageAccountName, b.containerName, params) + if err != nil { + return nil, err + } + + envs := map[string]struct{}{} + for _, obj := range resp.Blobs.Blobs { + key := obj.Name + if strings.HasPrefix(key, prefix) { + name := strings.TrimPrefix(key, prefix) + // we store the state in a key, not a directory + if strings.Contains(name, "/") { + continue + } + + envs[name] = struct{}{} + } + } + + result := []string{backend.DefaultStateName} + for name := range envs { + result = append(result, name) + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + ctx := context.TODO() + client, err := b.armClient.getBlobClient(ctx) + if err != nil { + return err + } + + if resp, err := client.Delete(ctx, b.armClient.storageAccountName, b.containerName, b.path(name), blobs.DeleteInput{}); err != nil { + if resp.Response.StatusCode != 404 { + return err + } + } + + return 
nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + ctx := context.TODO() + blobClient, err := b.armClient.getBlobClient(ctx) + if err != nil { + return nil, err + } + + client := &RemoteClient{ + giovanniBlobClient: *blobClient, + containerName: b.containerName, + keyName: b.path(name), + accountName: b.accountName, + snapshot: b.snapshot, + } + + stateMgr := remote.NewState(client, b.encryption) + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + return nil, err + } + //if this isn't the default state name, we need to create the object so + //it's listed by States. + if v := stateMgr.State(); v == nil { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock azure state: %w", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + //if this isn't the default state name, we need to create the object so + //it's listed by States. 
+ if v := stateMgr.State(); v == nil { + // If we have no state, we have to create an empty state + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return b.keyName + keyEnvPrefix + name +} + +const errStateUnlock = ` +Error unlocking Azure state. Lock ID: %s + +Error: %w + +You may have to force-unlock this state in order to use it again. +` diff --git a/pkg/backend/remote-state/azure/backend_test.go b/pkg/backend/remote-state/azure/backend_test.go new file mode 100644 index 00000000000..a125a0ee2d2 --- /dev/null +++ b/pkg/backend/remote-state/azure/backend_test.go @@ -0,0 +1,344 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "os" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/acctest" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + // This test just instantiates the client. Shouldn't make any actual + // requests nor incur any costs. 
+ + config := map[string]interface{}{ + "storage_account_name": "tfaccount", + "container_name": "tfcontainer", + "key": "state", + "snapshot": false, + // Access Key must be Base64 + "access_key": "QUNDRVNTX0tFWQ0K", + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + if b.containerName != "tfcontainer" { + t.Fatalf("Incorrect bucketName was populated") + } + if b.keyName != "state" { + t.Fatalf("Incorrect keyName was populated") + } + if b.snapshot != false { + t.Fatalf("Incorrect snapshot was populated") + } +} + +func TestAccBackendAccessKeyBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + armClient.destroyTestResources(ctx, res) + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendSASTokenBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey) + if err != nil { + t.Fatalf("Error building 
SAS Token: %+v", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "sas_token": *sasToken, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendOIDCBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "use_oidc": true, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendManagedServiceIdentityBasic(t *testing.T) { + testAccAzureBackendRunningInAzure(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + 
"container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "use_msi": true, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientCertificateBasic(t *testing.T) { + testAccAzureBackend(t) + + clientCertPassword := os.Getenv("ARM_CLIENT_CERTIFICATE_PASSWORD") + clientCertPath := os.Getenv("ARM_CLIENT_CERTIFICATE_PATH") + if clientCertPath == "" { + t.Skip("Skipping since `ARM_CLIENT_CERTIFICATE_PATH` is not specified!") + } + + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_certificate_password": clientCertPassword, + "client_certificate_path": clientCertPath, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientSecretBasic(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + 
defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendServicePrincipalClientSecretCustomEndpoint(t *testing.T) { + testAccAzureBackend(t) + + // this is only applicable for Azure Stack. + endpoint := os.Getenv("ARM_ENDPOINT") + if endpoint == "" { + t.Skip("Skipping as ARM_ENDPOINT isn't configured") + } + + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "resource_group_name": res.resourceGroup, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": endpoint, + })).(*Backend) + + backend.TestBackendStates(t, b) +} + +func TestAccBackendAccessKeyLocked(t 
*testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) + + backend.TestBackendStateLocksInWS(t, b1, b2, "foo") + backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") +} + +func TestAccBackendServicePrincipalLocked(t *testing.T) { + testAccAzureBackend(t) + rs := acctest.RandString(4) + res := testResourceNames(rs, "testState") + armClient := buildTestClient(t, res) + + ctx := context.TODO() + err := armClient.buildTestResources(ctx, &res) + defer armClient.destroyTestResources(ctx, res) + if err != nil { + t.Fatalf("Error creating Test Resources: %q", err) + } + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + 
"key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "storage_account_name": res.storageAccountName, + "container_name": res.storageContainerName, + "key": res.storageKeyName, + "access_key": res.storageAccountAccessKey, + "subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"), + "tenant_id": os.Getenv("ARM_TENANT_ID"), + "client_id": os.Getenv("ARM_CLIENT_ID"), + "client_secret": os.Getenv("ARM_CLIENT_SECRET"), + "environment": os.Getenv("ARM_ENVIRONMENT"), + "endpoint": os.Getenv("ARM_ENDPOINT"), + })).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) + + backend.TestBackendStateLocksInWS(t, b1, b2, "foo") + backend.TestBackendStateForceUnlockInWS(t, b1, b2, "foo") +} diff --git a/pkg/backend/remote-state/azure/client.go b/pkg/backend/remote-state/azure/client.go new file mode 100644 index 00000000000..cabf7769bb4 --- /dev/null +++ b/pkg/backend/remote-state/azure/client.go @@ -0,0 +1,281 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "log" + "net/http" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" +) + +const ( + leaseHeader = "x-ms-lease-id" + // Must be lower case + lockInfoMetaKey = "terraformlockid" +) + +type RemoteClient struct { + giovanniBlobClient blobs.Client + accountName string + containerName string + keyName string + leaseID string + snapshot bool +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + options := blobs.GetInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + blob, err := c.giovanniBlobClient.Get(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + if blob.Response.IsHTTPStatus(http.StatusNotFound) { + return nil, nil + } + return nil, err + } + + payload := &remote.Payload{ + Data: blob.Contents, + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + getOptions := blobs.GetPropertiesInput{} + setOptions := blobs.SetPropertiesInput{} + putOptions := blobs.PutBlockBlobInput{} + + options := blobs.GetInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + getOptions.LeaseID = &c.leaseID + setOptions.LeaseID = &c.leaseID + putOptions.LeaseID = &c.leaseID + } + + ctx := context.TODO() + + if c.snapshot { + snapshotInput := blobs.SnapshotInput{LeaseID: options.LeaseID} + + log.Printf("[DEBUG] Snapshotting existing Blob %q (Container %q / Account %q)", c.keyName, c.containerName, c.accountName) + if _, err := c.giovanniBlobClient.Snapshot(ctx, c.accountName, c.containerName, c.keyName, snapshotInput); err != nil { + return fmt.Errorf("error 
snapshotting Blob %q (Container %q / Account %q): %w", c.keyName, c.containerName, c.accountName, err) + } + + log.Print("[DEBUG] Created blob snapshot") + } + + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, getOptions) + if err != nil { + if blob.StatusCode != 404 { + return err + } + } + + contentType := "application/json" + putOptions.Content = &data + putOptions.ContentType = &contentType + putOptions.MetaData = blob.MetaData + _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putOptions) + + return err +} + +func (c *RemoteClient) Delete() error { + options := blobs.DeleteInput{} + + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + resp, err := c.giovanniBlobClient.Delete(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + if !resp.IsHTTPStatus(http.StatusNotFound) { + return err + } + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) + info.Path = stateName + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + getLockInfoErr := func(err error) error { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + return &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + } + + leaseOptions := blobs.AcquireLeaseInput{ + ProposedLeaseID: &info.ID, + LeaseDuration: -1, + } + ctx := context.TODO() + + // obtain properties to see if the blob lease is already in use. 
If the blob doesn't exist, create it + properties, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{}) + if err != nil { + // error if we had issues getting the blob + if !properties.Response.IsHTTPStatus(http.StatusNotFound) { + return "", getLockInfoErr(err) + } + // if we don't find the blob, we need to build it + + contentType := "application/json" + putGOptions := blobs.PutBlockBlobInput{ + ContentType: &contentType, + } + + _, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putGOptions) + if err != nil { + return "", getLockInfoErr(err) + } + } + + // if the blob is already locked then error + if properties.LeaseStatus == blobs.Locked { + return "", getLockInfoErr(fmt.Errorf("state blob is already locked")) + } + + leaseID, err := c.giovanniBlobClient.AcquireLease(ctx, c.accountName, c.containerName, c.keyName, leaseOptions) + if err != nil { + return "", getLockInfoErr(err) + } + + info.ID = leaseID.LeaseID + c.leaseID = leaseID.LeaseID + + if err := c.writeLockInfo(info); err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + options := blobs.GetPropertiesInput{} + if c.leaseID != "" { + options.LeaseID = &c.leaseID + } + + ctx := context.TODO() + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, options) + if err != nil { + return nil, err + } + + raw := blob.MetaData[lockInfoMetaKey] + if raw == "" { + return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) + } + + data, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + lockInfo := &statemgr.LockInfo{} + err = json.Unmarshal(data, lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +// writes info to blob meta data, deletes metadata entry if info is nil +func (c *RemoteClient) writeLockInfo(info 
*statemgr.LockInfo) error { + ctx := context.TODO() + blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{LeaseID: &c.leaseID}) + if err != nil { + return err + } + + if info == nil { + delete(blob.MetaData, lockInfoMetaKey) + } else { + value := base64.StdEncoding.EncodeToString(info.Marshal()) + blob.MetaData[lockInfoMetaKey] = value + } + + opts := blobs.SetMetaDataInput{ + LeaseID: &c.leaseID, + MetaData: blob.MetaData, + } + + _, err = c.giovanniBlobClient.SetMetaData(ctx, c.accountName, c.containerName, c.keyName, opts) + return err +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &statemgr.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %w", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + c.leaseID = lockInfo.ID + if err := c.writeLockInfo(nil); err != nil { + lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %w", err) + return lockErr + } + + ctx := context.TODO() + _, err = c.giovanniBlobClient.ReleaseLease(ctx, c.accountName, c.containerName, c.keyName, id) + if err != nil { + lockErr.Err = err + return lockErr + } + + c.leaseID = "" + + return nil +} diff --git a/pkg/backend/remote-state/azure/client_test.go b/pkg/backend/remote-state/azure/client_test.go new file mode 100644 index 00000000000..4c3812810a1 --- /dev/null +++ b/pkg/backend/remote-state/azure/client_test.go @@ -0,0 +1,317 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package azure

import (
	"context"
	"os"
	"testing"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/encryption"
	"github.com/kubegems/opentofu/pkg/legacy/helper/acctest"
	"github.com/kubegems/opentofu/pkg/states/remote"
	"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)

// Compile-time check that RemoteClient satisfies the remote state client
// interfaces.
func TestRemoteClient_impl(t *testing.T) {
	var _ remote.Client = new(RemoteClient)
	var _ remote.ClientLocker = new(RemoteClient)
}

// Acceptance test: state client against a real storage account using a
// shared access key. Requires TF_ACC/TF_AZURE_TEST and ARM_* env vars.
func TestRemoteClientAccessKeyBasic(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	// defer runs even when buildTestResources failed part-way, cleaning up
	// whatever was created before the error.
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"access_key":           res.storageAccountAccessKey,
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	state, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, state.(*remote.State).Client)
}

// Acceptance test: authentication via Managed Service Identity; only runs
// inside Azure (TF_RUNNING_IN_AZURE).
func TestRemoteClientManagedServiceIdentityBasic(t *testing.T) {
	testAccAzureBackendRunningInAzure(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"resource_group_name":  res.resourceGroup,
		"use_msi":              true,
		"subscription_id":      os.Getenv("ARM_SUBSCRIPTION_ID"),
		"tenant_id":            os.Getenv("ARM_TENANT_ID"),
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	state, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, state.(*remote.State).Client)
}

// Acceptance test: authentication via an account SAS token generated from
// the storage account access key.
func TestRemoteClientSasTokenBasic(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	sasToken, err := buildSasToken(res.storageAccountName, res.storageAccountAccessKey)
	if err != nil {
		t.Fatalf("Error building SAS Token: %+v", err)
	}

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"sas_token":            *sasToken,
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	state, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, state.(*remote.State).Client)
}

// Acceptance test: authentication via a service principal
// (client id/secret).
func TestRemoteClientServicePrincipalBasic(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"resource_group_name":  res.resourceGroup,
		"subscription_id":      os.Getenv("ARM_SUBSCRIPTION_ID"),
		"tenant_id":            os.Getenv("ARM_TENANT_ID"),
		"client_id":            os.Getenv("ARM_CLIENT_ID"),
		"client_secret":        os.Getenv("ARM_CLIENT_SECRET"),
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	state, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestClient(t, state.(*remote.State).Client)
}

// Acceptance test: two access-key backends contending for the same state
// must serialize via the blob-lease lock.
func TestRemoteClientAccessKeyLocks(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"access_key":           res.storageAccountAccessKey,
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"access_key":           res.storageAccountAccessKey,
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	s1, err := b1.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	s2, err := b2.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

// Acceptance test: same lock contention scenario, authenticated with a
// service principal instead of an access key.
func TestRemoteClientServicePrincipalLocks(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"resource_group_name":  res.resourceGroup,
		"subscription_id":      os.Getenv("ARM_SUBSCRIPTION_ID"),
		"tenant_id":            os.Getenv("ARM_TENANT_ID"),
		"client_id":            os.Getenv("ARM_CLIENT_ID"),
		"client_secret":        os.Getenv("ARM_CLIENT_SECRET"),
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"storage_account_name": res.storageAccountName,
		"container_name":       res.storageContainerName,
		"key":                  res.storageKeyName,
		"resource_group_name":  res.resourceGroup,
		"subscription_id":      os.Getenv("ARM_SUBSCRIPTION_ID"),
		"tenant_id":            os.Getenv("ARM_TENANT_ID"),
		"client_id":            os.Getenv("ARM_CLIENT_ID"),
		"client_secret":        os.Getenv("ARM_CLIENT_SECRET"),
		"environment":          os.Getenv("ARM_ENVIRONMENT"),
		"endpoint":             os.Getenv("ARM_ENDPOINT"),
	})).(*Backend)

	s1, err := b1.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	s2, err := b2.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}

	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

// Acceptance test: RemoteClient.Put must carry existing blob metadata
// forward (the lock info lives there), not wipe it on each write.
func TestPutMaintainsMetaData(t *testing.T) {
	testAccAzureBackend(t)
	rs := acctest.RandString(4)
	res := testResourceNames(rs, "testState")
	armClient := buildTestClient(t, res)

	ctx := context.TODO()
	err := armClient.buildTestResources(ctx, &res)
	defer armClient.destroyTestResources(ctx, res)
	if err != nil {
		t.Fatalf("Error creating Test Resources: %q", err)
	}

	headerName := "acceptancetest"
	expectedValue := "f3b56bad-33ad-4b93-a600-7a66e9cbd1eb"

	client, err := armClient.getBlobClient(ctx)
	if err != nil {
		t.Fatalf("Error building Blob Client: %+v", err)
	}

	_, err = client.PutBlockBlob(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.PutBlockBlobInput{})
	if err != nil {
		t.Fatalf("Error Creating Block Blob: %+v", err)
	}

	blobReference, err := client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
	if err != nil {
		t.Fatalf("Error loading MetaData: %+v", err)
	}

	blobReference.MetaData[headerName] = expectedValue
	opts := blobs.SetMetaDataInput{
		MetaData: blobReference.MetaData,
	}
	_, err = client.SetMetaData(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, opts)
	if err != nil {
		t.Fatalf("Error setting MetaData: %+v", err)
	}

	// update the metadata using the Backend
	remoteClient := RemoteClient{
		keyName:       res.storageKeyName,
		containerName: res.storageContainerName,
		accountName:   res.storageAccountName,

		giovanniBlobClient: *client,
	}

	bytes := []byte(acctest.RandString(20))
	err = remoteClient.Put(bytes)
	if err != nil {
		t.Fatalf("Error putting data: %+v", err)
	}

	// Verify it still exists
	blobReference, err = client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
	if err != nil {
		t.Fatalf("Error loading MetaData: %+v", err)
	}

	if blobReference.MetaData[headerName] != expectedValue {
		t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.MetaData)
	}
}
diff --git a/pkg/backend/remote-state/azure/helpers_test.go b/pkg/backend/remote-state/azure/helpers_test.go
new file mode 100644
index 00000000000..274c1cd6f07
--- /dev/null
+++ b/pkg/backend/remote-state/azure/helpers_test.go
@@ -0,0 +1,233 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package azure

import (
	"context"
	"fmt"
	"log"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
	armStorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-01-01/storage"
	"github.com/Azure/go-autorest/autorest"
	sasStorage "github.com/hashicorp/go-azure-helpers/storage"
	"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
)

const (
	// required for Azure Stack
	sasSignedVersion = "2015-04-05"
)

// verify that we are doing ACC tests or the Azure tests specifically
func testAccAzureBackend(t *testing.T) {
	skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_AZURE_TEST") == ""
	if skip {
		t.Log("azure backend tests require setting TF_ACC or TF_AZURE_TEST")
		t.Skip()
	}
}

// these kind of tests can only run when within Azure (e.g. MSI)
func testAccAzureBackendRunningInAzure(t *testing.T) {
	testAccAzureBackend(t)

	if os.Getenv("TF_RUNNING_IN_AZURE") == "" {
		t.Skip("Skipping test since not running in Azure")
	}
}

// these kind of tests can only run when within GitHub Actions (e.g.
OIDC) +func testAccAzureBackendRunningInGitHubActions(t *testing.T) { + testAccAzureBackend(t) + + if os.Getenv("TF_RUNNING_IN_GITHUB_ACTIONS") == "" { + t.Skip("Skipping test since not running in GitHub Actions") + } +} + +func buildTestClient(t *testing.T, res resourceNames) *ArmClient { + subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID") + tenantID := os.Getenv("ARM_TENANT_ID") + clientID := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + msiEnabled := strings.EqualFold(os.Getenv("ARM_USE_MSI"), "true") + environment := os.Getenv("ARM_ENVIRONMENT") + + hasCredentials := (clientID != "" && clientSecret != "") || msiEnabled + if !hasCredentials { + t.Fatal("Azure credentials missing or incomplete") + } + + if subscriptionID == "" { + t.Fatalf("Missing ARM_SUBSCRIPTION_ID") + } + + if tenantID == "" { + t.Fatalf("Missing ARM_TENANT_ID") + } + + if environment == "" { + t.Fatalf("Missing ARM_ENVIRONMENT") + } + + // location isn't used in this method, but is in the other test methods + location := os.Getenv("ARM_LOCATION") + if location == "" { + t.Fatalf("Missing ARM_LOCATION") + } + + // Endpoint is optional (only for Stack) + endpoint := os.Getenv("ARM_ENDPOINT") + + armClient, err := buildArmClient(context.TODO(), BackendConfig{ + SubscriptionID: subscriptionID, + TenantID: tenantID, + ClientID: clientID, + ClientSecret: clientSecret, + CustomResourceManagerEndpoint: endpoint, + Environment: environment, + ResourceGroupName: res.resourceGroup, + StorageAccountName: res.storageAccountName, + UseMsi: msiEnabled, + UseAzureADAuthentication: res.useAzureADAuth, + }) + if err != nil { + t.Fatalf("Failed to build ArmClient: %+v", err) + } + + return armClient +} + +func buildSasToken(accountName, accessKey string) (*string, error) { + // grant full access to Objects in the Blob Storage Account + permissions := "rwdlacup" // full control + resourceTypes := "sco" // service, container, object + services := "b" // blob + + // Details on how 
to do this are here: + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + signedProtocol := "https,http" + signedIp := "" + signedVersion := sasSignedVersion + + utcNow := time.Now().UTC() + + // account for servers being up to 5 minutes out + startDate := utcNow.Add(time.Minute * -5).Format(time.RFC3339) + endDate := utcNow.Add(time.Hour * 24).Format(time.RFC3339) + + sasToken, err := sasStorage.ComputeAccountSASToken(accountName, accessKey, permissions, services, resourceTypes, + startDate, endDate, signedProtocol, signedIp, signedVersion) + if err != nil { + return nil, fmt.Errorf("Error computing SAS Token: %w", err) + } + log.Printf("SAS Token should be %q", sasToken) + return &sasToken, nil +} + +type resourceNames struct { + resourceGroup string + location string + storageAccountName string + storageContainerName string + storageKeyName string + storageAccountAccessKey string + useAzureADAuth bool +} + +func testResourceNames(rString string, keyName string) resourceNames { + return resourceNames{ + resourceGroup: fmt.Sprintf("acctestRG-backend-%s-%s", strings.Replace(time.Now().Local().Format("060102150405.00"), ".", "", 1), rString), + location: os.Getenv("ARM_LOCATION"), + storageAccountName: fmt.Sprintf("acctestsa%s", rString), + storageContainerName: "acctestcont", + storageKeyName: keyName, + useAzureADAuth: false, + } +} + +func (c *ArmClient) buildTestResources(ctx context.Context, names *resourceNames) error { + log.Printf("Creating Resource Group %q", names.resourceGroup) + _, err := c.groupsClient.CreateOrUpdate(ctx, names.resourceGroup, resources.Group{Location: &names.location}) + if err != nil { + return fmt.Errorf("failed to create test resource group: %w", err) + } + + log.Printf("Creating Storage Account %q in Resource Group %q", names.storageAccountName, names.resourceGroup) + storageProps := armStorage.AccountCreateParameters{ + Sku: &armStorage.Sku{ + Name: armStorage.StandardLRS, + Tier: 
armStorage.Standard, + }, + Location: &names.location, + } + if names.useAzureADAuth { + allowSharedKeyAccess := false + storageProps.AccountPropertiesCreateParameters = &armStorage.AccountPropertiesCreateParameters{ + AllowSharedKeyAccess: &allowSharedKeyAccess, + } + } + future, err := c.storageAccountsClient.Create(ctx, names.resourceGroup, names.storageAccountName, storageProps) + if err != nil { + return fmt.Errorf("failed to create test storage account: %w", err) + } + + err = future.WaitForCompletionRef(ctx, c.storageAccountsClient.Client) + if err != nil { + return fmt.Errorf("failed waiting for the creation of storage account: %w", err) + } + + containersClient := containers.NewWithEnvironment(c.environment) + if names.useAzureADAuth { + containersClient.Client.Authorizer = *c.azureAdStorageAuth + } else { + log.Printf("fetching access key for storage account") + resp, err := c.storageAccountsClient.ListKeys(ctx, names.resourceGroup, names.storageAccountName, "") + if err != nil { + return fmt.Errorf("failed to list storage account keys %w:", err) + } + + keys := *resp.Keys + accessKey := *keys[0].Value + names.storageAccountAccessKey = accessKey + + storageAuth, err := autorest.NewSharedKeyAuthorizer(names.storageAccountName, accessKey, autorest.SharedKey) + if err != nil { + return fmt.Errorf("Error building Authorizer: %w", err) + } + + containersClient.Client.Authorizer = storageAuth + } + + log.Printf("Creating Container %q in Storage Account %q (Resource Group %q)", names.storageContainerName, names.storageAccountName, names.resourceGroup) + _, err = containersClient.Create(ctx, names.storageAccountName, names.storageContainerName, containers.CreateInput{}) + if err != nil { + return fmt.Errorf("failed to create storage container: %w", err) + } + + return nil +} + +func (c ArmClient) destroyTestResources(ctx context.Context, resources resourceNames) error { + log.Printf("[DEBUG] Deleting Resource Group %q..", resources.resourceGroup) + future, err := 
c.groupsClient.Delete(ctx, resources.resourceGroup) + if err != nil { + return fmt.Errorf("Error deleting Resource Group: %w", err) + } + + log.Printf("[DEBUG] Waiting for deletion of Resource Group %q..", resources.resourceGroup) + err = future.WaitForCompletionRef(ctx, c.groupsClient.Client) + if err != nil { + return fmt.Errorf("Error waiting for the deletion of Resource Group: %w", err) + } + + return nil +} diff --git a/pkg/backend/remote-state/azure/sender.go b/pkg/backend/remote-state/azure/sender.go new file mode 100644 index 00000000000..2cb081556ac --- /dev/null +++ b/pkg/backend/remote-state/azure/sender.go @@ -0,0 +1,69 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "log" + "net/http" + "net/http/httputil" + + "github.com/Azure/go-autorest/autorest" + "github.com/kubegems/opentofu/pkg/logging" +) + +func buildSender() autorest.Sender { + return autorest.DecorateSender(&http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + }, withRequestLogging()) +} + +func withRequestLogging() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + // only log if logging's enabled + logLevel := logging.CurrentLogLevel() + if logLevel == "" { + return s.Do(r) + } + + // strip the authorization header prior to printing + authHeaderName := "Authorization" + auth := r.Header.Get(authHeaderName) + if auth != "" { + r.Header.Del(authHeaderName) + } + + // dump request to wire format + if dump, err := httputil.DumpRequestOut(r, true); err == nil { + log.Printf("[DEBUG] Azure Backend Request: \n%s\n", dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] Azure Backend Request: %s to %s\n", r.Method, r.URL) + } + + // add the auth header back + if auth != "" { + r.Header.Add(authHeaderName, auth) 
+ } + + resp, err := s.Do(r) + if resp != nil { + // dump response to wire format + if dump, err2 := httputil.DumpResponse(resp, true); err2 == nil { + log.Printf("[DEBUG] Azure Backend Response for %s: \n%s\n", r.URL, dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] Azure Backend Response: %s for %s\n", resp.Status, r.URL) + } + } else { + log.Printf("[DEBUG] Request to %s completed with no response", r.URL) + } + return resp, err + }) + } +} diff --git a/pkg/backend/remote-state/consul/Dockerfile b/pkg/backend/remote-state/consul/Dockerfile new file mode 100644 index 00000000000..95809f3ed5c --- /dev/null +++ b/pkg/backend/remote-state/consul/Dockerfile @@ -0,0 +1,32 @@ +# Copyright (c) The OpenTofu Authors +# SPDX-License-Identifier: MPL-2.0 +# Copyright (c) 2023 HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM consul:1.15 + +# install dependencies to install Go +RUN apk add -q curl tar + +# download Go +ARG GO_VERSION +RUN if [ "$(uname -m)"=="aarch64" ]; then curl -SLo /tmp/go.tar.gz https://go.dev/dl/go${GO_VERSION}.linux-arm64.tar.gz;\ + elif [ "$(uname -m)"=="arm64" ]; then curl -SLo /tmp/go.tar.gz https://go.dev/dl/go${GO_VERSION}.linux-arm64.tar.gz;\ + elif [ "$(uname -m)"=="arm" ]; then curl -SLo /tmp/go.tar.gz https://go.dev/dl/go${GO_VERSION}.linux-armv6l.tar.gz;\ + else curl -SLo /tmp/go.tar.gz https://go.dev/dl/go${GO_VERSION}.linux-386.tar.gz; \ + fi + +# install Go +RUN cd tmp && \ + tar -xzf go.tar.gz && \ + ln -s /tmp/go/bin/go /usr/local/bin/go && \ + rm go.tar.gz + +# cleanup installation dependencies +RUN apk del -q curl tar + +VOLUME "/app" +WORKDIR "/app" + +ENV TF_CONSUL_TEST=1 +ENTRYPOINT [ "go" ] diff --git a/pkg/backend/remote-state/consul/backend.go b/pkg/backend/remote-state/consul/backend.go new file mode 100644 index 00000000000..0edfc7e27b2 --- /dev/null +++ b/pkg/backend/remote-state/consul/backend.go @@ -0,0 +1,187 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// 
Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "net" + "strings" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" +) + +// New creates a new backend for Consul remote state. +func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Path to store state in Consul", + }, + + "access_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Access token for a Consul ACL", + Default: "", // To prevent input + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Address to the Consul Cluster", + Default: "", // To prevent input + }, + + "scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Scheme to communicate to Consul with", + Default: "", // To prevent input + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Datacenter to communicate with", + Default: "", // To prevent input + }, + + "http_auth": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "HTTP Auth in the format of 'username:password'", + Default: "", // To prevent input + }, + + "gzip": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Compress the state data using gzip", + Default: false, + }, + + "lock": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Lock state access", + Default: true, + }, + + "ca_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", + DefaultFunc: 
schema.EnvDefaultFunc("CONSUL_CACERT", ""), + }, + + "cert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), + }, + + "key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded private key, required if cert_file is specified.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), + }, + }, + } + + result := &Backend{Backend: s, encryption: enc} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + // The fields below are set from configure + client *consulapi.Client + configData *schema.ResourceData + lock bool +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + + // Store the lock information + b.lock = b.configData.Get("lock").(bool) + + data := b.configData + + // Configure the client + config := consulapi.DefaultConfig() + + // replace the default Transport Dialer to reduce the KeepAlive + config.Transport.DialContext = dialContext + + if v, ok := data.GetOk("access_token"); ok && v.(string) != "" { + config.Token = v.(string) + } + if v, ok := data.GetOk("address"); ok && v.(string) != "" { + config.Address = v.(string) + } + if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { + config.Scheme = v.(string) + } + if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { + config.Datacenter = v.(string) + } + + if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { + config.TLSConfig.CAFile = v.(string) + } + if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { + config.TLSConfig.CertFile = v.(string) + } + if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { + config.TLSConfig.KeyFile 
= v.(string) + } + + if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" { + auth := v.(string) + + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &consulapi.HttpBasicAuth{ + Username: username, + Password: password, + } + } + + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + b.client = client + return nil +} + +// dialContext is the DialContext function for the consul client transport. +// This is stored in a package var to inject a different dialer for tests. +var dialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 17 * time.Second, +}).DialContext diff --git a/pkg/backend/remote-state/consul/backend_state.go b/pkg/backend/remote-state/consul/backend_state.go new file mode 100644 index 00000000000..5dca9984d97 --- /dev/null +++ b/pkg/backend/remote-state/consul/backend_state.go @@ -0,0 +1,160 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +const ( + keyEnvPrefix = "-env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + // List our raw path + prefix := b.configData.Get("path").(string) + keyEnvPrefix + keys, _, err := b.client.KV().Keys(prefix, "/", nil) + if err != nil { + return nil, err + } + + // Find the envs, we use a map since we can get duplicates with + // path suffixes. 
+ envs := map[string]struct{}{} + for _, key := range keys { + // Consul should ensure this but it doesn't hurt to check again + if strings.HasPrefix(key, prefix) { + key = strings.TrimPrefix(key, prefix) + + // Ignore anything with a "/" in it since we store the state + // directly in a key not a directory. + if idx := strings.IndexRune(key, '/'); idx >= 0 { + continue + } + + envs[key] = struct{}{} + } + } + + result := make([]string, 1, len(envs)+1) + result[0] = backend.DefaultStateName + for k, _ := range envs { + result = append(result, k) + } + + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + // Determine the path of the data + path := b.path(name) + + // Delete it. We just delete it without any locking since + // the DeleteState API is documented as such. + _, err := b.client.KV().Delete(path, nil) + return err +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + // Determine the path of the data + path := b.path(name) + + // Determine whether to gzip or not + gzip := b.configData.Get("gzip").(bool) + + // Build the state client + var stateMgr = remote.NewState( + &RemoteClient{ + Client: b.client, + Path: path, + GZip: gzip, + lockState: b.lock, + }, + b.encryption, + ) + + if !b.lock { + stateMgr.DisableLocks() + } + + // the default state always exists + if name == backend.DefaultStateName { + return stateMgr, nil + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so States() knows it exists. 
+ lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Consul: %w", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + return stateMgr, nil +} + +func (b *Backend) path(name string) string { + path := b.configData.Get("path").(string) + if name != backend.DefaultStateName { + path += fmt.Sprintf("%s%s", keyEnvPrefix, name) + } + + return path +} + +const errStateUnlock = ` +Error unlocking Consul state. Lock ID: %s + +Error: %w + +You may have to force-unlock this state in order to use it again. +The Consul backend acquires a lock during initialization to ensure +the minimum required key/values are prepared. +` diff --git a/pkg/backend/remote-state/consul/backend_test.go b/pkg/backend/remote-state/consul/backend_test.go new file mode 100644 index 00000000000..0aef03780ff --- /dev/null +++ b/pkg/backend/remote-state/consul/backend_test.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "flag" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func newConsulTestServer(t *testing.T) *testutil.TestServer { + if os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == "" { + t.Skipf("consul server tests require setting TF_ACC or TF_CONSUL_TEST") + } + + srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { + c.LogLevel = "warn" + + if !flag.Parsed() { + flag.Parse() + } + + if !testing.Verbose() { + c.Stdout = io.Discard + c.Stderr = io.Discard + } + }) + + if err != nil { + t.Fatalf("failed to create consul test server: %s", err) + } + + srv.WaitForSerfCheck(t) + srv.WaitForLeader(t) + + return srv +} + +func TestBackend(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend. We need two to test locking. + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + // Test + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) +} + +func TestBackend_lockDisabled(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend. We need two to test locking. 
+ b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + "lock": false, + })) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path + "different", // Diff so locking test would fail if it was locking + "lock": false, + })) + + // Test + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) +} + +func TestBackend_gzip(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + // Get the backend + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": fmt.Sprintf("tf-unit/%s", time.Now().String()), + "gzip": true, + })) + + // Test + backend.TestBackendStates(t, b) +} diff --git a/pkg/backend/remote-state/consul/client.go b/pkg/backend/remote-state/consul/client.go new file mode 100644 index 00000000000..ce98055cf2d --- /dev/null +++ b/pkg/backend/remote-state/consul/client.go @@ -0,0 +1,691 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/json" + "errors" + "fmt" + "log" + "strings" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + multierror "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +const ( + lockSuffix = "/.lock" + lockInfoSuffix = "/.lockinfo" + + // The Session TTL associated with this lock. 
+ lockSessionTTL = "15s" + + // the delay time from when a session is lost to when the + // lock is released by the server + lockDelay = 5 * time.Second + // interval between attempts to reacquire a lost lock + lockReacquireInterval = 2 * time.Second +) + +var lostLockErr = errors.New("consul lock was lost") + +// RemoteClient is a remote client that stores data in Consul. +type RemoteClient struct { + Path string + GZip bool + + mu sync.Mutex + Client *consulapi.Client + // lockState is true if we're using locks + lockState bool + + // The index of the last state we wrote. + // If this is > 0, Put will perform a CAS to ensure that the state wasn't + // changed during the operation. This is important even with locks, because + // if the client loses the lock for some reason, then reacquires it, we + // need to make sure that the state was not modified. + modifyIndex uint64 + + consulLock *consulapi.Lock + lockCh <-chan struct{} + + info *statemgr.LockInfo + + // cancel our goroutine which is monitoring the lock to automatically + // reacquire it when possible. + monitorCancel context.CancelFunc + monitorWG sync.WaitGroup + + // sessionCancel cancels the Context use for session.RenewPeriodic, and is + // called when unlocking, or before creating a new lock if the lock is + // lost. + sessionCancel context.CancelFunc +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + chunked, hash, chunks, pair, err := c.chunkedMode() + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + c.modifyIndex = pair.ModifyIndex + + var payload []byte + if chunked { + for _, c := range chunks { + pair, _, err := kv.Get(c, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, fmt.Errorf("Key %q could not be found", c) + } + payload = append(payload, pair.Value[:]...) 
+ }
+ } else {
+ payload = pair.Value
+ }
+
+ // If the payload starts with 0x1f, it's gzip, not json
+ if len(payload) >= 1 && payload[0] == '\x1f' {
+ payload, err = uncompressState(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ md5 := md5.Sum(payload)
+
+ if hash != "" && fmt.Sprintf("%x", md5) != hash {
+ return nil, fmt.Errorf("The remote state does not match the expected hash")
+ }
+
+ return &remote.Payload{
+ Data: payload,
+ MD5: md5[:],
+ }, nil
+}
+
+func (c *RemoteClient) Put(data []byte) error {
+ // The state can be stored in 4 different ways, based on the payload size
+ // and whether the user enabled gzip:
+ // - single entry mode with plain JSON: a single JSON is stored at
+ // "tfstate/my_project"
+ // - single entry mode gzip: the JSON payload is first gzipped and stored at
+ // "tfstate/my_project"
+ // - chunked mode with plain JSON: the JSON payload is split in pieces and
+ // stored like so:
+ // - "tfstate/my_project" -> a JSON payload that contains the path of
+ // the chunks and an MD5 sum like so:
+ // {
+ // "current-hash": "abcdef1234",
+ // "chunks": [
+ // "tfstate/my_project/tfstate.abcdef1234/0",
+ // "tfstate/my_project/tfstate.abcdef1234/1",
+ // "tfstate/my_project/tfstate.abcdef1234/2",
+ // ]
+ // }
+ // - "tfstate/my_project/tfstate.abcdef1234/0" -> The first chunk
+ // - "tfstate/my_project/tfstate.abcdef1234/1" -> The next one
+ // - ...
+ // - chunked mode with gzip: the same system but we gzipped the JSON payload
+ // before splitting it in chunks
+ //
+ // When overwriting the current state, we need to clean the old chunks if
+ // we were in chunked mode (no matter whether we need to use chunks for the
+ // new one).
To do so based on the 4 possibilities above we look at the + // value at "tfstate/my_project" and if it is: + // - absent then it's a new state and there will be nothing to cleanup, + // - not a JSON payload we were in single entry mode with gzip so there will + // be nothing to cleanup + // - a JSON payload, then we were either single entry mode with plain JSON + // or in chunked mode. To differentiate between the two we look whether a + // "current-hash" key is present in the payload. If we find one we were + // in chunked mode and we will need to remove the old chunks (whether or + // not we were using gzip does not matter in that case). + + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + // First we determine what mode we were using and to prepare the cleanup + chunked, hash, _, _, err := c.chunkedMode() + if err != nil { + return err + } + cleanupOldChunks := func() {} + if chunked { + cleanupOldChunks = func() { + // We ignore all errors that can happen here because we already + // saved the new state and there is no way to return a warning to + // the user. We may end up with dangling chunks but there is no way + // to be sure we won't. + path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) + kv.DeleteTree(path, nil) + } + } + + payload := data + if c.GZip { + if compressedState, err := compressState(data); err == nil { + payload = compressedState + } else { + return err + } + } + + // default to doing a CAS + verb := consulapi.KVCAS + + // Assume a 0 index doesn't need a CAS for now, since we are either + // creating a new state or purposely overwriting one. + if c.modifyIndex == 0 { + verb = consulapi.KVSet + } + + // The payload may be too large to store in a single KV entry in Consul. 
We
+ // could try to determine whether it will fit or not before sending the
+ // request but since we are using the Transaction API and not the KV API,
+ // it grows by about a 1/3 when it is base64 encoded plus the overhead of
+ // the fields specific to the Transaction API.
+ // Rather than trying to calculate the overhead (which could change from
+ // one version of Consul to another, and between Consul Community Edition
+ // and Consul Enterprise), we try to send the whole state in one request, if
+ // it fails because it is too big we then split it in chunks and send each
+ // chunk separately.
+ // When splitting in chunks, we make each chunk 524288 bytes, which is the
+ // default max size for raft. If the user changed it, we still may send
+ // chunks too big and fail but this is not a setting that should be fiddled
+ // with anyway.
+
+ store := func(payload []byte) error {
+ // KV.Put doesn't return the new index, so we use a single operation
+ // transaction to get the new index with a single request.
+ txOps := consulapi.KVTxnOps{ + &consulapi.KVTxnOp{ + Verb: verb, + Key: c.Path, + Value: payload, + Index: c.modifyIndex, + }, + } + + ok, resp, _, err := kv.Txn(txOps, nil) + if err != nil { + return err + } + // transaction was rolled back + if !ok { + var resultErr error + for _, respError := range resp.Errors { + resultErr = multierror.Append(resultErr, errors.New(respError.What)) + } + return fmt.Errorf("consul CAS failed with transaction errors: %w", resultErr) + } + + if len(resp.Results) != 1 { + // this probably shouldn't happen + return fmt.Errorf("expected on 1 response value, got: %d", len(resp.Results)) + } + + c.modifyIndex = resp.Results[0].ModifyIndex + + // We remove all the old chunks + cleanupOldChunks() + + return nil + } + + if err = store(payload); err == nil { + // The payload was small enough to be stored + return nil + } else if !strings.Contains(err.Error(), "too large") { + // We failed for some other reason, report this to the user + return err + } + + // The payload was too large so we split it in multiple chunks + + md5 := md5.Sum(data) + chunks := split(payload, 524288) + chunkPaths := make([]string, 0) + + // First we write the new chunks + for i, p := range chunks { + path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%x/%d", md5, i) + chunkPaths = append(chunkPaths, path) + _, err := kv.Put(&consulapi.KVPair{ + Key: path, + Value: p, + }, nil) + + if err != nil { + return err + } + } + + // Then we update the link to point to the new chunks + payload, err = json.Marshal(map[string]interface{}{ + "current-hash": fmt.Sprintf("%x", md5), + "chunks": chunkPaths, + }) + if err != nil { + return err + } + return store(payload) +} + +func (c *RemoteClient) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + + chunked, hash, _, _, err := c.chunkedMode() + if err != nil { + return err + } + + _, err = kv.Delete(c.Path, nil) + + // If there were chunks we need to remove them + if chunked { + path := 
strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash) + kv.DeleteTree(path, nil) + } + + return err +} + +func (c *RemoteClient) lockPath() string { + // we sanitize the path for the lock as Consul does not like having + // two consecutive slashes for the lock path + return strings.TrimRight(c.Path, "/") +} + +func (c *RemoteClient) putLockInfo(info *statemgr.LockInfo) error { + info.Path = c.Path + info.Created = time.Now().UTC() + + kv := c.Client.KV() + _, err := kv.Put(&consulapi.KVPair{ + Key: c.lockPath() + lockInfoSuffix, + Value: info.Marshal(), + }, nil) + + return err +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + path := c.lockPath() + lockInfoSuffix + pair, _, err := c.Client.KV().Get(path, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + li := &statemgr.LockInfo{} + err = json.Unmarshal(pair.Value, li) + if err != nil { + return nil, fmt.Errorf("error unmarshaling lock info: %w", err) + } + + return li, nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return "", nil + } + + c.info = info + + // These checks only are to ensure we strictly follow the specification. + // OpenTofu shouldn't ever re-lock, so provide errors for the 2 possible + // states if this is called. + select { + case <-c.lockCh: + // We had a lock, but lost it. + return "", errors.New("lost consul lock, cannot re-lock") + default: + if c.lockCh != nil { + // we have an active lock already + return "", fmt.Errorf("state %q already locked", c.Path) + } + } + + return c.lock() +} + +// the lock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) lock() (string, error) { + // We create a new session here, so it can be canceled when the lock is + // lost or unlocked. 
+ lockSession, err := c.createSession() + if err != nil { + return "", err + } + + // store the session ID for correlation with consul logs + c.info.Info = "consul session: " + lockSession + + // A random lock ID has been generated but we override it with the session + // ID as this will make it easier to manually invalidate the session + // if needed. + c.info.ID = lockSession + + opts := &consulapi.LockOptions{ + Key: c.lockPath() + lockSuffix, + Session: lockSession, + + // only wait briefly, so tofu has the choice to fail fast or + // retry as needed. + LockWaitTime: time.Second, + LockTryOnce: true, + + // Don't let the lock monitor give up right away, as it's possible the + // session is still OK. While the session is refreshed at a rate of + // TTL/2, the lock monitor is an idle blocking request and is more + // susceptible to being closed by a lower network layer. + MonitorRetries: 5, + // + // The delay between lock monitor retries. + // While the session has a 15s TTL plus a 5s wait period on a lost + // lock, if we can't get our lock back in 10+ seconds something is + // wrong so we're going to drop the session and start over. + MonitorRetryTime: 2 * time.Second, + } + + c.consulLock, err = c.Client.LockOpts(opts) + if err != nil { + return "", err + } + + lockErr := &statemgr.LockError{} + + lockCh, err := c.consulLock.Lock(make(chan struct{})) + if err != nil { + lockErr.Err = err + return "", lockErr + } + + if lockCh == nil { + lockInfo, e := c.getLockInfo() + if e != nil { + lockErr.Err = e + return "", lockErr + } + + lockErr.Info = lockInfo + + return "", lockErr + } + + c.lockCh = lockCh + + err = c.putLockInfo(c.info) + if err != nil { + if unlockErr := c.unlock(c.info.ID); unlockErr != nil { + err = multierror.Append(err, unlockErr) + } + + return "", err + } + + // Start a goroutine to monitor the lock state. + // If we lose the lock to due communication issues with the consul agent, + // attempt to immediately reacquire the lock. 
Put will verify the integrity + // of the state by using a CAS operation. + ctx, cancel := context.WithCancel(context.Background()) + c.monitorCancel = cancel + c.monitorWG.Add(1) + go func() { + defer c.monitorWG.Done() + select { + case <-c.lockCh: + log.Println("[ERROR] lost consul lock") + for { + c.mu.Lock() + // We lost our lock, so we need to cancel the session too. + // The CancelFunc is only replaced while holding Client.mu, so + // this is safe to call here. This will be replaced by the + // lock() call below. + c.sessionCancel() + + c.consulLock = nil + _, err := c.lock() + c.mu.Unlock() + + if err != nil { + // We failed to get the lock, keep trying as long as + // tofu is running. There may be changes in progress, + // so there's no use in aborting. Either we eventually + // reacquire the lock, or a Put will fail on a CAS. + log.Printf("[ERROR] could not reacquire lock: %s", err) + time.Sleep(lockReacquireInterval) + + select { + case <-ctx.Done(): + return + default: + } + continue + } + + // if the error was nil, the new lock started a new copy of + // this goroutine. + return + } + + case <-ctx.Done(): + return + } + }() + + if testLockHook != nil { + testLockHook() + } + + return c.info.ID, nil +} + +// called after a lock is acquired +var testLockHook func() + +func (c *RemoteClient) createSession() (string, error) { + // create the context first. Even if the session creation fails, we assume + // that the CancelFunc is always callable. 
+ ctx, cancel := context.WithCancel(context.Background()) + c.sessionCancel = cancel + + session := c.Client.Session() + se := &consulapi.SessionEntry{ + Name: consulapi.DefaultLockSessionName, + TTL: lockSessionTTL, + LockDelay: lockDelay, + } + + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + + log.Println("[INFO] created consul lock session", id) + + // keep the session renewed + go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) + + return id, nil +} + +func (c *RemoteClient) Unlock(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return nil + } + + return c.unlock(id) +} + +// the unlock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) unlock(id string) error { + // This method can be called in two circumstances: + // - when the plan apply or destroy operation finishes and the lock needs to be released, + // the watchdog stopped and the session closed + // - when the user calls `tofu force-unlock ` in which case + // we only need to release the lock. + + if c.consulLock == nil || c.lockCh == nil { + // The user called `tofu force-unlock `, we just destroy + // the session which will release the lock, clean the KV store and quit. + + _, err := c.Client.Session().Destroy(id, nil) + if err != nil { + return err + } + // We ignore the errors that may happen during cleanup + kv := c.Client.KV() + kv.Delete(c.lockPath()+lockSuffix, nil) + kv.Delete(c.lockPath()+lockInfoSuffix, nil) + + return nil + } + + // cancel our monitoring goroutine + c.monitorCancel() + + defer func() { + c.consulLock = nil + + // The consul session is only used for this single lock, so cancel it + // after we unlock. + // The session is only created and replaced holding Client.mu, so the + // CancelFunc must be non-nil. 
+ c.sessionCancel() + }() + + select { + case <-c.lockCh: + return lostLockErr + default: + } + + kv := c.Client.KV() + + var errs error + + if _, err := kv.Delete(c.lockPath()+lockInfoSuffix, nil); err != nil { + errs = multierror.Append(errs, err) + } + + if err := c.consulLock.Unlock(); err != nil { + errs = multierror.Append(errs, err) + } + + // the monitoring goroutine may be in a select on the lockCh, so we need to + // wait for it to return before changing the value. + c.monitorWG.Wait() + c.lockCh = nil + + // This is only cleanup, and will fail if the lock was immediately taken by + // another client, so we don't report an error to the user here. + c.consulLock.Destroy() + + return errs +} + +func compressState(data []byte) ([]byte, error) { + b := new(bytes.Buffer) + gz := gzip.NewWriter(b) + if _, err := gz.Write(data); err != nil { + return nil, err + } + if err := gz.Flush(); err != nil { + return nil, err + } + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func uncompressState(data []byte) ([]byte, error) { + b := new(bytes.Buffer) + gz, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + b.ReadFrom(gz) + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func split(payload []byte, limit int) [][]byte { + var chunk []byte + chunks := make([][]byte, 0, len(payload)/limit+1) + for len(payload) >= limit { + chunk, payload = payload[:limit], payload[limit:] + chunks = append(chunks, chunk) + } + if len(payload) > 0 { + chunks = append(chunks, payload[:]) + } + return chunks +} + +func (c *RemoteClient) chunkedMode() (bool, string, []string, *consulapi.KVPair, error) { + kv := c.Client.KV() + pair, _, err := kv.Get(c.Path, nil) + if err != nil { + return false, "", nil, pair, err + } + if pair != nil { + var d map[string]interface{} + err = json.Unmarshal(pair.Value, &d) + // If there is an error when unmarshaling the payload, the state has + // 
probably been gziped in single entry mode. + if err == nil { + // If we find the "current-hash" key we were in chunked mode + hash, ok := d["current-hash"] + if ok { + chunks := make([]string, 0) + for _, c := range d["chunks"].([]interface{}) { + chunks = append(chunks, c.(string)) + } + return true, hash.(string), chunks, pair, nil + } + } + } + return false, "", nil, pair, nil +} diff --git a/pkg/backend/remote-state/consul/client_test.go b/pkg/backend/remote-state/consul/client_test.go new file mode 100644 index 00000000000..bfd96073e5e --- /dev/null +++ b/pkg/backend/remote-state/consul/client_test.go @@ -0,0 +1,504 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math/rand" + "net" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // Get the backend + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + // Grab the client + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) + }) + } +} + +// test 
the gzip functionality of the client +func TestRemoteClient_gzipUpgrade(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + statePath := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // Get the backend + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": statePath, + })) + + // Grab the client + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) + + // create a new backend with gzip + b = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": statePath, + "gzip": true, + })) + + // Grab the client + state, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test + remote.TestClient(t, state.(*remote.State).Client) +} + +// TestConsul_largeState tries to write a large payload using the Consul state +// manager, as there is a limit to the size of the values in the KV store it +// will need to be split up before being saved and put back together when read. 
+func TestConsul_largeState(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + path := "tf-unit/test-large-state" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c := s.(*remote.State).Client.(*RemoteClient) + c.Path = path + + // testPaths fails the test if the keys found at the prefix don't match + // what is expected + testPaths := func(t *testing.T, expected []string) { + kv := c.Client.KV() + pairs, _, err := kv.List(c.Path, nil) + if err != nil { + t.Fatal(err) + } + res := make([]string, 0) + for _, p := range pairs { + res = append(res, p.Key) + } + if !reflect.DeepEqual(res, expected) { + t.Fatalf("Wrong keys: %#v", res) + } + } + + testPayload := func(t *testing.T, data map[string]string, keys []string) { + payload, err := json.Marshal(data) + if err != nil { + t.Fatal(err) + } + err = c.Put(payload) + if err != nil { + t.Fatal("could not put payload", err) + } + + remote, err := c.Get() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(payload, remote.Data) { + t.Fatal("the data do not match") + } + + testPaths(t, keys) + } + + // The default limit for the size of the value in Consul is 524288 bytes + testPayload( + t, + map[string]string{ + "foo": strings.Repeat("a", 524288+2), + }, + []string{ + "tf-unit/test-large-state", + "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/0", + "tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/1", + }, + ) + + // This payload is just short enough to be stored but will be bigger when + // going through the Transaction API as it will be base64 encoded + testPayload( + t, + map[string]string{ + "foo": strings.Repeat("a", 524288-10), + }, + []string{ + "tf-unit/test-large-state", + 
"tf-unit/test-large-state/tfstate.4f407ace136a86521fd0d366972fe5c7/0", + }, + ) + + // We try to replace the payload with a small one, the old chunks should be removed + testPayload( + t, + map[string]string{"var": "a"}, + []string{"tf-unit/test-large-state"}, + ) + + // Test with gzip and chunks + b = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + "gzip": true, + })) + + s, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c = s.(*remote.State).Client.(*RemoteClient) + c.Path = path + + // We need a long random string so it results in multiple chunks even after + // being gziped + + // We use a fixed seed so the test can be reproductible + randomizer := rand.New(rand.NewSource(1234)) + RandStringRunes := func(n int) string { + var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[randomizer.Intn(len(letterRunes))] + } + return string(b) + } + + testPayload( + t, + map[string]string{ + "bar": RandStringRunes(5 * (524288 + 2)), + }, + []string{ + "tf-unit/test-large-state", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/0", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/1", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/2", + "tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/3", + }, + ) + + // Deleting the state should remove all chunks + err = c.Delete() + if err != nil { + t.Fatal(err) + } + testPaths(t, []string{}) +} + +func TestConsul_stateLock(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // create 2 
instances to get 2 remote.Clients + sA, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + sB, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client) + }) + } +} + +func TestConsul_destroyLock(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + testCases := []string{ + fmt.Sprintf("tf-unit/%s", time.Now().String()), + fmt.Sprintf("tf-unit/%s/", time.Now().String()), + } + + testLock := func(client *RemoteClient, lockPath string) { + // get the lock val + pair, _, err := client.Client.KV().Get(lockPath, nil) + if err != nil { + t.Fatal(err) + } + if pair != nil { + t.Fatalf("lock key not cleaned up at: %s", pair.Key) + } + } + + for _, path := range testCases { + t.Run(path, func(*testing.T) { + // Get the backend + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + // Grab the client + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("err: %s", err) + } + + clientA := s.(*remote.State).Client.(*RemoteClient) + + info := statemgr.NewLockInfo() + id, err := clientA.Lock(info) + if err != nil { + t.Fatal(err) + } + + lockPath := clientA.Path + lockSuffix + + if err := clientA.Unlock(id); err != nil { + t.Fatal(err) + } + + testLock(clientA, lockPath) + + // The release the lock from a second client to test the + // `tofu force-unlock ` functionality + s, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + 
t.Fatalf("err: %s", err) + } + + clientB := s.(*remote.State).Client.(*RemoteClient) + + info = statemgr.NewLockInfo() + id, err = clientA.Lock(info) + if err != nil { + t.Fatal(err) + } + + if err := clientB.Unlock(id); err != nil { + t.Fatal(err) + } + + testLock(clientA, lockPath) + + err = clientA.Unlock(id) + + if err == nil { + t.Fatal("consul lock should have been lost") + } + if err.Error() != "consul lock was lost" { + t.Fatal("got wrong error", err) + } + }) + } +} + +func TestConsul_lostLock(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + // create 2 instances to get 2 remote.Clients + sA, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + sB, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path + "-not-used", + })).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test-lost-lock" + id, err := sA.Lock(info) + if err != nil { + t.Fatal(err) + } + + reLocked := make(chan struct{}) + testLockHook = func() { + close(reLocked) + testLockHook = nil + } + + // now we use the second client to break the lock + kv := sB.(*remote.State).Client.(*RemoteClient).Client.KV() + _, err = kv.Delete(path+lockSuffix, nil) + if err != nil { + t.Fatal(err) + } + + <-reLocked + + if err := sA.Unlock(id); err != nil { + t.Fatal(err) + } +} + +func TestConsul_lostLockConnection(t *testing.T) { + srv := newConsulTestServer(t) + defer func() { _ = srv.Stop() }() + + // create an "unreliable" network by closing all the consul client's + // network connections + conns := &unreliableConns{} + 
origDialFn := dialContext + defer func() { + dialContext = origDialFn + }() + dialContext = conns.DialContext + + path := fmt.Sprintf("tf-unit/%s", time.Now().String()) + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "address": srv.HTTPAddr, + "path": path, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test-lost-lock-connection" + id, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + + // kill the connection a few times + for i := 0; i < 3; i++ { + dialed := conns.dialedDone() + // kill any open connections + conns.Kill() + // wait for a new connection to be dialed, and kill it again + <-dialed + } + + if err := s.Unlock(id); err != nil { + t.Fatal("unlock error:", err) + } +} + +type unreliableConns struct { + sync.Mutex + conns []net.Conn + dialCallback func() +} + +func (u *unreliableConns) DialContext(ctx context.Context, netw, addr string) (net.Conn, error) { + u.Lock() + defer u.Unlock() + + dialer := &net.Dialer{} + conn, err := dialer.DialContext(ctx, netw, addr) + if err != nil { + return nil, err + } + + u.conns = append(u.conns, conn) + + if u.dialCallback != nil { + u.dialCallback() + } + + return conn, nil +} + +func (u *unreliableConns) dialedDone() chan struct{} { + u.Lock() + defer u.Unlock() + dialed := make(chan struct{}) + u.dialCallback = func() { + defer close(dialed) + u.dialCallback = nil + } + + return dialed +} + +// Kill these with a deadline, just to make sure we don't end up with any EOFs +// that get ignored. 
+func (u *unreliableConns) Kill() { + u.Lock() + defer u.Unlock() + + for _, conn := range u.conns { + conn.(*net.TCPConn).SetDeadline(time.Now()) + } + u.conns = nil +} diff --git a/pkg/backend/remote-state/cos/backend.go b/pkg/backend/remote-state/cos/backend.go new file mode 100644 index 00000000000..c47426fa109 --- /dev/null +++ b/pkg/backend/remote-state/cos/backend.go @@ -0,0 +1,342 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cos + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" + sts "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sts/v20180813" + tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" + "github.com/tencentyun/cos-go-sdk-v5" +) + +// Default value from environment variable +const ( + PROVIDER_SECRET_ID = "TENCENTCLOUD_SECRET_ID" + PROVIDER_SECRET_KEY = "TENCENTCLOUD_SECRET_KEY" + PROVIDER_SECURITY_TOKEN = "TENCENTCLOUD_SECURITY_TOKEN" + PROVIDER_REGION = "TENCENTCLOUD_REGION" + PROVIDER_ASSUME_ROLE_ARN = "TENCENTCLOUD_ASSUME_ROLE_ARN" + PROVIDER_ASSUME_ROLE_SESSION_NAME = "TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME" + PROVIDER_ASSUME_ROLE_SESSION_DURATION = "TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION" +) + +// Backend implements "backend".Backend for tencentCloud cos +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + credential *common.Credential + + cosContext context.Context + cosClient *cos.Client + tagClient *tag.Client + stsClient *sts.Client + + region string + bucket string + prefix string + key string + encrypt bool + 
acl string +} + +// New creates a new backend for TencentCloud cos remote state. +func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "secret_id": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_ID, nil), + Description: "Secret id of Tencent Cloud", + }, + "secret_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_KEY, nil), + Description: "Secret key of Tencent Cloud", + Sensitive: true, + }, + "security_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECURITY_TOKEN, nil), + Description: "TencentCloud Security Token of temporary access credentials. It can be sourced from the `TENCENTCLOUD_SECURITY_TOKEN` environment variable. Notice: for supported products, please refer to: [temporary key supported products](https://intl.cloud.tencent.com/document/product/598/10588).", + Sensitive: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_REGION, nil), + Description: "The region of the COS bucket", + InputDefault: "ap-guangzhou", + }, + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the COS bucket", + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory for saving the state file in bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { + return nil, []error{fmt.Errorf("prefix must not start with '/' or './'")} + } + return nil, nil + }, + }, + "key": { + Type: schema.TypeString, + Optional: true, + Description: "The path for saving the state file in bucket", + Default: "terraform.tfstate", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + if strings.HasPrefix(v.(string), 
"/") || strings.HasSuffix(v.(string), "/") { + return nil, []error{fmt.Errorf("key can not start and end with '/'")} + } + return nil, nil + }, + }, + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: true, + }, + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Object ACL to be applied to the state file", + Default: "private", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + value := v.(string) + if value != "private" && value != "public-read" { + return nil, []error{fmt.Errorf( + "acl value invalid, expected %s or %s, got %s", + "private", "public-read", value)} + } + return nil, nil + }, + }, + "accelerate": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable global Acceleration", + Default: false, + }, + "assume_role": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Description: "The `assume_role` block. If provided, tofu will attempt to assume this role using the supplied credentials.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_ARN, nil), + Description: "The ARN of the role to assume. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_ARN`.", + }, + "session_name": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc(PROVIDER_ASSUME_ROLE_SESSION_NAME, nil), + Description: "The session name to use when making the AssumeRole call. 
It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_NAME`.", + }, + "session_duration": { + Type: schema.TypeInt, + Required: true, + DefaultFunc: func() (interface{}, error) { + if v := os.Getenv(PROVIDER_ASSUME_ROLE_SESSION_DURATION); v != "" { + return strconv.Atoi(v) + } + return 7200, nil + }, + ValidateFunc: validateIntegerInRange(0, 43200), + Description: "The duration of the session when making the AssumeRole call. Its value ranges from 0 to 43200(seconds), and default is 7200 seconds. It can be sourced from the `TENCENTCLOUD_ASSUME_ROLE_SESSION_DURATION`.", + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "A more restrictive policy when making the AssumeRole call. Its content must not contains `principal` elements. Notice: more syntax references, please refer to: [policies syntax logic](https://intl.cloud.tencent.com/document/product/598/10603).", + }, + }, + }, + }, + }, + } + + result := &Backend{Backend: s, encryption: enc} + result.Backend.ConfigureFunc = result.configure + + return result +} + +func validateIntegerInRange(min, max int64) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := int64(v.(int)) + if value < min { + errors = append(errors, fmt.Errorf( + "%q cannot be lower than %d: %d", k, min, value)) + } + if value > max { + errors = append(errors, fmt.Errorf( + "%q cannot be higher than %d: %d", k, max, value)) + } + return + } +} + +// configure init cos client +func (b *Backend) configure(ctx context.Context) error { + if b.cosClient != nil { + return nil + } + + b.cosContext = ctx + data := schema.FromContextBackendConfig(b.cosContext) + + b.region = data.Get("region").(string) + b.bucket = data.Get("bucket").(string) + b.prefix = data.Get("prefix").(string) + b.key = data.Get("key").(string) + b.encrypt = data.Get("encrypt").(bool) + b.acl = data.Get("acl").(string) + + var ( + u *url.URL + err error + ) + accelerate := 
data.Get("accelerate").(bool) + if accelerate { + u, err = url.Parse(fmt.Sprintf("https://%s.cos.accelerate.myqcloud.com", b.bucket)) + } else { + u, err = url.Parse(fmt.Sprintf("https://%s.cos.%s.myqcloud.com", b.bucket, b.region)) + } + if err != nil { + return err + } + + secretId := data.Get("secret_id").(string) + secretKey := data.Get("secret_key").(string) + securityToken := data.Get("security_token").(string) + + // init credential by AKSK & TOKEN + b.credential = common.NewTokenCredential(secretId, secretKey, securityToken) + // update credential if assume role exist + err = handleAssumeRole(data, b) + if err != nil { + return err + } + + b.cosClient = cos.NewClient( + &cos.BaseURL{BucketURL: u}, + &http.Client{ + Timeout: 60 * time.Second, + Transport: &cos.AuthorizationTransport{ + SecretID: b.credential.SecretId, + SecretKey: b.credential.SecretKey, + SessionToken: b.credential.Token, + }, + }, + ) + + b.tagClient = b.UseTagClient() + return err +} + +func handleAssumeRole(data *schema.ResourceData, b *Backend) error { + assumeRoleList := data.Get("assume_role").(*schema.Set).List() + if len(assumeRoleList) == 1 { + assumeRole := assumeRoleList[0].(map[string]interface{}) + assumeRoleArn := assumeRole["role_arn"].(string) + assumeRoleSessionName := assumeRole["session_name"].(string) + assumeRoleSessionDuration := assumeRole["session_duration"].(int) + assumeRolePolicy := assumeRole["policy"].(string) + + err := b.updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName, assumeRoleSessionDuration, assumeRolePolicy) + if err != nil { + return err + } + } + return nil +} + +func (b *Backend) updateCredentialWithSTS(assumeRoleArn, assumeRoleSessionName string, assumeRoleSessionDuration int, assumeRolePolicy string) error { + // assume role by STS + request := sts.NewAssumeRoleRequest() + request.RoleArn = &assumeRoleArn + request.RoleSessionName = &assumeRoleSessionName + duration := uint64(assumeRoleSessionDuration) + request.DurationSeconds = 
&duration + if assumeRolePolicy != "" { + policy := url.QueryEscape(assumeRolePolicy) + request.Policy = &policy + } + + response, err := b.UseStsClient().AssumeRole(request) + if err != nil { + return err + } + // update credentials by result of assume role + b.credential = common.NewTokenCredential( + *response.Response.Credentials.TmpSecretId, + *response.Response.Credentials.TmpSecretKey, + *response.Response.Credentials.Token, + ) + + return nil +} + +// UseStsClient returns sts client for service +func (b *Backend) UseStsClient() *sts.Client { + if b.stsClient != nil { + return b.stsClient + } + cpf := b.NewClientProfile(300) + b.stsClient, _ = sts.NewClient(b.credential, b.region, cpf) + b.stsClient.WithHttpTransport(&LogRoundTripper{}) + + return b.stsClient +} + +// UseTagClient returns tag client for service +func (b *Backend) UseTagClient() *tag.Client { + if b.tagClient != nil { + return b.tagClient + } + cpf := b.NewClientProfile(300) + cpf.Language = "en-US" + b.tagClient, _ = tag.NewClient(b.credential, b.region, cpf) + return b.tagClient +} + +// NewClientProfile returns a new ClientProfile +func (b *Backend) NewClientProfile(timeout int) *profile.ClientProfile { + cpf := profile.NewClientProfile() + + // all request use method POST + cpf.HttpProfile.ReqMethod = "POST" + // request timeout + cpf.HttpProfile.ReqTimeout = timeout + + return cpf +} diff --git a/pkg/backend/remote-state/cos/backend_state.go b/pkg/backend/remote-state/cos/backend_state.go new file mode 100644 index 00000000000..af5ba0ed001 --- /dev/null +++ b/pkg/backend/remote-state/cos/backend_state.go @@ -0,0 +1,190 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package cos

import (
	"fmt"
	"log"
	"path"
	"sort"
	"strings"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/states/remote"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
)

// Define file suffix
const (
	stateFileSuffix = ".tfstate"
	lockFileSuffix  = ".tflock"
)

// Workspaces returns a list of names for the workspaces found under the
// configured prefix in the COS bucket. The "default" workspace is always
// first; the remaining names are sorted.
func (b *Backend) Workspaces() ([]string, error) {
	c, err := b.client("tencentcloud")
	if err != nil {
		return nil, err
	}

	obs, err := c.getBucket(b.prefix)
	log.Printf("[DEBUG] list all workspaces, objects: %v, error: %v", obs, err)
	if err != nil {
		return nil, err
	}

	ws := []string{backend.DefaultStateName}
	for _, vv := range obs {
		// only objects ending in .tfstate are candidate state files
		if !strings.HasSuffix(vv.Key, stateFileSuffix) {
			continue
		}
		// skip the default workspace's state file (<prefix>/<key>)
		if path.Join(b.prefix, b.key) == vv.Key {
			continue
		}
		// non-default states live at <prefix>/<workspace>/<key>; the first
		// path segment after the prefix is the workspace name
		prefix := strings.TrimRight(b.prefix, "/") + "/"
		parts := strings.Split(strings.TrimPrefix(vv.Key, prefix), "/")
		if len(parts) > 0 && parts[0] != "" {
			ws = append(ws, parts[0])
		}
	}

	// keep "default" in front; sort everything after it
	sort.Strings(ws[1:])
	log.Printf("[DEBUG] list all workspaces, workspaces: %v", ws)

	return ws, nil
}

// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted.
func (b *Backend) DeleteWorkspace(name string, _ bool) error {
	log.Printf("[DEBUG] delete workspace, workspace: %v", name)

	if name == backend.DefaultStateName || name == "" {
		return fmt.Errorf("default state is not allow to delete")
	}

	c, err := b.client(name)
	if err != nil {
		return err
	}

	return c.Delete()
}

// StateMgr manage the state, if the named state not exists, a new file will created.
// For a workspace that does not yet exist, it takes the backend lock, refreshes,
// writes and persists an empty state, then releases the lock.
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
	log.Printf("[DEBUG] state manager, current workspace: %v", name)

	c, err := b.client(name)
	if err != nil {
		return nil, err
	}
	stateMgr := remote.NewState(c, b.encryption)

	// list existing workspaces to decide whether this one must be initialized
	ws, err := b.Workspaces()
	if err != nil {
		return nil, err
	}

	exists := false
	for _, candidate := range ws {
		if candidate == name {
			exists = true
			break
		}
	}

	if !exists {
		log.Printf("[DEBUG] workspace %v not exists", name)

		// take a lock on this state while we write it
		lockInfo := statemgr.NewLockInfo()
		lockInfo.Operation = "init"
		lockId, err := c.Lock(lockInfo)
		if err != nil {
			return nil, fmt.Errorf("Failed to lock cos state: %w", err)
		}

		// Local helper function so we can call it multiple places
		lockUnlock := func(e error) error {
			if err := stateMgr.Unlock(lockId); err != nil {
				return fmt.Errorf(unlockErrMsg, err, lockId)
			}
			return e
		}

		// Grab the value
		if err := stateMgr.RefreshState(); err != nil {
			err = lockUnlock(err)
			return nil, err
		}

		// If we have no state, we have to create an empty state
		if v := stateMgr.State(); v == nil {
			if err := stateMgr.WriteState(states.NewState()); err != nil {
				err = lockUnlock(err)
				return nil, err
			}
			if err := stateMgr.PersistState(nil); err != nil {
				err = lockUnlock(err)
				return nil, err
			}
		}

		// Unlock, the state should now be initialized
		if err := lockUnlock(nil); err != nil {
			return nil, err
		}
	}

	return stateMgr, nil
}

// client returns a remoteClient for the
named state. +func (b *Backend) client(name string) (*remoteClient, error) { + if strings.TrimSpace(name) == "" { + return nil, fmt.Errorf("state name not allow to be empty") + } + + return &remoteClient{ + cosContext: b.cosContext, + cosClient: b.cosClient, + tagClient: b.tagClient, + bucket: b.bucket, + stateFile: b.stateFile(name), + lockFile: b.lockFile(name), + encrypt: b.encrypt, + acl: b.acl, + }, nil +} + +// stateFile returns state file path by name +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName { + return path.Join(b.prefix, b.key) + } + return path.Join(b.prefix, name, b.key) +} + +// lockFile returns lock file path by name +func (b *Backend) lockFile(name string) string { + return b.stateFile(name) + lockFileSuffix +} + +// unlockErrMsg is error msg for unlock failed +const unlockErrMsg = ` +Unlocking the state file on TencentCloud cos backend failed: + +Error message: %v +Lock ID (gen): %s + +You may have to force-unlock this state in order to use it again. +The TencentCloud backend acquires a lock during initialization +to ensure the initial state file is created. +` diff --git a/pkg/backend/remote-state/cos/backend_test.go b/pkg/backend/remote-state/cos/backend_test.go new file mode 100644 index 00000000000..cbe4a37bb6d --- /dev/null +++ b/pkg/backend/remote-state/cos/backend_test.go @@ -0,0 +1,262 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cos + +import ( + "crypto/md5" + "fmt" + "os" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" +) + +const ( + defaultPrefix = "" + defaultKey = "terraform.tfstate" +) + +// Testing Thanks to GCS + +func TestStateFile(t *testing.T) { + t.Parallel() + + cases := []struct { + prefix string + stateName string + key string + wantStateFile string + wantLockFile string + }{ + {"", "default", "default.tfstate", "default.tfstate", "default.tfstate.tflock"}, + {"", "default", "test.tfstate", "test.tfstate", "test.tfstate.tflock"}, + {"", "dev", "test.tfstate", "dev/test.tfstate", "dev/test.tfstate.tflock"}, + {"terraform/test", "default", "default.tfstate", "terraform/test/default.tfstate", "terraform/test/default.tfstate.tflock"}, + {"terraform/test", "default", "test.tfstate", "terraform/test/test.tfstate", "terraform/test/test.tfstate.tflock"}, + {"terraform/test", "dev", "test.tfstate", "terraform/test/dev/test.tfstate", "terraform/test/dev/test.tfstate.tflock"}, + } + + for _, c := range cases { + t.Run(fmt.Sprintf("%s %s %s", c.prefix, c.key, c.stateName), func(t *testing.T) { + b := &Backend{ + prefix: c.prefix, + key: c.key, + } + if got, want := b.stateFile(c.stateName), c.wantStateFile; got != want { + t.Errorf("wrong state file name\ngot: %s\nwant: %s", got, want) + } + if got, want := b.lockFile(c.stateName), c.wantLockFile; got != want { + t.Errorf("wrong lock file name\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestRemoteClient(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: 
%T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteClientWithPrefix(t *testing.T) { + t.Parallel() + + prefix := "prefix/test" + bucket := bucketName(t) + + be := setupBackend(t, bucket, prefix, defaultKey, false) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteClientWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("wrong state manager type\ngot: %T\nwant: %T", ss, rs) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteLocks(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be) + + remoteClient := func() (remote.Client, error) { + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + return nil, err + } + + rs, ok := ss.(*remote.State) + if !ok { + return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + return rs.Client, nil + } + + c0, err := remoteClient() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + c1, err := remoteClient() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + remote.TestRemoteLocks(t, c0, c1) +} + +func TestBackend(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, false) + defer teardownBackend(t, be1) + 
+ backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) + backend.TestBackendStateForceUnlock(t, be0, be1) +} + +func TestBackendWithPrefix(t *testing.T) { + t.Parallel() + + prefix := "prefix/test" + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, prefix, defaultKey, false) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, prefix+"/", defaultKey, false) + defer teardownBackend(t, be1) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func TestBackendWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be0) + + be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, true) + defer teardownBackend(t, be1) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func setupBackend(t *testing.T, bucket, prefix, key string, encrypt bool) backend.Backend { + t.Helper() + + skip := os.Getenv("TF_COS_APPID") == "" + if skip { + t.Skip("This test require setting TF_COS_APPID environment variables") + } + + if os.Getenv(PROVIDER_REGION) == "" { + t.Setenv(PROVIDER_REGION, "ap-guangzhou") + } + + appId := os.Getenv("TF_COS_APPID") + region := os.Getenv(PROVIDER_REGION) + + config := map[string]interface{}{ + "region": region, + "bucket": bucket + appId, + "prefix": prefix, + "key": key, + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)) + be := b.(*Backend) + + c, err := be.client("tencentcloud") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + err = c.putBucket() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + return b +} + +func teardownBackend(t *testing.T, b backend.Backend) { + t.Helper() + + c, err := b.(*Backend).client("tencentcloud") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + err = c.deleteBucket(true) + if 
err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func bucketName(t *testing.T) string { + unique := fmt.Sprintf("%s-%x", t.Name(), time.Now().UnixNano()) + return fmt.Sprintf("terraform-test-%s-%s", fmt.Sprintf("%x", md5.Sum([]byte(unique)))[:10], "") +} diff --git a/pkg/backend/remote-state/cos/client.go b/pkg/backend/remote-state/cos/client.go new file mode 100644 index 00000000000..6be1751ae88 --- /dev/null +++ b/pkg/backend/remote-state/cos/client.go @@ -0,0 +1,447 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cos + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "strings" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" + "github.com/tencentyun/cos-go-sdk-v5" +) + +const ( + lockTagKey = "tencentcloud-terraform-lock" +) + +// RemoteClient implements the client of remote state +type remoteClient struct { + cosContext context.Context + cosClient *cos.Client + tagClient *tag.Client + + bucket string + stateFile string + lockFile string + encrypt bool + acl string +} + +// Get returns remote state file +func (c *remoteClient) Get() (*remote.Payload, error) { + log.Printf("[DEBUG] get remote state file %s", c.stateFile) + + exists, data, checksum, err := c.getObject(c.stateFile) + if err != nil { + return nil, err + } + + if !exists { + return nil, nil + } + + payload := &remote.Payload{ + Data: data, + MD5: []byte(checksum), + } + + return payload, nil +} + +// Put put state file to remote +func (c *remoteClient) Put(data []byte) error { + log.Printf("[DEBUG] put remote state file %s", c.stateFile) + + return c.putObject(c.stateFile, data) +} + +// Delete delete remote state file +func 
(c *remoteClient) Delete() error { + log.Printf("[DEBUG] delete remote state file %s", c.stateFile) + + return c.deleteObject(c.stateFile) +} + +// Lock lock remote state file for writing +func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { + log.Printf("[DEBUG] lock remote state file %s", c.lockFile) + + err := c.cosLock(c.bucket, c.lockFile) + if err != nil { + return "", c.lockError(err) + } + defer c.cosUnlock(c.bucket, c.lockFile) + + exists, _, _, err := c.getObject(c.lockFile) + if err != nil { + return "", c.lockError(err) + } + + if exists { + return "", c.lockError(fmt.Errorf("lock file %s exists", c.lockFile)) + } + + info.Path = c.lockFile + data, err := json.Marshal(info) + if err != nil { + return "", c.lockError(err) + } + + check := fmt.Sprintf("%x", md5.Sum(data)) + err = c.putObject(c.lockFile, data) + if err != nil { + return "", c.lockError(err) + } + + return check, nil +} + +// Unlock unlock remote state file +func (c *remoteClient) Unlock(check string) error { + log.Printf("[DEBUG] unlock remote state file %s", c.lockFile) + + info, err := c.lockInfo() + if err != nil { + return c.lockError(err) + } + + if info.ID != check { + return c.lockError(fmt.Errorf("lock id mismatch, %v != %v", info.ID, check)) + } + + err = c.deleteObject(c.lockFile) + if err != nil { + return c.lockError(err) + } + + err = c.cosUnlock(c.bucket, c.lockFile) + if err != nil { + return c.lockError(err) + } + + return nil +} + +// lockError returns statemgr.LockError +func (c *remoteClient) lockError(err error) *statemgr.LockError { + log.Printf("[DEBUG] failed to lock or unlock %s: %v", c.lockFile, err) + + lockErr := &statemgr.LockError{ + Err: err, + } + + info, infoErr := c.lockInfo() + if infoErr != nil { + lockErr.Err = multierror.Append(lockErr.Err, infoErr) + } else { + lockErr.Info = info + } + + return lockErr +} + +// lockInfo returns LockInfo from lock file +func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) { + exists, data, 
checksum, err := c.getObject(c.lockFile) + if err != nil { + return nil, err + } + + if !exists { + return nil, fmt.Errorf("lock file %s not exists", c.lockFile) + } + + info := &statemgr.LockInfo{} + if err := json.Unmarshal(data, info); err != nil { + return nil, err + } + + info.ID = checksum + + return info, nil +} + +// getObject get remote object +func (c *remoteClient) getObject(cosFile string) (exists bool, data []byte, checksum string, err error) { + rsp, err := c.cosClient.Object.Get(c.cosContext, cosFile, nil) + if rsp == nil { + log.Printf("[DEBUG] getObject %s: error: %v", cosFile, err) + err = fmt.Errorf("failed to open file at %v: %w", cosFile, err) + return + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] getObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if err != nil { + if rsp.StatusCode == 404 { + err = nil + } else { + err = fmt.Errorf("failed to open file at %v: %w", cosFile, err) + } + return + } + + checksum = rsp.Header.Get("X-Cos-Meta-Md5") + log.Printf("[DEBUG] getObject %s: checksum: %s", cosFile, checksum) + if len(checksum) != 32 { + err = fmt.Errorf("failed to open file at %v: checksum %s invalid", cosFile, checksum) + return + } + + exists = true + data, err = io.ReadAll(rsp.Body) + log.Printf("[DEBUG] getObject %s: data length: %d", cosFile, len(data)) + if err != nil { + err = fmt.Errorf("failed to open file at %v: %w", cosFile, err) + return + } + + check := fmt.Sprintf("%x", md5.Sum(data)) + log.Printf("[DEBUG] getObject %s: check: %s", cosFile, check) + if check != checksum { + err = fmt.Errorf("failed to open file at %v: checksum mismatch, %s != %s", cosFile, check, checksum) + return + } + + return +} + +// putObject put object to remote +func (c *remoteClient) putObject(cosFile string, data []byte) error { + opt := &cos.ObjectPutOptions{ + ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{ + XCosMetaXXX: &http.Header{ + "X-Cos-Meta-Md5": []string{fmt.Sprintf("%x", md5.Sum(data))}, + }, + }, + 
ACLHeaderOptions: &cos.ACLHeaderOptions{ + XCosACL: c.acl, + }, + } + + if c.encrypt { + opt.ObjectPutHeaderOptions.XCosServerSideEncryption = "AES256" + } + + r := bytes.NewReader(data) + rsp, err := c.cosClient.Object.Put(c.cosContext, cosFile, r, opt) + if rsp == nil { + log.Printf("[DEBUG] putObject %s: error: %v", cosFile, err) + return fmt.Errorf("failed to save file to %v: %w", cosFile, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] putObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if err != nil { + return fmt.Errorf("failed to save file to %v: %w", cosFile, err) + } + + return nil +} + +// deleteObject delete remote object +func (c *remoteClient) deleteObject(cosFile string) error { + rsp, err := c.cosClient.Object.Delete(c.cosContext, cosFile) + if rsp == nil { + log.Printf("[DEBUG] deleteObject %s: error: %v", cosFile, err) + return fmt.Errorf("failed to delete file %v: %w", cosFile, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] deleteObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to delete file %v: %w", cosFile, err) + } + + return nil +} + +// getBucket list bucket by prefix +func (c *remoteClient) getBucket(prefix string) (obs []cos.Object, err error) { + fs, rsp, err := c.cosClient.Bucket.Get(c.cosContext, &cos.BucketGetOptions{Prefix: prefix}) + if rsp == nil { + log.Printf("[DEBUG] getBucket %s/%s: error: %v", c.bucket, prefix, err) + err = fmt.Errorf("bucket %s not exists", c.bucket) + return + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] getBucket %s/%s: code: %d, error: %v", c.bucket, prefix, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + err = fmt.Errorf("bucket %s not exists", c.bucket) + return + } + + if err != nil { + return + } + + return fs.Contents, nil +} + +// putBucket create cos bucket +func (c *remoteClient) putBucket() error { + rsp, err := 
c.cosClient.Bucket.Put(c.cosContext, nil) + if rsp == nil { + log.Printf("[DEBUG] putBucket %s: error: %v", c.bucket, err) + return fmt.Errorf("failed to create bucket %v: %w", c.bucket, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] putBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) + if rsp.StatusCode == 409 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to create bucket %v: %w", c.bucket, err) + } + + return nil +} + +// deleteBucket delete cos bucket +func (c *remoteClient) deleteBucket(recursive bool) error { + if recursive { + obs, err := c.getBucket("") + if err != nil { + if strings.Contains(err.Error(), "not exists") { + return nil + } + log.Printf("[DEBUG] deleteBucket %s: empty bucket error: %v", c.bucket, err) + return fmt.Errorf("failed to empty bucket %v: %w", c.bucket, err) + } + for _, v := range obs { + c.deleteObject(v.Key) + } + } + + rsp, err := c.cosClient.Bucket.Delete(c.cosContext) + if rsp == nil { + log.Printf("[DEBUG] deleteBucket %s: error: %v", c.bucket, err) + return fmt.Errorf("failed to delete bucket %v: %w", c.bucket, err) + } + defer rsp.Body.Close() + + log.Printf("[DEBUG] deleteBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err) + if rsp.StatusCode == 404 { + return nil + } + + if err != nil { + return fmt.Errorf("failed to delete bucket %v: %w", c.bucket, err) + } + + return nil +} + +// cosLock lock cos for writing +func (c *remoteClient) cosLock(bucket, cosFile string) error { + log.Printf("[DEBUG] lock cos file %s:%s", bucket, cosFile) + + cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) + lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath))) + + return c.CreateTag(lockTagKey, lockTagValue) +} + +// cosUnlock unlock cos writing +func (c *remoteClient) cosUnlock(bucket, cosFile string) error { + log.Printf("[DEBUG] unlock cos file %s:%s", bucket, cosFile) + + cosPath := fmt.Sprintf("%s:%s", bucket, cosFile) + lockTagValue := fmt.Sprintf("%x", 
md5.Sum([]byte(cosPath))) + + var err error + for i := 0; i < 30; i++ { + tagExists, err := c.CheckTag(lockTagKey, lockTagValue) + + if err != nil { + return err + } + + if !tagExists { + return nil + } + + err = c.DeleteTag(lockTagKey, lockTagValue) + if err == nil { + return nil + } + time.Sleep(1 * time.Second) + } + + return err +} + +// CheckTag checks if tag key:value exists +func (c *remoteClient) CheckTag(key, value string) (exists bool, err error) { + request := tag.NewDescribeTagsRequest() + request.TagKey = &key + request.TagValue = &value + + response, err := c.tagClient.DescribeTags(request) + log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) + if err != nil { + return + } + + if len(response.Response.Tags) == 0 { + return + } + + tagKey := response.Response.Tags[0].TagKey + tagValue := response.Response.Tags[0].TagValue + + exists = key == *tagKey && value == *tagValue + + return +} + +// CreateTag create tag by key and value +func (c *remoteClient) CreateTag(key, value string) error { + request := tag.NewCreateTagRequest() + request.TagKey = &key + request.TagValue = &value + + _, err := c.tagClient.CreateTag(request) + log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err) + if err != nil { + return fmt.Errorf("failed to create tag: %s -> %s: %w", key, value, err) + } + + return nil +} + +// DeleteTag create tag by key and value +func (c *remoteClient) DeleteTag(key, value string) error { + request := tag.NewDeleteTagRequest() + request.TagKey = &key + request.TagValue = &value + + _, err := c.tagClient.DeleteTag(request) + log.Printf("[DEBUG] delete tag %s:%s: error: %v", key, value, err) + if err != nil { + return fmt.Errorf("failed to delete tag: %s -> %s: %w", key, value, err) + } + + return nil +} diff --git a/pkg/backend/remote-state/cos/transport.go b/pkg/backend/remote-state/cos/transport.go new file mode 100644 index 00000000000..ff99781734a --- /dev/null +++ b/pkg/backend/remote-state/cos/transport.go @@ -0,0 
// REQUEST_CLIENT is the environment variable that, when set, overrides the
// X-TC-RequestClient header value sent with every Tencent Cloud API request.
const REQUEST_CLIENT = "TENCENTCLOUD_API_REQUEST_CLIENT"

// ReqClient identifies this client to the Tencent Cloud API. It can be
// overridden via SetReqClient or the REQUEST_CLIENT environment variable.
var ReqClient = "Terraform-latest"

// SetReqClient overrides the request client name. Empty names are ignored.
func SetReqClient(name string) {
	if name == "" {
		return
	}
	ReqClient = name
}

// LogRoundTripper wraps http.DefaultTransport and logs each request body
// and response body for debugging.
type LogRoundTripper struct {
}

// RoundTrip implements http.RoundTripper. It stamps the request with the
// X-TC-RequestClient header, forwards it through http.DefaultTransport, and
// logs the request/response bodies (the response body is re-wrapped so the
// caller can still read it).
func (me *LogRoundTripper) RoundTrip(request *http.Request) (response *http.Response, errRet error) {

	var inBytes, outBytes []byte

	var start = time.Now()

	defer func() { me.log(inBytes, outBytes, errRet, start) }()

	if envReqClient := os.Getenv(REQUEST_CLIENT); envReqClient != "" {
		ReqClient = envReqClient
	}
	request.Header.Set("X-TC-RequestClient", ReqClient)

	headName := "X-TC-Action"
	inBytes = []byte(fmt.Sprintf("%s, request: ", request.Header[headName]))

	// GetBody is nil for requests without a body (e.g. GET); only attempt
	// to read and log the body when one is present, instead of panicking on
	// a nil function call.
	if request.GetBody != nil {
		bodyReader, err := request.GetBody()
		if err != nil {
			errRet = err
			return
		}
		requestBody, err := io.ReadAll(bodyReader)
		if err != nil {
			errRet = err
			return
		}
		inBytes = append(inBytes, requestBody...)
	}

	headName = "X-TC-Region"
	appendMessage := []byte(fmt.Sprintf(
		", (host %+v, region:%+v)",
		request.Header["Host"],
		request.Header[headName],
	))

	inBytes = append(inBytes, appendMessage...)

	response, errRet = http.DefaultTransport.RoundTrip(request)
	if errRet != nil {
		return
	}
	// Drain the body for logging and replace it with an equivalent reader
	// so downstream consumers can still read the response.
	outBytes, errRet = io.ReadAll(response.Body)
	if errRet != nil {
		return
	}
	response.Body = io.NopCloser(bytes.NewBuffer(outBytes))
	return
}

// log writes a single structured debug line containing the request bytes,
// the (compacted) response bytes, any error, and the elapsed time.
func (me *LogRoundTripper) log(in []byte, out []byte, err error, start time.Time) {
	var buf bytes.Buffer
	buf.WriteString("######")
	tag := "[DEBUG]"
	if err != nil {
		tag = "[CRITICAL]"
	}
	buf.WriteString(tag)
	if len(in) > 0 {
		buf.WriteString("tencentcloud-sdk-go: ")
		buf.Write(in)
	}
	if len(out) > 0 {
		buf.WriteString("; response:")
		// Compact valid JSON; for non-JSON payloads fall back to stripping
		// newlines and spaces so the response stays on a single log line.
		if err := json.Compact(&buf, out); err != nil {
			out := bytes.ReplaceAll(out, []byte("\n"), []byte(""))
			out = bytes.ReplaceAll(out, []byte(" "), []byte(""))
			buf.Write(out)
		}
	}

	if err != nil {
		buf.WriteString("; error:")
		buf.WriteString(err.Error())
	}

	costFormat := fmt.Sprintf(",cost %s", time.Since(start).String())
	buf.WriteString(costFormat)

	log.Println(buf.String())
}
+package gcs + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "strings" + + "cloud.google.com/go/storage" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/kubegems/opentofu/version" + "golang.org/x/oauth2" + "google.golang.org/api/impersonate" + "google.golang.org/api/option" +) + +// Backend implements "backend".Backend for GCS. +// Input(), Validate() and Configure() are implemented by embedding *schema.Backend. +// State(), DeleteState() and States() are implemented explicitly. +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + storageClient *storage.Client + storageContext context.Context + + bucketName string + prefix string + + encryptionKey []byte + kmsKeyName string +} + +func New(enc encryption.StateEncryption) backend.Backend { + b := &Backend{encryption: enc} + b.Backend = &schema.Backend{ + ConfigureFunc: b.configure, + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the Google Cloud Storage bucket", + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory where state files will be saved inside the bucket", + }, + + "credentials": { + Type: schema.TypeString, + Optional: true, + Description: "Google Cloud JSON Account Key", + Default: "", + }, + + "access_token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }, nil), + Description: "An OAuth2 token used for GCP authentication", + }, + + "impersonate_service_account": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BACKEND_IMPERSONATE_SERVICE_ACCOUNT", + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", + }, nil), + Description: "The 
service account to impersonate for all Google API Calls", + }, + + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Optional: true, + Description: "The delegation chain for the impersonated service account", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "encryption_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_ENCRYPTION_KEY", + }, nil), + Description: "A 32 byte base64 encoded 'customer supplied encryption key' used when reading and writing state files in the bucket.", + ConflictsWith: []string{"kms_encryption_key"}, + }, + + "kms_encryption_key": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_KMS_ENCRYPTION_KEY", + }, nil), + Description: "A Cloud KMS key ('customer managed encryption key') used when reading and writing state files in the bucket. Format should be 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}'.", + ConflictsWith: []string{"encryption_key"}, + }, + + "storage_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BACKEND_STORAGE_CUSTOM_ENDPOINT", + "GOOGLE_STORAGE_CUSTOM_ENDPOINT", + }, nil), + }, + }, + } + + return b +} + +func (b *Backend) configure(ctx context.Context) error { + if b.storageClient != nil { + return nil + } + + // ctx is a background context with the backend config added. + // Since no context is passed to remoteClient.Get(), .Lock(), etc. but + // one is required for calling the GCP API, we're holding on to this + // context here and re-use it later. 
+ b.storageContext = ctx + + data := schema.FromContextBackendConfig(b.storageContext) + + b.bucketName = data.Get("bucket").(string) + b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/") + if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") { + b.prefix = b.prefix + "/" + } + + var opts []option.ClientOption + var credOptions []option.ClientOption + + // Add credential source + var creds string + var tokenSource oauth2.TokenSource + + if v, ok := data.GetOk("access_token"); ok { + tokenSource = oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: v.(string), + }) + } else if v, ok := data.GetOk("credentials"); ok { + creds = v.(string) + } else if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { + creds = v + } else { + creds = os.Getenv("GOOGLE_CREDENTIALS") + } + + if tokenSource != nil { + credOptions = append(credOptions, option.WithTokenSource(tokenSource)) + } else if creds != "" { + + // to mirror how the provider works, we accept the file path or the contents + contents, err := backend.ReadPathOrContents(creds) + if err != nil { + return fmt.Errorf("Error loading credentials: %w", err) + } + + if !json.Valid([]byte(contents)) { + return fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") + } + + credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) + } + + // Service Account Impersonation + if v, ok := data.GetOk("impersonate_service_account"); ok { + ServiceAccount := v.(string) + var delegates []string + + if v, ok := data.GetOk("impersonate_service_account_delegates"); ok { + d := v.([]interface{}) + if len(delegates) > 0 { + delegates = make([]string, 0, len(d)) + } + for _, delegate := range d { + delegates = append(delegates, delegate.(string)) + } + } + + ts, err := impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: ServiceAccount, + Scopes: []string{storage.ScopeReadWrite}, + Delegates: delegates, + }, credOptions...) 
+ + if err != nil { + return err + } + + opts = append(opts, option.WithTokenSource(ts)) + + } else { + opts = append(opts, credOptions...) + } + + opts = append(opts, option.WithUserAgent(httpclient.OpenTofuUserAgent(version.Version))) + + // Custom endpoint for storage API + if storageEndpoint, ok := data.GetOk("storage_custom_endpoint"); ok { + endpoint := option.WithEndpoint(storageEndpoint.(string)) + opts = append(opts, endpoint) + } + client, err := storage.NewClient(b.storageContext, opts...) + if err != nil { + return fmt.Errorf("storage.NewClient() failed: %w", err) + } + + b.storageClient = client + + // Customer-supplied encryption + key := data.Get("encryption_key").(string) + if key != "" { + kc, err := backend.ReadPathOrContents(key) + if err != nil { + return fmt.Errorf("Error loading encryption key: %w", err) + } + + // The GCS client expects a customer supplied encryption key to be + // passed in as a 32 byte long byte slice. The byte slice is base64 + // encoded before being passed to the API. We take a base64 encoded key + // to remain consistent with the GCS docs. + // https://cloud.google.com/storage/docs/encryption#customer-supplied + // https://github.com/GoogleCloudPlatform/google-cloud-go/blob/def681/storage/storage.go#L1181 + k, err := base64.StdEncoding.DecodeString(kc) + if err != nil { + return fmt.Errorf("Error decoding encryption key: %w", err) + } + b.encryptionKey = k + } + + // Customer-managed encryption + kmsName := data.Get("kms_encryption_key").(string) + if kmsName != "" { + b.kmsKeyName = kmsName + } + + return nil +} diff --git a/pkg/backend/remote-state/gcs/backend_state.go b/pkg/backend/remote-state/gcs/backend_state.go new file mode 100644 index 00000000000..ef2fb93a239 --- /dev/null +++ b/pkg/backend/remote-state/gcs/backend_state.go @@ -0,0 +1,160 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package gcs

import (
	"fmt"
	"path"
	"sort"
	"strings"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/states/remote"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
)

const (
	stateFileSuffix = ".tfstate"
	lockFileSuffix  = ".tflock"
)

// Workspaces returns a list of names for the workspaces found on GCS. The default
// state is always returned as the first element in the slice.
func (b *Backend) Workspaces() ([]string, error) {
	states := []string{backend.DefaultStateName}

	bucket := b.storageClient.Bucket(b.bucketName)
	// List only direct children of the prefix ("/"-delimited) so nested
	// prefixes are not mistaken for workspaces.
	objs := bucket.Objects(b.storageContext, &storage.Query{
		Delimiter: "/",
		Prefix:    b.prefix,
	})
	for {
		attrs, err := objs.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("querying Cloud Storage failed: %w", err)
		}

		// A workspace is identified by a "<name>.tfstate" object; anything
		// else (e.g. ".tflock" files) is skipped.
		name := path.Base(attrs.Name)
		if !strings.HasSuffix(name, stateFileSuffix) {
			continue
		}
		st := strings.TrimSuffix(name, stateFileSuffix)

		if st != backend.DefaultStateName {
			states = append(states, st)
		}
	}

	// Keep "default" first; sort the remaining workspace names.
	sort.Strings(states[1:])
	return states, nil
}

// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted.
func (b *Backend) DeleteWorkspace(name string, _ bool) error {
	if name == backend.DefaultStateName {
		return fmt.Errorf("cowardly refusing to delete the %q state", name)
	}

	c, err := b.client(name)
	if err != nil {
		return err
	}

	return c.Delete()
}

// client returns a remoteClient for the named state.
func (b *Backend) client(name string) (*remoteClient, error) {
	if name == "" {
		return nil, fmt.Errorf("%q is not a valid state name", name)
	}

	return &remoteClient{
		storageContext: b.storageContext,
		storageClient:  b.storageClient,
		bucketName:     b.bucketName,
		stateFilePath:  b.stateFile(name),
		lockFilePath:   b.lockFile(name),
		encryptionKey:  b.encryptionKey,
		kmsKeyName:     b.kmsKeyName,
	}, nil
}

// StateMgr reads and returns the named state from GCS. If the named state does
// not yet exist, a new state file is created.
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
	c, err := b.client(name)
	if err != nil {
		return nil, err
	}

	st := remote.NewState(c, b.encryption)

	// Grab the value
	if err := st.RefreshState(); err != nil {
		return nil, err
	}

	// If we have no state, we have to create an empty state.
	// The lock taken here guards against two concurrent inits both writing
	// the initial empty state.
	if v := st.State(); v == nil {

		lockInfo := statemgr.NewLockInfo()
		lockInfo.Operation = "init"
		lockID, err := st.Lock(lockInfo)
		if err != nil {
			return nil, err
		}

		// Local helper function so we can call it multiple places
		unlock := func(baseErr error) error {
			if err := st.Unlock(lockID); err != nil {
				const unlockErrMsg = `%v
				Additionally, unlocking the state file on Google Cloud Storage failed:

				Error message: %q
				Lock ID (gen): %v
				Lock file URL: %v

				You may have to force-unlock this state in order to use it again.
				The GCloud backend acquires a lock during initialization to ensure
				the initial state file is created.`
				return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL())
			}

			return baseErr
		}

		if err := st.WriteState(states.NewState()); err != nil {
			return nil, unlock(err)
		}
		if err := st.PersistState(nil); err != nil {
			return nil, unlock(err)
		}

		// Unlock, the state should now be initialized
		if err := unlock(nil); err != nil {
			return nil, err
		}

	}

	return st, nil
}

// stateFile returns the object name of the state file for the named workspace.
func (b *Backend) stateFile(name string) string {
	return path.Join(b.prefix, name+stateFileSuffix)
}

// lockFile returns the object name of the lock file for the named workspace.
func (b *Backend) lockFile(name string) string {
	return path.Join(b.prefix, name+lockFileSuffix)
}
+// SPDX-License-Identifier: MPL-2.0 + +package gcs + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "strings" + "testing" + "time" + + kms "cloud.google.com/go/kms/apiv1" + "cloud.google.com/go/storage" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/version" + "google.golang.org/api/option" + kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" +) + +const ( + noPrefix = "" + noEncryptionKey = "" + noKmsKeyName = "" +) + +// See https://cloud.google.com/storage/docs/using-encryption-keys#generating_your_own_encryption_key +const encryptionKey = "yRyCOikXi1ZDNE0xN3yiFsJjg7LGimoLrGFcLZgQoVk=" + +// KMS key ring name and key name are hardcoded here and re-used because key rings (and keys) cannot be deleted +// Test code asserts their presence and creates them if they're absent. They're not deleted at the end of tests. 
+// See: https://cloud.google.com/kms/docs/faq#cannot_delete +const ( + keyRingName = "tf-gcs-backend-acc-tests" + keyName = "tf-test-key-1" + kmsRole = "roles/cloudkms.cryptoKeyEncrypterDecrypter" // GCS service account needs this binding on the created key +) + +var keyRingLocation = os.Getenv("GOOGLE_REGION") + +func TestStateFile(t *testing.T) { + t.Parallel() + + cases := []struct { + prefix string + name string + wantStateFile string + wantLockFile string + }{ + {"state", "default", "state/default.tfstate", "state/default.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + } + for _, c := range cases { + b := &Backend{ + prefix: c.prefix, + } + + if got := b.stateFile(c.name); got != c.wantStateFile { + t.Errorf("stateFile(%q) = %q, want %q", c.name, got, c.wantStateFile) + } + + if got := b.lockFile(c.name); got != c.wantLockFile { + t.Errorf("lockFile(%q) = %q, want %q", c.name, got, c.wantLockFile) + } + } +} + +func TestRemoteClient(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be, noPrefix) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + remote.TestClient(t, rs.Client) +} +func TestRemoteClientWithEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + defer teardownBackend(t, be, noPrefix) + + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("be.StateMgr(%q) = %v", backend.DefaultStateName, err) + } + + rs, ok := ss.(*remote.State) + if !ok { + t.Fatalf("be.StateMgr(): got a 
%T, want a *remote.State", ss) + } + + remote.TestClient(t, rs.Client) +} + +func TestRemoteLocks(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + be := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be, noPrefix) + + remoteClient := func() (remote.Client, error) { + ss, err := be.StateMgr(backend.DefaultStateName) + if err != nil { + return nil, err + } + + rs, ok := ss.(*remote.State) + if !ok { + return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss) + } + + return rs.Client, nil + } + + c0, err := remoteClient() + if err != nil { + t.Fatalf("remoteClient(0) = %v", err) + } + c1, err := remoteClient() + if err != nil { + t.Fatalf("remoteClient(1) = %v", err) + } + + remote.TestRemoteLocks(t, c0, c1) +} + +func TestBackend(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) + backend.TestBackendStateForceUnlock(t, be0, be1) +} + +func TestBackendWithPrefix(t *testing.T) { + t.Parallel() + + prefix := "test/prefix" + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, prefix, noEncryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, prefix) + + be1 := setupBackend(t, bucket, prefix+"/", noEncryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} +func TestBackendWithCustomerSuppliedEncryption(t *testing.T) { + t.Parallel() + + bucket := bucketName(t) + + be0 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, encryptionKey, noKmsKeyName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +func 
TestBackendWithCustomerManagedKMSEncryption(t *testing.T) { + t.Parallel() + + projectID := os.Getenv("GOOGLE_PROJECT") + bucket := bucketName(t) + + // Taken from global variables in test file + kmsDetails := map[string]string{ + "project": projectID, + "location": keyRingLocation, + "ringName": keyRingName, + "keyName": keyName, + } + + kmsName := setupKmsKey(t, kmsDetails) + + be0 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) + defer teardownBackend(t, be0, noPrefix) + + be1 := setupBackend(t, bucket, noPrefix, noEncryptionKey, kmsName) + + backend.TestBackendStates(t, be0) + backend.TestBackendStateLocks(t, be0, be1) +} + +// setupBackend returns a new GCS backend. +func setupBackend(t *testing.T, bucket, prefix, key, kmsName string) backend.Backend { + t.Helper() + + projectID := os.Getenv("GOOGLE_PROJECT") + if projectID == "" || os.Getenv("TF_ACC") == "" { + t.Skip("This test creates a bucket in GCS and populates it. " + + "Since this may incur costs, it will only run if " + + "the TF_ACC and GOOGLE_PROJECT environment variables are set.") + } + + config := map[string]interface{}{ + "bucket": bucket, + "prefix": prefix, + } + // Only add encryption keys to config if non-zero value set + // If not set here, default values are supplied in `TestBackendConfig` by `PrepareConfig` function call + if len(key) > 0 { + config["encryption_key"] = key + } + if len(kmsName) > 0 { + config["kms_encryption_key"] = kmsName + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)) + be := b.(*Backend) + + // create the bucket if it doesn't exist + bkt := be.storageClient.Bucket(bucket) + _, err := bkt.Attrs(be.storageContext) + if err != nil { + if err != storage.ErrBucketNotExist { + t.Fatal(err) + } + + attrs := &storage.BucketAttrs{ + Location: os.Getenv("GOOGLE_REGION"), + } + err := bkt.Create(be.storageContext, projectID, attrs) + if err != nil { + t.Fatal(err) + } + } + + return b +} + +// 
setupKmsKey asserts that a KMS key chain and key exist and necessary IAM bindings are in place +// If the key ring or key do not exist they are created and permissions are given to the GCS Service account +func setupKmsKey(t *testing.T, keyDetails map[string]string) string { + t.Helper() + + projectID := os.Getenv("GOOGLE_PROJECT") + if projectID == "" || os.Getenv("TF_ACC") == "" { + t.Skip("This test creates a KMS key ring and key in Cloud KMS. " + + "Since this may incur costs, it will only run if " + + "the TF_ACC and GOOGLE_PROJECT environment variables are set.") + } + + // KMS Client + ctx := context.Background() + opts, err := testGetClientOptions(t) + if err != nil { + e := fmt.Errorf("testGetClientOptions() failed: %w", err) + t.Fatal(e) + } + c, err := kms.NewKeyManagementClient(ctx, opts...) + if err != nil { + e := fmt.Errorf("kms.NewKeyManagementClient() failed: %w", err) + t.Fatal(e) + } + defer c.Close() + + // Get KMS key ring, create if doesn't exist + reqGetKeyRing := &kmspb.GetKeyRingRequest{ + Name: fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), + } + var keyRing *kmspb.KeyRing + keyRing, err = c.GetKeyRing(ctx, reqGetKeyRing) + if err != nil { + if !strings.Contains(err.Error(), "NotFound") { + // Handle unexpected error that isn't related to the key ring not being made yet + t.Fatal(err) + } + // Create key ring that doesn't exist + t.Logf("Cloud KMS key ring `%s` not found: creating key ring", + fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", keyDetails["project"], keyDetails["location"], keyDetails["ringName"]), + ) + reqCreateKeyRing := &kmspb.CreateKeyRingRequest{ + Parent: fmt.Sprintf("projects/%s/locations/%s", keyDetails["project"], keyDetails["location"]), + KeyRingId: keyDetails["ringName"], + } + keyRing, err = c.CreateKeyRing(ctx, reqCreateKeyRing) + if err != nil { + t.Fatal(err) + } + t.Logf("Cloud KMS key ring `%s` created successfully", 
keyRing.Name) + } + + // Get KMS key, create if doesn't exist (and give GCS service account permission to use) + reqGetKey := &kmspb.GetCryptoKeyRequest{ + Name: fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), + } + var key *kmspb.CryptoKey + key, err = c.GetCryptoKey(ctx, reqGetKey) + if err != nil { + if !strings.Contains(err.Error(), "NotFound") { + // Handle unexpected error that isn't related to the key not being made yet + t.Fatal(err) + } + // Create key that doesn't exist + t.Logf("Cloud KMS key `%s` not found: creating key", + fmt.Sprintf("%s/cryptoKeys/%s", keyRing.Name, keyDetails["keyName"]), + ) + reqCreateKey := &kmspb.CreateCryptoKeyRequest{ + Parent: keyRing.Name, + CryptoKeyId: keyDetails["keyName"], + CryptoKey: &kmspb.CryptoKey{ + Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT, + }, + } + key, err = c.CreateCryptoKey(ctx, reqCreateKey) + if err != nil { + t.Fatal(err) + } + t.Logf("Cloud KMS key `%s` created successfully", key.Name) + } + + // Get GCS Service account email, check has necessary permission on key + // Note: we cannot reuse the backend's storage client (like in the setupBackend function) + // because the KMS key needs to exist before the backend buckets are made in the test. + sc, err := storage.NewClient(ctx, opts...) //reuse opts from KMS client + if err != nil { + e := fmt.Errorf("storage.NewClient() failed: %w", err) + t.Fatal(e) + } + defer sc.Close() + gcsServiceAccount, err := sc.ServiceAccount(ctx, keyDetails["project"]) + if err != nil { + t.Fatal(err) + } + + // Assert Cloud Storage service account has permission to use this key. 
+ member := fmt.Sprintf("serviceAccount:%s", gcsServiceAccount) + iamHandle := c.ResourceIAM(key.Name) + policy, err := iamHandle.Policy(ctx) + if err != nil { + t.Fatal(err) + } + if ok := policy.HasRole(member, kmsRole); !ok { + // Add the missing permissions + t.Logf("Granting GCS service account %s %s role on key %s", gcsServiceAccount, kmsRole, key.Name) + policy.Add(member, kmsRole) + err = iamHandle.SetPolicy(ctx, policy) + if err != nil { + t.Fatal(err) + } + } + return key.Name +} + +// teardownBackend deletes all states from be except the default state. +func teardownBackend(t *testing.T, be backend.Backend, prefix string) { + t.Helper() + gcsBE, ok := be.(*Backend) + if !ok { + t.Fatalf("be is a %T, want a *gcsBackend", be) + } + ctx := gcsBE.storageContext + + bucket := gcsBE.storageClient.Bucket(gcsBE.bucketName) + objs := bucket.Objects(ctx, nil) + + for o, err := objs.Next(); err == nil; o, err = objs.Next() { + if err := bucket.Object(o.Name).Delete(ctx); err != nil { + log.Printf("Error trying to delete object: %s %s\n\n", o.Name, err) + } else { + log.Printf("Object deleted: %s", o.Name) + } + } + + // Delete the bucket itself. + if err := bucket.Delete(ctx); err != nil { + t.Errorf("deleting bucket %q failed, manual cleanup may be required: %v", gcsBE.bucketName, err) + } +} + +// bucketName returns a valid bucket name for this test. +func bucketName(t *testing.T) string { + name := fmt.Sprintf("tf-%x-%s", time.Now().UnixNano(), t.Name()) + + // Bucket names must contain 3 to 63 characters. 
+ if len(name) > 63 { + name = name[:63] + } + + return strings.ToLower(name) +} + +// getClientOptions returns the []option.ClientOption needed to configure Google API clients +// that are required in acceptance tests but are not part of the gcs backend itself +func testGetClientOptions(t *testing.T) ([]option.ClientOption, error) { + t.Helper() + + var creds string + if v := os.Getenv("GOOGLE_BACKEND_CREDENTIALS"); v != "" { + creds = v + } else { + creds = os.Getenv("GOOGLE_CREDENTIALS") + } + if creds == "" { + t.Skip("This test required credentials to be supplied via" + + "the GOOGLE_CREDENTIALS or GOOGLE_BACKEND_CREDENTIALS environment variables.") + } + + var opts []option.ClientOption + var credOptions []option.ClientOption + + contents, err := backend.ReadPathOrContents(creds) + if err != nil { + return nil, fmt.Errorf("error loading credentials: %w", err) + } + if !json.Valid([]byte(contents)) { + return nil, fmt.Errorf("the string provided in credentials is neither valid json nor a valid file path") + } + credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) + opts = append(opts, credOptions...) + opts = append(opts, option.WithUserAgent(httpclient.OpenTofuUserAgent(version.Version))) + + return opts, nil +} diff --git a/pkg/backend/remote-state/gcs/client.go b/pkg/backend/remote-state/gcs/client.go new file mode 100644 index 00000000000..8f4b1396284 --- /dev/null +++ b/pkg/backend/remote-state/gcs/client.go @@ -0,0 +1,200 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gcs + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + + "cloud.google.com/go/storage" + multierror "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "golang.org/x/net/context" +) + +// remoteClient is used by "state/remote".State to read and write +// blobs representing state. +// Implements "state/remote".ClientLocker +type remoteClient struct { + storageContext context.Context + storageClient *storage.Client + bucketName string + stateFilePath string + lockFilePath string + encryptionKey []byte + kmsKeyName string +} + +func (c *remoteClient) Get() (payload *remote.Payload, err error) { + stateFileReader, err := c.stateFile().NewReader(c.storageContext) + if err != nil { + if err == storage.ErrObjectNotExist { + return nil, nil + } else { + return nil, fmt.Errorf("Failed to open state file at %v: %w", c.stateFileURL(), err) + } + } + defer stateFileReader.Close() + + stateFileContents, err := io.ReadAll(stateFileReader) + if err != nil { + return nil, fmt.Errorf("Failed to read state file from %v: %w", c.stateFileURL(), err) + } + + stateFileAttrs, err := c.stateFile().Attrs(c.storageContext) + if err != nil { + return nil, fmt.Errorf("Failed to read state file attrs from %v: %w", c.stateFileURL(), err) + } + + result := &remote.Payload{ + Data: stateFileContents, + MD5: stateFileAttrs.MD5, + } + + return result, nil +} + +func (c *remoteClient) Put(data []byte) error { + err := func() error { + stateFileWriter := c.stateFile().NewWriter(c.storageContext) + if len(c.kmsKeyName) > 0 { + stateFileWriter.KMSKeyName = c.kmsKeyName + } + if _, err := stateFileWriter.Write(data); err != nil { + return err + } + return stateFileWriter.Close() + }() + if err != nil { + return fmt.Errorf("Failed to upload state to %v: %w", c.stateFileURL(), err) + } + + return nil +} + +func (c *remoteClient) Delete() error { + if 
err := c.stateFile().Delete(c.storageContext); err != nil { + return fmt.Errorf("Failed to delete state file %v: %w", c.stateFileURL(), err) + } + + return nil +} + +// Lock writes to a lock file, ensuring file creation. Returns the generation +// number, which must be passed to Unlock(). +func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { + // update the path we're using + // we can't set the ID until the info is written + info.Path = c.lockFileURL() + + infoJson, err := json.Marshal(info) + if err != nil { + return "", err + } + + lockFile := c.lockFile() + w := lockFile.If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext) + err = func() error { + if _, err := w.Write(infoJson); err != nil { + return err + } + return w.Close() + }() + + if err != nil { + return "", c.lockError(fmt.Errorf("writing %q failed: %w", c.lockFileURL(), err)) + } + + info.ID = strconv.FormatInt(w.Attrs().Generation, 10) + + return info.ID, nil +} + +func (c *remoteClient) Unlock(id string) error { + gen, err := strconv.ParseInt(id, 10, 64) + if err != nil { + return fmt.Errorf("Lock ID should be numerical value, got '%s'", id) + } + + if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil { + return c.lockError(err) + } + + return nil +} + +func (c *remoteClient) lockError(err error) *statemgr.LockError { + lockErr := &statemgr.LockError{ + Err: err, + } + + info, infoErr := c.lockInfo() + switch { + case errors.Is(infoErr, storage.ErrObjectNotExist): + // Race condition - file exists initially but then has been deleted by other process + lockErr.InconsistentRead = true + case infoErr != nil: + lockErr.Err = multierror.Append(lockErr.Err, infoErr) + default: + lockErr.Info = info + } + return lockErr +} + +// lockInfo reads the lock file, parses its contents and returns the parsed +// LockInfo struct. 
+func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) { + r, err := c.lockFile().NewReader(c.storageContext) + if err != nil { + return nil, err + } + defer r.Close() + + rawData, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + info := &statemgr.LockInfo{} + if err := json.Unmarshal(rawData, info); err != nil { + return nil, err + } + + // We use the Generation as the ID, so overwrite the ID in the json. + // This can't be written into the Info, since the generation isn't known + // until it's written. + attrs, err := c.lockFile().Attrs(c.storageContext) + if err != nil { + return nil, err + } + info.ID = strconv.FormatInt(attrs.Generation, 10) + + return info, nil +} + +func (c *remoteClient) stateFile() *storage.ObjectHandle { + h := c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath) + if len(c.encryptionKey) > 0 { + return h.Key(c.encryptionKey) + } + return h +} + +func (c *remoteClient) stateFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath) +} + +func (c *remoteClient) lockFile() *storage.ObjectHandle { + return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath) +} + +func (c *remoteClient) lockFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath) +} diff --git a/pkg/backend/remote-state/http/backend.go b/pkg/backend/remote-state/http/backend.go new file mode 100644 index 00000000000..d2a4ed1c8e3 --- /dev/null +++ b/pkg/backend/remote-state/http/backend.go @@ -0,0 +1,320 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-retryablehttp" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_ADDRESS", nil), + Description: "The address of the REST endpoint", + }, + "update_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UPDATE_METHOD", "POST"), + Description: "HTTP method to use when updating state", + }, + "lock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_ADDRESS", nil), + Description: "The address of the lock REST endpoint", + }, + "unlock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_ADDRESS", nil), + Description: "The address of the unlock REST endpoint", + }, + "lock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_LOCK_METHOD", "LOCK"), + Description: "The HTTP method to use when locking", + }, + "unlock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_UNLOCK_METHOD", "UNLOCK"), + Description: "The HTTP method to use when unlocking", + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: 
schema.EnvDefaultFunc("TF_HTTP_USERNAME", nil), + Description: "The username for HTTP basic authentication", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_PASSWORD", nil), + Description: "The password for HTTP basic authentication", + }, + "skip_cert_verification": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to skip TLS verification.", + }, + "retry_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_MAX", 2), + Description: "The number of HTTP request retries.", + }, + "retry_wait_min": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MIN", 1), + Description: "The minimum time in seconds to wait between HTTP request attempts.", + }, + "retry_wait_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_RETRY_WAIT_MAX", 30), + Description: "The maximum time in seconds to wait between HTTP request attempts.", + }, + "client_ca_certificate_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CA_CERTIFICATE_PEM", ""), + Description: "A PEM-encoded CA certificate chain used by the client to verify server certificates during TLS authentication.", + }, + "client_certificate_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_CERTIFICATE_PEM", ""), + Description: "A PEM-encoded certificate used by the server to verify the client during mutual TLS (mTLS) authentication.", + }, + "client_private_key_pem": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TF_HTTP_CLIENT_PRIVATE_KEY_PEM", ""), + Description: "A PEM-encoded private key, required if client_certificate_pem is 
specified.", + }, + "headers": &schema.Schema{ + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ValidateFunc: func(cv interface{}, ck string) ([]string, []error) { + nameRegex := regexp.MustCompile("[^a-zA-Z0-9-_]") + valueRegex := regexp.MustCompile("[^[:ascii:]]") + + headers := cv.(map[string]interface{}) + err := make([]error, 0, len(headers)) + for name, value := range headers { + if len(name) == 0 || nameRegex.MatchString(name) { + err = append(err, fmt.Errorf( + "%s \"%s\" name must not be empty and only contain A-Za-z0-9-_ characters", ck, name)) + } + + v := value.(string) + if len(strings.TrimSpace(v)) == 0 || valueRegex.MatchString(v) { + err = append(err, fmt.Errorf( + "%s \"%s\" value must not be empty and only contain ascii characters", ck, name)) + } + } + return nil, err + }, + Description: "A map of headers, when set will be included with HTTP requests sent to the HTTP backend", + }, + }, + } + + b := &Backend{Backend: s, encryption: enc} + b.Backend.ConfigureFunc = b.configure + return b +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + client *httpClient +} + +// configureTLS configures TLS when needed; if there are no conditions requiring TLS, no change is made. 
+func (b *Backend) configureTLS(client *retryablehttp.Client, data *schema.ResourceData) error { + // If there are no conditions needing to configure TLS, leave the client untouched + skipCertVerification := data.Get("skip_cert_verification").(bool) + clientCACertificatePem := data.Get("client_ca_certificate_pem").(string) + clientCertificatePem := data.Get("client_certificate_pem").(string) + clientPrivateKeyPem := data.Get("client_private_key_pem").(string) + if !skipCertVerification && clientCACertificatePem == "" && clientCertificatePem == "" && clientPrivateKeyPem == "" { + return nil + } + if clientCertificatePem != "" && clientPrivateKeyPem == "" { + return fmt.Errorf("client_certificate_pem is set but client_private_key_pem is not") + } + if clientPrivateKeyPem != "" && clientCertificatePem == "" { + return fmt.Errorf("client_private_key_pem is set but client_certificate_pem is not") + } + + // TLS configuration is needed; create an object and configure it + var tlsConfig tls.Config + client.HTTPClient.Transport.(*http.Transport).TLSClientConfig = &tlsConfig + + if skipCertVerification { + // ignores TLS verification + tlsConfig.InsecureSkipVerify = true + } + if clientCACertificatePem != "" { + // trust servers based on a CA + tlsConfig.RootCAs = x509.NewCertPool() + if !tlsConfig.RootCAs.AppendCertsFromPEM([]byte(clientCACertificatePem)) { + return errors.New("failed to append certs") + } + } + if clientCertificatePem != "" && clientPrivateKeyPem != "" { + // attach a client certificate to the TLS handshake (aka mTLS) + certificate, err := tls.X509KeyPair([]byte(clientCertificatePem), []byte(clientPrivateKeyPem)) + if err != nil { + return fmt.Errorf("cannot load client certificate: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{certificate} + } + + return nil +} + +func (b *Backend) configure(ctx context.Context) error { + data := schema.FromContextBackendConfig(ctx) + + address := data.Get("address").(string) + updateURL, err := 
url.Parse(address) + if err != nil { + return fmt.Errorf("failed to parse address URL: %w", err) + } + if updateURL.Scheme != "http" && updateURL.Scheme != "https" { + return fmt.Errorf("address must be HTTP or HTTPS") + } + + updateMethod := data.Get("update_method").(string) + + var lockURL *url.URL + if v, ok := data.GetOk("lock_address"); ok && v.(string) != "" { + var err error + lockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse lockAddress URL: %w", err) + } + if lockURL.Scheme != "http" && lockURL.Scheme != "https" { + return fmt.Errorf("lockAddress must be HTTP or HTTPS") + } + } + + lockMethod := data.Get("lock_method").(string) + + var unlockURL *url.URL + if v, ok := data.GetOk("unlock_address"); ok && v.(string) != "" { + var err error + unlockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse unlockAddress URL: %w", err) + } + if unlockURL.Scheme != "http" && unlockURL.Scheme != "https" { + return fmt.Errorf("unlockAddress must be HTTP or HTTPS") + } + } + + unlockMethod := data.Get("unlock_method").(string) + + username := data.Get("username").(string) + password := data.Get("password").(string) + + var headers map[string]string + if dv, ok := data.GetOk("headers"); ok { + dh := dv.(map[string]interface{}) + headers = make(map[string]string, len(dh)) + + for k, v := range dh { + value, ok := v.(string) + if !ok { + return fmt.Errorf("header value for %s must be a string", k) + } + switch strings.ToLower(k) { + case "authorization": + if username != "" { + return fmt.Errorf("headers \"%s\" cannot be set when providing username", k) + } + headers[k] = value + case "content-type", "content-md5": + return fmt.Errorf("headers \"%s\" is reserved", k) + default: + headers[k] = value + } + } + } + + rClient := retryablehttp.NewClient() + rClient.RetryMax = data.Get("retry_max").(int) + rClient.RetryWaitMin = time.Duration(data.Get("retry_wait_min").(int)) * time.Second + 
rClient.RetryWaitMax = time.Duration(data.Get("retry_wait_max").(int)) * time.Second + rClient.Logger = log.New(logging.LogOutput(), "", log.Flags()) + if err = b.configureTLS(rClient, data); err != nil { + return err + } + + b.client = &httpClient{ + URL: updateURL, + UpdateMethod: updateMethod, + + LockURL: lockURL, + LockMethod: lockMethod, + UnlockURL: unlockURL, + UnlockMethod: unlockMethod, + + Headers: headers, + Username: username, + Password: password, + + // accessible only for testing use + Client: rClient, + } + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + return remote.NewState(b.client, b.encryption), nil +} + +func (b *Backend) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *Backend) DeleteWorkspace(string, bool) error { + return backend.ErrWorkspacesNotSupported +} diff --git a/pkg/backend/remote-state/http/backend_test.go b/pkg/backend/remote-state/http/backend_test.go new file mode 100644 index 00000000000..e2a7f3ec8b4 --- /dev/null +++ b/pkg/backend/remote-state/http/backend_test.go @@ -0,0 +1,186 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" +) + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestHTTPClientFactory(t *testing.T) { + // defaults + + conf := map[string]cty.Value{ + "address": cty.StringVal("http://127.0.0.1:8888/foo"), + } + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), configs.SynthBody("synth", conf)).(*Backend) + client := b.client + + if client == nil { + t.Fatal("Unexpected failure, address") + } + if client.URL.String() != "http://127.0.0.1:8888/foo" { + t.Fatalf("Expected address \"%s\", got \"%s\"", conf["address"], client.URL.String()) + } + if client.UpdateMethod != "POST" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "POST", client.UpdateMethod) + } + if client.LockURL != nil || client.LockMethod != "LOCK" { + t.Fatal("Unexpected lock_address or lock_method") + } + if client.UnlockURL != nil || client.UnlockMethod != "UNLOCK" { + t.Fatal("Unexpected unlock_address or unlock_method") + } + if client.Username != "" || client.Password != "" { + t.Fatal("Unexpected username or password") + } + + if client.Headers != nil { + t.Fatal("Unexpected headers") + } + + // custom + conf = map[string]cty.Value{ + "address": cty.StringVal("http://127.0.0.1:8888/foo"), + "update_method": cty.StringVal("BLAH"), + "lock_address": cty.StringVal("http://127.0.0.1:8888/bar"), + "lock_method": cty.StringVal("BLIP"), + "unlock_address": cty.StringVal("http://127.0.0.1:8888/baz"), + "unlock_method": cty.StringVal("BLOOP"), + "username": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "retry_max": cty.StringVal("999"), + "retry_wait_min": cty.StringVal("15"), + "retry_wait_max": cty.StringVal("150"), + "headers": cty.MapVal(map[string]cty.Value{ + 
"user-defined": cty.StringVal("test"), + }), + } + + b = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), configs.SynthBody("synth", conf)).(*Backend) + client = b.client + + if client == nil { + t.Fatal("Unexpected failure, update_method") + } + if client.UpdateMethod != "BLAH" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) + } + if client.LockURL.String() != conf["lock_address"].AsString() || client.LockMethod != "BLIP" { + t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", client.LockURL.String(), + conf["lock_address"].AsString(), client.LockMethod, conf["lock_method"]) + } + if client.UnlockURL.String() != conf["unlock_address"].AsString() || client.UnlockMethod != "BLOOP" { + t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), + conf["unlock_address"].AsString(), client.UnlockMethod, conf["unlock_method"]) + } + if client.Username != "user" || client.Password != "pass" { + t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], + client.Password, conf["password"]) + } + if client.Client.RetryMax != 999 { + t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) + } + if client.Client.RetryWaitMin != 15*time.Second { + t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) + } + if client.Client.RetryWaitMax != 150*time.Second { + t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) + } + + if len(client.Headers) != 1 || client.Headers["user-defined"] != "test" { + t.Fatalf("Expected headers \"user-defined\" to be \"test\", got \"%s\"", client.Headers) + } + + // authorization header + conf = map[string]cty.Value{ + "address": cty.StringVal("http://127.0.0.1:8888/foo"), + "headers": cty.MapVal(map[string]cty.Value{ + "authorization": 
cty.StringVal("auth-test"), + }), + } + + b, _ = backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), configs.SynthBody("synth", conf)).(*Backend) + client = b.client + + if client == nil { + t.Fatal("Unexpected failure, update_method") + } + + if len(client.Headers) != 1 || client.Headers["authorization"] != "auth-test" { + t.Fatalf("Expected headers \"authorization\" to be \"auth-test\", got \"%s\"", client.Headers) + } +} + +func TestHTTPClientFactoryWithEnv(t *testing.T) { + // env + conf := map[string]string{ + "address": "http://127.0.0.1:8888/foo", + "update_method": "BLAH", + "lock_address": "http://127.0.0.1:8888/bar", + "lock_method": "BLIP", + "unlock_address": "http://127.0.0.1:8888/baz", + "unlock_method": "BLOOP", + "username": "user", + "password": "pass", + "retry_max": "999", + "retry_wait_min": "15", + "retry_wait_max": "150", + } + + t.Setenv("TF_HTTP_ADDRESS", conf["address"]) + t.Setenv("TF_HTTP_UPDATE_METHOD", conf["update_method"]) + t.Setenv("TF_HTTP_LOCK_ADDRESS", conf["lock_address"]) + t.Setenv("TF_HTTP_UNLOCK_ADDRESS", conf["unlock_address"]) + t.Setenv("TF_HTTP_LOCK_METHOD", conf["lock_method"]) + t.Setenv("TF_HTTP_UNLOCK_METHOD", conf["unlock_method"]) + t.Setenv("TF_HTTP_USERNAME", conf["username"]) + t.Setenv("TF_HTTP_PASSWORD", conf["password"]) + t.Setenv("TF_HTTP_RETRY_MAX", conf["retry_max"]) + t.Setenv("TF_HTTP_RETRY_WAIT_MIN", conf["retry_wait_min"]) + t.Setenv("TF_HTTP_RETRY_WAIT_MAX", conf["retry_wait_max"]) + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), nil).(*Backend) + client := b.client + + if client == nil { + t.Fatal("Unexpected failure, EnvDefaultFunc") + } + if client.UpdateMethod != "BLAH" { + t.Fatalf("Expected update_method \"%s\", got \"%s\"", "BLAH", client.UpdateMethod) + } + if client.LockURL.String() != conf["lock_address"] || client.LockMethod != "BLIP" { + t.Fatalf("Unexpected lock_address \"%s\" vs \"%s\" or lock_method \"%s\" vs \"%s\"", 
client.LockURL.String(), + conf["lock_address"], client.LockMethod, conf["lock_method"]) + } + if client.UnlockURL.String() != conf["unlock_address"] || client.UnlockMethod != "BLOOP" { + t.Fatalf("Unexpected unlock_address \"%s\" vs \"%s\" or unlock_method \"%s\" vs \"%s\"", client.UnlockURL.String(), + conf["unlock_address"], client.UnlockMethod, conf["unlock_method"]) + } + if client.Username != "user" || client.Password != "pass" { + t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"], + client.Password, conf["password"]) + } + if client.Client.RetryMax != 999 { + t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax) + } + if client.Client.RetryWaitMin != 15*time.Second { + t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin) + } + if client.Client.RetryWaitMax != 150*time.Second { + t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax) + } +} diff --git a/pkg/backend/remote-state/http/client.go b/pkg/backend/remote-state/http/client.go new file mode 100644 index 00000000000..28c7387a930 --- /dev/null +++ b/pkg/backend/remote-state/http/client.go @@ -0,0 +1,263 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/hashicorp/go-retryablehttp" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// httpClient is a remote client that stores data in Consul or HTTP REST. 
+type httpClient struct { + // Update & Retrieve + URL *url.URL + UpdateMethod string + + // Locking + LockURL *url.URL + LockMethod string + UnlockURL *url.URL + UnlockMethod string + + // HTTP + Client *retryablehttp.Client + Headers map[string]string + Username string + Password string + + lockID string + jsonLockInfo []byte +} + +func (c *httpClient) httpRequest(method string, url *url.URL, data []byte, what string) (*http.Response, error) { + var body interface{} + if len(data) > 0 { + body = data + } + + // Create the request + req, err := retryablehttp.NewRequest(method, url.String(), body) + if err != nil { + return nil, fmt.Errorf("Failed to make %s HTTP request: %w", what, err) + } + + // Add user-defined headers + for k, v := range c.Headers { + req.Header.Set(k, v) + } + + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + // Work with data/body + if len(data) > 0 { + req.Header.Set("Content-Type", "application/json") + + // Generate the MD5 + hash := md5.Sum(data) + b64 := base64.StdEncoding.EncodeToString(hash[:]) + req.Header.Set("Content-MD5", b64) + } + + // Make the request + resp, err := c.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to %s: %w", what, err) + } + + return resp, nil +} + +func (c *httpClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.LockURL == nil { + return "", nil + } + c.lockID = "" + + jsonLockInfo := info.Marshal() + resp, err := c.httpRequest(c.LockMethod, c.LockURL, jsonLockInfo, "lock") + if err != nil { + return "", err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + c.lockID = info.ID + c.jsonLockInfo = jsonLockInfo + return info.ID, nil + case http.StatusUnauthorized: + return "", fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return "", fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusConflict, http.StatusLocked: + body, err := io.ReadAll(resp.Body) + if err != nil { + 
return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked, failed to read body"), + } + } + existing := statemgr.LockInfo{} + err = json.Unmarshal(body, &existing) + if err != nil { + return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked, failed to unmarshal body"), + } + } + return "", &statemgr.LockError{ + Info: info, + Err: fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID), + } + default: + return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Unlock(id string) error { + if c.UnlockURL == nil { + return nil + } + + resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, c.jsonLockInfo, "unlock") + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Get() (*remote.Payload, error) { + resp, err := c.httpRequest(http.MethodGet, c.URL, nil, "get state") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %w", err) + } + + // Create the payload + payload := &remote.Payload{ + 
Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf( + "Failed to decode Content-MD5 '%s': %w", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := md5.Sum(payload.Data) + payload.MD5 = hash[:] + } + + return payload, nil +} + +func (c *httpClient) Put(data []byte) error { + // Copy the target URL + base := *c.URL + + if c.lockID != "" { + query := base.Query() + query.Set("ID", c.lockID) + base.RawQuery = query.Encode() + } + + /* + // Set the force query parameter if needed + if force { + values := base.Query() + values.Set("force", "true") + base.RawQuery = values.Encode() + } + */ + + var method string = "POST" + if c.UpdateMethod != "" { + method = c.UpdateMethod + } + resp, err := c.httpRequest(method, &base, data, "upload state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} + +func (c *httpClient) Delete() error { + // Make the request + resp, err := c.httpRequest(http.MethodDelete, c.URL, nil, "delete state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} diff --git a/pkg/backend/remote-state/http/client_test.go b/pkg/backend/remote-state/http/client_test.go new file mode 100644 index 00000000000..9dcffd654f8 --- /dev/null +++ b/pkg/backend/remote-state/http/client_test.go @@ -0,0 +1,208 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + + "github.com/hashicorp/go-retryablehttp" + "github.com/kubegems/opentofu/pkg/states/remote" +) + +func TestHTTPClient_impl(t *testing.T) { + var _ remote.Client = new(httpClient) + var _ remote.ClientLocker = new(httpClient) +} + +func TestHTTPClient(t *testing.T) { + handler := new(testHTTPHandler) + ts := httptest.NewServer(http.HandlerFunc(handler.Handle)) + defer ts.Close() + + url, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Parse: %s", err) + } + + // Test basic get/update + client := &httpClient{URL: url, Client: retryablehttp.NewClient()} + remote.TestClient(t, client) + + // test just a single PUT + p := &httpClient{ + URL: url, + UpdateMethod: "PUT", + Client: retryablehttp.NewClient(), + } + remote.TestClient(t, p) + + // Test headers + c := retryablehttp.NewClient() + c.RequestLogHook = func(_ retryablehttp.Logger, req *http.Request, _ int) { + // Test user defined header is part of the request + v := req.Header.Get("user-defined") + if v != "test" { + t.Fatalf("Expected header \"user-defined\" with value \"test\", got \"%s\"", v) + } + + // Test the content-type header was not overridden + v = req.Header.Get("content-type") + if req.Method == "PUT" && v != "application/json" { + t.Fatalf("Expected header \"content-type\" with value \"application/json\", got \"%s\"", v) + } + } + + p = &httpClient{ + URL: url, + UpdateMethod: "PUT", + Headers: map[string]string{ + "user-defined": "test", + "content-type": "application/xml", + }, + Client: c, + } + + remote.TestClient(t, p) + + // Test locking and alternative UpdateMethod + a := &httpClient{ + URL: url, + UpdateMethod: "PUT", + LockURL: url, + LockMethod: "LOCK", + UnlockURL: url, + UnlockMethod: "UNLOCK", + Client: retryablehttp.NewClient(), + } + b := &httpClient{ + URL: url, + UpdateMethod: "PUT", + LockURL: url, + 
LockMethod: "LOCK", + UnlockURL: url, + UnlockMethod: "UNLOCK", + Client: retryablehttp.NewClient(), + } + remote.TestRemoteLocks(t, a, b) + + // test a WebDAV-ish backend + davhandler := new(testHTTPHandler) + ts = httptest.NewServer(http.HandlerFunc(davhandler.HandleWebDAV)) + defer ts.Close() + + url, err = url.Parse(ts.URL) + client = &httpClient{ + URL: url, + UpdateMethod: "PUT", + Client: retryablehttp.NewClient(), + } + if err != nil { + t.Fatalf("Parse: %s", err) + } + + remote.TestClient(t, client) // first time through: 201 + remote.TestClient(t, client) // second time, with identical data: 204 + + // test a broken backend + brokenHandler := new(testBrokenHTTPHandler) + brokenHandler.handler = new(testHTTPHandler) + ts = httptest.NewServer(http.HandlerFunc(brokenHandler.Handle)) + defer ts.Close() + + url, err = url.Parse(ts.URL) + if err != nil { + t.Fatalf("Parse: %s", err) + } + client = &httpClient{URL: url, Client: retryablehttp.NewClient()} + remote.TestClient(t, client) +} + +type testHTTPHandler struct { + Data []byte + Locked bool +} + +func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + w.Write(h.Data) + case "PUT": + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + w.WriteHeader(201) + h.Data = buf.Bytes() + case "POST": + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + h.Data = buf.Bytes() + case "LOCK": + if h.Locked { + w.WriteHeader(423) + } else { + h.Locked = true + } + case "UNLOCK": + h.Locked = false + case "DELETE": + h.Data = nil + w.WriteHeader(200) + default: + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) + } +} + +// mod_dav-ish behavior +func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + w.Write(h.Data) + case "PUT": + buf := new(bytes.Buffer) + if _, err := 
io.Copy(buf, r.Body); err != nil { + w.WriteHeader(500) + } + if reflect.DeepEqual(h.Data, buf.Bytes()) { + h.Data = buf.Bytes() + w.WriteHeader(204) + } else { + h.Data = buf.Bytes() + w.WriteHeader(201) + } + case "DELETE": + h.Data = nil + w.WriteHeader(200) + default: + w.WriteHeader(500) + w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method))) + } +} + +type testBrokenHTTPHandler struct { + lastRequestWasBroken bool + handler *testHTTPHandler +} + +func (h *testBrokenHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) { + if h.lastRequestWasBroken { + h.lastRequestWasBroken = false + h.handler.Handle(w, r) + } else { + h.lastRequestWasBroken = true + w.WriteHeader(500) + } +} diff --git a/pkg/backend/remote-state/http/mock_server_test.go b/pkg/backend/remote-state/http/mock_server_test.go new file mode 100644 index 00000000000..ac6a3e3950e --- /dev/null +++ b/pkg/backend/remote-state/http/mock_server_test.go @@ -0,0 +1,95 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: server_test.go + +// Package http is a generated GoMock package. +package http + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockHttpServerCallback is a mock of HttpServerCallback interface. +type MockHttpServerCallback struct { + ctrl *gomock.Controller + recorder *MockHttpServerCallbackMockRecorder +} + +// MockHttpServerCallbackMockRecorder is the mock recorder for MockHttpServerCallback. +type MockHttpServerCallbackMockRecorder struct { + mock *MockHttpServerCallback +} + +// NewMockHttpServerCallback creates a new mock instance. +func NewMockHttpServerCallback(ctrl *gomock.Controller) *MockHttpServerCallback { + mock := &MockHttpServerCallback{ctrl: ctrl} + mock.recorder = &MockHttpServerCallbackMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockHttpServerCallback) EXPECT() *MockHttpServerCallbackMockRecorder { + return m.recorder +} + +// StateDELETE mocks base method. +func (m *MockHttpServerCallback) StateDELETE(req *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StateDELETE", req) +} + +// StateDELETE indicates an expected call of StateDELETE. +func (mr *MockHttpServerCallbackMockRecorder) StateDELETE(req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDELETE", reflect.TypeOf((*MockHttpServerCallback)(nil).StateDELETE), req) +} + +// StateGET mocks base method. +func (m *MockHttpServerCallback) StateGET(req *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StateGET", req) +} + +// StateGET indicates an expected call of StateGET. +func (mr *MockHttpServerCallbackMockRecorder) StateGET(req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGET", reflect.TypeOf((*MockHttpServerCallback)(nil).StateGET), req) +} + +// StateLOCK mocks base method. +func (m *MockHttpServerCallback) StateLOCK(req *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StateLOCK", req) +} + +// StateLOCK indicates an expected call of StateLOCK. +func (mr *MockHttpServerCallbackMockRecorder) StateLOCK(req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLOCK", reflect.TypeOf((*MockHttpServerCallback)(nil).StateLOCK), req) +} + +// StatePOST mocks base method. +func (m *MockHttpServerCallback) StatePOST(req *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StatePOST", req) +} + +// StatePOST indicates an expected call of StatePOST. 
+func (mr *MockHttpServerCallbackMockRecorder) StatePOST(req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatePOST", reflect.TypeOf((*MockHttpServerCallback)(nil).StatePOST), req) +} + +// StateUNLOCK mocks base method. +func (m *MockHttpServerCallback) StateUNLOCK(req *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StateUNLOCK", req) +} + +// StateUNLOCK indicates an expected call of StateUNLOCK. +func (mr *MockHttpServerCallbackMockRecorder) StateUNLOCK(req interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateUNLOCK", reflect.TypeOf((*MockHttpServerCallback)(nil).StateUNLOCK), req) +} diff --git a/pkg/backend/remote-state/http/server_test.go b/pkg/backend/remote-state/http/server_test.go new file mode 100644 index 00000000000..6d9956ce3ea --- /dev/null +++ b/pkg/backend/remote-state/http/server_test.go @@ -0,0 +1,440 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +//go:generate go run github.com/golang/mock/mockgen -package $GOPACKAGE -source $GOFILE -destination mock_$GOFILE + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "os" + "os/signal" + "path/filepath" + "reflect" + "strings" + "sync" + "syscall" + "testing" + + "github.com/golang/mock/gomock" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +const sampleState = ` +{ + "version": 4, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "remote": { + "type": "http", + "config": { + "path": "local-state.tfstate" + } + } +} +` + +type ( + HttpServerCallback interface { + StateGET(req *http.Request) + StatePOST(req *http.Request) + StateDELETE(req *http.Request) + StateLOCK(req *http.Request) + StateUNLOCK(req *http.Request) + } + httpServer struct { + r *http.ServeMux + data map[string]string + locks map[string]string + lock sync.RWMutex + + httpServerCallback HttpServerCallback + } + httpServerOpt func(*httpServer) +) + +func withHttpServerCallback(callback HttpServerCallback) httpServerOpt { + return func(s *httpServer) { + s.httpServerCallback = callback + } +} + +func newHttpServer(opts ...httpServerOpt) *httpServer { + r := http.NewServeMux() + s := &httpServer{ + r: r, + data: make(map[string]string), + locks: make(map[string]string), + } + for _, opt := range opts { + opt(s) + } + s.data["sample"] = sampleState + r.HandleFunc("/state/", s.handleState) + return s +} + +func (h *httpServer) getResource(req *http.Request) string { + switch pathParts := strings.SplitN(req.URL.Path, string(filepath.Separator), 3); len(pathParts) { + case 3: + return pathParts[2] + default: + return "" + } +} + 
// handleState dispatches a /state/ request to the handler for its HTTP
// method. Unknown methods fall through with no response written, which the
// net/http server reports as an implicit 200.
func (h *httpServer) handleState(writer http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		h.handleStateGET(writer, req)
	case "POST":
		h.handleStatePOST(writer, req)
	case "DELETE":
		h.handleStateDELETE(writer, req)
	case "LOCK":
		h.handleStateLOCK(writer, req)
	case "UNLOCK":
		h.handleStateUNLOCK(writer, req)
	}
}

// handleStateGET returns the stored state document for the requested
// resource, or 404 when none exists.
func (h *httpServer) handleStateGET(writer http.ResponseWriter, req *http.Request) {
	if h.httpServerCallback != nil {
		// Deferred so the callback observes the request after it is handled.
		defer h.httpServerCallback.StateGET(req)
	}
	resource := h.getResource(req)

	// Read lock only: GET never mutates the stored state.
	h.lock.RLock()
	defer h.lock.RUnlock()

	if state, ok := h.data[resource]; ok {
		_, _ = io.WriteString(writer, state)
	} else {
		writer.WriteHeader(http.StatusNotFound)
	}
}

// handleStatePOST stores the request body as the state document for the
// requested resource, replying 200 on success or 400 when the body cannot
// be read.
func (h *httpServer) handleStatePOST(writer http.ResponseWriter, req *http.Request) {
	if h.httpServerCallback != nil {
		defer h.httpServerCallback.StatePOST(req)
	}
	defer req.Body.Close()
	resource := h.getResource(req)

	data, err := io.ReadAll(req.Body)
	if err != nil {
		writer.WriteHeader(http.StatusBadRequest)
		return
	}

	h.lock.Lock()
	defer h.lock.Unlock()

	h.data[resource] = string(data)
	writer.WriteHeader(http.StatusOK)
}

// handleStateDELETE removes any stored state for the requested resource.
// Deleting a missing resource is a no-op and still replies 200.
func (h *httpServer) handleStateDELETE(writer http.ResponseWriter, req *http.Request) {
	if h.httpServerCallback != nil {
		defer h.httpServerCallback.StateDELETE(req)
	}
	resource := h.getResource(req)

	h.lock.Lock()
	defer h.lock.Unlock()

	delete(h.data, resource)
	writer.WriteHeader(http.StatusOK)
}

// handleStateLOCK attempts to acquire the lock for the requested resource.
// The request body is the caller's JSON lock info; if the resource is
// already locked the handler replies 423 Locked with the holder's lock info.
func (h *httpServer) handleStateLOCK(writer http.ResponseWriter, req *http.Request) {
	if h.httpServerCallback != nil {
		defer h.httpServerCallback.StateLOCK(req)
	}
	defer req.Body.Close()
	resource := h.getResource(req)

	data, err := io.ReadAll(req.Body)
	if err != nil {
		writer.WriteHeader(http.StatusBadRequest)
		return
	}

	h.lock.Lock()
	defer h.lock.Unlock()

	if existingLock, ok := h.locks[resource]; ok {
writer.WriteHeader(http.StatusLocked) + _, _ = io.WriteString(writer, existingLock) + } else { + h.locks[resource] = string(data) + _, _ = io.WriteString(writer, existingLock) + } +} + +func (h *httpServer) handleStateUNLOCK(writer http.ResponseWriter, req *http.Request) { + if h.httpServerCallback != nil { + defer h.httpServerCallback.StateUNLOCK(req) + } + defer req.Body.Close() + resource := h.getResource(req) + + data, err := io.ReadAll(req.Body) + if err != nil { + writer.WriteHeader(http.StatusBadRequest) + return + } + var lockInfo map[string]interface{} + if err = json.Unmarshal(data, &lockInfo); err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + + h.lock.Lock() + defer h.lock.Unlock() + + if existingLock, ok := h.locks[resource]; ok { + var existingLockInfo map[string]interface{} + if err = json.Unmarshal([]byte(existingLock), &existingLockInfo); err != nil { + writer.WriteHeader(http.StatusInternalServerError) + return + } + lockID := lockInfo["ID"].(string) + existingID := existingLockInfo["ID"].(string) + if lockID != existingID { + writer.WriteHeader(http.StatusConflict) + _, _ = io.WriteString(writer, existingLock) + } else { + delete(h.locks, resource) + _, _ = io.WriteString(writer, existingLock) + } + } else { + writer.WriteHeader(http.StatusConflict) + } +} + +func (h *httpServer) handler() http.Handler { + return h.r +} + +func NewHttpTestServer(opts ...httpServerOpt) (*httptest.Server, error) { + clientCAData, err := os.ReadFile("testdata/certs/ca.cert.pem") + if err != nil { + return nil, err + } + clientCAs := x509.NewCertPool() + clientCAs.AppendCertsFromPEM(clientCAData) + + cert, err := tls.LoadX509KeyPair("testdata/certs/server.crt", "testdata/certs/server.key") + if err != nil { + return nil, err + } + + h := newHttpServer(opts...) 
+ s := httptest.NewUnstartedServer(h.handler()) + s.TLS = &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCAs, + Certificates: []tls.Certificate{cert}, + } + + s.StartTLS() + return s, nil +} + +func TestMTLSServer_NoCertFails(t *testing.T) { + // Ensure that no calls are made to the server - everything is blocked by the tls.RequireAndVerifyClientCert + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockCallback := NewMockHttpServerCallback(ctrl) + + // Fire up a test server + ts, err := NewHttpTestServer(withHttpServerCallback(mockCallback)) + if err != nil { + t.Fatalf("unexpected error creating test server: %v", err) + } + defer ts.Close() + + // Configure the backend to the pre-populated sample state + url := ts.URL + "/state/sample" + conf := map[string]cty.Value{ + "address": cty.StringVal(url), + "skip_cert_verification": cty.BoolVal(true), + } + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), configs.SynthBody("synth", conf)).(*Backend) + if nil == b { + t.Fatal("nil backend") + } + + // Now get a state manager and check that it fails to refresh the state + sm, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error fetching StateMgr with %s: %v", backend.DefaultStateName, err) + } + + opErr := new(net.OpError) + err = sm.RefreshState() + if err == nil { + t.Fatal("expected error when refreshing state without a client cert") + } + if errors.As(err, &opErr) { + errType := fmt.Sprintf("%T", opErr.Err) + expected := "tls.alert" + if errType != expected { + t.Fatalf("expected net.OpError.Err type: %q got: %q error:%s", expected, errType, err) + } + } else { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestMTLSServer_WithCertPasses(t *testing.T) { + // Ensure that the expected amount of calls is made to the server + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockCallback := NewMockHttpServerCallback(ctrl) + + // Two or three (not testing 
the caching here) calls to GET + mockCallback.EXPECT(). + StateGET(gomock.Any()). + MinTimes(2). + MaxTimes(3) + // One call to the POST to write the data + mockCallback.EXPECT(). + StatePOST(gomock.Any()) + + // Fire up a test server + ts, err := NewHttpTestServer(withHttpServerCallback(mockCallback)) + if err != nil { + t.Fatalf("unexpected error creating test server: %v", err) + } + defer ts.Close() + + // Configure the backend to the pre-populated sample state, and with all the test certs lined up + url := ts.URL + "/state/sample" + caData, err := os.ReadFile("testdata/certs/ca.cert.pem") + if err != nil { + t.Fatalf("error reading ca certs: %v", err) + } + clientCertData, err := os.ReadFile("testdata/certs/client.crt") + if err != nil { + t.Fatalf("error reading client cert: %v", err) + } + clientKeyData, err := os.ReadFile("testdata/certs/client.key") + if err != nil { + t.Fatalf("error reading client key: %v", err) + } + conf := map[string]cty.Value{ + "address": cty.StringVal(url), + "lock_address": cty.StringVal(url), + "unlock_address": cty.StringVal(url), + "client_ca_certificate_pem": cty.StringVal(string(caData)), + "client_certificate_pem": cty.StringVal(string(clientCertData)), + "client_private_key_pem": cty.StringVal(string(clientKeyData)), + } + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), configs.SynthBody("synth", conf)).(*Backend) + if nil == b { + t.Fatal("nil backend") + } + + // Now get a state manager, fetch the state, and ensure that the "foo" output is not set + sm, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error fetching StateMgr with %s: %v", backend.DefaultStateName, err) + } + if err = sm.RefreshState(); err != nil { + t.Fatalf("unexpected error calling RefreshState: %v", err) + } + state := sm.State() + if nil == state { + t.Fatal("nil state") + } + stateFoo := state.OutputValue(addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) + if stateFoo 
!= nil { + t.Errorf("expected nil foo from state; got %v", stateFoo) + } + + // Create a new state that has "foo" set to "bar" and ensure that state is as expected + state = states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false) + }) + stateFoo = state.OutputValue(addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) + if nil == stateFoo { + t.Fatal("nil foo after building state with foo populated") + } + if foo := stateFoo.Value.AsString(); foo != "bar" { + t.Errorf("Expected built state foo value to be bar; got %s", foo) + } + + // Ensure the change hasn't altered the current state manager state by checking "foo" and comparing states + curState := sm.State() + curStateFoo := curState.OutputValue(addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) + if curStateFoo != nil { + t.Errorf("expected session manager state to be unaltered and still nil, but got: %v", curStateFoo) + } + if reflect.DeepEqual(state, curState) { + t.Errorf("expected %v != %v; but they were equal", state, curState) + } + + // Write the new state, persist, and refresh + if err = sm.WriteState(state); err != nil { + t.Errorf("error writing state: %v", err) + } + if err = sm.PersistState(nil); err != nil { + t.Errorf("error persisting state: %v", err) + } + if err = sm.RefreshState(); err != nil { + t.Errorf("error refreshing state: %v", err) + } + + // Get the state again and verify that is now the same as state and has the "foo" value set to "bar" + curState = sm.State() + if !reflect.DeepEqual(state, curState) { + t.Errorf("expected %v == %v; but they were unequal", state, curState) + } + curStateFoo = curState.OutputValue(addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) + if nil == curStateFoo { + t.Fatal("nil foo") + } + if foo := curStateFoo.Value.AsString(); foo != "bar" { + t.Errorf("expected foo to be bar, but got: %s", foo) + } 
+} + +// TestRunServer allows running the server for local debugging; it runs until ctl-c is received +func TestRunServer(t *testing.T) { + if _, ok := os.LookupEnv("TEST_RUN_SERVER"); !ok { + t.Skip("TEST_RUN_SERVER not set") + } + s, err := NewHttpTestServer() + if err != nil { + t.Fatalf("unexpected error creating test server: %v", err) + } + defer s.Close() + + t.Log(s.URL) + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + // wait until signal + <-ctx.Done() +} diff --git a/pkg/backend/remote-state/http/testdata/certs/ca.cert.pem b/pkg/backend/remote-state/http/testdata/certs/ca.cert.pem new file mode 100644 index 00000000000..81e6201e91f --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/ca.cert.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFBzCCAu+gAwIBAgIUFPfAxSWlzjWAdQAW+uDbciQm3SowDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAwwHdGVzdC5jYTAgFw0yMjEwMTMyMTE4MTlaGA8zMDIyMDIx +MzIxMTgxOVowEjEQMA4GA1UEAwwHdGVzdC5jYTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJUdKvIM9q7H8TLqj0O6qHUnbE0N3dnNNGVtyO7Nkn4t7urx +X4qmQ6nMzKlC5YhGIlOKO4X0kPXf623+bP+jUf9qAFLkx5SK9TDerhh3e9y9+0YY +C+CM8bQdJD7jFN1oOcKTJipNbjVXCqWqrBXJg91v3p4kyUvGUv05d3pU9nQvKd7R +BGdWh68hjPFqdFso+A1ggxwJ4pEQCllxLu60RpRFwPoup/BeblPz9f3voeqhxT1J +RLviG6HhpMxh44qNh8UrWGyaAk2C5c0rghBUHdfx/RgP2cYuUo5fhPYOHhO0lX80 +0LebXA6nwOhVeHNvrRfjEJS3tTWaFXyaOUiJT2QX2nG0i6cx6pS8dLMMSFLjMSX6 +bTH3KtTR+UrOfC3B47FOO5U++EnBg3WiZCKp+i8+5Sc3MjTw4B8cmydYr59hNWrk +8zrfG1uE6WvxKg1bRc1FcixERcLnIbRH6LE3hHXzYlLoJ8+q9zP0EGqGHycSlv+C +E+6QMMKU0u2tHnixqhlt79ad6bpC52VS3lFt3Fh/TEKWjS1rn2hYZKGSymJpbPFn +q1RQZcxZWjKjqi5UEuAVGfBc4+HLZHq2Vq9umjLn0nuVixjBeBsCBaFC/amksFEJ +fAmMXDERO7Hb4vePq1t9iusWrRPhkvZt6R1Pozg1Ls+xSJQE09n3jWd0/fMhAgMB +AAGjUzBRMB0GA1UdDgQWBBSe+CLJRDjlHurYVRcXvhXyohcVdDAfBgNVHSMEGDAW +gBSe+CLJRDjlHurYVRcXvhXyohcVdDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4ICAQBluWhlAuG7CfMP31aJzl9AucHHLwfAECKg35XTPiF+YrL7rcbQ 
+0dQyErCXyx7lLGEMqfNxVW48JtLCAATAZk3PwaQdU5OTcKA6Q/mQJwagfgmCVC5+ +Y4fdc7HhfOkGCOQ7aqyJ/EmygafgShreNimRDgIFomEs2hEEKAfvq2YBKcfcDyS7 +vCJZgzKoDmFe4DJjnYN/Gmj/4ak1kwtkoTkwdBlK+zWfbWHSUweXjCvbPPhKCPfy +3Vu++BIW7402aLsP4xyQY/HPGErV3l1TpY3FdCENGQXANF/gPDWj/Q92OdTMRL0U +XXSshNT3YjCxUH3M4A07A11TQwXZRFs2AkZyjJ6M5XNd36FswHh7fSjNLThU6h2V +dI0y/rU4y24KG7KeUayTE1HLGGDskZdXSOL2vH/MTvpheKnLE8fQrKb/SgY+l9RA +fIKwjDfMSL11luuSUIdevt5CEGFms8hpLU1RG2z/qSYz3If/dhN6YdiFJ54Qhjw9 +J5UO4eucsCm3MmsX2jUsDUIjHu92Rt7a3N21lVwzAifwwUzlDrY5xFrtpdhiSEAd +HFmIQOEr3C9xqD3v3b/4N9SoOjZS2j4xk+GQ8XZeTDYf8ZlkXvXHWwEHbVqj0toe +WDooC6oivNJAEs2GxJpyLmmfxIbRjE1sdmVZtmlSb3hY0Rme1SF9FoyZDw== +-----END CERTIFICATE----- diff --git a/pkg/backend/remote-state/http/testdata/certs/ca.key b/pkg/backend/remote-state/http/testdata/certs/ca.key new file mode 100644 index 00000000000..e297d3aa8ca --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/ca.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCVHSryDPaux/Ey +6o9Duqh1J2xNDd3ZzTRlbcjuzZJ+Le7q8V+KpkOpzMypQuWIRiJTijuF9JD13+tt +/mz/o1H/agBS5MeUivUw3q4Yd3vcvftGGAvgjPG0HSQ+4xTdaDnCkyYqTW41Vwql +qqwVyYPdb96eJMlLxlL9OXd6VPZ0Lyne0QRnVoevIYzxanRbKPgNYIMcCeKREApZ +cS7utEaURcD6LqfwXm5T8/X976HqocU9SUS74huh4aTMYeOKjYfFK1hsmgJNguXN +K4IQVB3X8f0YD9nGLlKOX4T2Dh4TtJV/NNC3m1wOp8DoVXhzb60X4xCUt7U1mhV8 +mjlIiU9kF9pxtIunMeqUvHSzDEhS4zEl+m0x9yrU0flKznwtweOxTjuVPvhJwYN1 +omQiqfovPuUnNzI08OAfHJsnWK+fYTVq5PM63xtbhOlr8SoNW0XNRXIsREXC5yG0 +R+ixN4R182JS6CfPqvcz9BBqhh8nEpb/ghPukDDClNLtrR54saoZbe/Wnem6Qudl +Ut5RbdxYf0xClo0ta59oWGShkspiaWzxZ6tUUGXMWVoyo6ouVBLgFRnwXOPhy2R6 +tlavbpoy59J7lYsYwXgbAgWhQv2ppLBRCXwJjFwxETux2+L3j6tbfYrrFq0T4ZL2 +bekdT6M4NS7PsUiUBNPZ941ndP3zIQIDAQABAoICACEM6/3mfa7TxlRgxQxgDQKa +kFir4CZsY1av9L9pdTTefXw5r9GNdKXoLNy/ZRzFXsphczwHrzGwRgCFSieHTZ9t +IVE+QDZebmY8lR37LcsJmO46WjeVReWEKAqATpmchmDoOKdbrjfIaSW7JJVXqxCj +wRYQVUWkWbSiziahOlcaNQ+cCHvXJA/fQdwomk2yUPi2EZlfX4aDpaeZfKuP7azj 
+oRhSywpuA8o74qQ8PwlAffVNjhyOy00gNGTQtZx6LkO3jcvUfvorL0BAin2QB2Vb +z5tLuBtDHS1NYq0fB++aMSCW1kQ7/TWKXSmh+Cat9BG9VGmCJnoRAv4xOM0pEh1o +vui18+UT2tJ4OZLP8tOH1A0OMTF98EojmKwUlStnkm+vNgdU0IWPFZng77qL+rJd +9sR9BkT9gfW+0EMUMG25ocNV4/t01O0q95oH3F3LQ7iIKGzzErX//2qGteaHEu9u +Cbd1QniQDKzMEJV0hHpWxAcZcJx4Wje7dPgDCRTv2juU8sWM7d43KAAQ+tQpLkem +yzK0UAQzSnWS2QjrR44hujYmf4zPcMsQFBSvztP7dbtwKuTQbiQRYn4ZCqcCv/DQ +RpI69NoulWO7kHhbZqqtiWxcmtdLSwN+9Gx/x6sgYSemx5h8rti0lIBi/Pzfq39U +WuiGg9yjUSU1zqdtDdihAoIBAQDI6XRf/umV3gdba1krxW4q+g5ly3eJlYd/BKTC +xYNx9ixjOJ+J1ov1gF2P+60HDhYQ9bsoPMhHfJU6qXR5Hu0xZSKK7TPkcJHH6WHm +ErcqtgJiADtl7sfo/GTn45MaF71fTXSgrjCMLGA99IYPooMVWE+TrFEYNOcPgO4x +hNq0n0C29ORSr+9oqStCuJ5a+iDvL7KGnmsyun1HuWUKVdxbt4CPpMwsQWcBLfVg +Ispd5q5fG/DPDZFnha5XLbAPWeLn+1mweK4Y4Jugr593o6S9q04jlLa5wLDMCXUN +fPXJFJcg+vcvcZ0IlfvFsfZ8IrO/UMHqOeMUhTt8s4KoMYAFAoIBAQC9/9+aC+Tq +H4t1Dl+GZAavsVlP7kFekmAK02GJ103qmFcITpcx8cx3eA/0oZGpvQCHsJGHoa1P +EaMtKVITokkvOTJB/zxvSw6K5oCx1qEGoEqyXTs2rNLVchGwunpE4C6Ikhw5+gew +e299nmLE6bckStCLVINDWQOjRJ0Jl26rmdGk/3wLgliZNVKu/Yhsr4RY+FZoErOk +YulZp648GfvuwXZUdAWIdmg4JfOrcizmhya3L0qteOZ7FpqKXCPmDgXD3E4IVdMJ +CRfywxkqXCHxRlN49/9I2y3B3eaWkGStqgvzHbrMR6uobn4+YRkjgam4ILnUO6Vt +Zy1R3HHvSH1tAoIBAQC9WGc44UC64RkF61GKkvKUxj0zamIp5CZiarnsZcDPcjW6 +/O4+NViJ8oQ64fHbqEbbjPrpnP8TgDITqwf97kuUNcAsNgilzgFV6nk9H35IXmg4 +fAd+tV7qEJP4ht1nxd/PJWw40nEmadv6B60gpwPq5eN5RPjYW2M3lUbmnFKRz1Rq +GLnlw7FZbbU7mEqFax4GzWjuvfZBRMg1BGBZMToPpg0fUyyouKqezfVmuOMHRBQp +xmdYe20Bp1b7Ci/XB9t0zcllKxbIk0WYVmtvkWX86qkll03uGc+FO5R5Nb9d1m3n +wx2aNPTN1qwFUQb/TqUgNLfMSunbuQSrLXKBmMURAoIBAQC9wkXiJqr0IZk4yagi +ItiCtI/MwtpKx8pgRYmPD5fkC04xH7zlxuc9Eo5s9sjyS6+x1WkjmxfqdmUQf8pX +jaemIGvPekkzpjTaCSjTdNbSNVklFvRCwQy43PpKFZR0IaqX/8VtKghv/Hf3cC6Z +GAsvlgD+huOqaca2U5q7r6B6hl/ZeMi8/eva6GSyHMkaM5ns+enie3srXRZN0qiz +ogf6BwJViqLUDd485bqdqqSpgKXsIrFk2/DlUkf6k9fOtoaPfQH6VS02QvzGGpCR +u/6yaFiJ4rX2X+EtVKAuE/xZbhINN84OpC4PRHuVdYiT67ZEDXtLOl8YCwo6Tf8E +ytNpAoIBADWxq0izh7P7pGW58JhA7wl3vCUFUJ77JC4pjYzKlBsMi7OcZrlzNi2J 
+4rtO8JO5S8eG5erEA1FuPb6LCPqzetKTD+xKKxgEcICkWuH6RWdRq82bkVqL2gQ7 +tp7qdfwNl0K6XNnB+VaCPAzsrFJJZnoRIz3BQBocT3Fwxe/XwS1KDjLere//bgHR +9jxYZRHKr72Y9lTMWMW2ygxmdlWk37pv4rsQK31HOGo8JtRVOISZnHLSQwMVNQ25 +5IincDO5FGjOQxFxrGjw+YQkAcQC8PkpiKU7hY396FHEvrr8xqTl/TPuJaAuSbvW +g3yddc0Zj29o1jw56J043a37q1CmmsI= +-----END PRIVATE KEY----- diff --git a/pkg/backend/remote-state/http/testdata/certs/client.crt b/pkg/backend/remote-state/http/testdata/certs/client.crt new file mode 100644 index 00000000000..1d5a74459c8 --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/client.crt @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCzCCAvOgAwIBAgIUJsntRGo85J+ZJAb73snhKsM1oVowDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAwwHdGVzdC5jYTAgFw0yMjEwMTMyMTE4NDBaGA8zMDIyMDIx +MzIxMTg0MFowFjEUMBIGA1UEAwwLdGVzdC5jbGllbnQwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCUQebHvDL2ksHcNh6hw0xMCbPxwrBd+qQVFGf/2wL3 +Dk8Ls/NgKQqkoG4WPi4vIuu277+7ngqUZcFbDL/MO7uAWlzthFkhI8IyXeB8t+cj +liqwdgfRFLvoae1PG6ZoFrTXgOsW3tW8SRC8Kax7RdJjEMU1yWEC6OwiH/gabqZM ++i2qSwOgPnxAalljbWDJU0kj+zRfw35W4L/Q9quMid8KQYE71wQoiiBFYFcx552c +kL30xEpKat5ffB42sBpDzO3S/dM0k36im3wEFJHaEW2q4+0Ns9/PQ2OxIfoRC+lD +qYVPeljNSK2n+PSZDjswpZtqK68RD0AM0PmuPqV7Q2DPGoCXpwcq3lczlFH69T7z +s7izG8cnmyi9eWiXgPFWG5JzyeQi2P5fMumF1UVpG2NHyDyEGfXME8dh4S6BpAUJ +9BAXjatjzA5bq4CGS1w/pFrUvhiVQY7byGDqrtTiDa1f48T2CkvVmRUIiKlrvnDe +ezCnJ6P28D0yISyJLN45sQhuyw5idaXHl2AsvDRDFj2iZ/WhY7tCf0O/DSCuI1uZ +WcFXHdRFn9RoGuUqy97+6rPZaB+xNnx83O9pG+Hrx2iz2pSD/pb0b9xKH5VvN1pN +JjtaoMXod1+2z8XdTUzPvkeZyDXIasaZwmSEOOZmGgoRe+KE4ZBlk4XBlm5p2Q6U +RwIDAQABo1MwUTAPBgNVHREECDAGhwR/AAABMB0GA1UdDgQWBBQ/5KcOS58ZKYth +wRpJ+VKCcwJdpTAfBgNVHSMEGDAWgBSe+CLJRDjlHurYVRcXvhXyohcVdDANBgkq +hkiG9w0BAQsFAAOCAgEABiy0c+7T7dam8IjejbDlamAMvDCWFoVW+mLjsGwaS7vx +jmtGig5E08q7axf32iAkfwzi/vEwt66uWGVctUm6/EqH2XvlqZXcsMGiAuWYwJ2Q +DXowHlcIoIRC958qA+6cCAdxoUnTpYSdWWMR+QZ9XDB9MaAZJ+zKhb8nEETl9jGR +Z9iaSEnupposxt5NMvNUU8dTjjjv430WvZnvZaTvegLIQ5QaHeECUQ61Nm18tEey 
+cPiMu2TN8uO4m67lj4kyXaS3wD7zNuZph55g4vNbQrffTEHUZSFqrr1fyG+7Y+fb +F9hzbhqBgCnYQ5JaxtVbqFAvwDFWRoq2G9gARi/Yuf34djoP09IZvbRymZWJ5857 +KRCT6mBestfOzu2oIz6lDO44fFiejOTDCSDHZ2Try3xAsqS4LAZjWNSqfBIJwABi +bNTWV2yxtlnqEkaPtGYSwQLdF8MTBRbxzsiELktgdgt7XcfarhEKj9iHWirEt0Cw +POnl8S8GzwpsSAomijlLhfyU0J1+p6UP0zJE4YOjKZFv5ddmBCeSTwj0gwVSsSNg +ff7T7IvkTcIMZUlrskeMY4svXpI5FeG+sXXNp2J/iz4XIQdcdpB3t+fDCUcic9Fq +ILJKT1sQpjv4gyAO2BJd4D7clUJwDC059+dh3dDC9d51uHvCra2F/+FGeodQRuU= +-----END CERTIFICATE----- diff --git a/pkg/backend/remote-state/http/testdata/certs/client.csr b/pkg/backend/remote-state/http/testdata/certs/client.csr new file mode 100644 index 00000000000..ec0f313710e --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/client.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfTCCAmUCAQAwFjEUMBIGA1UEAwwLdGVzdC5jbGllbnQwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQCUQebHvDL2ksHcNh6hw0xMCbPxwrBd+qQVFGf/ +2wL3Dk8Ls/NgKQqkoG4WPi4vIuu277+7ngqUZcFbDL/MO7uAWlzthFkhI8IyXeB8 +t+cjliqwdgfRFLvoae1PG6ZoFrTXgOsW3tW8SRC8Kax7RdJjEMU1yWEC6OwiH/ga +bqZM+i2qSwOgPnxAalljbWDJU0kj+zRfw35W4L/Q9quMid8KQYE71wQoiiBFYFcx +552ckL30xEpKat5ffB42sBpDzO3S/dM0k36im3wEFJHaEW2q4+0Ns9/PQ2OxIfoR +C+lDqYVPeljNSK2n+PSZDjswpZtqK68RD0AM0PmuPqV7Q2DPGoCXpwcq3lczlFH6 +9T7zs7izG8cnmyi9eWiXgPFWG5JzyeQi2P5fMumF1UVpG2NHyDyEGfXME8dh4S6B +pAUJ9BAXjatjzA5bq4CGS1w/pFrUvhiVQY7byGDqrtTiDa1f48T2CkvVmRUIiKlr +vnDeezCnJ6P28D0yISyJLN45sQhuyw5idaXHl2AsvDRDFj2iZ/WhY7tCf0O/DSCu +I1uZWcFXHdRFn9RoGuUqy97+6rPZaB+xNnx83O9pG+Hrx2iz2pSD/pb0b9xKH5Vv +N1pNJjtaoMXod1+2z8XdTUzPvkeZyDXIasaZwmSEOOZmGgoRe+KE4ZBlk4XBlm5p +2Q6URwIDAQABoCIwIAYJKoZIhvcNAQkOMRMwETAPBgNVHREECDAGhwR/AAABMA0G +CSqGSIb3DQEBCwUAA4ICAQAFUKmXAcULGC1idSXVWRnhzzr6qnl4K2QZVse8kNsk +BD+ePZp7/jc9URP+ykFhVHc1gOy0VgvNm6qePS9ccPTQrRxmXUmMrV2ead9z4h4O +OnnIyfxLxO+Kd1lJ/1UU8CNs3tDQnxEvtx1hYBIDNsyB4bAsfGVBGzBrsoHEjZOg +zTvvPEnH/GpnEITTwK9J6tZ2zanE0K5z2NcSHPjzO0z92sAkcfTIZovcsVCGR3j4 +UDBMWAgK9vybG5G6taQyducU7/kMLcEP5ayG0qIeIrS2GRmOqSixAQQ+Qk6Ucs4w 
+HD3/9oue5vWJEG0j86jEchdg3OCbHbQEje8Bf39xhpICel45EdGsxc61kiB/c5Lu +8kYQTXDr9P1wtAag5XLmv/nf6pzlQ+LthU/2/EH0r948Rj2Yz4HOOHsfPuB/izF8 +NTAH/VBgp2c/VRjEYd0YQ4X3AS+Q8BwBeR8+OUJu97AIWnM8kjTcRa1ybCGMkQ3L +IjGWgIYnICEmiEJhLo/y7jMSdRwUT9g5zz3koqChzeFSU1LuH/yE2B6GfneblDK+ +B7WDOkUEbHfJ5q0TZwWEgQdpcY5OH+o78NfJpTvgNtPV3B83+g+DdAW2jtgMZ6do +Rb7V+uPvbU9VC2Ng7jacewMtfM3PKugIZ034UUjebQ7/N5ZD01xuJKOG/w2LuUGh +GQ== +-----END CERTIFICATE REQUEST----- diff --git a/pkg/backend/remote-state/http/testdata/certs/client.key b/pkg/backend/remote-state/http/testdata/certs/client.key new file mode 100644 index 00000000000..392a800712b --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/client.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCUQebHvDL2ksHc +Nh6hw0xMCbPxwrBd+qQVFGf/2wL3Dk8Ls/NgKQqkoG4WPi4vIuu277+7ngqUZcFb +DL/MO7uAWlzthFkhI8IyXeB8t+cjliqwdgfRFLvoae1PG6ZoFrTXgOsW3tW8SRC8 +Kax7RdJjEMU1yWEC6OwiH/gabqZM+i2qSwOgPnxAalljbWDJU0kj+zRfw35W4L/Q +9quMid8KQYE71wQoiiBFYFcx552ckL30xEpKat5ffB42sBpDzO3S/dM0k36im3wE +FJHaEW2q4+0Ns9/PQ2OxIfoRC+lDqYVPeljNSK2n+PSZDjswpZtqK68RD0AM0Pmu +PqV7Q2DPGoCXpwcq3lczlFH69T7zs7izG8cnmyi9eWiXgPFWG5JzyeQi2P5fMumF +1UVpG2NHyDyEGfXME8dh4S6BpAUJ9BAXjatjzA5bq4CGS1w/pFrUvhiVQY7byGDq +rtTiDa1f48T2CkvVmRUIiKlrvnDeezCnJ6P28D0yISyJLN45sQhuyw5idaXHl2As +vDRDFj2iZ/WhY7tCf0O/DSCuI1uZWcFXHdRFn9RoGuUqy97+6rPZaB+xNnx83O9p +G+Hrx2iz2pSD/pb0b9xKH5VvN1pNJjtaoMXod1+2z8XdTUzPvkeZyDXIasaZwmSE +OOZmGgoRe+KE4ZBlk4XBlm5p2Q6URwIDAQABAoICAC6TP3l6/bWpqB5SoC/oZzUy +DSZDp912SorWxM9DkfxkMd/20dvhONc8ESmKsj6bpVpsmhrKTP+Osf41FKIIF+D8 +QlpZrBh1n+HrzQTRT1tGJzYVdmIwNdIPSP6DrLThgUF8Xh5qtdG3UHsUSnvVlQEL +OTErCP99hgU4btx662Kea68mbsauKqGf52INcAz/Tahwl+UHyM5pP8lZXM5DV97k +ckGGzGch8X5qBCqI3WJctFhLPB2B0kdD+kfq7e1j2Ujh9bJ8LZnO59huT92mgQHh +Jc0at5Jo1M5GYsVtLQRVIqyzvmcLUIbG9qyIpH6lYBwsCgz9cf00v2OGib0eDzC2 +ZqeiotDiul5f6vtNw1YqDdrZWSxRfwqoqzeZX0/bypw6+UTGri+lU7RSRsA845gd +gMjcAd2WocqSNhPBTVPivIDmzHfSMomfnJHCw+aKcm/o6fxcSp4g8pPzpx52h0Eb 
+tO7rTKTlmglZ8Cc59CPmRqLq+Pk+lHgxTDOUOxZANCuBih4MrDJ2NFnnZxervjPM +te3VlJu8nE5mNuHhT1czekU01lPHQa2E4f5Q74bWpYg71KntN2Po8oUaQQjcX72N +b9N0TzeBrR2TQD/j2S1Mz4ZoStOwOovHdtPZOmfYN30OMX8JzqZLhF4Dfyx8T1JC +Pd1089N0HbX7XIXuEKJtAoIBAQC8xIVQMqg7A9O3u4i2Afq6nASHDM1tnH1l8Ua+ +T2Z6kmPBgjPb+tBrX6YeD13yDxKvfsEr9GnyuQQJdvqNwjVkGNg3Z6HX/HyHIUij +bub3LvpyQzYNkpb2qcoka+AIWDvbpmstetobQ6OK9F914ur29L9XXSp0diZn+1Ff +JqZFfZwgkhsz0Q8HZxT4FfbV+2k6PWk3RPriyKLgZAd3OXswbx/6K7owdUgrOhYA +vUXRae0UrNi0Y2kanzzoBdBLDS3ChML5VBMPIrHac4A97FJS88aewEgMC0E1wlin +J7nwVAubAG7YzEpQeP/4Wp2j9hfwqtO8JlaJL1vygAgQG42NAoIBAQDJD7v0qDal +cuGaHEQLhEVOu7JtQwHn7LmJQyamqCmlL0mDwQOixhEh58sQFUeNYEGKwEGWF+Tk +hA8sAYk4jagUF5sCkOQoWdFWna4uPqlpwozFc/Wj3jKoiYOGn4SFeEJdgQG3rMDM +oepVvaNOljJnNlntKZHUwOM0F6xxV4dXyqnPn+nXmM/Iywd+LSsMN5w8c4IFE+Da +WKrbKMobdaARtx9Lpv7ESObLX3eCRqL1KbuRN2a000Ojfv4kprH1XxMdCWUxXoLk +ac1I29cvx0FFYJfIr3CScdwaKiwGKguk8IMIih3dLulgnaqJ2vUjI3qyQMEFRMBW +3HxFAk3VU6IjAoIBAFRmMYz3+UvZnDHMAYYPQIFq/IM9cCQQEekghZbVfWZUSZHd +mz5B2CoJ7AYIrOJrZtlcfRYgA7bojiuFLOVw7dpBWXr8NNqTI0Jv2UBpd48RTB0G +fAZ5glHq/FxodxSEDs9YixcclKQYC+k29e+Jc7DTITH4j+DearGXJny6lSEA1muh +p9P1JxkSN8fsWh62eAf4KTDzAJGhT2Gwl73wz2mKZeu+3VKJPalGIUxXU/4btErI +NWQCBp5GkD7VSpoj3E/aeCpuMs9Tnd2kQrRtEynPoQCdzBjGd3OH34dtNa+EhGPb +P7RjMt7kGt559X23rGCIoH7BTXOs3xl/sRsylokCggEBAILXEmEr9iPElrtLGZzE +/rU1v+8KY/shObvxTv21ASTVmOl8eXk7m3qM9MAKmP2PXheE9SlPc0yiA52Hglyj +EnXAxsbsswzvJiNPiUHe1TBVwnXb+EYjGqRCmKzKsdqJX+apRQzaBr0jwPL67YL+ +it5PqEWFf7kLrM8BeN5pL1IaOFc8oVgDwXPRa5bYneLdbXaJVFspjHGKseTcrmkg +KoJcwKjii3gAWPCPt523ieQwvDbL7rJNqP6Eba48LCKZND75FjkCX/t0PnrjVS1q +ZTdYnG2kfYVPQwRj3TJFuj4jpaGw/64oEQcmkwwSyOOM+xN0wCdFjkT4RoZB8ZSZ +UDECggEAQ7nnkDKqL2SGsC2tuXpOO1rn2Ifp71hK9TiSUh6QIEk3parO5ualayH+ +UUsav++GIexHIxH5sxbeO6wCurRbrA/64tTXRYh/T9tIkfI4wstgRoFCMPN5CdIs +Q1s48wH1KfQWz1UiNM0rwJKs2kDIWOj9bZotq9Ir3dXYoKgr4sotQFZUVyq2n5Z7 +jE0/bYPHI8+3WXaZsLEzBA167/6IUzIoM5QEgKYP3999CEu2ZKewjnElPMflDJWm +OGT5JYz9SjwKH/9ngGcpIo8i35LSj5R9cK9Sf6dTKo2YZAU1U8yjfaRXIVAmSBFS 
+SXbUSo1aOU/ZWOnVKdyjhPBcPZMEqQ== +-----END PRIVATE KEY----- diff --git a/pkg/backend/remote-state/http/testdata/certs/server.crt b/pkg/backend/remote-state/http/testdata/certs/server.crt new file mode 100644 index 00000000000..d16cc623684 --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/server.crt @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCzCCAvOgAwIBAgIUEJ4OCw9X1j5TegymXZENMgfdBZcwDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAwwHdGVzdC5jYTAgFw0yMjEwMTMyMTE4MzBaGA8zMDIyMDIx +MzIxMTgzMFowFjEUMBIGA1UEAwwLdGVzdC5zZXJ2ZXIwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDBTTBoca0tn2EAxbQLXw1diEH5+YltZUFz5gH3aSDf +H+uKame4iFsPybsUstmUqy3D9ZTzjNcauAhB75+RgePn4D0/qePPjdsFz11jxacA +AMkg/mPLPtrkEAiRzvSXXoYN1Gq6uNdGricyKGtSKzqQh158W2ZfLKUKvgGlQ8RD +3RFsFanQS6aiNwPgFK1SeFFt4wTJbtpvKNpJSe/XDDmkyMIN6/pVRo56v2PvsERA +mUQJ+blyhOy7Egt0/uF7JUklzLIi5eKjv2JpcVwH83OAovKl1vy2mfiCZSjCwQeB +ahnWgAaAaJir7uOVNDNXI4qM/KySKN7Nfyo+9sRuNBjMb5b0N8s70YIAyzoylJ0U +E8x+a7XyMJlPvpARGXuNSDwR9rRytpGZIeMPcO1YQh9+V5k7il6/70thNYW5/Cb0 +PsHU5XOSlmOsOLcr1JaD2YTJJflVyhrPwMSAphRuUCFuyQinnHi59Rk2rNuTj/R3 +dFrSqtcfnvjbc+1KbyDFpnyf40W/EPmS4mF3UPRD4oRsvXpy85E25uh5Q+R4MMdd +g3KHsZ+SiObUBa33kd9rG6peJz0cvkJIhBzbJcXPzN2EMgow2C5MYKjmNXclYWIH +ypkFSo6OxFHhIQdeh3Ga7pZqJaOVUA8wm2olIjRQgQFJjRRc6KU2w95lJlFvHXlW +0QIDAQABo1MwUTAPBgNVHREECDAGhwR/AAABMB0GA1UdDgQWBBQpv1S2rSSjgJ7a +xONcLKxYRE3qJzAfBgNVHSMEGDAWgBSe+CLJRDjlHurYVRcXvhXyohcVdDANBgkq +hkiG9w0BAQsFAAOCAgEASpVE6Pj/sPf5heCDI8miF3Xw65BkLMCCL4ZUOugtK0Hg +dbcnaMd6Hwf+mEs/2jaD+2xah489fX4KynJnQ68VpnTMT4yYcsEfvwmZA7Bqo/Ef +MwyFJe/E+Y1mAu7KQodLZ1E13cGVQKDQVwQ5ueyRD3C0bY3glMKfnXvnIIEMiSCg +UTAstj4Z0h9KYrVSRRVfCGOtlvFPo8jg+yPVPsDqGHn2hOH+FYoHv8V1/gGrXJTe +HcTHFIAIkBefHAXCaCYYq3Qfp/ZBpuT5N4bwQtHKmgv5hhyy0kaZRFfE98WkGdSk +Yg5wZRIX6UbjPdyiEnhQdOrnGDehKf9iwv1q98B9hgXzEzdK0e3bR8UY2MRvs/Vz +L2BBDkJHsTo9P1q6zAsmfVNhQPGrEH2pDir8yYpXPz/ocZa7GghJ/RPrYirVTHZp +fNxoMkNfgfVQpSsFvvI/fMGfhG65TQJdq82rAJ5tRRRs69uA00NCggKRWmEdVYpV 
+jWuMiLrE5U2tHruMytM/ek6kjhzmNpJgPG2alsJHgVb5G8elcCuC0Dx5HjnwbR60 +8V1v2z5kgU9dkT05vZ5RPmNyuv+VP+8Qx/NPCMrf1SaQffW4PaP3YUaRwzJYzEP/ +ZDUOmPsgUMLwj/jT3sEkSc1qUByui2A0QJk2dQzcbNfvpWoBQ+q7m2OHkmzXZCc= +-----END CERTIFICATE----- diff --git a/pkg/backend/remote-state/http/testdata/certs/server.csr b/pkg/backend/remote-state/http/testdata/certs/server.csr new file mode 100644 index 00000000000..92d06b74a66 --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/server.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfTCCAmUCAQAwFjEUMBIGA1UEAwwLdGVzdC5zZXJ2ZXIwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQDBTTBoca0tn2EAxbQLXw1diEH5+YltZUFz5gH3 +aSDfH+uKame4iFsPybsUstmUqy3D9ZTzjNcauAhB75+RgePn4D0/qePPjdsFz11j +xacAAMkg/mPLPtrkEAiRzvSXXoYN1Gq6uNdGricyKGtSKzqQh158W2ZfLKUKvgGl +Q8RD3RFsFanQS6aiNwPgFK1SeFFt4wTJbtpvKNpJSe/XDDmkyMIN6/pVRo56v2Pv +sERAmUQJ+blyhOy7Egt0/uF7JUklzLIi5eKjv2JpcVwH83OAovKl1vy2mfiCZSjC +wQeBahnWgAaAaJir7uOVNDNXI4qM/KySKN7Nfyo+9sRuNBjMb5b0N8s70YIAyzoy +lJ0UE8x+a7XyMJlPvpARGXuNSDwR9rRytpGZIeMPcO1YQh9+V5k7il6/70thNYW5 +/Cb0PsHU5XOSlmOsOLcr1JaD2YTJJflVyhrPwMSAphRuUCFuyQinnHi59Rk2rNuT +j/R3dFrSqtcfnvjbc+1KbyDFpnyf40W/EPmS4mF3UPRD4oRsvXpy85E25uh5Q+R4 +MMddg3KHsZ+SiObUBa33kd9rG6peJz0cvkJIhBzbJcXPzN2EMgow2C5MYKjmNXcl +YWIHypkFSo6OxFHhIQdeh3Ga7pZqJaOVUA8wm2olIjRQgQFJjRRc6KU2w95lJlFv +HXlW0QIDAQABoCIwIAYJKoZIhvcNAQkOMRMwETAPBgNVHREECDAGhwR/AAABMA0G +CSqGSIb3DQEBCwUAA4ICAQA5BmbXy/UXXNXe0WHR1gxx5nwmJ1CyNy+efVq4cl8Z +ltxaTWy8IZOGN3YHY2ZhmKccm7ecNq1Kv9FUctPe6+97HXb2rL0rB0gO1AyxWJKU +edzls63/0n+AnQqwnPQdgL9N5vIw/0avLo3U8F+kI5hbYfG7fvw3zHdJIMiLTRsn +qKvkF2TMBxr06nrlJsQqG90k9xS3iX7DqssDq3niVgAwP2NbS2wDXk7/6R40LNx9 +RzFHDyHplF/3ySjctkx7kkAPdamGr8NNs7kQkVZGKmD25V7i5ggoGx9lo3AiBbmT +9Keac43vhlC4Bj9zW2O6Ih9TP9sDhp6iA4NtdNnK9tfn59Av6J4pB6EhMzaLtu4J +jqc5b3+Wvq1xv0Sm2Y+JjuawT7jgrT4vnSEqqkFTTV6igzctatOCxz4ejl3Q2sD0 +OjlArZWX9kY2yyuFt6LhlM3We0IDUQjEf0JtA9EFixbm+ieHbPEFHFiD0w9uN/VI +cYzxnubGgvv2wN1N+YHNRFFOWyT+Ty7Hp0Kz3dh8g+DY4vxvsfG6XfnvPT5StSKd 
+ACEfl8HoSET/qJZIkuIhErzzUNNK4+4QzQav7auZUQUrdK6P+rryE3lZauZ3rV+9 +ZXWT3PG1qHuWNNriTrC6n4tpa8m5UkZMdeoK2pS3y3SLDCJJV7Q3WHCZEVddIPdV +Ew== +-----END CERTIFICATE REQUEST----- diff --git a/pkg/backend/remote-state/http/testdata/certs/server.key b/pkg/backend/remote-state/http/testdata/certs/server.key new file mode 100644 index 00000000000..881229b0983 --- /dev/null +++ b/pkg/backend/remote-state/http/testdata/certs/server.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDBTTBoca0tn2EA +xbQLXw1diEH5+YltZUFz5gH3aSDfH+uKame4iFsPybsUstmUqy3D9ZTzjNcauAhB +75+RgePn4D0/qePPjdsFz11jxacAAMkg/mPLPtrkEAiRzvSXXoYN1Gq6uNdGricy +KGtSKzqQh158W2ZfLKUKvgGlQ8RD3RFsFanQS6aiNwPgFK1SeFFt4wTJbtpvKNpJ +Se/XDDmkyMIN6/pVRo56v2PvsERAmUQJ+blyhOy7Egt0/uF7JUklzLIi5eKjv2Jp +cVwH83OAovKl1vy2mfiCZSjCwQeBahnWgAaAaJir7uOVNDNXI4qM/KySKN7Nfyo+ +9sRuNBjMb5b0N8s70YIAyzoylJ0UE8x+a7XyMJlPvpARGXuNSDwR9rRytpGZIeMP +cO1YQh9+V5k7il6/70thNYW5/Cb0PsHU5XOSlmOsOLcr1JaD2YTJJflVyhrPwMSA +phRuUCFuyQinnHi59Rk2rNuTj/R3dFrSqtcfnvjbc+1KbyDFpnyf40W/EPmS4mF3 +UPRD4oRsvXpy85E25uh5Q+R4MMddg3KHsZ+SiObUBa33kd9rG6peJz0cvkJIhBzb +JcXPzN2EMgow2C5MYKjmNXclYWIHypkFSo6OxFHhIQdeh3Ga7pZqJaOVUA8wm2ol +IjRQgQFJjRRc6KU2w95lJlFvHXlW0QIDAQABAoICAAnye228f9FrtLWx9tBo/UqV +YvPGrBRFlCcvLGW7crYYsenHDPxZg/odgvOPOpQkdO/zGM2prz4QP1iJSLhXq084 +4l3+05rQLXewkpk6SBw/bho1DRSd8OywiIhcUojhk9ttVWqzbVyVRK4Xl2I8mEBs +vud+WpfGN94EJhiHkrd9TlK2EK2H3xTU6O2kksC+MU6K0qm8+x+iRg1kcSOrXOIG +dLn7rT+rKFTXuYBRnUmHuZEb2Tez8Gy2AoHsRdUs94Uq8fXKx61ugVV0wGwmUojJ +mdv/4rRQ2xF2vDC9dzHpMFgx8WO1PjoGyo5Yh9XRneUgcY757HE9vIJN95DGPIpd +vCYaGrGA/JipOmTrmMoFurhdwdiyzAzUsXV5AKKo+PNEsSz+36y/xa2z7PlvnBR2 +rpKw/ocsRoaKiI9pG7b9ty0QyiY/teVTpt0sQDIvpZwx60wYhkziFl8sn6CGQWQm +a1bFzb+5ZrEMj7gCeffOJmQSvpn2fGzlyp3RrkyaRGK4YhWU9SJsPUAnxRF5yoOm +EzwYFYC0AScPdywS3nA4IWIeKnuydGH+6M/Cqk9qkiGrflKFpCn2eBvFWMTuoUYd +/jyE2t4th/T1qsqKbJqKiRAz4dQlrqWdN6SnBk8MRhDbqtbbSREIdU1z58qD3kuf +0thbm9SrDRV4UgYiUckLAoIBAQDqt3NLq5OgOsEOuoYYqQYWDWNiTusrvsutood+ 
++AXPcxZKR8O3g/gUGuhyKE3a9DMYnFoLCaTqX1iDqg/KUemoqc2+A9pZ+QAXRXnE +R/4PFh1Sgyhyrwz30svUVs9FYpiP65ZiY6LhDmSL6bl1u4DYJUhgXiHkF6q+KryN +M1uLpSmUOTOwJf6tltYgMPMegDXQE2VZ7VnQMN1m2rhDRmycM14CyJSSWZAyCzbJ +ylDeKWs4wxATLrWIGUPLzqua4/uILsUeAzvyOCrCgJHSEDdITGdQJClF17bZH9xG +H6pIA0VPoWq480lE+gw9Mwu1m4QjOOM0RHF5nm6YKFJLIdCjAoIBAQDS1FuEtuVI +6oQa2Sh7EILZq/gyVrXmeLLYUQChYBFZxK0PVkbFT+ztkHnD2gBz4exXUEaGpBNA +6Yr8iz6VCNdQ0KcrvzFINZwcRJTeSxLXArQ2LkDJmDZJoIC1cSurrU8ot94EHC94 +qjGQW4K3qFZIiyXHQJNgSrYaHADmDi9sQQNmyP0pSIY/q9Gn/2DLADItIt/0iFz0 +kc07kF6l/1JADSiUHMzHhUxh04LXo2LRQVHYWaK0DrVI50wXivOCARFkQrFdrszX +ymFf7d6AskIiAIBNYXmb3of2NSwzWx6RnZI9YVpw2277xzVh2vDxI2Pc0ordAzFk +YY0DLGFNeI37AoIBAHnHkt949xBUS6RrrHWRBOJeMelozuWUibLeN/TtlH4s1SzX +DTnjE8zCpUXNmY930ib7wFAnwdQEgjVV//lWBKiI6YGkGB9EbQKl/maTf8KuE6qi ++FKAdncCfNT/8WyrmkJZ1l3YGkMwp4RcUOg/z7rVpTaywFzK1sDyBYAxXFcY63jH +MQU8wWWpdBGhtBJoLQN3fMdquYWmRMk/xAjLukBU+nrxPPyt0X3Viaiq+sg5rzL1 +Khr5yiACE8Xjxe+ISBJBSe6neOvUroLaGE5oMXamhZf0GyHsqScAO9Z6SWwxnj2R +n4C0YZiTL9R07qdcN/PaaS/OLx4N0I3Lpd7rfYcCggEAdgBHrPNVR8eC4ygSYTbv +lfeLtlkT/Ignya0kxi3n6C+NkVz/xWYjvR+1F2qIAFQ+HOygXLGu2REeKpWhFHdb +VC9EsdaUNc9Trfqwu+6W/+LSjNS8jFj2YaVFBMjv4WniOW8YA4LnCwlvLlYZxsOg +b3/6SBibpDSM0fZEhn8ACf4lcj0ifR3Ljg2UDgyA134nl13CrbI5HOYSUblPUGek +WJdE1Al+kFnKU6K3xAv9vhNqRMZ+q3rj+ocC7tZlzqjcXBp7/Wxd2JW8hJ21gKDF +JRTUuvrIvvYBcUt3jtL8PBJOjK5VmX8oEiIAfeG2I7FkLm9lK6ii14VGELWhTGQi +SwKCAQEAkZTSBq7tJ5CwiwKW3ubTEyDWugO3IplXAn8NMzGwSG5NF6qLqTpRAlDk +IJ7JEuMY0KKNDj4qCy+EEyR7a3+P1QcYquBAAFcwsaq+49ix+MxApVUjj2RT7yzt +IT3J1NP782AAVMUVK2n/tBvhRnDPmofhwCXKxP8t1SGbUCX+2I5IcAL3aQgSrDsF +uyUPCSL08f6SJWDQa7k9RFg2vnJgJjPJnvf+xuI6jJrbOJUcmUfBmTcYzjWKZvRB +RctFOLbbrfsY3D2jgW/CUw/jbrwUokwm4VatzMCgHlZi6WJIGJftDP4b1MJACe02 ++AXVqLYxuaMTIdm5Ahyl1sCNrOl8nQ== +-----END PRIVATE KEY----- diff --git a/pkg/backend/remote-state/http/testdata/gencerts.sh b/pkg/backend/remote-state/http/testdata/gencerts.sh new file mode 100755 index 00000000000..ebb987d8de5 --- /dev/null +++ 
b/pkg/backend/remote-state/http/testdata/gencerts.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# +# Generates certs required for mTLS testing: +# - ca.key and ca.cert.pem are self-signed, used as the source of truth for client and server to verify each other. +# - client.key and client.crt are the client's key and cert (signed by the ca key and cert) +# - server.key and server.crt are the server's key and cert (signed by the ca key and cert) + +set -ex + +# I was doing this on M1 mac and needed newer openssl to add the SAN IP; please export OPENSSL when invoking as needed +OPENSSL="${OPENSSL:-openssl}" + +# Nuke and recreate the certs dir +rm -rf certs +mkdir certs +cd certs || exit 1 + +# CA +"$OPENSSL" genrsa -out ca.key 4096 +"$OPENSSL" req -new -x509 -days 365000 -key ca.key -out ca.cert.pem + +# Server +"$OPENSSL" genrsa -out server.key 4096 +"$OPENSSL" req -new -key server.key -out server.csr -addext 'subjectAltName = IP:127.0.0.1' +"$OPENSSL" x509 -req -days 365000 -in server.csr -CA ca.cert.pem -CAkey ca.key -CAcreateserial -out server.crt -copy_extensions copy + +# Client +"$OPENSSL" genrsa -out client.key 4096 +"$OPENSSL" req -new -key client.key -out client.csr -addext 'subjectAltName = IP:127.0.0.1' +"$OPENSSL" x509 -req -days 365000 -in client.csr -CA ca.cert.pem -CAkey ca.key -CAcreateserial -out client.crt -copy_extensions copy diff --git a/pkg/backend/remote-state/inmem/backend.go b/pkg/backend/remote-state/inmem/backend.go new file mode 100644 index 00000000000..a7c78cfe5ab --- /dev/null +++ b/pkg/backend/remote-state/inmem/backend.go @@ -0,0 +1,214 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + statespkg "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// we keep the states and locks in package-level variables, so that they can be +// accessed from multiple instances of the backend. This better emulates +// backend instances accessing a single remote data store. +var ( + states stateMap + locks lockMap +) + +func init() { + Reset() +} + +// Reset clears out all existing state and lock data. +// This is used to initialize the package during init, as well as between +// tests. +func Reset() { + states = stateMap{ + m: map[string]*remote.State{}, + } + + locks = lockMap{ + m: map[string]*statemgr.LockInfo{}, + } +} + +// New creates a new backend for Inmem remote state. 
+func New(enc encryption.StateEncryption) backend.Backend { + // Set the schema + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "lock_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "initializes the state in a locked configuration", + }, + }, + } + backend := &Backend{Backend: s, encryption: enc} + backend.Backend.ConfigureFunc = backend.configure + return backend +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption +} + +func (b *Backend) configure(ctx context.Context) error { + states.Lock() + defer states.Unlock() + + defaultClient := &RemoteClient{ + Name: backend.DefaultStateName, + } + + states.m[backend.DefaultStateName] = remote.NewState(defaultClient, b.encryption) + + // set the default client lock info per the test config + data := schema.FromContextBackendConfig(ctx) + if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { + info := statemgr.NewLockInfo() + info.ID = v.(string) + info.Operation = "test" + info.Info = "test config" + + locks.lock(backend.DefaultStateName, info) + } + + return nil +} + +func (b *Backend) Workspaces() ([]string, error) { + states.Lock() + defer states.Unlock() + + var workspaces []string + + for s := range states.m { + workspaces = append(workspaces, s) + } + + sort.Strings(workspaces) + return workspaces, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + states.Lock() + defer states.Unlock() + + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + delete(states.m, name) + return nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + states.Lock() + defer states.Unlock() + + s := states.m[name] + if s == nil { + s = remote.NewState( + &RemoteClient{ + Name: name, + }, + b.encryption, + ) + states.m[name] = s + + // to most closely replicate other implementations, we are going to + // take a lock and create a new state if it doesn't 
exist. + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := s.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock inmem state: %w", err) + } + defer s.Unlock(lockID) + + // If we have no state, we have to create an empty state + if v := s.State(); v == nil { + if err := s.WriteState(statespkg.NewState()); err != nil { + return nil, err + } + if err := s.PersistState(nil); err != nil { + return nil, err + } + } + } + + return s, nil +} + +type stateMap struct { + sync.Mutex + m map[string]*remote.State +} + +// Global level locks for inmem backends. +type lockMap struct { + sync.Mutex + m map[string]*statemgr.LockInfo +} + +func (l *lockMap) lock(name string, info *statemgr.LockInfo) (string, error) { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + if lockInfo != nil { + lockErr := &statemgr.LockError{ + Info: lockInfo, + } + + lockErr.Err = errors.New("state locked") + // make a copy of the lock info to avoid any testing shenanigans + *lockErr.Info = *lockInfo + return "", lockErr + } + + info.Created = time.Now().UTC() + l.m[name] = info + + return info.ID, nil +} + +func (l *lockMap) unlock(name, id string) error { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + + if lockInfo == nil { + return errors.New("state not locked") + } + + lockErr := &statemgr.LockError{ + Info: &statemgr.LockInfo{}, + } + + if id != lockInfo.ID { + lockErr.Err = errors.New("invalid lock id") + *lockErr.Info = *lockInfo + return lockErr + } + + delete(l.m, name) + return nil +} diff --git a/pkg/backend/remote-state/inmem/backend_test.go b/pkg/backend/remote-state/inmem/backend_test.go new file mode 100644 index 00000000000..8ade83e06b2 --- /dev/null +++ b/pkg/backend/remote-state/inmem/backend_test.go @@ -0,0 +1,98 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "flag" + "os" + "testing" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + statespkg "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + defer Reset() + testID := "test_lock_id" + + config := map[string]interface{}{ + "lock_id": testID, + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + c := s.(*remote.State).Client.(*RemoteClient) + if c.Name != backend.DefaultStateName { + t.Fatal("client name is not configured") + } + + if err := locks.unlock(backend.DefaultStateName, testID); err != nil { + t.Fatalf("default state should have been locked: %s", err) + } +} + +func TestBackend(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()).(*Backend) + backend.TestBackendStates(t, b) +} + +func TestBackendLocked(t *testing.T) { + defer Reset() + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()).(*Backend) + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()).(*Backend) + + backend.TestBackendStateLocks(t, b1, b2) +} + +// use the this backen to test the remote.State implementation +func TestRemoteState(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()) + + workspace := "workspace" + + // create a new workspace in this backend + s, err := 
b.StateMgr(workspace) + if err != nil { + t.Fatal(err) + } + + // force overwriting the remote state + newState := statespkg.NewState() + + if err := s.WriteState(newState); err != nil { + t.Fatal(err) + } + + if err := s.PersistState(nil); err != nil { + t.Fatal(err) + } + + if err := s.RefreshState(); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/backend/remote-state/inmem/client.go b/pkg/backend/remote-state/inmem/client.go new file mode 100644 index 00000000000..4b2561aa35e --- /dev/null +++ b/pkg/backend/remote-state/inmem/client.go @@ -0,0 +1,52 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "crypto/md5" + + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// RemoteClient is a remote client that stores data in memory for testing. +type RemoteClient struct { + Data []byte + MD5 []byte + Name string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + if c.Data == nil { + return nil, nil + } + + return &remote.Payload{ + Data: c.Data, + MD5: c.MD5, + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + md5 := md5.Sum(data) + + c.Data = data + c.MD5 = md5[:] + return nil +} + +func (c *RemoteClient) Delete() error { + c.Data = nil + c.MD5 = nil + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + return locks.lock(c.Name, info) +} +func (c *RemoteClient) Unlock(id string) error { + return locks.unlock(c.Name, id) +} diff --git a/pkg/backend/remote-state/inmem/client_test.go b/pkg/backend/remote-state/inmem/client_test.go new file mode 100644 index 00000000000..17c4f1deeaf --- /dev/null +++ b/pkg/backend/remote-state/inmem/client_test.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + defer Reset() + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, s.(*remote.State).Client) +} + +func TestInmemLocks(t *testing.T) { + defer Reset() + s, err := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), hcl.EmptyBody()).StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s.(*remote.State).Client, s.(*remote.State).Client) +} diff --git a/pkg/backend/remote-state/kubernetes/backend.go b/pkg/backend/remote-state/kubernetes/backend.go new file mode 100644 index 00000000000..59c87da3701 --- /dev/null +++ b/pkg/backend/remote-state/kubernetes/backend.go @@ -0,0 +1,403 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "bytes" + "context" + "fmt" + "log" + "os" + "path/filepath" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/kubegems/opentofu/version" + "github.com/mitchellh/go-homedir" + k8sSchema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + secretResource = k8sSchema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", + } +) + +// New creates a new backend for kubernetes remote state. +func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "secret_suffix": { + Type: schema.TypeString, + Required: true, + Description: "Suffix used when creating the secret. 
The secret will be named in the format: `tfstate-{workspace}-{secret_suffix}`.", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Map of additional labels to be applied to the secret.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_NAMESPACE", "default"), + Description: "Namespace to store the secret in.", + }, + "in_cluster_config": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_IN_CLUSTER_CONFIG", false), + Description: "Used to authenticate to the cluster from inside a pod.", + }, + "load_config_file": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_LOAD_CONFIG_FILE", true), + Description: "Load local kubeconfig.", + }, + "host": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_HOST", ""), + Description: "The hostname (in form of URI) of Kubernetes master.", + }, + "username": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_USER", ""), + Description: "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", + }, + "password": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_PASSWORD", ""), + Description: "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", + }, + "insecure": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_INSECURE", false), + Description: "Whether server should be accessed without verifying the TLS certificate.", + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_CERT_DATA", ""), + Description: "PEM-encoded client certificate for TLS authentication.", + }, + "client_key": { + Type: schema.TypeString, + 
Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLIENT_KEY_DATA", ""), + Description: "PEM-encoded client certificate key for TLS authentication.", + }, + "cluster_ca_certificate": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CLUSTER_CA_CERT_DATA", ""), + Description: "PEM-encoded root certificates bundle for TLS authentication.", + }, + "config_paths": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Description: "A list of paths to kube config files. Can be set with KUBE_CONFIG_PATHS environment variable.", + }, + "config_path": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CONFIG_PATH", ""), + Description: "Path to the kube config file. Can be set with KUBE_CONFIG_PATH environment variable.", + }, + "config_context": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX", ""), + }, + "config_context_auth_info": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_AUTH_INFO", ""), + Description: "", + }, + "config_context_cluster": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_CTX_CLUSTER", ""), + Description: "", + }, + "token": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("KUBE_TOKEN", ""), + Description: "Token to authentifcate a service account.", + }, + "exec": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "api_version": { + Type: schema.TypeString, + Required: true, + }, + "command": { + Type: schema.TypeString, + Required: true, + }, + "env": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Description: 
"Use a credential plugin to authenticate.", + }, + }, + } + + result := &Backend{Backend: s, encryption: enc} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + // The fields below are set from configure + kubernetesSecretClient dynamic.ResourceInterface + kubernetesLeaseClient coordinationv1.LeaseInterface + config *restclient.Config + namespace string + labels map[string]string + nameSuffix string +} + +func (b Backend) getKubernetesSecretClient() (dynamic.ResourceInterface, error) { + if b.kubernetesSecretClient != nil { + return b.kubernetesSecretClient, nil + } + + client, err := dynamic.NewForConfig(b.config) + if err != nil { + return nil, fmt.Errorf("Failed to configure: %w", err) + } + + b.kubernetesSecretClient = client.Resource(secretResource).Namespace(b.namespace) + return b.kubernetesSecretClient, nil +} + +func (b Backend) getKubernetesLeaseClient() (coordinationv1.LeaseInterface, error) { + if b.kubernetesLeaseClient != nil { + return b.kubernetesLeaseClient, nil + } + + client, err := kubernetes.NewForConfig(b.config) + if err != nil { + return nil, err + } + + b.kubernetesLeaseClient = client.CoordinationV1().Leases(b.namespace) + return b.kubernetesLeaseClient, nil +} + +func (b *Backend) configure(ctx context.Context) error { + if b.config != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + cfg, err := getInitialConfig(data) + if err != nil { + return err + } + + // Overriding with static configuration + cfg.UserAgent = httpclient.OpenTofuUserAgent(version.Version) + + if v, ok := data.GetOk("host"); ok { + cfg.Host = v.(string) + } + if v, ok := data.GetOk("username"); ok { + cfg.Username = v.(string) + } + if v, ok := data.GetOk("password"); ok { + cfg.Password = v.(string) + } + if v, ok := data.GetOk("insecure"); ok { + cfg.Insecure = v.(bool) + } + if v, ok := 
data.GetOk("cluster_ca_certificate"); ok { + cfg.CAData = bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("client_certificate"); ok { + cfg.CertData = bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("client_key"); ok { + cfg.KeyData = bytes.NewBufferString(v.(string)).Bytes() + } + if v, ok := data.GetOk("token"); ok { + cfg.BearerToken = v.(string) + } + + if v, ok := data.GetOk("labels"); ok { + labels := map[string]string{} + for k, vv := range v.(map[string]interface{}) { + labels[k] = vv.(string) + } + b.labels = labels + } + + ns := data.Get("namespace").(string) + b.namespace = ns + b.nameSuffix = data.Get("secret_suffix").(string) + b.config = cfg + + return nil +} + +func getInitialConfig(data *schema.ResourceData) (*restclient.Config, error) { + var cfg *restclient.Config + var err error + + inCluster := data.Get("in_cluster_config").(bool) + if inCluster { + cfg, err = restclient.InClusterConfig() + if err != nil { + return nil, err + } + } else { + cfg, err = tryLoadingConfigFile(data) + if err != nil { + return nil, err + } + } + + if cfg == nil { + cfg = &restclient.Config{} + } + return cfg, err +} + +func tryLoadingConfigFile(d *schema.ResourceData) (*restclient.Config, error) { + loader := &clientcmd.ClientConfigLoadingRules{} + + configPaths := []string{} + if v, ok := d.Get("config_path").(string); ok && v != "" { + configPaths = []string{v} + } else if v, ok := d.Get("config_paths").([]interface{}); ok && len(v) > 0 { + for _, p := range v { + configPaths = append(configPaths, p.(string)) + } + } else if v := os.Getenv("KUBE_CONFIG_PATHS"); v != "" { + configPaths = filepath.SplitList(v) + } + + expandedPaths := []string{} + for _, p := range configPaths { + path, err := homedir.Expand(p) + if err != nil { + log.Printf("[DEBUG] Could not expand path: %s", err) + return nil, err + } + log.Printf("[DEBUG] Using kubeconfig: %s", path) + expandedPaths = append(expandedPaths, path) + } + + if 
len(expandedPaths) == 1 { + loader.ExplicitPath = expandedPaths[0] + } else { + loader.Precedence = expandedPaths + } + + overrides := &clientcmd.ConfigOverrides{} + ctxSuffix := "; default context" + + ctx, ctxOk := d.GetOk("config_context") + authInfo, authInfoOk := d.GetOk("config_context_auth_info") + cluster, clusterOk := d.GetOk("config_context_cluster") + if ctxOk || authInfoOk || clusterOk { + ctxSuffix = "; overriden context" + if ctxOk { + overrides.CurrentContext = ctx.(string) + ctxSuffix += fmt.Sprintf("; config ctx: %s", overrides.CurrentContext) + log.Printf("[DEBUG] Using custom current context: %q", overrides.CurrentContext) + } + + overrides.Context = clientcmdapi.Context{} + if authInfoOk { + overrides.Context.AuthInfo = authInfo.(string) + ctxSuffix += fmt.Sprintf("; auth_info: %s", overrides.Context.AuthInfo) + } + if clusterOk { + overrides.Context.Cluster = cluster.(string) + ctxSuffix += fmt.Sprintf("; cluster: %s", overrides.Context.Cluster) + } + log.Printf("[DEBUG] Using overidden context: %#v", overrides.Context) + } + + if v, ok := d.GetOk("exec"); ok { + exec := &clientcmdapi.ExecConfig{} + if spec, ok := v.([]interface{})[0].(map[string]interface{}); ok { + exec.APIVersion = spec["api_version"].(string) + exec.Command = spec["command"].(string) + exec.Args = expandStringSlice(spec["args"].([]interface{})) + for kk, vv := range spec["env"].(map[string]interface{}) { + exec.Env = append(exec.Env, clientcmdapi.ExecEnvVar{Name: kk, Value: vv.(string)}) + } + } else { + return nil, fmt.Errorf("Failed to parse exec") + } + overrides.AuthInfo.Exec = exec + } + + cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides) + cfg, err := cc.ClientConfig() + if err != nil { + if pathErr, ok := err.(*os.PathError); ok && os.IsNotExist(pathErr.Err) { + log.Printf("[INFO] Unable to load config file as it doesn't exist at %q", pathErr.Path) + return nil, nil + } + return nil, fmt.Errorf("Failed to initialize kubernetes 
configuration: %w", err) + } + + log.Printf("[INFO] Successfully initialized config") + return cfg, nil +} + +func expandStringSlice(s []interface{}) []string { + result := make([]string, len(s), len(s)) + for k, v := range s { + // Handle the OpenTofu parser bug which turns empty strings in lists to nil. + if v == nil { + result[k] = "" + } else { + result[k] = v.(string) + } + } + return result +} diff --git a/pkg/backend/remote-state/kubernetes/backend_state.go b/pkg/backend/remote-state/kubernetes/backend_state.go new file mode 100644 index 00000000000..39f8623cc7a --- /dev/null +++ b/pkg/backend/remote-state/kubernetes/backend_state.go @@ -0,0 +1,171 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Workspaces returns a list of names for the workspaces found in k8s. The default +// workspace is always returned as the first element in the slice. 
+func (b *Backend) Workspaces() ([]string, error) {
+	secretClient, err := b.getKubernetesSecretClient()
+	if err != nil {
+		return nil, err
+	}
+
+	secrets, err := secretClient.List(
+		context.Background(),
+		metav1.ListOptions{
+			LabelSelector: tfstateKey + "=true",
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use a map so there aren't duplicate workspaces
+	m := make(map[string]struct{})
+	for _, secret := range secrets.Items {
+		sl := secret.GetLabels()
+		ws, ok := sl[tfstateWorkspaceKey]
+		if !ok {
+			continue
+		}
+
+		key, ok := sl[tfstateSecretSuffixKey]
+		if !ok {
+			continue
+		}
+
+		// Make sure it isn't default and the key matches
+		if ws != backend.DefaultStateName && key == b.nameSuffix {
+			m[ws] = struct{}{}
+		}
+	}
+
+	// Named "workspaces" (not "states") so the local does not shadow the
+	// imported "states" package used elsewhere in this file.
+	workspaces := []string{backend.DefaultStateName}
+	for k := range m {
+		workspaces = append(workspaces, k)
+	}
+
+	// Keep the default workspace first; sort the remainder alphabetically.
+	sort.Strings(workspaces[1:])
+	return workspaces, nil
+}
+
+// DeleteWorkspace removes the state secret (and, via the client, the lock
+// lease) for the named workspace. The default workspace cannot be deleted.
+func (b *Backend) DeleteWorkspace(name string, _ bool) error {
+	if name == backend.DefaultStateName || name == "" {
+		return fmt.Errorf("can't delete default state")
+	}
+
+	client, err := b.remoteClient(name)
+	if err != nil {
+		return err
+	}
+
+	return client.Delete()
+}
+
+// StateMgr returns a full state manager for the named workspace. If no state
+// exists yet, an empty state is created and persisted under a lock so that
+// concurrent inits do not race.
+func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
+	c, err := b.remoteClient(name)
+	if err != nil {
+		return nil, err
+	}
+
+	stateMgr := remote.NewState(c, b.encryption)
+
+	// Grab the value
+	if err := stateMgr.RefreshState(); err != nil {
+		return nil, err
+	}
+
+	// If we have no state, we have to create an empty state
+	if v := stateMgr.State(); v == nil {
+
+		lockInfo := statemgr.NewLockInfo()
+		lockInfo.Operation = "init"
+		lockID, err := stateMgr.Lock(lockInfo)
+		if err != nil {
+			return nil, err
+		}
+
+		secretName, err := c.createSecretName()
+		if err != nil {
+			return nil, err
+		}
+
+		// Local helper function so we can call it multiple places
+		unlock := func(baseErr error) error {
+			if err := stateMgr.Unlock(lockID); err != nil {
+				const unlockErrMsg = `%v
+				Additionally, unlocking the state in Kubernetes failed:
+
+				Error message: %w
+				Lock ID (gen): %v
+				Secret Name: %v
+
+				You may have to force-unlock this state in order to use it again.
+				The Kubernetes backend acquires a lock during initialization to ensure
+				the initial state file is created.`
+				return fmt.Errorf(unlockErrMsg, baseErr, err, lockID, secretName)
+			}
+
+			return baseErr
+		}
+
+		if err := stateMgr.WriteState(states.NewState()); err != nil {
+			return nil, unlock(err)
+		}
+		if err := stateMgr.PersistState(nil); err != nil {
+			return nil, unlock(err)
+		}
+
+		// Unlock, the state should now be initialized
+		if err := unlock(nil); err != nil {
+			return nil, err
+		}
+
+	}
+
+	return stateMgr, nil
+}
+
+// remoteClient builds a RemoteClient configured for the named workspace,
+// wiring in the secret and lease clients plus the backend's naming settings.
+func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
+	if name == "" {
+		return nil, errors.New("missing state name")
+	}
+
+	secretClient, err := b.getKubernetesSecretClient()
+	if err != nil {
+		return nil, err
+	}
+
+	leaseClient, err := b.getKubernetesLeaseClient()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &RemoteClient{
+		kubernetesSecretClient: secretClient,
+		kubernetesLeaseClient:  leaseClient,
+		namespace:              b.namespace,
+		labels:                 b.labels,
+		nameSuffix:             b.nameSuffix,
+		workspace:              name,
+	}
+
+	return client, nil
+}
diff --git a/pkg/backend/remote-state/kubernetes/backend_test.go b/pkg/backend/remote-state/kubernetes/backend_test.go
new file mode 100644
index 00000000000..8a49b0f68e9
--- /dev/null
+++ b/pkg/backend/remote-state/kubernetes/backend_test.go
@@ -0,0 +1,202 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "context" + "fmt" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statemgr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + secretSuffix = "test-state" +) + +var namespace string + +// verify that we are doing ACC tests or the k8s tests specifically +func testACC(t *testing.T) { + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_K8S_TEST") == "" + if skip { + t.Log("k8s backend tests require setting TF_ACC or TF_K8S_TEST") + t.Skip() + } + + ns := os.Getenv("KUBE_NAMESPACE") + + if ns != "" { + namespace = ns + } else { + namespace = "default" + } + + cleanupK8sResources(t) +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackend(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // Test + backend.TestBackendStates(t, b1) +} + +func TestBackendLocks(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + // Get the backend. We need two to test locking. 
+ b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // Test + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func TestBackendLocksSoak(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + clientCount := 100 + lockAttempts := 100 + + lockers := []statemgr.Locker{} + for i := 0; i < clientCount; i++ { + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("Error creating state manager: %v", err) + } + + lockers = append(lockers, s.(statemgr.Locker)) + } + + wg := sync.WaitGroup{} + for i, l := range lockers { + wg.Add(1) + go func(locker statemgr.Locker, n int) { + defer wg.Done() + + li := statemgr.NewLockInfo() + li.Operation = "test" + li.Who = fmt.Sprintf("client-%v", n) + + for i := 0; i < lockAttempts; i++ { + id, err := locker.Lock(li) + if err != nil { + continue + } + + // hold onto the lock for a little bit + time.Sleep(time.Duration(rand.Intn(10)) * time.Microsecond) + + err = locker.Unlock(id) + if err != nil { + t.Errorf("failed to unlock: %v", err) + } + } + }(l, i) + } + + wg.Wait() +} + +func cleanupK8sResources(t *testing.T) { + ctx := context.Background() + // Get a backend to use the k8s client + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b := b1.(*Backend) + + sClient, err := b.getKubernetesSecretClient() + if err != nil { + t.Fatal(err) + } + + // Delete secrets + opts := 
metav1.ListOptions{LabelSelector: tfstateKey + "=true"} + secrets, err := sClient.List(ctx, opts) + if err != nil { + t.Fatal(err) + } + + delProp := metav1.DeletePropagationBackground + delOps := metav1.DeleteOptions{PropagationPolicy: &delProp} + var errs []error + + for _, secret := range secrets.Items { + labels := secret.GetLabels() + key, ok := labels[tfstateSecretSuffixKey] + if !ok { + continue + } + + if key == secretSuffix { + err = sClient.Delete(ctx, secret.GetName(), delOps) + if err != nil { + errs = append(errs, err) + } + } + } + + leaseClient, err := b.getKubernetesLeaseClient() + if err != nil { + t.Fatal(err) + } + + // Delete leases + leases, err := leaseClient.List(ctx, opts) + if err != nil { + t.Fatal(err) + } + + for _, lease := range leases.Items { + labels := lease.GetLabels() + key, ok := labels[tfstateSecretSuffixKey] + if !ok { + continue + } + + if key == secretSuffix { + err = leaseClient.Delete(ctx, lease.GetName(), delOps) + if err != nil { + errs = append(errs, err) + } + } + } + + if len(errs) > 0 { + t.Fatal(errs) + } +} diff --git a/pkg/backend/remote-state/kubernetes/client.go b/pkg/backend/remote-state/kubernetes/client.go new file mode 100644 index 00000000000..2adb8ae6805 --- /dev/null +++ b/pkg/backend/remote-state/kubernetes/client.go @@ -0,0 +1,418 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/client-go/dynamic" + _ "k8s.io/client-go/plugin/pkg/client/auth" // Import to initialize client auth plugins. + "k8s.io/utils/pointer" + + coordinationv1 "k8s.io/api/coordination/v1" + coordinationclientv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" +) + +const ( + tfstateKey = "tfstate" + tfstateSecretSuffixKey = "tfstateSecretSuffix" + tfstateWorkspaceKey = "tfstateWorkspace" + tfstateLockInfoAnnotation = "app.terraform.io/lock-info" + managedByKey = "app.kubernetes.io/managed-by" +) + +type RemoteClient struct { + kubernetesSecretClient dynamic.ResourceInterface + kubernetesLeaseClient coordinationclientv1.LeaseInterface + namespace string + labels map[string]string + nameSuffix string + workspace string +} + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + secretName, err := c.createSecretName() + if err != nil { + return nil, err + } + secret, err := c.kubernetesSecretClient.Get(context.Background(), secretName, metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + secretData := getSecretData(secret) + stateRaw, ok := secretData[tfstateKey] + if !ok { + // The secret exists but there is no state in it + return nil, nil + } + + stateRawString := stateRaw.(string) + + state, err := uncompressState(stateRawString) + if err != nil { + return nil, err + } + + md5 := md5.Sum(state) + + p := &remote.Payload{ + Data: state, + MD5: md5[:], + } + return p, nil +} + +func (c 
*RemoteClient) Put(data []byte) error {
+	ctx := context.Background()
+	secretName, err := c.createSecretName()
+	if err != nil {
+		return err
+	}
+
+	// State is stored gzip-compressed (see compressState) inside the secret.
+	payload, err := compressState(data)
+	if err != nil {
+		return err
+	}
+
+	secret, err := c.getSecret(secretName)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return err
+		}
+
+		// Secret does not exist yet: create an empty one first, then fall
+		// through to the common update path below.
+		secret = &unstructured.Unstructured{
+			Object: map[string]interface{}{
+				"metadata": metav1.ObjectMeta{
+					Name:        secretName,
+					Namespace:   c.namespace,
+					Labels:      c.getLabels(),
+					Annotations: map[string]string{"encoding": "gzip"},
+				},
+			},
+		}
+
+		secret, err = c.kubernetesSecretClient.Create(ctx, secret, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	setState(secret, payload)
+	_, err = c.kubernetesSecretClient.Update(ctx, secret, metav1.UpdateOptions{})
+	return err
+}
+
+// Delete the state secret and the associated lock lease. Missing objects are
+// not treated as errors so the operation is idempotent.
+func (c *RemoteClient) Delete() error {
+	secretName, err := c.createSecretName()
+	if err != nil {
+		return err
+	}
+
+	err = c.deleteSecret(secretName)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return err
+		}
+	}
+
+	leaseName, err := c.createLeaseName()
+	if err != nil {
+		return err
+	}
+
+	err = c.deleteLease(leaseName)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+// Lock acquires the workspace lock, implemented as a Kubernetes Lease whose
+// HolderIdentity is the lock ID. On success it returns info.ID; when the
+// lease is held by someone else it returns a *statemgr.LockError describing
+// the current holder.
+func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
+	ctx := context.Background()
+	leaseName, err := c.createLeaseName()
+	if err != nil {
+		return "", err
+	}
+
+	lease, err := c.getLease(leaseName)
+	if err != nil {
+		if !k8serrors.IsNotFound(err) {
+			return "", err
+		}
+
+		// No lease exists yet: create one that is already held by us.
+		labels := c.getLabels()
+		lease = &coordinationv1.Lease{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   leaseName,
+				Labels: labels,
+				Annotations: map[string]string{
+					tfstateLockInfoAnnotation: string(info.Marshal()),
+				},
+			},
+			Spec: coordinationv1.LeaseSpec{
+				HolderIdentity: pointer.StringPtr(info.ID),
+			},
+		}
+
+		_, err = c.kubernetesLeaseClient.Create(ctx, lease, metav1.CreateOptions{})
+		if err != nil {
+			return "", err
+		}
+		return info.ID, nil
+	}
+
+	if lease.Spec.HolderIdentity != nil {
+		// Re-locking with the same ID is a no-op success.
+		if *lease.Spec.HolderIdentity == info.ID {
+			return info.ID, nil
+		}
+
+		currentLockInfo, err := c.getLockInfo(lease)
+		if err != nil {
+			return "", err
+		}
+
+		lockErr := &statemgr.LockError{
+			Info: currentLockInfo,
+			Err:  errors.New("the state is already locked by another tofu client"),
+		}
+		return "", lockErr
+	}
+
+	// The lease exists but is unheld: take it over.
+	lease.Spec.HolderIdentity = pointer.StringPtr(info.ID)
+	setLockInfo(lease, info.Marshal())
+	_, err = c.kubernetesLeaseClient.Update(ctx, lease, metav1.UpdateOptions{})
+	if err != nil {
+		return "", err
+	}
+
+	return info.ID, nil
+}
+
+// Unlock releases the lease-based lock, verifying that the given id matches
+// the current holder; a mismatch returns a *statemgr.LockError.
+func (c *RemoteClient) Unlock(id string) error {
+	leaseName, err := c.createLeaseName()
+	if err != nil {
+		return err
+	}
+
+	lease, err := c.getLease(leaseName)
+	if err != nil {
+		return err
+	}
+
+	if lease.Spec.HolderIdentity == nil {
+		return errors.New("state is already unlocked")
+	}
+
+	lockInfo, err := c.getLockInfo(lease)
+	if err != nil {
+		return err
+	}
+
+	lockErr := &statemgr.LockError{Info: lockInfo}
+	if *lease.Spec.HolderIdentity != id {
+		lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
+		return lockErr
+	}
+
+	lease.Spec.HolderIdentity = nil
+	removeLockInfo(lease)
+
+	_, err = c.kubernetesLeaseClient.Update(context.Background(), lease, metav1.UpdateOptions{})
+	if err != nil {
+		lockErr.Err = err
+		return lockErr
+	}
+
+	return nil
+}
+
+// getLockInfo decodes the lock-info annotation on a lease; it returns
+// (nil, nil) when no lock info is present.
+func (c *RemoteClient) getLockInfo(lease *coordinationv1.Lease) (*statemgr.LockInfo, error) {
+	lockData, ok := getLockInfo(lease)
+	if len(lockData) == 0 || !ok {
+		return nil, nil
+	}
+
+	lockInfo := &statemgr.LockInfo{}
+	err := json.Unmarshal(lockData, lockInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	return lockInfo, nil
+}
+
+// getLabels returns the labels applied to the secret and lease, merging the
+// backend's identifying labels with any user-supplied ones.
+func (c *RemoteClient) getLabels() map[string]string {
+	l := map[string]string{
+		tfstateKey:             "true",
+		tfstateSecretSuffixKey: c.nameSuffix,
+		tfstateWorkspaceKey:    c.workspace,
+		managedByKey:
"terraform",
+	}
+
+	// User-supplied labels may override the defaults above.
+	if len(c.labels) != 0 {
+		for k, v := range c.labels {
+			l[k] = v
+		}
+	}
+
+	return l
+}
+
+func (c *RemoteClient) getSecret(name string) (*unstructured.Unstructured, error) {
+	return c.kubernetesSecretClient.Get(context.Background(), name, metav1.GetOptions{})
+}
+
+func (c *RemoteClient) getLease(name string) (*coordinationv1.Lease, error) {
+	return c.kubernetesLeaseClient.Get(context.Background(), name, metav1.GetOptions{})
+}
+
+// deleteSecret removes the named state secret, first verifying it carries the
+// tfstate label so this backend never deletes a secret it does not own.
+func (c *RemoteClient) deleteSecret(name string) error {
+	secret, err := c.getSecret(name)
+	if err != nil {
+		return err
+	}
+
+	labels := secret.GetLabels()
+	v, ok := labels[tfstateKey]
+	if !ok || v != "true" {
+		// Fixed duplicated "does does" and lowercased per Go error conventions.
+		return fmt.Errorf("secret does not have %q label", tfstateKey)
+	}
+
+	delProp := metav1.DeletePropagationBackground
+	delOps := metav1.DeleteOptions{PropagationPolicy: &delProp}
+	return c.kubernetesSecretClient.Delete(context.Background(), name, delOps)
+}
+
+// deleteLease removes the named lock lease, with the same ownership check as
+// deleteSecret.
+func (c *RemoteClient) deleteLease(name string) error {
+	lease, err := c.getLease(name)
+	if err != nil {
+		return err
+	}
+
+	labels := lease.GetLabels()
+	v, ok := labels[tfstateKey]
+	if !ok || v != "true" {
+		return fmt.Errorf("lease does not have %q label", tfstateKey)
+	}
+
+	delProp := metav1.DeletePropagationBackground
+	delOps := metav1.DeleteOptions{PropagationPolicy: &delProp}
+	return c.kubernetesLeaseClient.Delete(context.Background(), name, delOps)
+}
+
+// createSecretName builds the secret name from the tfstate prefix, workspace
+// and suffix, validating it as a DNS-1123 subdomain as Kubernetes requires.
+func (c *RemoteClient) createSecretName() (string, error) {
+	secretName := strings.Join([]string{tfstateKey, c.workspace, c.nameSuffix}, "-")
+
+	errs := validation.IsDNS1123Subdomain(secretName)
+	if len(errs) > 0 {
+		k8sInfo := `
+This is a requirement for Kubernetes secret names.
+The workspace name and key must adhere to Kubernetes naming conventions.`
+		msg := fmt.Sprintf("the secret name %v is invalid, ", secretName)
+		return "", errors.New(msg + strings.Join(errs, ",") + k8sInfo)
+	}
+
+	return secretName, nil
+}
+
+// createLeaseName derives the lock lease name from the secret name.
+func (c *RemoteClient) createLeaseName() (string, error) {
+	n, err := c.createSecretName()
+	if err != nil {
+		return "", err
+	}
+	return "lock-" + n, nil
+}
+
+// compressState gzips raw state bytes for storage inside the secret.
+func compressState(data []byte) ([]byte, error) {
+	b := new(bytes.Buffer)
+	gz := gzip.NewWriter(b)
+	if _, err := gz.Write(data); err != nil {
+		return nil, err
+	}
+	if err := gz.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// uncompressState base64-decodes and gunzips state previously written by
+// compressState.
+func uncompressState(data string) ([]byte, error) {
+	decode, err := base64.StdEncoding.DecodeString(data)
+	if err != nil {
+		return nil, err
+	}
+
+	b := new(bytes.Buffer)
+	gz, err := gzip.NewReader(bytes.NewReader(decode))
+	if err != nil {
+		return nil, err
+	}
+	// Check the read error (previously ignored): a truncated or corrupt gzip
+	// stream must surface as an error rather than as silently empty state.
+	if _, err := b.ReadFrom(gz); err != nil {
+		return nil, err
+	}
+	if err := gz.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func getSecretData(secret *unstructured.Unstructured) map[string]interface{} {
+	if m, ok := secret.Object["data"].(map[string]interface{}); ok {
+		return m
+	}
+	return map[string]interface{}{}
+}
+
+// getLockInfo reads the raw lock-info annotation bytes from a lease.
+func getLockInfo(lease *coordinationv1.Lease) ([]byte, bool) {
+	info, ok := lease.ObjectMeta.GetAnnotations()[tfstateLockInfoAnnotation]
+	if !ok {
+		return nil, false
+	}
+	return []byte(info), true
+}
+
+// setLockInfo stores marshalled lock info in the lease's annotations.
+func setLockInfo(lease *coordinationv1.Lease, l []byte) {
+	annotations := lease.ObjectMeta.GetAnnotations()
+	if annotations != nil {
+		annotations[tfstateLockInfoAnnotation] = string(l)
+	} else {
+		annotations = map[string]string{
+			tfstateLockInfoAnnotation: string(l),
+		}
+	}
+	lease.ObjectMeta.SetAnnotations(annotations)
+}
+
+// removeLockInfo deletes the lock-info annotation from the lease.
+func removeLockInfo(lease *coordinationv1.Lease) {
+	annotations := lease.ObjectMeta.GetAnnotations()
+	delete(annotations, tfstateLockInfoAnnotation)
+	lease.ObjectMeta.SetAnnotations(annotations)
+}
+
+func
setState(secret *unstructured.Unstructured, t []byte) { + secretData := getSecretData(secret) + secretData[tfstateKey] = t + secret.Object["data"] = secretData +} diff --git a/pkg/backend/remote-state/kubernetes/client_test.go b/pkg/backend/remote-state/kubernetes/client_test.go new file mode 100644 index 00000000000..1d5aff9aa5a --- /dev/null +++ b/pkg/backend/remote-state/kubernetes/client_test.go @@ -0,0 +1,125 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, 
s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestForceUnlock(t *testing.T) { + testACC(t) + defer cleanupK8sResources(t) + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "secret_suffix": secretSuffix, + })) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal("failed to get default state to force unlock:", err) + } + + if err := s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock default state") + } + + // now try the same thing with a named state + // first test with default + s1, err = b1.StateMgr("test") + if err != nil { + t.Fatal(err) + } + + info = statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err = s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err = b2.StateMgr("test") + if err != nil { + t.Fatal("failed to get named state to force unlock:", err) + } + + if err = s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock named state") + } +} diff --git a/pkg/backend/remote-state/oss/backend.go b/pkg/backend/remote-state/oss/backend.go new file mode 100644 index 00000000000..89f765199b1 --- /dev/null +++ b/pkg/backend/remote-state/oss/backend.go @@ -0,0 +1,711 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: 
MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package oss + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + "github.com/aliyun/alibaba-cloud-sdk-go/services/location" + "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-cleanhttp" + "github.com/jmespath/go-jmespath" + "github.com/mitchellh/go-homedir" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/kubegems/opentofu/version" +) + +// Deprecated in favor of flattening assume_role_* options +func deprecatedAssumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + //Deprecated: "use assume_role_* options instead", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of a RAM role to assume prior to making API calls.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), + }, + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role. 
You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", + }, + "session_expiration": { + Type: schema.TypeInt, + Optional: true, + Description: "The time after which the established session for assuming role expires.", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + min := 900 + max := 3600 + value, ok := v.(int) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be int", k)} + } + + if value < min || value > max { + return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} + } + + return nil, nil + }, + }, + }, + }, + } +} + +// New creates a new backend for OSS remote state. +func New(enc encryption.StateEncryption) backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Key ID", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")), + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Secret Key", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")), + }, + + "security_token": { + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Security Token", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""), + }, + + "ecs_role_name": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ECS_ROLE_NAME", os.Getenv("ALICLOUD_ECS_ROLE_NAME")), + Description: "The RAM Role Name attached on a ECS instance for API operations. 
You can retrieve this from the 'Access Control' section of the Alibaba Cloud console.", + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The region of the OSS bucket.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")), + }, + "sts_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the STS API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_STS_ENDPOINT", ""), + }, + "tablestore_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the TableStore API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""), + }, + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the OSS API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")), + }, + + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the OSS bucket", + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The directory where state files will be saved inside the bucket", + Default: "env:", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { + return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")} + } + return nil, nil + }, + }, + + "key": { + Type: schema.TypeString, + Optional: true, + Description: "The path of the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { + return nil, []error{fmt.Errorf("key can not start and end with '/'")} + } + return nil, nil + }, + Default: "terraform.tfstate", + }, + + "tablestore_table": { + Type: schema.TypeString, + Optional: true, + Description: "TableStore table for state locking and 
consistency", + Default: "", + }, + + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: false, + }, + + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Object ACL to be applied to the state file", + Default: "", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + if value := v.(string); value != "" { + acls := oss.ACLType(value) + if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite { + return nil, []error{fmt.Errorf( + "%q must be a valid ACL value , expected %s, %s or %s, got %q", + k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)} + } + } + return nil, nil + }, + }, + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SHARED_CREDENTIALS_FILE", ""), + Description: "This is the path to the shared credentials file. If this is not set and a profile is specified, `~/.aliyun/config.json` will be used.", + }, + "profile": { + Type: schema.TypeString, + Optional: true, + Description: "This is the Alibaba Cloud profile name as set in the shared credentials file. 
It can also be sourced from the `ALICLOUD_PROFILE` environment variable.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_PROFILE", ""), + }, + "assume_role": deprecatedAssumeRoleSchema(), + "assume_role_role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "The ARN of a RAM role to assume prior to making API calls.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""), + }, + "assume_role_session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""), + }, + "assume_role_policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role. You cannot use this policy to grant permissions which exceed those of the role that is being assumed.", + }, + "assume_role_session_expiration": { + Type: schema.TypeInt, + Optional: true, + Description: "The time after which the established session for assuming role expires.", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + min := 900 + max := 3600 + value, ok := v.(int) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be int", k)} + } + + if value < min || value > max { + return nil, []error{fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)} + } + + return nil, nil + }, + }, + }, + } + + result := &Backend{Backend: s, encryption: enc} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + encryption encryption.StateEncryption + + // The fields below are set from configure + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + + bucketName string + statePrefix string + stateKey string + serverSideEncryption bool + acl string + otsEndpoint string + otsTable string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.ossClient != nil { + return nil + } 
+ + // Grab the resource data + d := schema.FromContextBackendConfig(ctx) + + b.bucketName = d.Get("bucket").(string) + b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./") + b.stateKey = d.Get("key").(string) + b.serverSideEncryption = d.Get("encrypt").(bool) + b.acl = d.Get("acl").(string) + + var getBackendConfig = func(str string, key string) string { + if str == "" { + value, err := getConfigFromProfile(d, key) + if err == nil && value != nil { + str = value.(string) + } + } + return str + } + + accessKey := getBackendConfig(d.Get("access_key").(string), "access_key_id") + secretKey := getBackendConfig(d.Get("secret_key").(string), "access_key_secret") + securityToken := getBackendConfig(d.Get("security_token").(string), "sts_token") + region := getBackendConfig(d.Get("region").(string), "region_id") + + stsEndpoint := d.Get("sts_endpoint").(string) + endpoint := d.Get("endpoint").(string) + schma := "https" + + roleArn := getBackendConfig("", "ram_role_arn") + sessionName := getBackendConfig("", "ram_session_name") + var policy string + var sessionExpiration int + expiredSeconds, err := getConfigFromProfile(d, "expired_seconds") + if err == nil && expiredSeconds != nil { + sessionExpiration = (int)(expiredSeconds.(float64)) + } + + if v, ok := d.GetOk("assume_role_role_arn"); ok && v.(string) != "" { + roleArn = v.(string) + if v, ok := d.GetOk("assume_role_session_name"); ok { + sessionName = v.(string) + } + if v, ok := d.GetOk("assume_role_policy"); ok { + policy = v.(string) + } + if v, ok := d.GetOk("assume_role_session_expiration"); ok { + sessionExpiration = v.(int) + } + } else if v, ok := d.GetOk("assume_role"); ok { + // deprecated assume_role block + for _, v := range v.(*schema.Set).List() { + assumeRole := v.(map[string]interface{}) + if assumeRole["role_arn"].(string) != "" { + roleArn = assumeRole["role_arn"].(string) + } + if assumeRole["session_name"].(string) != "" { + sessionName = 
assumeRole["session_name"].(string) + } + policy = assumeRole["policy"].(string) + sessionExpiration = assumeRole["session_expiration"].(int) + } + } + + if sessionName == "" { + sessionName = "tofu" + } + if sessionExpiration == 0 { + if v := os.Getenv("ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"); v != "" { + if expiredSeconds, err := strconv.Atoi(v); err == nil { + sessionExpiration = expiredSeconds + } + } + if sessionExpiration == 0 { + sessionExpiration = 3600 + } + } + + if accessKey == "" { + ecsRoleName := getBackendConfig(d.Get("ecs_role_name").(string), "ram_role_name") + subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAuthCredentialByEcsRoleName(ecsRoleName) + if err != nil { + return err + } + accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken + } + + if roleArn != "" { + subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAssumeRoleAK(accessKey, secretKey, securityToken, region, roleArn, sessionName, policy, stsEndpoint, sessionExpiration) + if err != nil { + return err + } + accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken + } + + if endpoint == "" { + endpointsResponse, err := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region) + if err != nil { + log.Printf("[WARN] getting oss endpoint failed and using oss-%s.aliyuncs.com instead. 
Error: %#v.", region, err) + } else { + for _, endpointItem := range endpointsResponse.Endpoints.Endpoint { + if endpointItem.Type == "openAPI" { + endpoint = endpointItem.Endpoint + break + } + } + } + if endpoint == "" { + endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) + } + } + if !strings.HasPrefix(endpoint, "http") { + endpoint = fmt.Sprintf("%s://%s", schma, endpoint) + } + log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint) + var options []oss.ClientOption + if securityToken != "" { + options = append(options, oss.SecurityToken(securityToken)) + } + options = append(options, oss.UserAgent(httpclient.OpenTofuUserAgent(TerraformVersion))) + + proxyUrl := getHttpProxyUrl() + if proxyUrl != nil { + options = append(options, oss.Proxy(proxyUrl.String())) + } + + client, err := oss.New(endpoint, accessKey, secretKey, options...) + b.ossClient = client + otsEndpoint := d.Get("tablestore_endpoint").(string) + if otsEndpoint != "" { + if !strings.HasPrefix(otsEndpoint, "http") { + otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint) + } + b.otsEndpoint = otsEndpoint + parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".") + b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig()) + } + b.otsTable = d.Get("tablestore_table").(string) + + return err +} + +func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointsResponse, error) { + args := location.CreateDescribeEndpointsRequest() + args.ServiceCode = "oss" + args.Id = region + args.Domain = "location-readonly.aliyuncs.com" + + locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token)) + if err != nil { + return nil, fmt.Errorf("unable to initialize the location client: %w", err) + + } + 
locationClient.AppendUserAgent(httpclient.DefaultApplicationName, TerraformVersion) + endpointsResponse, err := locationClient.DescribeEndpoints(args) + if err != nil { + return nil, fmt.Errorf("describe oss endpoint using region: %#v got an error: %w", region, err) + } + return endpointsResponse, nil +} + +func getAssumeRoleAK(accessKey, secretKey, stsToken, region, roleArn, sessionName, policy, stsEndpoint string, sessionExpiration int) (string, string, string, error) { + request := sts.CreateAssumeRoleRequest() + request.RoleArn = roleArn + request.RoleSessionName = sessionName + request.DurationSeconds = requests.NewInteger(sessionExpiration) + request.Policy = policy + request.Scheme = "https" + + var client *sts.Client + var err error + if stsToken == "" { + client, err = sts.NewClientWithAccessKey(region, accessKey, secretKey) + } else { + client, err = sts.NewClientWithStsToken(region, accessKey, secretKey, stsToken) + } + if err != nil { + return "", "", "", err + } + if stsEndpoint != "" { + endpoints.AddEndpointMapping(region, "STS", stsEndpoint) + } + response, err := client.AssumeRole(request) + if err != nil { + return "", "", "", err + } + return response.Credentials.AccessKeyId, response.Credentials.AccessKeySecret, response.Credentials.SecurityToken, nil +} + +func getSdkConfig() *sdk.Config { + return sdk.NewConfig(). + WithMaxRetryTime(5). + WithTimeout(time.Duration(30) * time.Second). + WithGoRoutinePoolSize(10). + WithDebug(false). + WithHttpTransport(getTransport()). 
+ WithScheme("HTTPS") +} + +func getTransport() *http.Transport { + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 120 + } + transport := cleanhttp.DefaultTransport() + transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second + transport.Proxy = http.ProxyFromEnvironment + return transport +} + +type Invoker struct { + catchers []*Catcher +} + +type Catcher struct { + Reason string + RetryCount int + RetryWaitSeconds int +} + +var TerraformVersion = strings.TrimSuffix(version.String(), "-dev") +var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3} +var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3} + +func NewInvoker() Invoker { + i := Invoker{} + i.AddCatcher(ClientErrorCatcher) + i.AddCatcher(ServiceBusyCatcher) + return i +} + +func (a *Invoker) AddCatcher(catcher Catcher) { + a.catchers = append(a.catchers, &catcher) +} + +func (a *Invoker) Run(f func() error) error { + err := f() + + if err == nil { + return nil + } + + for _, catcher := range a.catchers { + if strings.Contains(err.Error(), catcher.Reason) { + catcher.RetryCount-- + + if catcher.RetryCount <= 0 { + return fmt.Errorf("retry timeout and got an error: %w", err) + } else { + time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second) + return a.Run(f) + } + } + } + return err +} + +var providerConfig map[string]interface{} + +func getConfigFromProfile(d *schema.ResourceData, ProfileKey string) (interface{}, error) { + + if providerConfig == nil { + if v, ok := d.GetOk("profile"); !ok || v.(string) == "" { + return nil, nil + } + current := d.Get("profile").(string) + // Set CredsFilename, expanding home directory + profilePath, err := homedir.Expand(d.Get("shared_credentials_file").(string)) + if err != nil { + return nil, err + } + if profilePath == "" { + profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("HOME")) + if runtime.GOOS == "windows" { + profilePath = 
fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("USERPROFILE")) + } + } + providerConfig = make(map[string]interface{}) + _, err = os.Stat(profilePath) + if !os.IsNotExist(err) { + data, err := os.ReadFile(profilePath) + if err != nil { + return nil, err + } + config := map[string]interface{}{} + err = json.Unmarshal(data, &config) + if err != nil { + return nil, err + } + for _, v := range config["profiles"].([]interface{}) { + if current == v.(map[string]interface{})["name"] { + providerConfig = v.(map[string]interface{}) + } + } + } + } + + mode := "" + if v, ok := providerConfig["mode"]; ok { + mode = v.(string) + } else { + return v, nil + } + switch ProfileKey { + case "access_key_id", "access_key_secret": + if mode == "EcsRamRole" { + return "", nil + } + case "ram_role_name": + if mode != "EcsRamRole" { + return "", nil + } + case "sts_token": + if mode != "StsToken" { + return "", nil + } + case "ram_role_arn", "ram_session_name": + if mode != "RamRoleArn" { + return "", nil + } + case "expired_seconds": + if mode != "RamRoleArn" { + return float64(0), nil + } + } + + return providerConfig[ProfileKey], nil +} + +var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +// getAuthCredentialByEcsRoleName aims to access meta to get sts credential +// Actually, the job should be done by sdk, but currently not all resources and products support alibaba-cloud-sdk-go, +// and their go sdk does support ecs role name. 
+// This method is a temporary solution and it should be removed after all go sdk support ecs role name +// The related PR: https://github.com/terraform-providers/terraform-provider-alicloud/pull/731 +func getAuthCredentialByEcsRoleName(ecsRoleName string) (accessKey, secretKey, token string, err error) { + + if ecsRoleName == "" { + return + } + requestUrl := securityCredURL + ecsRoleName + httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader("")) + if err != nil { + err = fmt.Errorf("build sts requests err: %w", err) + return + } + httpClient := &http.Client{} + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + err = fmt.Errorf("get Ecs sts token err: %w", err) + return + } + + response := responses.NewCommonResponse() + err = responses.Unmarshal(response, httpResponse, "") + if err != nil { + err = fmt.Errorf("unmarshal Ecs sts token response err: %w", err) + return + } + + if response.GetHttpStatus() != http.StatusOK { + err = fmt.Errorf("get Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString()) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %w", err) + return + } + code, err := jmespath.Search("Code", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get Code: %w", err) + return + } + if code.(string) != "Success" { + err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") + return + } + accessKeyId, err := jmespath.Search("AccessKeyId", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %w", err) + return + } + accessKeySecret, err := jmespath.Search("AccessKeySecret", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %w", err) + return + } + securityToken, err := 
jmespath.Search("SecurityToken", data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %w", err) + return + } + + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil { + err = fmt.Errorf("there is no any available accesskey, secret and security token for Ecs role %s", ecsRoleName) + return + } + + return accessKeyId.(string), accessKeySecret.(string), securityToken.(string), nil +} + +func getHttpProxyUrl() *url.URL { + for _, v := range []string{"HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"} { + value := strings.Trim(os.Getenv(v), " ") + if value != "" { + if !regexp.MustCompile(`^http(s)?://`).MatchString(value) { + value = fmt.Sprintf("https://%s", value) + } + proxyUrl, err := url.Parse(value) + if err == nil { + return proxyUrl + } + break + } + } + return nil +} diff --git a/pkg/backend/remote-state/oss/backend_state.go b/pkg/backend/remote-state/oss/backend_state.go new file mode 100644 index 00000000000..c633eae2932 --- /dev/null +++ b/pkg/backend/remote-state/oss/backend_state.go @@ -0,0 +1,202 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package oss + +import ( + "errors" + "fmt" + "log" + "path" + "sort" + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +const ( + lockFileSuffix = ".tflock" +) + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + ossClient: b.ossClient, + bucketName: b.bucketName, + stateFile: b.stateFile(name), + lockFile: b.lockFile(name), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + otsTable: b.otsTable, + otsClient: b.otsClient, + } + if b.otsEndpoint != "" && b.otsTable != "" { + _, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{ + TableName: b.otsTable, + }) + if err != nil { + return client, fmt.Errorf("error describing table store %s: %w", b.otsTable, err) + } + } + + return client, nil +} + +func (b *Backend) Workspaces() ([]string, error) { + bucket, err := b.ossClient.Bucket(b.bucketName) + if err != nil { + return []string{""}, fmt.Errorf("error getting bucket: %w", err) + } + + var options []oss.Option + options = append(options, oss.Prefix(b.statePrefix+"/"), oss.MaxKeys(1000)) + resp, err := bucket.ListObjects(options...) 
+ if err != nil { + return nil, err + } + + result := []string{backend.DefaultStateName} + prefix := b.statePrefix + lastObj := "" + for { + for _, obj := range resp.Objects { + // we have 3 parts, the state prefix, the workspace name, and the state file: // + if path.Join(b.statePrefix, b.stateKey) == obj.Key { + // filter the default workspace + continue + } + lastObj = obj.Key + parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/") + if len(parts) > 0 && parts[0] != "" { + result = append(result, parts[0]) + } + } + if resp.IsTruncated { + if len(options) == 3 { + options[2] = oss.Marker(lastObj) + } else { + options = append(options, oss.Marker(lastObj)) + } + resp, err = bucket.ListObjects(options...) + if err != nil { + return nil, err + } + } else { + break + } + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + return client.Delete() +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + stateMgr := remote.NewState(client, b.encryption) + + // Check to see if this state already exists. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + log.Printf("[DEBUG] Current workspace name: %s. All workspaces:%#v", name, existing) + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + // We need to create the object so it's listed by States. 
+ if !exists { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock OSS state: %w", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(e error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err) + } + return e + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + return stateMgr, nil +} + +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName { + return path.Join(b.statePrefix, b.stateKey) + } + return path.Join(b.statePrefix, name, b.stateKey) +} + +func (b *Backend) lockFile(name string) string { + return b.stateFile(name) + lockFileSuffix +} + +const stateUnlockError = ` +Error unlocking Alibaba Cloud OSS state file: + +Lock ID: %s +Error message: %w + +You may have to force-unlock this state in order to use it again. +The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created. 
+` diff --git a/pkg/backend/remote-state/oss/backend_test.go b/pkg/backend/remote-state/oss/backend_test.go new file mode 100644 index 00000000000..094df592b7b --- /dev/null +++ b/pkg/backend/remote-state/oss/backend_test.go @@ -0,0 +1,255 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package oss + +import ( + "fmt" + "math/rand" + "os" + "testing" + "time" + + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/encryption" +) + +// verify that we are doing ACC tests or the OSS tests specifically +func testACC(t *testing.T) { + t.Helper() + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == "" + if skip { + t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST") + t.Skip() + } + if skip { + t.Fatal("oss backend tests require setting ALICLOUD_ACCESS_KEY or ALICLOUD_ACCESS_KEY_ID") + } + t.Setenv("ALICLOUD_REGION", "cn-beijing") +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != "terraform-backend-oss-test" { + t.Fatalf("Incorrect bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect 
state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfigWorkSpace(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("terraform-backend-oss-test-%d", rand.Intn(1000)) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": bucketName, + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + if _, err := b.Workspaces(); err != nil { + t.Fatal(err.Error()) + } + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != bucketName { + t.Fatalf("Incorrect bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfigProfile(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "mystate", + "key": "first.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + "profile": "default", + } + + b := backend.TestBackendConfig(t, 
New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") { + t.Fatalf("Incorrect region was provided") + } + if b.bucketName != "terraform-backend-oss-test" { + t.Fatalf("Incorrect bucketName was provided") + } + if b.statePrefix != "mystate" { + t.Fatalf("Incorrect state file path was provided") + } + if b.stateKey != "first.tfstate" { + t.Fatalf("Incorrect keyName was provided") + } + + if b.ossClient.Config.AccessKeyID == "" { + t.Fatalf("No Access Key Id was provided") + } + if b.ossClient.Config.AccessKeySecret == "" { + t.Fatalf("No Secret Access Key was provided") + } +} + +func TestBackendConfig_invalidKey(t *testing.T) { + testACC(t) + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{ + "region": "cn-beijing", + "bucket": "terraform-backend-oss-test", + "prefix": "/leading-slash", + "name": "/test.tfstate", + "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com", + "tablestore_table": "TableStore", + }) + + _, results := New(encryption.StateEncryptionDisabled()).PrepareConfig(cfg) + if !results.HasErrors() { + t.Fatal("expected config validation error") + } +} + +func TestBackend(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix()) + statePrefix := "multi/level/path/" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": statePrefix, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": statePrefix, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + + backend.TestBackendStates(t, b1) + backend.TestBackendStateLocks(t, b1, b2) + 
backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { + // Be clear about what we're doing in case the user needs to clean this up later. + if err := ossClient.CreateBucket(bucketName); err != nil { + t.Fatal("failed to create test OSS bucket:", err) + } +} + +func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) { + warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alibaba Cloud account and may incur storage charges. (error was %s)" + + // first we have to get rid of the env objects, or we can't delete the bucket + bucket, err := ossClient.Bucket(bucketName) + if err != nil { + t.Fatal("Error getting bucket:", err) + return + } + objects, err := bucket.ListObjects() + if err != nil { + t.Logf(warning, err) + return + } + for _, obj := range objects.Objects { + if err := bucket.DeleteObject(obj.Key); err != nil { + // this will need cleanup no matter what, so just warn and exit + t.Logf(warning, err) + return + } + } + + if err := ossClient.DeleteBucket(bucketName); err != nil { + t.Logf(warning, err) + } +} + +// create the tablestore table, and wait until we can query it. 
+func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { + tableMeta := new(tablestore.TableMeta) + tableMeta.TableName = tableName + tableMeta.AddPrimaryKeyColumn(pkName, tablestore.PrimaryKeyType_STRING) + + tableOption := new(tablestore.TableOption) + tableOption.TimeToAlive = -1 + tableOption.MaxVersion = 1 + + reservedThroughput := new(tablestore.ReservedThroughput) + + _, err := otsClient.CreateTable(&tablestore.CreateTableRequest{ + TableMeta: tableMeta, + TableOption: tableOption, + ReservedThroughput: reservedThroughput, + }) + if err != nil { + t.Fatal(err) + } +} + +func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) { + params := &tablestore.DeleteTableRequest{ + TableName: tableName, + } + _, err := otsClient.DeleteTable(params) + if err != nil { + t.Logf("WARNING: Failed to delete the test TableStore table %q. It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err) + } +} diff --git a/pkg/backend/remote-state/oss/client.go b/pkg/backend/remote-state/oss/client.go new file mode 100644 index 00000000000..85b013c6a63 --- /dev/null +++ b/pkg/backend/remote-state/oss/client.go @@ -0,0 +1,454 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/pkg/errors" + + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +const ( + // Store the last saved serial in tablestore with this suffix for consistency checks. 
+ stateIDSuffix = "-md5" + + pkName = "LockID" +) + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. + consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +type RemoteClient struct { + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + bucketName string + stateFile string + lockFile string + serverSideEncryption bool + acl string + otsTable string +} + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.getObj() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. 
+ var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying OSS RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("error getting bucket: %w", err) + } + + body := bytes.NewReader(data) + + var options []oss.Option + if c.acl != "" { + options = append(options, oss.ACL(oss.ACLType(c.acl))) + } + options = append(options, oss.ContentType("application/json")) + if c.serverSideEncryption { + options = append(options, oss.ServerSideEncryption("AES256")) + } + options = append(options, oss.ContentLength(int64(len(data)))) + + if body != nil { + if err := bucket.PutObject(c.stateFile, body, options...); err != nil { + return fmt.Errorf("failed to upload state %s: %w", c.stateFile, err) + } + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
+ return fmt.Errorf("failed to store state MD5: %w", err) + } + return nil +} + +func (c *RemoteClient) Delete() error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("error getting bucket %s: %w", c.bucketName, err) + } + + log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile) + + if err := bucket.DeleteObject(c.stateFile); err != nil { + return fmt.Errorf("error deleting state %s: %w", c.stateFile, err) + } + + if err := c.deleteMD5(); err != nil { + log.Printf("[WARN] Error deleting state MD5: %s", err) + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.otsTable == "" { + return "", nil + } + + info.Path = c.lockPath() + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath(), + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "Info", + Value: string(info.Marshal()), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST, + }, + } + + log.Printf("[DEBUG] Recording state lock in tablestore: %#v; LOCKID:%s", putParams, c.lockPath()) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + if err != nil { + err = fmt.Errorf("invoking PutRow got an error: %w", err) + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, fmt.Errorf("\ngetting lock info got an error: %w", infoErr)) + } + lockErr := &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + log.Printf("[ERROR] state lock error: %s", lockErr.Error()) + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5() ([]byte, error) { + if c.otsTable == 
"" { + return nil, nil + } + + getParams := &tablestore.SingleRowQueryCriteria{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath() + stateIDSuffix, + }, + }, + }, + ColumnsToGet: []string{pkName, "Digest"}, + MaxVersion: 1, + } + + log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams) + + object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ + SingleRowQueryCriteria: getParams, + }) + + if err != nil { + return nil, err + } + + var val string + if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 { + val = v[0].Value.(string) + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state to that clients can check for stale state files. +func (c *RemoteClient) putMD5(sum []byte) error { + if c.otsTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: pkName, + Value: c.lockPath() + stateIDSuffix, + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "Digest", + Value: hex.EncodeToString(sum), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE, + }, + } + + log.Printf("[DEBUG] Recoring state serial in tablestore: %#v", putParams) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + + if err != nil { + log.Printf("[WARN] failed to record state serial in tablestore: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.otsTable == "" { + return nil + } + + params := 
&tablestore.DeleteRowRequest{
+		DeleteRowChange: &tablestore.DeleteRowChange{
+			TableName: c.otsTable,
+			PrimaryKey: &tablestore.PrimaryKey{
+				PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+					{
+						ColumnName: pkName,
+						Value:      c.lockPath() + stateIDSuffix,
+					},
+				},
+			},
+			Condition: &tablestore.RowCondition{
+				RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
+			},
+		},
+	}
+
+	log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params)
+
+	if _, err := c.otsClient.DeleteRow(params); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getLockInfo reads the current lock row from TableStore and decodes
+// its JSON "Info" column into a statemgr.LockInfo.
+func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
+	getParams := &tablestore.SingleRowQueryCriteria{
+		TableName: c.otsTable,
+		PrimaryKey: &tablestore.PrimaryKey{
+			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+				{
+					ColumnName: pkName,
+					Value:      c.lockPath(),
+				},
+			},
+		},
+		ColumnsToGet: []string{pkName, "Info"},
+		MaxVersion:   1,
+	}
+
+	log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams)
+
+	object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
+		SingleRowQueryCriteria: getParams,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var infoData string
+	if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 {
+		infoData = v[0].Value.(string)
+	}
+	lockInfo := &statemgr.LockInfo{}
+	err = json.Unmarshal([]byte(infoData), lockInfo)
+	if err != nil {
+		return nil, err
+	}
+	return lockInfo, nil
+}
+
+// Unlock releases the state lock by deleting the lock row, after
+// verifying that the given ID matches the current lock holder. It is a
+// no-op when no TableStore table is configured.
+func (c *RemoteClient) Unlock(id string) error {
+	if c.otsTable == "" {
+		return nil
+	}
+
+	lockErr := &statemgr.LockError{}
+
+	lockInfo, err := c.getLockInfo()
+	if err != nil {
+		lockErr.Err = fmt.Errorf("failed to retrieve lock info: %w", err)
+		return lockErr
+	}
+	lockErr.Info = lockInfo
+
+	if lockInfo.ID != id {
+		lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
+		return lockErr
+	}
+	params := &tablestore.DeleteRowRequest{
+		DeleteRowChange: &tablestore.DeleteRowChange{
+			TableName: c.otsTable,
+			PrimaryKey: &tablestore.PrimaryKey{
+				PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+					{
+						ColumnName: pkName,
+						Value:      c.lockPath(),
+					},
+				},
+			},
+			Condition: &tablestore.RowCondition{
+				RowExistenceExpectation: tablestore.RowExistenceExpectation_IGNORE,
+			},
+		},
+	}
+
+	_, err = c.otsClient.DeleteRow(params)
+
+	if err != nil {
+		lockErr.Err = err
+		return lockErr
+	}
+
+	return nil
+}
+
+// lockPath returns the TableStore primary-key value used for this
+// state's lock and digest rows: "<bucket>/<stateFile>".
+func (c *RemoteClient) lockPath() string {
+	return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile)
+}
+
+// getObj downloads the state object from OSS and wraps it, together
+// with its MD5, as a remote.Payload. It returns (nil, nil) when the
+// object does not exist or is empty.
+func (c *RemoteClient) getObj() (*remote.Payload, error) {
+	bucket, err := c.ossClient.Bucket(c.bucketName)
+	if err != nil {
+		return nil, fmt.Errorf("error getting bucket %s: %w", c.bucketName, err)
+	}
+
+	if exist, err := bucket.IsObjectExist(c.stateFile); err != nil {
+		return nil, fmt.Errorf("estimating object %s is exist got an error: %w", c.stateFile, err)
+	} else if !exist {
+		return nil, nil
+	}
+
+	var options []oss.Option
+	// NOTE(review): output looks like an io.ReadCloser that is never
+	// closed here — possible connection leak; confirm against the OSS SDK.
+	output, err := bucket.GetObject(c.stateFile, options...)
+	if err != nil {
+		return nil, fmt.Errorf("error getting object: %w", err)
+	}
+
+	buf := bytes.NewBuffer(nil)
+	if _, err := io.Copy(buf, output); err != nil {
+		return nil, fmt.Errorf("failed to read remote state: %w", err)
+	}
+	sum := md5.Sum(buf.Bytes())
+	payload := &remote.Payload{
+		Data: buf.Bytes(),
+		MD5:  sum[:],
+	}
+
+	// If there was no data, then return nil
+	if len(payload.Data) == 0 {
+		return nil, nil
+	}
+
+	return payload, nil
+}
+
+// errBadChecksumFmt is the user-facing message emitted when the state
+// read from OSS does not match the digest recorded in TableStore.
+const errBadChecksumFmt = `state data in OSS does not have the expected content.
+
+This may be caused by unusually long delays in OSS processing a previous state
+update. Please wait for a minute or two and try again. 
If this problem +persists, and neither OSS nor TableStore are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +TableStore table to the following value: %x` diff --git a/pkg/backend/remote-state/oss/client_test.go b/pkg/backend/remote-state/oss/client_test.go new file mode 100644 index 00000000000..e435c7c2840 --- /dev/null +++ b/pkg/backend/remote-state/oss/client_test.go @@ -0,0 +1,383 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package oss + +import ( + "fmt" + "strings" + "testing" + "time" + + "bytes" + "crypto/md5" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// NOTE: Before running this testcase, please create a OTS instance called 'tf-oss-remote' +var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com" + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + path := "testState" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + })).(*Backend) + + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", 
time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +// verify that the backend can handle more than one state in the same table +func TestRemoteClientLocks_multipleStates(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": 
tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr("s1") + if err != nil { + t.Fatal(err) + } + if _, err := s1.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatal("failed to get lock for s1:", err) + } + + // s1 is now locked, s2 should not be locked as it's a different state file + s2, err := b2.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if _, err := s2.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatal("failed to get lock for s2:", err) + } +} + +// verify that we can unlock a state with an existing lock +func TestRemoteForceUnlock(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "encrypt": true, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + 
info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal("failed to get default state to force unlock:", err) + } + + if err := s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock default state") + } + + // now try the same thing with a named state + // first test with default + s1, err = b1.StateMgr("test") + if err != nil { + t.Fatal(err) + } + + info = statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err = s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // s1 is now locked, get the same state through s2 and unlock it + s2, err = b2.StateMgr("test") + if err != nil { + t.Fatal("failed to get named state to force unlock:", err) + } + + if err = s2.Unlock(lockID); err != nil { + t.Fatal("failed to force-unlock named state") + } +} + +func TestRemoteClient_clientMD5(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b.ossClient, bucketName) + defer deleteOSSBucket(t, b.ossClient, bucketName) + createTablestoreTable(t, b.otsClient, tableName) + defer deleteTablestoreTable(t, b.otsClient, tableName) + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client := s.(*remote.State).Client.(*RemoteClient) + + sum := md5.Sum([]byte("test")) + + if err := client.putMD5(sum[:]); err != nil { + t.Fatal(err) + } + + 
getSum, err := client.getMD5() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(getSum, sum[:]) { + t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum) + } + + if err := client.deleteMD5(); err != nil { + t.Fatal(err) + } + + if getSum, err := client.getMD5(); err == nil { + t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum) + } +} + +// verify that a client won't return a state with an incorrect checksum. +func TestRemoteClient_stateChecksum(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix()) + tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix()) + path := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + "tablestore_table": tableName, + "tablestore_endpoint": RemoteTestUsedOTSEndpoint, + })).(*Backend) + + createOSSBucket(t, b1.ossClient, bucketName) + defer deleteOSSBucket(t, b1.ossClient, bucketName) + createTablestoreTable(t, b1.otsClient, tableName) + defer deleteTablestoreTable(t, b1.otsClient, tableName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client1 := s1.(*remote.State).Client + + // create an old and new state version to persist + s := statemgr.TestFullInitialState() + sf := &statefile.File{State: s} + var oldState bytes.Buffer + if err := statefile.Write(sf, &oldState, encryption.StateEncryptionDisabled()); err != nil { + t.Fatal(err) + } + sf.Serial++ + var newState bytes.Buffer + if err := statefile.Write(sf, &newState, encryption.StateEncryptionDisabled()); err != nil { + t.Fatal(err) + } + + // Use b2 without a tablestore_table to bypass the lock table to write the state directly. 
+ // client2 will write the "incorrect" state, simulating oss eventually consistency delays + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "prefix": path, + })).(*Backend) + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + client2 := s2.(*remote.State).Client + + // write the new state through client2 so that there is no checksum yet + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // verify that we can pull a state without a checksum + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } + + // write the new state back with its checksum + if err := client1.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + + // put an empty state in place to check for panics during get + if err := client2.Put([]byte{}); err != nil { + t.Fatal(err) + } + + // remove the timeouts so we can fail immediately + origTimeout := consistencyRetryTimeout + origInterval := consistencyRetryPollInterval + defer func() { + consistencyRetryTimeout = origTimeout + consistencyRetryPollInterval = origInterval + }() + consistencyRetryTimeout = 0 + consistencyRetryPollInterval = 0 + + // fetching an empty state through client1 should now error out due to a + // mismatched checksum. + if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // put the old state in place of the new, without updating the checksum + if err := client2.Put(oldState.Bytes()); err != nil { + t.Fatal(err) + } + + // fetching the wrong state through client1 should now error out due to a + // mismatched checksum. 
+ if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) { + t.Fatalf("expected state checksum error: got %s", err) + } + + // update the state with the correct one after we Get again + testChecksumHook = func() { + if err := client2.Put(newState.Bytes()); err != nil { + t.Fatal(err) + } + testChecksumHook = nil + } + + consistencyRetryTimeout = origTimeout + + // this final Get will fail to fail the checksum verification, the above + // callback will update the state with the correct version, and Get should + // retry automatically. + if _, err := client1.Get(); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/backend/remote-state/pg/backend.go b/pkg/backend/remote-state/pg/backend.go new file mode 100644 index 00000000000..9109598454f --- /dev/null +++ b/pkg/backend/remote-state/pg/backend.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pg + +import ( + "context" + "database/sql" + "fmt" + "os" + "strconv" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/legacy/helper/schema" + "github.com/lib/pq" +) + +const ( + statesTableName = "states" + statesIndexName = "states_by_name" +) + +func defaultBoolFunc(k string, dv bool) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return strconv.ParseBool(v) + } + + return dv, nil + } +} + +// New creates a new backend for Postgres remote state. 
+func New(enc encryption.StateEncryption) backend.Backend {
+	s := &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			"conn_str": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Postgres connection string; a `postgres://` URL",
+				DefaultFunc: schema.EnvDefaultFunc("PG_CONN_STR", nil),
+			},
+
+			"schema_name": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Name of the automatically managed Postgres schema to store state",
+				DefaultFunc: schema.EnvDefaultFunc("PG_SCHEMA_NAME", "terraform_remote_state"),
+			},
+
+			"skip_schema_creation": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "If set to `true`, OpenTofu won't try to create the Postgres schema",
+				DefaultFunc: defaultBoolFunc("PG_SKIP_SCHEMA_CREATION", false),
+			},
+
+			"skip_table_creation": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "If set to `true`, OpenTofu won't try to create the Postgres table",
+				DefaultFunc: defaultBoolFunc("PG_SKIP_TABLE_CREATION", false),
+			},
+
+			"skip_index_creation": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "If set to `true`, OpenTofu won't try to create the Postgres index",
+				DefaultFunc: defaultBoolFunc("PG_SKIP_INDEX_CREATION", false),
+			},
+		},
+	}
+
+	result := &Backend{Backend: s, encryption: enc}
+	result.Backend.ConfigureFunc = result.configure
+	return result
+}
+
+// Backend is a remote-state backend that stores state rows in a
+// Postgres table inside a dedicated schema.
+type Backend struct {
+	*schema.Backend
+	encryption encryption.StateEncryption
+
+	// The fields below are set from configure
+	db         *sql.DB
+	configData *schema.ResourceData
+	connStr    string
+	schemaName string
+}
+
+// configure opens the database connection and, unless the respective
+// skip_* options are set, creates the schema, states table, and unique
+// name index that the backend relies on.
+func (b *Backend) configure(ctx context.Context) error {
+	// Grab the resource data
+	b.configData = schema.FromContextBackendConfig(ctx)
+	data := b.configData
+
+	b.connStr = data.Get("conn_str").(string)
+	// schemaName is stored pre-quoted (pq.QuoteIdentifier) so it can be
+	// interpolated directly into SQL statements below.
+	b.schemaName = pq.QuoteIdentifier(data.Get("schema_name").(string))
+
+	db, err := sql.Open("postgres", b.connStr)
+	if err != nil {
+		return err
+	}
+
+	// Prepare database schema, tables, & indexes.
+	var query string
+
+	if !data.Get("skip_schema_creation").(bool) {
+		// list all schemas to see if it exists
+		var count int
+		query = `select count(1) from information_schema.schemata where schema_name = $1`
+		if err := db.QueryRow(query, data.Get("schema_name").(string)).Scan(&count); err != nil {
+			return err
+		}
+
+		// skip schema creation if schema already exists
+		// `CREATE SCHEMA IF NOT EXISTS` is to be avoided if ever
+		// a user hasn't been granted the `CREATE SCHEMA` privilege
+		if count < 1 {
+			// tries to create the schema
+			query = `CREATE SCHEMA IF NOT EXISTS %s`
+			if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil {
+				return err
+			}
+		}
+	}
+
+	if !data.Get("skip_table_creation").(bool) {
+		// The id sequence lives in public so it is shared across schemas.
+		if _, err := db.Exec("CREATE SEQUENCE IF NOT EXISTS public.global_states_id_seq AS bigint"); err != nil {
+			return err
+		}
+
+		query = `CREATE TABLE IF NOT EXISTS %s.%s (
+			id bigint NOT NULL DEFAULT nextval('public.global_states_id_seq') PRIMARY KEY,
+			name text UNIQUE,
+			data text
+			)`
+		if _, err := db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName)); err != nil {
+			return err
+		}
+	}
+
+	if !data.Get("skip_index_creation").(bool) {
+		query = `CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)`
+		if _, err := db.Exec(fmt.Sprintf(query, statesIndexName, b.schemaName, statesTableName)); err != nil {
+			return err
+		}
+	}
+
+	// Assign db after its schema is prepared.
+	b.db = db
+
+	return nil
+}
diff --git a/pkg/backend/remote-state/pg/backend_state.go b/pkg/backend/remote-state/pg/backend_state.go
new file mode 100644
index 00000000000..47cf16924e7
--- /dev/null
+++ b/pkg/backend/remote-state/pg/backend_state.go
@@ -0,0 +1,121 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pg
+
+import (
+	"fmt"
+
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/states"
+	"github.com/kubegems/opentofu/pkg/states/remote"
+	"github.com/kubegems/opentofu/pkg/states/statemgr"
+)
+
+// Workspaces lists the workspace names stored in the states table.
+// "default" is always returned first, even before it has a row.
+func (b *Backend) Workspaces() ([]string, error) {
+	query := `SELECT name FROM %s.%s WHERE name != 'default' ORDER BY name`
+	rows, err := b.db.Query(fmt.Sprintf(query, b.schemaName, statesTableName))
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	result := []string{
+		backend.DefaultStateName,
+	}
+
+	for rows.Next() {
+		var name string
+		if err := rows.Scan(&name); err != nil {
+			return nil, err
+		}
+		result = append(result, name)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DeleteWorkspace removes the given workspace's state row. Deleting
+// the default workspace (or an empty name) is refused.
+func (b *Backend) DeleteWorkspace(name string, _ bool) error {
+	if name == backend.DefaultStateName || name == "" {
+		return fmt.Errorf("can't delete default state")
+	}
+
+	query := `DELETE FROM %s.%s WHERE name = $1`
+	_, err := b.db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName), name)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// StateMgr returns a state manager for the named workspace. If the
+// workspace does not exist yet, it writes an empty sentinel state under
+// lock so Workspaces() will subsequently report it.
+func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
+	// Build the state client
+	var stateMgr statemgr.Full = remote.NewState(
+		&RemoteClient{
+			Client:     b.db,
+			Name:       name,
+			SchemaName: b.schemaName,
+		},
+		b.encryption,
+	)
+
+	// Check to see if this state already exists.
+	// If the state doesn't exist, we have to assume this
+	// is a normal create operation, and take the lock at that point.
+	existing, err := b.Workspaces()
+	if err != nil {
+		return nil, err
+	}
+
+	exists := false
+	for _, s := range existing {
+		if s == name {
+			exists = true
+			break
+		}
+	}
+
+	// Grab a lock, we use this to write an empty state if one doesn't
+	// exist already. We have to write an empty state as a sentinel value
+	// so Workspaces() knows it exists.
+	if !exists {
+		lockInfo := statemgr.NewLockInfo()
+		lockInfo.Operation = "init"
+		lockId, err := stateMgr.Lock(lockInfo)
+		if err != nil {
+			return nil, fmt.Errorf("failed to lock state in Postgres: %w", err)
+		}
+
+		// Local helper function so we can call it multiple places
+		lockUnlock := func(parent error) error {
+			if err := stateMgr.Unlock(lockId); err != nil {
+				return fmt.Errorf("error unlocking Postgres state: %w", err)
+			}
+			return parent
+		}
+
+		if v := stateMgr.State(); v == nil {
+			if err := stateMgr.WriteState(states.NewState()); err != nil {
+				err = lockUnlock(err)
+				return nil, err
+			}
+			if err := stateMgr.PersistState(nil); err != nil {
+				err = lockUnlock(err)
+				return nil, err
+			}
+		}
+
+		// Unlock, the state should now be initialized
+		if err := lockUnlock(nil); err != nil {
+			return nil, err
+		}
+	}
+
+	return stateMgr, nil
+}
diff --git a/pkg/backend/remote-state/pg/backend_test.go b/pkg/backend/remote-state/pg/backend_test.go
new file mode 100644
index 00000000000..3f0b6dd8415
--- /dev/null
+++ b/pkg/backend/remote-state/pg/backend_test.go
@@ -0,0 +1,526 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pg
+
+// Create the test database: createdb terraform_backend_pg_test
+// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/kubegems/opentofu/backend/remote-state/pg
+
+import (
+	"database/sql"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/hcl/v2/hcldec"
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/encryption"
+	"github.com/kubegems/opentofu/pkg/states/remote"
+	"github.com/kubegems/opentofu/pkg/states/statemgr"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+	"github.com/lib/pq"
+)
+
+// Function to skip a test unless in ACCeptance test mode.
+// +// A running Postgres server identified by env variable +// DATABASE_URL is required for acceptance tests. +func testACC(t *testing.T) (connectionURI *url.URL) { + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_PG_TEST") == "" + if skip { + t.Log("pg backend tests requires setting TF_ACC or TF_PG_TEST") + t.Skip() + } + databaseUrl, found := os.LookupEnv("DATABASE_URL") + if !found { + t.Fatal("pg backend tests require setting DATABASE_URL") + } + + u, err := url.Parse(databaseUrl) + if err != nil { + t.Fatal(err) + } + return u +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig(t *testing.T) { + connectionURI := testACC(t) + connStr := os.Getenv("DATABASE_URL") + + user := connectionURI.User.Username() + password, _ := connectionURI.User.Password() + databaseName := connectionURI.Path[1:] + + connectionURIObfuscated := connectionURI + connectionURIObfuscated.User = nil + + testCases := []struct { + Name string + EnvVars map[string]string + Config map[string]interface{} + ExpectConfigurationError string + ExpectConnectionError string + }{ + { + Name: "valid-config", + Config: map[string]interface{}{ + "conn_str": connStr, + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + }, + { + Name: "missing-conn_str-defaults-to-localhost", + EnvVars: map[string]string{ + "PGSSLMODE": "disable", + "PGDATABASE": databaseName, + "PGUSER": user, + "PGPASSWORD": password, + }, + Config: map[string]interface{}{ + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + }, + { + Name: "conn-str-env-var", + EnvVars: map[string]string{ + "PG_CONN_STR": connStr, + }, + Config: map[string]interface{}{ + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + }, + { + Name: "setting-credentials-using-env-vars", + EnvVars: map[string]string{ + "PGUSER": "baduser", + "PGPASSWORD": "badpassword", + }, + Config: map[string]interface{}{ + "conn_str": connectionURIObfuscated.String(), + "schema_name": 
fmt.Sprintf("terraform_%s", t.Name()), + }, + ExpectConnectionError: `authentication failed for user "baduser"`, + }, + { + Name: "host-in-env-vars", + EnvVars: map[string]string{ + "PGHOST": "hostthatdoesnotexist", + }, + Config: map[string]interface{}{ + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + ExpectConnectionError: `no such host`, + }, + { + Name: "boolean-env-vars", + EnvVars: map[string]string{ + "PGSSLMODE": "disable", + "PG_SKIP_SCHEMA_CREATION": "f", + "PG_SKIP_TABLE_CREATION": "f", + "PG_SKIP_INDEX_CREATION": "f", + "PGDATABASE": databaseName, + }, + Config: map[string]interface{}{ + "conn_str": connStr, + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + }, + { + Name: "wrong-boolean-env-vars", + EnvVars: map[string]string{ + "PGSSLMODE": "disable", + "PG_SKIP_SCHEMA_CREATION": "foo", + "PGDATABASE": databaseName, + }, + Config: map[string]interface{}{ + "schema_name": fmt.Sprintf("terraform_%s", t.Name()), + }, + ExpectConfigurationError: `error getting default for "skip_schema_creation"`, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + for k, v := range tc.EnvVars { + t.Setenv(k, v) + } + + config := backend.TestWrapConfig(tc.Config) + schemaName := pq.QuoteIdentifier(tc.Config["schema_name"].(string)) + + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + var diags tfdiags.Diagnostics + b := New(encryption.StateEncryptionDisabled()).(*Backend) + schema := b.ConfigSchema() + spec := schema.DecoderSpec() + obj, decDiags := hcldec.Decode(config, spec, nil) + diags = diags.Append(decDiags) + + newObj, valDiags := b.PrepareConfig(obj) + diags = diags.Append(valDiags.InConfigBody(config, "")) + + if tc.ExpectConfigurationError != "" { + if !diags.HasErrors() { + t.Fatal("error expected but got none") + } + if !strings.Contains(diags.ErrWithWarnings().Error(), 
tc.ExpectConfigurationError) { + t.Fatalf("failed to find %q in %s", tc.ExpectConfigurationError, diags.ErrWithWarnings()) + } + return + } else if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + obj = newObj + + confDiags := b.Configure(obj) + if tc.ExpectConnectionError != "" { + err := confDiags.InConfigBody(config, "").ErrWithWarnings() + if err == nil { + t.Fatal("error expected but got none") + } + if !strings.Contains(err.Error(), tc.ExpectConnectionError) { + t.Fatalf("failed to find %q in %s", tc.ExpectConnectionError, err) + } + return + } else if len(confDiags) != 0 { + confDiags = confDiags.InConfigBody(config, "") + t.Fatal(confDiags.ErrWithWarnings()) + } + + if b == nil { + t.Fatal("Backend could not be configured") + } + + _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + + _, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + c := s.(*remote.State).Client.(*RemoteClient) + if c.Name != backend.DefaultStateName { + t.Fatal("RemoteClient name is not configured") + } + + backend.TestBackendStates(t, b) + }) + } + +} + +func TestBackendConfigSkipOptions(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + + testCases := []struct { + Name string + SkipSchemaCreation bool + SkipTableCreation bool + SkipIndexCreation bool + TestIndexIsPresent bool + Setup func(t *testing.T, db *sql.DB, schemaName string) + }{ + { + Name: "skip_schema_creation", + SkipSchemaCreation: true, + TestIndexIsPresent: true, + Setup: func(t *testing.T, db *sql.DB, schemaName string) { + // create the schema as a prerequisites + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "skip_table_creation", + SkipTableCreation: true, + TestIndexIsPresent: true, + Setup: 
func(t *testing.T, db *sql.DB, schemaName string) { + // since the table needs to be already created the schema must be too + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( + id SERIAL PRIMARY KEY, + name TEXT, + data TEXT + )`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "skip_index_creation", + SkipIndexCreation: true, + TestIndexIsPresent: true, + Setup: func(t *testing.T, db *sql.DB, schemaName string) { + // Everything need to exists for the index to be created + _, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s ( + id SERIAL PRIMARY KEY, + name TEXT, + data TEXT + )`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Exec(fmt.Sprintf(`CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)`, statesIndexName, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + }, + }, + { + Name: "missing_index", + SkipIndexCreation: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + schemaName := tc.Name + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + "skip_schema_creation": tc.SkipSchemaCreation, + "skip_table_creation": tc.SkipTableCreation, + "skip_index_creation": tc.SkipIndexCreation, + }) + schemaName = pq.QuoteIdentifier(schemaName) + db, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + + if tc.Setup != nil { + tc.Setup(t, db, schemaName) + } + defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + // Make sure everything has been created + + // This 
tests that both the schema and the table have been created + _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + if tc.TestIndexIsPresent { + // Make sure that the index exists + query := `select count(*) from pg_indexes where schemaname=$1 and tablename=$2 and indexname=$3;` + var count int + if err := b.db.QueryRow(query, tc.Name, statesTableName, statesIndexName).Scan(&count); err != nil { + t.Fatal(err) + } + if count != 1 { + t.Fatalf("The index has not been created (%d)", count) + } + } + + _, err = b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + c := s.(*remote.State).Client.(*RemoteClient) + if c.Name != backend.DefaultStateName { + t.Fatal("RemoteClient name is not configured") + } + + // Make sure that all workspace must have a unique name + _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (100, 'unique_name_test', '')`, schemaName, statesTableName)) + if err != nil { + t.Fatal(err) + } + _, err = db.Exec(fmt.Sprintf(`INSERT INTO %s.%s VALUES (101, 'unique_name_test', '')`, schemaName, statesTableName)) + if err == nil { + t.Fatal("Creating two workspaces with the same name did not raise an error") + } + }) + } + +} + +func TestBackendStates(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + + testCases := []string{ + fmt.Sprintf("terraform_%s", t.Name()), + fmt.Sprintf("test with spaces: %s", t.Name()), + } + for _, schemaName := range testCases { + t.Run(schemaName, func(t *testing.T) { + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", pq.QuoteIdentifier(schemaName))) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, 
New(encryption.StateEncryptionDisabled()), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + backend.TestBackendStates(t, b) + }) + } +} + +func TestBackendStateLocks(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + bb := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + + if bb == nil { + t.Fatal("Backend could not be configured") + } + + backend.TestBackendStateLocks(t, b, bb) +} + +func TestBackendConcurrentLock(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + + getStateMgr := func(schemaName string) (statemgr.Full, *statemgr.LockInfo) { + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + stateMgr, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("Failed to get the state manager: %v", err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = schemaName + + return stateMgr, info + } + + s1, i1 := getStateMgr(fmt.Sprintf("terraform_%s_1", t.Name())) + s2, i2 := getStateMgr(fmt.Sprintf("terraform_%s_2", t.Name())) + 
+ // First we need to create the workspace as the lock for creating them is + // global + lockID1, err := s1.Lock(i1) + if err != nil { + t.Fatalf("failed to lock first state: %v", err) + } + + if err = s1.PersistState(nil); err != nil { + t.Fatalf("failed to persist state: %v", err) + } + + if err := s1.Unlock(lockID1); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + lockID2, err := s2.Lock(i2) + if err != nil { + t.Fatalf("failed to lock second state: %v", err) + } + + if err = s2.PersistState(nil); err != nil { + t.Fatalf("failed to persist state: %v", err) + } + + if err := s2.Unlock(lockID2); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + // Now we can test concurrent lock + lockID1, err = s1.Lock(i1) + if err != nil { + t.Fatalf("failed to lock first state: %v", err) + } + + lockID2, err = s2.Lock(i2) + if err != nil { + t.Fatalf("failed to lock second state: %v", err) + } + + if err := s1.Unlock(lockID1); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } + + if err := s2.Unlock(lockID2); err != nil { + t.Fatalf("failed to unlock first state: %v", err) + } +} + +func getDatabaseUrl() string { + return os.Getenv("DATABASE_URL") +} diff --git a/pkg/backend/remote-state/pg/client.go b/pkg/backend/remote-state/pg/client.go new file mode 100644 index 00000000000..a3c0073fac8 --- /dev/null +++ b/pkg/backend/remote-state/pg/client.go @@ -0,0 +1,147 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pg + +import ( + "crypto/md5" + "database/sql" + "fmt" + + uuid "github.com/hashicorp/go-uuid" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" + _ "github.com/lib/pq" +) + +// RemoteClient is a remote client that stores data in a Postgres database +type RemoteClient struct { + Client *sql.DB + Name string + SchemaName string + + info *statemgr.LockInfo +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + query := `SELECT data FROM %s.%s WHERE name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + var data []byte + err := row.Scan(&data) + switch { + case err == sql.ErrNoRows: + // No existing state returns empty. + return nil, nil + case err != nil: + return nil, err + default: + md5 := md5.Sum(data) + return &remote.Payload{ + Data: data, + MD5: md5[:], + }, nil + } +} + +func (c *RemoteClient) Put(data []byte) error { + query := `INSERT INTO %s.%s (name, data) VALUES ($1, $2) + ON CONFLICT (name) DO UPDATE + SET data = $2 WHERE %s.name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName, statesTableName), c.Name, data) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Delete() error { + query := `DELETE FROM %s.%s WHERE name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + var err error + var lockID string + + if info.ID == "" { + lockID, err = uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + // Local helper function so we can call it multiple places + // + lockUnlock := func(pgLockId string) error { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, pgLockId)) + var didUnlock []byte + err := row.Scan(&didUnlock) 
+ if err != nil { + return &statemgr.LockError{Info: info, Err: err} + } + return nil + } + + // Try to acquire locks for the existing row `id` and the creation lock `-1`. + query := `SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(-1) FROM %s.%s WHERE %s.name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, statesTableName, statesTableName, c.SchemaName, statesTableName, statesTableName), c.Name) + var pgLockId, didLock, didLockForCreate []byte + err = row.Scan(&pgLockId, &didLock, &didLockForCreate) + switch { + case err == sql.ErrNoRows: + // No rows means we're creating the workspace. Take the creation lock. + innerRow := c.Client.QueryRow(`SELECT pg_try_advisory_lock(-1)`) + var innerDidLock []byte + err := innerRow.Scan(&innerDidLock) + if err != nil { + return "", &statemgr.LockError{Info: info, Err: err} + } + if string(innerDidLock) == "false" { + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Already locked for workspace creation: %s", c.Name)} + } + info.Path = "-1" + case err != nil: + return "", &statemgr.LockError{Info: info, Err: err} + case string(didLock) == "false": + // Existing workspace is already locked. Release the attempted creation lock. + lockUnlock("-1") + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Workspace is already locked: %s", c.Name)} + case string(didLockForCreate) == "false": + // Someone has the creation lock already. Release the existing workspace because it might not be safe to touch. + lockUnlock(string(pgLockId)) + return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Cannot lock workspace; already locked for workspace creation: %s", c.Name)} + default: + // Existing workspace is now locked. Release the attempted creation lock. 
+ lockUnlock("-1") + info.Path = string(pgLockId) + } + c.info = info + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) { + return c.info, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.info != nil && c.info.Path != "" { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, c.info.Path)) + var didUnlock []byte + err := row.Scan(&didUnlock) + if err != nil { + return &statemgr.LockError{Info: c.info, Err: err} + } + c.info = nil + } + return nil +} diff --git a/pkg/backend/remote-state/pg/client_test.go b/pkg/backend/remote-state/pg/client_test.go new file mode 100644 index 00000000000..72d15d2cf62 --- /dev/null +++ b/pkg/backend/remote-state/pg/client_test.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pg + +// Create the test database: createdb terraform_backend_pg_test +// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/kubegems/opentofu/backend/remote-state/pg + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), 
config).(*Backend) + + if b == nil { + t.Fatal("Backend could not be configured") + } + + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, s.(*remote.State).Client) +} + +func TestRemoteLocks(t *testing.T) { + testACC(t) + connStr := getDatabaseUrl() + schemaName := fmt.Sprintf("terraform_%s", t.Name()) + dbCleaner, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatal(err) + } + defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)) + + config := backend.TestWrapConfig(map[string]interface{}{ + "conn_str": connStr, + "schema_name": schemaName, + }) + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend) + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} diff --git a/pkg/backend/remote-state/s3/backend.go b/pkg/backend/remote-state/s3/backend.go new file mode 100644 index 00000000000..15abbbe9a64 --- /dev/null +++ b/pkg/backend/remote-state/s3/backend.go @@ -0,0 +1,1278 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "encoding/base64" + "fmt" + "log" + "os" + "regexp" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/s3" + awsbase "github.com/hashicorp/aws-sdk-go-base/v2" + baselogging "github.com/hashicorp/aws-sdk-go-base/v2/logging" + awsbaseValidation "github.com/hashicorp/aws-sdk-go-base/v2/validation" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/version" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +func New(enc encryption.StateEncryption) backend.Backend { + return &Backend{encryption: enc} +} + +type Backend struct { + encryption encryption.StateEncryption + s3Client *s3.Client + dynClient *dynamodb.Client + awsConfig aws.Config + + bucketName string + keyName string + serverSideEncryption bool + customerEncryptionKey []byte + acl string + kmsKeyID string + ddbTable string + workspaceKeyPrefix string + skipS3Checksum bool +} + +// ConfigSchema returns a description of the expected configuration +// structure for the receiving backend. +// This structure is mirrored by the encryption aws_kms key provider and should be kept in sync. 
+func (b *Backend) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bucket": { + Type: cty.String, + Required: true, + Description: "The name of the S3 bucket", + }, + "key": { + Type: cty.String, + Required: true, + Description: "The path to the state file inside the bucket", + }, + "region": { + Type: cty.String, + Optional: true, + Description: "AWS region of the S3 Bucket and DynamoDB Table (if used).", + }, + "endpoints": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "s3": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the S3 API.", + }, + "iam": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the IAM API.", + }, + "sts": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the STS API.", + }, + "dynamodb": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the DynamoDB API.", + }, + }, + }, + }, + "dynamodb_endpoint": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the DynamoDB API. Use `endpoints.dynamodb` instead.", + Deprecated: true, + }, + "endpoint": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the S3 API. Use `endpoints.s3` instead", + Deprecated: true, + }, + "iam_endpoint": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the IAM API. Use `endpoints.iam` instead", + Deprecated: true, + }, + "sts_endpoint": { + Type: cty.String, + Optional: true, + Description: "A custom endpoint for the STS API. 
Use `endpoints.sts` instead", + Deprecated: true, + }, + "sts_region": { + Type: cty.String, + Optional: true, + Description: "The region where AWS STS operations will take place", + }, + "encrypt": { + Type: cty.Bool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + }, + "acl": { + Type: cty.String, + Optional: true, + Description: "Canned ACL to be applied to the state file", + }, + "access_key": { + Type: cty.String, + Optional: true, + Description: "AWS access key", + }, + "secret_key": { + Type: cty.String, + Optional: true, + Description: "AWS secret key", + }, + "kms_key_id": { + Type: cty.String, + Optional: true, + Description: "The ARN of a KMS Key to use for encrypting the state", + }, + "dynamodb_table": { + Type: cty.String, + Optional: true, + Description: "DynamoDB table for state locking and consistency", + }, + "profile": { + Type: cty.String, + Optional: true, + Description: "AWS profile name", + }, + "shared_credentials_file": { + Type: cty.String, + Optional: true, + Description: "Path to a shared credentials file", + }, + "shared_credentials_files": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Paths to a shared credentials files", + }, + "shared_config_files": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Paths to shared config files", + }, + "token": { + Type: cty.String, + Optional: true, + Description: "MFA token", + }, + "skip_credentials_validation": { + Type: cty.Bool, + Optional: true, + Description: "Skip the credentials validation via STS API.", + }, + "skip_metadata_api_check": { + Type: cty.Bool, + Optional: true, + Description: "Skip the AWS Metadata API check.", + }, + "skip_region_validation": { + Type: cty.Bool, + Optional: true, + Description: "Skip static validation of region name.", + }, + "skip_requesting_account_id": { + Type: cty.Bool, + Optional: true, + Description: "Skip requesting the account ID. 
Useful for AWS API implementations that do not have the IAM, STS API, or metadata API.", + }, + "sse_customer_key": { + Type: cty.String, + Optional: true, + Description: "The base64-encoded encryption key to use for server-side encryption with customer-provided keys (SSE-C).", + Sensitive: true, + }, + "role_arn": { + Type: cty.String, + Optional: true, + Description: "The role to be assumed", + Deprecated: true, + }, + "session_name": { + Type: cty.String, + Optional: true, + Description: "The session name to use when assuming the role.", + Deprecated: true, + }, + "external_id": { + Type: cty.String, + Optional: true, + Description: "The external ID to use when assuming the role", + Deprecated: true, + }, + "assume_role_duration_seconds": { + Type: cty.Number, + Optional: true, + Description: "Seconds to restrict the assume role session duration.", + Deprecated: true, + }, + "assume_role_policy": { + Type: cty.String, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + Deprecated: true, + }, + "assume_role_policy_arns": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + Deprecated: true, + }, + "assume_role_tags": { + Type: cty.Map(cty.String), + Optional: true, + Description: "Assume role session tags.", + Deprecated: true, + }, + "assume_role_transitive_tag_keys": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Assume role session tag keys to pass to any subsequent sessions.", + Deprecated: true, + }, + "workspace_key_prefix": { + Type: cty.String, + Optional: true, + Description: "The prefix applied to the non-default state path inside the bucket.", + }, + "force_path_style": { + Type: cty.Bool, + Optional: true, + Description: "Force s3 to use path style api. 
Use `use_path_style` instead.", + Deprecated: true, + }, + "use_path_style": { + Type: cty.Bool, + Optional: true, + Description: "Enable path-style S3 URLs.", + }, + "retry_mode": { + Type: cty.String, + Optional: true, + Description: "Specifies how retries are attempted. Valid values are `standard` and `adaptive`.", + }, + "max_retries": { + Type: cty.Number, + Optional: true, + Description: "The maximum number of times an AWS API request is retried on retryable failure.", + }, + "custom_ca_bundle": { + Type: cty.String, + Optional: true, + Description: "File containing custom root and intermediate certificates. Can also be configured using the `AWS_CA_BUNDLE` environment variable.", + }, + "ec2_metadata_service_endpoint": { + Type: cty.String, + Optional: true, + Description: "The endpoint of IMDS.", + }, + "ec2_metadata_service_endpoint_mode": { + Type: cty.String, + Optional: true, + Description: "The endpoint mode of IMDS. Valid values: IPv4, IPv6.", + }, + "assume_role": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "role_arn": { + Type: cty.String, + Required: true, + Description: "The role to be assumed.", + }, + "duration": { + Type: cty.String, + Optional: true, + Description: "Seconds to restrict the assume role session duration.", + }, + "external_id": { + Type: cty.String, + Optional: true, + Description: "The external ID to use when assuming the role", + }, + "policy": { + Type: cty.String, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + }, + "policy_arns": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + }, + "session_name": { + Type: cty.String, + Optional: true, + Description: "The session name to use when assuming the role.", + }, 
+ "tags": { + Type: cty.Map(cty.String), + Optional: true, + Description: "Assume role session tags.", + }, + "transitive_tag_keys": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Assume role session tag keys to pass to any subsequent sessions.", + }, + // + // NOT SUPPORTED by `aws-sdk-go-base/v1` + // Cannot be added yet. + // + // "source_identity": stringAttribute{ + // configschema.Attribute{ + // Type: cty.String, + // Optional: true, + // Description: "Source identity specified by the principal assuming the role.", + // ValidateFunc: validAssumeRoleSourceIdentity, + // }, + // }, + }, + }, + }, + "assume_role_with_web_identity": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "role_arn": { + Type: cty.String, + Optional: true, + Description: "The Amazon Resource Name (ARN) role to assume.", + }, + "web_identity_token": { + Type: cty.String, + Optional: true, + Sensitive: true, + Description: "The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider.", + }, + "web_identity_token_file": { + Type: cty.String, + Optional: true, + Description: "The path to a file which contains an OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider.", + }, + "session_name": { + Type: cty.String, + Optional: true, + Description: "The name applied to this assume-role session.", + }, + "policy": { + Type: cty.String, + Optional: true, + Description: "IAM Policy JSON describing further restricting permissions for the IAM Role being assumed.", + }, + "policy_arns": { + Type: cty.Set(cty.String), + Optional: true, + Description: "Amazon Resource Names (ARNs) of IAM Policies describing further restricting permissions for the IAM Role being assumed.", + }, + "duration": { + Type: cty.String, + Optional: true, + Description: "The duration, between 15 minutes and 12 hours, of the role session. 
Valid time units are ns, us (or µs), ms, s, h, or m.", + }, + }, + }, + }, + "forbidden_account_ids": { + Type: cty.Set(cty.String), + Optional: true, + Description: "List of forbidden AWS account IDs.", + }, + "allowed_account_ids": { + Type: cty.Set(cty.String), + Optional: true, + Description: "List of allowed AWS account IDs.", + }, + "http_proxy": { + Type: cty.String, + Optional: true, + Description: "The address of an HTTP proxy to use when accessing the AWS API.", + }, + "https_proxy": { + Type: cty.String, + Optional: true, + Description: "The address of an HTTPS proxy to use when accessing the AWS API.", + }, + "no_proxy": { + Type: cty.String, + Optional: true, + Description: `Comma-separated values which specify hosts that should be excluded from proxying. +See details: https://cs.opensource.google/go/x/net/+/refs/tags/v0.17.0:http/httpproxy/proxy.go;l=38-50.`, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + Description: "Explicitly allow the backend to perform \"insecure\" SSL requests.", + }, + "use_dualstack_endpoint": { + Type: cty.Bool, + Optional: true, + Description: "Resolve an endpoint with DualStack capability.", + }, + "use_fips_endpoint": { + Type: cty.Bool, + Optional: true, + Description: "Resolve an endpoint with FIPS capability.", + }, + "skip_s3_checksum": { + Type: cty.Bool, + Optional: true, + Description: "Do not include checksum when uploading S3 Objects. Useful for some S3-Compatible APIs as some of them do not support checksum checks.", + }, + }, + } +} + +// PrepareConfig checks the validity of the values in the given +// configuration, and inserts any missing defaults, assuming that its +// structure has already been validated per the schema returned by +// ConfigSchema. 
+func (b *Backend) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return obj, diags + } + + if val := obj.GetAttr("bucket"); val.IsNull() || val.AsString() == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid bucket value", + `The "bucket" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "bucket"}}, + )) + } + + if val := obj.GetAttr("key"); val.IsNull() || val.AsString() == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid key value", + `The "key" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "key"}}, + )) + } else if strings.HasPrefix(val.AsString(), "/") || strings.HasSuffix(val.AsString(), "/") { + // S3 will strip leading slashes from an object, so while this will + // technically be accepted by S3, it will break our workspace hierarchy. + // S3 will recognize objects with a trailing slash as a directory + // so they should not be valid keys + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid key value", + `The "key" attribute value must not start or end with with "/".`, + cty.Path{cty.GetAttrStep{Name: "key"}}, + )) + } + + if val := obj.GetAttr("region"); val.IsNull() || val.AsString() == "" { + if os.Getenv("AWS_REGION") == "" && os.Getenv("AWS_DEFAULT_REGION") == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Missing region value", + `The "region" attribute or the "AWS_REGION" or "AWS_DEFAULT_REGION" environment variables must be set.`, + cty.Path{cty.GetAttrStep{Name: "region"}}, + )) + } + } + + if val := obj.GetAttr("kms_key_id"); !val.IsNull() && val.AsString() != "" { + if val := obj.GetAttr("sse_customer_key"); !val.IsNull() && val.AsString() != "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid encryption configuration", + encryptionKeyConflictError, + cty.Path{}, + )) + } else if customerKey := 
os.Getenv("AWS_SSE_CUSTOMER_KEY"); customerKey != "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid encryption configuration", + encryptionKeyConflictEnvVarError, + cty.Path{}, + )) + } + + diags = diags.Append(validateKMSKey(cty.Path{cty.GetAttrStep{Name: "kms_key_id"}}, val.AsString())) + } + + if val := obj.GetAttr("workspace_key_prefix"); !val.IsNull() { + if v := val.AsString(); strings.HasPrefix(v, "/") || strings.HasSuffix(v, "/") { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspace_key_prefix value", + `The "workspace_key_prefix" attribute value must not start with "/".`, + cty.Path{cty.GetAttrStep{Name: "workspace_key_prefix"}}, + )) + } + } + + validateAttributesConflict( + cty.GetAttrPath("shared_credentials_file"), + cty.GetAttrPath("shared_credentials_files"), + )(obj, cty.Path{}, &diags) + + attrPath := cty.GetAttrPath("shared_credentials_file") + if val := obj.GetAttr("shared_credentials_file"); !val.IsNull() { + detail := fmt.Sprintf( + `Parameter "%s" is deprecated. Use "%s" instead.`, + pathString(attrPath), + pathString(cty.GetAttrPath("shared_credentials_files"))) + + diags = diags.Append(attributeWarningDiag( + "Deprecated Parameter", + detail, + attrPath)) + } + + if val := obj.GetAttr("force_path_style"); !val.IsNull() { + attrPath := cty.GetAttrPath("force_path_style") + detail := fmt.Sprintf( + `Parameter "%s" is deprecated. 
Use "%s" instead.`, + pathString(attrPath), + pathString(cty.GetAttrPath("use_path_style"))) + + diags = diags.Append(attributeWarningDiag( + "Deprecated Parameter", + detail, + attrPath)) + } + + validateAttributesConflict( + cty.GetAttrPath("force_path_style"), + cty.GetAttrPath("use_path_style"), + )(obj, cty.Path{}, &diags) + + var assumeRoleDeprecatedFields = map[string]string{ + "role_arn": "assume_role.role_arn", + "session_name": "assume_role.session_name", + "external_id": "assume_role.external_id", + "assume_role_duration_seconds": "assume_role.duration", + "assume_role_policy": "assume_role.policy", + "assume_role_policy_arns": "assume_role.policy_arns", + "assume_role_tags": "assume_role.tags", + "assume_role_transitive_tag_keys": "assume_role.transitive_tag_keys", + } + + if val := obj.GetAttr("assume_role"); !val.IsNull() { + diags = diags.Append(validateNestedAssumeRole(val, cty.Path{cty.GetAttrStep{Name: "assume_role"}})) + + if defined := findDeprecatedFields(obj, assumeRoleDeprecatedFields); len(defined) != 0 { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Conflicting Parameters", + `The following deprecated parameters conflict with the parameter "assume_role". Replace them as follows:`+"\n"+ + formatDeprecated(defined), + )) + } + } else { + if defined := findDeprecatedFields(obj, assumeRoleDeprecatedFields); len(defined) != 0 { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + "Deprecated Parameters", + `The following parameters have been deprecated. 
Replace them as follows:`+"\n"+ + formatDeprecated(defined), + )) + } + } + + if val := obj.GetAttr("assume_role_with_web_identity"); !val.IsNull() { + diags = diags.Append(validateAssumeRoleWithWebIdentity(val, cty.GetAttrPath("assume_role_with_web_identity"))) + } + + validateAttributesConflict( + cty.GetAttrPath("allowed_account_ids"), + cty.GetAttrPath("forbidden_account_ids"), + )(obj, cty.Path{}, &diags) + + if val := obj.GetAttr("retry_mode"); !val.IsNull() { + s := val.AsString() + if _, err := aws.ParseRetryMode(s); err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid retry mode", + fmt.Sprintf("Valid values are %q and %q.", aws.RetryModeStandard, aws.RetryModeAdaptive), + cty.Path{cty.GetAttrStep{Name: "retry_mode"}}, + )) + } + } + + for _, endpoint := range customEndpoints { + endpoint.Validate(obj, &diags) + } + + return obj, diags +} + +// Configure uses the provided configuration to set configuration fields +// within the backend. +// +// The given configuration is assumed to have already been validated +// against the schema returned by ConfigSchema and passed validation +// via PrepareConfig. 
+func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return diags + } + + var region string + if v, ok := stringAttrOk(obj, "region"); ok { + region = v + } + + if region != "" && !boolAttr(obj, "skip_region_validation") { + if err := awsbaseValidation.SupportedRegion(region); err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid region value", + err.Error(), + cty.Path{cty.GetAttrStep{Name: "region"}}, + )) + return diags + } + } + + b.bucketName = stringAttr(obj, "bucket") + b.keyName = stringAttr(obj, "key") + b.acl = stringAttr(obj, "acl") + b.workspaceKeyPrefix = stringAttrDefault(obj, "workspace_key_prefix", "env:") + b.serverSideEncryption = boolAttr(obj, "encrypt") + b.kmsKeyID = stringAttr(obj, "kms_key_id") + b.ddbTable = stringAttr(obj, "dynamodb_table") + b.skipS3Checksum = boolAttr(obj, "skip_s3_checksum") + + if customerKey, ok := stringAttrOk(obj, "sse_customer_key"); ok { + if len(customerKey) != 44 { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid sse_customer_key value", + "sse_customer_key must be 44 characters in length", + cty.Path{cty.GetAttrStep{Name: "sse_customer_key"}}, + )) + } else { + var err error + if b.customerEncryptionKey, err = base64.StdEncoding.DecodeString(customerKey); err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid sse_customer_key value", + fmt.Sprintf("sse_customer_key must be base64 encoded: %s", err), + cty.Path{cty.GetAttrStep{Name: "sse_customer_key"}}, + )) + } + } + } else if customerKey := os.Getenv("AWS_SSE_CUSTOMER_KEY"); customerKey != "" { + if len(customerKey) != 44 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid AWS_SSE_CUSTOMER_KEY value", + `The environment variable "AWS_SSE_CUSTOMER_KEY" must be 44 characters in length`, + )) + } else { + var err error + if b.customerEncryptionKey, err = 
base64.StdEncoding.DecodeString(customerKey); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid AWS_SSE_CUSTOMER_KEY value", + fmt.Sprintf(`The environment variable "AWS_SSE_CUSTOMER_KEY" must be base64 encoded: %s`, err), + )) + } + } + } + + ctx := context.TODO() + ctx, baselog := attachLoggerToContext(ctx) + + cfg := &awsbase.Config{ + AccessKey: stringAttr(obj, "access_key"), + CallerDocumentationURL: "https://opentofu.org/docs/language/settings/backends/s3", + CallerName: "S3 Backend", + IamEndpoint: customEndpoints["iam"].String(obj), + MaxRetries: intAttrDefault(obj, "max_retries", 5), + Profile: stringAttr(obj, "profile"), + Region: stringAttr(obj, "region"), + SecretKey: stringAttr(obj, "secret_key"), + SkipCredsValidation: boolAttr(obj, "skip_credentials_validation"), + SkipRequestingAccountId: boolAttr(obj, "skip_requesting_account_id"), + StsEndpoint: customEndpoints["sts"].String(obj), + StsRegion: stringAttr(obj, "sts_region"), + Token: stringAttr(obj, "token"), + + // Note: we don't need to read env variables explicitly because they are read implicitly by aws-sdk-base-go: + // see: https://github.com/hashicorp/aws-sdk-go-base/blob/v2.0.0-beta.41/internal/config/config.go#L133 + // which relies on: https://cs.opensource.google/go/x/net/+/refs/tags/v0.18.0:http/httpproxy/proxy.go;l=89-96 + // + // Note: we are switching to "separate" mode here since the legacy mode is deprecated and should no longer be + // used. 
+ HTTPProxyMode: awsbase.HTTPProxyModeSeparate, + Insecure: boolAttr(obj, "insecure"), + UseDualStackEndpoint: boolAttr(obj, "use_dualstack_endpoint"), + UseFIPSEndpoint: boolAttr(obj, "use_fips_endpoint"), + UserAgent: awsbase.UserAgentProducts{ + {Name: "APN", Version: "1.0"}, + {Name: httpclient.DefaultApplicationName, Version: version.String()}, + }, + CustomCABundle: stringAttrDefaultEnvVar(obj, "custom_ca_bundle", "AWS_CA_BUNDLE"), + EC2MetadataServiceEndpoint: stringAttrDefaultEnvVar(obj, "ec2_metadata_service_endpoint", "AWS_EC2_METADATA_SERVICE_ENDPOINT"), + EC2MetadataServiceEndpointMode: stringAttrDefaultEnvVar(obj, "ec2_metadata_service_endpoint_mode", "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"), + Logger: baselog, + } + + if val, ok := stringAttrOk(obj, "http_proxy"); ok { + cfg.HTTPProxy = &val + } + if val, ok := stringAttrOk(obj, "https_proxy"); ok { + cfg.HTTPSProxy = &val + } + if val, ok := stringAttrOk(obj, "no_proxy"); ok { + cfg.NoProxy = val + } + + if val, ok := boolAttrOk(obj, "skip_metadata_api_check"); ok { + if val { + cfg.EC2MetadataServiceEnableState = imds.ClientDisabled + } else { + cfg.EC2MetadataServiceEnableState = imds.ClientEnabled + } + } + + if val, ok := stringAttrOk(obj, "shared_credentials_file"); ok { + cfg.SharedCredentialsFiles = []string{val} + } + + if value := obj.GetAttr("assume_role"); !value.IsNull() { + cfg.AssumeRole = configureNestedAssumeRole(obj) + } else if value := obj.GetAttr("role_arn"); !value.IsNull() { + cfg.AssumeRole = configureAssumeRole(obj) + } + + if val := obj.GetAttr("assume_role_with_web_identity"); !val.IsNull() { + cfg.AssumeRoleWithWebIdentity = configureAssumeRoleWithWebIdentity(val) + } + + if val, ok := stringSliceAttrDefaultEnvVarOk(obj, "shared_credentials_files", "AWS_SHARED_CREDENTIALS_FILE"); ok { + cfg.SharedCredentialsFiles = val + } + if val, ok := stringSliceAttrDefaultEnvVarOk(obj, "shared_config_files", "AWS_SHARED_CONFIG_FILE"); ok { + cfg.SharedConfigFiles = val + } + + if val, 
ok := stringSliceAttrOk(obj, "allowed_account_ids"); ok { + cfg.AllowedAccountIds = val + } + + if val, ok := stringSliceAttrOk(obj, "forbidden_account_ids"); ok { + cfg.ForbiddenAccountIds = val + } + + if val, ok := stringAttrOk(obj, "retry_mode"); ok { + mode, err := aws.ParseRetryMode(val) + if err != nil { + panic(fmt.Sprintf("invalid retry mode %q: %s", val, err)) + } + cfg.RetryMode = mode + } + + _, awsConfig, awsDiags := awsbase.GetAwsConfig(ctx, cfg) + + for _, d := range awsDiags { + diags = diags.Append(tfdiags.Sourceless( + baseSeverityToTofuSeverity(d.Severity()), + d.Summary(), + d.Detail(), + )) + } + + if d := verifyAllowedAccountID(ctx, awsConfig, cfg); len(d) != 0 { + diags = diags.Append(d) + } + + if diags.HasErrors() { + return diags + } + + b.awsConfig = awsConfig + + b.dynClient = dynamodb.NewFromConfig(awsConfig, getDynamoDBConfig(obj)) + + b.s3Client = s3.NewFromConfig(awsConfig, getS3Config(obj)) + + return diags +} + +func attachLoggerToContext(ctx context.Context) (context.Context, baselogging.HcLogger) { + ctx, baselog := baselogging.NewHcLogger(ctx, logging.HCLogger().Named("backend-s3")) + ctx = baselogging.RegisterLogger(ctx, baselog) + return ctx, baselog +} + +func verifyAllowedAccountID(ctx context.Context, awsConfig aws.Config, cfg *awsbase.Config) tfdiags.Diagnostics { + if len(cfg.ForbiddenAccountIds) == 0 && len(cfg.AllowedAccountIds) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + accountID, _, awsDiags := awsbase.GetAwsAccountIDAndPartition(ctx, awsConfig, cfg) + for _, d := range awsDiags { + diags = diags.Append(tfdiags.Sourceless( + baseSeverityToTofuSeverity(d.Severity()), + fmt.Sprintf("Retrieving AWS account details: %s", d.Summary()), + d.Detail(), + )) + } + + err := cfg.VerifyAccountIDAllowed(accountID) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid account ID", + err.Error(), + )) + } + return diags +} + +func getDynamoDBConfig(obj cty.Value) func(options 
*dynamodb.Options) { + return func(options *dynamodb.Options) { + if v, ok := customEndpoints["dynamodb"].StringOk(obj); ok { + options.BaseEndpoint = aws.String(v) + } + } +} + +func getS3Config(obj cty.Value) func(options *s3.Options) { + return func(options *s3.Options) { + if v, ok := customEndpoints["s3"].StringOk(obj); ok { + options.BaseEndpoint = aws.String(v) + } + if v, ok := boolAttrOk(obj, "force_path_style"); ok { + options.UsePathStyle = v + } + if v, ok := boolAttrOk(obj, "use_path_style"); ok { + options.UsePathStyle = v + } + } +} + +func configureNestedAssumeRole(obj cty.Value) *awsbase.AssumeRole { + assumeRole := awsbase.AssumeRole{} + + obj = obj.GetAttr("assume_role") + if val, ok := stringAttrOk(obj, "role_arn"); ok { + assumeRole.RoleARN = val + } + if val, ok := stringAttrOk(obj, "duration"); ok { + dur, err := time.ParseDuration(val) + if err != nil { + // This should never happen because the schema should have + // already validated the duration. + panic(fmt.Sprintf("invalid duration %q: %s", val, err)) + } + + assumeRole.Duration = dur + } + if val, ok := stringAttrOk(obj, "external_id"); ok { + assumeRole.ExternalID = val + } + + if val, ok := stringAttrOk(obj, "policy"); ok { + assumeRole.Policy = strings.TrimSpace(val) + } + if val, ok := stringSliceAttrOk(obj, "policy_arns"); ok { + assumeRole.PolicyARNs = val + } + if val, ok := stringAttrOk(obj, "session_name"); ok { + assumeRole.SessionName = val + } + if val, ok := stringMapAttrOk(obj, "tags"); ok { + assumeRole.Tags = val + } + if val, ok := stringSliceAttrOk(obj, "transitive_tag_keys"); ok { + assumeRole.TransitiveTagKeys = val + } + + return &assumeRole +} + +func configureAssumeRole(obj cty.Value) *awsbase.AssumeRole { + assumeRole := awsbase.AssumeRole{} + + assumeRole.RoleARN = stringAttr(obj, "role_arn") + assumeRole.Duration = time.Duration(int64(intAttr(obj, "assume_role_duration_seconds")) * int64(time.Second)) + assumeRole.ExternalID = stringAttr(obj, "external_id") + 
assumeRole.Policy = stringAttr(obj, "assume_role_policy") + assumeRole.SessionName = stringAttr(obj, "session_name") + + if val, ok := stringSliceAttrOk(obj, "assume_role_policy_arns"); ok { + assumeRole.PolicyARNs = val + } + if val, ok := stringMapAttrOk(obj, "assume_role_tags"); ok { + assumeRole.Tags = val + } + if val, ok := stringSliceAttrOk(obj, "assume_role_transitive_tag_keys"); ok { + assumeRole.TransitiveTagKeys = val + } + + return &assumeRole +} + +func configureAssumeRoleWithWebIdentity(obj cty.Value) *awsbase.AssumeRoleWithWebIdentity { + cfg := &awsbase.AssumeRoleWithWebIdentity{ + RoleARN: stringAttrDefaultEnvVar(obj, "role_arn", "AWS_ROLE_ARN"), + Policy: stringAttr(obj, "policy"), + PolicyARNs: stringSliceAttr(obj, "policy_arns"), + SessionName: stringAttrDefaultEnvVar(obj, "session_name", "AWS_ROLE_SESSION_NAME"), + WebIdentityToken: stringAttrDefaultEnvVar(obj, "web_identity_token", "AWS_WEB_IDENTITY_TOKEN"), + WebIdentityTokenFile: stringAttrDefaultEnvVar(obj, "web_identity_token_file", "AWS_WEB_IDENTITY_TOKEN_FILE"), + } + if val, ok := stringAttrOk(obj, "duration"); ok { + d, err := time.ParseDuration(val) + if err != nil { + // This should never happen because the schema should have + // already validated the duration. 
+ panic(fmt.Sprintf("invalid duration %q: %s", val, err)) + } + cfg.Duration = d + } + return cfg +} + +func stringValue(val cty.Value) string { + v, _ := stringValueOk(val) + return v +} + +func stringValueOk(val cty.Value) (string, bool) { + if val.IsNull() { + return "", false + } else { + return val.AsString(), true + } +} + +func stringAttr(obj cty.Value, name string) string { + return stringValue(obj.GetAttr(name)) +} + +func stringAttrOk(obj cty.Value, name string) (string, bool) { + return stringValueOk(obj.GetAttr(name)) +} + +func stringAttrDefault(obj cty.Value, name, def string) string { + if v, ok := stringAttrOk(obj, name); !ok { + return def + } else { + return v + } +} + +func stringSliceValue(val cty.Value) []string { + v, _ := stringSliceValueOk(val) + return v +} + +func stringSliceValueOk(val cty.Value) ([]string, bool) { + if val.IsNull() { + return nil, false + } + + var v []string + if err := gocty.FromCtyValue(val, &v); err != nil { + return nil, false + } + return v, true +} + +func stringSliceAttr(obj cty.Value, name string) []string { + return stringSliceValue(obj.GetAttr(name)) +} + +func stringSliceAttrOk(obj cty.Value, name string) ([]string, bool) { + return stringSliceValueOk(obj.GetAttr(name)) +} + +func stringSliceAttrDefaultEnvVarOk(obj cty.Value, name string, envvars ...string) ([]string, bool) { + if v, ok := stringSliceAttrOk(obj, name); !ok { + for _, envvar := range envvars { + if ev := os.Getenv(envvar); ev != "" { + return []string{ev}, true + } + } + return nil, false + } else { + return v, true + } +} + +func stringAttrDefaultEnvVar(obj cty.Value, name string, envvars ...string) string { + if v, ok := stringAttrDefaultEnvVarOk(obj, name, envvars...); !ok { + return "" + } else { + return v + } +} + +func stringAttrDefaultEnvVarOk(obj cty.Value, name string, envvars ...string) (string, bool) { + if v, ok := stringAttrOk(obj, name); !ok { + for _, envvar := range envvars { + if v := os.Getenv(envvar); v != "" { + return v, 
true + } + } + return "", false + } else { + return v, true + } +} + +func boolAttr(obj cty.Value, name string) bool { + v, _ := boolAttrOk(obj, name) + return v +} + +func boolAttrOk(obj cty.Value, name string) (bool, bool) { + if val := obj.GetAttr(name); val.IsNull() { + return false, false + } else { + return val.True(), true + } +} + +func intAttr(obj cty.Value, name string) int { + v, _ := intAttrOk(obj, name) + return v +} + +func intAttrOk(obj cty.Value, name string) (int, bool) { + if val := obj.GetAttr(name); val.IsNull() { + return 0, false + } else { + var v int + if err := gocty.FromCtyValue(val, &v); err != nil { + return 0, false + } + return v, true + } +} + +func intAttrDefault(obj cty.Value, name string, def int) int { + if v, ok := intAttrOk(obj, name); !ok { + return def + } else { + return v + } +} + +func stringMapValueOk(val cty.Value) (map[string]string, bool) { + var m map[string]string + err := gocty.FromCtyValue(val, &m) + if err != nil { + return nil, false + } + return m, true +} + +func stringMapAttrOk(obj cty.Value, name string) (map[string]string, bool) { + return stringMapValueOk(obj.GetAttr(name)) +} + +func customEndpointAttrDefaultEnvVarOk(obj cty.Value, endpointsKey, deprecatedKey string, envvars ...string) (string, bool) { + if val := obj.GetAttr("endpoints"); !val.IsNull() { + if v, ok := stringAttrDefaultEnvVarOk(val, endpointsKey, envvars...); ok { + return v, true + } + } + return stringAttrDefaultEnvVarOk(obj, deprecatedKey, envvars...) 
+} + +func pathString(path cty.Path) string { + var buf strings.Builder + for i, step := range path { + switch x := step.(type) { + case cty.GetAttrStep: + if i != 0 { + buf.WriteString(".") + } + buf.WriteString(x.Name) + case cty.IndexStep: + val := x.Key + typ := val.Type() + var s string + switch { + case typ == cty.String: + s = val.AsString() + case typ == cty.Number: + num := val.AsBigFloat() + if num.IsInt() { + s = num.Text('f', -1) + } else { + s = num.String() + } + default: + s = fmt.Sprintf("", typ.FriendlyName()) + } + buf.WriteString(fmt.Sprintf("[%s]", s)) + default: + if i != 0 { + buf.WriteString(".") + } + buf.WriteString(fmt.Sprintf("", x)) + } + } + return buf.String() +} + +func findDeprecatedFields(obj cty.Value, attrs map[string]string) map[string]string { + defined := make(map[string]string) + for attr, v := range attrs { + if val := obj.GetAttr(attr); !val.IsNull() { + defined[attr] = v + } + } + return defined +} + +func formatDeprecated(attrs map[string]string) string { + var maxLen int + var buf strings.Builder + + names := make([]string, 0, len(attrs)) + for deprecated, replacement := range attrs { + names = append(names, deprecated) + if l := len(deprecated); l > maxLen { + maxLen = l + } + + fmt.Fprintf(&buf, " * %-[1]*[2]s -> %s\n", maxLen, deprecated, replacement) + } + + sort.Strings(names) + + return buf.String() +} + +const encryptionKeyConflictError = `Only one of "kms_key_id" and "sse_customer_key" can be set. + +The "kms_key_id" is used for encryption with KMS-Managed Keys (SSE-KMS) +while "sse_customer_key" is used for encryption with customer-managed keys (SSE-C). +Please choose one or the other.` + +const encryptionKeyConflictEnvVarError = `Only one of "kms_key_id" and the environment variable "AWS_SSE_CUSTOMER_KEY" can be set. + +The "kms_key_id" is used for encryption with KMS-Managed Keys (SSE-KMS) +while "AWS_SSE_CUSTOMER_KEY" is used for encryption with customer-managed keys (SSE-C). 
+Please choose one or the other.` + +type customEndpoint struct { + Paths []cty.Path + EnvVars []string +} + +func (e customEndpoint) Validate(obj cty.Value, diags *tfdiags.Diagnostics) { + validateAttributesConflict(e.Paths...)(obj, cty.Path{}, diags) +} + +func (e customEndpoint) String(obj cty.Value) string { + v, _ := e.StringOk(obj) + return v +} + +func includeProtoIfNessesary(endpoint string) string { + if matched, _ := regexp.MatchString("[a-z]*://.*", endpoint); !matched { + log.Printf("[DEBUG] Adding https:// prefix to endpoint '%s'", endpoint) + endpoint = fmt.Sprintf("https://%s", endpoint) + } + return endpoint +} + +func (e customEndpoint) StringOk(obj cty.Value) (string, bool) { + for _, path := range e.Paths { + val, err := path.Apply(obj) + if err != nil { + continue + } + if s, ok := stringValueOk(val); ok { + return includeProtoIfNessesary(s), true + } + } + for _, envVar := range e.EnvVars { + if v := os.Getenv(envVar); v != "" { + return includeProtoIfNessesary(v), true + } + } + return "", false +} + +var customEndpoints = map[string]customEndpoint{ + "s3": { + Paths: []cty.Path{ + cty.GetAttrPath("endpoints").GetAttr("s3"), + cty.GetAttrPath("endpoint"), + }, + EnvVars: []string{ + "AWS_ENDPOINT_URL_S3", + "AWS_S3_ENDPOINT", + }, + }, + "iam": { + Paths: []cty.Path{ + cty.GetAttrPath("endpoints").GetAttr("iam"), + cty.GetAttrPath("iam_endpoint"), + }, + EnvVars: []string{ + "AWS_ENDPOINT_URL_IAM", + "AWS_IAM_ENDPOINT", + }, + }, + "sts": { + Paths: []cty.Path{ + cty.GetAttrPath("endpoints").GetAttr("sts"), + cty.GetAttrPath("sts_endpoint"), + }, + EnvVars: []string{ + "AWS_ENDPOINT_URL_STS", + "AWS_STS_ENDPOINT", + }, + }, + "dynamodb": { + Paths: []cty.Path{ + cty.GetAttrPath("endpoints").GetAttr("dynamodb"), + cty.GetAttrPath("dynamodb_endpoint"), + }, + EnvVars: []string{ + "AWS_ENDPOINT_URL_DYNAMODB", + "AWS_DYNAMODB_ENDPOINT", + }, + }, +} diff --git a/pkg/backend/remote-state/s3/backend_complete_test.go 
b/pkg/backend/remote-state/s3/backend_complete_test.go new file mode 100644 index 00000000000..3448368307a --- /dev/null +++ b/pkg/backend/remote-state/s3/backend_complete_test.go @@ -0,0 +1,2195 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/aws-sdk-go-base/v2/mockdata" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const mockStsAssumeRolePolicy = `{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + }` + +func ExpectNoDiags(t *testing.T, diags tfdiags.Diagnostics) { + expectDiagsCount(t, diags, 0) +} + +func expectDiagsCount(t *testing.T, diags tfdiags.Diagnostics, c int) { + if l := len(diags); l != c { + t.Fatalf("Diagnostics: expected %d element, got %d\n%#v", c, l, diags) + } +} + +func ExpectDiagsEqual(expected tfdiags.Diagnostics) diagsValidator { + return func(t *testing.T, diags tfdiags.Diagnostics) { + if diff := cmp.Diff(diags, expected, cmp.Comparer(diagnosticComparer)); diff != "" { + t.Fatalf("unexpected diagnostics difference: %s", diff) + } + } +} + +type diagsValidator func(*testing.T, tfdiags.Diagnostics) + +// ExpectDiagsMatching returns a validator expeceting a single Diagnostic with fields matching the expectation +func ExpectDiagsMatching(severity tfdiags.Severity, summary matcher, detail matcher) diagsValidator { + return func(t *testing.T, diags tfdiags.Diagnostics) { + for _, d := range diags { + if !summary.Match(d.Description().Summary) || !detail.Match(d.Description().Detail) { + 
t.Fatalf("expected Diagnostic matching %#v, got %#v", + tfdiags.Sourceless( + severity, + summary.String(), + detail.String(), + ), + d, + ) + } + } + + expectDiagsCount(t, diags, 1) + } +} + +type diagValidator func(*testing.T, tfdiags.Diagnostic) + +func ExpectDiagMatching(severity tfdiags.Severity, summary matcher, detail matcher) diagValidator { + return func(t *testing.T, d tfdiags.Diagnostic) { + if !summary.Match(d.Description().Summary) || !detail.Match(d.Description().Detail) { + t.Fatalf("expected Diagnostic matching %#v, got %#v", + tfdiags.Sourceless( + severity, + summary.String(), + detail.String(), + ), + d, + ) + } + } +} + +func ExpectMultipleDiags(validators ...diagValidator) diagsValidator { + return func(t *testing.T, diags tfdiags.Diagnostics) { + count := len(validators) + if diagCount := len(diags); diagCount < count { + count = diagCount + } + + for i := 0; i < count; i++ { + validators[i](t, diags[i]) + } + + expectDiagsCount(t, diags, len(validators)) + } +} + +type matcher interface { + fmt.Stringer + Match(string) bool +} + +type equalsMatcher string + +func (m equalsMatcher) Match(s string) bool { + return string(m) == s +} + +func (m equalsMatcher) String() string { + return string(m) +} + +type regexpMatcher struct { + re *regexp.Regexp +} + +func newRegexpMatcher(re string) regexpMatcher { + return regexpMatcher{ + re: regexp.MustCompile(re), + } +} + +func (m regexpMatcher) Match(s string) bool { + return m.re.MatchString(s) +} + +func (m regexpMatcher) String() string { + return m.re.String() +} + +type noopMatcher struct{} + +func (m noopMatcher) Match(s string) bool { + return true +} + +func (m noopMatcher) String() string { + return "" +} + +func TestBackendConfig_Authentication(t *testing.T) { + testCases := map[string]struct { + config map[string]any + EnableEc2MetadataServer bool + EnableEcsCredentialsServer bool + EnableWebIdentityEnvVars bool + // EnableWebIdentityConfig bool // Not supported + EnvironmentVariables 
map[string]string + ExpectedCredentialsValue aws.Credentials + MockStsEndpoints []*servicemocks.MockEndpoint + SharedConfigurationFile string + SharedCredentialsFile string + ValidateDiags diagsValidator + }{ + "empty config": { + config: map[string]any{}, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("No valid credential sources found"), + newRegexpMatcher("^Please see.+"), + ), + }, + + "config AccessKey": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + ValidateDiags: ExpectNoDiags, + }, + + "config AccessKey forbidden account": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "forbidden_account_ids": []any{"222222222222"}, + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("Invalid account ID"), + equalsMatcher("AWS account ID not allowed: 222222222222"), + ), + }, + + "config Profile shared credentials profile aws_access_key_id": { + config: map[string]any{ + "profile": "SharedCredentialsProfile", + }, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "ProfileSharedCredentialsAccessKey", + SecretAccessKey: "ProfileSharedCredentialsSecretKey", + Source: "SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey 
+aws_secret_access_key = DefaultSharedCredentialsSecretKey + +[SharedCredentialsProfile] +aws_access_key_id = ProfileSharedCredentialsAccessKey +aws_secret_access_key = ProfileSharedCredentialsSecretKey +`, + ValidateDiags: ExpectNoDiags, + }, + + "environment AWS_ACCESS_KEY_ID does not override config Profile": { + config: map[string]any{ + "profile": "SharedCredentialsProfile", + }, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "ProfileSharedCredentialsAccessKey", + SecretAccessKey: "ProfileSharedCredentialsSecretKey", + Source: "SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +[SharedCredentialsProfile] +aws_access_key_id = ProfileSharedCredentialsAccessKey +aws_secret_access_key = ProfileSharedCredentialsSecretKey +`, + ValidateDiags: ExpectNoDiags, + }, + + "environment AWS_ACCESS_KEY_ID": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockEnvCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectNoDiags, + }, + + "environment AWS_PROFILE shared credentials profile aws_access_key_id": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "SharedCredentialsProfile", + }, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "ProfileSharedCredentialsAccessKey", + SecretAccessKey: "ProfileSharedCredentialsSecretKey", + Source: 
"SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey + +[SharedCredentialsProfile] +aws_access_key_id = ProfileSharedCredentialsAccessKey +aws_secret_access_key = ProfileSharedCredentialsSecretKey +`, + ValidateDiags: ExpectNoDiags, + }, + + "environment AWS_SESSION_TOKEN": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + "AWS_SESSION_TOKEN": servicemocks.MockEnvSessionToken, + }, + ExpectedCredentialsValue: mockdata.MockEnvCredentialsWithSessionToken, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "shared credentials default aws_access_key_id": { + config: map[string]any{}, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "DefaultSharedCredentialsAccessKey", + SecretAccessKey: "DefaultSharedCredentialsSecretKey", + Source: "SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + }, + + "web identity token access key": { + config: map[string]any{}, + EnableWebIdentityEnvVars: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "EC2 metadata access key": { + config: map[string]any{}, + EnableEc2MetadataServer: true, + ExpectedCredentialsValue: 
mockdata.MockEc2MetadataCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectNoDiags, + }, + + "ECS credentials access key": { + config: map[string]any{}, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockEcsCredentialsCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "AssumeWebIdentity envvar AssumeRoleARN access key": { + config: map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + EnableWebIdentityEnvVars: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "config AccessKey over environment AWS_ACCESS_KEY_ID": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectNoDiags, + }, + + "config AccessKey over shared credentials default aws_access_key_id": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + 
servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + ValidateDiags: ExpectNoDiags, + }, + + "config AccessKey over EC2 metadata access key": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "config AccessKey over ECS credentials access key": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "environment AWS_ACCESS_KEY_ID over shared credentials default aws_access_key_id": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockEnvCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + ValidateDiags: ExpectNoDiags, + }, + + "environment AWS_ACCESS_KEY_ID over EC2 metadata access key": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockEnvCredentials, + MockStsEndpoints: 
[]*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "environment AWS_ACCESS_KEY_ID over ECS credentials access key": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockEnvCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "shared credentials default aws_access_key_id over EC2 metadata access key": { + config: map[string]any{}, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "DefaultSharedCredentialsAccessKey", + SecretAccessKey: "DefaultSharedCredentialsSecretKey", + Source: "SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + }, + + "shared credentials default aws_access_key_id over ECS credentials access key": { + config: map[string]any{}, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: aws.Credentials{ + AccessKeyID: "DefaultSharedCredentialsAccessKey", + SecretAccessKey: "DefaultSharedCredentialsSecretKey", + Source: "SharedConfigCredentials", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + }, + + "ECS credentials access key over EC2 metadata access key": { + config: map[string]any{}, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockEcsCredentialsCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + 
servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "retrieve region from shared configuration file": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStaticCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: ` +[default] +region = us-east-1 +`, + }, + + "skip EC2 Metadata API check": { + config: map[string]any{ + "skip_metadata_api_check": true, + }, + // The IMDS server must be enabled so that auth will succeed if the IMDS is called + EnableEc2MetadataServer: true, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("No valid credential sources found"), + newRegexpMatcher("^Please see.+"), + ), + }, + + "invalid profile name from envvar": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "no-such-profile", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[some-profile] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("failed to get shared config profile, no-such-profile"), + equalsMatcher(""), + ), + }, + + "invalid profile name from config": { + config: map[string]any{ + "profile": "no-such-profile", + }, + SharedCredentialsFile: ` +[some-profile] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + 
equalsMatcher("failed to get shared config profile, no-such-profile"), + equalsMatcher(""), + ), + }, + + "AWS_ACCESS_KEY_ID overrides AWS_PROFILE": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + "AWS_PROFILE": "SharedCredentialsProfile", + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey + +[SharedCredentialsProfile] +aws_access_key_id = ProfileSharedCredentialsAccessKey +aws_secret_access_key = ProfileSharedCredentialsSecretKey +`, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ExpectedCredentialsValue: mockdata.MockEnvCredentials, + ValidateDiags: ExpectNoDiags, + }, + + "AWS_ACCESS_KEY_ID does not override invalid profile name from envvar": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + "AWS_PROFILE": "no-such-profile", + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[some-profile] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("failed to get shared config profile, no-such-profile"), + equalsMatcher(""), + ), + }, + } + + for name, tc := range testCases { + tc := tc + + t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + // Populate required fields + tc.config["region"] = "us-east-1" + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + + if tc.ValidateDiags == nil { + tc.ValidateDiags = ExpectNoDiags + } + + if tc.EnableEc2MetadataServer { + closeEc2Metadata 
:= servicemocks.AwsMetadataApiMock(append( + servicemocks.Ec2metadata_securityCredentialsEndpoints, + servicemocks.Ec2metadata_instanceIdEndpoint, + servicemocks.Ec2metadata_iamInfoEndpoint, + )) + defer closeEc2Metadata() + } + + if tc.EnableEcsCredentialsServer { + closeEcsCredentials := servicemocks.EcsCredentialsApiMock() + defer closeEcsCredentials() + } + + if tc.EnableWebIdentityEnvVars /*|| tc.EnableWebIdentityConfig*/ { + file, err := os.CreateTemp("", "aws-sdk-go-base-web-identity-token-file") + if err != nil { + t.Fatalf("unexpected error creating temporary web identity token file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(servicemocks.MockWebIdentityToken), 0600) + + if err != nil { + t.Fatalf("unexpected error writing web identity token file: %s", err) + } + + if tc.EnableWebIdentityEnvVars { + t.Setenv("AWS_ROLE_ARN", servicemocks.MockStsAssumeRoleWithWebIdentityArn) + t.Setenv("AWS_ROLE_SESSION_NAME", servicemocks.MockStsAssumeRoleWithWebIdentitySessionName) + t.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", file.Name()) + } /*else if tc.EnableWebIdentityConfig { + tc.Config.AssumeRoleWithWebIdentity = &AssumeRoleWithWebIdentity{ + RoleARN: servicemocks.MockStsAssumeRoleWithWebIdentityArn, + SessionName: servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + WebIdentityTokenFile: file.Name(), + } + }*/ + } + + ts := servicemocks.MockAwsApiServer("STS", tc.MockStsEndpoints) + defer ts.Close() + + tc.config["sts_endpoint"] = ts.URL + + if tc.SharedConfigurationFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-configuration-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared configuration file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedConfigurationFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared configuration file: %s", err) + } + + setSharedConfigFile(t, file.Name()) + } + + 
if tc.SharedCredentialsFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-credentials-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared credentials file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedCredentialsFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared credentials file: %s", err) + } + + tc.config["shared_credentials_files"] = []any{file.Name()} + if tc.ExpectedCredentialsValue.Source == "SharedConfigCredentials" { + tc.ExpectedCredentialsValue.Source = fmt.Sprintf("SharedConfigCredentials: %s", file.Name()) + } + + tc.config["shared_config_files"] = []any{file.Name()} + } + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + b, diags := configureBackend(t, tc.config) + + tc.ValidateDiags(t, diags) + + if diags.HasErrors() { + return + } + + credentials, err := b.awsConfig.Credentials.Retrieve(context.TODO()) + if err != nil { + t.Fatalf("Error when requesting credentials: %s", err) + } + + if diff := cmp.Diff(credentials, tc.ExpectedCredentialsValue, cmpopts.IgnoreFields(aws.Credentials{}, "Expires")); diff != "" { + t.Fatalf("unexpected credentials: (- got, + expected)\n%s", diff) + } + }) + } +} +func TestBackendConfig_Authentication_AssumeRoleInline(t *testing.T) { + testCases := map[string]struct { + config map[string]any + EnableEc2MetadataServer bool + EnableEcsCredentialsServer bool + EnvironmentVariables map[string]string + ExpectedCredentialsValue aws.Credentials + MockStsEndpoints []*servicemocks.MockEndpoint + SharedConfigurationFile string + SharedCredentialsFile string + ValidateDiags diagsValidator + }{ + "from config access_key": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + 
ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "from environment AWS_ACCESS_KEY_ID": { + config: map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "from config Profile with Ec2InstanceMetadata source": { + config: map[string]any{ + "profile": "SharedConfigurationProfile", + }, + EnableEc2MetadataServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +credential_source = Ec2InstanceMetadata +role_arn = %[1]s +role_session_name = %[2]s +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from environment AWS_PROFILE with Ec2InstanceMetadata source": { + config: map[string]any{}, + EnableEc2MetadataServer: true, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: 
[]*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +credential_source = Ec2InstanceMetadata +role_arn = %[1]s +role_session_name = %[2]s +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from config Profile with source profile": { + config: map[string]any{ + "profile": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +role_arn = %[1]s +role_session_name = %[2]s +source_profile = SharedConfigurationSourceProfile + +[profile SharedConfigurationSourceProfile] +aws_access_key_id = SharedConfigurationSourceAccessKey +aws_secret_access_key = SharedConfigurationSourceSecretKey +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from environment AWS_PROFILE with source profile": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +role_arn = %[1]s +role_session_name = %[2]s +source_profile = SharedConfigurationSourceProfile + +[profile SharedConfigurationSourceProfile] +aws_access_key_id = SharedConfigurationSourceAccessKey +aws_secret_access_key = SharedConfigurationSourceSecretKey +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from 
default profile": { + config: map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "from EC2 metadata": { + config: map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + EnableEc2MetadataServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "from ECS credentials": { + config: map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with duration": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": 
servicemocks.MockStsAssumeRoleSessionName, + "assume_role_duration_seconds": 3600, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"DurationSeconds": "3600"}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with external ID": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "external_id": servicemocks.MockStsAssumeRoleExternalId, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"ExternalId": servicemocks.MockStsAssumeRoleExternalId}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with policy": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "assume_role_policy": mockStsAssumeRolePolicy, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Policy": mockStsAssumeRolePolicy}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with policy 
ARNs": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "assume_role_policy_arns": []any{servicemocks.MockStsAssumeRolePolicyArn}, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"PolicyArns.member.1.arn": servicemocks.MockStsAssumeRolePolicyArn}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with tags": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "assume_role_tags": map[string]any{ + servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Tags.member.1.Key": servicemocks.MockStsAssumeRoleTagKey, "Tags.member.1.Value": servicemocks.MockStsAssumeRoleTagValue}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "with transitive tags": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "assume_role_tags": map[string]any{ + 
servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + "assume_role_transitive_tag_keys": []any{servicemocks.MockStsAssumeRoleTagKey}, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Tags.member.1.Key": servicemocks.MockStsAssumeRoleTagKey, "Tags.member.1.Value": servicemocks.MockStsAssumeRoleTagValue, "TransitiveTagKeys.member.1": servicemocks.MockStsAssumeRoleTagKey}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + }, + + "error": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleInvalidEndpointInvalidClientTokenId, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectMultipleDiags( + ExpectDiagMatching( + tfdiags.Warning, + equalsMatcher("Deprecated Parameters"), + noopMatcher{}, + ), + ExpectDiagMatching( + tfdiags.Error, + equalsMatcher("Cannot assume IAM Role"), + noopMatcher{}, + ), + ), + }, + } + + for name, tc := range testCases { + tc := tc + + t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + ctx := context.TODO() + + // Populate required fields + tc.config["region"] = "us-east-1" + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + + if tc.ValidateDiags == nil { + tc.ValidateDiags = ExpectNoDiags + } + + if tc.EnableEc2MetadataServer { + closeEc2Metadata := servicemocks.AwsMetadataApiMock(append( + servicemocks.Ec2metadata_securityCredentialsEndpoints, + servicemocks.Ec2metadata_instanceIdEndpoint, + 
servicemocks.Ec2metadata_iamInfoEndpoint, + )) + defer closeEc2Metadata() + } + + if tc.EnableEcsCredentialsServer { + closeEcsCredentials := servicemocks.EcsCredentialsApiMock() + defer closeEcsCredentials() + } + + ts := servicemocks.MockAwsApiServer("STS", tc.MockStsEndpoints) + defer ts.Close() + + tc.config["sts_endpoint"] = ts.URL + + if tc.SharedConfigurationFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-configuration-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared configuration file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedConfigurationFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared configuration file: %s", err) + } + + setSharedConfigFile(t, file.Name()) + } + + if tc.SharedCredentialsFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-credentials-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared credentials file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedCredentialsFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared credentials file: %s", err) + } + + tc.config["shared_credentials_files"] = []any{file.Name()} + if tc.ExpectedCredentialsValue.Source == "SharedConfigCredentials" { + tc.ExpectedCredentialsValue.Source = fmt.Sprintf("SharedConfigCredentials: %s", file.Name()) + } + } + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + b, diags := configureBackend(t, tc.config) + + tc.ValidateDiags(t, diags) + + if diags.HasErrors() { + return + } + + credentials, err := b.awsConfig.Credentials.Retrieve(ctx) + if err != nil { + t.Fatalf("Error when requesting credentials: %s", err) + } + + if diff := cmp.Diff(credentials, tc.ExpectedCredentialsValue, cmpopts.IgnoreFields(aws.Credentials{}, "Expires")); diff != "" { + t.Fatalf("unexpected credentials: (- got, + 
expected)\n%s", diff) + } + }) + } +} + +func TestBackendConfig_Authentication_AssumeRoleNested(t *testing.T) { + testCases := map[string]struct { + config map[string]any + EnableEc2MetadataServer bool + EnableEcsCredentialsServer bool + EnvironmentVariables map[string]string + ExpectedCredentialsValue aws.Credentials + MockStsEndpoints []*servicemocks.MockEndpoint + SharedConfigurationFile string + SharedCredentialsFile string + ValidateDiags diagsValidator + }{ + "from config access_key": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "from environment AWS_ACCESS_KEY_ID": { + config: map[string]any{ + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + EnvironmentVariables: map[string]string{ + "AWS_ACCESS_KEY_ID": servicemocks.MockEnvAccessKey, + "AWS_SECRET_ACCESS_KEY": servicemocks.MockEnvSecretKey, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "from config Profile with Ec2InstanceMetadata source": { + config: map[string]any{ + "profile": "SharedConfigurationProfile", + }, + EnableEc2MetadataServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + 
servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +credential_source = Ec2InstanceMetadata +role_arn = %[1]s +role_session_name = %[2]s +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from environment AWS_PROFILE with Ec2InstanceMetadata source": { + config: map[string]any{}, + EnableEc2MetadataServer: true, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +credential_source = Ec2InstanceMetadata +role_arn = %[1]s +role_session_name = %[2]s +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from config Profile with source profile": { + config: map[string]any{ + "profile": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +role_arn = %[1]s +role_session_name = %[2]s +source_profile = SharedConfigurationSourceProfile + +[profile SharedConfigurationSourceProfile] +aws_access_key_id = SharedConfigurationSourceAccessKey +aws_secret_access_key = SharedConfigurationSourceSecretKey +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from environment AWS_PROFILE with source profile": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_PROFILE": "SharedConfigurationProfile", + }, + ExpectedCredentialsValue: 
mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedConfigurationFile: fmt.Sprintf(` +[profile SharedConfigurationProfile] +role_arn = %[1]s +role_session_name = %[2]s +source_profile = SharedConfigurationSourceProfile + +[profile SharedConfigurationSourceProfile] +aws_access_key_id = SharedConfigurationSourceAccessKey +aws_secret_access_key = SharedConfigurationSourceSecretKey +`, servicemocks.MockStsAssumeRoleArn, servicemocks.MockStsAssumeRoleSessionName), + }, + + "from default profile": { + config: map[string]any{ + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + SharedCredentialsFile: ` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`, + }, + + "from EC2 metadata": { + config: map[string]any{ + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + EnableEc2MetadataServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "from ECS credentials": { + config: map[string]any{ + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + EnableEcsCredentialsServer: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + 
MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpoint, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with duration": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "duration": "1h", + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"DurationSeconds": "3600"}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with external ID": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "external_id": servicemocks.MockStsAssumeRoleExternalId, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"ExternalId": servicemocks.MockStsAssumeRoleExternalId}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with policy": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "policy": mockStsAssumeRolePolicy, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Policy": 
mockStsAssumeRolePolicy}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with policy ARNs": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "policy_arns": []any{servicemocks.MockStsAssumeRolePolicyArn}, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"PolicyArns.member.1.arn": servicemocks.MockStsAssumeRolePolicyArn}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with tags": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "tags": map[string]any{ + servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Tags.member.1.Key": servicemocks.MockStsAssumeRoleTagKey, "Tags.member.1.Value": servicemocks.MockStsAssumeRoleTagValue}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "with transitive tags": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + "tags": map[string]any{ + servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + 
"transitive_tag_keys": []any{servicemocks.MockStsAssumeRoleTagKey}, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleValidEndpointWithOptions(map[string]string{"Tags.member.1.Key": servicemocks.MockStsAssumeRoleTagKey, "Tags.member.1.Value": servicemocks.MockStsAssumeRoleTagValue, "TransitiveTagKeys.member.1": servicemocks.MockStsAssumeRoleTagKey}), + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + }, + + "error": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "assume_role": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + }, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleInvalidEndpointInvalidClientTokenId, + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }, + ValidateDiags: ExpectDiagsMatching( + tfdiags.Error, + equalsMatcher("Cannot assume IAM Role"), + noopMatcher{}, + ), + }, + } + + for name, tc := range testCases { + tc := tc + + t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + ctx := context.TODO() + + // Populate required fields + tc.config["region"] = "us-east-1" + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + + if tc.ValidateDiags == nil { + tc.ValidateDiags = ExpectNoDiags + } + + if tc.EnableEc2MetadataServer { + closeEc2Metadata := servicemocks.AwsMetadataApiMock(append( + servicemocks.Ec2metadata_securityCredentialsEndpoints, + servicemocks.Ec2metadata_instanceIdEndpoint, + servicemocks.Ec2metadata_iamInfoEndpoint, + )) + defer closeEc2Metadata() + } + + if tc.EnableEcsCredentialsServer { + closeEcsCredentials := servicemocks.EcsCredentialsApiMock() + defer closeEcsCredentials() + } + + ts := servicemocks.MockAwsApiServer("STS", tc.MockStsEndpoints) + defer ts.Close() + + 
tc.config["sts_endpoint"] = ts.URL + + if tc.SharedConfigurationFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-configuration-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared configuration file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedConfigurationFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared configuration file: %s", err) + } + + setSharedConfigFile(t, file.Name()) + } + + if tc.SharedCredentialsFile != "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-credentials-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared credentials file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedCredentialsFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared credentials file: %s", err) + } + + tc.config["shared_credentials_files"] = []any{file.Name()} + if tc.ExpectedCredentialsValue.Source == "SharedConfigCredentials" { + tc.ExpectedCredentialsValue.Source = fmt.Sprintf("SharedConfigCredentials: %s", file.Name()) + } + + tc.config["shared_config_files"] = []any{file.Name()} + } + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + b, diags := configureBackend(t, tc.config) + + tc.ValidateDiags(t, diags) + + if diags.HasErrors() { + return + } + + credentials, err := b.awsConfig.Credentials.Retrieve(ctx) + if err != nil { + t.Fatalf("Error when requesting credentials: %s", err) + } + + if diff := cmp.Diff(credentials, tc.ExpectedCredentialsValue, cmpopts.IgnoreFields(aws.Credentials{}, "Expires")); diff != "" { + t.Fatalf("unexpected credentials: (- got, + expected)\n%s", diff) + } + }) + } +} + +func TestBackendConfig_Authentication_AssumeRoleWithWebIdentity(t *testing.T) { + testCases := map[string]struct { + config map[string]any + SetConfig bool + ExpandEnvVars bool + EnvironmentVariables 
map[string]string + SetTokenFileEnvironmentVariable bool + SharedConfigurationFile string + SetSharedConfigurationFile bool + ExpectedCredentialsValue aws.Credentials + ValidateDiags diagsValidator + MockStsEndpoints []*servicemocks.MockEndpoint + }{ + "config with inline token": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + "web_identity_token": servicemocks.MockWebIdentityToken, + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "config with token file": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + }, + }, + SetConfig: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "config with expanded path": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + }, + }, + SetConfig: true, + ExpandEnvVars: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "envvar": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ROLE_ARN": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "AWS_ROLE_SESSION_NAME": 
servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + }, + SetTokenFileEnvironmentVariable: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "shared configuration file": { + config: map[string]any{}, + SharedConfigurationFile: fmt.Sprintf(` +[default] +role_arn = %[1]s +role_session_name = %[2]s +`, servicemocks.MockStsAssumeRoleWithWebIdentityArn, servicemocks.MockStsAssumeRoleWithWebIdentitySessionName), + SetSharedConfigurationFile: true, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "config overrides envvar": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + "web_identity_token": servicemocks.MockWebIdentityToken, + }, + }, + EnvironmentVariables: map[string]string{ + "AWS_ROLE_ARN": servicemocks.MockStsAssumeRoleWithWebIdentityAlternateArn, + "AWS_ROLE_SESSION_NAME": servicemocks.MockStsAssumeRoleWithWebIdentityAlternateSessionName, + "AWS_WEB_IDENTITY_TOKEN_FILE": "no-such-file", + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "envvar overrides shared configuration": { + config: map[string]any{}, + EnvironmentVariables: map[string]string{ + "AWS_ROLE_ARN": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "AWS_ROLE_SESSION_NAME": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + }, + SetTokenFileEnvironmentVariable: true, + SharedConfigurationFile: fmt.Sprintf(` +[default] +role_arn = %[1]s 
+role_session_name = %[2]s +web_identity_token_file = no-such-file +`, servicemocks.MockStsAssumeRoleWithWebIdentityAlternateArn, servicemocks.MockStsAssumeRoleWithWebIdentityAlternateSessionName), + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "config overrides shared configuration": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + "web_identity_token": servicemocks.MockWebIdentityToken, + }, + }, + SharedConfigurationFile: fmt.Sprintf(` +[default] +role_arn = %[1]s +role_session_name = %[2]s +web_identity_token_file = no-such-file +`, servicemocks.MockStsAssumeRoleWithWebIdentityAlternateArn, servicemocks.MockStsAssumeRoleWithWebIdentityAlternateSessionName), + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidEndpoint, + }, + }, + + "with duration": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + "web_identity_token": servicemocks.MockWebIdentityToken, + "duration": "1h", + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidWithOptions(map[string]string{"DurationSeconds": "3600"}), + }, + }, + + "with policy": { + config: map[string]any{ + "assume_role_with_web_identity": map[string]any{ + "role_arn": servicemocks.MockStsAssumeRoleWithWebIdentityArn, + "session_name": 
servicemocks.MockStsAssumeRoleWithWebIdentitySessionName, + "web_identity_token": servicemocks.MockWebIdentityToken, + "policy": "{}", + }, + }, + ExpectedCredentialsValue: mockdata.MockStsAssumeRoleWithWebIdentityCredentials, + MockStsEndpoints: []*servicemocks.MockEndpoint{ + servicemocks.MockStsAssumeRoleWithWebIdentityValidWithOptions(map[string]string{"Policy": "{}"}), + }, + }, + } + + for name, tc := range testCases { + tc := tc + + t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + ctx := context.TODO() + + // Populate required fields + tc.config["region"] = "us-east-1" + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + + if tc.ValidateDiags == nil { + tc.ValidateDiags = ExpectNoDiags + } + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + ts := servicemocks.MockAwsApiServer("STS", tc.MockStsEndpoints) + defer ts.Close() + + tc.config["sts_endpoint"] = ts.URL + + t.Setenv("TMPDIR", t.TempDir()) + + tokenFile, err := os.CreateTemp("", "aws-sdk-go-base-web-identity-token-file") + if err != nil { + t.Fatalf("unexpected error creating temporary web identity token file: %s", err) + } + tokenFileName := tokenFile.Name() + + defer os.Remove(tokenFileName) + + err = os.WriteFile(tokenFileName, []byte(servicemocks.MockWebIdentityToken), 0600) + + if err != nil { + t.Fatalf("unexpected error writing web identity token file: %s", err) + } + + if tc.ExpandEnvVars { + tmpdir := os.Getenv("TMPDIR") + rel, err := filepath.Rel(tmpdir, tokenFileName) + if err != nil { + t.Fatalf("error making path relative: %s", err) + } + t.Logf("relative: %s", rel) + tokenFileName = filepath.Join("$TMPDIR", rel) + t.Logf("env tempfile: %s", tokenFileName) + } + + if tc.SetConfig { + ar := tc.config["assume_role_with_web_identity"].(map[string]any) + ar["web_identity_token_file"] = tokenFileName + } + + if tc.SetTokenFileEnvironmentVariable { + t.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", tokenFileName) + } + + if tc.SharedConfigurationFile 
!= "" { + file, err := os.CreateTemp("", "aws-sdk-go-base-shared-configuration-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared configuration file: %s", err) + } + + defer os.Remove(file.Name()) + + if tc.SetSharedConfigurationFile { + tc.SharedConfigurationFile += fmt.Sprintf("web_identity_token_file = %s\n", tokenFileName) + } + + err = os.WriteFile(file.Name(), []byte(tc.SharedConfigurationFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared configuration file: %s", err) + } + + tc.config["shared_config_files"] = []any{file.Name()} + } + + tc.config["skip_credentials_validation"] = true + + b, diags := configureBackend(t, tc.config) + + tc.ValidateDiags(t, diags) + + if diags.HasErrors() { + return + } + + credentials, err := b.awsConfig.Credentials.Retrieve(ctx) + if err != nil { + t.Fatalf("Error when requesting credentials: %s", err) + } + + if diff := cmp.Diff(credentials, tc.ExpectedCredentialsValue, cmpopts.IgnoreFields(aws.Credentials{}, "Expires")); diff != "" { + t.Fatalf("unexpected credentials: (- got, + expected)\n%s", diff) + } + }) + } +} + +func TestBackendConfig_Region(t *testing.T) { + testCases := map[string]struct { + config map[string]any + EnvironmentVariables map[string]string + IMDSRegion string + SharedConfigurationFile string + ExpectedRegion string + }{ + // NOT SUPPORTED: region is required + // "no configuration": { + // config: map[string]any{ + // "access_key": servicemocks.MockStaticAccessKey, + // "secret_key": servicemocks.MockStaticSecretKey, + // }, + // ExpectedRegion: "", + // }, + + "config": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "region": "us-east-1", + }, + ExpectedRegion: "us-east-1", + }, + + "AWS_REGION": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: 
map[string]string{ + "AWS_REGION": "us-east-1", + }, + ExpectedRegion: "us-east-1", + }, + "AWS_DEFAULT_REGION": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_DEFAULT_REGION": "us-east-1", + }, + ExpectedRegion: "us-east-1", + }, + "AWS_REGION overrides AWS_DEFAULT_REGION": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_REGION": "us-east-1", + "AWS_DEFAULT_REGION": "us-west-2", + }, + ExpectedRegion: "us-east-1", + }, + + // NOT SUPPORTED: region from shared configuration file + // "shared configuration file": { + // config: map[string]any{ + // "access_key": servicemocks.MockStaticAccessKey, + // "secret_key": servicemocks.MockStaticSecretKey, + // }, + // SharedConfigurationFile: ` + // [default] + // region = us-east-1 + // `, + // ExpectedRegion: "us-east-1", + // }, + + // NOT SUPPORTED: region from IMDS + // "IMDS": { + // config: map[string]any{}, + // IMDSRegion: "us-east-1", + // ExpectedRegion: "us-east-1", + // }, + + "config overrides AWS_REGION": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "region": "us-east-1", + }, + EnvironmentVariables: map[string]string{ + "AWS_REGION": "us-west-2", + }, + ExpectedRegion: "us-east-1", + }, + "config overrides AWS_DEFAULT_REGION": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "region": "us-east-1", + }, + EnvironmentVariables: map[string]string{ + "AWS_DEFAULT_REGION": "us-west-2", + }, + ExpectedRegion: "us-east-1", + }, + + "config overrides IMDS": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + 
"region": "us-west-2", + }, + IMDSRegion: "us-east-1", + ExpectedRegion: "us-west-2", + }, + + "AWS_REGION overrides shared configuration": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_REGION": "us-east-1", + }, + SharedConfigurationFile: ` +[default] +region = us-west-2 +`, + ExpectedRegion: "us-east-1", + }, + "AWS_DEFAULT_REGION overrides shared configuration": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_DEFAULT_REGION": "us-east-1", + }, + SharedConfigurationFile: ` +[default] +region = us-west-2 +`, + ExpectedRegion: "us-east-1", + }, + + "AWS_REGION overrides IMDS": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_REGION": "us-east-1", + }, + IMDSRegion: "us-west-2", + ExpectedRegion: "us-east-1", + }, + } + + for name, tc := range testCases { + tc := tc + + t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + // Populate required fields + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + if tc.IMDSRegion != "" { + closeEc2Metadata := servicemocks.AwsMetadataApiMock(append( + servicemocks.Ec2metadata_securityCredentialsEndpoints, + servicemocks.Ec2metadata_instanceIdEndpoint, + servicemocks.Ec2metadata_iamInfoEndpoint, + servicemocks.Ec2metadata_instanceIdentityEndpoint(tc.IMDSRegion), + )) + defer closeEc2Metadata() + } + + sts := servicemocks.MockAwsApiServer("STS", []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }) + defer sts.Close() + + tc.config["sts_endpoint"] = sts.URL + + if tc.SharedConfigurationFile != "" { 
+ file, err := os.CreateTemp("", "aws-sdk-go-base-shared-configuration-file") + + if err != nil { + t.Fatalf("unexpected error creating temporary shared configuration file: %s", err) + } + + defer os.Remove(file.Name()) + + err = os.WriteFile(file.Name(), []byte(tc.SharedConfigurationFile), 0600) + + if err != nil { + t.Fatalf("unexpected error writing shared configuration file: %s", err) + } + + setSharedConfigFile(t, file.Name()) + } + + tc.config["skip_credentials_validation"] = true + + b, diags := configureBackend(t, tc.config) + if diags.HasErrors() { + t.Fatalf("configuring backend: %s", diagnosticsString(diags)) + } + + if a, e := b.awsConfig.Region, tc.ExpectedRegion; a != e { + t.Errorf("expected Region %q, got: %q", e, a) + } + }) + } +} + +func TestBackendConfig_RetryMode(t *testing.T) { + testCases := map[string]struct { + config map[string]any + EnvironmentVariables map[string]string + ExpectedMode aws.RetryMode + }{ + "no config": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + ExpectedMode: "", + }, + + "config": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "retry_mode": "standard", + }, + ExpectedMode: aws.RetryModeStandard, + }, + + "AWS_RETRY_MODE": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + }, + EnvironmentVariables: map[string]string{ + "AWS_RETRY_MODE": "adaptive", + }, + ExpectedMode: aws.RetryModeAdaptive, + }, + "config overrides AWS_RETRY_MODE": { + config: map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "retry_mode": "standard", + }, + EnvironmentVariables: map[string]string{ + "AWS_RETRY_MODE": "adaptive", + }, + ExpectedMode: aws.RetryModeStandard, + }, + } + + for name, tc := range testCases { + tc := tc + + 
t.Run(name, func(t *testing.T) { + servicemocks.InitSessionTestEnv(t) + + // Populate required fields + tc.config["bucket"] = "bucket" + tc.config["key"] = "key" + tc.config["region"] = "us-east-1" + + for k, v := range tc.EnvironmentVariables { + t.Setenv(k, v) + } + + sts := servicemocks.MockAwsApiServer("STS", []*servicemocks.MockEndpoint{ + servicemocks.MockStsGetCallerIdentityValidEndpoint, + }) + defer sts.Close() + + tc.config["sts_endpoint"] = sts.URL + tc.config["skip_credentials_validation"] = true + + b, diags := configureBackend(t, tc.config) + if diags.HasErrors() { + t.Fatalf("configuring backend: %s", diagnosticsString(diags)) + } + + if a, e := b.awsConfig.RetryMode, tc.ExpectedMode; a != e { + t.Errorf("expected mode %q, got: %q", e, a) + } + }) + } +} + +func setSharedConfigFile(t *testing.T, filename string) { + t.Helper() + t.Setenv("AWS_SDK_LOAD_CONFIG", "1") + t.Setenv("AWS_CONFIG_FILE", filename) +} + +func configureBackend(t *testing.T, config map[string]any) (*Backend, tfdiags.Diagnostics) { + b := New(encryption.StateEncryptionDisabled()).(*Backend) + configSchema := populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(config)) + + configSchema, diags := b.PrepareConfig(configSchema) + + if diags.HasErrors() { + return b, diags + } + + confDiags := b.Configure(configSchema) + diags = diags.Append(confDiags) + + return b, diags +} diff --git a/pkg/backend/remote-state/s3/backend_state.go b/pkg/backend/remote-state/s3/backend_state.go new file mode 100644 index 00000000000..c1c3a7dde44 --- /dev/null +++ b/pkg/backend/remote-state/s3/backend_state.go @@ -0,0 +1,243 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "errors" + "fmt" + "path" + "sort" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func (b *Backend) Workspaces() ([]string, error) { + const maxKeys = 1000 + + prefix := "" + + if b.workspaceKeyPrefix != "" { + prefix = b.workspaceKeyPrefix + "/" + } + + params := &s3.ListObjectsV2Input{ + Bucket: aws.String(b.bucketName), + Prefix: aws.String(prefix), + MaxKeys: aws.Int32(maxKeys), + } + + ctx := context.TODO() + + ctx, _ = attachLoggerToContext(ctx) + + wss := []string{backend.DefaultStateName} + pg := s3.NewListObjectsV2Paginator(b.s3Client, params) + + for pg.HasMorePages() { + page, err := pg.NextPage(ctx) + if err != nil { + var noBucketErr *types.NoSuchBucket + if errors.As(err, &noBucketErr) { + return nil, fmt.Errorf(errS3NoSuchBucket, err) + } + + // Ignoring AccessDenied errors for backward compatibility, + // since it should work for default state when no other workspaces present. 
+ var apiErr smithy.APIError + if errors.As(err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" { + break + } + + return nil, err + } + + for _, obj := range page.Contents { + ws := b.keyEnv(*obj.Key) + if ws != "" { + wss = append(wss, ws) + } + } + } + + sort.Strings(wss[1:]) + return wss, nil +} + +func (b *Backend) keyEnv(key string) string { + prefix := b.workspaceKeyPrefix + + if prefix == "" { + parts := strings.SplitN(key, "/", 2) + if len(parts) > 1 && parts[1] == b.keyName { + return parts[0] + } else { + return "" + } + } + + // add a slash to treat this as a directory + prefix += "/" + + parts := strings.SplitAfterN(key, prefix, 2) + if len(parts) < 2 { + return "" + } + + // shouldn't happen since we listed by prefix + if parts[0] != prefix { + return "" + } + + parts = strings.SplitN(parts[1], "/", 2) + + if len(parts) < 2 { + return "" + } + + // not our key, so don't include it in our listing + if parts[1] != b.keyName { + return "" + } + + return parts[0] +} + +func (b *Backend) DeleteWorkspace(name string, _ bool) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + + return client.Delete() +} + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path(name), + serverSideEncryption: b.serverSideEncryption, + customerEncryptionKey: b.customerEncryptionKey, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + skipS3Checksum: b.skipS3Checksum, + } + + return client, nil +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + + stateMgr := 
remote.NewState(client, b.encryption) + // Check to see if this state already exists. + // If we're trying to force-unlock a state, we can't take the lock before + // fetching the state. If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. + // + // If we need to force-unlock, but for some reason the state no longer + // exists, the user will have to use aws tools to manually fix the + // situation. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // We need to create the object so it's listed by States. + if !exists { + // take a lock on this state while we write it + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock s3 state: %w", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + // This is to ensure that no one beat us to writing a state between + // the `exists` check and taking the lock. 
+ if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(nil); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return path.Join(b.workspaceKeyPrefix, name, b.keyName) +} + +const errStateUnlock = ` +Error unlocking S3 state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +` diff --git a/pkg/backend/remote-state/s3/backend_test.go b/pkg/backend/remote-state/s3/backend_test.go new file mode 100644 index 00000000000..4084313a935 --- /dev/null +++ b/pkg/backend/remote-state/s3/backend_test.go @@ -0,0 +1,1819 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + dtypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/aws-sdk-go-v2/service/s3" + types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/mockdata" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const testBucketPrefix = "tofu-test" + +var ( + mockStsGetCallerIdentityRequestBody = url.Values{ + "Action": []string{"GetCallerIdentity"}, + "Version": []string{"2011-06-15"}, + }.Encode() +) + +// verify that we are doing ACC tests or the S3 tests specifically +func testACC(t *testing.T) { + t.Helper() + skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_S3_TEST") == "" + if skip { + t.Log("s3 backend tests require setting TF_ACC or TF_S3_TEST") + t.Skip() + } + t.Setenv("AWS_DEFAULT_REGION", "us-west-2") +} + +func TestBackend_impl(t *testing.T) { + var _ backend.Backend = new(Backend) +} + +func TestBackendConfig_original(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "region": "us-west-1", + "bucket": testBucketPrefix, + "key": "state", + "encrypt": true, + "dynamodb_table": "dynamoTable", + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + if 
b.awsConfig.Region != "us-west-1" { + t.Fatalf("Incorrect region was populated") + } + if b.awsConfig.RetryMaxAttempts != 5 { + t.Fatalf("Default max_retries was not set") + } + if b.bucketName != testBucketPrefix { + t.Fatalf("Incorrect bucketName was populated") + } + if b.keyName != "state" { + t.Fatalf("Incorrect keyName was populated") + } + + credentials, err := b.awsConfig.Credentials.Retrieve(context.TODO()) + if err != nil { + t.Fatalf("Error when requesting credentials") + } + if credentials.AccessKeyID == "" { + t.Fatalf("No Access Key Id was populated") + } + if credentials.SecretAccessKey == "" { + t.Fatalf("No Secret Access Key was populated") + } +} + +func TestBackendConfig_InvalidRegion(t *testing.T) { + testACC(t) + + cases := map[string]struct { + config map[string]any + expectedDiags tfdiags.Diagnostics + }{ + "with region validation": { + config: map[string]interface{}{ + "region": "nonesuch", + "bucket": testBucketPrefix, + "key": "state", + "skip_credentials_validation": true, + }, + expectedDiags: tfdiags.Diagnostics{ + tfdiags.AttributeValue( + tfdiags.Error, + "Invalid region value", + `Invalid AWS Region: nonesuch`, + cty.Path{cty.GetAttrStep{Name: "region"}}, + ), + }, + }, + "skip region validation": { + config: map[string]interface{}{ + "region": "nonesuch", + "bucket": testBucketPrefix, + "key": "state", + "skip_region_validation": true, + "skip_credentials_validation": true, + }, + expectedDiags: nil, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + b := New(encryption.StateEncryptionDisabled()) + configSchema := populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(tc.config)) + + configSchema, diags := b.PrepareConfig(configSchema) + if len(diags) > 0 { + t.Fatal(diags.ErrWithWarnings()) + } + + confDiags := b.Configure(configSchema) + diags = diags.Append(confDiags) + + if diff := cmp.Diff(diags, tc.expectedDiags, cmp.Comparer(diagnosticComparer)); diff != "" { + t.Errorf("unexpected 
diagnostics difference: %s", diff) + } + }) + } +} + +func TestBackendConfig_RegionEnvVar(t *testing.T) { + testACC(t) + config := map[string]interface{}{ + "bucket": testBucketPrefix, + "key": "state", + } + + cases := map[string]struct { + vars map[string]string + }{ + "AWS_REGION": { + vars: map[string]string{ + "AWS_REGION": "us-west-1", + }, + }, + + "AWS_DEFAULT_REGION": { + vars: map[string]string{ + "AWS_DEFAULT_REGION": "us-west-1", + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + for k, v := range tc.vars { + t.Setenv(k, v) + } + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)).(*Backend) + + if b.awsConfig.Region != "us-west-1" { + t.Fatalf("Incorrect region was populated") + } + }) + } +} + +func TestBackendConfig_DynamoDBEndpoint(t *testing.T) { + testACC(t) + + cases := map[string]struct { + config map[string]any + vars map[string]string + expected string + }{ + "none": { + expected: "", + }, + "config": { + config: map[string]any{ + "dynamodb_endpoint": "dynamo.test", + }, + expected: "dynamo.test", + }, + "envvar": { + vars: map[string]string{ + "AWS_DYNAMODB_ENDPOINT": "dynamo.test", + }, + expected: "dynamo.test", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + config := map[string]interface{}{ + "region": "us-west-1", + "bucket": testBucketPrefix, + "key": "state", + } + + if tc.vars != nil { + for k, v := range tc.vars { + t.Setenv(k, v) + } + } + + if tc.config != nil { + for k, v := range tc.config { + config[k] = v + } + } + + backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)) + }) + } +} + +func TestBackendConfig_S3Endpoint(t *testing.T) { + testACC(t) + + cases := map[string]struct { + config map[string]any + vars map[string]string + expected string + }{ + "none": { + expected: "", + }, + "config": { + config: map[string]any{ + "endpoint": "s3.test", + }, + 
expected: "s3.test", + }, + "envvar": { + vars: map[string]string{ + "AWS_S3_ENDPOINT": "s3.test", + }, + expected: "s3.test", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + config := map[string]interface{}{ + "region": "us-west-1", + "bucket": testBucketPrefix, + "key": "state", + } + + if tc.vars != nil { + for k, v := range tc.vars { + t.Setenv(k, v) + } + } + + if tc.config != nil { + for k, v := range tc.config { + config[k] = v + } + } + + backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(config)) + }) + } +} + +func TestBackendConfig_STSEndpoint(t *testing.T) { + testACC(t) + + stsMocks := []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + } + + cases := map[string]struct { + setConfig bool + setEnvVars bool + expectedDiags tfdiags.Diagnostics + }{ + "none": { + expectedDiags: tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Cannot assume IAM Role", + ``, + ), + }, + }, + "config": { + setConfig: true, + }, + "envvar": { + setEnvVars: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + config := map[string]interface{}{ + "region": "us-west-1", + "bucket": testBucketPrefix, + "key": "state", + "assume_role": 
map[string]interface{}{ + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + } + + closeSts, _, endpoint := mockdata.GetMockedAwsApiSession("STS", stsMocks) + defer closeSts() + + if tc.setEnvVars { + t.Setenv("AWS_STS_ENDPOINT", endpoint) + } + + if tc.setConfig { + config["sts_endpoint"] = endpoint + } + + b := New(encryption.StateEncryptionDisabled()) + configSchema := populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(config)) + + configSchema, diags := b.PrepareConfig(configSchema) + if len(diags) > 0 { + t.Fatal(diags.ErrWithWarnings()) + } + + confDiags := b.Configure(configSchema) + diags = diags.Append(confDiags) + + if diff := cmp.Diff(diags, tc.expectedDiags, cmp.Comparer(diagnosticSummaryComparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + }) + } +} + +func TestBackendConfig_AssumeRole(t *testing.T) { + testACC(t) + + testCases := []struct { + Config map[string]interface{} + Description string + MockStsEndpoints []*servicemocks.MockEndpoint + }{ + { + Config: map[string]interface{}{ + "bucket": testBucketPrefix, + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "role_arn", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + 
Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_duration_seconds": 3600, + "bucket": testBucketPrefix, + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_duration_seconds", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"3600"}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "bucket": testBucketPrefix, + "external_id": servicemocks.MockStsAssumeRoleExternalId, + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "external_id", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "ExternalId": []string{servicemocks.MockStsAssumeRoleExternalId}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, 
+ "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_policy": servicemocks.MockStsAssumeRolePolicy, + "bucket": "tofu-test", + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_policy", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "Policy": []string{servicemocks.MockStsAssumeRolePolicy}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_policy_arns": []interface{}{servicemocks.MockStsAssumeRolePolicyArn}, + "bucket": "tofu-test", + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_policy_arns", + 
MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "PolicyArns.member.1.arn": []string{servicemocks.MockStsAssumeRolePolicyArn}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_tags": map[string]interface{}{ + servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + "bucket": "tofu-test", + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_tags", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Tags.member.1.Key": []string{servicemocks.MockStsAssumeRoleTagKey}, + "Tags.member.1.Value": []string{servicemocks.MockStsAssumeRoleTagValue}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: 
"POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + { + Config: map[string]interface{}{ + "assume_role_tags": map[string]interface{}{ + servicemocks.MockStsAssumeRoleTagKey: servicemocks.MockStsAssumeRoleTagValue, + }, + "assume_role_transitive_tag_keys": []interface{}{servicemocks.MockStsAssumeRoleTagKey}, + "bucket": "tofu-test", + "key": "state", + "region": "us-west-1", + "role_arn": servicemocks.MockStsAssumeRoleArn, + "session_name": servicemocks.MockStsAssumeRoleSessionName, + }, + Description: "assume_role_transitive_tag_keys", + MockStsEndpoints: []*servicemocks.MockEndpoint{ + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: url.Values{ + "Action": []string{"AssumeRole"}, + "DurationSeconds": []string{"900"}, + "RoleArn": []string{servicemocks.MockStsAssumeRoleArn}, + "RoleSessionName": []string{servicemocks.MockStsAssumeRoleSessionName}, + "Tags.member.1.Key": []string{servicemocks.MockStsAssumeRoleTagKey}, + "Tags.member.1.Value": []string{servicemocks.MockStsAssumeRoleTagValue}, + "TransitiveTagKeys.member.1": []string{servicemocks.MockStsAssumeRoleTagKey}, + "Version": []string{"2011-06-15"}, + }.Encode()}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsAssumeRoleValidResponseBody, ContentType: "text/xml"}, + }, + { + Request: &servicemocks.MockRequest{Method: "POST", Uri: "/", Body: mockStsGetCallerIdentityRequestBody}, + Response: &servicemocks.MockResponse{StatusCode: 200, Body: servicemocks.MockStsGetCallerIdentityValidResponseBody, ContentType: "text/xml"}, + }, + }, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.Description, func(t *testing.T) { + closeSts, _, endpoint := mockdata.GetMockedAwsApiSession("STS", testCase.MockStsEndpoints) + defer closeSts() + + 
testCase.Config["sts_endpoint"] = endpoint + + b := New(encryption.StateEncryptionDisabled()) + diags := b.Configure(populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(testCase.Config))) + + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected error: %s", diag.Description().Summary) + } + } + }) + } +} + +func TestBackendConfig_PrepareConfigValidation(t *testing.T) { + cases := map[string]struct { + config cty.Value + expectedErr string + }{ + "null bucket": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.NullVal(cty.String), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "bucket" attribute value must not be empty.`, + }, + "empty bucket": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal(""), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "bucket" attribute value must not be empty.`, + }, + "null key": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.NullVal(cty.String), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "key" attribute value must not be empty.`, + }, + "empty key": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal(""), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "key" attribute value must not be empty.`, + }, + "key with leading slash": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("/leading-slash"), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "key" attribute value must not start or end with with "/".`, + }, + "key with trailing slash": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("trailing-slash/"), + "region": cty.StringVal("us-west-2"), + }), + expectedErr: `The "key" attribute value 
must not start or end with with "/".`, + }, + "null region": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.NullVal(cty.String), + }), + expectedErr: `The "region" attribute or the "AWS_REGION" or "AWS_DEFAULT_REGION" environment variables must be set.`, + }, + "empty region": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal(""), + }), + expectedErr: `The "region" attribute or the "AWS_REGION" or "AWS_DEFAULT_REGION" environment variables must be set.`, + }, + "workspace_key_prefix with leading slash": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "workspace_key_prefix": cty.StringVal("/env"), + }), + expectedErr: `The "workspace_key_prefix" attribute value must not start with "/".`, + }, + "workspace_key_prefix with trailing slash": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "workspace_key_prefix": cty.StringVal("env/"), + }), + expectedErr: `The "workspace_key_prefix" attribute value must not start with "/".`, + }, + "encyrption key conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "workspace_key_prefix": cty.StringVal("env"), + "sse_customer_key": cty.StringVal("1hwbcNPGWL+AwDiyGmRidTWAEVmCWMKbEHA+Es8w75o="), + "kms_key_id": cty.StringVal("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), + }), + expectedErr: `Only one of "kms_key_id" and "sse_customer_key" can be set`, + }, + "allowed forbidden account ids conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": 
cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "allowed_account_ids": cty.SetVal([]cty.Value{cty.StringVal("111111111111")}), + "forbidden_account_ids": cty.SetVal([]cty.Value{cty.StringVal("111111111111")}), + }), + expectedErr: "Invalid Attribute Combination: Only one of allowed_account_ids, forbidden_account_ids can be set.", + }, + "invalid retry mode": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "retry_mode": cty.StringVal("xyz"), + }), + expectedErr: `Invalid retry mode: Valid values are "standard" and "adaptive".`, + }, + "s3 endpoint conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "endpoint": cty.StringVal("x1"), + "endpoints": cty.ObjectVal(map[string]cty.Value{ + "s3": cty.StringVal("x2"), + }), + }), + expectedErr: `Invalid Attribute Combination: Only one of endpoints.s3, endpoint can be set.`, + }, + "iam endpoint conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "iam_endpoint": cty.StringVal("x1"), + "endpoints": cty.ObjectVal(map[string]cty.Value{ + "iam": cty.StringVal("x2"), + }), + }), + expectedErr: `Invalid Attribute Combination: Only one of endpoints.iam, iam_endpoint can be set.`, + }, + "sts endpoint conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "sts_endpoint": cty.StringVal("x1"), + "endpoints": cty.ObjectVal(map[string]cty.Value{ + "sts": cty.StringVal("x2"), + }), + }), + expectedErr: `Invalid Attribute Combination: Only one of endpoints.sts, sts_endpoint can be set.`, + }, + "dynamodb endpoint conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + 
"bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "dynamodb_endpoint": cty.StringVal("x1"), + "endpoints": cty.ObjectVal(map[string]cty.Value{ + "dynamodb": cty.StringVal("x2"), + }), + }), + expectedErr: `Invalid Attribute Combination: Only one of endpoints.dynamodb, dynamodb_endpoint can be set.`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + servicemocks.StashEnv(t) + + b := New(encryption.StateEncryptionDisabled()) + + _, valDiags := b.PrepareConfig(populateSchema(t, b.ConfigSchema(), tc.config)) + if tc.expectedErr != "" { + if valDiags.Err() != nil { + actualErr := valDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("unexpected validation result: %v", valDiags.Err()) + } + } else { + t.Fatal("expected an error, got none") + } + } else if valDiags.Err() != nil { + t.Fatalf("expected no error, got %s", valDiags.Err()) + } + }) + } +} + +func TestBackendConfig_PrepareConfigValidationWarnings(t *testing.T) { + cases := map[string]struct { + config cty.Value + expectedWarn string + }{ + "deprecated force path style": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "force_path_style": cty.BoolVal(false), + }), + expectedWarn: `Deprecated Parameter: Parameter "force_path_style" is deprecated. 
Use "use_path_style" instead.`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + servicemocks.StashEnv(t) + + b := New(encryption.StateEncryptionDisabled()) + + _, diags := b.PrepareConfig(populateSchema(t, b.ConfigSchema(), tc.config)) + if tc.expectedWarn != "" { + if err := diags.ErrWithWarnings(); err != nil { + if !strings.Contains(err.Error(), tc.expectedWarn) { + t.Fatalf("unexpected validation result: %v", err) + } + } else { + t.Fatal("expected a warning, got none") + } + } else if err := diags.ErrWithWarnings(); err != nil { + t.Fatalf("expected no warnings, got %s", err) + } + }) + } +} + +func TestBackendConfig_PrepareConfigWithEnvVars(t *testing.T) { + cases := map[string]struct { + config cty.Value + vars map[string]string + expectedErr string + }{ + "region env var AWS_REGION": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "AWS_REGION": "us-west-1", + }, + }, + "region env var AWS_DEFAULT_REGION": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "AWS_DEFAULT_REGION": "us-west-1", + }, + }, + "encyrption key conflict": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "region": cty.StringVal("us-west-2"), + "workspace_key_prefix": cty.StringVal("env"), + "kms_key_id": cty.StringVal("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), + }), + vars: map[string]string{ + "AWS_SSE_CUSTOMER_KEY": "1hwbcNPGWL+AwDiyGmRidTWAEVmCWMKbEHA+Es8w75o=", + }, + expectedErr: `Only one of "kms_key_id" and the environment variable "AWS_SSE_CUSTOMER_KEY" can be set`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + servicemocks.StashEnv(t) + + 
b := New(encryption.StateEncryptionDisabled()) + + for k, v := range tc.vars { + t.Setenv(k, v) + } + + _, valDiags := b.PrepareConfig(populateSchema(t, b.ConfigSchema(), tc.config)) + if tc.expectedErr != "" { + if valDiags.Err() != nil { + actualErr := valDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("unexpected validation result: %v", valDiags.Err()) + } + } else { + t.Fatal("expected an error, got none") + } + } else if valDiags.Err() != nil { + t.Fatalf("expected no error, got %s", valDiags.Err()) + } + }) + } +} + +// TestBackendConfig_proxy tests proxy configuration +func TestBackendConfig_proxy(t *testing.T) { + testACC(t) + + newURL := func(rawURL string) *url.URL { + o, err := url.Parse(rawURL) + if err != nil { + panic(err) + } + return o + } + + cases := map[string]struct { + config cty.Value + calledURL string + envVars map[string]string + wantProxyURL *url.URL + + // wantErrSubstr contains the part indicating proxy address + wantErrSubstr string + }{ + "shall set proxy using http_proxy config attr": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "http_proxy": cty.StringVal("http://foo.bar"), + }), + calledURL: "http://qux.quxx", + wantProxyURL: newURL("http://foo.bar"), + }, + "shall set proxy using HTTP_PROXY envvar": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + }), + envVars: map[string]string{ + "HTTP_PROXY": "http://foo.com", + }, + calledURL: "http://qux.quxx", + wantProxyURL: newURL("http://foo.com"), + }, + "shall set proxy using http_proxy config attr when HTTP_PROXY envvar is also set": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "http_proxy": cty.StringVal("http://foo.bar"), + }), + envVars: map[string]string{ + "HTTP_PROXY": "http://foo.com", + }, + calledURL: "http://qux.quxx", + 
wantProxyURL: newURL("http://foo.bar"), + }, + "shall set proxy using https_proxy config attr": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "https_proxy": cty.StringVal("https://foo.bar"), + }), + calledURL: "https://qux.quxx", + wantErrSubstr: "proxyconnect tcp: dial tcp: lookup foo.bar", + }, + "shall set proxy using HTTPS_PROXY envvar": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + }), + envVars: map[string]string{ + "HTTPS_PROXY": "https://foo.baz", + }, + calledURL: "https://qux.quxx", + wantErrSubstr: "proxyconnect tcp: dial tcp: lookup foo.baz", + }, + "shall set proxy using https_proxy config attr when HTTPS_PROXY envvar is also set": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "https_proxy": cty.StringVal("https://foo.bar"), + }), + envVars: map[string]string{ + "HTTPS_PROXY": "https://foo.com", + }, + calledURL: "https://qux.quxx", + wantErrSubstr: "proxyconnect tcp: dial tcp: lookup foo.bar", + }, + "shall satisfy no_proxy config attr": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "no_proxy": cty.StringVal("http://foo.bar,1.2.3.4"), + }), + calledURL: "http://foo.bar", + }, + "shall satisfy no proxy set using NO_PROXY envvar": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + }), + envVars: map[string]string{ + "NO_PROXY": "http://foo.bar,1.2.3.4", + }, + calledURL: "http://foo.bar", + }, + "shall satisfy no_proxy config attr when envvar NO_PROXY is also set": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "no_proxy": cty.StringVal("http://foo.qux,1.2.3.4"), + }), + envVars: map[string]string{ + "NO_PROXY": "http://foo.bar", + }, + 
calledURL: "http://foo.qux", + }, + "shall satisfy use http_proxy when no_proxy is also set to identical value": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "http_proxy": cty.StringVal("http://foo.bar"), + "no_proxy": cty.StringVal("http://foo.bar"), + }), + calledURL: "http://qux.quxx", + wantProxyURL: newURL("http://foo.bar"), + }, + "shall satisfy use https_proxy when no_proxy is also set to identical value": { + config: cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + "key": cty.StringVal("test"), + "https_proxy": cty.StringVal("https://foo.bar"), + "no_proxy": cty.StringVal("http://foo.bar"), + }), + calledURL: "https://qux.quxx", + wantErrSubstr: "proxyconnect tcp: dial tcp: lookup foo.bar", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + for k, v := range tc.envVars { + t.Setenv(k, v) + } + + b := New(encryption.StateEncryptionDisabled()) + + got := b.Configure(populateSchema(t, b.ConfigSchema(), tc.config)) + if got.HasErrors() != (tc.wantErrSubstr != "") { + t.Fatalf("unexpected error: %v", got.Err()) + } + + switch got.HasErrors() { + case true: + if !strings.Contains(got.Err().Error(), tc.wantErrSubstr) { + t.Fatalf("unexpected error: want= %s, got= %s", tc.wantErrSubstr, got.Err().Error()) + } + case false: + gotProxyURL, err := b.(*Backend).awsConfig.HTTPClient.(*awshttp.BuildableClient).GetTransport().Proxy(&http.Request{ + URL: newURL(tc.calledURL), + }) + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + if !reflect.DeepEqual(gotProxyURL, tc.wantProxyURL) { + t.Fatalf("unexpected proxy URL: want= %s, got= %s", tc.wantProxyURL, gotProxyURL) + } + } + }) + } +} + +func TestBackend(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + keyName := "testState" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), 
backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "region": "us-west-1", + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b.s3Client, bucketName) + + backend.TestBackendStates(t, b) +} + +func TestBackendLocked(t *testing.T) { + testACC(t) + + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + keyName := "test/state" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + "region": "us-west-1", + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + "region": "us-west-1", + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b1.s3Client, bucketName, b1.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b1.s3Client, bucketName) + createDynamoDBTable(ctx, t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(ctx, t, b1.dynClient, bucketName) + + backend.TestBackendStateLocks(t, b1, b2) + backend.TestBackendStateForceUnlock(t, b1, b2) +} + +func TestBackendSSECustomerKeyConfig(t *testing.T) { + testACC(t) + + testCases := map[string]struct { + customerKey string + expectedErr string + }{ + "invalid length": { + customerKey: "test", + expectedErr: `sse_customer_key must be 44 characters in length`, + }, + "invalid encoding": { + customerKey: "====CT70aTYB2JGff7AjQtwbiLkwH4npICay1PWtmdka", + expectedErr: `sse_customer_key must be base64 encoded`, + }, + "valid": { + customerKey: "4Dm1n4rphuFgawxuzY/bEfvLf6rYK0gIjfaDSLlfXNk=", + }, + } + + for name, testCase := range testCases { + testCase := testCase + + t.Run(name, func(t *testing.T) 
{ + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + config := map[string]interface{}{ + "bucket": bucketName, + "encrypt": true, + "key": "test-SSE-C", + "sse_customer_key": testCase.customerKey, + "region": "us-west-1", + } + + b := New(encryption.StateEncryptionDisabled()).(*Backend) + diags := b.Configure(populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(config))) + + if testCase.expectedErr != "" { + if diags.Err() != nil { + actualErr := diags.Err().Error() + if !strings.Contains(actualErr, testCase.expectedErr) { + t.Fatalf("unexpected validation result: %v", diags.Err()) + } + } else { + t.Fatal("expected an error, got none") + } + } else { + if diags.Err() != nil { + t.Fatalf("expected no error, got %s", diags.Err()) + } + if string(b.customerEncryptionKey) != string(must(base64.StdEncoding.DecodeString(testCase.customerKey))) { + t.Fatal("unexpected value for customer encryption key") + } + + ctx := context.TODO() + createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b.s3Client, bucketName) + + backend.TestBackendStates(t, b) + } + }) + } +} + +func TestBackendSSECustomerKeyEnvVar(t *testing.T) { + testACC(t) + + testCases := map[string]struct { + customerKey string + expectedErr string + }{ + "invalid length": { + customerKey: "test", + expectedErr: `The environment variable "AWS_SSE_CUSTOMER_KEY" must be 44 characters in length`, + }, + "invalid encoding": { + customerKey: "====CT70aTYB2JGff7AjQtwbiLkwH4npICay1PWtmdka", + expectedErr: `The environment variable "AWS_SSE_CUSTOMER_KEY" must be base64 encoded`, + }, + "valid": { + customerKey: "4Dm1n4rphuFgawxuzY/bEfvLf6rYK0gIjfaDSLlfXNk=", + }, + } + + for name, testCase := range testCases { + testCase := testCase + + t.Run(name, func(t *testing.T) { + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + config := map[string]interface{}{ + "bucket": bucketName, + "encrypt": true, + "key": 
"test-SSE-C", + "region": "us-west-1", + } + + t.Setenv("AWS_SSE_CUSTOMER_KEY", testCase.customerKey) + + b := New(encryption.StateEncryptionDisabled()).(*Backend) + diags := b.Configure(populateSchema(t, b.ConfigSchema(), hcl2shim.HCL2ValueFromConfigValue(config))) + + if testCase.expectedErr != "" { + if diags.Err() != nil { + actualErr := diags.Err().Error() + if !strings.Contains(actualErr, testCase.expectedErr) { + t.Fatalf("unexpected validation result: %v", diags.Err()) + } + } else { + t.Fatal("expected an error, got none") + } + } else { + if diags.Err() != nil { + t.Fatalf("expected no error, got %s", diags.Err()) + } + if string(b.customerEncryptionKey) != string(must(base64.StdEncoding.DecodeString(testCase.customerKey))) { + t.Fatal("unexpected value for customer encryption key") + } + + ctx := context.TODO() + createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b.s3Client, bucketName) + + backend.TestBackendStates(t, b) + } + }) + } +} + +// add some extra junk in S3 to try and confuse the env listing. +func TestBackendExtraPaths(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + keyName := "test/state/tfstate" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b.s3Client, bucketName) + + // put multiple states in old env paths. 
+ s1 := states.NewState() + s2 := states.NewState() + + // RemoteClient to Put things in various paths + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path("s1"), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + } + + // Write the first state + stateMgr := &remote.State{Client: client} + if err := stateMgr.WriteState(s1); err != nil { + t.Fatal(err) + } + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + + // Write the second state + // Note a new state manager - otherwise, because these + // states are equal, the state will not Put to the remote + client.path = b.path("s2") + stateMgr2 := &remote.State{Client: client} + if err := stateMgr2.WriteState(s2); err != nil { + t.Fatal(err) + } + if err := stateMgr2.PersistState(nil); err != nil { + t.Fatal(err) + } + + s2Lineage := stateMgr2.StateSnapshotMeta().Lineage + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // put a state in an env directory name + client.path = b.workspaceKeyPrefix + "/error" + if err := stateMgr.WriteState(states.NewState()); err != nil { + t.Fatal(err) + } + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // add state with the wrong key for an existing env + client.path = b.workspaceKeyPrefix + "/s2/notTestState" + if err := stateMgr.WriteState(states.NewState()); err != nil { + t.Fatal(err) + } + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } + + // remove the state with extra subkey + if err := client.Delete(); err != nil { + t.Fatal(err) + } + + // delete the real workspace + if err := b.DeleteWorkspace("s2", true); err != nil { + t.Fatal(err) + } + + 
if err := checkStateList(b, []string{"default", "s1"}); err != nil { + t.Fatal(err) + } + + // fetch that state again, which should produce a new lineage + s2Mgr, err := b.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if s2Mgr.(*remote.State).StateSnapshotMeta().Lineage == s2Lineage { + t.Fatal("state s2 was not deleted") + } + _ = s2Mgr.State() // We need the side-effect + s2Lineage = stateMgr.StateSnapshotMeta().Lineage + + // add a state with a key that matches an existing environment dir name + client.path = b.workspaceKeyPrefix + "/s2/" + if err := stateMgr.WriteState(states.NewState()); err != nil { + t.Fatal(err) + } + if err := stateMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + + // make sure s2 is OK + s2Mgr, err = b.StateMgr("s2") + if err != nil { + t.Fatal(err) + } + if err := s2Mgr.RefreshState(); err != nil { + t.Fatal(err) + } + + if stateMgr.StateSnapshotMeta().Lineage != s2Lineage { + t.Fatal("we got the wrong state for s2") + } + + if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil { + t.Fatal(err) + } +} + +// ensure we can separate the workspace prefix when it also matches the prefix +// of the workspace name itself. 
+func TestBackendPrefixInWorkspace(t *testing.T) {
+	testACC(t)
+	bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix())
+
+	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":               bucketName,
+		"key":                  "test-env.tfstate",
+		"workspace_key_prefix": "env",
+	})).(*Backend)
+
+	ctx := context.TODO()
+	createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region)
+	defer deleteS3Bucket(ctx, t, b.s3Client, bucketName)
+
+	// get a state that contains the prefix as a substring
+	sMgr, err := b.StateMgr("env-1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := sMgr.RefreshState(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := checkStateList(b, []string{"default", "env-1"}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestKeyEnv exercises workspace/key resolution across three different
+// workspace_key_prefix settings — explicitly empty, a custom prefix that
+// itself contains the default "env:" token, and the implicit default — and
+// then runs the generic backend state test suite against each backend.
+func TestKeyEnv(t *testing.T) {
+	testACC(t)
+	keyName := "some/paths/tfstate"
+
+	bucket0Name := fmt.Sprintf("%s-%x-0", testBucketPrefix, time.Now().Unix())
+	b0 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":               bucket0Name,
+		"key":                  keyName,
+		"encrypt":              true,
+		"workspace_key_prefix": "",
+	})).(*Backend)
+
+	ctx := context.TODO()
+	createS3Bucket(ctx, t, b0.s3Client, bucket0Name, b0.awsConfig.Region)
+	defer deleteS3Bucket(ctx, t, b0.s3Client, bucket0Name)
+
+	bucket1Name := fmt.Sprintf("%s-%x-1", testBucketPrefix, time.Now().Unix())
+	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":               bucket1Name,
+		"key":                  keyName,
+		"encrypt":              true,
+		"workspace_key_prefix": "project/env:",
+	})).(*Backend)
+
+	createS3Bucket(ctx, t, b1.s3Client, bucket1Name, b1.awsConfig.Region)
+	defer deleteS3Bucket(ctx, t, b1.s3Client, bucket1Name)
+
+	// b2 omits workspace_key_prefix entirely, so the backend's default
+	// prefix ("env:") is expected in the key lookups below.
+	bucket2Name := fmt.Sprintf("%s-%x-2", testBucketPrefix, time.Now().Unix())
+	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":  bucket2Name,
+		"key":     keyName,
+		"encrypt": true,
+	})).(*Backend)
+
+	createS3Bucket(ctx, t, b2.s3Client, bucket2Name, b2.awsConfig.Region)
+	defer deleteS3Bucket(ctx, t, b2.s3Client, bucket2Name)
+
+	if err := testGetWorkspaceForKey(b0, "some/paths/tfstate", ""); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testGetWorkspaceForKey(b0, "ws1/some/paths/tfstate", "ws1"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testGetWorkspaceForKey(b1, "project/env:/ws1/some/paths/tfstate", "ws1"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testGetWorkspaceForKey(b1, "project/env:/ws2/some/paths/tfstate", "ws2"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testGetWorkspaceForKey(b2, "env:/ws3/some/paths/tfstate", "ws3"); err != nil {
+		t.Fatal(err)
+	}
+
+	backend.TestBackendStates(t, b0)
+	backend.TestBackendStates(t, b1)
+	backend.TestBackendStates(t, b2)
+}
+
+// Test_pathString checks the textual rendering of cty.Path values, covering
+// attribute steps, nesting, and both numeric and string index steps.
+func Test_pathString(t *testing.T) {
+	tests := []struct {
+		name     string
+		path     cty.Path
+		expected string
+	}{
+		{
+			name:     "Simple Path",
+			path:     cty.Path{cty.GetAttrStep{Name: "attr"}},
+			expected: "attr",
+		},
+		{
+			name: "Nested Path",
+			path: cty.Path{
+				cty.GetAttrStep{Name: "parent"},
+				cty.GetAttrStep{Name: "child"},
+			},
+			expected: "parent.child",
+		},
+		{
+			name: "Indexed Path",
+			path: cty.Path{
+				cty.GetAttrStep{Name: "array"},
+				cty.IndexStep{Key: cty.NumberIntVal(0)},
+			},
+			expected: "array[0]",
+		},
+		{
+			name: "Mixed Path",
+			path: cty.Path{
+				cty.GetAttrStep{Name: "parent"},
+				cty.IndexStep{Key: cty.StringVal("key")},
+				cty.GetAttrStep{Name: "child"},
+			},
+			expected: "parent[key].child",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			result := pathString(test.path)
+			if result != test.expected {
+				t.Errorf("Expected: %s, Got: %s", test.expected, result)
+			}
+		})
+	}
+}
+
+// Checks that endpoints given without a scheme get "https://" prepended,
+// while any existing scheme (http, https, or otherwise) is left untouched.
+// NOTE(review): "Nessesary" in the function name is a pre-existing
+// misspelling of "Necessary" in the implementation; it cannot be renamed
+// from the test side alone.
+func TestBackend_includeProtoIfNessesary(t *testing.T) {
+	tests := []struct {
+		name     string
+		provided string
+		expected string
+	}{
+		{
+			name:     "Unmodified S3",
+			provided: "https://s3.us-east-1.amazonaws.com",
+			expected: "https://s3.us-east-1.amazonaws.com",
+		},
+		{
+			name:     "Modified S3",
+			provided: "s3.us-east-1.amazonaws.com",
+			expected: "https://s3.us-east-1.amazonaws.com",
+		},
+		{
+			name:     "Unmodified With Port",
+			provided: "http://localhost:9000/",
+			expected: "http://localhost:9000/",
+		},
+		{
+			name:     "Modified With Port",
+			provided: "localhost:9000/",
+			expected: "https://localhost:9000/",
+		},
+		{
+			name:     "Umodified with strange proto",
+			provided: "ftp://localhost:9000/",
+			expected: "ftp://localhost:9000/",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			result := includeProtoIfNessesary(test.provided)
+			if result != test.expected {
+				t.Errorf("Expected: %s, Got: %s", test.expected, result)
+			}
+		})
+	}
+}
+
+// Verifies that a minimal configuration (bucket and key only) coerces
+// cleanly against the backend's config schema.
+func TestBackend_schemaCoercionMinimal(t *testing.T) {
+	example := cty.ObjectVal(map[string]cty.Value{
+		"bucket": cty.StringVal("my-bucket"),
+		"key":    cty.StringVal("state.tf"),
+	})
+	schema := New(encryption.StateEncryptionDisabled()).ConfigSchema()
+	_, err := schema.CoerceValue(example)
+	if err != nil {
+		t.Errorf("Unexpected error: %s", err.Error())
+	}
+}
+
+// testGetWorkspaceForKey asserts that b.keyEnv maps the given S3 object key
+// to the expected workspace name.
+func testGetWorkspaceForKey(b *Backend, key string, expected string) error {
+	if actual := b.keyEnv(key); actual != expected {
+		return fmt.Errorf("incorrect workspace for key[%q]. Expected[%q]: Actual[%q]", key, expected, actual)
+	}
+	return nil
+}
+
+// checkStateList fails unless the backend reports exactly the expected
+// workspace names, in order.
+func checkStateList(b backend.Backend, expected []string) error {
+	states, err := b.Workspaces()
+	if err != nil {
+		return err
+	}
+
+	if !reflect.DeepEqual(states, expected) {
+		return fmt.Errorf("incorrect states listed: %q", states)
+	}
+	return nil
+}
+
+// createS3Bucket creates a test bucket in the given region, failing the
+// test immediately on error.
+func createS3Bucket(ctx context.Context, t *testing.T, s3Client *s3.Client, bucketName, region string) {
+	createBucketReq := &s3.CreateBucketInput{
+		Bucket: &bucketName,
+	}
+
+	// Regions outside of us-east-1 require the appropriate LocationConstraint
+	// to be specified in order to create the bucket in the desired region.
+	// https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html
+	if region != "us-east-1" {
+		createBucketReq.CreateBucketConfiguration = &types.CreateBucketConfiguration{
+			LocationConstraint: types.BucketLocationConstraint(region),
+		}
+	}
+
+	// Be clear about what we're doing in case the user needs to clean
+	// this up later.
+	t.Logf("creating S3 bucket %s in %s", bucketName, region)
+	_, err := s3Client.CreateBucket(ctx, createBucketReq)
+	if err != nil {
+		t.Fatal("failed to create test S3 bucket:", err)
+	}
+}
+
+// deleteS3Bucket best-effort deletes every object in the bucket and then the
+// bucket itself; failures only log a warning (the test has already run).
+func deleteS3Bucket(ctx context.Context, t *testing.T, s3Client *s3.Client, bucketName string) {
+	warning := "WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. (error was %s)"
+
+	// first we have to get rid of the env objects, or we can't delete the bucket
+	resp, err := s3Client.ListObjects(ctx, &s3.ListObjectsInput{Bucket: &bucketName})
+	if err != nil {
+		t.Logf(warning, err)
+		return
+	}
+	for _, obj := range resp.Contents {
+		if _, err := s3Client.DeleteObject(ctx, &s3.DeleteObjectInput{Bucket: &bucketName, Key: obj.Key}); err != nil {
+			// this will need cleanup no matter what, so just warn and exit
+			t.Logf(warning, err)
+			return
+		}
+	}
+
+	if _, err := s3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: &bucketName}); err != nil {
+		t.Logf(warning, err)
+	}
+}
+
+// create the dynamoDB table, and wait until we can query it.
+func createDynamoDBTable(ctx context.Context, t *testing.T, dynClient *dynamodb.Client, tableName string) {
+	createInput := &dynamodb.CreateTableInput{
+		AttributeDefinitions: []dtypes.AttributeDefinition{
+			{
+				AttributeName: aws.String("LockID"),
+				AttributeType: dtypes.ScalarAttributeTypeS,
+			},
+		},
+		KeySchema: []dtypes.KeySchemaElement{
+			{
+				AttributeName: aws.String("LockID"),
+				KeyType:       dtypes.KeyTypeHash,
+			},
+		},
+		ProvisionedThroughput: &dtypes.ProvisionedThroughput{
+			ReadCapacityUnits:  aws.Int64(5),
+			WriteCapacityUnits: aws.Int64(5),
+		},
+		TableName: aws.String(tableName),
+	}
+
+	_, err := dynClient.CreateTable(ctx, createInput)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// now wait until it's ACTIVE
+	start := time.Now()
+	time.Sleep(time.Second)
+
+	describeInput := &dynamodb.DescribeTableInput{
+		TableName: aws.String(tableName),
+	}
+
+	// poll every 3s, giving up after one minute
+	for {
+		resp, err := dynClient.DescribeTable(ctx, describeInput)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if resp.Table.TableStatus == dtypes.TableStatusActive {
+			return
+		}
+
+		if time.Since(start) > time.Minute {
+			t.Fatalf("timed out creating DynamoDB table %s", tableName)
+		}
+
+		time.Sleep(3 * time.Second)
+	}
+
+}
+
+// deleteDynamoDBTable best-effort removes the lock table, logging a warning
+// on failure rather than failing the (already finished) test.
+func deleteDynamoDBTable(ctx context.Context, t *testing.T, dynClient *dynamodb.Client, tableName string) {
+	params := &dynamodb.DeleteTableInput{
+		TableName: aws.String(tableName),
+	}
+	_, err := dynClient.DeleteTable(ctx, params)
+	if err != nil {
+		t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err)
+	}
+}
+
+// populateSchema coerces value into the type implied by schema, failing the
+// test if the value does not conform.
+func populateSchema(t *testing.T, schema *configschema.Block, value cty.Value) cty.Value {
+	ty := schema.ImpliedType()
+	var path cty.Path
+	val, err := unmarshal(value, ty, path)
+	if err != nil {
+		t.Fatalf("populating schema: %s", err)
+	}
+	return val
+}
+
+// unmarshal recursively converts value to conform to ty; only the type kinds
+// exercised by these tests are implemented (list/tuple cases are stubbed out).
+func unmarshal(value cty.Value, ty cty.Type, path cty.Path) (cty.Value, error) {
+	switch {
+	case ty.IsPrimitiveType():
+		return value, nil
+	// case ty.IsListType():
+	// 	return unmarshalList(value, ty.ElementType(), path)
+	case ty.IsSetType():
+		return unmarshalSet(value, ty.ElementType(), path)
+	case ty.IsMapType():
+		return unmarshalMap(value, ty.ElementType(), path)
+	// case ty.IsTupleType():
+	// 	return unmarshalTuple(value, ty.TupleElementTypes(), path)
+	case ty.IsObjectType():
+		return unmarshalObject(value, ty.AttributeTypes(), path)
+	default:
+		return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName())
+	}
+}
+
+// unmarshalSet rebuilds dec as a set; elements are used as-is — unlike
+// unmarshalObject there is no recursive conversion of element values.
+func unmarshalSet(dec cty.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
+	if dec.IsNull() {
+		return dec, nil
+	}
+
+	length := dec.LengthInt()
+
+	if length == 0 {
+		return cty.SetValEmpty(ety), nil
+	}
+
+	vals := make([]cty.Value, 0, length)
+	dec.ForEachElement(func(key, val cty.Value) (stop bool) {
+		vals = append(vals, val)
+		return
+	})
+
+	return cty.SetVal(vals), nil
+}
+
+// unmarshalMap rebuilds dec as a map keyed by the string form of each
+// element key; values are used as-is (no recursive conversion).
+func unmarshalMap(dec cty.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
+	if dec.IsNull() {
+		return dec, nil
+	}
+
+	length := dec.LengthInt()
+
+	if length == 0 {
+		return cty.MapValEmpty(ety), nil
+	}
+
+	vals := make(map[string]cty.Value, length)
+	dec.ForEachElement(func(key, val cty.Value) (stop bool) {
+		k := stringValue(key)
+		vals[k] = val
+		return
+	})
+
+	return cty.MapVal(vals), nil
+}
+
+// unmarshalObject converts each declared attribute recursively; attributes
+// absent from dec become typed nulls so the result matches atys exactly.
+func unmarshalObject(dec cty.Value, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
+	if dec.IsNull() {
+		return dec, nil
+	}
+	valueTy := dec.Type()
+
+	vals := make(map[string]cty.Value, len(atys))
+	path = append(path, nil)
+	for key, aty := range atys {
+		// reuse the last path element for error reporting on this attribute
+		path[len(path)-1] = cty.IndexStep{
+			Key: cty.StringVal(key),
+		}
+
+		if !valueTy.HasAttribute(key) {
+			vals[key] = cty.NullVal(aty)
+		} else {
+			val, err := unmarshal(dec.GetAttr(key), aty, path)
+			if err != nil {
+				return cty.DynamicVal, err
+			}
+			vals[key] = val
+		}
+	}
+
+	return cty.ObjectVal(vals), nil
+}
+
+// must panics on err and otherwise returns v; test-setup convenience.
+func must[T any](v T, err error) T {
+	if err != nil {
+		panic(err)
+	} else {
+		return v
+	}
+}
diff --git a/pkg/backend/remote-state/s3/client.go b/pkg/backend/remote-state/s3/client.go
new file mode 100644
index 00000000000..163a4efa4e7
--- /dev/null
+++ b/pkg/backend/remote-state/s3/client.go
@@ -0,0 +1,488 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package s3
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	dtypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	multierror "github.com/hashicorp/go-multierror"
+	uuid "github.com/hashicorp/go-uuid"
+
+	"github.com/kubegems/opentofu/pkg/states/remote"
+	"github.com/kubegems/opentofu/pkg/states/statemgr"
+)
+
+// Store the last saved serial in dynamo with this suffix for consistency checks.
+const ( + s3EncryptionAlgorithm = "AES256" + stateIDSuffix = "-md5" + s3ErrCodeInternalError = "InternalError" +) + +type RemoteClient struct { + s3Client *s3.Client + dynClient *dynamodb.Client + bucketName string + path string + serverSideEncryption bool + customerEncryptionKey []byte + acl string + kmsKeyID string + ddbTable string + + skipS3Checksum bool +} + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. + consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + ctx := context.TODO() + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.get(ctx) + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. 
+ var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(ctx); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying S3 RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + + return payload, err +} + +func (c *RemoteClient) get(ctx context.Context) (*remote.Payload, error) { + var output *s3.GetObjectOutput + var err error + + ctx, _ = attachLoggerToContext(ctx) + + inputHead := &s3.HeadObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption && c.customerEncryptionKey != nil { + inputHead.SSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(c.customerEncryptionKey)) + inputHead.SSECustomerAlgorithm = aws.String(s3EncryptionAlgorithm) + inputHead.SSECustomerKeyMD5 = aws.String(c.getSSECustomerKeyMD5()) + } + + // Head works around some s3 compatible backends not handling missing GetObject requests correctly (ex: minio Get returns Missing Bucket) + _, err = c.s3Client.HeadObject(ctx, inputHead) + if err != nil { + var nb *types.NoSuchBucket + if errors.As(err, &nb) { + return nil, fmt.Errorf(errS3NoSuchBucket, err) + } + + var nk *types.NotFound + if errors.As(err, &nk) { + return nil, nil + } + + return nil, err + } + + input := &s3.GetObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption && c.customerEncryptionKey != nil { + input.SSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(c.customerEncryptionKey)) + input.SSECustomerAlgorithm = aws.String(s3EncryptionAlgorithm) + input.SSECustomerKeyMD5 = 
aws.String(c.getSSECustomerKeyMD5())
+	}
+
+	output, err = c.s3Client.GetObject(ctx, input)
+	if err != nil {
+		var nb *types.NoSuchBucket
+		if errors.As(err, &nb) {
+			return nil, fmt.Errorf(errS3NoSuchBucket, err)
+		}
+
+		var nk *types.NoSuchKey
+		if errors.As(err, &nk) {
+			return nil, nil
+		}
+
+		return nil, err
+	}
+
+	defer output.Body.Close()
+
+	buf := bytes.NewBuffer(nil)
+	if _, err := io.Copy(buf, output.Body); err != nil {
+		return nil, fmt.Errorf("Failed to read remote state: %w", err)
+	}
+
+	sum := md5.Sum(buf.Bytes())
+	payload := &remote.Payload{
+		Data: buf.Bytes(),
+		MD5:  sum[:],
+	}
+
+	// If there was no data, then return nil
+	if len(payload.Data) == 0 {
+		return nil, nil
+	}
+
+	return payload, nil
+}
+
+func (c *RemoteClient) Put(data []byte) error {
+	contentType := "application/json"
+	contentLength := int64(len(data))
+
+	i := &s3.PutObjectInput{
+		ContentType:   &contentType,
+		ContentLength: aws.Int64(contentLength),
+		Body:          bytes.NewReader(data),
+		Bucket:        &c.bucketName,
+		Key:           &c.path,
+	}
+
+	if !c.skipS3Checksum {
+		i.ChecksumAlgorithm = types.ChecksumAlgorithmSha256
+
+		// There is a conflict in the aws-sdk-go-v2 that prevents it from working with many s3 compatible services
+		// Since we can pre-compute the hash here, we can work around it.
+ // ref: https://github.com/aws/aws-sdk-go-v2/issues/1689 + algo := sha256.New() + algo.Write(data) + sum64str := base64.StdEncoding.EncodeToString(algo.Sum(nil)) + i.ChecksumSHA256 = &sum64str + } + + if c.serverSideEncryption { + if c.kmsKeyID != "" { + i.SSEKMSKeyId = &c.kmsKeyID + i.ServerSideEncryption = types.ServerSideEncryptionAwsKms + } else if c.customerEncryptionKey != nil { + i.SSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(c.customerEncryptionKey)) + i.SSECustomerAlgorithm = aws.String(string(s3EncryptionAlgorithm)) + i.SSECustomerKeyMD5 = aws.String(c.getSSECustomerKeyMD5()) + } else { + i.ServerSideEncryption = s3EncryptionAlgorithm + } + } + + if c.acl != "" { + i.ACL = types.ObjectCannedACL(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + + ctx := context.TODO() + ctx, _ = attachLoggerToContext(ctx) + + _, err := c.s3Client.PutObject(ctx, i) + if err != nil { + return fmt.Errorf("failed to upload state: %w", err) + } + + sum := md5.Sum(data) + if err := c.putMD5(ctx, sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
+ return fmt.Errorf("failed to store state MD5: %w", err) + + } + + return nil +} + +func (c *RemoteClient) Delete() error { + ctx := context.TODO() + ctx, _ = attachLoggerToContext(ctx) + + _, err := c.s3Client.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + }) + + if err != nil { + return err + } + + if err := c.deleteMD5(ctx); err != nil { + log.Printf("error deleting state md5: %s", err) + } + + return nil +} + +func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) { + if c.ddbTable == "" { + return "", nil + } + + info.Path = c.lockPath() + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath()}, + "Info": &dtypes.AttributeValueMemberS{Value: string(info.Marshal())}, + }, + TableName: aws.String(c.ddbTable), + ConditionExpression: aws.String("attribute_not_exists(LockID)"), + } + + ctx := context.TODO() + _, err := c.dynClient.PutItem(ctx, putParams) + if err != nil { + lockInfo, infoErr := c.getLockInfo(ctx) + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + lockErr := &statemgr.LockError{ + Err: err, + Info: lockInfo, + } + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5(ctx context.Context) ([]byte, error) { + if c.ddbTable == "" { + return nil, nil + } + + getParams := &dynamodb.GetItemInput{ + Key: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath() + stateIDSuffix}, + }, + ProjectionExpression: aws.String("LockID, Digest"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(ctx, getParams) + if err != nil { + return nil, err + } + + var val string + if v, ok := resp.Item["Digest"]; ok { + if v, ok := v.(*dtypes.AttributeValueMemberS); ok { 
+ val = v.Value + } + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state so that clients can check for stale state files. +func (c *RemoteClient) putMD5(ctx context.Context, sum []byte) error { + if c.ddbTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath() + stateIDSuffix}, + "Digest": &dtypes.AttributeValueMemberS{Value: hex.EncodeToString(sum)}, + }, + TableName: aws.String(c.ddbTable), + } + _, err := c.dynClient.PutItem(ctx, putParams) + if err != nil { + log.Printf("[WARN] failed to record state serial in dynamodb: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5(ctx context.Context) error { + if c.ddbTable == "" { + return nil + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath() + stateIDSuffix}, + }, + TableName: aws.String(c.ddbTable), + } + if _, err := c.dynClient.DeleteItem(ctx, params); err != nil { + return err + } + return nil +} + +func (c *RemoteClient) getLockInfo(ctx context.Context) (*statemgr.LockInfo, error) { + getParams := &dynamodb.GetItemInput{ + Key: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath()}, + }, + ProjectionExpression: aws.String("LockID, Info"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(ctx, getParams) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := resp.Item["Info"]; ok { + if v, ok := v.(*dtypes.AttributeValueMemberS); ok { + infoData = v.Value + } + } + + lockInfo := &statemgr.LockInfo{} + err = 
json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.ddbTable == "" { + return nil + } + + lockErr := &statemgr.LockError{} + ctx := context.TODO() + + // TODO: store the path and lock ID in separate fields, and have proper + // projection expression only delete the lock if both match, rather than + // checking the ID from the info field first. + lockInfo, err := c.getLockInfo(ctx) + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %w", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]dtypes.AttributeValue{ + "LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath()}, + }, + TableName: aws.String(c.ddbTable), + } + _, err = c.dynClient.DeleteItem(ctx, params) + + if err != nil { + lockErr.Err = err + return lockErr + } + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.path) +} + +func (c *RemoteClient) getSSECustomerKeyMD5() string { + b := md5.Sum(c.customerEncryptionKey) + return base64.StdEncoding.EncodeToString(b[:]) +} + +const errBadChecksumFmt = `state data in S3 does not have the expected content. + +This may be caused by unusually long delays in S3 processing a previous state +update. Please wait for a minute or two and try again. If this problem +persists, and neither S3 nor DynamoDB are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +DynamoDB table to the following value: %x +` + +const errS3NoSuchBucket = `S3 bucket does not exist. + +The referenced S3 bucket must have been previously created. If the S3 bucket +was created within the last minute, please wait for a minute or two and try +again. 
+ +Error: %w +` diff --git a/pkg/backend/remote-state/s3/client_test.go b/pkg/backend/remote-state/s3/client_test.go new file mode 100644 index 00000000000..97d96d931c8 --- /dev/null +++ b/pkg/backend/remote-state/s3/client_test.go @@ -0,0 +1,329 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "strings" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(RemoteClient) + var _ remote.ClientLocker = new(RemoteClient) +} + +func TestRemoteClient(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + keyName := "testState" + + b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b.s3Client, bucketName) + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestClient(t, state.(*remote.State).Client) +} + +func TestRemoteClientLocks(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix()) + keyName := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, 
New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b1.s3Client, bucketName, b1.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b1.s3Client, bucketName) + createDynamoDBTable(ctx, t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(ctx, t, b1.dynClient, bucketName) + + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + s2, err := b2.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +// verify that we can unlock a state with an existing lock +func TestForceUnlock(t *testing.T) { + testACC(t) + bucketName := fmt.Sprintf("%s-force-%x", testBucketPrefix, time.Now().Unix()) + keyName := "testState" + + b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{ + "bucket": bucketName, + "key": keyName, + "encrypt": true, + "dynamodb_table": bucketName, + })).(*Backend) + + ctx := context.TODO() + createS3Bucket(ctx, t, b1.s3Client, bucketName, b1.awsConfig.Region) + defer deleteS3Bucket(ctx, t, b1.s3Client, bucketName) + createDynamoDBTable(ctx, t, b1.dynClient, bucketName) + defer deleteDynamoDBTable(ctx, t, b1.dynClient, bucketName) + + // first test with default + s1, err := b1.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Who = "clientA" + + lockID, err := s1.Lock(info) + if err != nil { + t.Fatal("unable to get initial lock:", 
// --- pkg/backend/remote-state/s3/client_test.go (continued) ---

// TestRemoteClient_clientMD5 verifies that the RemoteClient can round-trip a
// state checksum through the DynamoDB lock/digest table: put, get, delete,
// and that a get after delete fails.
// NOTE(review): this is an acceptance test — testACC skips it unless the
// environment opts in, and it talks to real S3/DynamoDB endpoints.
func TestRemoteClient_clientMD5(t *testing.T) {
	testACC(t)

	bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix())
	keyName := "testState"

	b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":         bucketName,
		"key":            keyName,
		"dynamodb_table": bucketName,
	})).(*Backend)

	ctx := context.TODO()
	createS3Bucket(ctx, t, b.s3Client, bucketName, b.awsConfig.Region)
	defer deleteS3Bucket(ctx, t, b.s3Client, bucketName)
	createDynamoDBTable(ctx, t, b.dynClient, bucketName)
	defer deleteDynamoDBTable(ctx, t, b.dynClient, bucketName)

	s, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client := s.(*remote.State).Client.(*RemoteClient)

	// md5 is only used as a content digest here, not for security.
	sum := md5.Sum([]byte("test"))

	if err := client.putMD5(ctx, sum[:]); err != nil {
		t.Fatal(err)
	}

	getSum, err := client.getMD5(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(getSum, sum[:]) {
		t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum)
	}

	if err := client.deleteMD5(ctx); err != nil {
		t.Fatal(err)
	}

	// After deletion the digest row must be gone, so getMD5 must error.
	if getSum, err := client.getMD5(ctx); err == nil {
		t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum)
	}
}

// verify that a client won't return a state with an incorrect checksum.
//
// The test simulates S3 eventual-consistency by writing "stale" state bodies
// through a second backend (client2) that bypasses the lock table, while
// client1 maintains the authoritative checksum.
func TestRemoteClient_stateChecksum(t *testing.T) {
	testACC(t)

	bucketName := fmt.Sprintf("%s-%x", testBucketPrefix, time.Now().Unix())
	keyName := "testState"

	b1 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket":         bucketName,
		"key":            keyName,
		"dynamodb_table": bucketName,
	})).(*Backend)

	ctx := context.TODO()
	createS3Bucket(ctx, t, b1.s3Client, bucketName, b1.awsConfig.Region)
	defer deleteS3Bucket(ctx, t, b1.s3Client, bucketName)
	createDynamoDBTable(ctx, t, b1.dynClient, bucketName)
	defer deleteDynamoDBTable(ctx, t, b1.dynClient, bucketName)

	s1, err := b1.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client1 := s1.(*remote.State).Client

	// create an old and new state version to persist
	s := statemgr.TestFullInitialState()
	sf := &statefile.File{State: s}
	var oldState bytes.Buffer
	if err := statefile.Write(sf, &oldState, encryption.StateEncryptionDisabled()); err != nil {
		t.Fatal(err)
	}
	sf.Serial++
	var newState bytes.Buffer
	if err := statefile.Write(sf, &newState, encryption.StateEncryptionDisabled()); err != nil {
		t.Fatal(err)
	}

	// Use b2 without a dynamodb_table to bypass the lock table to write the state directly.
	// client2 will write the "incorrect" state, simulating s3 eventually consistency delays
	b2 := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), backend.TestWrapConfig(map[string]interface{}{
		"bucket": bucketName,
		"key":    keyName,
	})).(*Backend)
	s2, err := b2.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client2 := s2.(*remote.State).Client

	// write the new state through client2 so that there is no checksum yet
	if err := client2.Put(newState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// verify that we can pull a state without a checksum
	if _, err := client1.Get(); err != nil {
		t.Fatal(err)
	}

	// write the new state back with its checksum
	if err := client1.Put(newState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// put an empty state in place to check for panics during get
	if err := client2.Put([]byte{}); err != nil {
		t.Fatal(err)
	}

	// remove the timeouts so we can fail immediately
	origTimeout := consistencyRetryTimeout
	origInterval := consistencyRetryPollInterval
	defer func() {
		consistencyRetryTimeout = origTimeout
		consistencyRetryPollInterval = origInterval
	}()
	consistencyRetryTimeout = 0
	consistencyRetryPollInterval = 0

	// fetching an empty state through client1 should now error out due to a
	// mismatched checksum.
	// NOTE(review): if Get unexpectedly returns a nil error here, err.Error()
	// panics before the Fatalf fires — TODO confirm this is acceptable in a test.
	if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
		t.Fatalf("expected state checksum error: got %s", err)
	}

	// put the old state in place of the new, without updating the checksum
	if err := client2.Put(oldState.Bytes()); err != nil {
		t.Fatal(err)
	}

	// fetching the wrong state through client1 should now error out due to a
	// mismatched checksum.
	if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
		t.Fatalf("expected state checksum error: got %s", err)
	}

	// update the state with the correct one after we Get again
	testChecksumHook = func() {
		if err := client2.Put(newState.Bytes()); err != nil {
			t.Fatal(err)
		}
		testChecksumHook = nil
	}

	consistencyRetryTimeout = origTimeout

	// this final Get will fail to fail the checksum verification, the above
	// callback will update the state with the correct version, and Get should
	// retry automatically.
	if _, err := client1.Get(); err != nil {
		t.Fatal(err)
	}
}

// --- pkg/backend/remote-state/s3/diags.go ---

// diagnosticString renders a single diagnostic as
// "SEVERITY: summary" optionally followed by a blank line and the detail.
func diagnosticString(d tfdiags.Diagnostic) string {
	var buffer strings.Builder
	buffer.WriteString(d.Severity().String() + ": ")
	buffer.WriteString(d.Description().Summary)
	if d.Description().Detail != "" {
		buffer.WriteString("\n\n")
		buffer.WriteString(d.Description().Detail)
	}
	return buffer.String()
}

// diagnosticsString joins the rendered diagnostics with ",\n" separators;
// an empty set renders as the empty string.
func diagnosticsString(d tfdiags.Diagnostics) string {
	l := len(d)
	if l == 0 {
		return ""
	}

	var buffer strings.Builder
	for i, v := range d {
		buffer.WriteString(diagnosticString(v))
		if i < l-1 {
			buffer.WriteString(",\n")
		}
	}
	return buffer.String()
}

// baseSeverityToTofuSeverity maps an aws-sdk-go-base severity onto the
// tfdiags equivalent.
// NOTE(review): -1 is not a declared tfdiags.Severity value; callers appear
// to treat it as "unknown" — confirm before relying on it.
func baseSeverityToTofuSeverity(s diag.Severity) tfdiags.Severity {
	switch s {
	case diag.SeverityWarning:
		return tfdiags.Warning
	case diag.SeverityError:
		return tfdiags.Error
	default:
		return -1
	}
}

// --- pkg/backend/remote-state/s3/testing_test.go ---

// diagnosticComparer is a Comparer function for use with cmp.Diff to compare two tfdiags.Diagnostic values
func diagnosticComparer(l, r tfdiags.Diagnostic) bool {
	if l.Severity() != r.Severity() {
		return false
	}
	if l.Description() != r.Description() {
		return false
	}

	// Also require the attribute paths to match element-for-element.
	lp := tfdiags.GetAttribute(l)
	rp := tfdiags.GetAttribute(r)
	if len(lp) != len(rp) {
		return false
	}
	return lp.Equals(rp)
}

// diagnosticSummaryComparer is a Comparer function for use with cmp.Diff to compare
// the Severity and Summary fields two tfdiags.Diagnostic values
func diagnosticSummaryComparer(l, r tfdiags.Diagnostic) bool {
	if l.Severity() != r.Severity() {
		return false
	}

	ld := l.Description()
	rd := r.Description()
	return ld.Summary == rd.Summary
}
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +const ( + multiRegionKeyIdPattern = `mrk-[a-f0-9]{32}` + uuidRegexPattern = `[a-f0-9]{8}-[a-f0-9]{4}-[1-5][a-f0-9]{3}-[ab89][a-f0-9]{3}-[a-f0-9]{12}` + aliasRegexPattern = `alias/[a-zA-Z0-9/_-]+` +) + +func validateKMSKey(path cty.Path, s string) (diags tfdiags.Diagnostics) { + if arn.IsARN(s) { + return validateKMSKeyARN(path, s) + } + return validateKMSKeyID(path, s) +} + +func validateKMSKeyID(path cty.Path, s string) (diags tfdiags.Diagnostics) { + keyIdRegex := regexp.MustCompile(`^` + uuidRegexPattern + `|` + multiRegionKeyIdPattern + `|` + aliasRegexPattern + `$`) + if !keyIdRegex.MatchString(s) { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid KMS Key ID", + fmt.Sprintf("Value must be a valid KMS Key ID, got %q", s), + path, + )) + return diags + } + + return diags +} + +func validateKMSKeyARN(path cty.Path, s string) (diags tfdiags.Diagnostics) { + parsedARN, err := arn.Parse(s) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid KMS Key ARN", + fmt.Sprintf("Value must be a valid KMS Key ARN, got %q", s), + path, + )) + return diags + } + + if !isKeyARN(parsedARN) { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid KMS Key ARN", + fmt.Sprintf("Value must be a valid KMS Key ARN, got %q", s), + path, + )) + return diags + } + + return diags +} + +func validateNestedAssumeRole(obj cty.Value, objPath cty.Path) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if val, ok := stringAttrOk(obj, "role_arn"); !ok || val == "" { + path := objPath.GetAttr("role_arn") + diags = diags.Append(attributeErrDiag( + "Missing Required Value", + fmt.Sprintf("The attribute %q is required by the backend.\n\n", pathString(path))+ + "Refer to the backend 
documentation for additional information which attributes are required.", + path, + )) + } + + if val, ok := stringAttrOk(obj, "duration"); ok { + validateDuration(val, 15*time.Minute, 12*time.Hour, objPath.GetAttr("duration"), &diags) + } + + if val, ok := stringAttrOk(obj, "external_id"); ok { + validateNonEmptyString(val, objPath.GetAttr("external_id"), &diags) + } + + if val, ok := stringAttrOk(obj, "policy"); ok { + validateNonEmptyString(val, objPath.GetAttr("policy"), &diags) + } + + if val, ok := stringAttrOk(obj, "session_name"); ok { + validateNonEmptyString(val, objPath.GetAttr("session_name"), &diags) + } + + if val, ok := stringSliceAttrOk(obj, "policy_arns"); ok { + validatePolicyARNSlice(val, objPath.GetAttr("policy_arns"), &diags) + } + + return diags +} + +func validateAssumeRoleWithWebIdentity(obj cty.Value, objPath cty.Path) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + validateAttributesConflict( + cty.GetAttrPath("web_identity_token"), + cty.GetAttrPath("web_identity_token_file"), + )(obj, objPath, &diags) + + if val, ok := stringAttrOk(obj, "session_name"); ok { + validateNonEmptyString(val, objPath.GetAttr("session_name"), &diags) + } + + if val, ok := stringAttrOk(obj, "policy"); ok { + validateNonEmptyString(val, objPath.GetAttr("policy"), &diags) + } + + if val, ok := stringSliceAttrOk(obj, "policy_arns"); ok { + validatePolicyARNSlice(val, objPath.GetAttr("policy_arns"), &diags) + } + + if val, ok := stringAttrOk(obj, "duration"); ok { + validateDuration(val, 15*time.Minute, 12*time.Hour, objPath.GetAttr("duration"), &diags) + } + + return diags +} + +func isKeyARN(arn arn.ARN) bool { + return keyIdFromARNResource(arn.Resource) != "" || aliasIdFromARNResource(arn.Resource) != "" +} + +func keyIdFromARNResource(s string) string { + keyIdResourceRegex := regexp.MustCompile(`^key/(` + uuidRegexPattern + `|` + multiRegionKeyIdPattern + `)$`) + matches := keyIdResourceRegex.FindStringSubmatch(s) + if matches == nil || len(matches) 
!= 2 { + return "" + } + + return matches[1] +} + +func aliasIdFromARNResource(s string) string { + aliasIdResourceRegex := regexp.MustCompile(`^(` + aliasRegexPattern + `)$`) + matches := aliasIdResourceRegex.FindStringSubmatch(s) + if matches == nil || len(matches) != 2 { + return "" + } + + return matches[1] +} + +type objectValidator func(obj cty.Value, objPath cty.Path, diags *tfdiags.Diagnostics) + +func validateAttributesConflict(paths ...cty.Path) objectValidator { + applyPath := func(obj cty.Value, path cty.Path) (cty.Value, error) { + if len(path) == 0 { + return cty.NilVal, nil + } + for _, step := range path { + val, err := step.Apply(obj) + if err != nil { + return cty.NilVal, err + } + if val.IsNull() { + return cty.NilVal, nil + } + obj = val + } + return obj, nil + } + + return func(obj cty.Value, objPath cty.Path, diags *tfdiags.Diagnostics) { + found := false + for _, path := range paths { + val, err := applyPath(obj, path) + if err != nil { + *diags = diags.Append(attributeErrDiag( + "Invalid Path for Schema", + "The S3 Backend unexpectedly provided a path that does not match the schema. 
"+ + "Please report this to the developers.\n\n"+ + "Path: "+pathString(path)+"\n\n"+ + "Error: "+err.Error(), + objPath, + )) + continue + } + if !val.IsNull() { + if found { + pathStrs := make([]string, len(paths)) + for i, path := range paths { + pathStrs[i] = pathString(path) + } + *diags = diags.Append(attributeErrDiag( + "Invalid Attribute Combination", + fmt.Sprintf(`Only one of %s can be set.`, strings.Join(pathStrs, ", ")), + objPath, + )) + return + } + found = true + } + } + } +} + +func attributeErrDiag(summary, detail string, attrPath cty.Path) tfdiags.Diagnostic { + return tfdiags.AttributeValue(tfdiags.Error, summary, detail, attrPath.Copy()) +} + +func attributeWarningDiag(summary, detail string, attrPath cty.Path) tfdiags.Diagnostic { + return tfdiags.AttributeValue(tfdiags.Warning, summary, detail, attrPath.Copy()) +} + +func validateNonEmptyString(val string, path cty.Path, diags *tfdiags.Diagnostics) { + if len(strings.TrimSpace(val)) == 0 { + *diags = diags.Append(attributeErrDiag( + "Invalid Value", + "The value cannot be empty or all whitespace", + path, + )) + } +} + +func validatePolicyARNSlice(val []string, path cty.Path, diags *tfdiags.Diagnostics) { + for _, v := range val { + arn, err := arn.Parse(v) + if err != nil { + *diags = diags.Append(attributeErrDiag( + "Invalid ARN", + fmt.Sprintf("The value %q cannot be parsed as an ARN: %s", val, err), + path, + )) + break + } else { + if !strings.HasPrefix(arn.Resource, "policy/") { + *diags = diags.Append(attributeErrDiag( + "Invalid IAM Policy ARN", + fmt.Sprintf("Value must be a valid IAM Policy ARN, got %q", val), + path, + )) + } + } + } +} + +func validateDuration(val string, min, max time.Duration, path cty.Path, diags *tfdiags.Diagnostics) { + d, err := time.ParseDuration(val) + if err != nil { + *diags = diags.Append(attributeErrDiag( + "Invalid Duration", + fmt.Sprintf("The value %q cannot be parsed as a duration: %s", val, err), + path, + )) + return + } + if (min > 0 && d < min) 
|| (max > 0 && d > max) { + *diags = diags.Append(attributeErrDiag( + "Invalid Duration", + fmt.Sprintf("Duration must be between %s and %s, had %s", min, max, val), + path, + )) + } +} diff --git a/pkg/backend/remote-state/s3/validate_test.go b/pkg/backend/remote-state/s3/validate_test.go new file mode 100644 index 00000000000..54c2b61aa50 --- /dev/null +++ b/pkg/backend/remote-state/s3/validate_test.go @@ -0,0 +1,376 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestValidateKMSKey(t *testing.T) { + t.Parallel() + + path := cty.Path{cty.GetAttrStep{Name: "field"}} + + testcases := map[string]struct { + in string + expected tfdiags.Diagnostics + }{ + "kms key id": { + in: "57ff7a43-341d-46b6-aee3-a450c9de6dc8", + }, + "kms key arn": { + in: "arn:aws:kms:us-west-2:111122223333:key/57ff7a43-341d-46b6-aee3-a450c9de6dc8", + }, + "kms multi-region key id": { + in: "mrk-f827515944fb43f9b902a09d2c8b554f", + }, + "kms multi-region key arn": { + in: "arn:aws:kms:us-west-2:111122223333:key/mrk-a835af0b39c94b86a21a8fc9535df681", + }, + "kms key alias": { + in: "alias/arbitrary-key", + }, + "kms key alias arn": { + in: "arn:aws:kms:us-west-2:111122223333:alias/arbitrary-key", + }, + "invalid key": { + in: "$%wrongkey", + expected: tfdiags.Diagnostics{ + tfdiags.AttributeValue( + tfdiags.Error, + "Invalid KMS Key ID", + `Value must be a valid KMS Key ID, got "$%wrongkey"`, + path, + ), + }, + }, + "non-kms arn": { + in: "arn:aws:lamda:foo:bar:key/xyz", + expected: tfdiags.Diagnostics{ + tfdiags.AttributeValue( + tfdiags.Error, + "Invalid KMS Key ARN", + `Value must be a valid KMS Key ARN, got "arn:aws:lamda:foo:bar:key/xyz"`, + path, + ), + }, + }, + } + + for name, testcase := range testcases { + testcase := 
// --- pkg/backend/remote-state/s3/validate_test.go ---

// TestValidateKMSKey covers both the bare-ID and ARN entry points through the
// validateKMSKey dispatcher. Cases with a nil "expected" must produce no
// diagnostics.
func TestValidateKMSKey(t *testing.T) {
	t.Parallel()

	path := cty.Path{cty.GetAttrStep{Name: "field"}}

	testcases := map[string]struct {
		in       string
		expected tfdiags.Diagnostics
	}{
		"kms key id": {
			in: "57ff7a43-341d-46b6-aee3-a450c9de6dc8",
		},
		"kms key arn": {
			in: "arn:aws:kms:us-west-2:111122223333:key/57ff7a43-341d-46b6-aee3-a450c9de6dc8",
		},
		"kms multi-region key id": {
			in: "mrk-f827515944fb43f9b902a09d2c8b554f",
		},
		"kms multi-region key arn": {
			in: "arn:aws:kms:us-west-2:111122223333:key/mrk-a835af0b39c94b86a21a8fc9535df681",
		},
		"kms key alias": {
			in: "alias/arbitrary-key",
		},
		"kms key alias arn": {
			in: "arn:aws:kms:us-west-2:111122223333:alias/arbitrary-key",
		},
		"invalid key": {
			in: "$%wrongkey",
			expected: tfdiags.Diagnostics{
				tfdiags.AttributeValue(
					tfdiags.Error,
					"Invalid KMS Key ID",
					`Value must be a valid KMS Key ID, got "$%wrongkey"`,
					path,
				),
			},
		},
		// "lamda" is deliberately not a KMS service ARN.
		"non-kms arn": {
			in: "arn:aws:lamda:foo:bar:key/xyz",
			expected: tfdiags.Diagnostics{
				tfdiags.AttributeValue(
					tfdiags.Error,
					"Invalid KMS Key ARN",
					`Value must be a valid KMS Key ARN, got "arn:aws:lamda:foo:bar:key/xyz"`,
					path,
				),
			},
		},
	}

	for name, testcase := range testcases {
		testcase := testcase // capture for parallel subtest (pre-Go 1.22 semantics)
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			diags := validateKMSKey(path, testcase.in)

			if diff := cmp.Diff(diags, testcase.expected, cmp.Comparer(diagnosticComparer)); diff != "" {
				t.Errorf("unexpected diagnostics difference: %s", diff)
			}
		})
	}
}

// TestValidateKeyARN exercises validateKMSKeyARN directly with key ARNs,
// non-key KMS ARNs, non-KMS ARNs, and non-ARN strings.
func TestValidateKeyARN(t *testing.T) {
	t.Parallel()

	path := cty.Path{cty.GetAttrStep{Name: "field"}}

	testcases := map[string]struct {
		in       string
		expected tfdiags.Diagnostics
	}{
		"kms key id": {
			in: "arn:aws:kms:us-west-2:123456789012:key/57ff7a43-341d-46b6-aee3-a450c9de6dc8",
		},
		"kms mrk key id": {
			in: "arn:aws:kms:us-west-2:111122223333:key/mrk-a835af0b39c94b86a21a8fc9535df681",
		},
		"kms non-key id": {
			in: "arn:aws:kms:us-west-2:123456789012:something/else",
			expected: tfdiags.Diagnostics{
				tfdiags.AttributeValue(
					tfdiags.Error,
					"Invalid KMS Key ARN",
					`Value must be a valid KMS Key ARN, got "arn:aws:kms:us-west-2:123456789012:something/else"`,
					path,
				),
			},
		},
		"non-kms arn": {
			in: "arn:aws:iam::123456789012:user/David",
			expected: tfdiags.Diagnostics{
				tfdiags.AttributeValue(
					tfdiags.Error,
					"Invalid KMS Key ARN",
					`Value must be a valid KMS Key ARN, got "arn:aws:iam::123456789012:user/David"`,
					path,
				),
			},
		},
		"not an arn": {
			in: "not an arn",
			expected: tfdiags.Diagnostics{
				tfdiags.AttributeValue(
					tfdiags.Error,
					"Invalid KMS Key ARN",
					`Value must be a valid KMS Key ARN, got "not an arn"`,
					path,
				),
			},
		},
	}

	for name, testcase := range testcases {
		testcase := testcase
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			diags := validateKMSKeyARN(path, testcase.in)

			if diff := cmp.Diff(diags, testcase.expected, cmp.Comparer(diagnosticComparer)); diff != "" {
				t.Errorf("unexpected diagnostics difference: %s", diff)
			}
		})
	}
}

// Test_validateAttributesConflict checks the mutual-exclusion validator for
// both flat and nested attribute paths.
func Test_validateAttributesConflict(t *testing.T) {
	tests := []struct {
		name      string
		paths     []cty.Path
		objValues map[string]cty.Value
		expectErr bool
	}{
		{
			name: "Conflict Found",
			paths: []cty.Path{
				{cty.GetAttrStep{Name: "attr1"}},
				{cty.GetAttrStep{Name: "attr2"}},
			},
			objValues: map[string]cty.Value{
				"attr1": cty.StringVal("value1"),
				"attr2": cty.StringVal("value2"),
				"attr3": cty.StringVal("value3"),
			},
			expectErr: true,
		},
		{
			name: "No Conflict",
			paths: []cty.Path{
				{cty.GetAttrStep{Name: "attr1"}},
				{cty.GetAttrStep{Name: "attr2"}},
			},
			objValues: map[string]cty.Value{
				"attr1": cty.StringVal("value1"),
				"attr2": cty.NilVal,
				"attr3": cty.StringVal("value3"),
			},
			expectErr: false,
		},
		{
			name: "Nested: Conflict Found",
			paths: []cty.Path{
				(cty.Path{cty.GetAttrStep{Name: "nested"}}).GetAttr("attr1"),
				{cty.GetAttrStep{Name: "attr2"}},
			},
			objValues: map[string]cty.Value{
				"nested": cty.ObjectVal(map[string]cty.Value{
					"attr1": cty.StringVal("value1"),
				}),
				"attr2": cty.StringVal("value2"),
				"attr3": cty.StringVal("value3"),
			},
			expectErr: true,
		},
		{
			name: "Nested: No Conflict",
			paths: []cty.Path{
				(cty.Path{cty.GetAttrStep{Name: "nested"}}).GetAttr("attr1"),
				{cty.GetAttrStep{Name: "attr3"}},
			},
			objValues: map[string]cty.Value{
				"nested": cty.NilVal,
				"attr1":  cty.StringVal("value1"),
				"attr3":  cty.StringVal("value3"),
			},
			expectErr: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var diags tfdiags.Diagnostics

			validator := validateAttributesConflict(test.paths...)

			obj := cty.ObjectVal(test.objValues)

			validator(obj, cty.Path{}, &diags)

			if test.expectErr {
				if !diags.HasErrors() {
					t.Error("Expected validation errors, but got none.")
				}
			} else {
				if diags.HasErrors() {
					t.Errorf("Expected no errors, but got %s.", diags.Err())
				}
			}
		})
	}
}

// Test_validateNestedAssumeRole asserts the exact Detail strings produced for
// each invalid assume_role attribute; expectations are matched positionally.
func Test_validateNestedAssumeRole(t *testing.T) {
	tests := []struct {
		description   string
		input         cty.Value
		expectedDiags []string
	}{
		{
			description: "Valid Input",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: nil,
		},
		{
			description: "Missing Role ARN",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal(""),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"The attribute \"assume_role.role_arn\" is required by the backend.\n\nRefer to the backend documentation for additional information which attributes are required.",
			},
		},
		{
			description: "Invalid Duration",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("invalid-duration"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"The value \"invalid-duration\" cannot be parsed as a duration: time: invalid duration \"invalid-duration\"",
			},
		},
		{
			description: "Invalid Duration Length",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("44h"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"Duration must be between 15m0s and 12h0m0s, had 44h",
			},
		},
		{
			description: "Invalid External ID (Empty)",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal(""),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"The value cannot be empty or all whitespace",
			},
		},
		{
			description: "Invalid Policy (Empty)",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal(""),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"The value cannot be empty or all whitespace",
			},
		},
		{
			description: "Invalid Session Name (Empty)",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal(""),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:policy/valid-policy-arn")}),
			}),
			expectedDiags: []string{
				"The value cannot be empty or all whitespace",
			},
		},
		{
			description: "Invalid Policy ARN (Invalid ARN Format)",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("invalid-arn-format")}),
			}),
			expectedDiags: []string{
				"The value [\"invalid-arn-format\"] cannot be parsed as an ARN: arn: invalid prefix",
			},
		},
		{
			description: "Invalid Policy ARN (Not Starting with 'policy/')",
			input: cty.ObjectVal(map[string]cty.Value{
				"role_arn":     cty.StringVal("valid-role-arn"),
				"duration":     cty.StringVal("30m"),
				"external_id":  cty.StringVal("valid-external-id"),
				"policy":       cty.StringVal("valid-policy"),
				"session_name": cty.StringVal("valid-session-name"),
				"policy_arns":  cty.ListVal([]cty.Value{cty.StringVal("arn:aws:iam::123456789012:role/invalid-policy-arn")}),
			}),
			expectedDiags: []string{
				"Value must be a valid IAM Policy ARN, got [\"arn:aws:iam::123456789012:role/invalid-policy-arn\"]",
			},
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			diagnostics := validateNestedAssumeRole(test.input, cty.Path{cty.GetAttrStep{Name: "assume_role"}})
			if len(diagnostics) != len(test.expectedDiags) {
				t.Errorf("Expected %d diagnostics, but got %d", len(test.expectedDiags), len(diagnostics))
			}
			for i, diag := range diagnostics {
				if diag.Description().Detail != test.expectedDiags[i] {
					t.Errorf("Mismatch in diagnostic %d. Expected: %q, Got: %q", i, test.expectedDiags[i], diag.Description().Detail)
				}
			}
		})
	}
}
// --- pkg/backend/remote/backend.go ---

const (
	defaultParallelism = 10
	stateServiceID     = "state.v2"
	tfeServiceID       = "tfe.v2.1"
	genericHostname    = "localtofu.com"
)

// Remote is an implementation of EnhancedBackend that performs all
// operations in a remote backend.
type Remote struct {
	// CLI and Colorize control the CLI output. If CLI is nil then no CLI
	// output will be done. If CLIColor is nil then no coloring will be done.
	CLI      cli.Ui
	CLIColor *colorstring.Colorize

	// ContextOpts are the base context options to set when initializing a
	// new OpenTofu context. Many of these will be overridden or merged by
	// Operation. See Operation for more details.
	ContextOpts *tofu.ContextOpts

	// client is the remote backend API client.
	client *tfe.Client

	// lastRetry is set to the last time a request was retried.
	lastRetry time.Time

	// hostname of the remote backend server.
	hostname string

	// organization is the organization that contains the target workspaces.
	organization string

	// workspace is used to map the default workspace to a remote workspace.
	workspace string

	// prefix is used to filter down a set of workspaces that use a single
	// configuration.
	prefix string

	// services is used for service discovery
	services *disco.Disco

	// local, if non-nil, will be used for all enhanced behavior. This
	// allows local behavior with the remote backend functioning as remote
	// state storage backend.
	local backend.Enhanced

	// forceLocal, if true, will force the use of the local backend.
	forceLocal bool

	// opLock locks operations
	opLock sync.Mutex

	// ignoreVersionConflict, if true, will disable the requirement that the
	// local OpenTofu version matches the remote workspace's configured
	// version. This will also cause VerifyWorkspaceTerraformVersion to return
	// a warning diagnostic instead of an error.
	ignoreVersionConflict bool

	// encryption wraps state payloads on their way to/from storage.
	encryption encryption.StateEncryption
}

// Compile-time interface conformance checks.
var _ backend.Backend = (*Remote)(nil)
var _ backend.Enhanced = (*Remote)(nil)
var _ backend.Local = (*Remote)(nil)

// New creates a new initialized remote backend.
func New(services *disco.Disco, enc encryption.StateEncryption) *Remote {
	return &Remote{
		services:   services,
		encryption: enc,
	}
}

// ConfigSchema implements backend.Enhanced.
// It declares the "hostname"/"organization"/"token" attributes plus a single
// nested "workspaces" block holding either "name" or "prefix".
func (b *Remote) ConfigSchema() *configschema.Block {
	return &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"hostname": {
				Type:        cty.String,
				Optional:    true,
				Description: schemaDescriptions["hostname"],
			},
			"organization": {
				Type:        cty.String,
				Required:    true,
				Description: schemaDescriptions["organization"],
			},
			"token": {
				Type:        cty.String,
				Optional:    true,
				Description: schemaDescriptions["token"],
			},
		},

		BlockTypes: map[string]*configschema.NestedBlock{
			"workspaces": {
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"name": {
							Type:        cty.String,
							Optional:    true,
							Description: schemaDescriptions["name"],
						},
						"prefix": {
							Type:        cty.String,
							Optional:    true,
							Description: schemaDescriptions["prefix"],
						},
					},
				},
				Nesting: configschema.NestingSingle,
			},
		},
	}
}

// PrepareConfig implements backend.Backend.
// It validates that organization is non-empty and that exactly one of
// workspaces.name / workspaces.prefix is set; it does not mutate obj.
func (b *Remote) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	if obj.IsNull() {
		return obj, diags
	}

	if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Invalid organization value",
			`The "organization" attribute value must not be empty.`,
			cty.Path{cty.GetAttrStep{Name: "organization"}},
		))
	}

	var name, prefix string
	if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() {
		if val := workspaces.GetAttr("name"); !val.IsNull() {
			name = val.AsString()
		}
		if val := workspaces.GetAttr("prefix"); !val.IsNull() {
			prefix = val.AsString()
		}
	}

	// Make sure that we have either a workspace name or a prefix.
	if name == "" && prefix == "" {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Invalid workspaces configuration",
			`Either workspace "name" or "prefix" is required.`,
			cty.Path{cty.GetAttrStep{Name: "workspaces"}},
		))
	}

	// Make sure that only one of workspace name or a prefix is configured.
	if name != "" && prefix != "" {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Invalid workspaces configuration",
			`Only one of workspace "name" or "prefix" is allowed.`,
			cty.Path{cty.GetAttrStep{Name: "workspaces"}},
		))
	}

	return obj, diags
}

// ServiceDiscoveryAliases maps the static generic hostname onto the
// configured backend hostname for service discovery.
func (b *Remote) ServiceDiscoveryAliases() ([]backend.HostAlias, error) {
	aliasHostname, err := svchost.ForComparison(genericHostname)
	if err != nil {
		// This should never happen because the hostname is statically defined.
		return nil, fmt.Errorf("failed to create backend alias from alias %q. The hostname is not in the correct format. This is a bug in the backend", genericHostname)
	}

	targetHostname, err := svchost.ForComparison(b.hostname)
	if err != nil {
		// This should never happen because the 'to' alias is the backend host, which has likely
		// already been evaluated as a svchost.Hostname by now
		return nil, fmt.Errorf("failed to create backend alias to target %q. The hostname is not in the correct format", b.hostname)
	}

	return []backend.HostAlias{
		{
			From: aliasHostname,
			To:   targetHostname,
		},
	}, nil
}

// Configure implements backend.Enhanced.
// Order matters here: hostname → workspaces → service discovery →
// token resolution → API client creation → entitlements check, each stage
// short-circuiting with diagnostics on failure.
func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	if obj.IsNull() {
		return diags
	}

	// Get the hostname.
	if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" {
		b.hostname = val.AsString()
	} else {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Hostname is required for the remote backend",
			`OpenTofu does not provide a default "hostname" attribute, so it must be set to the hostname of the remote backend.`,
		))

		return diags
	}

	// Get the organization.
	if val := obj.GetAttr("organization"); !val.IsNull() {
		b.organization = val.AsString()
	}

	// Get the workspaces configuration block and retrieve the
	// default workspace name and prefix.
	if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() {
		if val := workspaces.GetAttr("name"); !val.IsNull() {
			b.workspace = val.AsString()
		}
		if val := workspaces.GetAttr("prefix"); !val.IsNull() {
			b.prefix = val.AsString()
		}
	}

	// Determine if we are forced to use the local backend.
	b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != ""

	serviceID := tfeServiceID
	if b.forceLocal {
		serviceID = stateServiceID
	}

	// Discover the service URL for this host to confirm that it provides
	// a remote backend API and to get the version constraints.
	service, constraints, err := b.discover(serviceID)

	// First check any constraints we might have received.
	if constraints != nil {
		diags = diags.Append(b.checkConstraints(constraints))
		if diags.HasErrors() {
			return diags
		}
	}

	// When we don't have any constraints errors, also check for discovery
	// errors before we continue.
	if err != nil {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			strings.ToUpper(err.Error()[:1])+err.Error()[1:],
			"", // no description is needed here, the error is clear
			cty.Path{cty.GetAttrStep{Name: "hostname"}},
		))
		return diags
	}

	// Get the token from the config.
	var token string
	if val := obj.GetAttr("token"); !val.IsNull() {
		token = val.AsString()
	}

	// Retrieve the token for this host as configured in the credentials
	// section of the CLI Config File if no token was configured for this
	// host in the config.
	if token == "" {
		token, err = b.token()
		if err != nil {
			diags = diags.Append(tfdiags.AttributeValue(
				tfdiags.Error,
				strings.ToUpper(err.Error()[:1])+err.Error()[1:],
				"", // no description is needed here, the error is clear
				cty.Path{cty.GetAttrStep{Name: "hostname"}},
			))
			return diags
		}
	}

	// Return an error if we still don't have a token at this point.
	if token == "" {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Required token could not be found",
			fmt.Sprintf(
				"Run the following command to generate a token for %s:\n    %s",
				b.hostname,
				fmt.Sprintf("tofu login %s", b.hostname),
			),
		))
		return diags
	}

	cfg := &tfe.Config{
		Address:      service.String(),
		BasePath:     service.Path,
		Token:        token,
		Headers:      make(http.Header),
		RetryLogHook: b.retryLogHook,
	}

	// Set the version header to the current version.
	cfg.Headers.Set(tfversion.Header, tfversion.Version)

	// Update user-agent from 'go-tfe' to opentofu
	cfg.Headers.Set("User-Agent", httpclient.OpenTofuUserAgent(tfversion.String()))

	// Create the remote backend API client.
	b.client, err = tfe.NewClient(cfg)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to create the remote backend client",
			fmt.Sprintf(
				`The "remote" backend encountered an unexpected error while creating the `+
					`remote backend client: %s.`, err,
			),
		))
		return diags
	}

	// Check if the organization exists by reading its entitlements.
	entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization)
	if err != nil {
		if err == tfe.ErrResourceNotFound {
			err = fmt.Errorf("organization %q at host %s not found.\n\n"+
				"Please ensure that the organization and hostname are correct "+
				"and that your API token for %s is valid.",
				b.organization, b.hostname, b.hostname)
		}
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname),
			fmt.Sprintf("The \"remote\" backend encountered an unexpected error while reading the "+
				"organization settings: %s", err),
			cty.Path{cty.GetAttrStep{Name: "organization"}},
		))
		return diags
	}

	// Configure a local backend for when we need to run operations locally.
	b.local = backendLocal.NewWithBackend(b, b.encryption)
	// A host without the operations entitlement also forces local execution.
	b.forceLocal = b.forceLocal || !entitlements.Operations

	// Enable retries for server errors as the backend is now fully configured.
	b.client.RetryServerErrors(true)

	return diags
}
+ constraints, _ := host.VersionConstraints(serviceID, "terraform")
+
+ return service, constraints, err
+}
+
+// checkConstraints checks service version constraints against our own
+// version and returns rich and informational diagnostics in case any
+// incompatibilities are detected.
+func (b *Remote) checkConstraints(c *disco.Constraints) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ if c == nil || c.Minimum == "" || c.Maximum == "" {
+ return diags
+ }
+
+ // Generate a parsable constraints string.
+ excluding := ""
+ if len(c.Excluding) > 0 {
+ excluding = fmt.Sprintf(", != %s", strings.Join(c.Excluding, ", != "))
+ }
+ constStr := fmt.Sprintf(">= %s%s, <= %s", c.Minimum, excluding, c.Maximum)
+
+ // Create the constraints to check against.
+ constraints, err := version.NewConstraint(constStr)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ // Create the version to check.
+ v, err := version.NewVersion(tfversion.Version)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ // Return if we satisfy all constraints.
+ if constraints.Check(v) {
+ return diags
+ }
+
+ // Find out what action (upgrade/downgrade) we should advise.
+ minimum, err := version.NewVersion(c.Minimum)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ maximum, err := version.NewVersion(c.Maximum)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ var excludes []*version.Version
+ for _, exclude := range c.Excluding {
+ v, err := version.NewVersion(exclude)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+ excludes = append(excludes, v)
+ }
+
+ // Sort all the excludes. 
+ sort.Sort(version.Collection(excludes)) + + var action, toVersion string + switch { + case minimum.GreaterThan(v): + action = "upgrade" + toVersion = ">= " + minimum.String() + case maximum.LessThan(v): + action = "downgrade" + toVersion = "<= " + maximum.String() + case len(excludes) > 0: + // Get the latest excluded version. + action = "upgrade" + toVersion = "> " + excludes[len(excludes)-1].String() + } + + switch { + case len(excludes) == 1: + excluding = fmt.Sprintf(", excluding version %s", excludes[0].String()) + case len(excludes) > 1: + var vs []string + for _, v := range excludes { + vs = append(vs, v.String()) + } + excluding = fmt.Sprintf(", excluding versions %s", strings.Join(vs, ", ")) + default: + excluding = "" + } + + summary := fmt.Sprintf("Incompatible OpenTofu version v%s", v.String()) + details := fmt.Sprintf( + "The configured remote backend is compatible with OpenTofu "+ + "versions >= %s, <= %s%s.", c.Minimum, c.Maximum, excluding, + ) + + if action != "" && toVersion != "" { + summary = fmt.Sprintf("Please %s OpenTofu to %s", action, toVersion) + details += fmt.Sprintf(" Please %s to a supported version and try again.", action) + } + + // Return the customized and informational error message. + return diags.Append(tfdiags.Sourceless(tfdiags.Error, summary, details)) +} + +// token returns the token for this host as configured in the credentials +// section of the CLI Config File. If no token was configured, an empty +// string will be returned instead. 
+func (b *Remote) token() (string, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// retryLogHook is invoked each time a request is retried allowing the +// backend to log any connection issues to prevent data loss. +func (b *Remote) retryLogHook(attemptNum int, resp *http.Response) { + if b.CLI != nil { + // Ignore the first retry to make sure any delayed output will + // be written to the console before we start logging retries. + // + // The retry logic in the TFE client will retry both rate limited + // requests and server errors, but in the remote backend we only + // care about server errors so we ignore rate limit (429) errors. + if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { + // Reset the last retry time. + b.lastRetry = time.Now() + return + } + + if attemptNum == 1 { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) + } else { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace( + fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) + } + } +} + +// Workspaces implements backend.Enhanced. +func (b *Remote) Workspaces() ([]string, error) { + if b.prefix == "" { + return nil, backend.ErrWorkspacesNotSupported + } + return b.workspaces() +} + +// workspaces returns a filtered list of remote workspace names. +func (b *Remote) workspaces() ([]string, error) { + options := &tfe.WorkspaceListOptions{} + switch { + case b.workspace != "": + options.Search = b.workspace + case b.prefix != "": + options.Search = b.prefix + } + + // Create a slice to contain all the names. 
+ var names []string + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + if b.workspace != "" && w.Name == b.workspace { + names = append(names, backend.DefaultStateName) + continue + } + if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { + names = append(names, strings.TrimPrefix(w.Name, b.prefix)) + } + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil +} + +// WorkspaceNamePattern provides an appropriate workspace renaming pattern for backend migration +// purposes (handled outside of this package), based on previous usage of this backend with the +// 'prefix' workspace functionality. As of this writing, see meta_backend.migrate.go +func (b *Remote) WorkspaceNamePattern() string { + if b.prefix != "" { + return b.prefix + "*" + } + + return "" +} + +// DeleteWorkspace implements backend.Enhanced. +func (b *Remote) DeleteWorkspace(name string, _ bool) error { + if b.workspace == "" && name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: &tfe.Workspace{ + Name: name, + }, + encryption: b.encryption, + } + + return client.Delete() +} + +// StateMgr implements backend.Enhanced. 
+func (b *Remote) StateMgr(name string) (statemgr.Full, error) { + if b.workspace == "" && name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("Failed to retrieve workspace %s: %w", name, err) + } + + if err == tfe.ErrResourceNotFound { + options := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + } + + // We only set the OpenTofu Version for the new workspace if this is + // a release candidate or a final release. + if tfversion.Prerelease == "" || strings.HasPrefix(tfversion.Prerelease, "rc") { + options.TerraformVersion = tfe.String(tfversion.String()) + } + + workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options) + if err != nil { + return nil, fmt.Errorf("Error creating workspace %s: %w", name, err) + } + } + + // This is a fallback error check. Most code paths should use other + // mechanisms to check the version, then set the ignoreVersionConflict + // field to true. This check is only in place to ensure that we don't + // accidentally upgrade state with a new code path, and the version check + // logic is coarser and simpler. + if !b.ignoreVersionConflict { + wsv := workspace.TerraformVersion + // Explicitly ignore the pseudo-version "latest" here, as it will cause + // plan and apply to always fail. 
+ if wsv != tfversion.String() && wsv != "latest" { + return nil, fmt.Errorf("Remote workspace OpenTofu version %q does not match local OpenTofu version %q", workspace.TerraformVersion, tfversion.String()) + } + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: workspace, + + // This is optionally set during OpenTofu Enterprise runs. + runID: os.Getenv("TFE_RUN_ID"), + + encryption: b.encryption, + } + + state := remote.NewState(client, b.encryption) + if client.runID != "" { + // client.runID will be set if we're running a Terraform Cloud + // or Terraform Enterprise remote execution environment, in which + // case we'll disable intermediate snapshots to avoid extra storage + // costs for Terraform Enterprise customers. + // Other implementations of the remote state protocol should not run + // in contexts where there's a "TFE Run ID" and so are not affected + // by this special case. + state.DisableIntermediateSnapshots() + } + return state, nil +} + +func isLocalExecutionMode(execMode string) bool { + return execMode == "local" +} + +func (b *Remote) fetchWorkspace(ctx context.Context, organization string, name string) (*tfe.Workspace, error) { + remoteWorkspaceName := b.getRemoteWorkspaceName(name) + // Retrieve the workspace for this operation. + w, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + switch err { + case context.Canceled: + return nil, err + case tfe.ErrResourceNotFound: + return nil, fmt.Errorf( + "workspace %s not found\n\n"+ + "The configured \"remote\" backend returns '404 Not Found' errors for resources\n"+ + "that do not exist, as well as for resources that a user doesn't have access\n"+ + "to. 
If the resource does exist, please check the rights for the used token", + name, + ) + default: + err := fmt.Errorf( + "the configured \"remote\" backend encountered an unexpected error:\n\n%w", + err, + ) + return nil, err + } + } + + return w, nil +} + +// Operation implements backend.Enhanced. +func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) + + if err != nil { + return nil, err + } + + // OpenTofu remote version conflicts are not a concern for operations. We + // are in one of three states: + // + // - Running remotely, in which case the local version is irrelevant; + // - Workspace configured for local operations, in which case the remote + // version is meaningless; + // - Forcing local operations with a remote backend, which should only + // happen in the Terraform Cloud worker, in which case the OpenTofu + // versions by definition match. + b.IgnoreVersionConflict() + + // Check if we need to use the local backend to run the operation. + if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) { + // Record that we're forced to run operations locally to allow the + // command package UI to operate correctly + b.forceLocal = true + log.Printf("[DEBUG] Remote backend is delegating %s to the local backend", op.Type) + return b.local.Operation(ctx, op) + } + + // Set the remote workspace name. + op.Workspace = w.Name + + // Determine the function to call for our operation + var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) + switch op.Type { + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + case backend.OperationTypeRefresh: + return nil, fmt.Errorf( + "\n\nThe \"refresh\" operation is not supported when using the \"remote\" backend. 
" + + "Use \"tofu apply -refresh-only\" instead.") + default: + return nil, fmt.Errorf( + "\n\nThe \"remote\" backend does not support the %q operation.", op.Type) + } + + // Lock + b.opLock.Lock() + + // Build our running operation + // the runninCtx is only used to block until the operation returns. + runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + panicHandler := logging.PanicHandlerWithTraceFn() + + // Do it. + go func() { + defer panicHandler() + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op, w) + if opErr != nil && opErr != context.Canceled { + var diags tfdiags.Diagnostics + diags = diags.Append(opErr) + op.ReportResult(runningOp, diags) + return + } + + if r == nil && opErr == context.Canceled { + runningOp.Result = backend.OperationFailure + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + + // Record if there are any changes. 
+ runningOp.PlanEmpty = !r.HasChanges
+
+ if opErr == context.Canceled {
+ if err := b.cancel(cancelCtx, op, r); err != nil {
+ var diags tfdiags.Diagnostics
+ diags = diags.Append(generalError("Failed to cancel run", err))
+ op.ReportResult(runningOp, diags)
+ return
+ }
+ }
+
+ if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored {
+ runningOp.Result = backend.OperationFailure
+ }
+ }
+ }()
+
+ // Return the running operation.
+ return runningOp, nil
+}
+
+func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
+ if r.Actions.IsCancelable {
+ // Only ask if the remote operation should be canceled
+ // if the auto approve flag is not set.
+ if !op.AutoApprove {
+ v, err := op.UIIn.Input(cancelCtx, &tofu.InputOpts{
+ Id:          "cancel",
+ Query:       "\nDo you want to cancel the remote operation?",
+ Description: "Only 'yes' will be accepted to cancel.",
+ })
+ if err != nil {
+ return generalError("Failed asking to cancel", err)
+ }
+ if v != "yes" {
+ if b.CLI != nil {
+ b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled)))
+ }
+ return nil
+ }
+ } else {
+ if b.CLI != nil {
+ // Insert a blank line to separate the outputs.
+ b.CLI.Output("")
+ }
+ }
+
+ // Try to cancel the remote operation.
+ err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{})
+ if err != nil {
+ return generalError("Failed to cancel run", err)
+ }
+ if b.CLI != nil {
+ b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled)))
+ }
+ }
+
+ return nil
+}
+
+// IgnoreVersionConflict allows commands to disable the fall-back check that
+// the local OpenTofu version matches the remote workspace's configured
+// OpenTofu version. This should be called by commands where this check is
+// unnecessary, such as those performing remote operations, or read-only
+// operations. It will also be called if the user uses a command-line flag to
+// override this check. 
+func (b *Remote) IgnoreVersionConflict() {
+ b.ignoreVersionConflict = true
+}
+
+// VerifyWorkspaceTerraformVersion compares the local OpenTofu version against
+// the workspace's configured OpenTofu version. If they are equal, this means
+// that there are no compatibility concerns, so it returns no diagnostics.
+//
+// If the versions differ, it returns diagnostics describing the mismatch: an error by default, or a warning when the conflict is ignored.
+func (b *Remote) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName)
+ if err != nil {
+ // If the workspace doesn't exist, there can be no compatibility
+ // problem, so we can return. This is most likely to happen when
+ // migrating state from a local backend to a new workspace.
+ if err == tfe.ErrResourceNotFound {
+ return nil
+ }
+
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Error looking up workspace",
+ fmt.Sprintf("Workspace read failed: %s", err),
+ ))
+ return diags
+ }
+
+ // If the workspace has the pseudo-version "latest", all bets are off. We
+ // cannot reasonably determine what the intended OpenTofu version is, so
+ // we'll skip version verification.
+ if workspace.TerraformVersion == "latest" {
+ return nil
+ }
+
+ // If the workspace has remote operations disabled, the remote OpenTofu
+ // version is effectively meaningless, so we'll skip version verification. 
+ if isLocalExecutionMode(workspace.ExecutionMode) { + return nil + } + + remoteVersion, err := version.NewSemver(workspace.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Invalid OpenTofu version: %s", err), + )) + return diags + } + + v014 := version.Must(version.NewSemver("0.14.0")) + if tfversion.SemVer.LessThan(v014) || remoteVersion.LessThan(v014) { + // Versions of OpenTofu prior to 0.14.0 will refuse to load state files + // written by a newer version of OpenTofu, even if it is only a patch + // level difference. As a result we require an exact match. + if tfversion.SemVer.Equal(remoteVersion) { + return diags + } + } + if tfversion.SemVer.GreaterThanOrEqual(v014) && remoteVersion.GreaterThanOrEqual(v014) { + // Versions of OpenTofu after 0.14.0 should be compatible with each + // other. At the time this code was written, the only constraints we + // are aware of are: + // + // - 0.14.0 is guaranteed to be compatible with versions up to but not + // including 1.3.0 + v130 := version.Must(version.NewSemver("1.3.0")) + if tfversion.SemVer.LessThan(v130) && remoteVersion.LessThan(v130) { + return diags + } + // - Any new OpenTofu state version will require at least minor patch + // increment, so x.y.* will always be compatible with each other + tfvs := tfversion.SemVer.Segments64() + rwvs := remoteVersion.Segments64() + if len(tfvs) == 3 && len(rwvs) == 3 && tfvs[0] == rwvs[0] && tfvs[1] == rwvs[1] { + return diags + } + } + + // Even if ignoring version conflicts, it may still be useful to call this + // method and warn the user about a mismatch between the local and remote + // OpenTofu versions. + severity := tfdiags.Error + if b.ignoreVersionConflict { + severity = tfdiags.Warning + } + + suggestion := " If you're sure you want to upgrade the state, you can force OpenTofu to continue using the -ignore-remote-version flag. 
This may result in an unusable workspace."
+ if b.ignoreVersionConflict {
+ suggestion = ""
+ }
+ diags = diags.Append(tfdiags.Sourceless(
+ severity,
+ "OpenTofu version mismatch",
+ fmt.Sprintf(
+ "The local OpenTofu version (%s) does not match the configured version for remote workspace %s/%s (%s).%s",
+ tfversion.String(),
+ b.organization,
+ workspace.Name,
+ workspace.TerraformVersion,
+ suggestion,
+ ),
+ ))
+
+ return diags
+}
+
+func (b *Remote) IsLocalOperations() bool {
+ return b.forceLocal
+}
+
+func generalError(msg string, err error) error {
+ var diags tfdiags.Diagnostics
+
+ if urlErr, ok := err.(*url.Error); ok {
+ err = urlErr.Err
+ }
+
+ switch err {
+ case context.Canceled:
+ return err
+ case tfe.ErrResourceNotFound:
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ fmt.Sprintf("%s: %v", msg, err),
+ `The configured "remote" backend returns '404 Not Found' errors for resources `+
+ `that do not exist, as well as for resources that a user doesn't have access `+
+ `to. If the resource does exist, please check the rights for the used token.`,
+ ))
+ return diags.Err()
+ default:
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ fmt.Sprintf("%s: %v", msg, err),
+ `The configured "remote" backend encountered an unexpected error. Sometimes `+
+ `this is caused by network connection problems, in which case you could retry `+
+ `the command. If the issue persists please open a support ticket to get help `+
+ `resolving the problem.`,
+ ))
+ return diags.Err()
+ }
+}
+
+func checkConstraintsWarning(err error) tfdiags.Diagnostic {
+ return tfdiags.Sourceless(
+ tfdiags.Warning,
+ fmt.Sprintf("Failed to check version constraints: %v", err),
+ "Checking version constraints is considered optional, but this is an "+
+ "unexpected error which should be reported.",
+ )
+}
+
+// The newline in this error is to make it look good in the CLI!
+const initialRetryError = `
+[reset][yellow]There was an error connecting to the remote backend. 
Please do not exit +OpenTofu to prevent data loss! Trying to restore the connection... +[reset] +` + +const repeatedRetryError = ` +[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset] +` + +const operationCanceled = ` +[reset][red]The remote operation was successfully cancelled.[reset] +` + +const operationNotCanceled = ` +[reset][red]The remote operation was not cancelled.[reset] +` + +var schemaDescriptions = map[string]string{ + "hostname": "The remote backend hostname to connect to.", + "organization": "The name of the organization containing the targeted workspace(s).", + "token": "The token used to authenticate with the remote backend. If credentials for the\n" + + "host are configured in the CLI Config File, then those will be used instead.", + "name": "A workspace name used to map the default workspace to a named remote workspace.\n" + + "When configured only the default workspace can be used. This option conflicts\n" + + "with \"prefix\"", + "prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" + + "will automatically be prefixed with this prefix. If omitted only the default\n" + + "workspace can be used. This option conflicts with \"name\"", +} diff --git a/pkg/backend/remote/backend_apply.go b/pkg/backend/remote/backend_apply.go new file mode 100644 index 00000000000..b53c41ca3d4 --- /dev/null +++ b/pkg/backend/remote/backend_apply.go @@ -0,0 +1,306 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Apply operation") + + var diags tfdiags.Diagnostics + + // We should remove the `CanUpdate` part of this test, but for now + // (to remain compatible with tfe.v2.1) we'll leave it in here. + if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an 
 argument at this time.`,
+ ))
+ }
+
+ if b.hasExplicitVariableValues(op) {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Run variables are currently not supported",
+ fmt.Sprintf(
+ "The \"remote\" backend does not support setting run variables at this time. "+
+ "Currently the only way to pass variables to the remote backend is by "+
+ "creating a '*.auto.tfvars' variables file. This file will automatically "+
+ "be loaded by the \"remote\" backend when the workspace is configured to use "+
+ "OpenTofu v0.10.0 or later.\n\nAdditionally you can also set variables on "+
+ "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables",
+ b.hostname, b.organization, op.Workspace,
+ ),
+ ))
+ }
+
+ if !op.HasConfig() && op.PlanMode != plans.DestroyMode {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "No configuration files found",
+ `Apply requires configuration to be present. Applying without a configuration `+
+ `would mark everything for destruction, which is normally not what is desired. `+
+ `If you would like to destroy everything, please run 'tofu destroy' which `+
+ `does not require any configuration files.`,
+ ))
+ }
+
+ // For API versions prior to 2.3, RemoteAPIVersion will return an empty string,
+ // so if there's an error when parsing the RemoteAPIVersion, it's handled as
+ // equivalent to an API version < 2.3. 
+ currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + + if !op.PlanRefresh { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is not supported", + fmt.Sprintf( + `The host %s does not support the -refresh=false option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if op.PlanMode == plans.RefreshOnlyMode { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Refresh-only mode is not supported", + fmt.Sprintf( + `The host %s does not support -refresh-only mode for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.ForceReplace) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning resource replacements is not supported", + fmt.Sprintf( + `The host %s does not support the -replace option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.Targets) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.3") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is not supported", + fmt.Sprintf( + `The host %s does not support the -target option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + // Run the plan phase. 
+ r, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. + if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. + if !w.AutoApply && !r.Actions.IsConfirmable { + return r, nil + } + + // Since we already checked the permissions before creating the run + // this should never happen. But it doesn't hurt to keep this in as + // a safeguard for any unexpected situations. + if !w.AutoApply && !r.Permissions.CanApply { + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + switch op.PlanMode { + case plans.DestroyMode: + return r, generalError("Failed to discard destroy", err) + default: + return r, generalError("Failed to discard apply", err) + } + } + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to approve the pending changes", + fmt.Sprintf("There are pending changes, but the provided credentials have "+ + "insufficient rights to approve them. The run will be discarded to prevent "+ + "it from blocking the queue waiting for external approval. 
To queue a run "+ + "that can be approved by someone else, please use the 'Queue Plan' button in "+ + "the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace), + )) + return r, diags.Err() + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove + + if !w.AutoApply { + if mustConfirm { + opts := &tofu.InputOpts{Id: "approve"} + + if op.PlanMode == plans.DestroyMode { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "OpenTofu will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "OpenTofu will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + err = b.confirm(stopCtx, op, opts, r, "yes") + if err != nil && err != errRunApproved { + return r, err + } + } + + if err != errRunApproved { + if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + } + + // If we don't need to ask for confirmation, insert a blank + // line to separate the ouputs. + if w.AutoApply || !mustConfirm { + if b.CLI != nil { + b.CLI.Output("") + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + skip := 0 + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) 
+ } + + // Skip the first 3 lines to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + return r, nil +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if it's still pending. If the apply started it +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... +` diff --git a/pkg/backend/remote/backend_apply_test.go b/pkg/backend/remote/backend_apply_test.go new file mode 100644 index 00000000000..d99b5a81bf6 --- /dev/null +++ b/pkg/backend/remote/backend_apply_test.go @@ -0,0 +1,1668 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + tfversion "github.com/kubegems/opentofu/version" +) + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return 
testOperationApplyWithTimeout(t, configDir, 0) +} + +func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeApply, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestRemote_applyBasic(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, 
"Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestRemote_applyCanceled(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) + } +} + +func TestRemote_applyWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace without permissions. 
+ w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueApply = false + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to apply changes") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVCS(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace with a VCS. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + VCSRepo: &tfe.VCSRepoOptions{}, + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { + t.Fatalf("expected a VCS error, got: %v", errOutput) + } +} + +func TestRemote_applyWithParallelism(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &tofu.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_applyWithPlan(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.PlanFile = planfile.NewWrappedLocal(&planfile.Reader{}) + op.Workspace = backend.DefaultStateName 
+ + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_applyWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh set + // to false. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithoutRefreshIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning without refresh is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh-only set + // to true. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Refresh-only mode is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithTarget(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // target address we requested 
above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithTargetIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + // Set the tfe client's RemoteAPIVersion to an empty string, to mimic + // API versions prior to 2.3. + b.client.SetFakeRemoteAPIVersion("") + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Resource targeting is not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_applyWithReplace(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected 
plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_applyWithReplaceIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning resource replacements is not supported") { + t.Fatalf("expected a not supported error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVariables(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") + defer configCleanup() + + op.Variables = testVariables(tofu.ValueFromNamedFile, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } 
+ + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_applyNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) + } +} + +func TestRemote_applyNoChanges(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + 
if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summery: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestRemote_applyNoApprove(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Apply discarded") { + t.Fatalf("expected an apply discarded error, got: %v", errOutput) + } +} + +func TestRemote_applyAutoApprove(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected 
an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyApprovedExternally(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. 
+ time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) + if err != nil { + t.Fatalf("unexpected error approving run: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "approved using the UI or API") { + t.Fatalf("expected external approval in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyDiscardedExternally(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error 
starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) + if err != nil { + t.Fatalf("unexpected error discarding run: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "discarded using the UI or API") { + t.Fatalf("expected external discard output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyWithAutoApply(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace that auto applies. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + AutoApply: tfe.Bool(true), + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. 
+ t.Setenv("TF_FORCE_LOCAL_BACKEND", "1") + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestRemote_applyLockTimeout(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. 
+ c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{})
+ if err != nil {
+ t.Fatalf("error creating configuration version: %v", err)
+ }
+
+ // Create a pending run to block this run.
+ _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{
+ ConfigurationVersion: c,
+ Workspace: w,
+ })
+ if err != nil {
+ t.Fatalf("error creating pending run: %v", err)
+ }
+
+ op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond)
+ defer configCleanup()
+ defer done(t)
+
+ input := testInput(t, map[string]string{
+ "cancel": "yes",
+ "approve": "yes",
+ })
+
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ _, err = b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ sigint := make(chan os.Signal, 1)
+ signal.Notify(sigint, syscall.SIGINT)
+ select {
+ case <-sigint:
+ // Stop redirecting SIGINT signals.
+ signal.Stop(sigint)
+ case <-time.After(200 * time.Millisecond):
+ t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds")
+ }
+
+ if len(input.answers) != 2 {
+ t.Fatalf("expected unused answers, got: %v", input.answers)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "Lock timeout exceeded") {
+ t.Fatalf("expected lock timeout error in output: %s", output)
+ }
+ if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+ t.Fatalf("unexpected plan summary in output: %s", output)
+ }
+ if strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("unexpected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyDestroy(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy")
+ defer configCleanup()
+ defer done(t)
+
+ input := testInput(t, map[string]string{
+ "approve": "yes",
+ })
+
+ op.PlanMode = plans.DestroyMode
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ if run.Result != backend.OperationSuccess {
+ t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+ }
+ if run.PlanEmpty {
+ t.Fatalf("expected a non-empty plan")
+ }
+
+ if len(input.answers) > 0 {
+ t.Fatalf("expected no unused answers, got: %v", input.answers)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") {
+ t.Fatalf("expected plan summary in output: %s", output)
+ }
+ if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") {
+ t.Fatalf("expected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyDestroyNoConfig(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ input := testInput(t, map[string]string{
+ "approve": "yes",
+ })
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/empty")
+ defer configCleanup()
+ defer done(t)
+
+ op.PlanMode = plans.DestroyMode
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ if run.Result != backend.OperationSuccess {
+ t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+ }
+ if run.PlanEmpty {
+ t.Fatalf("expected a non-empty plan")
+ }
+
+ if len(input.answers) > 0 {
+ t.Fatalf("expected no unused answers, got: %v", input.answers)
+ }
+}
+
+func TestRemote_applyPolicyPass(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed")
+ defer configCleanup()
+ defer done(t)
+
+ input := testInput(t, map[string]string{
+ "approve": "yes",
+ })
+
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ if run.Result != backend.OperationSuccess {
+ t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+ }
+ if run.PlanEmpty {
+ t.Fatalf("expected a non-empty plan")
+ }
+
+ if len(input.answers) > 0 {
+ t.Fatalf("expected no unused answers, got: %v", input.answers)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+ t.Fatalf("expected plan summary in output: %s", output)
+ }
+ if !strings.Contains(output, "Sentinel Result: true") {
+ t.Fatalf("expected policy check result in output: %s", output)
+ }
+ if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("expected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyPolicyHardFail(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed")
+ defer configCleanup()
+
+ input := testInput(t, map[string]string{
+ "approve": "yes",
+ })
+
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ viewOutput := done(t)
+ if run.Result == 
backend.OperationSuccess {
+ t.Fatal("expected apply operation to fail")
+ }
+ if !run.PlanEmpty {
+ t.Fatalf("expected plan to be empty")
+ }
+
+ if len(input.answers) != 1 {
+ t.Fatalf("expected an unused answer, got: %v", input.answers)
+ }
+
+ errOutput := viewOutput.Stderr()
+ if !strings.Contains(errOutput, "hard failed") {
+ t.Fatalf("expected a policy check error, got: %v", errOutput)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+ t.Fatalf("expected plan summary in output: %s", output)
+ }
+ if !strings.Contains(output, "Sentinel Result: false") {
+ t.Fatalf("expected policy check result in output: %s", output)
+ }
+ if strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("unexpected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyPolicySoftFail(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed")
+ defer configCleanup()
+ defer done(t)
+
+ input := testInput(t, map[string]string{
+ "override": "override",
+ "approve": "yes",
+ })
+
+ op.AutoApprove = false
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ if run.Result != backend.OperationSuccess {
+ t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+ }
+ if run.PlanEmpty {
+ t.Fatalf("expected a non-empty plan")
+ }
+
+ if len(input.answers) > 0 {
+ t.Fatalf("expected no unused answers, got: %v", input.answers)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+ t.Fatalf("expected plan summary in output: %s", output)
+ }
+ if !strings.Contains(output, "Sentinel Result: false") {
+ t.Fatalf("expected policy check result in output: %s", output)
+ }
+ if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("expected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyPolicySoftFailAutoApproveSuccess(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed")
+ defer configCleanup()
+
+ input := testInput(t, map[string]string{})
+
+ op.AutoApprove = true
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = backend.DefaultStateName
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ viewOutput := done(t)
+ if run.Result != backend.OperationSuccess {
+ t.Fatal("expected apply operation to succeed due to auto-approve")
+ }
+
+ if run.PlanEmpty {
+ t.Fatalf("expected plan to not be empty, plan operation completed without error")
+ }
+
+ if len(input.answers) != 0 {
+ t.Fatalf("expected no answers, got: %v", input.answers)
+ }
+
+ errOutput := viewOutput.Stderr()
+ if strings.Contains(errOutput, "soft failed") {
+ t.Fatalf("expected no policy check errors, instead got: %v", errOutput)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Sentinel Result: false") {
+ t.Fatalf("expected policy check to be false, instead got: %s", output)
+ }
+ if !strings.Contains(output, "Apply complete!") {
+ t.Fatalf("expected apply to be complete, instead got: %s", output)
+ }
+
+ if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("expected resources, instead got: %s", output)
+ }
+}
+
+func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ // Create a named workspace that auto applies.
+ _, err := b.client.Workspaces.Create(
+ context.Background(),
+ b.organization,
+ tfe.WorkspaceCreateOptions{
+ AutoApply: tfe.Bool(true),
+ Name: tfe.String(b.prefix + "prod"),
+ },
+ )
+ if err != nil {
+ t.Fatalf("error creating named workspace: %v", err)
+ }
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed")
+ defer configCleanup()
+ defer done(t)
+
+ input := testInput(t, map[string]string{
+ "override": "override",
+ "approve": "yes",
+ })
+
+ op.UIIn = input
+ op.UIOut = b.CLI
+ op.Workspace = "prod"
+
+ run, err := b.Operation(context.Background(), op)
+ if err != nil {
+ t.Fatalf("error starting operation: %v", err)
+ }
+
+ <-run.Done()
+ if run.Result != backend.OperationSuccess {
+ t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+ }
+ if run.PlanEmpty {
+ t.Fatalf("expected a non-empty plan")
+ }
+
+ if len(input.answers) != 1 {
+ t.Fatalf("expected an unused answer, got: %v", input.answers)
+ }
+
+ output := b.CLI.(*cli.MockUi).OutputWriter.String()
+ if !strings.Contains(output, "Running apply in the remote backend") {
+ t.Fatalf("expected remote backend header in output: %s", output)
+ }
+ if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+ t.Fatalf("expected plan summary in output: %s", output)
+ }
+ if !strings.Contains(output, "Sentinel Result: false") {
+ t.Fatalf("expected policy check result in output: %s", output)
+ }
+ if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
+ t.Fatalf("expected apply summary in output: %s", output)
+ }
+}
+
+func TestRemote_applyWithRemoteError(t *testing.T) {
+ b, bCleanup := testBackendDefault(t)
+ defer bCleanup()
+
+ op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error")
+ defer configCleanup()
+ defer done(t)
+
+ 
op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected apply error in output: %s", output) + } +} + +func TestRemote_applyVersionCheck(t *testing.T) { + testCases := map[string]struct { + localVersion string + remoteVersion string + forceLocal bool + executionMode string + wantErr string + }{ + "versions can be different for remote apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "remote", + }, + "versions can be different for local apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "local", + }, + "force local with remote operations and different versions is acceptable": { + localVersion: "0.14.0", + remoteVersion: "0.14.0-acme-provider-bundle", + forceLocal: true, + executionMode: "remote", + }, + "no error if versions are identical": { + localVersion: "0.14.0", + remoteVersion: "0.14.0", + forceLocal: true, + executionMode: "remote", + }, + "no error if force local but workspace has remote operations disabled": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + forceLocal: true, + executionMode: "local", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // SETUP: Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // SETUP: Set local version for the test case + 
tfversion.Prerelease = "" + tfversion.Version = tc.localVersion + tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) + + // SETUP: Set force local for the test case + b.forceLocal = tc.forceLocal + + ctx := context.Background() + + // SETUP: set the operations and Terraform Version fields on the + // remote workspace + _, err := b.client.Workspaces.Update( + ctx, + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: tfe.String(tc.executionMode), + TerraformVersion: tfe.String(tc.remoteVersion), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + // RUN: prepare the apply operation and run it + op, configCleanup, _ := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // RUN: wait for completion + <-run.Done() + output := done(t) + + if tc.wantErr != "" { + // ASSERT: if the test case wants an error, check for failure + // and the error message + if run.Result != backend.OperationFailure { + t.Fatalf("expected run to fail, but result was %#v", run.Result) + } + errOutput := output.Stderr() + if !strings.Contains(errOutput, tc.wantErr) { + t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) + } + } else { + // ASSERT: otherwise, check for success and appropriate output + // based on whether the run should be local or remote + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + output := b.CLI.(*cli.MockUi).OutputWriter.String() + hasRemote := strings.Contains(output, "Running apply in 
the remote backend") + hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") + hasResources := run.State.HasManagedResourceInstanceObjects() + if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { + if !hasRemote { + t.Errorf("missing remote backend header in output: %s", output) + } + if !hasSummary { + t.Errorf("expected apply summary in output: %s", output) + } + } else { + if hasRemote { + t.Errorf("unexpected remote backend header in output: %s", output) + } + if !hasResources { + t.Errorf("expected resources in state") + } + } + } + }) + } +} diff --git a/pkg/backend/remote/backend_common.go b/pkg/backend/remote/backend_common.go new file mode 100644 index 00000000000..7045f0fbc7d --- /dev/null +++ b/pkg/backend/remote/backend_common.go @@ -0,0 +1,582 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tofu" +) + +var ( + errApplyDiscarded = errors.New("Apply discarded.") + errDestroyDiscarded = errors.New("Destroy discarded.") + errRunApproved = errors.New("approved using the UI or API") + errRunDiscarded = errors.New("discarded using the UI or API") + errRunOverridden = errors.New("overridden using the UI or API") +) + +var ( + backoffMin = 1000.0 + backoffMax = 3000.0 + + runPollInterval = 3 * time.Second +) + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. 
+func backoff(min, max float64, iter int) time.Duration {
+ backoff := math.Pow(2, float64(iter)/5) * min
+ if backoff > max {
+ backoff = max
+ }
+ return time.Duration(backoff) * time.Millisecond
+}
+
+func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
+ started := time.Now()
+ updated := started
+ for i := 0; ; i++ {
+ select {
+ case <-stopCtx.Done():
+ return r, stopCtx.Err()
+ case <-cancelCtx.Done():
+ return r, cancelCtx.Err()
+ case <-time.After(backoff(backoffMin, backoffMax, i)):
+ // Timer up, show status
+ }
+
+ // Retrieve the run to get its current status.
+ r, err := b.client.Runs.Read(stopCtx, r.ID)
+ if err != nil {
+ return r, generalError("Failed to retrieve run", err)
+ }
+
+ // Return if the run is no longer pending.
+ if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
+ if i == 0 && opType == "plan" && b.CLI != nil {
+ b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
+ }
+ if i > 0 && b.CLI != nil {
+ // Insert a blank line to separate the outputs.
+ b.CLI.Output("")
+ }
+ return r, nil
+ }
+
+ // Check if 30 seconds have passed since the last update.
+ current := time.Now()
+ if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
+ updated = current
+ position := 0
+ elapsed := ""
+
+ // Calculate and set the elapsed time.
+ if i > 0 {
+ elapsed = fmt.Sprintf(
+ " (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
+ }
+
+ // Retrieve the workspace used to run this operation in.
+ w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
+ if err != nil {
+ return nil, generalError("Failed to retrieve workspace", err)
+ }
+
+ // If the workspace is locked the run will not be queued and we can
+ // update the status without making any expensive calls. 
+ if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run. + if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := &tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. + if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.ReadRunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. 
+ for _, item := range rq.Items {
+ if r.ID == item.ID {
+ position = item.PositionInQueue
+ break search
+ }
+ }
+
+ // Exit the loop when we've seen all pages.
+ if rq.CurrentPage >= rq.TotalPages {
+ break
+ }
+
+ // Update the page number to get the next page.
+ options.PageNumber = rq.NextPage
+ }
+
+ if position > 0 {
+ c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization)
+ if err != nil {
+ return r, generalError("Failed to retrieve capacity", err)
+ }
+ b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
+ "Waiting for %d queued run(s) to finish before starting...%s",
+ position-c.Running,
+ elapsed,
+ )))
+ continue
+ }
+
+ b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
+ "Waiting for the %s to start...%s", opType, elapsed)))
+ }
+}
+
+// hasExplicitVariableValues is a best-effort check to determine whether the
+// user has provided -var or -var-file arguments to a remote operation.
+//
+// The results may be inaccurate if the configuration is invalid or if
+// individual variable values are invalid. That's okay because we only use this
+// result to hint the user to set variables a different way. It's always the
+// remote system's responsibility to do final validation of the input.
+func (b *Remote) hasExplicitVariableValues(op *backend.Operation) bool {
+ // Load the configuration using the caller-provided configuration loader.
+ config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir, op.RootCall)
+ if configDiags.HasErrors() {
+ // If we can't load the configuration then we'll assume no explicit
+ // variable values just to let the remote operation start and let
+ // the remote system return the same set of configuration errors.
+ return false
+ }
+
+ // We're intentionally ignoring the diagnostics here because validation
+ // of the variable values is the responsibility of the remote system. 
Our + // goal here is just to make a best effort count of how many variable + // values are coming from -var or -var-file CLI arguments so that we can + // hint the user that those are not supported for remote operations. + variables, _ := backend.ParseVariableValues(op.Variables, config.Module.Variables) + + // Check for explicitly-defined (-var and -var-file) variables, which the + // remote backend does not support. All other source types are okay, + // because they are implicit from the execution context anyway and so + // their final values will come from the _remote_ execution context. + for _, v := range variables { + switch v.SourceType { + case tofu.ValueFromCLIArg, tofu.ValueFromNamedFile: + return true + } + } + + return false +} + +func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.CostEstimate == nil { + return nil + } + + msgPrefix := "Cost estimation" + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return stopCtx.Err() + case <-cancelCtx.Done(): + return cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + } + + // Retrieve the cost estimate to get its current status. + ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) + if err != nil { + return generalError("Failed to retrieve cost estimate", err) + } + + // If the run is canceled or errored, but the cost-estimate still has + // no result, there is nothing further to render. + if ce.Status != tfe.CostEstimateFinished { + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return nil + } + } + + // checking if i == 0 so as to avoid printing this starting horizontal-rule + // every retry, and that it only prints it on the first (i=0) attempt. 
+ if b.CLI != nil && i == 0 { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + + switch ce.Status { + case tfe.CostEstimateFinished: + delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) + if err != nil { + return generalError("Unexpected error", err) + } + + sign := "+" + if delta < 0 { + sign = "-" + } + + deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) + + if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + return nil + case tfe.CostEstimatePending, tfe.CostEstimateQueued: + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) + } + continue + case tfe.CostEstimateSkippedDueToTargeting: + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + b.CLI.Output("Not available for this plan, because it was created with the -target option.") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateErrored: + b.CLI.Output(msgPrefix + " errored.\n") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateCanceled: + return fmt.Errorf(msgPrefix + " canceled.") + default: + return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) + } + } +} + +func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + // Read the policy check logs. This is a blocking call that will only + // return once the policy check is complete. + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + // Retrieve the policy check to get its current status. + pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + // If the run is canceled or errored, but the policy check still has + // no result, there is nothing further to render. 
+ if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + switch pc.Status { + case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: + continue + } + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization policy check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace policy check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + } + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + switch pc.Status { + case tfe.PolicyPasses: + if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) + + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) + } + + if op.AutoApprove { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + opts := &tofu.InputOpts{ + Id: "override", + Query: "\nDo you want to override the soft failed policy check?", + Description: "Only 'override' will be 
accepted to override.", + } + err = b.confirm(stopCtx, op, opts, r, "override") + if err != nil && err != errRunOverridden { + return fmt.Errorf("Failed to override: %w\n%s\n", err, runUrl) + } + + if err != errRunOverridden { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) + } + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) + } + } + + return nil +} + +func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *tofu.InputOpts, r *tfe.Run, keyword string) error { + doneCtx, cancel := context.WithCancel(stopCtx) + result := make(chan error, 2) + + panicHandler := logging.PanicHandlerWithTraceFn() + + go func() { + defer panicHandler() + + // Make sure we cancel doneCtx before we return + // so the input command is also canceled. + defer cancel() + + for { + select { + case <-doneCtx.Done(): + return + case <-stopCtx.Done(): + return + case <-time.After(runPollInterval): + // Retrieve the run again to get its current status. 
+ r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + result <- generalError("Failed to retrieve run", err) + return + } + + switch keyword { + case "override": + if r.Status != tfe.RunPolicyOverride { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunOverridden + } + } + case "yes": + if !r.Actions.IsConfirmable { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunApproved + } + } + } + + if err != nil { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) + } + + if err == errRunDiscarded { + err = errApplyDiscarded + if op.PlanMode == plans.DestroyMode { + err = errDestroyDiscarded + } + } + + result <- err + return + } + } + } + }() + + result <- func() error { + v, err := op.UIIn.Input(doneCtx, opts) + if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { + return fmt.Errorf("Error asking %s: %w", opts.Id, err) + } + + // We return the error of our parent channel as we don't + // care about the error of the doneCtx which is only used + // within this function. So if the doneCtx was canceled + // because stopCtx was canceled, this will properly return + // a context.Canceled error and otherwise it returns nil. + if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { + return stopCtx.Err() + } + + // Make sure we cancel the context here so the loop that + // checks for external changes to the run is ended before + // we start to make changes ourselves. + cancel() + + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. 
+ if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.PlanMode == plans.DestroyMode { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was discarded successfully, we still + // return an error as the apply command was canceled. + if op.PlanMode == plans.DestroyMode { + return errDestroyDiscarded + } + return errApplyDiscarded + } + + return nil + }() + + return <-result +} diff --git a/pkg/backend/remote/backend_context.go b/pkg/backend/remote/backend_context.go new file mode 100644 index 00000000000..b719fae65b1 --- /dev/null +++ b/pkg/backend/remote/backend_context.go @@ -0,0 +1,301 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "fmt" + "log" + "strings" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +// Context implements backend.Local. +func (b *Remote) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := &backend.LocalRun{ + PlanOpts: &tofu.PlanOpts{ + Mode: op.PlanMode, + Targets: op.Targets, + }, + } + + op.StateLocker = op.StateLocker.WithContext(context.Background()) + + // Get the remote workspace name. + remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) + + // Get the latest state. 
+ log.Printf("[TRACE] backend/remote: requesting state manager for workspace %q", remoteWorkspaceName) + stateMgr, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] backend/remote: requesting state lock for workspace %q", remoteWorkspaceName) + if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { + return nil, nil, diags + } + + defer func() { + // If we're returning with errors, and thus not producing a valid + // context, we'll want to avoid leaving the remote workspace locked. + if diags.HasErrors() { + diags = diags.Append(op.StateLocker.Unlock()) + } + }() + + log.Printf("[TRACE] backend/remote: reading remote state for workspace %q", remoteWorkspaceName) + if err := stateMgr.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + // Initialize our context options + var opts tofu.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.UIInput = op.UIIn + opts.Encryption = op.Encryption + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] backend/remote: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) + ret.InputState = stateMgr.State() + + log.Printf("[TRACE] backend/remote: loading configuration for the current working directory") + config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir, op.RootCall) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + ret.Config = config + + if op.AllowUnsetVariables { + // If we're not going to use the variables in an operation we'll be + // more lax about them, stubbing out any unset ones as unknown. 
+ // This gives us enough information to produce a consistent context, + // but not enough information to run a real operation (plan, apply, etc) + ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) + } else { + // The underlying API expects us to use the opaque workspace id to request + // variables, so we'll need to look that up using our organization name + // and workspace name. + remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) + return nil, nil, diags + } + + w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) + return nil, nil, diags + } + + if isLocalExecutionMode(w.ExecutionMode) { + log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) + } else { + log.Printf("[TRACE] backend/remote: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) + tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) + if err != nil && err != tfe.ErrResourceNotFound { + diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) + return nil, nil, diags + } + if tfeVariables != nil { + if op.Variables == nil { + op.Variables = make(map[string]backend.UnparsedVariableValue) + } + for _, v := range tfeVariables.Items { + if v.Category == tfe.CategoryTerraform { + if _, ok := op.Variables[v.Key]; !ok { + op.Variables[v.Key] = &remoteStoredVariableValue{ + definition: v, + } + } + } + } + } + } + + if op.Variables != nil { + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } 
+ ret.PlanOpts.SetVariables = variables
+ }
+ }
+
+ tfCtx, ctxDiags := tofu.NewContext(&opts)
+ diags = diags.Append(ctxDiags)
+ ret.Core = tfCtx
+
+ log.Printf("[TRACE] backend/remote: finished building tofu.Context")
+
+ return ret, stateMgr, diags
+}
+
+func (b *Remote) getRemoteWorkspaceName(localWorkspaceName string) string {
+ switch {
+ case localWorkspaceName == backend.DefaultStateName:
+ // The default workspace name is a special case, for when the backend
+ // is configured to work with an exact remote workspace rather than with
+ // a remote workspace _prefix_.
+ return b.workspace
+ case b.prefix != "" && !strings.HasPrefix(localWorkspaceName, b.prefix):
+ return b.prefix + localWorkspaceName
+ default:
+ return localWorkspaceName
+ }
+}
+
+func (b *Remote) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) {
+ remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName)
+
+ log.Printf("[TRACE] backend/remote: looking up workspace for %s/%s", b.organization, remoteWorkspaceName)
+ remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName)
+ if err != nil {
+ return nil, err
+ }
+
+ return remoteWorkspace, nil
+}
+
+func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) {
+ remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName)
+ if err != nil {
+ return "", err
+ }
+
+ return remoteWorkspace.ID, nil
+}
+
+func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) tofu.InputValues {
+ ret := make(tofu.InputValues, len(decls))
+
+ for name, cfg := range decls {
+ raw, exists := vv[name]
+ if !exists {
+ ret[name] = &tofu.InputValue{
+ Value: cty.UnknownVal(cfg.Type),
+ SourceType: tofu.ValueFromConfig,
+ }
+ continue
+ }
+
+ val, diags := raw.ParseVariableValue(cfg.ParsingMode)
+ if diags.HasErrors() {
+ ret[name] = &tofu.InputValue{
+ Value: cty.UnknownVal(cfg.Type),
+ 
SourceType: tofu.ValueFromConfig, + } + continue + } + ret[name] = val + } + + return ret +} + +// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation +// that translates from the go-tfe representation of stored variables into +// the OpenTofu Core backend representation of variables. +type remoteStoredVariableValue struct { + definition *tfe.Variable +} + +var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) + +func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var val cty.Value + + switch { + case v.definition.Sensitive: + // If it's marked as sensitive then it's not available for use in + // local operations. We'll use an unknown value as a placeholder for + // it so that operations that don't need it might still work, but + // we'll also produce a warning about it to add context for any + // errors that might result here. + val = cty.DynamicVal + if !v.definition.HCL { + // If it's not marked as HCL then we at least know that the + // value must be a string, so we'll set that in case it allows + // us to do some more precise type checking. + val = cty.UnknownVal(cty.String) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), + )) + + case v.definition.HCL: + // If the variable value is marked as being in HCL syntax, we need to + // parse it the same way as it would be interpreted in a .tfvars + // file because that is how it would get passed to OpenTofu CLI for + // a remote operation and we want to mimic that result as closely as + // possible. 
+ var exprDiags hcl.Diagnostics
+ expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1})
+ if expr != nil {
+ var moreDiags hcl.Diagnostics
+ val, moreDiags = expr.Value(nil)
+ exprDiags = append(exprDiags, moreDiags...)
+ } else {
+ // We'll have already put some errors in exprDiags above, so we'll
+ // just stub out the value here.
+ val = cty.DynamicVal
+ }
+
+ // We don't have sufficient context to return decent error messages
+ // for syntax errors in the remote values, so we'll just return a
+ // generic message instead for now.
+ // (More complete error messages will still result from true remote
+ // operations, because they'll run on the remote system where we've
+ // materialized the values into a tfvars file we can report from.)
+ if exprDiags.HasErrors() {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ fmt.Sprintf("Invalid expression for var.%s", v.definition.Key),
+ fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key),
+ ))
+ }
+
+ default:
+ // A variable value _not_ marked as HCL is always a string, given
+ // literally.
+ val = cty.StringVal(v.definition.Value)
+ }
+
+ return &tofu.InputValue{
+ Value: val,
+
+ // We mark these as "from input" with the rationale that entering
+ // variable values into the Terraform Cloud or Enterprise UI is,
+ // roughly speaking, a similar idea to entering variable values at
+ // the interactive CLI prompts. It's not a perfect correspondence,
+ // but it's closer than the other options. 
+ SourceType: tofu.ValueFromInput, + }, diags +} diff --git a/pkg/backend/remote/backend_context_test.go b/pkg/backend/remote/backend_context_test.go new file mode 100644 index 00000000000..95e4ad39393 --- /dev/null +++ b/pkg/backend/remote/backend_context_test.go @@ -0,0 +1,476 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + + tfe "github.com/hashicorp/go-tfe" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" +) + +func TestRemoteStoredVariableValue(t *testing.T) { + tests := map[string]struct { + Def *tfe.Variable + Want cty.Value + WantError string + }{ + "string literal": { + &tfe.Variable{ + Key: "test", + Value: "foo", + HCL: false, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "string HCL": { + &tfe.Variable{ + Key: "test", + Value: `"foo"`, + HCL: true, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "list HCL": { + &tfe.Variable{ + Key: "test", + Value: `[]`, + HCL: true, + Sensitive: false, + }, + cty.EmptyTupleVal, + ``, + }, + "null HCL": { + &tfe.Variable{ + Key: "test", + Value: `null`, + HCL: true, + Sensitive: false, + }, + cty.NullVal(cty.DynamicPseudoType), + ``, + }, + "literal sensitive": { + &tfe.Variable{ + Key: "test", + HCL: false, + Sensitive: true, + }, + cty.UnknownVal(cty.String), + ``, + }, + "HCL sensitive": { + &tfe.Variable{ + Key: "test", + HCL: true, + Sensitive: true, + 
}, + cty.DynamicVal, + ``, + }, + "HCL computation": { + // This (stored expressions containing computation) is not a case + // we intentionally supported, but it became possible for remote + // operations in Terraform 0.12 (due to Terraform Cloud/Enterprise + // just writing the HCL verbatim into generated `.tfvars` files). + // We support it here for consistency, and we continue to support + // it in both places for backward-compatibility. In practice, + // there's little reason to do computation in a stored variable + // value because references are not supported. + &tfe.Variable{ + Key: "test", + Value: `[for v in ["a"] : v]`, + HCL: true, + Sensitive: false, + }, + cty.TupleVal([]cty.Value{cty.StringVal("a")}), + ``, + }, + "HCL syntax error": { + &tfe.Variable{ + Key: "test", + Value: `[`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + "HCL with references": { + &tfe.Variable{ + Key: "test", + Value: `foo.bar`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + v := &remoteStoredVariableValue{ + definition: test.Def, + } + // This ParseVariableValue implementation ignores the parsing mode, + // so we'll just always parse literal here. (The parsing mode is + // selected by the remote server, not by our local configuration.) 
+ gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + got := gotIV.Value + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} + +func TestRemoteContextWithVars(t *testing.T) { + catTerraform := tfe.CategoryTerraform + catEnv := tfe.CategoryEnv + + tests := map[string]struct { + Opts *tfe.VariableCreateOptions + WantError string + }{ + "Terraform variable": { + &tfe.VariableCreateOptions{ + Category: &catTerraform, + }, + `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, + }, + "environment variable": { + &tfe.VariableCreateOptions{ + Category: &catEnv, + }, + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/empty" + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: backend.DefaultStateName, + } + + v := test.Opts + if v.Key == nil { + key := "key" + v.Key = &key + } + b.client.Variables.Create(context.TODO(), workspaceID, *v) + + _, _, diags := b.LocalRun(op) + + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + // When Context() returns an error, it should unlock the state, + // so re-locking it is expected to succeed. 
+ stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state: %s", err.Error()) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + } + }) + } +} + +func TestRemoteVariablesDoNotOverride(t *testing.T) { + catTerraform := tfe.CategoryTerraform + + varName1 := "key1" + varName2 := "key2" + varName3 := "key3" + + varValue1 := "value1" + varValue2 := "value2" + varValue3 := "value3" + + tests := map[string]struct { + localVariables map[string]backend.UnparsedVariableValue + remoteVariables []*tfe.VariableCreateOptions + expectedVariables tofu.InputValues + }{ + "no local variables": { + map[string]backend.UnparsedVariableValue{}, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, + { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: tofu.ValueFromInput, + 
SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + }, + }, + "single conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue(varValue3), + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: tofu.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + "no conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue(varValue3), + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + 
End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: tofu.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/variables" + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), backend.DefaultStateName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: backend.DefaultStateName, + Variables: test.localVariables, + } + + for _, v := range test.remoteVariables { + b.client.Variables.Create(context.TODO(), workspaceID, *v) + } + + lr, _, diags := b.LocalRun(op) + + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + + actual := lr.PlanOpts.SetVariables + expected := test.expectedVariables + + for expectedKey := range 
expected { + actualValue := actual[expectedKey] + expectedValue := expected[expectedKey] + + if !reflect.DeepEqual(*actualValue, *expectedValue) { + t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) + } + } + }) + } +} + +type testUnparsedVariableValue string + +func (v testUnparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + return &tofu.InputValue{ + Value: cty.StringVal(string(v)), + SourceType: tofu.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, nil +} diff --git a/pkg/backend/remote/backend_plan.go b/pkg/backend/remote/backend_plan.go new file mode 100644 index 00000000000..3500603b0db --- /dev/null +++ b/pkg/backend/remote/backend_plan.go @@ -0,0 +1,457 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +var planConfigurationVersionsPollInterval = 500 * time.Millisecond + +func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Plan operation") + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if op.PlanOutPath != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saving a generated plan is currently not supported", + `The "remote" backend does not support saving the generated execution `+ + `plan locally at this time.`, + )) + } + + if op.GenerateConfigOut != "" { + diags = 
diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Generating configuration is not currently supported", + `The "remote" backend does not currently support generating resource configuration `+ + `as part of a plan.`, + )) + } + + if b.hasExplicitVariableValues(op) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "OpenTofu v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a OpenTofu configuration file in the path being executed and try again.`, + )) + } + + // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, + // so if there's an error when parsing the RemoteAPIVersion, it's handled as + // equivalent to an API version < 2.3. 
+ currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + + if len(op.Targets) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.3") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is not supported", + fmt.Sprintf( + `The host %s does not support the -target option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if !op.PlanRefresh { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is not supported", + fmt.Sprintf( + `The host %s does not support the -refresh=false option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if len(op.ForceReplace) != 0 { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning resource replacements is not supported", + fmt.Sprintf( + `The host %s does not support the -replace option for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + if op.PlanMode == plans.RefreshOnlyMode { + desiredAPIVersion, _ := version.NewVersion("2.4") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Refresh-only mode is not supported", + fmt.Sprintf( + `The host %s does not support -refresh-only mode for `+ + `remote plans.`, + b.hostname, + ), + )) + } + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + return b.plan(stopCtx, cancelCtx, op, w) +} + +func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) + } + + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // De-normalize the configuration directory path. + configDir, err = filepath.Abs(op.ConfigDir) + if err != nil { + return nil, generalError( + "Failed to get absolute path of the configuration directory: %v", err) + } + + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. + configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(configDir), + filepath.Clean(w.WorkingDirectory), + )) + + // If the workspace has a subdirectory as its working directory then + // our configDir will be some parent directory of the current working + // directory. Users are likely to find that surprising, so we'll + // produce an explicit message about it to be transparent about what + // we are doing and why. + if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { + if b.CLI != nil { + b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` +The remote workspace is configured to work with configuration at +%s relative to the target repository. 
+ +OpenTofu will upload the contents of the following directory, +excluding files or directories as defined by a .terraformignore file +at %s/.terraformignore (if it is present), +in order to capture the filesystem context the remote workspace expects: + %s +`), w.WorkingDirectory, configDir, configDir) + "\n") + } + } + + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = os.MkdirTemp("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. + err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(planConfigurationVersionsPollInterval): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + ConfigurationVersion: cv, + Refresh: tfe.Bool(op.PlanRefresh), + Workspace: w, + } + + switch op.PlanMode { + case plans.NormalMode: + // okay, but we don't need to do anything special for this + case 
plans.RefreshOnlyMode: + runOptions.RefreshOnly = tfe.Bool(true) + case plans.DestroyMode: + runOptions.IsDestroy = tfe.Bool(true) + default: + // Shouldn't get here because we should update this for each new + // plan mode we add, mapping it to the corresponding RunCreateOptions + // field. + return nil, generalError( + "Invalid plan mode", + fmt.Errorf("remote backend doesn't support %s", op.PlanMode), + ) + } + + if len(op.Targets) != 0 { + runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) + for _, addr := range op.Targets { + runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) + } + } + + if len(op.ForceReplace) != 0 { + runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) + for _, addr := range op.ForceReplace { + runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) + } + } + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + panicHandler := logging.PanicHandlerWithTraceFn() + + // When the lock timeout is set, if the run is still pending and + // cancellable after that period, we attempt to cancel it. + if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { + go func() { + defer panicHandler() + + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(lockTimeout): + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto aprove flag to indicate that we do not + // want to ask if the remote operation should be canceled. 
+ op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // If the run is canceled or errored, we still continue to the + // cost-estimation and policy check phases to ensure we render any + // results available. In the case of a hard-failed policy check, the + // status of the run will be "errored", but there is still policy + // information which should be shown. + + // Show any cost estimation output. + if r.CostEstimate != nil { + err = b.costEstimate(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + // Check any configured sentinel policies. + if len(r.PolicyChecks) > 0 { + err = b.checkPolicy(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + return r, nil +} + +const planDefaultHeader = ` +[reset][yellow]Running plan in the remote backend. Output will stream here. 
Pressing Ctrl-C +will stop streaming the logs, but will not stop the plan running remotely.[reset] + +Preparing the remote plan... +` + +const runHeader = ` +[reset][yellow]To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` + +// The newline in this error is to make it look good in the CLI! +const lockTimeoutErr = ` +[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. +[reset] +` diff --git a/pkg/backend/remote/backend_plan_test.go b/pkg/backend/remote/backend_plan_test.go new file mode 100644 index 00000000000..b36bb75512e --- /dev/null +++ b/pkg/backend/remote/backend_plan_test.go @@ -0,0 +1,1280 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationPlanWithTimeout(t, configDir, 0) +} + +func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) 
*terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. + depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypePlan, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestRemote_planBasic(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := 
stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } +} + +func TestRemote_planCanceled(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + stateMgr, _ := b.StateMgr(backend.DefaultStateName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error()) + } +} + +func TestRemote_planLongLine(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithoutPermissions(t *testing.T) { + b, 
bCleanup := testBackendNoDefault(t) + defer bCleanup() + + // Create a named workspace without permissions. + w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueRun = false + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_planWithParallelism(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &tofu.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_planWithPlan(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanFile = 
planfile.NewWrappedLocal(&planfile.Reader{}) + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithPath(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.PlanOutPath = "./testdata/plan" + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "generated plan is currently not supported") { + t.Fatalf("expected a generated plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") 
+ } + + // We should find a run inside the mock client that has refresh set + // to false. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithoutRefreshIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning without refresh is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + // We should find a run inside the mock client that has refresh-only 
set + // to true. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithRefreshOnlyIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Refresh-only mode is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithTarget(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // When the backend code creates a new run, we'll tweak it so that it + // has a cost estimation object with the "skipped_due_to_targeting" status, + // emulating how a real server is expected to behave in that case. + b.client.Runs.(*cloud.MockRuns).ModifyNewRun = func(client *cloud.MockClient, options tfe.RunCreateOptions, run *tfe.Run) { + const fakeID = "fake" + // This is the cost estimate object embedded in the run itself which + // the backend will use to learn the ID to request from the cost + // estimates endpoint. It's pending to simulate what a freshly-created + // run is likely to look like. 
+ run.CostEstimate = &tfe.CostEstimate{ + ID: fakeID, + Status: "pending", + } + // The backend will then use the main cost estimation API to retrieve + // the same ID indicated in the object above, where we'll then return + // the status "skipped_due_to_targeting" to trigger the special skip + // message in the backend output. + client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{ + ID: fakeID, + Status: "skipped_due_to_targeting", + } + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // testBackendDefault above attached a "mock UI" to our backend, so we + // can retrieve its non-error output via the OutputWriter in-memory buffer. + gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String() + if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) { + t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput) + } + + // We should find a run inside the mock client that has the same + // target address we requested above. 
+ runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithTargetIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + // Set the tfe client's RemoteAPIVersion to an empty string, to mimic + // API versions prior to 2.3. + b.client.SetFakeRemoteAPIVersion("") + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Resource targeting is not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_planWithReplace(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to 
succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. + runsAPI := b.client.Runs.(*cloud.MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestRemote_planWithReplaceIncompatibleAPIVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + b.client.SetFakeRemoteAPIVersion("2.3") + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Planning resource replacements is not supported") { + t.Fatalf("expected not supported error, got: %v", errOutput) + } +} + +func TestRemote_planWithVariables(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables") + defer configCleanup() + + op.Variables = testVariables(tofu.ValueFromCLIArg, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := 
done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_planNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } +} + +func TestRemote_planNoChanges(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summary: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestRemote_planForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. + t.Setenv("TF_FORCE_LOCAL_BACKEND", "1") + + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithoutOperationsEntitlement(t *testing.T) { + b, bCleanup := testBackendNoOperations(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err 
!= nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. + _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + 
t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planLockTimeout(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. 
+ signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summary in output: %s", output) + } +} + +func TestRemote_planDestroy(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func TestRemote_planDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func 
TestRemote_planWithWorkingDirectory(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("tofu"), + } + + // Configure the workspace to use a custom working directory. + _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/tofu") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "The remote workspace is configured to work with configuration") { + t.Fatalf("expected working directory warning: %s", output) + } + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("tofu"), + } + + // Configure the workspace to use a custom working directory. 
+ _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting current working directory: %v", err) + } + + // We need to change into the configuration directory to make sure + // the logic to upload the correct slug is working as expected. + if err := os.Chdir("./testdata/plan-with-working-directory/tofu"); err != nil { + t.Fatalf("error changing directory: %v", err) + } + defer os.Chdir(wd) // Make sure we change back again when we're done. + + // For this test we need to give our current directory instead of the + // full path to the configuration as we already changed directories. + op, configCleanup, done := testOperationPlan(t, ".") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planCostEstimation(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + 
<-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Resources: 1 of 1 estimated") { + t.Fatalf("expected cost estimate result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicyPass(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") + defer configCleanup() + + op.Workspace = 
backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected 
policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestRemote_planWithRemoteError(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected plan error in output: %s", output) + } +} + +func TestRemote_planOtherError(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "network-error" // custom error response in backend_mock.go + + _, err := b.Operation(context.Background(), op) + if err == nil { + t.Errorf("expected error, got success") + } + + if !strings.Contains(err.Error(), + "the configured \"remote\" backend encountered an unexpected error:\n\nI'm a little teacup") { + t.Fatalf("expected error message, got: %s", err.Error()) + } +} + +func TestRemote_planWithGenConfigOut(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + + op.GenerateConfigOut = "generated.tf" + 
op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Generating configuration is not currently supported") { + t.Fatalf("expected error about config generation, got: %v", errOutput) + } +} diff --git a/pkg/backend/remote/backend_state.go b/pkg/backend/remote/backend_state.go new file mode 100644 index 00000000000..f7b6d3e752a --- /dev/null +++ b/pkg/backend/remote/backend_state.go @@ -0,0 +1,237 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log" + + tfe "github.com/hashicorp/go-tfe" + + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +type remoteClient struct { + client *tfe.Client + lockInfo *statemgr.LockInfo + organization string + runID string + stateUploadErr bool + workspace *tfe.Workspace + forcePush bool + encryption encryption.StateEncryption +} + +// Get the remote state. +func (r *remoteClient) Get() (*remote.Payload, error) { + ctx := context.Background() + + sv, err := r.client.StateVersions.ReadCurrent(ctx, r.workspace.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. 
+ return nil, nil + } + return nil, fmt.Errorf("Error retrieving state: %w", err) + } + + state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("Error downloading state: %w", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +func (r *remoteClient) uploadStateFallback(ctx context.Context, stateFile *statefile.File, state []byte, jsonStateOutputs []byte) error { + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(stateFile.Lineage), + Serial: tfe.Int64(int64(stateFile.Serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + Force: tfe.Bool(r.forcePush), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(jsonStateOutputs)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + if r.runID != "" { + options.Run = &tfe.Run{ID: r.runID} + } + + // Create the new state. + _, err := r.client.StateVersions.Create(ctx, r.workspace.ID, options) + if err != nil { + r.stateUploadErr = true + return fmt.Errorf("error uploading state in compatibility mode: %w", err) + } + return err +} + +// Put the remote state. +func (r *remoteClient) Put(state []byte) error { + ctx := context.Background() + + // Read the raw state into a OpenTofu state. 
+ stateFile, err := statefile.Read(bytes.NewReader(state), r.encryption) + if err != nil { + return fmt.Errorf("error reading state: %w", err) + } + + ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) + if err != nil { + return fmt.Errorf("error reading output values: %w", err) + } + o, err := json.Marshal(ov) + if err != nil { + return fmt.Errorf("error converting output values to json: %w", err) + } + + options := tfe.StateVersionUploadOptions{ + StateVersionCreateOptions: tfe.StateVersionCreateOptions{ + Lineage: tfe.String(stateFile.Lineage), + Serial: tfe.Int64(int64(stateFile.Serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + Force: tfe.Bool(r.forcePush), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(o)), + }, + RawState: state, + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + if r.runID != "" { + options.Run = &tfe.Run{ID: r.runID} + } + + // Create the new state, + // using the state version upload API where supported. + _, err = r.client.StateVersions.Upload(ctx, r.workspace.ID, options) + if errors.Is(err, tfe.ErrStateVersionUploadNotSupported) { + // Create the new state with content included in the request (Terraform Enterprise v202306-1 and below) + log.Println("[INFO] Detected that state version upload is not supported. Retrying using compatibility state upload.") + return r.uploadStateFallback(ctx, stateFile, state, o) + } + if err != nil { + r.stateUploadErr = true + return fmt.Errorf("error uploading state: %w", err) + } + + return nil +} + +// Delete the remote state. 
+func (r *remoteClient) Delete() error { + err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace.Name) + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("error deleting workspace %s: %w", r.workspace.Name, err) + } + + return nil +} + +// EnableForcePush to allow the remote client to overwrite state +// by implementing remote.ClientForcePusher +func (r *remoteClient) EnableForcePush() { + r.forcePush = true +} + +// Lock the remote state. +func (r *remoteClient) Lock(info *statemgr.LockInfo) (string, error) { + ctx := context.Background() + + lockErr := &statemgr.LockError{Info: r.lockInfo} + + // Lock the workspace. + _, err := r.client.Workspaces.Lock(ctx, r.workspace.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by OpenTofu"), + }) + if err != nil { + if err == tfe.ErrWorkspaceLocked { + lockErr.Info = info + err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, r.organization, r.workspace.Name) + } + lockErr.Err = err + return "", lockErr + } + + r.lockInfo = info + + return r.lockInfo.ID, nil +} + +// Unlock the remote state. +func (r *remoteClient) Unlock(id string) error { + ctx := context.Background() + + // We first check if there was an error while uploading the latest + // state. If so, we will not unlock the workspace to prevent any + // changes from being applied until the correct state is uploaded. + if r.stateUploadErr { + return nil + } + + lockErr := &statemgr.LockError{Info: r.lockInfo} + + // With lock info this should be treated as a normal unlock. + if r.lockInfo != nil { + // Verify the expected lock ID. + if r.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Unlock the workspace. + _, err := r.client.Workspaces.Unlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil + } + + // Verify the optional force-unlock lock ID. 
+ if r.organization+"/"+r.workspace.Name != id { + lockErr.Err = fmt.Errorf( + "lock ID %q does not match existing lock ID \"%s/%s\"", + id, + r.organization, + r.workspace.Name, + ) + return lockErr + } + + // Force unlock the workspace. + _, err := r.client.Workspaces.ForceUnlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} diff --git a/pkg/backend/remote/backend_state_test.go b/pkg/backend/remote/backend_state_test.go new file mode 100644 index 00000000000..460d3f3392b --- /dev/null +++ b/pkg/backend/remote/backend_state_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bytes" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(remoteClient) +} + +func TestRemoteClient(t *testing.T) { + client := testRemoteClient(t) + remote.TestClient(t, client) +} + +func TestRemoteClient_stateLock(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + s1, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + s2, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestRemoteClient_Put_withRunID(t *testing.T) { + // Set the TFE_RUN_ID environment variable before creating the client! + t.Setenv("TFE_RUN_ID", cloud.GenerateID("run-")) + + // Create a new test client. 
+ client := testRemoteClient(t) + + // Create a new empty state. + sf := statefile.New(states.NewState(), "", 0) + var buf bytes.Buffer + statefile.Write(sf, &buf, encryption.StateEncryptionDisabled()) + + // Store the new state to verify (this will be done + // by the mock that is used) that the run ID is set. + if err := client.Put(buf.Bytes()); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/pkg/backend/remote/backend_test.go b/pkg/backend/remote/backend_test.go new file mode 100644 index 00000000000..22ff3a05d00 --- /dev/null +++ b/pkg/backend/remote/backend_test.go @@ -0,0 +1,809 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +func TestRemote(t *testing.T) { + var _ backend.Enhanced = New(nil, encryption.StateEncryptionDisabled()) + var _ backend.CLI = New(nil, encryption.StateEncryptionDisabled()) +} + +func TestRemote_backendDefault(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + backend.TestBackendStates(t, b) + backend.TestBackendStateLocks(t, b, b) + backend.TestBackendStateForceUnlock(t, b, b) +} + +func TestRemote_backendNoDefault(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + backend.TestBackendStates(t, b) +} + +func TestRemote_config(t *testing.T) { + cases := map[string]struct { + config cty.Value + 
confErr string + valErr string + }{ + "with_a_nonexisting_organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("nonexisting"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "organization \"nonexisting\" at host " + mockedBackendHost + " not found", + }, + "with_a_missing_hostname": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("oracle"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: `Hostname is required for the remote backend`, + }, + "with_an_unknown_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nonexisting.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "Failed to request discovery document", + }, + // localhost advertises TFE services, but has no token in the credentials + "without_a_token": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("localhost"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "tofu login localhost", + }, + "with_a_name": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": 
cty.NullVal(cty.String), + }), + }), + }, + "with_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }), + }, + "without_either_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.NullVal(cty.String), + }), + }), + valErr: `Either workspace "name" or "prefix" is required`, + }, + "with_both_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.StringVal("my-app-"), + }), + }), + valErr: `Only one of workspace "name" or "prefix" is allowed`, + }, + "null config": { + config: cty.NullVal(cty.EmptyObject), + }, + } + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if (valDiags.Err() != nil || tc.valErr != "") && + (valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.confErr != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { + t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) + } + } +} + +func TestRemote_versionConstraints(t *testing.T) { + cases := 
map[string]struct { + config cty.Value + prerelease string + version string + result string + }{ + "compatible version": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "0.11.1", + }, + "version too old": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "0.0.1", + result: "upgrade OpenTofu to >= 0.1.0", + }, + "version too new": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + version: "10.0.1", + result: "downgrade OpenTofu to <= 10.0.0", + }, + } + + // Save and restore the actual version. + p := tfversion.Prerelease + v := tfversion.Version + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + }() + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Set the version for this test. 
+ tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.HasErrors() { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.result != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.result)) { + t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) + } + } +} + +func TestRemote_localBackend(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + local, ok := b.local.(*backendLocal.Local) + if !ok { + t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) + } + + remote, ok := local.Backend.(*Remote) + if !ok { + t.Fatalf("expected local.Backend to be *remote.Remote, got: %T", remote) + } +} + +func TestRemote_addAndRemoveWorkspacesDefault(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + if _, err := b.Workspaces(); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if _, err := b.StateMgr("prod"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if err := b.DeleteWorkspace("prod", true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } +} + +func TestRemote_addAndRemoveWorkspacesNoDefault(t *testing.T) { + b, bCleanup := testBackendNoDefault(t) + defer bCleanup() + + states, err := b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces := []string(nil) + if 
!reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected states %#+v, got %#+v", expectedWorkspaces, states) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + expectedA := "test_A" + if _, err := b.StateMgr(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = append(expectedWorkspaces, expectedA) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + expectedB := "test_B" + if _, err := b.StateMgr(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = append(expectedWorkspaces, expectedB) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName, true); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + if err := b.DeleteWorkspace(expectedA, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string{expectedB} + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(expectedB, true); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string(nil) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } +} + +func TestRemote_checkConstraints(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + cases := 
map[string]struct { + constraints *disco.Constraints + prerelease string + version string + result string + }{ + "compatible version": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.11.1", + result: "", + }, + "version too old": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.10.1", + result: "upgrade OpenTofu to >= 0.11.0", + }, + "version too new": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.12.0", + result: "downgrade OpenTofu to <= 0.11.11", + }, + "version excluded - ordered": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.7", "0.11.8"}, + Maximum: "0.11.11", + }, + version: "0.11.7", + result: "upgrade OpenTofu to > 0.11.8", + }, + "version excluded - unordered": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.8", "0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "upgrade OpenTofu to > 0.11.8", + }, + "list versions": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Maximum: "0.11.11", + }, + version: "0.10.1", + result: "versions >= 0.11.0, <= 0.11.11.", + }, + "list exclusion": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "excluding version 0.11.6.", + }, + "list exclusions": { + constraints: &disco.Constraints{ + Minimum: "0.11.0", + Excluding: []string{"0.11.8", "0.11.6"}, + Maximum: "0.11.11", + }, + version: "0.11.6", + result: "excluding versions 0.11.6, 0.11.8.", + }, + } + + // Save and restore the actual version. + p := tfversion.Prerelease + v := tfversion.Version + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + }() + + for name, tc := range cases { + // Set the version for this test. 
+ tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version + + // Check the constraints. + diags := b.checkConstraints(tc.constraints) + if (diags.Err() != nil || tc.result != "") && + (diags.Err() == nil || !strings.Contains(diags.Err().Error(), tc.result)) { + t.Fatalf("%s: unexpected constraints result: %v", name, diags.Err()) + } + } +} + +func TestRemote_StateMgr_versionCheck(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Some fixed versions for testing with. This logic is a simple string + // comparison, so we don't need many test cases. + v0135 := version.Must(version.NewSemver("0.13.5")) + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the mock remote workspace OpenTofu version to match the local + // Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0140.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Now change the remote workspace to a different Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0135.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should fail + want := `Remote workspace OpenTofu version "0.13.5" does not match local OpenTofu version 
"0.14.0"` + if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want { + t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) + } +} + +func TestRemote_Unlock_ignoreVersion(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // this is set by the unlock command + b.IgnoreVersionConflict() + + v111 := version.Must(version.NewSemver("1.1.1")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 1.1.1 + tfversion.Prerelease = "" + tfversion.Version = v111.String() + tfversion.SemVer = v111 + + state, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("error: %v", err) + } + + lockID, err := state.Lock(statemgr.NewLockInfo()) + if err != nil { + t.Fatalf("error: %v", err) + } + + // this should succeed since the version conflict is ignored + if err = state.Unlock(lockID); err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestRemote_StateMgr_versionCheckLatest(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the remote workspace to the pseudo-version "latest" + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("latest"), + }, + ); err != nil { + t.Fatalf("error: 
%v", err) + } + + // This should succeed despite not being a string match + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion(t *testing.T) { + testCases := []struct { + local string + remote string + executionMode string + wantErr bool + }{ + {"0.13.5", "0.13.5", "remote", false}, + {"0.14.0", "0.13.5", "remote", true}, + {"0.14.0", "0.13.5", "local", false}, + {"0.14.0", "0.14.1", "remote", false}, + {"0.14.0", "1.0.99", "remote", false}, + {"0.14.0", "1.1.0", "remote", false}, + {"0.14.0", "1.3.0", "remote", true}, + {"1.2.0", "1.2.99", "remote", false}, + {"1.2.0", "1.3.0", "remote", true}, + {"0.15.0", "latest", "remote", false}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + local := version.Must(version.NewSemver(tc.local)) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace OpenTofu version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: &tc.executionMode, + TerraformVersion: tfe.String(tc.remote), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if tc.wantErr { + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "OpenTofu version mismatch") { + 
t.Fatalf("unexpected error: %s", got) + } + } else { + if len(diags) != 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + } + }) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Attempting to check the version against a workspace which doesn't exist + // should result in no errors + diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") + if len(diags) != 0 { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + // Use a special workspace ID to trigger a 500 error, which should result + // in a failed check + diags = b.VerifyWorkspaceTerraformVersion("network-error") + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { + t.Fatalf("unexpected error: %s", got) + } + + // Update the mock remote workspace OpenTofu version to an invalid version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("1.0.cheetarah"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid OpenTofu version") { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // If the ignore flag is set, the behaviour changes + b.IgnoreVersionConflict() + + // Different local & remote versions to cause an error + local := version.Must(version.NewSemver("0.14.0")) + remote := version.Must(version.NewSemver("0.13.5")) + + // Save original local version state and restore afterwards 
+ p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace OpenTofu version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(remote.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong severity: got %#v, want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "OpenTofu version mismatch"; got != want { + t.Errorf("wrong summary: got %s, want %s", got, want) + } + wantDetail := "The local OpenTofu version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)." 
+ if got := diags[0].Description().Detail; got != wantDetail { + t.Errorf("wrong summary: got %s, want %s", got, wantDetail) + } +} + +func TestRemote_ServiceDiscoveryAliases(t *testing.T) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + diag := b.Configure(cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + })) + if diag.HasErrors() { + t.Fatalf("expected no diagnostic errors, got %s", diag.Err()) + } + + aliases, err := b.ServiceDiscoveryAliases() + if err != nil { + t.Fatalf("expected no errors, got %s", err) + } + if len(aliases) != 1 { + t.Fatalf("expected 1 alias but got %d", len(aliases)) + } +} diff --git a/pkg/backend/remote/cli.go b/pkg/backend/remote/cli.go new file mode 100644 index 00000000000..2477516a1ed --- /dev/null +++ b/pkg/backend/remote/cli.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "github.com/kubegems/opentofu/pkg/backend" +) + +// CLIInit implements backend.CLI +func (b *Remote) CLIInit(opts *backend.CLIOpts) error { + if cli, ok := b.local.(backend.CLI); ok { + if err := cli.CLIInit(opts); err != nil { + return err + } + } + + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ContextOpts = opts.ContextOpts + + return nil +} diff --git a/pkg/backend/remote/colorize.go b/pkg/backend/remote/colorize.go new file mode 100644 index 00000000000..286df2eca20 --- /dev/null +++ b/pkg/backend/remote/colorize.go @@ -0,0 +1,55 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "regexp" + + "github.com/mitchellh/colorstring" +) + +// TODO SvH: This file should be deleted and the type cliColorize should be +// renamed back to Colorize as soon as we can pass -no-color to the backend. + +// colorsRe is used to find ANSI escaped color codes. +var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m") + +// Colorer is the interface that must be implemented to colorize strings. +type Colorer interface { + Color(v string) string +} + +// Colorize is used to print output when the -no-color flag is used. It will +// strip all ANSI escaped color codes which are set while the operation was +// executed in Terraform Enterprise. +// +// When Terraform Enterprise supports run specific variables, this code can be +// removed as we can then pass the CLI flag to the backend and prevent the color +// codes from being written to the output. +type Colorize struct { + cliColor *colorstring.Colorize +} + +// Color will strip all ANSI escaped color codes and return a uncolored string. +func (c *Colorize) Color(v string) string { + return colorsRe.ReplaceAllString(c.cliColor.Color(v), "") +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so is useful +// as a helper to wrap any potentially colored strings. 
+func (b *Remote) Colorize() Colorer { + if b.CLIColor != nil && !b.CLIColor.Disable { + return b.CLIColor + } + if b.CLIColor != nil { + return &Colorize{cliColor: b.CLIColor} + } + return &Colorize{cliColor: &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + }} +} diff --git a/pkg/backend/remote/remote_test.go b/pkg/backend/remote/remote_test.go new file mode 100644 index 00000000000..5749cdcfe16 --- /dev/null +++ b/pkg/backend/remote/remote_test.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "flag" + "os" + "testing" + "time" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + + // Make sure TF_FORCE_LOCAL_BACKEND is unset + os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + // Reduce delays to make tests run faster + backoffMin = 1.0 + backoffMax = 1.0 + planConfigurationVersionsPollInterval = 1 * time.Millisecond + runPollInterval = 1 * time.Millisecond + + os.Exit(m.Run()) +} diff --git a/pkg/backend/remote/testdata/apply-destroy/apply.log b/pkg/backend/remote/testdata/apply-destroy/apply.log new file mode 100644 index 00000000000..d126547d95c --- /dev/null +++ b/pkg/backend/remote/testdata/apply-destroy/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Destroying... (ID: 8657651096157629581) +null_resource.hello: Destruction complete after 0s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. 
diff --git a/pkg/backend/remote/testdata/apply-destroy/main.tf b/pkg/backend/remote/testdata/apply-destroy/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply-destroy/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-destroy/plan.log b/pkg/backend/remote/testdata/apply-destroy/plan.log new file mode 100644 index 00000000000..1d38d416892 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-destroy/plan.log @@ -0,0 +1,22 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + - null_resource.hello + + +Plan: 0 to add, 0 to change, 1 to destroy. diff --git a/pkg/backend/remote/testdata/apply-no-changes/main.tf b/pkg/backend/remote/testdata/apply-no-changes/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply-no-changes/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-no-changes/plan.log b/pkg/backend/remote/testdata/apply-no-changes/plan.log new file mode 100644 index 00000000000..70416815133 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-no-changes/plan.log @@ -0,0 +1,17 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. + +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. diff --git a/pkg/backend/remote/testdata/apply-no-changes/policy.log b/pkg/backend/remote/testdata/apply-no-changes/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-no-changes/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/apply-policy-hard-failed/main.tf b/pkg/backend/remote/testdata/apply-policy-hard-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-policy-hard-failed/plan.log b/pkg/backend/remote/testdata/apply-policy-hard-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+ +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/apply-policy-hard-failed/policy.log b/pkg/backend/remote/testdata/apply-policy-hard-failed/policy.log new file mode 100644 index 00000000000..5d6e6935b93 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/apply-policy-passed/apply.log b/pkg/backend/remote/testdata/apply-policy-passed/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-passed/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
diff --git a/pkg/backend/remote/testdata/apply-policy-passed/main.tf b/pkg/backend/remote/testdata/apply-policy-passed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-policy-passed/plan.log b/pkg/backend/remote/testdata/apply-policy-passed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/apply-policy-passed/policy.log b/pkg/backend/remote/testdata/apply-policy-passed/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. 
+ +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/apply-policy-soft-failed/apply.log b/pkg/backend/remote/testdata/apply-policy-soft-failed/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-soft-failed/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/pkg/backend/remote/testdata/apply-policy-soft-failed/main.tf b/pkg/backend/remote/testdata/apply-policy-soft-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-policy-soft-failed/plan.log b/pkg/backend/remote/testdata/apply-policy-soft-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/apply-policy-soft-failed/policy.log b/pkg/backend/remote/testdata/apply-policy-soft-failed/policy.log new file mode 100644 index 00000000000..3e4ebedf617 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/apply-variables/apply.log b/pkg/backend/remote/testdata/apply-variables/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-variables/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/pkg/backend/remote/testdata/apply-variables/main.tf b/pkg/backend/remote/testdata/apply-variables/main.tf new file mode 100644 index 00000000000..955e8b4c09a --- /dev/null +++ b/pkg/backend/remote/testdata/apply-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply-variables/plan.log b/pkg/backend/remote/testdata/apply-variables/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/apply-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+ +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/apply-with-error/main.tf b/pkg/backend/remote/testdata/apply-with-error/main.tf new file mode 100644 index 00000000000..bc45f28f563 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/pkg/backend/remote/testdata/apply-with-error/plan.log b/pkg/backend/remote/testdata/apply-with-error/plan.log new file mode 100644 index 00000000000..4344a372290 --- /dev/null +++ b/pkg/backend/remote/testdata/apply-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/pkg/backend/remote/testdata/apply/apply.log b/pkg/backend/remote/testdata/apply/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/backend/remote/testdata/apply/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
diff --git a/pkg/backend/remote/testdata/apply/main.tf b/pkg/backend/remote/testdata/apply/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/apply/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/apply/plan.log b/pkg/backend/remote/testdata/apply/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/apply/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/empty/.gitignore b/pkg/backend/remote/testdata/empty/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/backend/remote/testdata/plan-cost-estimation/ce.log b/pkg/backend/remote/testdata/plan-cost-estimation/ce.log new file mode 100644 index 00000000000..e51fef1edc6 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-cost-estimation/ce.log @@ -0,0 +1,6 @@ ++---------+------+-----+-------------+----------------------+ +| PRODUCT | NAME | SKU | DESCRIPTION | DELTA | ++---------+------+-----+-------------+----------------------+ ++---------+------+-----+-------------+----------------------+ +| TOTAL | $0.000 USD / 720 HRS | ++---------+------+-----+-------------+----------------------+ diff --git a/pkg/backend/remote/testdata/plan-cost-estimation/cost-estimate.log b/pkg/backend/remote/testdata/plan-cost-estimation/cost-estimate.log new file mode 100644 index 00000000000..67a50928cec --- /dev/null +++ b/pkg/backend/remote/testdata/plan-cost-estimation/cost-estimate.log @@ -0,0 +1,5 @@ +Cost estimation: + +Waiting for cost estimation to complete... +Resources: 1 of 1 estimated + $25.488/mo +$25.488 \ No newline at end of file diff --git a/pkg/backend/remote/testdata/plan-cost-estimation/main.tf b/pkg/backend/remote/testdata/plan-cost-estimation/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-cost-estimation/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-cost-estimation/plan.log b/pkg/backend/remote/testdata/plan-cost-estimation/plan.log new file mode 100644 index 00000000000..fae287f4529 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-cost-estimation/plan.log @@ -0,0 +1,20 @@ +Terraform v0.12.9 +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/plan-long-line/main.tf b/pkg/backend/remote/testdata/plan-long-line/main.tf new file mode 100644 index 00000000000..0a8d623a9d6 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-long-line/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + long_line = "[{'_id':'5c5ab0ed7de45e993ffb9eeb','index':0,'guid':'e734d772-6b5a-4cb0-805c-91cd5e560e20','isActive':false,'balance':'$1,472.03','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Darlene','last':'Garza'},'company':'GEEKOSIS','email':'darlene.garza@geekosis.io','phone':'+1 (850) 506-3347','address':'165 Kiely Place, Como, New Mexico, 4335','about':'Officia ullamco et sunt magna voluptate culpa cupidatat ea tempor laboris cupidatat ea anim laboris. Minim enim quis enim esse laborum est veniam. Lorem excepteur elit Lorem cupidatat elit ea anim irure fugiat fugiat sunt mollit. Consectetur ad nulla dolor amet esse occaecat aliquip sit. Magna sit elit adipisicing ut reprehenderit anim exercitation sit quis ea pariatur Lorem magna dolore.','registered':'Wednesday, March 11, 2015 12:58 PM','latitude':'20.729127','longitude':'-127.343593','tags':['minim','in','deserunt','occaecat','fugiat'],'greeting':'Hello, Darlene! 
You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda9117d15f1c1f112','index':1,'guid':'f0d1eed2-c6a9-4535-8800-d4bd53fe7eee','isActive':true,'balance':'$2,901.90','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Flora','last':'Short'},'company':'SIGNITY','email':'flora.short@signity.me','phone':'+1 (840) 520-2666','address':'636 Johnson Avenue, Gerber, Wisconsin, 9139','about':'Veniam dolore deserunt Lorem aliqua qui eiusmod. Amet tempor fugiat duis incididunt amet adipisicing. Id ea nisi veniam eiusmod.','registered':'Wednesday, May 2, 2018 5:59 AM','latitude':'-63.267612','longitude':'4.224102','tags':['veniam','incididunt','id','aliqua','reprehenderit'],'greeting':'Hello, Flora! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed83fd574d8041fa16','index':2,'guid':'29499a07-414a-436f-ba62-6634ca16bdcc','isActive':true,'balance':'$2,781.28','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Trevino','last':'Marks'},'company':'KEGULAR','email':'trevino.marks@kegular.com','phone':'+1 (843) 571-2269','address':'200 Alabama Avenue, Grenelefe, Florida, 7963','about':'Occaecat nisi exercitation Lorem mollit laborum magna adipisicing culpa dolor proident dolore. Non consequat ea amet et id mollit incididunt minim anim amet nostrud labore tempor. Proident eu sint commodo nisi consequat voluptate do fugiat proident. Laboris eiusmod veniam non et elit nulla nisi labore incididunt Lorem consequat consectetur voluptate.','registered':'Saturday, January 25, 2014 5:56 AM','latitude':'65.044005','longitude':'-127.454864','tags':['anim','duis','velit','pariatur','enim'],'greeting':'Hello, Trevino! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed784eb6e350ff0a07','index':3,'guid':'40ed47e2-1747-4665-ab59-cdb3630a7642','isActive':true,'balance':'$2,000.78','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Solis','last':'Mckinney'},'company':'QABOOS','email':'solis.mckinney@qaboos.org','phone':'+1 (924) 405-2560','address':'712 Herkimer Court, Klondike, Ohio, 8133','about':'Minim ad anim minim tempor mollit magna tempor et non commodo amet. Nisi cupidatat labore culpa consectetur exercitation laborum adipisicing fugiat officia adipisicing consequat non. Qui voluptate tempor laboris exercitation qui non adipisicing occaecat voluptate sunt do nostrud velit. Consequat tempor officia laboris tempor irure cupidatat aliquip voluptate nostrud velit ex nulla tempor laboris. Qui pariatur pariatur enim aliquip velit. Officia mollit ullamco laboris velit velit eiusmod enim amet incididunt consectetur sunt.','registered':'Wednesday, April 12, 2017 6:59 AM','latitude':'-25.055596','longitude':'-140.126525','tags':['ipsum','adipisicing','amet','nulla','dolore'],'greeting':'Hello, Solis! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed02ce1ea9a2155d51','index':4,'guid':'1b5fb7d3-3b9a-4382-81b5-9ab01a27e74b','isActive':true,'balance':'$1,373.67','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Janell','last':'Battle'},'company':'GEEKMOSIS','email':'janell.battle@geekmosis.net','phone':'+1 (810) 591-3014','address':'517 Onderdonk Avenue, Shrewsbury, District Of Columbia, 2335','about':'Reprehenderit ad proident do anim qui officia magna magna duis cillum esse minim est. Excepteur ipsum anim ad laboris. In occaecat dolore nulla ea Lorem tempor et culpa in sint. Officia eu eu incididunt sit amet. Culpa duis id reprehenderit ut anim sit sunt. 
Duis dolore proident velit incididunt adipisicing pariatur fugiat incididunt eiusmod eu veniam irure.','registered':'Thursday, February 8, 2018 1:44 AM','latitude':'-33.254864','longitude':'-154.145885','tags':['aute','deserunt','ipsum','eiusmod','laborum'],'greeting':'Hello, Janell! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edab58604bd7d3dd1c','index':5,'guid':'6354c035-af22-44c9-8be9-b2ea9decc24d','isActive':true,'balance':'$3,535.68','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Combs','last':'Kirby'},'company':'LUXURIA','email':'combs.kirby@luxuria.name','phone':'+1 (900) 498-3266','address':'377 Kingsland Avenue, Ruckersville, Maine, 9916','about':'Lorem duis ipsum pariatur aliquip sunt. Commodo esse laborum incididunt mollit quis est laboris ea ea quis fugiat. Enim elit ullamco velit et fugiat veniam irure deserunt aliqua ad irure veniam.','registered':'Tuesday, February 21, 2017 4:04 PM','latitude':'-70.20591','longitude':'162.546871','tags':['reprehenderit','est','enim','aute','ad'],'greeting':'Hello, Combs! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf7fafeffc6357c51','index':6,'guid':'02523e0b-cc90-4309-b6b2-f493dc6076f6','isActive':false,'balance':'$3,754.30','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Macias','last':'Calderon'},'company':'AMTAP','email':'macias.calderon@amtap.us','phone':'+1 (996) 569-3667','address':'305 Royce Street, Glidden, Iowa, 9248','about':'Exercitation nulla deserunt pariatur adipisicing. In commodo deserunt incididunt ut velit minim qui ut quis. Labore elit ullamco eiusmod voluptate in eu do est fugiat aute mollit deserunt. Eu duis proident velit fugiat velit ut. Ut non esse amet laborum nisi tempor in nulla.','registered':'Thursday, October 23, 2014 10:28 PM','latitude':'32.371629','longitude':'60.155135','tags':['commodo','elit','velit','excepteur','aliqua'],'greeting':'Hello, Macias! 
You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0e8a6109e7fabf17','index':7,'guid':'675ff6b6-197b-4154-9775-813d661df822','isActive':false,'balance':'$2,850.62','picture':'http://placehold.it/32x32','age':37,'eyeColor':'green','name':{'first':'Stefanie','last':'Rivers'},'company':'RECRITUBE','email':'stefanie.rivers@recritube.biz','phone':'+1 (994) 591-3551','address':'995 Campus Road, Abrams, Virginia, 3251','about':'Esse aute non laborum Lorem nulla irure. Veniam elit aute ut et dolor non deserunt laboris tempor. Ipsum quis cupidatat laborum laboris voluptate esse duis eiusmod excepteur consectetur commodo ullamco qui occaecat. Culpa velit cillum occaecat minim nisi.','registered':'Thursday, June 9, 2016 3:40 PM','latitude':'-18.526825','longitude':'149.670782','tags':['occaecat','sunt','reprehenderit','ipsum','magna'],'greeting':'Hello, Stefanie! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf7d9bc2db4e476e3','index':8,'guid':'adaefc55-f6ea-4bd1-a147-0e31c3ce7a21','isActive':true,'balance':'$2,555.13','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Hillary','last':'Lancaster'},'company':'OLUCORE','email':'hillary.lancaster@olucore.ca','phone':'+1 (964) 474-3018','address':'232 Berriman Street, Kaka, Massachusetts, 6792','about':'Veniam ad laboris quis reprehenderit aliquip nisi sunt excepteur ea aute laborum excepteur incididunt. Nisi exercitation aliquip do culpa commodo ex officia ut enim mollit in deserunt in amet. Anim eu deserunt dolore non cupidatat ut enim incididunt aute dolore voluptate. Do cillum mollit laborum non incididunt occaecat aute voluptate nisi irure.','registered':'Thursday, June 4, 2015 9:45 PM','latitude':'88.075919','longitude':'-148.951368','tags':['reprehenderit','veniam','ad','aute','anim'],'greeting':'Hello, Hillary! 
You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed7b7192ad6a0f267c','index':9,'guid':'0ca9b8ea-f671-474e-be26-4a49cae4838a','isActive':true,'balance':'$3,684.51','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Jill','last':'Conner'},'company':'EXOZENT','email':'jill.conner@exozent.info','phone':'+1 (887) 467-2168','address':'751 Thames Street, Juarez, American Samoa, 8386','about':'Enim voluptate et non est in magna laborum aliqua enim aliqua est non nostrud. Tempor est nulla ipsum consectetur esse nostrud est id. Consequat do voluptate cupidatat eu fugiat et fugiat velit id. Sint dolore ad qui tempor anim eu amet consectetur do elit aute adipisicing consequat ex.','registered':'Sunday, October 22, 2017 7:35 AM','latitude':'84.384911','longitude':'40.305648','tags':['tempor','sint','irure','et','ex'],'greeting':'Hello, Jill! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed713fe676575aa72b','index':10,'guid':'c28023cf-cc57-4c2e-8d91-dfbe6bafadcd','isActive':false,'balance':'$2,792.45','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Hurley','last':'George'},'company':'ZAJ','email':'hurley.george@zaj.tv','phone':'+1 (984) 547-3284','address':'727 Minna Street, Lacomb, Colorado, 2557','about':'Ex velit cupidatat veniam culpa. Eiusmod ut fugiat adipisicing incididunt consectetur exercitation Lorem exercitation ex. Incididunt anim aute incididunt fugiat cupidatat qui eu non reprehenderit. Eiusmod dolor nisi culpa excepteur ut velit minim dolor voluptate amet commodo culpa in.','registered':'Thursday, February 16, 2017 6:41 AM','latitude':'25.989949','longitude':'10.200053','tags':['minim','ut','sunt','consequat','ullamco'],'greeting':'Hello, Hurley! 
You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1e56732746c70d8b','index':11,'guid':'e9766f13-766c-4450-b4d2-8b04580f60b7','isActive':true,'balance':'$3,874.26','picture':'http://placehold.it/32x32','age':35,'eyeColor':'green','name':{'first':'Leticia','last':'Pace'},'company':'HONOTRON','email':'leticia.pace@honotron.co.uk','phone':'+1 (974) 536-3322','address':'365 Goodwin Place, Savage, Nevada, 9191','about':'Nisi Lorem aliqua esse eiusmod magna. Ad minim incididunt proident ut Lorem cupidatat qui velit aliqua ullamco et ipsum in. Aliquip elit consectetur pariatur esse exercitation et officia quis. Occaecat tempor proident cillum anim ad commodo velit ut voluptate. Tempor et occaecat sit sint aliquip tempor nulla velit magna nisi proident exercitation Lorem id.','registered':'Saturday, August 4, 2018 5:05 AM','latitude':'70.620386','longitude':'-86.335813','tags':['occaecat','velit','labore','laboris','esse'],'greeting':'Hello, Leticia! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed941337fe42f47426','index':12,'guid':'6d390762-17ea-4b58-9a36-b0c9a8748a42','isActive':true,'balance':'$1,049.61','picture':'http://placehold.it/32x32','age':38,'eyeColor':'green','name':{'first':'Rose','last':'Humphrey'},'company':'MYOPIUM','email':'rose.humphrey@myopium.io','phone':'+1 (828) 426-3086','address':'389 Sapphire Street, Saticoy, Marshall Islands, 1423','about':'Aliquip enim excepteur adipisicing ex. Consequat aliqua consequat nostrud do occaecat deserunt excepteur sit et ipsum sunt dolor eu. Dolore laborum commodo excepteur tempor ad adipisicing proident excepteur magna non Lorem proident consequat aute. Fugiat minim consequat occaecat voluptate esse velit officia laboris nostrud nisi ut voluptate.','registered':'Monday, April 16, 2018 12:38 PM','latitude':'-47.083742','longitude':'109.022423','tags':['aute','non','sit','adipisicing','mollit'],'greeting':'Hello, Rose! 
You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd0c02fc3fdc01a40','index':13,'guid':'07755618-6fdf-4b33-af50-364c18909227','isActive':true,'balance':'$1,823.61','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Judith','last':'Hale'},'company':'COLLAIRE','email':'judith.hale@collaire.me','phone':'+1 (922) 508-2843','address':'193 Coffey Street, Castleton, North Dakota, 3638','about':'Minim non ullamco ad anim nostrud dolore nostrud veniam consequat id eiusmod veniam laboris. Lorem irure esse mollit non velit aute id cupidatat est mollit occaecat magna excepteur. Adipisicing tempor nisi sit aliquip tempor pariatur tempor eu consectetur nulla amet nulla. Quis nisi nisi ea incididunt culpa et do. Esse officia eu pariatur velit sunt quis proident amet consectetur consequat. Nisi excepteur culpa nulla sit dolor deserunt excepteur dolor consequat elit cillum tempor Lorem.','registered':'Wednesday, August 24, 2016 12:29 AM','latitude':'-80.15514','longitude':'39.91007','tags':['consectetur','incididunt','aliquip','dolor','consequat'],'greeting':'Hello, Judith! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3e1e29caa4f728b','index':14,'guid':'2c6617a2-e7a9-4ff7-a8b9-e99554fe70fe','isActive':true,'balance':'$1,971.00','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Estes','last':'Sweet'},'company':'GEEKKO','email':'estes.sweet@geekko.com','phone':'+1 (866) 448-3032','address':'847 Cove Lane, Kula, Mississippi, 9178','about':'Veniam consectetur occaecat est excepteur consequat ipsum cillum sit consectetur. Ut cupidatat et reprehenderit dolore enim do cillum qui pariatur ad laborum incididunt esse. Fugiat sunt dolor veniam laboris ipsum deserunt proident reprehenderit laboris non nostrud. Magna excepteur sint magna laborum tempor sit exercitation ipsum labore est ullamco ullamco. Cillum voluptate cillum ea laborum Lorem. Excepteur sint ut nisi est esse non. 
Minim excepteur ullamco velit nisi ut in elit exercitation ut dolore.','registered':'Sunday, August 12, 2018 5:06 PM','latitude':'-9.57771','longitude':'-159.94577','tags':['culpa','dolor','velit','anim','pariatur'],'greeting':'Hello, Estes! You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edbcf088c6fd593091','index':15,'guid':'2cc79958-1b40-4e2c-907a-433903fd3da9','isActive':false,'balance':'$3,751.53','picture':'http://placehold.it/32x32','age':34,'eyeColor':'brown','name':{'first':'Kemp','last':'Spence'},'company':'EXOBLUE','email':'kemp.spence@exoblue.org','phone':'+1 (864) 487-2992','address':'217 Clay Street, Monument, North Carolina, 1460','about':'Nostrud duis cillum sint non commodo dolor aute aliqua adipisicing ad nulla non excepteur proident. Fugiat labore elit tempor cillum veniam reprehenderit laboris consectetur dolore amet qui cupidatat. Amet aliqua elit anim et consequat commodo excepteur officia anim aliqua ea eu labore cillum. Et ex dolor duis dolore commodo veniam et nisi.','registered':'Monday, October 29, 2018 5:23 AM','latitude':'-70.304222','longitude':'83.582371','tags':['velit','duis','consequat','incididunt','duis'],'greeting':'Hello, Kemp! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed6400479feb3de505','index':16,'guid':'91ccae6d-a3ea-43cf-bb00-3f2729256cc9','isActive':false,'balance':'$2,477.79','picture':'http://placehold.it/32x32','age':40,'eyeColor':'blue','name':{'first':'Ronda','last':'Burris'},'company':'EQUITOX','email':'ronda.burris@equitox.net','phone':'+1 (817) 553-3228','address':'708 Lawton Street, Deputy, Wyoming, 8598','about':'Excepteur voluptate aliquip consequat cillum est duis sit cillum eu eiusmod et laborum ullamco. Et minim reprehenderit aute voluptate amet ullamco. Amet sit enim ad irure deserunt nostrud anim veniam consequat dolor commodo. Consequat do occaecat do exercitation ullamco dolor ut. 
Id laboris consequat est dolor dolore tempor ullamco anim do ut nulla deserunt labore. Mollit ex Lorem ullamco mollit.','registered':'Monday, April 23, 2018 5:27 PM','latitude':'-31.227208','longitude':'0.63785','tags':['ipsum','magna','consectetur','sit','irure'],'greeting':'Hello, Ronda! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddbeab2e53e04d563','index':17,'guid':'a86d4eb6-6bd8-48c2-a8fc-1c933c835852','isActive':false,'balance':'$3,709.03','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Rosario','last':'Dillard'},'company':'BARKARAMA','email':'rosario.dillard@barkarama.name','phone':'+1 (933) 525-3898','address':'730 Chauncey Street, Forbestown, South Carolina, 6894','about':'Est eu fugiat aliquip ea ad qui ad mollit ad tempor voluptate et incididunt reprehenderit. Incididunt fugiat commodo minim adipisicing culpa consectetur duis eu ut commodo consequat voluptate labore. Nostrud irure labore adipisicing irure quis magna consequat dolor Lorem sint enim. Sint excepteur eu dolore elit ut do mollit sunt enim est. Labore id nostrud sint Lorem esse nostrud.','registered':'Friday, December 25, 2015 8:59 PM','latitude':'37.440827','longitude':'44.580474','tags':['Lorem','sit','ipsum','ea','ut'],'greeting':'Hello, Rosario! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf8e9b9c031d04e8','index':18,'guid':'a96f997c-daf8-40d4-92e1-be07e2cf0f60','isActive':false,'balance':'$1,878.37','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Sondra','last':'Gonzales'},'company':'XUMONK','email':'sondra.gonzales@xumonk.us','phone':'+1 (838) 560-2255','address':'230 Cox Place, Geyserville, Georgia, 6805','about':'Laborum sunt voluptate ea laboris nostrud. Amet deserunt aliqua Lorem voluptate velit deserunt occaecat minim ullamco. Lorem occaecat sit labore adipisicing ad magna mollit labore ullamco proident. 
Ea velit do proident fugiat esse commodo ex nostrud eu mollit pariatur. Labore laborum qui voluptate quis proident reprehenderit tempor dolore duis deserunt esse aliqua aliquip. Non veniam enim pariatur cupidatat ipsum dolore est reprehenderit. Non exercitation adipisicing proident magna elit occaecat non magna.','registered':'Sunday, June 26, 2016 4:02 AM','latitude':'62.247742','longitude':'-44.90666','tags':['ea','aute','in','voluptate','magna'],'greeting':'Hello, Sondra! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed2c1bcd06781f677e','index':19,'guid':'6ac47a16-eed4-4460-92ee-e0dd33c1fbb5','isActive':false,'balance':'$3,730.64','picture':'http://placehold.it/32x32','age':20,'eyeColor':'brown','name':{'first':'Anastasia','last':'Vega'},'company':'FIREWAX','email':'anastasia.vega@firewax.biz','phone':'+1 (867) 493-3698','address':'803 Arlington Avenue, Rosburg, Northern Mariana Islands, 8769','about':'Sint ex nisi tempor sunt voluptate non et eiusmod irure. Aute reprehenderit dolor mollit aliqua Lorem voluptate occaecat. Sint laboris deserunt Lorem incididunt nulla cupidatat do.','registered':'Friday, March 18, 2016 12:02 PM','latitude':'-32.010216','longitude':'-87.874753','tags':['aliquip','mollit','mollit','ad','laborum'],'greeting':'Hello, Anastasia! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed727fd645854bbf43','index':20,'guid':'67bd8cdb-ce6b-455c-944c-a80e17c6fa75','isActive':true,'balance':'$2,868.06','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Lucinda','last':'Cox'},'company':'ENDIPINE','email':'lucinda.cox@endipine.ca','phone':'+1 (990) 428-3002','address':'412 Thatford Avenue, Lafferty, New Jersey, 5271','about':'Esse nulla sunt ut consequat aute mollit. Est occaecat sunt nisi irure id anim est commodo. Elit mollit amet dolore sunt adipisicing ea laborum quis ea reprehenderit non consequat dolore. Minim sunt occaecat quis aute commodo dolore quis commodo proident. 
Sunt sint duis ullamco sit ea esse Lorem. Consequat pariatur eiusmod laboris adipisicing labore in laboris adipisicing adipisicing consequat aute ea et.','registered':'Friday, May 1, 2015 10:16 PM','latitude':'-14.200957','longitude':'-82.211386','tags':['do','sit','qui','officia','aliquip'],'greeting':'Hello, Lucinda! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed5a97284eb2cbd3a8','index':21,'guid':'f9fc999d-515c-4fc4-b339-76300e1b4bf2','isActive':true,'balance':'$1,172.57','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Conrad','last':'Bradley'},'company':'FUELWORKS','email':'conrad.bradley@fuelworks.info','phone':'+1 (956) 561-3226','address':'685 Fenimore Street, Esmont, Maryland, 7523','about':'Labore reprehenderit anim nisi sunt do nisi in. Est anim cillum id minim exercitation ullamco voluptate ipsum eu. Elit culpa consequat reprehenderit laborum in eu. Laboris amet voluptate laboris qui voluptate duis minim reprehenderit. Commodo sunt irure dolore sunt occaecat velit nisi eu minim minim.','registered':'Wednesday, January 18, 2017 11:13 PM','latitude':'31.665993','longitude':'38.868968','tags':['excepteur','exercitation','est','nisi','mollit'],'greeting':'Hello, Conrad! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edc4eaf6f760c38218','index':22,'guid':'8794ef5f-da2f-46f0-a755-c18a16409fd5','isActive':false,'balance':'$3,594.73','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Marquez','last':'Vargas'},'company':'MALATHION','email':'marquez.vargas@malathion.tv','phone':'+1 (976) 438-3126','address':'296 Hall Street, National, Texas, 2067','about':'Proident cillum aute minim fugiat sunt aliqua non occaecat est duis id id tempor. Qui deserunt nisi amet pariatur proident eu laboris esse adipisicing magna. Anim anim mollit aute non magna nisi aute magna labore ullamco reprehenderit voluptate et ad. 
Proident adipisicing aute eiusmod nostrud nostrud deserunt culpa. Elit eu ullamco nisi aliqua dolor sint pariatur excepteur sit consectetur tempor. Consequat Lorem ullamco commodo veniam qui sint magna. Sit mollit ad aliquip est id eu officia id adipisicing duis ad.','registered':'Tuesday, November 17, 2015 6:16 PM','latitude':'-36.443667','longitude':'22.336776','tags':['aliquip','veniam','ipsum','Lorem','ex'],'greeting':'Hello, Marquez! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edd7c718518ee0466a','index':23,'guid':'ad8781a2-059e-4288-9879-309d53a99bf5','isActive':true,'balance':'$3,570.68','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Snider','last':'Frost'},'company':'ZILODYNE','email':'snider.frost@zilodyne.co.uk','phone':'+1 (913) 485-3275','address':'721 Lincoln Road, Richmond, Utah, 672','about':'Minim enim Lorem esse incididunt do reprehenderit velit laborum ullamco. In aute eiusmod esse aliqua et labore tempor sunt ex mollit veniam tempor. Nulla elit cillum qui ullamco dolore amet deserunt magna amet laborum.','registered':'Saturday, August 23, 2014 12:58 AM','latitude':'-88.682554','longitude':'74.063179','tags':['nulla','ea','sint','aliquip','duis'],'greeting':'Hello, Snider! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf026fece8e2c0970','index':24,'guid':'1b7d81e1-1dba-4322-bb1a-eaa6a24cccea','isActive':false,'balance':'$2,037.91','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Snyder','last':'Fletcher'},'company':'COMTEST','email':'snyder.fletcher@comtest.io','phone':'+1 (830) 538-3860','address':'221 Lewis Place, Zortman, Idaho, 572','about':'Elit anim enim esse dolore exercitation. Laboris esse sint adipisicing fugiat sint do occaecat ut voluptate sint nulla. 
Ad sint ut reprehenderit nostrud irure id consectetur officia velit consequat.','registered':'Sunday, January 1, 2017 1:13 AM','latitude':'-54.742604','longitude':'69.534932','tags':['exercitation','commodo','in','id','aliqua'],'greeting':'Hello, Snyder! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b9a7f83da6d2dfd','index':25,'guid':'0b2cc6b6-0044-4b1c-aa31-bd72963457a0','isActive':false,'balance':'$1,152.76','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Regina','last':'James'},'company':'TELPOD','email':'regina.james@telpod.me','phone':'+1 (989) 455-3228','address':'688 Essex Street, Clayville, Alabama, 2772','about':'Eiusmod elit culpa reprehenderit ea veniam. Officia irure culpa duis aute ut. Irure duis cillum officia ea pariatur velit ut dolor incididunt reprehenderit ex elit laborum. Est pariatur veniam ad irure. Labore velit sunt esse laboris aliqua velit deserunt deserunt sit. Elit eiusmod ad laboris aliquip minim irure excepteur enim quis. Quis incididunt adipisicing ut magna cupidatat sit amet culpa.','registered':'Tuesday, April 25, 2017 10:16 PM','latitude':'-75.088027','longitude':'47.209828','tags':['elit','nisi','est','voluptate','proident'],'greeting':'Hello, Regina! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed10884f32f779f2bf','index':26,'guid':'1f6fb522-0002-46ff-8dac-451247f28168','isActive':true,'balance':'$1,948.79','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Collins','last':'Mcpherson'},'company':'DIGIGEN','email':'collins.mcpherson@digigen.com','phone':'+1 (991) 519-2334','address':'317 Merit Court, Sanford, Michigan, 6468','about':'Magna qui culpa dolor officia labore mollit ex excepteur duis eiusmod. Ea cupidatat ex ipsum mollit do minim duis. Nisi eiusmod minim tempor id esse commodo sunt sunt ullamco ut do laborum ullamco magna. Aliquip laborum dolor officia officia eu nostrud velit minim est anim. 
Ex elit laborum sunt magna exercitation nisi cillum sunt aute qui ea ullamco. Cupidatat ea sunt aute dolor duis nisi Lorem ullamco eiusmod. Sit ea velit ad veniam aliqua ad elit cupidatat ut magna in.','registered':'Friday, June 10, 2016 4:38 PM','latitude':'25.513996','longitude':'14.911124','tags':['exercitation','non','sit','velit','officia'],'greeting':'Hello, Collins! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8a575110efb15c6c','index':27,'guid':'2a904c82-068b-4ded-9ae6-cfeb6d7e62c9','isActive':true,'balance':'$3,427.91','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Mckay','last':'Barrera'},'company':'COMVEYER','email':'mckay.barrera@comveyer.org','phone':'+1 (853) 470-2560','address':'907 Glenwood Road, Churchill, Oregon, 8583','about':'In voluptate esse dolore enim sint quis dolor do exercitation sint et labore nisi. Eiusmod tempor exercitation dolore elit sit velit sint et. Sit magna adipisicing eiusmod do anim velit deserunt laboris ad ea pariatur. Irure nisi anim mollit elit commodo nulla. Aute eiusmod sit nulla eiusmod. Eiusmod est officia commodo mollit laboris do deserunt eu do nisi amet. Proident ad duis eiusmod laboris Lorem ut culpa pariatur Lorem reprehenderit minim aliquip irure sunt.','registered':'Saturday, December 19, 2015 2:49 PM','latitude':'-55.243287','longitude':'138.035406','tags':['non','quis','laboris','enim','nisi'],'greeting':'Hello, Mckay! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edcd49ab6a73ff7f32','index':28,'guid':'5d3e0dae-3f58-437f-b12d-de24667a904d','isActive':true,'balance':'$3,270.52','picture':'http://placehold.it/32x32','age':35,'eyeColor':'blue','name':{'first':'Mabel','last':'Leonard'},'company':'QUADEEBO','email':'mabel.leonard@quadeebo.net','phone':'+1 (805) 432-2356','address':'965 Underhill Avenue, Falconaire, Minnesota, 4450','about':'Cupidatat amet sunt est ipsum occaecat sit fugiat excepteur Lorem Lorem ex ea ipsum. 
Ad incididunt est irure magna excepteur occaecat nostrud. Minim dolor id anim ipsum qui nostrud ullamco aute ex Lorem magna deserunt excepteur Lorem.','registered':'Saturday, March 28, 2015 5:55 AM','latitude':'27.388359','longitude':'156.408728','tags':['quis','velit','deserunt','dolore','sit'],'greeting':'Hello, Mabel! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edde16ac2dc2fbb6c1','index':29,'guid':'d50c2233-70fc-4748-8ebf-02d45ac2a446','isActive':false,'balance':'$3,100.70','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Pace','last':'Duke'},'company':'SEQUITUR','email':'pace.duke@sequitur.name','phone':'+1 (983) 568-3119','address':'895 Melrose Street, Reno, Connecticut, 6259','about':'Ex veniam aliquip exercitation mollit elit est minim veniam aliqua labore deserunt. Dolor sunt sint cillum Lorem nisi ea irure cupidatat. Velit ut culpa cupidatat consequat cillum. Sint voluptate quis laboris qui incididunt do elit Lorem qui ullamco ut eu pariatur occaecat.','registered':'Saturday, August 18, 2018 2:18 PM','latitude':'31.930443','longitude':'-129.494784','tags':['culpa','est','nostrud','quis','aliquip'],'greeting':'Hello, Pace! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb908d85642ba77e8','index':30,'guid':'3edb6e42-367a-403d-a511-eb78bcc11f60','isActive':true,'balance':'$1,912.07','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Cohen','last':'Morrison'},'company':'POWERNET','email':'cohen.morrison@powernet.us','phone':'+1 (888) 597-2141','address':'565 Troutman Street, Idledale, West Virginia, 3196','about':'Ullamco voluptate duis commodo amet occaecat consequat et occaecat dolore nulla eu. Do aliqua sunt deserunt occaecat laboris labore voluptate cupidatat ullamco exercitation aliquip elit voluptate anim. Occaecat deserunt in labore cillum aute deserunt ea excepteur laboris sunt. 
Officia irure sint incididunt labore sint ipsum ullamco ea elit. Fugiat nostrud sunt ut officia mollit proident sunt dolor fugiat esse tempor do.','registered':'Friday, January 1, 2016 5:42 AM','latitude':'-20.01215','longitude':'26.361552','tags':['consectetur','sunt','nulla','reprehenderit','dolore'],'greeting':'Hello, Cohen! You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed91c77aa25a64a757','index':31,'guid':'8999a97b-0035-4f19-b555-91dd69aaa9b8','isActive':false,'balance':'$3,097.67','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Stout','last':'Valdez'},'company':'UPLINX','email':'stout.valdez@uplinx.biz','phone':'+1 (854) 480-3633','address':'880 Chestnut Avenue, Lowgap, Hawaii, 1537','about':'Cupidatat enim dolore non voluptate. Aliqua ut non Lorem in exercitation reprehenderit voluptate. Excepteur deserunt tempor laboris quis.','registered':'Wednesday, March 16, 2016 6:53 AM','latitude':'50.328393','longitude':'-25.990308','tags':['ea','fugiat','duis','consectetur','enim'],'greeting':'Hello, Stout! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0f52176c8c3e1bed','index':32,'guid':'743abcbd-1fab-4aed-8cb7-3c935eb64c74','isActive':false,'balance':'$1,118.54','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Ortega','last':'Joseph'},'company':'APEXIA','email':'ortega.joseph@apexia.ca','phone':'+1 (872) 596-3024','address':'304 Canda Avenue, Mulino, New York, 8721','about':'Ipsum elit id cupidatat minim nisi minim. Ea ex amet ea ipsum Lorem deserunt. Occaecat cupidatat magna cillum aliquip sint id quis amet nostrud officia enim laborum. Aliqua deserunt amet commodo laboris labore mollit est. Officia voluptate Lorem esse mollit aliquip laboris cupidatat minim et. 
Labore esse incididunt officia nostrud pariatur reprehenderit.','registered':'Tuesday, January 31, 2017 6:06 AM','latitude':'43.861714','longitude':'33.771783','tags':['ut','Lorem','esse','quis','fugiat'],'greeting':'Hello, Ortega! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed2c00cdd101b6cd52','index':33,'guid':'4f6f99cf-f692-4d03-b23a-26f2b27273bd','isActive':true,'balance':'$1,682.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Sampson','last':'Taylor'},'company':'GEOFORMA','email':'sampson.taylor@geoforma.info','phone':'+1 (911) 482-2993','address':'582 Kent Street, Umapine, Virgin Islands, 5300','about':'Voluptate laboris occaecat laboris tempor cillum quis cupidatat qui pariatur. Lorem minim commodo mollit adipisicing Lorem ut dolor consectetur ipsum. Sint sit voluptate labore aliqua ex labore velit. Ullamco tempor consectetur voluptate deserunt voluptate minim enim. Cillum commodo duis reprehenderit eu duis.','registered':'Thursday, November 9, 2017 11:24 PM','latitude':'24.949379','longitude':'155.034468','tags':['Lorem','cupidatat','elit','reprehenderit','commodo'],'greeting':'Hello, Sampson! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b7210ba0bc0d508','index':34,'guid':'73fd415f-f8cf-43e0-a86c-e725d000abd4','isActive':false,'balance':'$1,289.37','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Shari','last':'Melendez'},'company':'DIGIPRINT','email':'shari.melendez@digiprint.tv','phone':'+1 (914) 475-3995','address':'950 Wolf Place, Enetai, Alaska, 693','about':'Dolor incididunt et est commodo aliquip labore ad ullamco. Velit ex cillum nulla elit ex esse. Consectetur mollit fugiat cillum proident elit sunt non officia cillum ex laboris sint eu. Esse nulla eu officia in Lorem sint minim esse velit. Est Lorem ipsum enim aute. 
Elit minim eiusmod officia reprehenderit officia ut irure Lorem.','registered':'Wednesday, August 23, 2017 11:12 PM','latitude':'-70.347863','longitude':'94.812072','tags':['ea','ex','fugiat','duis','eu'],'greeting':'Hello, Shari! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed85ac364619d892ef','index':35,'guid':'c1905f34-14ff-4bd8-b683-02cac4d52623','isActive':false,'balance':'$2,538.50','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Santiago','last':'Joyner'},'company':'BRAINCLIP','email':'santiago.joyner@brainclip.co.uk','phone':'+1 (835) 405-2676','address':'554 Rose Street, Muir, Kentucky, 7752','about':'Quis culpa dolore fugiat magna culpa non deserunt consectetur elit. Id cupidatat occaecat duis irure ullamco elit in labore magna pariatur cillum est. Mollit dolore velit ipsum anim aliqua culpa sint. Occaecat aute anim ut sunt eu.','registered':'Thursday, January 18, 2018 4:49 PM','latitude':'57.057918','longitude':'-50.472596','tags':['ullamco','ullamco','sunt','voluptate','irure'],'greeting':'Hello, Santiago! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1763f56b1121fa88','index':36,'guid':'a7f50659-4ae3-4f3e-a9d8-087e05334b51','isActive':false,'balance':'$1,435.16','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Adeline','last':'Hoffman'},'company':'BITREX','email':'adeline.hoffman@bitrex.io','phone':'+1 (823) 488-3201','address':'221 Corbin Place, Edmund, Palau, 193','about':'Magna ullamco consectetur velit adipisicing cillum ea. Est qui incididunt est ullamco ex aute exercitation irure. Cupidatat consectetur proident qui fugiat do. Labore magna aliqua consectetur fugiat. Excepteur deserunt sit qui dolor fugiat aute sunt anim ipsum magna ea commodo qui. Minim eu adipisicing ut irure excepteur eiusmod aliqua. 
Voluptate nisi ad consequat qui.','registered':'Tuesday, June 14, 2016 9:26 AM','latitude':'-53.123355','longitude':'88.180776','tags':['non','est','commodo','ut','aliquip'],'greeting':'Hello, Adeline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed945d079f63e3185e','index':37,'guid':'1f4619e0-9289-4bea-a9db-a75f4cba1138','isActive':true,'balance':'$2,019.54','picture':'http://placehold.it/32x32','age':36,'eyeColor':'blue','name':{'first':'Porter','last':'Morse'},'company':'COMVOY','email':'porter.morse@comvoy.me','phone':'+1 (933) 562-3220','address':'416 India Street, Bourg, Rhode Island, 2266','about':'Et sint anim et sunt. Non mollit sunt cillum veniam sunt sint amet non mollit. Fugiat ea ullamco pariatur deserunt ex do minim irure irure.','registered':'Saturday, July 16, 2016 10:03 PM','latitude':'-81.782545','longitude':'69.783509','tags':['irure','consequat','veniam','nulla','velit'],'greeting':'Hello, Porter! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed411dd0f06c66bba6','index':38,'guid':'93c900f0-54c0-4c4c-b21d-d59d8d7c6177','isActive':true,'balance':'$3,764.84','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Fitzgerald','last':'Logan'},'company':'UTARIAN','email':'fitzgerald.logan@utarian.com','phone':'+1 (815) 461-2709','address':'498 Logan Street, Tonopah, Arkansas, 6652','about':'Quis Lorem sit est et dolor est esse in veniam. Mollit anim nostrud laboris consequat voluptate qui ad ipsum sint laborum exercitation quis ipsum. Incididunt cupidatat esse ea amet deserunt consequat eu proident duis adipisicing pariatur. Amet deserunt mollit aliquip mollit consequat sunt quis labore laboris quis. Magna cillum fugiat anim velit Lorem duis. Lorem duis amet veniam occaecat est excepteur ut ea velit esse non pariatur. 
Do veniam quis eu consequat ad duis incididunt minim dolore sit non minim adipisicing et.','registered':'Wednesday, August 9, 2017 9:20 PM','latitude':'24.480657','longitude':'-108.693421','tags':['dolore','ad','occaecat','quis','labore'],'greeting':'Hello, Fitzgerald! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb6f14559d8a7b28','index':39,'guid':'9434f48b-70a0-4161-8d06-c53bf8b9df94','isActive':true,'balance':'$3,713.47','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Mcconnell','last':'Nash'},'company':'TETAK','email':'mcconnell.nash@tetak.org','phone':'+1 (956) 477-3586','address':'853 Turnbull Avenue, Clarence, Missouri, 1599','about':'Culpa excepteur minim anim magna dolor dolore ad ex eu. In cupidatat cillum elit dolore in est minim dolore consectetur reprehenderit voluptate laborum. Deserunt id velit ad dolor mollit.','registered':'Saturday, November 10, 2018 9:27 AM','latitude':'1.691589','longitude':'143.704377','tags':['ut','deserunt','sit','cupidatat','ea'],'greeting':'Hello, Mcconnell! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed1a87ea0390733ffa','index':40,'guid':'ec8a55f7-7114-4787-b1ff-4e631731bc2c','isActive':true,'balance':'$2,200.71','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Kitty','last':'Meyers'},'company':'FIBEROX','email':'kitty.meyers@fiberox.net','phone':'+1 (864) 458-3826','address':'537 Georgia Avenue, Thermal, Illinois, 7930','about':'Non excepteur laboris Lorem magna adipisicing exercitation. Anim esse in pariatur minim ipsum qui voluptate irure. Pariatur Lorem pariatur esse commodo aute adipisicing anim commodo. Exercitation nostrud aliqua duis et amet amet tempor.','registered':'Tuesday, September 13, 2016 8:16 PM','latitude':'19.59506','longitude':'-57.814297','tags':['duis','ullamco','velit','sint','consequat'],'greeting':'Hello, Kitty! 
You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed4dc76717bf1217b3','index':41,'guid':'40521cde-f835-4620-902b-af7abf185d8d','isActive':false,'balance':'$2,907.02','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Klein','last':'Goodwin'},'company':'PLASTO','email':'klein.goodwin@plasto.name','phone':'+1 (950) 563-3104','address':'764 Devoe Street, Lindcove, Oklahoma, 458','about':'Amet aliqua magna ea veniam non aliquip irure esse id ipsum cillum sint tempor dolor. Ullamco deserunt fugiat amet pariatur culpa nostrud commodo commodo. Ad occaecat magna adipisicing voluptate. Minim ad adipisicing cupidatat elit nostrud eu irure. Cupidatat occaecat aute magna consectetur dolore anim et. Ex voluptate velit exercitation laborum ad ullamco ad. Aliquip nulla ipsum dolore cillum qui nostrud eu adipisicing amet tempor do.','registered':'Tuesday, February 13, 2018 3:56 PM','latitude':'-27.168725','longitude':'-29.499285','tags':['minim','labore','do','deserunt','dolor'],'greeting':'Hello, Klein! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1ac77396b29aee9e','index':42,'guid':'7cfc03e3-30e9-4ae1-a1f5-f6c3223ca770','isActive':true,'balance':'$2,986.47','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Isabelle','last':'Bishop'},'company':'GEEKNET','email':'isabelle.bishop@geeknet.us','phone':'+1 (908) 418-2642','address':'729 Willmohr Street, Aguila, Montana, 7510','about':'In nulla commodo nostrud sint. Elit et occaecat et aliqua aliquip magna esse commodo duis Lorem dolor magna enim deserunt. Ipsum pariatur reprehenderit ipsum adipisicing mollit incididunt ut. Sunt in consequat ex ut minim non qui anim labore. Deserunt minim voluptate in nulla occaecat.','registered':'Monday, September 15, 2014 6:22 AM','latitude':'-81.686947','longitude':'38.409291','tags':['proident','est','aliqua','veniam','anim'],'greeting':'Hello, Isabelle! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3a070c9469a4893','index':43,'guid':'3dec76b4-0b55-4765-a2fd-b8dbd9c82f8f','isActive':true,'balance':'$2,501.24','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Josefina','last':'Turner'},'company':'COMSTAR','email':'josefina.turner@comstar.biz','phone':'+1 (908) 566-3029','address':'606 Schenck Place, Brutus, Vermont, 8681','about':'Enim consectetur pariatur sint dolor nostrud est deserunt nulla quis pariatur sit. Ad aute incididunt nisi excepteur duis est velit voluptate ullamco occaecat magna reprehenderit aliquip. Proident deserunt consectetur non et exercitation elit dolore enim aliqua incididunt anim amet. Ex esse sint commodo minim aliqua ut irure. Proident ex culpa voluptate fugiat nisi. Sint commodo laboris excepteur minim ipsum labore tempor quis magna.','registered':'Saturday, December 31, 2016 6:38 AM','latitude':'35.275088','longitude':'24.30485','tags':['minim','ut','irure','Lorem','veniam'],'greeting':'Hello, Josefina! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1aa7d74128ee3d0f','index':44,'guid':'10599279-c367-46c4-9f7a-744c2e4bf6c9','isActive':true,'balance':'$1,753.06','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Lily','last':'Haynes'},'company':'KIOSK','email':'lily.haynes@kiosk.ca','phone':'+1 (872) 451-2301','address':'509 Balfour Place, Grazierville, New Hampshire, 2750','about':'Nisi aliquip occaecat nostrud do sint qui nisi officia Lorem. Ad et et laboris nisi dolore aliqua eu. Aliqua veniam quis eu pariatur incididunt mollit id deserunt officia eiusmod. Consequat adipisicing do nisi voluptate eiusmod minim pariatur minim nisi nostrud culpa cupidatat. Irure consectetur id consequat adipisicing ullamco occaecat do. Ex proident ea quis nulla incididunt sunt excepteur incididunt. 
Aliquip minim nostrud non anim Lorem.','registered':'Tuesday, November 20, 2018 9:28 AM','latitude':'-12.677798','longitude':'114.506787','tags':['culpa','amet','elit','officia','irure'],'greeting':'Hello, Lily! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed74c76f2e84e201ce','index':45,'guid':'ec0a68d4-629e-46c9-9af7-f6ea867f02ba','isActive':true,'balance':'$1,477.93','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Shauna','last':'Pitts'},'company':'SPACEWAX','email':'shauna.pitts@spacewax.info','phone':'+1 (841) 406-2360','address':'348 Tabor Court, Westwood, Puerto Rico, 8297','about':'Aliquip irure officia magna ea magna mollit ea non amet deserunt. Veniam mollit labore culpa magna aliqua quis consequat est consectetur ea reprehenderit nostrud consequat aliqua. Mollit do ipsum mollit eiusmod.','registered':'Thursday, October 2, 2014 2:48 AM','latitude':'-55.17388','longitude':'-13.370494','tags':['anim','consectetur','cillum','veniam','duis'],'greeting':'Hello, Shauna! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed419e718484b16722','index':46,'guid':'b2d6101d-5646-43f4-8207-284494e5a990','isActive':false,'balance':'$2,006.96','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Lawrence','last':'Boyer'},'company':'SKYPLEX','email':'lawrence.boyer@skyplex.tv','phone':'+1 (953) 548-2618','address':'464 Pilling Street, Blandburg, Arizona, 5531','about':'Culpa sit minim pariatur mollit cupidatat sunt duis. Nisi ea proident veniam exercitation adipisicing Lorem aliquip amet dolor voluptate in nisi. Non commodo anim sunt est fugiat laborum nisi aliqua non Lorem exercitation dolor. Laboris dolore do minim ut eiusmod enim magna cillum laborum consectetur aliquip minim enim Lorem. 
Veniam ex veniam occaecat aliquip elit aliquip est eiusmod minim minim adipisicing.','registered':'Wednesday, July 30, 2014 2:17 AM','latitude':'-78.681255','longitude':'139.960626','tags':['consequat','Lorem','incididunt','dolor','esse'],'greeting':'Hello, Lawrence! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed08a9024998292c70','index':47,'guid':'277de142-ebeb-4828-906a-7fd8bc0a738a','isActive':true,'balance':'$1,273.19','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Sonya','last':'Stafford'},'company':'AQUACINE','email':'sonya.stafford@aquacine.co.uk','phone':'+1 (824) 581-3927','address':'641 Bowery Street, Hillsboro, Delaware, 7893','about':'Culpa labore ex reprehenderit mollit cupidatat dolore et ut quis in. Sint esse culpa enim culpa tempor exercitation veniam minim consectetur. Sunt est laboris minim quis incididunt exercitation laboris cupidatat fugiat ad. Deserunt ipsum do dolor cillum excepteur incididunt.','registered':'Thursday, March 26, 2015 1:10 PM','latitude':'-84.750592','longitude':'165.493533','tags':['minim','officia','dolore','ipsum','est'],'greeting':'Hello, Sonya! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5037f2c79ecde68','index':48,'guid':'2dc6532f-9a26-49aa-b444-8923896db89c','isActive':false,'balance':'$3,168.93','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Marguerite','last':'Stuart'},'company':'ACCUFARM','email':'marguerite.stuart@accufarm.io','phone':'+1 (848) 535-2253','address':'301 Menahan Street, Sunnyside, Nebraska, 4809','about':'Deserunt sint labore voluptate amet anim culpa nostrud adipisicing enim cupidatat ullamco exercitation fugiat est. Magna dolor aute incididunt ea ad adipisicing. 
Do cupidatat ut officia officia culpa sit do.','registered':'Thursday, May 8, 2014 1:25 PM','latitude':'21.82277','longitude':'-7.368347','tags':['labore','nulla','ullamco','irure','adipisicing'],'greeting':'Hello, Marguerite! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb26d315635818dae','index':49,'guid':'083a5eda-0a70-4f89-87f7-2cd386c0f22a','isActive':false,'balance':'$2,576.25','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Louella','last':'Holloway'},'company':'BEDDER','email':'louella.holloway@bedder.me','phone':'+1 (801) 425-3761','address':'545 Lafayette Avenue, Caledonia, Louisiana, 2816','about':'Qui exercitation occaecat dolore mollit. Fugiat cupidatat proident culpa fugiat quis. In cupidatat commodo elit ea enim occaecat esse exercitation nostrud occaecat veniam laboris fugiat. Nisi sunt reprehenderit aliqua reprehenderit tempor id dolore ullamco pariatur reprehenderit et eu ex pariatur.','registered':'Wednesday, November 5, 2014 1:10 AM','latitude':'36.385637','longitude':'77.949423','tags':['eu','irure','velit','non','aliquip'],'greeting':'Hello, Louella! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed77cd60a1abc1ecce','index':50,'guid':'2887c3c1-3eba-4237-a0db-1977eed94554','isActive':true,'balance':'$1,633.51','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Bates','last':'Carrillo'},'company':'ZOMBOID','email':'bates.carrillo@zomboid.com','phone':'+1 (934) 405-2006','address':'330 Howard Alley, Troy, Kansas, 4881','about':'Voluptate esse est ullamco anim tempor ea reprehenderit. Occaecat pariatur deserunt cillum laboris labore id exercitation esse ipsum ipsum ex aliquip. Sunt non elit est ea occaecat. Magna deserunt commodo aliqua ipsum est cillum dolor nisi. Ex duis est tempor tempor laboris do do quis id magna. 
Dolor do est elit eu laborum ullamco culpa consequat velit eiusmod tempor.','registered':'Saturday, May 28, 2016 3:56 AM','latitude':'83.310134','longitude':'-105.862836','tags':['est','commodo','ea','commodo','sunt'],'greeting':'Hello, Bates! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5ec0ec299b471fb5','index':51,'guid':'512b5e67-f785-492e-9d94-e43ef8b399b8','isActive':false,'balance':'$3,032.22','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Floyd','last':'Yang'},'company':'FRENEX','email':'floyd.yang@frenex.org','phone':'+1 (924) 566-3304','address':'418 Quay Street, Chumuckla, Guam, 7743','about':'Irure sit velit exercitation dolore est nisi incididunt ut quis consectetur incididunt est dolor. Aute nisi enim esse aliquip enim culpa commodo consectetur. Duis laborum magna ad duis ipsum aliqua eiusmod cillum. Consectetur et duis eiusmod irure ad est nisi incididunt eiusmod labore. Pariatur proident in Lorem adipisicing mollit proident excepteur nulla do nostrud mollit eiusmod. Duis ad dolore irure fugiat anim laboris ipsum et sit duis ipsum voluptate. Lorem non aute exercitation qui ullamco officia minim sint pariatur ut dolor.','registered':'Wednesday, January 18, 2017 2:01 AM','latitude':'45.888721','longitude':'-41.232793','tags':['elit','in','esse','ea','officia'],'greeting':'Hello, Floyd! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51e26ca89e5caf49','index':52,'guid':'4e0907f6-facc-46df-8952-73561a53fe33','isActive':true,'balance':'$3,767.41','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Gardner','last':'Carey'},'company':'KLUGGER','email':'gardner.carey@klugger.net','phone':'+1 (876) 481-3502','address':'131 Utica Avenue, Cannondale, Federated States Of Micronesia, 610','about':'Amet ad pariatur excepteur anim ex officia commodo proident aliqua occaecat consequat Lorem officia sit. 
Id minim velit nisi laboris nisi nulla incididunt eiusmod velit. Deserunt labore quis et tempor. Et labore exercitation laborum officia ullamco nostrud adipisicing laboris esse laborum aute anim elit. Sunt ad officia tempor esse et quis aliquip irure pariatur laborum id quis ex. Eu consequat nisi deserunt id eu proident ex minim aute nulla tempor ex.','registered':'Friday, February 21, 2014 6:42 AM','latitude':'-54.740231','longitude':'15.01484','tags':['commodo','laboris','occaecat','aliquip','adipisicing'],'greeting':'Hello, Gardner! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed52e3c9407105093a','index':53,'guid':'1d3b9e7a-1bc3-40ea-b808-1c33f0d48c70','isActive':true,'balance':'$1,113.30','picture':'http://placehold.it/32x32','age':26,'eyeColor':'blue','name':{'first':'Herman','last':'Rogers'},'company':'TALENDULA','email':'herman.rogers@talendula.name','phone':'+1 (818) 521-2005','address':'541 Norman Avenue, Winfred, Tennessee, 447','about':'Culpa ex laborum non ad ullamco officia. Nisi mollit mollit voluptate sit sint ullamco. Lorem exercitation nulla anim eiusmod deserunt magna sint. Officia sunt eiusmod aliqua reprehenderit sunt mollit sit cupidatat sint.','registered':'Wednesday, July 11, 2018 1:05 AM','latitude':'-20.708105','longitude':'-151.294563','tags':['exercitation','minim','officia','qui','enim'],'greeting':'Hello, Herman! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edfcb123d545b6edb4','index':54,'guid':'c0e0c669-4eed-43ee-bdd0-78fe6e9ca4d5','isActive':true,'balance':'$3,309.64','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Whitley','last':'Stark'},'company':'MUSAPHICS','email':'whitley.stark@musaphics.us','phone':'+1 (803) 476-2151','address':'548 Cobek Court, Chamizal, Indiana, 204','about':'Adipisicing veniam dolor ex sint sit id eu voluptate. Excepteur veniam proident exercitation id eu et sunt pariatur. 
Qui occaecat culpa aliqua nisi excepteur minim veniam. Est duis nulla laborum excepteur cillum pariatur sint incididunt. Velit commodo eu incididunt voluptate. Amet laboris laboris id adipisicing labore eiusmod consequat minim cillum et.','registered':'Thursday, March 27, 2014 9:10 AM','latitude':'71.219596','longitude':'51.012855','tags':['reprehenderit','mollit','laborum','voluptate','aliquip'],'greeting':'Hello, Whitley! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed81510dfc61602fcf','index':55,'guid':'7ec5c24d-f169-4399-a2a3-300c0f45e52e','isActive':false,'balance':'$3,721.04','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Gretchen','last':'Wade'},'company':'EWEVILLE','email':'gretchen.wade@eweville.biz','phone':'+1 (977) 598-3700','address':'721 Colonial Road, Brookfield, South Dakota, 3888','about':'Fugiat consequat sint ut ut et ullamco eiusmod deserunt pariatur. Veniam eiusmod esse fugiat mollit. Proident laboris minim qui do ipsum excepteur exercitation irure anim. Aliqua labore quis eu fugiat dolore ullamco velit Lorem voluptate ipsum nostrud eiusmod laborum proident.','registered':'Friday, October 12, 2018 10:59 AM','latitude':'41.937653','longitude':'63.378531','tags':['aute','cillum','ea','ex','aute'],'greeting':'Hello, Gretchen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf78f77d4a7d557bb','index':56,'guid':'8718ada7-6fd0-49ef-a405-29850503948b','isActive':false,'balance':'$3,341.33','picture':'http://placehold.it/32x32','age':32,'eyeColor':'blue','name':{'first':'Naomi','last':'Frye'},'company':'MAZUDA','email':'naomi.frye@mazuda.ca','phone':'+1 (825) 427-2255','address':'741 Coyle Street, Comptche, Pennsylvania, 8441','about':'Aliqua fugiat laborum quis ullamco cupidatat sit dolor nulla dolore. Do Lorem et ipsum culpa irure sit do dolor qui sit laboris aliqua. 
Ex consectetur irure in veniam reprehenderit amet do elit eiusmod est magna.','registered':'Thursday, January 9, 2014 7:18 AM','latitude':'41.078645','longitude':'-50.241966','tags':['do','aliquip','eiusmod','velit','id'],'greeting':'Hello, Naomi! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbf45db2e072a48b4','index':57,'guid':'c158ebf7-fb8b-4ea8-adbf-8c51c6486715','isActive':true,'balance':'$2,811.55','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Lamb','last':'Johns'},'company':'DOGTOWN','email':'lamb.johns@dogtown.info','phone':'+1 (946) 530-3057','address':'559 Malbone Street, Kennedyville, California, 2052','about':'Eiusmod dolor labore cillum ad veniam elit voluptate voluptate pariatur est cupidatat. Laboris ut qui in cillum sunt dolore ut enim. Minim nostrud ex qui quis reprehenderit magna ipsum cupidatat irure minim laboris veniam irure. Fugiat velit deserunt aliquip in esse proident excepteur labore reprehenderit excepteur sunt in cupidatat exercitation. Ex pariatur irure mollit tempor non magna ex.','registered':'Friday, April 21, 2017 1:51 AM','latitude':'-61.403599','longitude':'-93.447102','tags':['aliquip','tempor','sint','enim','ipsum'],'greeting':'Hello, Lamb! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb9c88190cb59cf2','index':58,'guid':'f0de5ac5-eb28-491b-81c5-76d447c9055e','isActive':true,'balance':'$1,611.99','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Lynette','last':'Cleveland'},'company':'ARTWORLDS','email':'lynette.cleveland@artworlds.tv','phone':'+1 (889) 596-3723','address':'439 Montauk Avenue, Felt, New Mexico, 9681','about':'Incididunt aliquip est aliquip est ullamco do consectetur dolor. Lorem mollit mollit dolor et ipsum ut qui veniam aute ea. Adipisicing reprehenderit culpa velit laborum adipisicing amet consectetur velit nisi. Ut qui proident ad cillum excepteur adipisicing quis labore. 
Duis velit culpa et excepteur eiusmod ex labore in nisi nostrud. Et ullamco minim excepteur ut enim reprehenderit consequat eiusmod laboris Lorem commodo exercitation qui laborum.','registered':'Wednesday, August 26, 2015 12:53 PM','latitude':'49.861336','longitude':'86.865926','tags':['reprehenderit','minim','in','minim','nostrud'],'greeting':'Hello, Lynette! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5b760ddde7295fa8','index':59,'guid':'f8180d3f-c5c0-48b2-966e-a0b2a80f8e84','isActive':true,'balance':'$3,376.75','picture':'http://placehold.it/32x32','age':32,'eyeColor':'green','name':{'first':'Obrien','last':'Page'},'company':'GLASSTEP','email':'obrien.page@glasstep.co.uk','phone':'+1 (902) 583-3086','address':'183 Ridgewood Avenue, Vicksburg, Wisconsin, 7430','about':'Aute excepteur cillum exercitation duis Lorem irure labore elit. Labore magna cupidatat velit consectetur minim do Lorem in excepteur commodo ea consequat ullamco laborum. Ut in id occaecat eu quis duis id ea deserunt veniam.','registered':'Wednesday, March 29, 2017 12:13 AM','latitude':'-40.156154','longitude':'72.76301','tags':['excepteur','non','anim','nulla','anim'],'greeting':'Hello, Obrien! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed52985d3d8901d653','index':60,'guid':'d2e14fa1-8c54-4bcb-8a58-eb2e6f8d0e45','isActive':true,'balance':'$1,659.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'brown','name':{'first':'Knowles','last':'Goodman'},'company':'CENTREE','email':'knowles.goodman@centree.io','phone':'+1 (862) 563-3692','address':'504 Lott Street, Allensworth, Florida, 7148','about':'Do aliquip voluptate aliqua nostrud. Eu dolore ex occaecat pariatur aute laborum aute nulla aute amet. Excepteur sit laboris ad non anim ut officia ut ad exercitation officia dolore laboris. Esse voluptate minim deserunt nostrud exercitation laborum voluptate exercitation id laborum fugiat proident cupidatat proident. 
Nulla nostrud est sint adipisicing incididunt exercitation dolor sit et elit tempor occaecat sint culpa. Pariatur occaecat laboris pariatur laboris ad pariatur in cillum fugiat est fugiat. Proident eu id irure excepteur esse aute cillum adipisicing.','registered':'Wednesday, October 15, 2014 6:17 PM','latitude':'-15.73863','longitude':'87.422009','tags':['consequat','sint','tempor','veniam','culpa'],'greeting':'Hello, Knowles! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda00b73bdb7ea54e9','index':61,'guid':'c8a064db-0ec6-4832-9820-7280a0333709','isActive':true,'balance':'$3,701.14','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Shepherd','last':'Todd'},'company':'ECRATIC','email':'shepherd.todd@ecratic.me','phone':'+1 (881) 444-3389','address':'450 Frank Court, Temperanceville, Ohio, 7006','about':'Voluptate cillum ad fugiat velit adipisicing sint consequat veniam Lorem reprehenderit. Cillum sit non deserunt consequat. Amet sunt pariatur non mollit ullamco proident sint dolore anim elit cupidatat anim do ullamco. Lorem Lorem incididunt ea elit consequat laboris enim duis quis Lorem id aute veniam consequat. Cillum veniam cillum sint qui Lorem fugiat culpa consequat. Est sint duis ut qui fugiat. Laborum pariatur velit et sunt mollit eiusmod excepteur culpa ex et officia.','registered':'Tuesday, October 10, 2017 2:01 AM','latitude':'82.951563','longitude':'-4.866954','tags':['eu','qui','proident','esse','ex'],'greeting':'Hello, Shepherd! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed0e51d1a7e2d9e559','index':62,'guid':'739c3d38-200d-4531-84d8-4e7c39ae5b8c','isActive':true,'balance':'$3,679.01','picture':'http://placehold.it/32x32','age':31,'eyeColor':'brown','name':{'first':'Rosalyn','last':'Heath'},'company':'ZAYA','email':'rosalyn.heath@zaya.com','phone':'+1 (865) 403-3520','address':'303 Henderson Walk, Hoehne, District Of Columbia, 4306','about':'Sint occaecat nulla mollit sint fugiat eu proident dolor labore consequat. Occaecat tempor excepteur do fugiat incididunt Lorem in ullamco dolore laborum. Cillum mollit aliquip excepteur aliquip sint sunt minim non irure irure. Cillum fugiat aliqua enim dolore. Nulla culpa culpa nostrud ad. Eiusmod culpa proident proident non est cupidatat eu sunt sit incididunt id nisi.','registered':'Wednesday, April 22, 2015 12:35 PM','latitude':'33.628504','longitude':'110.772802','tags':['consequat','ut','ex','labore','consectetur'],'greeting':'Hello, Rosalyn! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5274c01d353d0c5','index':63,'guid':'8815fe55-8af1-4708-a62a-d554dbd74a4a','isActive':true,'balance':'$2,126.01','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Queen','last':'Harper'},'company':'TRI@TRIBALOG','email':'queen.harper@tri@tribalog.org','phone':'+1 (903) 592-3145','address':'926 Heath Place, Wawona, Maine, 7340','about':'Laborum cupidatat commodo aliquip reprehenderit. Excepteur eu labore duis minim minim voluptate aute nostrud deserunt ut velit ullamco. Adipisicing nisi occaecat laborum proident. Id reprehenderit eiusmod cupidatat qui aute consequat amet enim commodo duis non ipsum. Amet ut aliqua magna qui proident mollit aute.','registered':'Saturday, April 9, 2016 5:12 AM','latitude':'51.814216','longitude':'177.348115','tags':['cillum','ut','dolor','do','nisi'],'greeting':'Hello, Queen! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed126298b6ce62ed56','index':64,'guid':'001c87fe-182f-450f-903b-2e29a9bb0322','isActive':true,'balance':'$3,578.29','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Pauline','last':'Mills'},'company':'CRUSTATIA','email':'pauline.mills@crustatia.net','phone':'+1 (984) 582-3899','address':'899 Revere Place, Welch, Iowa, 216','about':'Tempor eu exercitation ut id. Deserunt ex reprehenderit veniam nisi. Aute laborum veniam velit dolore ut deserunt Lorem sit esse quis dolor ex do nisi. In dolor tempor officia id. Velit nisi culpa nostrud laborum officia incididunt laborum velit non quis id exercitation exercitation. Anim elit ullamco in enim Lorem culpa aliqua Lorem.','registered':'Monday, June 2, 2014 2:03 PM','latitude':'56.427576','longitude':'172.183669','tags':['pariatur','pariatur','pariatur','fugiat','Lorem'],'greeting':'Hello, Pauline! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e332ad9e8a178d8','index':65,'guid':'5ad7292b-feef-4a7e-b485-142cadfbe8ea','isActive':false,'balance':'$3,916.54','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Garrett','last':'Richmond'},'company':'XYQAG','email':'garrett.richmond@xyqag.name','phone':'+1 (952) 584-3794','address':'233 Grove Street, Summerfield, Virginia, 4735','about':'Nostrud quis pariatur occaecat laborum laboris aliqua ut fugiat dolor. Commodo tempor excepteur enim nostrud Lorem. Aute elit nulla labore ad pariatur cupidatat Lorem qui cupidatat velit deserunt excepteur esse. Excepteur nulla et nostrud quis labore est veniam enim nisi laboris ut enim. Ea esse nulla anim excepteur reprehenderit deserunt voluptate minim qui labore adipisicing amet eu enim.','registered':'Wednesday, March 5, 2014 4:35 PM','latitude':'68.665041','longitude':'148.799524','tags':['irure','reprehenderit','minim','ea','do'],'greeting':'Hello, Garrett! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed541aa2ec47466ace','index':66,'guid':'9cda6f3c-c9ab-451c-bb19-2e4c8463d011','isActive':true,'balance':'$3,352.52','picture':'http://placehold.it/32x32','age':30,'eyeColor':'brown','name':{'first':'Cobb','last':'Whitley'},'company':'UNIA','email':'cobb.whitley@unia.us','phone':'+1 (888) 490-3342','address':'864 Belmont Avenue, Needmore, Massachusetts, 8286','about':'Nisi aliquip fugiat ipsum nisi ullamco minim pariatur labore. Sint labore anim do ad ad esse eu nostrud nulla commodo anim. Cillum anim enim duis cillum non do nisi aliquip veniam voluptate commodo aliqua laborum. Exercitation in do eu qui sint aliquip. Esse adipisicing deserunt deserunt qui anim aliqua occaecat et nostrud elit ea in anim cillum. Tempor mollit proident tempor sunt est sint laborum ullamco incididunt non. Velit aliqua sunt excepteur nisi qui eiusmod ipsum dolore aliquip velit ullamco ullamco.','registered':'Friday, May 23, 2014 7:11 PM','latitude':'-32.950581','longitude':'147.772494','tags':['mollit','adipisicing','irure','ad','minim'],'greeting':'Hello, Cobb! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8186c3d6f34c2be3','index':67,'guid':'fee98f6d-d68a-4189-8180-b6cb337e537e','isActive':false,'balance':'$1,698.42','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Brennan','last':'Tyler'},'company':'PODUNK','email':'brennan.tyler@podunk.biz','phone':'+1 (867) 498-2727','address':'599 Harkness Avenue, Gorst, American Samoa, 322','about':'Reprehenderit id sit qui id qui aute ea sit magna in qui proident. Excepteur ad nostrud do nostrud in incididunt voluptate adipisicing sint anim. Ullamco consequat minim nulla irure ex est irure reprehenderit deserunt voluptate dolore anim sunt. 
Occaecat dolore voluptate voluptate elit commodo nulla laborum ad do irure.','registered':'Friday, February 9, 2018 5:40 PM','latitude':'11.150893','longitude':'-85.298004','tags':['quis','minim','deserunt','cillum','laboris'],'greeting':'Hello, Brennan! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed075c9c4f7439818d','index':68,'guid':'1ef76b18-6b8d-4c3c-aca3-9fa2b43f0242','isActive':false,'balance':'$2,091.17','picture':'http://placehold.it/32x32','age':26,'eyeColor':'brown','name':{'first':'Neal','last':'Stephenson'},'company':'OTHERSIDE','email':'neal.stephenson@otherside.ca','phone':'+1 (820) 496-3344','address':'867 Wilson Street, Kidder, Colorado, 4599','about':'Do laboris enim proident in qui velit adipisicing magna anim. Amet proident non exercitation ipsum aliqua excepteur nostrud. Enim esse non sit in nostrud deserunt id laborum cillum deserunt consequat. Anim velit exercitation qui sit voluptate. Irure duis non veniam velit mollit exercitation id exercitation.','registered':'Thursday, November 13, 2014 11:00 PM','latitude':'54.809693','longitude':'1.877241','tags':['anim','duis','in','officia','sint'],'greeting':'Hello, Neal! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0eda0a2dc24db64b638','index':69,'guid':'194744fd-089b-40b6-a290-98a6ec30a415','isActive':false,'balance':'$3,191.67','picture':'http://placehold.it/32x32','age':24,'eyeColor':'brown','name':{'first':'Shields','last':'Hubbard'},'company':'MIRACULA','email':'shields.hubbard@miracula.info','phone':'+1 (885) 582-2001','address':'529 Eagle Street, Guilford, Nevada, 1460','about':'Eiusmod exercitation ut incididunt veniam commodo culpa ullamco mollit id adipisicing exercitation ad sint. Nostrud excepteur amet aliqua mollit incididunt laborum voluptate id anim. Nulla sint laboris dolor esse cupidatat laborum ex sint. 
Ex non sunt sit nulla.','registered':'Monday, February 13, 2017 6:22 AM','latitude':'-69.145209','longitude':'-40.69755','tags':['tempor','enim','qui','velit','elit'],'greeting':'Hello, Shields! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf939c130177e074d','index':70,'guid':'303b176c-7803-4ed2-a35f-3e3c831793ef','isActive':false,'balance':'$2,359.09','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Coleen','last':'Knight'},'company':'BLEEKO','email':'coleen.knight@bleeko.tv','phone':'+1 (867) 423-3146','address':'527 Broadway , Bonanza, Marshall Islands, 4988','about':'Laboris nulla pariatur laborum ad aute excepteur sunt pariatur exercitation. Do nostrud qui ipsum ullamco et sint do Lorem cillum ullamco do. Exercitation labore excepteur commodo incididunt eiusmod proident consectetur adipisicing nostrud aute voluptate laboris. Commodo anim proident eiusmod pariatur est ea laborum incididunt qui tempor reprehenderit ullamco id. Eiusmod commodo nisi consectetur ut qui quis aliqua sit minim nostrud sunt laborum eiusmod adipisicing.','registered':'Sunday, May 6, 2018 8:03 AM','latitude':'70.729041','longitude':'113.052761','tags':['Lorem','ullamco','nulla','ullamco','commodo'],'greeting':'Hello, Coleen! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edae8b1ce688b61223','index':71,'guid':'7d6f3b1a-c367-4068-9e8e-1717d513ece3','isActive':false,'balance':'$2,911.07','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Clark','last':'Ryan'},'company':'ECLIPSENT','email':'clark.ryan@eclipsent.co.uk','phone':'+1 (938) 562-2740','address':'500 Lewis Avenue, Rockbridge, North Dakota, 5133','about':'Adipisicing exercitation officia sit excepteur excepteur sunt sint amet. Aliqua ipsum sint laboris eiusmod esse culpa elit sunt. Dolore est consectetur est quis quis magna. Aliquip nostrud dolore ex pariatur. Anim nostrud duis exercitation ut magna magna culpa. 
Nisi irure id mollit labore non sit mollit occaecat Lorem est ipsum. Nulla est fugiat cillum nisi aliqua consectetur amet nulla nostrud esse.','registered':'Friday, July 24, 2015 9:28 AM','latitude':'-68.055815','longitude':'-50.926966','tags':['deserunt','ad','ad','ut','id'],'greeting':'Hello, Clark! You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5d1e8df45d8ab4db','index':72,'guid':'ce85db37-7d04-4f4c-a4b0-78003533e5c6','isActive':false,'balance':'$1,127.43','picture':'http://placehold.it/32x32','age':21,'eyeColor':'green','name':{'first':'Dillon','last':'Hooper'},'company':'MEDESIGN','email':'dillon.hooper@medesign.io','phone':'+1 (929) 600-3797','address':'652 Mill Avenue, Elliston, Mississippi, 2958','about':'Dolore culpa qui exercitation nostrud do. Irure duis in ad ipsum aliqua aliquip nulla sit veniam officia quis occaecat est. Magna qui eiusmod pariatur aliquip minim commodo. Qui ex dolor excepteur consequat eiusmod occaecat. In officia ipsum do Lorem excepteur proident pariatur labore.','registered':'Monday, May 26, 2014 2:38 AM','latitude':'-36.032189','longitude':'86.865529','tags':['non','ut','ex','Lorem','quis'],'greeting':'Hello, Dillon! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb84814579c3121b3','index':73,'guid':'d7303901-5186-4595-a759-22306f67d0a3','isActive':true,'balance':'$2,326.59','picture':'http://placehold.it/32x32','age':33,'eyeColor':'green','name':{'first':'Moreno','last':'Hull'},'company':'ZEAM','email':'moreno.hull@zeam.me','phone':'+1 (984) 586-3738','address':'265 Pine Street, Talpa, North Carolina, 6041','about':'Fugiat exercitation est ullamco anim. Exercitation proident id sunt culpa Lorem amet. Consectetur anim consectetur pariatur consequat consectetur amet excepteur voluptate ea velit duis eiusmod proident. In sint laborum cupidatat ea amet ex. 
Reprehenderit amet sunt dolor ullamco est ex deserunt.','registered':'Wednesday, January 24, 2018 8:52 PM','latitude':'84.956857','longitude':'113.210051','tags':['est','excepteur','anim','Lorem','dolor'],'greeting':'Hello, Moreno! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda4eb9dcb92c82d06','index':74,'guid':'8ee28651-802e-4523-b676-c713f6e874b8','isActive':true,'balance':'$3,783.97','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Tracie','last':'Price'},'company':'ICOLOGY','email':'tracie.price@icology.com','phone':'+1 (897) 403-3768','address':'487 Sheffield Avenue, Vallonia, Wyoming, 276','about':'Voluptate laboris laborum aute ex sint voluptate officia proident. Sit esse nostrud cupidatat in veniam sit duis est. Do mollit elit exercitation aliqua id irure ex. Lorem reprehenderit do ullamco sint ea ad nisi ad ut.','registered':'Saturday, December 10, 2016 9:44 AM','latitude':'77.770464','longitude':'151.392903','tags':['incididunt','labore','aliquip','anim','minim'],'greeting':'Hello, Tracie! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed68ab1a55d1c35e6c','index':75,'guid':'deedd26a-8928-4064-9666-5c59ea8144b4','isActive':true,'balance':'$2,848.08','picture':'http://placehold.it/32x32','age':32,'eyeColor':'brown','name':{'first':'Montgomery','last':'Bruce'},'company':'CYTREK','email':'montgomery.bruce@cytrek.org','phone':'+1 (824) 414-2731','address':'397 Beach Place, Ellerslie, South Carolina, 967','about':'Mollit minim excepteur magna velit cillum excepteur exercitation anim id labore deserunt do. Fugiat ex et id ad. Duis excepteur laboris est nulla do id irure quis eiusmod do esse ut culpa in.','registered':'Tuesday, August 25, 2015 6:42 AM','latitude':'79.722631','longitude':'-7.516885','tags':['Lorem','sint','voluptate','proident','incididunt'],'greeting':'Hello, Montgomery! 
You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd90e0abb1cc2b0aa','index':76,'guid':'a072159d-12db-4747-9c2a-e2486a53d043','isActive':false,'balance':'$2,723.54','picture':'http://placehold.it/32x32','age':40,'eyeColor':'green','name':{'first':'Zelma','last':'Salinas'},'company':'IMAGEFLOW','email':'zelma.salinas@imageflow.net','phone':'+1 (964) 555-3856','address':'584 Reeve Place, Nord, Georgia, 7473','about':'Aliqua proident excepteur duis cupidatat cillum amet esse esse consectetur ea. Officia sunt consequat nostrud minim enim dolore dolor duis cillum. Esse labore veniam sint laborum excepteur sint tempor do ad cupidatat aliquip laboris elit id. Velit reprehenderit ullamco velit ullamco adipisicing velit esse irure velit et.','registered':'Thursday, February 25, 2016 8:18 PM','latitude':'-32.880524','longitude':'115.180489','tags':['id','nulla','reprehenderit','consequat','reprehenderit'],'greeting':'Hello, Zelma! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed98d836c8da283bb2','index':77,'guid':'838bebad-cc20-44e9-9eb7-902a8ca25efb','isActive':false,'balance':'$3,488.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Shaw','last':'Parsons'},'company':'PEARLESEX','email':'shaw.parsons@pearlesex.name','phone':'+1 (912) 567-3580','address':'606 Ocean Avenue, Tyro, Northern Mariana Islands, 3367','about':'Laborum labore occaecat culpa pariatur nisi non adipisicing esse consectetur officia officia. Deserunt velit eu enim consectetur ut cillum aliqua occaecat dolor qui esse. Incididunt ad est ex eu culpa anim aliquip laborum. Aliqua consectetur velit exercitation magna minim nulla do ut excepteur enim aliquip et. Nostrud enim sunt amet amet proident aliqua velit dolore. Consectetur ipsum fugiat proident id est reprehenderit tempor irure commodo. 
Sit excepteur fugiat occaecat nulla Lorem et cillum.','registered':'Thursday, April 19, 2018 1:41 AM','latitude':'69.715573','longitude':'-118.481237','tags':['laboris','adipisicing','magna','voluptate','id'],'greeting':'Hello, Shaw! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1101734633c6ebba','index':78,'guid':'8fd0c52a-9d74-4984-a608-d612ecd8ddf0','isActive':true,'balance':'$3,820.02','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Jaime','last':'Beard'},'company':'IZZBY','email':'jaime.beard@izzby.us','phone':'+1 (820) 412-3806','address':'362 Hudson Avenue, Delco, New Jersey, 5684','about':'Ut cupidatat veniam nulla magna commodo sit duis veniam consectetur cupidatat elit quis tempor. Duis officia ullamco proident sunt non mollit excepteur. Nisi ex amet laboris proident duis reprehenderit et est aliqua mollit amet ad. Enim eu elit excepteur eu exercitation duis consequat culpa. Adipisicing reprehenderit duis Lorem reprehenderit dolor aliqua incididunt eiusmod consequat ad occaecat fugiat do laborum. Qui ad aliquip ex do sunt. Fugiat non ut fugiat eu.','registered':'Sunday, March 9, 2014 3:41 PM','latitude':'17.926318','longitude':'108.985996','tags':['ut','voluptate','veniam','non','commodo'],'greeting':'Hello, Jaime! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edcd125a89dcf18e0d','index':79,'guid':'eccaa4ca-0fa7-4b00-a1e3-fe7953403894','isActive':true,'balance':'$1,521.33','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Terra','last':'Sullivan'},'company':'ZANITY','email':'terra.sullivan@zanity.biz','phone':'+1 (995) 498-2714','address':'346 Congress Street, Tuttle, Maryland, 3152','about':'Incididunt enim veniam ut veniam quis dolore pariatur culpa ex. Cillum laboris dolor exercitation officia. Officia irure magna aliqua veniam officia ullamco culpa. Cillum enim velit ea sint sint officia labore ea adipisicing culpa laboris. 
Anim aute sint commodo culpa ex quis minim ut laborum.','registered':'Sunday, June 1, 2014 5:38 AM','latitude':'-4.655435','longitude':'5.851803','tags':['anim','non','anim','laborum','pariatur'],'greeting':'Hello, Terra! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed9b9fc3041a674c87','index':80,'guid':'9f95fa36-4e45-4c3f-9362-3d4d809bf57f','isActive':true,'balance':'$3,403.16','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Sharpe','last':'Berger'},'company':'ZILLAN','email':'sharpe.berger@zillan.ca','phone':'+1 (913) 498-3005','address':'277 Bragg Street, Faywood, Texas, 6487','about':'Dolor duis id aute ea veniam amet ullamco id. Culpa deserunt irure mollit tempor dolore veniam culpa officia culpa laborum eiusmod. Ullamco tempor qui aliqua cupidatat veniam cillum eu ut ex minim eu in. Quis exercitation anim eiusmod tempor esse mollit exercitation cillum ipsum reprehenderit. Sint voluptate ipsum officia sint magna nulla tempor eiusmod eiusmod veniam. Consectetur non ad veniam exercitation voluptate non nostrud.','registered':'Tuesday, June 27, 2017 12:58 AM','latitude':'-0.54085','longitude':'106.258693','tags':['proident','eiusmod','commodo','excepteur','pariatur'],'greeting':'Hello, Sharpe! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1a1866757bf675e0','index':81,'guid':'1b944a01-01d3-4846-94e3-630f4d0e51a3','isActive':true,'balance':'$2,038.61','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Blanchard','last':'Ewing'},'company':'CONJURICA','email':'blanchard.ewing@conjurica.info','phone':'+1 (859) 593-3212','address':'252 Beaver Street, Kiskimere, Utah, 3255','about':'Labore magna aute adipisicing ut dolor sit ea. Officia culpa aute occaecat sit ex ullamco aliquip ad sit culpa. Ex in enim dolore ex est sit. Do irure nulla magna sint aliquip in duis aute. 
Magna ullamco sit labore ea tempor voluptate.','registered':'Monday, May 4, 2015 10:50 AM','latitude':'76.207595','longitude':'0.672563','tags':['proident','pariatur','officia','in','culpa'],'greeting':'Hello, Blanchard! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed987d82f4e22d939c','index':82,'guid':'97a90aee-3cee-4678-819e-24fb94279dc1','isActive':false,'balance':'$1,201.55','picture':'http://placehold.it/32x32','age':28,'eyeColor':'blue','name':{'first':'Wells','last':'Solomon'},'company':'CORPULSE','email':'wells.solomon@corpulse.tv','phone':'+1 (840) 539-3349','address':'159 Radde Place, Linganore, Idaho, 230','about':'Consequat dolore mollit sit irure cupidatat commodo. Incididunt cillum reprehenderit ullamco sit proident cupidatat occaecat reprehenderit officia. Ad anim Lorem elit in officia minim proident nisi commodo eiusmod ea Lorem dolore voluptate. Dolor aliquip est commodo Lorem dolor ut aliquip ut. Sit anim officia dolore excepteur aute enim cillum.','registered':'Friday, January 6, 2017 1:59 PM','latitude':'70.020883','longitude':'14.503588','tags':['mollit','aute','officia','nostrud','laboris'],'greeting':'Hello, Wells! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf7a904ea0d0bc2a','index':83,'guid':'fe639a0c-7517-43e6-b0da-cd9ca5b9e267','isActive':false,'balance':'$3,664.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'blue','name':{'first':'Natalia','last':'Brown'},'company':'SYNTAC','email':'natalia.brown@syntac.co.uk','phone':'+1 (952) 595-3513','address':'332 Lenox Road, Springville, Alabama, 8406','about':'Nulla consequat officia commodo ea sunt irure anim velit aliquip aliquip. Labore ullamco occaecat proident voluptate cillum labore minim nostrud excepteur. Qui fugiat nostrud cillum fugiat ullamco id commodo aliqua voluptate mollit id id laboris. Cillum qui duis duis sit adipisicing elit ut aliqua eu. 
Anim nisi aliqua sit mollit.','registered':'Sunday, July 30, 2017 1:02 PM','latitude':'31.937613','longitude':'-9.957927','tags':['magna','adipisicing','exercitation','tempor','consectetur'],'greeting':'Hello, Natalia! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed8823fa385cad4aa3','index':84,'guid':'5cf280da-f5f0-4cc6-9063-e9d5863c8c89','isActive':false,'balance':'$1,624.17','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Greene','last':'Waller'},'company':'ISOTRACK','email':'greene.waller@isotrack.io','phone':'+1 (838) 406-3608','address':'362 Albemarle Road, Gardiner, Michigan, 2764','about':'Ut nisi sit sint nulla dolor magna. Culpa occaecat adipisicing veniam proident excepteur tempor quis ex. Fugiat tempor laborum dolor adipisicing irure anim cupidatat ut exercitation ex sit. Cupidatat exercitation commodo sunt ex irure fugiat eu esse do ullamco mollit dolore cupidatat. Cupidatat magna incididunt officia dolore esse voluptate deserunt in laborum dolor. Sit fugiat Lorem eu ullamco. Laboris veniam quis cillum tempor ex fugiat cillum cupidatat.','registered':'Sunday, June 10, 2018 10:32 PM','latitude':'0.256921','longitude':'-96.141941','tags':['magna','dolore','deserunt','aliquip','cillum'],'greeting':'Hello, Greene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda7c905c2d24c7d31','index':85,'guid':'aa30a9fb-8a16-48eb-8bb7-1307d1e1f191','isActive':false,'balance':'$1,974.04','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Carlene','last':'Hanson'},'company':'DIGIRANG','email':'carlene.hanson@digirang.me','phone':'+1 (981) 417-3209','address':'435 Clark Street, Choctaw, Oregon, 9888','about':'Amet labore esse cillum irure laborum consectetur occaecat non aliquip aliquip proident. Nisi magna nulla officia duis labore aute nulla laborum duis tempor minim. 
Velit elit reprehenderit nisi exercitation officia incididunt amet cupidatat excepteur proident consectetur.','registered':'Thursday, April 20, 2017 6:13 AM','latitude':'68.529086','longitude':'68.802409','tags':['pariatur','nulla','qui','amet','labore'],'greeting':'Hello, Carlene! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed6fbee12ce9e55dbf','index':86,'guid':'0fce89aa-3310-48df-862a-68bd3d776644','isActive':false,'balance':'$3,909.64','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Doris','last':'Collins'},'company':'ZIORE','email':'doris.collins@ziore.com','phone':'+1 (914) 405-2360','address':'301 Lorraine Street, Stouchsburg, Minnesota, 7476','about':'Nisi deserunt aliquip et deserunt ipsum ad consectetur est non ullamco. Dolore do ut voluptate do eiusmod. Culpa ad in eiusmod nisi cillum do. Officia magna cillum sint aliqua reprehenderit amet est ipsum. Eiusmod deserunt commodo proident consequat. Amet minim dolor consequat aliquip aliquip culpa non exercitation non.','registered':'Wednesday, February 25, 2015 9:15 PM','latitude':'-57.364906','longitude':'130.766587','tags':['nulla','deserunt','cillum','eiusmod','adipisicing'],'greeting':'Hello, Doris! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edede9402476c398c0','index':87,'guid':'60cf0aa6-bc6d-4305-8842-d27e6af1306f','isActive':false,'balance':'$2,817.53','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Cline','last':'Hayden'},'company':'ECRAZE','email':'cline.hayden@ecraze.org','phone':'+1 (965) 507-2138','address':'352 Rutland Road, Ebro, Connecticut, 1196','about':'Dolor eiusmod enim anim sit enim ea tempor. Tempor amet consectetur aliquip culpa do ex excepteur deserunt. Dolor commodo veniam culpa sint. 
Commodo consectetur pariatur irure nisi deserunt cillum est dolor ipsum ea.','registered':'Thursday, September 29, 2016 5:58 AM','latitude':'62.50713','longitude':'86.247286','tags':['enim','tempor','anim','veniam','proident'],'greeting':'Hello, Cline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edeb72f151994a551b','index':88,'guid':'dbb49c62-86b1-409f-b8b8-f609c709d2a8','isActive':false,'balance':'$3,122.56','picture':'http://placehold.it/32x32','age':39,'eyeColor':'green','name':{'first':'Janelle','last':'Rutledge'},'company':'TERRAGEN','email':'janelle.rutledge@terragen.net','phone':'+1 (914) 581-3749','address':'170 Falmouth Street, Alderpoint, West Virginia, 642','about':'Laboris proident cillum sunt qui ea sunt. Officia adipisicing exercitation dolore magna reprehenderit amet anim id. Laboris commodo sit irure irure. Excepteur est mollit fugiat incididunt consectetur veniam irure ea mollit. Cillum enim consequat sunt sunt nisi incididunt tempor enim.','registered':'Monday, February 16, 2015 5:46 AM','latitude':'-46.392023','longitude':'32.054562','tags':['eu','eu','nisi','labore','deserunt'],'greeting':'Hello, Janelle! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edc9c2604846ff9a0d','index':89,'guid':'c4d7a365-f1d3-4584-b78e-008394c219f7','isActive':true,'balance':'$1,807.19','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Abby','last':'Lopez'},'company':'GRAINSPOT','email':'abby.lopez@grainspot.name','phone':'+1 (917) 442-3955','address':'488 Kensington Walk, Winston, Hawaii, 9109','about':'Incididunt deserunt Lorem proident magna tempor enim quis duis eu ut adipisicing in. Ex mollit non irure aliqua officia. Fugiat id ipsum consequat irure id ullamco culpa quis nulla enim aliquip consequat et. Dolor ut anim velit irure consequat cillum eu. 
Aute occaecat laborum est aliqua.','registered':'Sunday, April 1, 2018 11:28 PM','latitude':'-10.177041','longitude':'-165.756718','tags':['est','laborum','culpa','non','quis'],'greeting':'Hello, Abby! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed03237438b158af9e','index':90,'guid':'36c4a19f-2d00-4e40-bd49-155fd2ce0a6c','isActive':false,'balance':'$2,757.86','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Whitney','last':'Sheppard'},'company':'ANACHO','email':'whitney.sheppard@anacho.us','phone':'+1 (922) 437-2383','address':'951 Beekman Place, Homeworth, New York, 6088','about':'Sint minim nisi minim non minim aliqua pariatur ullamco do sint qui labore. Aute elit reprehenderit ad do fugiat est amet. In incididunt tempor commodo cillum tempor est labore anim.','registered':'Tuesday, September 13, 2016 6:43 PM','latitude':'-49.732527','longitude':'-171.846715','tags':['exercitation','veniam','sunt','est','proident'],'greeting':'Hello, Whitney! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edb99dd3aa53d2cb7f','index':91,'guid':'17afd430-f37f-4d55-958c-72f35cdb5997','isActive':false,'balance':'$3,683.86','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Ilene','last':'Blackwell'},'company':'ENQUILITY','email':'ilene.blackwell@enquility.biz','phone':'+1 (817) 555-2616','address':'950 Varanda Place, Belgreen, Virgin Islands, 1765','about':'Id eiusmod deserunt eiusmod adipisicing adipisicing est enim pariatur esse duis. Qui velit duis irure magna consectetur dolore reprehenderit. Cillum dolore minim consectetur irure non qui velit cillum veniam adipisicing incididunt. Deserunt veniam excepteur veniam velit aliquip labore quis exercitation magna do non dolor. Aliquip occaecat minim adipisicing deserunt fugiat nulla occaecat proident irure consectetur eiusmod irure. 
Enim Lorem deserunt amet Lorem commodo eiusmod reprehenderit occaecat adipisicing dolor voluptate cillum.','registered':'Thursday, February 1, 2018 8:39 AM','latitude':'57.393644','longitude':'-3.704258','tags':['adipisicing','dolor','commodo','Lorem','Lorem'],'greeting':'Hello, Ilene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed353f4deb62c3342a','index':92,'guid':'9953e285-2095-4f1c-978b-9ece2a867e9d','isActive':false,'balance':'$1,202.44','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Dawson','last':'Herman'},'company':'BITENDREX','email':'dawson.herman@bitendrex.ca','phone':'+1 (843) 522-2655','address':'471 Channel Avenue, Denio, Alaska, 5040','about':'Nisi occaecat mollit reprehenderit nisi minim Lorem mollit. Ea proident irure cillum quis. Deserunt consectetur consectetur consequat quis enim minim ea ipsum proident nisi ad non aliquip. Veniam aute minim consequat irure voluptate aute amet excepteur exercitation cillum duis quis adipisicing nostrud.','registered':'Tuesday, December 8, 2015 5:40 PM','latitude':'-55.602721','longitude':'-26.683234','tags':['qui','dolor','deserunt','eiusmod','labore'],'greeting':'Hello, Dawson! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5464bc50a5310ad','index':93,'guid':'724b2434-4dbd-417d-aa07-6065715f434f','isActive':false,'balance':'$1,595.98','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Alice','last':'Christian'},'company':'ZENOLUX','email':'alice.christian@zenolux.info','phone':'+1 (954) 466-2650','address':'875 Gerritsen Avenue, Townsend, Kentucky, 6568','about':'Nulla labore occaecat ex culpa magna. Commodo occaecat et in consequat cillum laborum magna adipisicing excepteur. Do ut Lorem esse voluptate officia ea aliquip proident amet veniam minim nulla adipisicing. Enim consectetur incididunt laborum voluptate tempor deserunt non laboris. 
Aliquip deserunt aute irure dolore magna anim aliquip sint magna Lorem. Officia laboris nulla officia sint labore nisi. Do Lorem id in est esse adipisicing id fugiat enim esse laborum.','registered':'Wednesday, October 3, 2018 9:26 PM','latitude':'-88.790637','longitude':'138.817328','tags':['duis','ea','magna','ea','incididunt'],'greeting':'Hello, Alice! You have 8 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda01886247b6a4f3d','index':94,'guid':'17c9f4d3-7d72-44e3-8f7c-08d7de920f46','isActive':false,'balance':'$3,173.29','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Schwartz','last':'Mccormick'},'company':'EVIDENDS','email':'schwartz.mccormick@evidends.tv','phone':'+1 (924) 531-2802','address':'160 Midwood Street, Indio, Palau, 4241','about':'Anim reprehenderit et et adipisicing voluptate consequat elit. Sint Lorem laboris Lorem minim nostrud aute reprehenderit elit aute quis nulla. Officia aute eiusmod mollit cillum eu aliquip non enim ea occaecat quis fugiat occaecat officia. Eiusmod culpa exercitation dolor aliqua enim occaecat nisi cupidatat duis ex dolore id. Id consequat aliqua cupidatat ut. Sit nisi est sunt culpa ullamco excepteur sunt pariatur incididunt amet. Ut tempor duis velit eu ut id culpa aute anim occaecat labore.','registered':'Thursday, March 2, 2017 5:57 PM','latitude':'38.618587','longitude':'-165.142529','tags':['ad','reprehenderit','magna','elit','mollit'],'greeting':'Hello, Schwartz! 
You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51be4df456ec2bc9','index':95,'guid':'44f68f65-959b-4ec2-bd2a-1f30035f76fc','isActive':false,'balance':'$3,242.24','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Bonita','last':'Stevens'},'company':'SLOFAST','email':'bonita.stevens@slofast.co.uk','phone':'+1 (886) 473-2105','address':'459 Bushwick Court, Kilbourne, Rhode Island, 9450','about':'Consequat reprehenderit qui reprehenderit nisi sit est in qui aliquip amet. Ex deserunt cupidatat amet cillum eiusmod irure anim in amet proident voluptate. Ad officia culpa in non incididunt do.','registered':'Saturday, August 22, 2015 5:23 AM','latitude':'60.013542','longitude':'58.242132','tags':['aute','adipisicing','in','cillum','officia'],'greeting':'Hello, Bonita! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed50a55e3587993f68','index':96,'guid':'652e434f-221e-4899-af12-38dca5c9621d','isActive':false,'balance':'$2,720.06','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Charmaine','last':'Jackson'},'company':'FLUM','email':'charmaine.jackson@flum.io','phone':'+1 (947) 573-2692','address':'788 Windsor Place, Highland, Arkansas, 8869','about':'Dolore reprehenderit irure excepteur eu reprehenderit sint Lorem ut amet in. Consequat anim elit sunt aliquip incididunt. Culpa consequat do exercitation dolor enim dolor sunt sit excepteur ad anim. Dolor aute elit velit mollit minim eu.','registered':'Wednesday, April 6, 2016 7:54 PM','latitude':'25.756553','longitude':'-5.482531','tags':['amet','sint','consequat','est','ex'],'greeting':'Hello, Charmaine! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed213621949bbdd5d3','index':97,'guid':'7d7d93d8-3e37-4b4a-9fa2-591fb7d153ce','isActive':true,'balance':'$1,370.63','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Petersen','last':'Cooley'},'company':'ROTODYNE','email':'petersen.cooley@rotodyne.me','phone':'+1 (929) 563-3339','address':'338 Pioneer Street, Carbonville, Missouri, 3030','about':'Cillum elit dolore labore aute. Cillum ea incididunt cupidatat consequat sint eu mollit. Excepteur commodo eiusmod ex Lorem enim velit minim.','registered':'Friday, December 8, 2017 5:53 AM','latitude':'-10.576254','longitude':'-111.176861','tags':['veniam','eu','eiusmod','dolore','voluptate'],'greeting':'Hello, Petersen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e938138d58ed453','index':98,'guid':'d6fea4a3-03f6-46ee-90b9-8ec51a585e29','isActive':true,'balance':'$1,216.54','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Rosanne','last':'Terry'},'company':'EXTREMO','email':'rosanne.terry@extremo.com','phone':'+1 (812) 496-2691','address':'368 Rockaway Avenue, Gloucester, Illinois, 7913','about':'Duis et nostrud duis quis minim eiusmod culpa do ea ad pariatur tempor. Velit veniam aliqua aliquip est enim ex et culpa dolor ullamco culpa officia. Eu id occaecat aute cillum aute sit aute laboris ipsum voluptate ex. Amet tempor minim tempor Lorem quis dolore. Pariatur consequat dolore nulla veniam dolor exercitation consequat nulla laboris incididunt do. Dolore do tempor deserunt exercitation incididunt officia incididunt ut do reprehenderit do eiusmod nulla.','registered':'Sunday, August 6, 2017 12:46 PM','latitude':'-43.257964','longitude':'-45.147686','tags':['et','incididunt','esse','commodo','ipsum'],'greeting':'Hello, Rosanne! 
You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed632b1a1d65501d6b','index':99,'guid':'bf8c6ac1-ee18-48ee-ae94-ea515a53c951','isActive':true,'balance':'$2,905.58','picture':'http://placehold.it/32x32','age':21,'eyeColor':'blue','name':{'first':'Irene','last':'Castro'},'company':'POLARIA','email':'irene.castro@polaria.org','phone':'+1 (818) 417-3761','address':'901 Dupont Street, Sperryville, Oklahoma, 953','about':'Pariatur minim laboris aliqua dolor aliquip consequat ea do duis voluptate id Lorem. In reprehenderit et adipisicing anim elit incididunt velit in laborum laborum. Qui minim magna et amet sit do voluptate reprehenderit ea sit sint velit.','registered':'Tuesday, August 18, 2015 10:48 AM','latitude':'-7.004055','longitude':'116.052433','tags':['sit','proident','enim','ullamco','non'],'greeting':'Hello, Irene! You have 10 unread messages.','favoriteFruit':'apple'}]" + } +} diff --git a/pkg/backend/remote/testdata/plan-long-line/plan.log b/pkg/backend/remote/testdata/plan-long-line/plan.log new file mode 100644 index 00000000000..f34ed170c9a --- /dev/null +++ b/pkg/backend/remote/testdata/plan-long-line/plan.log @@ -0,0 +1,23 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + triggers.%: "1" + triggers.long_line: "[{'_id':'5c5ab0ed7de45e993ffb9eeb','index':0,'guid':'e734d772-6b5a-4cb0-805c-91cd5e560e20','isActive':false,'balance':'$1,472.03','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Darlene','last':'Garza'},'company':'GEEKOSIS','email':'darlene.garza@geekosis.io','phone':'+1 (850) 506-3347','address':'165 Kiely Place, Como, New Mexico, 4335','about':'Officia ullamco et sunt magna voluptate culpa cupidatat ea tempor laboris cupidatat ea anim laboris. Minim enim quis enim esse laborum est veniam. Lorem excepteur elit Lorem cupidatat elit ea anim irure fugiat fugiat sunt mollit. Consectetur ad nulla dolor amet esse occaecat aliquip sit. Magna sit elit adipisicing ut reprehenderit anim exercitation sit quis ea pariatur Lorem magna dolore.','registered':'Wednesday, March 11, 2015 12:58 PM','latitude':'20.729127','longitude':'-127.343593','tags':['minim','in','deserunt','occaecat','fugiat'],'greeting':'Hello, Darlene! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda9117d15f1c1f112','index':1,'guid':'f0d1eed2-c6a9-4535-8800-d4bd53fe7eee','isActive':true,'balance':'$2,901.90','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Flora','last':'Short'},'company':'SIGNITY','email':'flora.short@signity.me','phone':'+1 (840) 520-2666','address':'636 Johnson Avenue, Gerber, Wisconsin, 9139','about':'Veniam dolore deserunt Lorem aliqua qui eiusmod. Amet tempor fugiat duis incididunt amet adipisicing. Id ea nisi veniam eiusmod.','registered':'Wednesday, May 2, 2018 5:59 AM','latitude':'-63.267612','longitude':'4.224102','tags':['veniam','incididunt','id','aliqua','reprehenderit'],'greeting':'Hello, Flora! 
You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed83fd574d8041fa16','index':2,'guid':'29499a07-414a-436f-ba62-6634ca16bdcc','isActive':true,'balance':'$2,781.28','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Trevino','last':'Marks'},'company':'KEGULAR','email':'trevino.marks@kegular.com','phone':'+1 (843) 571-2269','address':'200 Alabama Avenue, Grenelefe, Florida, 7963','about':'Occaecat nisi exercitation Lorem mollit laborum magna adipisicing culpa dolor proident dolore. Non consequat ea amet et id mollit incididunt minim anim amet nostrud labore tempor. Proident eu sint commodo nisi consequat voluptate do fugiat proident. Laboris eiusmod veniam non et elit nulla nisi labore incididunt Lorem consequat consectetur voluptate.','registered':'Saturday, January 25, 2014 5:56 AM','latitude':'65.044005','longitude':'-127.454864','tags':['anim','duis','velit','pariatur','enim'],'greeting':'Hello, Trevino! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed784eb6e350ff0a07','index':3,'guid':'40ed47e2-1747-4665-ab59-cdb3630a7642','isActive':true,'balance':'$2,000.78','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Solis','last':'Mckinney'},'company':'QABOOS','email':'solis.mckinney@qaboos.org','phone':'+1 (924) 405-2560','address':'712 Herkimer Court, Klondike, Ohio, 8133','about':'Minim ad anim minim tempor mollit magna tempor et non commodo amet. Nisi cupidatat labore culpa consectetur exercitation laborum adipisicing fugiat officia adipisicing consequat non. Qui voluptate tempor laboris exercitation qui non adipisicing occaecat voluptate sunt do nostrud velit. Consequat tempor officia laboris tempor irure cupidatat aliquip voluptate nostrud velit ex nulla tempor laboris. Qui pariatur pariatur enim aliquip velit. 
Officia mollit ullamco laboris velit velit eiusmod enim amet incididunt consectetur sunt.','registered':'Wednesday, April 12, 2017 6:59 AM','latitude':'-25.055596','longitude':'-140.126525','tags':['ipsum','adipisicing','amet','nulla','dolore'],'greeting':'Hello, Solis! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed02ce1ea9a2155d51','index':4,'guid':'1b5fb7d3-3b9a-4382-81b5-9ab01a27e74b','isActive':true,'balance':'$1,373.67','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Janell','last':'Battle'},'company':'GEEKMOSIS','email':'janell.battle@geekmosis.net','phone':'+1 (810) 591-3014','address':'517 Onderdonk Avenue, Shrewsbury, District Of Columbia, 2335','about':'Reprehenderit ad proident do anim qui officia magna magna duis cillum esse minim est. Excepteur ipsum anim ad laboris. In occaecat dolore nulla ea Lorem tempor et culpa in sint. Officia eu eu incididunt sit amet. Culpa duis id reprehenderit ut anim sit sunt. Duis dolore proident velit incididunt adipisicing pariatur fugiat incididunt eiusmod eu veniam irure.','registered':'Thursday, February 8, 2018 1:44 AM','latitude':'-33.254864','longitude':'-154.145885','tags':['aute','deserunt','ipsum','eiusmod','laborum'],'greeting':'Hello, Janell! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edab58604bd7d3dd1c','index':5,'guid':'6354c035-af22-44c9-8be9-b2ea9decc24d','isActive':true,'balance':'$3,535.68','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Combs','last':'Kirby'},'company':'LUXURIA','email':'combs.kirby@luxuria.name','phone':'+1 (900) 498-3266','address':'377 Kingsland Avenue, Ruckersville, Maine, 9916','about':'Lorem duis ipsum pariatur aliquip sunt. Commodo esse laborum incididunt mollit quis est laboris ea ea quis fugiat. 
Enim elit ullamco velit et fugiat veniam irure deserunt aliqua ad irure veniam.','registered':'Tuesday, February 21, 2017 4:04 PM','latitude':'-70.20591','longitude':'162.546871','tags':['reprehenderit','est','enim','aute','ad'],'greeting':'Hello, Combs! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf7fafeffc6357c51','index':6,'guid':'02523e0b-cc90-4309-b6b2-f493dc6076f6','isActive':false,'balance':'$3,754.30','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Macias','last':'Calderon'},'company':'AMTAP','email':'macias.calderon@amtap.us','phone':'+1 (996) 569-3667','address':'305 Royce Street, Glidden, Iowa, 9248','about':'Exercitation nulla deserunt pariatur adipisicing. In commodo deserunt incididunt ut velit minim qui ut quis. Labore elit ullamco eiusmod voluptate in eu do est fugiat aute mollit deserunt. Eu duis proident velit fugiat velit ut. Ut non esse amet laborum nisi tempor in nulla.','registered':'Thursday, October 23, 2014 10:28 PM','latitude':'32.371629','longitude':'60.155135','tags':['commodo','elit','velit','excepteur','aliqua'],'greeting':'Hello, Macias! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0e8a6109e7fabf17','index':7,'guid':'675ff6b6-197b-4154-9775-813d661df822','isActive':false,'balance':'$2,850.62','picture':'http://placehold.it/32x32','age':37,'eyeColor':'green','name':{'first':'Stefanie','last':'Rivers'},'company':'RECRITUBE','email':'stefanie.rivers@recritube.biz','phone':'+1 (994) 591-3551','address':'995 Campus Road, Abrams, Virginia, 3251','about':'Esse aute non laborum Lorem nulla irure. Veniam elit aute ut et dolor non deserunt laboris tempor. Ipsum quis cupidatat laborum laboris voluptate esse duis eiusmod excepteur consectetur commodo ullamco qui occaecat. 
Culpa velit cillum occaecat minim nisi.','registered':'Thursday, June 9, 2016 3:40 PM','latitude':'-18.526825','longitude':'149.670782','tags':['occaecat','sunt','reprehenderit','ipsum','magna'],'greeting':'Hello, Stefanie! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf7d9bc2db4e476e3','index':8,'guid':'adaefc55-f6ea-4bd1-a147-0e31c3ce7a21','isActive':true,'balance':'$2,555.13','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Hillary','last':'Lancaster'},'company':'OLUCORE','email':'hillary.lancaster@olucore.ca','phone':'+1 (964) 474-3018','address':'232 Berriman Street, Kaka, Massachusetts, 6792','about':'Veniam ad laboris quis reprehenderit aliquip nisi sunt excepteur ea aute laborum excepteur incididunt. Nisi exercitation aliquip do culpa commodo ex officia ut enim mollit in deserunt in amet. Anim eu deserunt dolore non cupidatat ut enim incididunt aute dolore voluptate. Do cillum mollit laborum non incididunt occaecat aute voluptate nisi irure.','registered':'Thursday, June 4, 2015 9:45 PM','latitude':'88.075919','longitude':'-148.951368','tags':['reprehenderit','veniam','ad','aute','anim'],'greeting':'Hello, Hillary! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed7b7192ad6a0f267c','index':9,'guid':'0ca9b8ea-f671-474e-be26-4a49cae4838a','isActive':true,'balance':'$3,684.51','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Jill','last':'Conner'},'company':'EXOZENT','email':'jill.conner@exozent.info','phone':'+1 (887) 467-2168','address':'751 Thames Street, Juarez, American Samoa, 8386','about':'Enim voluptate et non est in magna laborum aliqua enim aliqua est non nostrud. Tempor est nulla ipsum consectetur esse nostrud est id. Consequat do voluptate cupidatat eu fugiat et fugiat velit id. 
Sint dolore ad qui tempor anim eu amet consectetur do elit aute adipisicing consequat ex.','registered':'Sunday, October 22, 2017 7:35 AM','latitude':'84.384911','longitude':'40.305648','tags':['tempor','sint','irure','et','ex'],'greeting':'Hello, Jill! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed713fe676575aa72b','index':10,'guid':'c28023cf-cc57-4c2e-8d91-dfbe6bafadcd','isActive':false,'balance':'$2,792.45','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Hurley','last':'George'},'company':'ZAJ','email':'hurley.george@zaj.tv','phone':'+1 (984) 547-3284','address':'727 Minna Street, Lacomb, Colorado, 2557','about':'Ex velit cupidatat veniam culpa. Eiusmod ut fugiat adipisicing incididunt consectetur exercitation Lorem exercitation ex. Incididunt anim aute incididunt fugiat cupidatat qui eu non reprehenderit. Eiusmod dolor nisi culpa excepteur ut velit minim dolor voluptate amet commodo culpa in.','registered':'Thursday, February 16, 2017 6:41 AM','latitude':'25.989949','longitude':'10.200053','tags':['minim','ut','sunt','consequat','ullamco'],'greeting':'Hello, Hurley! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1e56732746c70d8b','index':11,'guid':'e9766f13-766c-4450-b4d2-8b04580f60b7','isActive':true,'balance':'$3,874.26','picture':'http://placehold.it/32x32','age':35,'eyeColor':'green','name':{'first':'Leticia','last':'Pace'},'company':'HONOTRON','email':'leticia.pace@honotron.co.uk','phone':'+1 (974) 536-3322','address':'365 Goodwin Place, Savage, Nevada, 9191','about':'Nisi Lorem aliqua esse eiusmod magna. Ad minim incididunt proident ut Lorem cupidatat qui velit aliqua ullamco et ipsum in. Aliquip elit consectetur pariatur esse exercitation et officia quis. Occaecat tempor proident cillum anim ad commodo velit ut voluptate. 
Tempor et occaecat sit sint aliquip tempor nulla velit magna nisi proident exercitation Lorem id.','registered':'Saturday, August 4, 2018 5:05 AM','latitude':'70.620386','longitude':'-86.335813','tags':['occaecat','velit','labore','laboris','esse'],'greeting':'Hello, Leticia! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed941337fe42f47426','index':12,'guid':'6d390762-17ea-4b58-9a36-b0c9a8748a42','isActive':true,'balance':'$1,049.61','picture':'http://placehold.it/32x32','age':38,'eyeColor':'green','name':{'first':'Rose','last':'Humphrey'},'company':'MYOPIUM','email':'rose.humphrey@myopium.io','phone':'+1 (828) 426-3086','address':'389 Sapphire Street, Saticoy, Marshall Islands, 1423','about':'Aliquip enim excepteur adipisicing ex. Consequat aliqua consequat nostrud do occaecat deserunt excepteur sit et ipsum sunt dolor eu. Dolore laborum commodo excepteur tempor ad adipisicing proident excepteur magna non Lorem proident consequat aute. Fugiat minim consequat occaecat voluptate esse velit officia laboris nostrud nisi ut voluptate.','registered':'Monday, April 16, 2018 12:38 PM','latitude':'-47.083742','longitude':'109.022423','tags':['aute','non','sit','adipisicing','mollit'],'greeting':'Hello, Rose! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd0c02fc3fdc01a40','index':13,'guid':'07755618-6fdf-4b33-af50-364c18909227','isActive':true,'balance':'$1,823.61','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Judith','last':'Hale'},'company':'COLLAIRE','email':'judith.hale@collaire.me','phone':'+1 (922) 508-2843','address':'193 Coffey Street, Castleton, North Dakota, 3638','about':'Minim non ullamco ad anim nostrud dolore nostrud veniam consequat id eiusmod veniam laboris. Lorem irure esse mollit non velit aute id cupidatat est mollit occaecat magna excepteur. Adipisicing tempor nisi sit aliquip tempor pariatur tempor eu consectetur nulla amet nulla. 
Quis nisi nisi ea incididunt culpa et do. Esse officia eu pariatur velit sunt quis proident amet consectetur consequat. Nisi excepteur culpa nulla sit dolor deserunt excepteur dolor consequat elit cillum tempor Lorem.','registered':'Wednesday, August 24, 2016 12:29 AM','latitude':'-80.15514','longitude':'39.91007','tags':['consectetur','incididunt','aliquip','dolor','consequat'],'greeting':'Hello, Judith! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3e1e29caa4f728b','index':14,'guid':'2c6617a2-e7a9-4ff7-a8b9-e99554fe70fe','isActive':true,'balance':'$1,971.00','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Estes','last':'Sweet'},'company':'GEEKKO','email':'estes.sweet@geekko.com','phone':'+1 (866) 448-3032','address':'847 Cove Lane, Kula, Mississippi, 9178','about':'Veniam consectetur occaecat est excepteur consequat ipsum cillum sit consectetur. Ut cupidatat et reprehenderit dolore enim do cillum qui pariatur ad laborum incididunt esse. Fugiat sunt dolor veniam laboris ipsum deserunt proident reprehenderit laboris non nostrud. Magna excepteur sint magna laborum tempor sit exercitation ipsum labore est ullamco ullamco. Cillum voluptate cillum ea laborum Lorem. Excepteur sint ut nisi est esse non. Minim excepteur ullamco velit nisi ut in elit exercitation ut dolore.','registered':'Sunday, August 12, 2018 5:06 PM','latitude':'-9.57771','longitude':'-159.94577','tags':['culpa','dolor','velit','anim','pariatur'],'greeting':'Hello, Estes! 
You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edbcf088c6fd593091','index':15,'guid':'2cc79958-1b40-4e2c-907a-433903fd3da9','isActive':false,'balance':'$3,751.53','picture':'http://placehold.it/32x32','age':34,'eyeColor':'brown','name':{'first':'Kemp','last':'Spence'},'company':'EXOBLUE','email':'kemp.spence@exoblue.org','phone':'+1 (864) 487-2992','address':'217 Clay Street, Monument, North Carolina, 1460','about':'Nostrud duis cillum sint non commodo dolor aute aliqua adipisicing ad nulla non excepteur proident. Fugiat labore elit tempor cillum veniam reprehenderit laboris consectetur dolore amet qui cupidatat. Amet aliqua elit anim et consequat commodo excepteur officia anim aliqua ea eu labore cillum. Et ex dolor duis dolore commodo veniam et nisi.','registered':'Monday, October 29, 2018 5:23 AM','latitude':'-70.304222','longitude':'83.582371','tags':['velit','duis','consequat','incididunt','duis'],'greeting':'Hello, Kemp! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed6400479feb3de505','index':16,'guid':'91ccae6d-a3ea-43cf-bb00-3f2729256cc9','isActive':false,'balance':'$2,477.79','picture':'http://placehold.it/32x32','age':40,'eyeColor':'blue','name':{'first':'Ronda','last':'Burris'},'company':'EQUITOX','email':'ronda.burris@equitox.net','phone':'+1 (817) 553-3228','address':'708 Lawton Street, Deputy, Wyoming, 8598','about':'Excepteur voluptate aliquip consequat cillum est duis sit cillum eu eiusmod et laborum ullamco. Et minim reprehenderit aute voluptate amet ullamco. Amet sit enim ad irure deserunt nostrud anim veniam consequat dolor commodo. Consequat do occaecat do exercitation ullamco dolor ut. Id laboris consequat est dolor dolore tempor ullamco anim do ut nulla deserunt labore. Mollit ex Lorem ullamco mollit.','registered':'Monday, April 23, 2018 5:27 PM','latitude':'-31.227208','longitude':'0.63785','tags':['ipsum','magna','consectetur','sit','irure'],'greeting':'Hello, Ronda! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddbeab2e53e04d563','index':17,'guid':'a86d4eb6-6bd8-48c2-a8fc-1c933c835852','isActive':false,'balance':'$3,709.03','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Rosario','last':'Dillard'},'company':'BARKARAMA','email':'rosario.dillard@barkarama.name','phone':'+1 (933) 525-3898','address':'730 Chauncey Street, Forbestown, South Carolina, 6894','about':'Est eu fugiat aliquip ea ad qui ad mollit ad tempor voluptate et incididunt reprehenderit. Incididunt fugiat commodo minim adipisicing culpa consectetur duis eu ut commodo consequat voluptate labore. Nostrud irure labore adipisicing irure quis magna consequat dolor Lorem sint enim. Sint excepteur eu dolore elit ut do mollit sunt enim est. Labore id nostrud sint Lorem esse nostrud.','registered':'Friday, December 25, 2015 8:59 PM','latitude':'37.440827','longitude':'44.580474','tags':['Lorem','sit','ipsum','ea','ut'],'greeting':'Hello, Rosario! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf8e9b9c031d04e8','index':18,'guid':'a96f997c-daf8-40d4-92e1-be07e2cf0f60','isActive':false,'balance':'$1,878.37','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Sondra','last':'Gonzales'},'company':'XUMONK','email':'sondra.gonzales@xumonk.us','phone':'+1 (838) 560-2255','address':'230 Cox Place, Geyserville, Georgia, 6805','about':'Laborum sunt voluptate ea laboris nostrud. Amet deserunt aliqua Lorem voluptate velit deserunt occaecat minim ullamco. Lorem occaecat sit labore adipisicing ad magna mollit labore ullamco proident. Ea velit do proident fugiat esse commodo ex nostrud eu mollit pariatur. Labore laborum qui voluptate quis proident reprehenderit tempor dolore duis deserunt esse aliqua aliquip. Non veniam enim pariatur cupidatat ipsum dolore est reprehenderit. 
Non exercitation adipisicing proident magna elit occaecat non magna.','registered':'Sunday, June 26, 2016 4:02 AM','latitude':'62.247742','longitude':'-44.90666','tags':['ea','aute','in','voluptate','magna'],'greeting':'Hello, Sondra! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed2c1bcd06781f677e','index':19,'guid':'6ac47a16-eed4-4460-92ee-e0dd33c1fbb5','isActive':false,'balance':'$3,730.64','picture':'http://placehold.it/32x32','age':20,'eyeColor':'brown','name':{'first':'Anastasia','last':'Vega'},'company':'FIREWAX','email':'anastasia.vega@firewax.biz','phone':'+1 (867) 493-3698','address':'803 Arlington Avenue, Rosburg, Northern Mariana Islands, 8769','about':'Sint ex nisi tempor sunt voluptate non et eiusmod irure. Aute reprehenderit dolor mollit aliqua Lorem voluptate occaecat. Sint laboris deserunt Lorem incididunt nulla cupidatat do.','registered':'Friday, March 18, 2016 12:02 PM','latitude':'-32.010216','longitude':'-87.874753','tags':['aliquip','mollit','mollit','ad','laborum'],'greeting':'Hello, Anastasia! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed727fd645854bbf43','index':20,'guid':'67bd8cdb-ce6b-455c-944c-a80e17c6fa75','isActive':true,'balance':'$2,868.06','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Lucinda','last':'Cox'},'company':'ENDIPINE','email':'lucinda.cox@endipine.ca','phone':'+1 (990) 428-3002','address':'412 Thatford Avenue, Lafferty, New Jersey, 5271','about':'Esse nulla sunt ut consequat aute mollit. Est occaecat sunt nisi irure id anim est commodo. Elit mollit amet dolore sunt adipisicing ea laborum quis ea reprehenderit non consequat dolore. Minim sunt occaecat quis aute commodo dolore quis commodo proident. Sunt sint duis ullamco sit ea esse Lorem. 
Consequat pariatur eiusmod laboris adipisicing labore in laboris adipisicing adipisicing consequat aute ea et.','registered':'Friday, May 1, 2015 10:16 PM','latitude':'-14.200957','longitude':'-82.211386','tags':['do','sit','qui','officia','aliquip'],'greeting':'Hello, Lucinda! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed5a97284eb2cbd3a8','index':21,'guid':'f9fc999d-515c-4fc4-b339-76300e1b4bf2','isActive':true,'balance':'$1,172.57','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Conrad','last':'Bradley'},'company':'FUELWORKS','email':'conrad.bradley@fuelworks.info','phone':'+1 (956) 561-3226','address':'685 Fenimore Street, Esmont, Maryland, 7523','about':'Labore reprehenderit anim nisi sunt do nisi in. Est anim cillum id minim exercitation ullamco voluptate ipsum eu. Elit culpa consequat reprehenderit laborum in eu. Laboris amet voluptate laboris qui voluptate duis minim reprehenderit. Commodo sunt irure dolore sunt occaecat velit nisi eu minim minim.','registered':'Wednesday, January 18, 2017 11:13 PM','latitude':'31.665993','longitude':'38.868968','tags':['excepteur','exercitation','est','nisi','mollit'],'greeting':'Hello, Conrad! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edc4eaf6f760c38218','index':22,'guid':'8794ef5f-da2f-46f0-a755-c18a16409fd5','isActive':false,'balance':'$3,594.73','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Marquez','last':'Vargas'},'company':'MALATHION','email':'marquez.vargas@malathion.tv','phone':'+1 (976) 438-3126','address':'296 Hall Street, National, Texas, 2067','about':'Proident cillum aute minim fugiat sunt aliqua non occaecat est duis id id tempor. Qui deserunt nisi amet pariatur proident eu laboris esse adipisicing magna. Anim anim mollit aute non magna nisi aute magna labore ullamco reprehenderit voluptate et ad. Proident adipisicing aute eiusmod nostrud nostrud deserunt culpa. 
Elit eu ullamco nisi aliqua dolor sint pariatur excepteur sit consectetur tempor. Consequat Lorem ullamco commodo veniam qui sint magna. Sit mollit ad aliquip est id eu officia id adipisicing duis ad.','registered':'Tuesday, November 17, 2015 6:16 PM','latitude':'-36.443667','longitude':'22.336776','tags':['aliquip','veniam','ipsum','Lorem','ex'],'greeting':'Hello, Marquez! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edd7c718518ee0466a','index':23,'guid':'ad8781a2-059e-4288-9879-309d53a99bf5','isActive':true,'balance':'$3,570.68','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Snider','last':'Frost'},'company':'ZILODYNE','email':'snider.frost@zilodyne.co.uk','phone':'+1 (913) 485-3275','address':'721 Lincoln Road, Richmond, Utah, 672','about':'Minim enim Lorem esse incididunt do reprehenderit velit laborum ullamco. In aute eiusmod esse aliqua et labore tempor sunt ex mollit veniam tempor. Nulla elit cillum qui ullamco dolore amet deserunt magna amet laborum.','registered':'Saturday, August 23, 2014 12:58 AM','latitude':'-88.682554','longitude':'74.063179','tags':['nulla','ea','sint','aliquip','duis'],'greeting':'Hello, Snider! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf026fece8e2c0970','index':24,'guid':'1b7d81e1-1dba-4322-bb1a-eaa6a24cccea','isActive':false,'balance':'$2,037.91','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Snyder','last':'Fletcher'},'company':'COMTEST','email':'snyder.fletcher@comtest.io','phone':'+1 (830) 538-3860','address':'221 Lewis Place, Zortman, Idaho, 572','about':'Elit anim enim esse dolore exercitation. Laboris esse sint adipisicing fugiat sint do occaecat ut voluptate sint nulla. 
Ad sint ut reprehenderit nostrud irure id consectetur officia velit consequat.','registered':'Sunday, January 1, 2017 1:13 AM','latitude':'-54.742604','longitude':'69.534932','tags':['exercitation','commodo','in','id','aliqua'],'greeting':'Hello, Snyder! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b9a7f83da6d2dfd','index':25,'guid':'0b2cc6b6-0044-4b1c-aa31-bd72963457a0','isActive':false,'balance':'$1,152.76','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Regina','last':'James'},'company':'TELPOD','email':'regina.james@telpod.me','phone':'+1 (989) 455-3228','address':'688 Essex Street, Clayville, Alabama, 2772','about':'Eiusmod elit culpa reprehenderit ea veniam. Officia irure culpa duis aute ut. Irure duis cillum officia ea pariatur velit ut dolor incididunt reprehenderit ex elit laborum. Est pariatur veniam ad irure. Labore velit sunt esse laboris aliqua velit deserunt deserunt sit. Elit eiusmod ad laboris aliquip minim irure excepteur enim quis. Quis incididunt adipisicing ut magna cupidatat sit amet culpa.','registered':'Tuesday, April 25, 2017 10:16 PM','latitude':'-75.088027','longitude':'47.209828','tags':['elit','nisi','est','voluptate','proident'],'greeting':'Hello, Regina! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed10884f32f779f2bf','index':26,'guid':'1f6fb522-0002-46ff-8dac-451247f28168','isActive':true,'balance':'$1,948.79','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Collins','last':'Mcpherson'},'company':'DIGIGEN','email':'collins.mcpherson@digigen.com','phone':'+1 (991) 519-2334','address':'317 Merit Court, Sanford, Michigan, 6468','about':'Magna qui culpa dolor officia labore mollit ex excepteur duis eiusmod. Ea cupidatat ex ipsum mollit do minim duis. Nisi eiusmod minim tempor id esse commodo sunt sunt ullamco ut do laborum ullamco magna. Aliquip laborum dolor officia officia eu nostrud velit minim est anim. 
Ex elit laborum sunt magna exercitation nisi cillum sunt aute qui ea ullamco. Cupidatat ea sunt aute dolor duis nisi Lorem ullamco eiusmod. Sit ea velit ad veniam aliqua ad elit cupidatat ut magna in.','registered':'Friday, June 10, 2016 4:38 PM','latitude':'25.513996','longitude':'14.911124','tags':['exercitation','non','sit','velit','officia'],'greeting':'Hello, Collins! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8a575110efb15c6c','index':27,'guid':'2a904c82-068b-4ded-9ae6-cfeb6d7e62c9','isActive':true,'balance':'$3,427.91','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Mckay','last':'Barrera'},'company':'COMVEYER','email':'mckay.barrera@comveyer.org','phone':'+1 (853) 470-2560','address':'907 Glenwood Road, Churchill, Oregon, 8583','about':'In voluptate esse dolore enim sint quis dolor do exercitation sint et labore nisi. Eiusmod tempor exercitation dolore elit sit velit sint et. Sit magna adipisicing eiusmod do anim velit deserunt laboris ad ea pariatur. Irure nisi anim mollit elit commodo nulla. Aute eiusmod sit nulla eiusmod. Eiusmod est officia commodo mollit laboris do deserunt eu do nisi amet. Proident ad duis eiusmod laboris Lorem ut culpa pariatur Lorem reprehenderit minim aliquip irure sunt.','registered':'Saturday, December 19, 2015 2:49 PM','latitude':'-55.243287','longitude':'138.035406','tags':['non','quis','laboris','enim','nisi'],'greeting':'Hello, Mckay! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edcd49ab6a73ff7f32','index':28,'guid':'5d3e0dae-3f58-437f-b12d-de24667a904d','isActive':true,'balance':'$3,270.52','picture':'http://placehold.it/32x32','age':35,'eyeColor':'blue','name':{'first':'Mabel','last':'Leonard'},'company':'QUADEEBO','email':'mabel.leonard@quadeebo.net','phone':'+1 (805) 432-2356','address':'965 Underhill Avenue, Falconaire, Minnesota, 4450','about':'Cupidatat amet sunt est ipsum occaecat sit fugiat excepteur Lorem Lorem ex ea ipsum. 
Ad incididunt est irure magna excepteur occaecat nostrud. Minim dolor id anim ipsum qui nostrud ullamco aute ex Lorem magna deserunt excepteur Lorem.','registered':'Saturday, March 28, 2015 5:55 AM','latitude':'27.388359','longitude':'156.408728','tags':['quis','velit','deserunt','dolore','sit'],'greeting':'Hello, Mabel! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edde16ac2dc2fbb6c1','index':29,'guid':'d50c2233-70fc-4748-8ebf-02d45ac2a446','isActive':false,'balance':'$3,100.70','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Pace','last':'Duke'},'company':'SEQUITUR','email':'pace.duke@sequitur.name','phone':'+1 (983) 568-3119','address':'895 Melrose Street, Reno, Connecticut, 6259','about':'Ex veniam aliquip exercitation mollit elit est minim veniam aliqua labore deserunt. Dolor sunt sint cillum Lorem nisi ea irure cupidatat. Velit ut culpa cupidatat consequat cillum. Sint voluptate quis laboris qui incididunt do elit Lorem qui ullamco ut eu pariatur occaecat.','registered':'Saturday, August 18, 2018 2:18 PM','latitude':'31.930443','longitude':'-129.494784','tags':['culpa','est','nostrud','quis','aliquip'],'greeting':'Hello, Pace! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb908d85642ba77e8','index':30,'guid':'3edb6e42-367a-403d-a511-eb78bcc11f60','isActive':true,'balance':'$1,912.07','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Cohen','last':'Morrison'},'company':'POWERNET','email':'cohen.morrison@powernet.us','phone':'+1 (888) 597-2141','address':'565 Troutman Street, Idledale, West Virginia, 3196','about':'Ullamco voluptate duis commodo amet occaecat consequat et occaecat dolore nulla eu. Do aliqua sunt deserunt occaecat laboris labore voluptate cupidatat ullamco exercitation aliquip elit voluptate anim. Occaecat deserunt in labore cillum aute deserunt ea excepteur laboris sunt. 
Officia irure sint incididunt labore sint ipsum ullamco ea elit. Fugiat nostrud sunt ut officia mollit proident sunt dolor fugiat esse tempor do.','registered':'Friday, January 1, 2016 5:42 AM','latitude':'-20.01215','longitude':'26.361552','tags':['consectetur','sunt','nulla','reprehenderit','dolore'],'greeting':'Hello, Cohen! You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed91c77aa25a64a757','index':31,'guid':'8999a97b-0035-4f19-b555-91dd69aaa9b8','isActive':false,'balance':'$3,097.67','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Stout','last':'Valdez'},'company':'UPLINX','email':'stout.valdez@uplinx.biz','phone':'+1 (854) 480-3633','address':'880 Chestnut Avenue, Lowgap, Hawaii, 1537','about':'Cupidatat enim dolore non voluptate. Aliqua ut non Lorem in exercitation reprehenderit voluptate. Excepteur deserunt tempor laboris quis.','registered':'Wednesday, March 16, 2016 6:53 AM','latitude':'50.328393','longitude':'-25.990308','tags':['ea','fugiat','duis','consectetur','enim'],'greeting':'Hello, Stout! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0f52176c8c3e1bed','index':32,'guid':'743abcbd-1fab-4aed-8cb7-3c935eb64c74','isActive':false,'balance':'$1,118.54','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Ortega','last':'Joseph'},'company':'APEXIA','email':'ortega.joseph@apexia.ca','phone':'+1 (872) 596-3024','address':'304 Canda Avenue, Mulino, New York, 8721','about':'Ipsum elit id cupidatat minim nisi minim. Ea ex amet ea ipsum Lorem deserunt. Occaecat cupidatat magna cillum aliquip sint id quis amet nostrud officia enim laborum. Aliqua deserunt amet commodo laboris labore mollit est. Officia voluptate Lorem esse mollit aliquip laboris cupidatat minim et. 
Labore esse incididunt officia nostrud pariatur reprehenderit.','registered':'Tuesday, January 31, 2017 6:06 AM','latitude':'43.861714','longitude':'33.771783','tags':['ut','Lorem','esse','quis','fugiat'],'greeting':'Hello, Ortega! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed2c00cdd101b6cd52','index':33,'guid':'4f6f99cf-f692-4d03-b23a-26f2b27273bd','isActive':true,'balance':'$1,682.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Sampson','last':'Taylor'},'company':'GEOFORMA','email':'sampson.taylor@geoforma.info','phone':'+1 (911) 482-2993','address':'582 Kent Street, Umapine, Virgin Islands, 5300','about':'Voluptate laboris occaecat laboris tempor cillum quis cupidatat qui pariatur. Lorem minim commodo mollit adipisicing Lorem ut dolor consectetur ipsum. Sint sit voluptate labore aliqua ex labore velit. Ullamco tempor consectetur voluptate deserunt voluptate minim enim. Cillum commodo duis reprehenderit eu duis.','registered':'Thursday, November 9, 2017 11:24 PM','latitude':'24.949379','longitude':'155.034468','tags':['Lorem','cupidatat','elit','reprehenderit','commodo'],'greeting':'Hello, Sampson! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b7210ba0bc0d508','index':34,'guid':'73fd415f-f8cf-43e0-a86c-e725d000abd4','isActive':false,'balance':'$1,289.37','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Shari','last':'Melendez'},'company':'DIGIPRINT','email':'shari.melendez@digiprint.tv','phone':'+1 (914) 475-3995','address':'950 Wolf Place, Enetai, Alaska, 693','about':'Dolor incididunt et est commodo aliquip labore ad ullamco. Velit ex cillum nulla elit ex esse. Consectetur mollit fugiat cillum proident elit sunt non officia cillum ex laboris sint eu. Esse nulla eu officia in Lorem sint minim esse velit. Est Lorem ipsum enim aute. 
Elit minim eiusmod officia reprehenderit officia ut irure Lorem.','registered':'Wednesday, August 23, 2017 11:12 PM','latitude':'-70.347863','longitude':'94.812072','tags':['ea','ex','fugiat','duis','eu'],'greeting':'Hello, Shari! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed85ac364619d892ef','index':35,'guid':'c1905f34-14ff-4bd8-b683-02cac4d52623','isActive':false,'balance':'$2,538.50','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Santiago','last':'Joyner'},'company':'BRAINCLIP','email':'santiago.joyner@brainclip.co.uk','phone':'+1 (835) 405-2676','address':'554 Rose Street, Muir, Kentucky, 7752','about':'Quis culpa dolore fugiat magna culpa non deserunt consectetur elit. Id cupidatat occaecat duis irure ullamco elit in labore magna pariatur cillum est. Mollit dolore velit ipsum anim aliqua culpa sint. Occaecat aute anim ut sunt eu.','registered':'Thursday, January 18, 2018 4:49 PM','latitude':'57.057918','longitude':'-50.472596','tags':['ullamco','ullamco','sunt','voluptate','irure'],'greeting':'Hello, Santiago! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1763f56b1121fa88','index':36,'guid':'a7f50659-4ae3-4f3e-a9d8-087e05334b51','isActive':false,'balance':'$1,435.16','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Adeline','last':'Hoffman'},'company':'BITREX','email':'adeline.hoffman@bitrex.io','phone':'+1 (823) 488-3201','address':'221 Corbin Place, Edmund, Palau, 193','about':'Magna ullamco consectetur velit adipisicing cillum ea. Est qui incididunt est ullamco ex aute exercitation irure. Cupidatat consectetur proident qui fugiat do. Labore magna aliqua consectetur fugiat. Excepteur deserunt sit qui dolor fugiat aute sunt anim ipsum magna ea commodo qui. Minim eu adipisicing ut irure excepteur eiusmod aliqua. 
Voluptate nisi ad consequat qui.','registered':'Tuesday, June 14, 2016 9:26 AM','latitude':'-53.123355','longitude':'88.180776','tags':['non','est','commodo','ut','aliquip'],'greeting':'Hello, Adeline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed945d079f63e3185e','index':37,'guid':'1f4619e0-9289-4bea-a9db-a75f4cba1138','isActive':true,'balance':'$2,019.54','picture':'http://placehold.it/32x32','age':36,'eyeColor':'blue','name':{'first':'Porter','last':'Morse'},'company':'COMVOY','email':'porter.morse@comvoy.me','phone':'+1 (933) 562-3220','address':'416 India Street, Bourg, Rhode Island, 2266','about':'Et sint anim et sunt. Non mollit sunt cillum veniam sunt sint amet non mollit. Fugiat ea ullamco pariatur deserunt ex do minim irure irure.','registered':'Saturday, July 16, 2016 10:03 PM','latitude':'-81.782545','longitude':'69.783509','tags':['irure','consequat','veniam','nulla','velit'],'greeting':'Hello, Porter! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed411dd0f06c66bba6','index':38,'guid':'93c900f0-54c0-4c4c-b21d-d59d8d7c6177','isActive':true,'balance':'$3,764.84','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Fitzgerald','last':'Logan'},'company':'UTARIAN','email':'fitzgerald.logan@utarian.com','phone':'+1 (815) 461-2709','address':'498 Logan Street, Tonopah, Arkansas, 6652','about':'Quis Lorem sit est et dolor est esse in veniam. Mollit anim nostrud laboris consequat voluptate qui ad ipsum sint laborum exercitation quis ipsum. Incididunt cupidatat esse ea amet deserunt consequat eu proident duis adipisicing pariatur. Amet deserunt mollit aliquip mollit consequat sunt quis labore laboris quis. Magna cillum fugiat anim velit Lorem duis. Lorem duis amet veniam occaecat est excepteur ut ea velit esse non pariatur. 
Do veniam quis eu consequat ad duis incididunt minim dolore sit non minim adipisicing et.','registered':'Wednesday, August 9, 2017 9:20 PM','latitude':'24.480657','longitude':'-108.693421','tags':['dolore','ad','occaecat','quis','labore'],'greeting':'Hello, Fitzgerald! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb6f14559d8a7b28','index':39,'guid':'9434f48b-70a0-4161-8d06-c53bf8b9df94','isActive':true,'balance':'$3,713.47','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Mcconnell','last':'Nash'},'company':'TETAK','email':'mcconnell.nash@tetak.org','phone':'+1 (956) 477-3586','address':'853 Turnbull Avenue, Clarence, Missouri, 1599','about':'Culpa excepteur minim anim magna dolor dolore ad ex eu. In cupidatat cillum elit dolore in est minim dolore consectetur reprehenderit voluptate laborum. Deserunt id velit ad dolor mollit.','registered':'Saturday, November 10, 2018 9:27 AM','latitude':'1.691589','longitude':'143.704377','tags':['ut','deserunt','sit','cupidatat','ea'],'greeting':'Hello, Mcconnell! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed1a87ea0390733ffa','index':40,'guid':'ec8a55f7-7114-4787-b1ff-4e631731bc2c','isActive':true,'balance':'$2,200.71','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Kitty','last':'Meyers'},'company':'FIBEROX','email':'kitty.meyers@fiberox.net','phone':'+1 (864) 458-3826','address':'537 Georgia Avenue, Thermal, Illinois, 7930','about':'Non excepteur laboris Lorem magna adipisicing exercitation. Anim esse in pariatur minim ipsum qui voluptate irure. Pariatur Lorem pariatur esse commodo aute adipisicing anim commodo. Exercitation nostrud aliqua duis et amet amet tempor.','registered':'Tuesday, September 13, 2016 8:16 PM','latitude':'19.59506','longitude':'-57.814297','tags':['duis','ullamco','velit','sint','consequat'],'greeting':'Hello, Kitty! 
You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed4dc76717bf1217b3','index':41,'guid':'40521cde-f835-4620-902b-af7abf185d8d','isActive':false,'balance':'$2,907.02','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Klein','last':'Goodwin'},'company':'PLASTO','email':'klein.goodwin@plasto.name','phone':'+1 (950) 563-3104','address':'764 Devoe Street, Lindcove, Oklahoma, 458','about':'Amet aliqua magna ea veniam non aliquip irure esse id ipsum cillum sint tempor dolor. Ullamco deserunt fugiat amet pariatur culpa nostrud commodo commodo. Ad occaecat magna adipisicing voluptate. Minim ad adipisicing cupidatat elit nostrud eu irure. Cupidatat occaecat aute magna consectetur dolore anim et. Ex voluptate velit exercitation laborum ad ullamco ad. Aliquip nulla ipsum dolore cillum qui nostrud eu adipisicing amet tempor do.','registered':'Tuesday, February 13, 2018 3:56 PM','latitude':'-27.168725','longitude':'-29.499285','tags':['minim','labore','do','deserunt','dolor'],'greeting':'Hello, Klein! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1ac77396b29aee9e','index':42,'guid':'7cfc03e3-30e9-4ae1-a1f5-f6c3223ca770','isActive':true,'balance':'$2,986.47','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Isabelle','last':'Bishop'},'company':'GEEKNET','email':'isabelle.bishop@geeknet.us','phone':'+1 (908) 418-2642','address':'729 Willmohr Street, Aguila, Montana, 7510','about':'In nulla commodo nostrud sint. Elit et occaecat et aliqua aliquip magna esse commodo duis Lorem dolor magna enim deserunt. Ipsum pariatur reprehenderit ipsum adipisicing mollit incididunt ut. Sunt in consequat ex ut minim non qui anim labore. Deserunt minim voluptate in nulla occaecat.','registered':'Monday, September 15, 2014 6:22 AM','latitude':'-81.686947','longitude':'38.409291','tags':['proident','est','aliqua','veniam','anim'],'greeting':'Hello, Isabelle! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3a070c9469a4893','index':43,'guid':'3dec76b4-0b55-4765-a2fd-b8dbd9c82f8f','isActive':true,'balance':'$2,501.24','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Josefina','last':'Turner'},'company':'COMSTAR','email':'josefina.turner@comstar.biz','phone':'+1 (908) 566-3029','address':'606 Schenck Place, Brutus, Vermont, 8681','about':'Enim consectetur pariatur sint dolor nostrud est deserunt nulla quis pariatur sit. Ad aute incididunt nisi excepteur duis est velit voluptate ullamco occaecat magna reprehenderit aliquip. Proident deserunt consectetur non et exercitation elit dolore enim aliqua incididunt anim amet. Ex esse sint commodo minim aliqua ut irure. Proident ex culpa voluptate fugiat nisi. Sint commodo laboris excepteur minim ipsum labore tempor quis magna.','registered':'Saturday, December 31, 2016 6:38 AM','latitude':'35.275088','longitude':'24.30485','tags':['minim','ut','irure','Lorem','veniam'],'greeting':'Hello, Josefina! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1aa7d74128ee3d0f','index':44,'guid':'10599279-c367-46c4-9f7a-744c2e4bf6c9','isActive':true,'balance':'$1,753.06','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Lily','last':'Haynes'},'company':'KIOSK','email':'lily.haynes@kiosk.ca','phone':'+1 (872) 451-2301','address':'509 Balfour Place, Grazierville, New Hampshire, 2750','about':'Nisi aliquip occaecat nostrud do sint qui nisi officia Lorem. Ad et et laboris nisi dolore aliqua eu. Aliqua veniam quis eu pariatur incididunt mollit id deserunt officia eiusmod. Consequat adipisicing do nisi voluptate eiusmod minim pariatur minim nisi nostrud culpa cupidatat. Irure consectetur id consequat adipisicing ullamco occaecat do. Ex proident ea quis nulla incididunt sunt excepteur incididunt. 
Aliquip minim nostrud non anim Lorem.','registered':'Tuesday, November 20, 2018 9:28 AM','latitude':'-12.677798','longitude':'114.506787','tags':['culpa','amet','elit','officia','irure'],'greeting':'Hello, Lily! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed74c76f2e84e201ce','index':45,'guid':'ec0a68d4-629e-46c9-9af7-f6ea867f02ba','isActive':true,'balance':'$1,477.93','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Shauna','last':'Pitts'},'company':'SPACEWAX','email':'shauna.pitts@spacewax.info','phone':'+1 (841) 406-2360','address':'348 Tabor Court, Westwood, Puerto Rico, 8297','about':'Aliquip irure officia magna ea magna mollit ea non amet deserunt. Veniam mollit labore culpa magna aliqua quis consequat est consectetur ea reprehenderit nostrud consequat aliqua. Mollit do ipsum mollit eiusmod.','registered':'Thursday, October 2, 2014 2:48 AM','latitude':'-55.17388','longitude':'-13.370494','tags':['anim','consectetur','cillum','veniam','duis'],'greeting':'Hello, Shauna! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed419e718484b16722','index':46,'guid':'b2d6101d-5646-43f4-8207-284494e5a990','isActive':false,'balance':'$2,006.96','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Lawrence','last':'Boyer'},'company':'SKYPLEX','email':'lawrence.boyer@skyplex.tv','phone':'+1 (953) 548-2618','address':'464 Pilling Street, Blandburg, Arizona, 5531','about':'Culpa sit minim pariatur mollit cupidatat sunt duis. Nisi ea proident veniam exercitation adipisicing Lorem aliquip amet dolor voluptate in nisi. Non commodo anim sunt est fugiat laborum nisi aliqua non Lorem exercitation dolor. Laboris dolore do minim ut eiusmod enim magna cillum laborum consectetur aliquip minim enim Lorem. 
Veniam ex veniam occaecat aliquip elit aliquip est eiusmod minim minim adipisicing.','registered':'Wednesday, July 30, 2014 2:17 AM','latitude':'-78.681255','longitude':'139.960626','tags':['consequat','Lorem','incididunt','dolor','esse'],'greeting':'Hello, Lawrence! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed08a9024998292c70','index':47,'guid':'277de142-ebeb-4828-906a-7fd8bc0a738a','isActive':true,'balance':'$1,273.19','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Sonya','last':'Stafford'},'company':'AQUACINE','email':'sonya.stafford@aquacine.co.uk','phone':'+1 (824) 581-3927','address':'641 Bowery Street, Hillsboro, Delaware, 7893','about':'Culpa labore ex reprehenderit mollit cupidatat dolore et ut quis in. Sint esse culpa enim culpa tempor exercitation veniam minim consectetur. Sunt est laboris minim quis incididunt exercitation laboris cupidatat fugiat ad. Deserunt ipsum do dolor cillum excepteur incididunt.','registered':'Thursday, March 26, 2015 1:10 PM','latitude':'-84.750592','longitude':'165.493533','tags':['minim','officia','dolore','ipsum','est'],'greeting':'Hello, Sonya! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5037f2c79ecde68','index':48,'guid':'2dc6532f-9a26-49aa-b444-8923896db89c','isActive':false,'balance':'$3,168.93','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Marguerite','last':'Stuart'},'company':'ACCUFARM','email':'marguerite.stuart@accufarm.io','phone':'+1 (848) 535-2253','address':'301 Menahan Street, Sunnyside, Nebraska, 4809','about':'Deserunt sint labore voluptate amet anim culpa nostrud adipisicing enim cupidatat ullamco exercitation fugiat est. Magna dolor aute incididunt ea ad adipisicing. 
Do cupidatat ut officia officia culpa sit do.','registered':'Thursday, May 8, 2014 1:25 PM','latitude':'21.82277','longitude':'-7.368347','tags':['labore','nulla','ullamco','irure','adipisicing'],'greeting':'Hello, Marguerite! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb26d315635818dae','index':49,'guid':'083a5eda-0a70-4f89-87f7-2cd386c0f22a','isActive':false,'balance':'$2,576.25','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Louella','last':'Holloway'},'company':'BEDDER','email':'louella.holloway@bedder.me','phone':'+1 (801) 425-3761','address':'545 Lafayette Avenue, Caledonia, Louisiana, 2816','about':'Qui exercitation occaecat dolore mollit. Fugiat cupidatat proident culpa fugiat quis. In cupidatat commodo elit ea enim occaecat esse exercitation nostrud occaecat veniam laboris fugiat. Nisi sunt reprehenderit aliqua reprehenderit tempor id dolore ullamco pariatur reprehenderit et eu ex pariatur.','registered':'Wednesday, November 5, 2014 1:10 AM','latitude':'36.385637','longitude':'77.949423','tags':['eu','irure','velit','non','aliquip'],'greeting':'Hello, Louella! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed77cd60a1abc1ecce','index':50,'guid':'2887c3c1-3eba-4237-a0db-1977eed94554','isActive':true,'balance':'$1,633.51','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Bates','last':'Carrillo'},'company':'ZOMBOID','email':'bates.carrillo@zomboid.com','phone':'+1 (934) 405-2006','address':'330 Howard Alley, Troy, Kansas, 4881','about':'Voluptate esse est ullamco anim tempor ea reprehenderit. Occaecat pariatur deserunt cillum laboris labore id exercitation esse ipsum ipsum ex aliquip. Sunt non elit est ea occaecat. Magna deserunt commodo aliqua ipsum est cillum dolor nisi. Ex duis est tempor tempor laboris do do quis id magna. 
Dolor do est elit eu laborum ullamco culpa consequat velit eiusmod tempor.','registered':'Saturday, May 28, 2016 3:56 AM','latitude':'83.310134','longitude':'-105.862836','tags':['est','commodo','ea','commodo','sunt'],'greeting':'Hello, Bates! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5ec0ec299b471fb5','index':51,'guid':'512b5e67-f785-492e-9d94-e43ef8b399b8','isActive':false,'balance':'$3,032.22','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Floyd','last':'Yang'},'company':'FRENEX','email':'floyd.yang@frenex.org','phone':'+1 (924) 566-3304','address':'418 Quay Street, Chumuckla, Guam, 7743','about':'Irure sit velit exercitation dolore est nisi incididunt ut quis consectetur incididunt est dolor. Aute nisi enim esse aliquip enim culpa commodo consectetur. Duis laborum magna ad duis ipsum aliqua eiusmod cillum. Consectetur et duis eiusmod irure ad est nisi incididunt eiusmod labore. Pariatur proident in Lorem adipisicing mollit proident excepteur nulla do nostrud mollit eiusmod. Duis ad dolore irure fugiat anim laboris ipsum et sit duis ipsum voluptate. Lorem non aute exercitation qui ullamco officia minim sint pariatur ut dolor.','registered':'Wednesday, January 18, 2017 2:01 AM','latitude':'45.888721','longitude':'-41.232793','tags':['elit','in','esse','ea','officia'],'greeting':'Hello, Floyd! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51e26ca89e5caf49','index':52,'guid':'4e0907f6-facc-46df-8952-73561a53fe33','isActive':true,'balance':'$3,767.41','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Gardner','last':'Carey'},'company':'KLUGGER','email':'gardner.carey@klugger.net','phone':'+1 (876) 481-3502','address':'131 Utica Avenue, Cannondale, Federated States Of Micronesia, 610','about':'Amet ad pariatur excepteur anim ex officia commodo proident aliqua occaecat consequat Lorem officia sit. 
Id minim velit nisi laboris nisi nulla incididunt eiusmod velit. Deserunt labore quis et tempor. Et labore exercitation laborum officia ullamco nostrud adipisicing laboris esse laborum aute anim elit. Sunt ad officia tempor esse et quis aliquip irure pariatur laborum id quis ex. Eu consequat nisi deserunt id eu proident ex minim aute nulla tempor ex.','registered':'Friday, February 21, 2014 6:42 AM','latitude':'-54.740231','longitude':'15.01484','tags':['commodo','laboris','occaecat','aliquip','adipisicing'],'greeting':'Hello, Gardner! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed52e3c9407105093a','index':53,'guid':'1d3b9e7a-1bc3-40ea-b808-1c33f0d48c70','isActive':true,'balance':'$1,113.30','picture':'http://placehold.it/32x32','age':26,'eyeColor':'blue','name':{'first':'Herman','last':'Rogers'},'company':'TALENDULA','email':'herman.rogers@talendula.name','phone':'+1 (818) 521-2005','address':'541 Norman Avenue, Winfred, Tennessee, 447','about':'Culpa ex laborum non ad ullamco officia. Nisi mollit mollit voluptate sit sint ullamco. Lorem exercitation nulla anim eiusmod deserunt magna sint. Officia sunt eiusmod aliqua reprehenderit sunt mollit sit cupidatat sint.','registered':'Wednesday, July 11, 2018 1:05 AM','latitude':'-20.708105','longitude':'-151.294563','tags':['exercitation','minim','officia','qui','enim'],'greeting':'Hello, Herman! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edfcb123d545b6edb4','index':54,'guid':'c0e0c669-4eed-43ee-bdd0-78fe6e9ca4d5','isActive':true,'balance':'$3,309.64','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Whitley','last':'Stark'},'company':'MUSAPHICS','email':'whitley.stark@musaphics.us','phone':'+1 (803) 476-2151','address':'548 Cobek Court, Chamizal, Indiana, 204','about':'Adipisicing veniam dolor ex sint sit id eu voluptate. Excepteur veniam proident exercitation id eu et sunt pariatur. 
Qui occaecat culpa aliqua nisi excepteur minim veniam. Est duis nulla laborum excepteur cillum pariatur sint incididunt. Velit commodo eu incididunt voluptate. Amet laboris laboris id adipisicing labore eiusmod consequat minim cillum et.','registered':'Thursday, March 27, 2014 9:10 AM','latitude':'71.219596','longitude':'51.012855','tags':['reprehenderit','mollit','laborum','voluptate','aliquip'],'greeting':'Hello, Whitley! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed81510dfc61602fcf','index':55,'guid':'7ec5c24d-f169-4399-a2a3-300c0f45e52e','isActive':false,'balance':'$3,721.04','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Gretchen','last':'Wade'},'company':'EWEVILLE','email':'gretchen.wade@eweville.biz','phone':'+1 (977) 598-3700','address':'721 Colonial Road, Brookfield, South Dakota, 3888','about':'Fugiat consequat sint ut ut et ullamco eiusmod deserunt pariatur. Veniam eiusmod esse fugiat mollit. Proident laboris minim qui do ipsum excepteur exercitation irure anim. Aliqua labore quis eu fugiat dolore ullamco velit Lorem voluptate ipsum nostrud eiusmod laborum proident.','registered':'Friday, October 12, 2018 10:59 AM','latitude':'41.937653','longitude':'63.378531','tags':['aute','cillum','ea','ex','aute'],'greeting':'Hello, Gretchen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf78f77d4a7d557bb','index':56,'guid':'8718ada7-6fd0-49ef-a405-29850503948b','isActive':false,'balance':'$3,341.33','picture':'http://placehold.it/32x32','age':32,'eyeColor':'blue','name':{'first':'Naomi','last':'Frye'},'company':'MAZUDA','email':'naomi.frye@mazuda.ca','phone':'+1 (825) 427-2255','address':'741 Coyle Street, Comptche, Pennsylvania, 8441','about':'Aliqua fugiat laborum quis ullamco cupidatat sit dolor nulla dolore. Do Lorem et ipsum culpa irure sit do dolor qui sit laboris aliqua. 
Ex consectetur irure in veniam reprehenderit amet do elit eiusmod est magna.','registered':'Thursday, January 9, 2014 7:18 AM','latitude':'41.078645','longitude':'-50.241966','tags':['do','aliquip','eiusmod','velit','id'],'greeting':'Hello, Naomi! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbf45db2e072a48b4','index':57,'guid':'c158ebf7-fb8b-4ea8-adbf-8c51c6486715','isActive':true,'balance':'$2,811.55','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Lamb','last':'Johns'},'company':'DOGTOWN','email':'lamb.johns@dogtown.info','phone':'+1 (946) 530-3057','address':'559 Malbone Street, Kennedyville, California, 2052','about':'Eiusmod dolor labore cillum ad veniam elit voluptate voluptate pariatur est cupidatat. Laboris ut qui in cillum sunt dolore ut enim. Minim nostrud ex qui quis reprehenderit magna ipsum cupidatat irure minim laboris veniam irure. Fugiat velit deserunt aliquip in esse proident excepteur labore reprehenderit excepteur sunt in cupidatat exercitation. Ex pariatur irure mollit tempor non magna ex.','registered':'Friday, April 21, 2017 1:51 AM','latitude':'-61.403599','longitude':'-93.447102','tags':['aliquip','tempor','sint','enim','ipsum'],'greeting':'Hello, Lamb! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb9c88190cb59cf2','index':58,'guid':'f0de5ac5-eb28-491b-81c5-76d447c9055e','isActive':true,'balance':'$1,611.99','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Lynette','last':'Cleveland'},'company':'ARTWORLDS','email':'lynette.cleveland@artworlds.tv','phone':'+1 (889) 596-3723','address':'439 Montauk Avenue, Felt, New Mexico, 9681','about':'Incididunt aliquip est aliquip est ullamco do consectetur dolor. Lorem mollit mollit dolor et ipsum ut qui veniam aute ea. Adipisicing reprehenderit culpa velit laborum adipisicing amet consectetur velit nisi. Ut qui proident ad cillum excepteur adipisicing quis labore. 
Duis velit culpa et excepteur eiusmod ex labore in nisi nostrud. Et ullamco minim excepteur ut enim reprehenderit consequat eiusmod laboris Lorem commodo exercitation qui laborum.','registered':'Wednesday, August 26, 2015 12:53 PM','latitude':'49.861336','longitude':'86.865926','tags':['reprehenderit','minim','in','minim','nostrud'],'greeting':'Hello, Lynette! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5b760ddde7295fa8','index':59,'guid':'f8180d3f-c5c0-48b2-966e-a0b2a80f8e84','isActive':true,'balance':'$3,376.75','picture':'http://placehold.it/32x32','age':32,'eyeColor':'green','name':{'first':'Obrien','last':'Page'},'company':'GLASSTEP','email':'obrien.page@glasstep.co.uk','phone':'+1 (902) 583-3086','address':'183 Ridgewood Avenue, Vicksburg, Wisconsin, 7430','about':'Aute excepteur cillum exercitation duis Lorem irure labore elit. Labore magna cupidatat velit consectetur minim do Lorem in excepteur commodo ea consequat ullamco laborum. Ut in id occaecat eu quis duis id ea deserunt veniam.','registered':'Wednesday, March 29, 2017 12:13 AM','latitude':'-40.156154','longitude':'72.76301','tags':['excepteur','non','anim','nulla','anim'],'greeting':'Hello, Obrien! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed52985d3d8901d653','index':60,'guid':'d2e14fa1-8c54-4bcb-8a58-eb2e6f8d0e45','isActive':true,'balance':'$1,659.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'brown','name':{'first':'Knowles','last':'Goodman'},'company':'CENTREE','email':'knowles.goodman@centree.io','phone':'+1 (862) 563-3692','address':'504 Lott Street, Allensworth, Florida, 7148','about':'Do aliquip voluptate aliqua nostrud. Eu dolore ex occaecat pariatur aute laborum aute nulla aute amet. Excepteur sit laboris ad non anim ut officia ut ad exercitation officia dolore laboris. Esse voluptate minim deserunt nostrud exercitation laborum voluptate exercitation id laborum fugiat proident cupidatat proident. 
Nulla nostrud est sint adipisicing incididunt exercitation dolor sit et elit tempor occaecat sint culpa. Pariatur occaecat laboris pariatur laboris ad pariatur in cillum fugiat est fugiat. Proident eu id irure excepteur esse aute cillum adipisicing.','registered':'Wednesday, October 15, 2014 6:17 PM','latitude':'-15.73863','longitude':'87.422009','tags':['consequat','sint','tempor','veniam','culpa'],'greeting':'Hello, Knowles! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda00b73bdb7ea54e9','index':61,'guid':'c8a064db-0ec6-4832-9820-7280a0333709','isActive':true,'balance':'$3,701.14','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Shepherd','last':'Todd'},'company':'ECRATIC','email':'shepherd.todd@ecratic.me','phone':'+1 (881) 444-3389','address':'450 Frank Court, Temperanceville, Ohio, 7006','about':'Voluptate cillum ad fugiat velit adipisicing sint consequat veniam Lorem reprehenderit. Cillum sit non deserunt consequat. Amet sunt pariatur non mollit ullamco proident sint dolore anim elit cupidatat anim do ullamco. Lorem Lorem incididunt ea elit consequat laboris enim duis quis Lorem id aute veniam consequat. Cillum veniam cillum sint qui Lorem fugiat culpa consequat. Est sint duis ut qui fugiat. Laborum pariatur velit et sunt mollit eiusmod excepteur culpa ex et officia.','registered':'Tuesday, October 10, 2017 2:01 AM','latitude':'82.951563','longitude':'-4.866954','tags':['eu','qui','proident','esse','ex'],'greeting':'Hello, Shepherd! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed0e51d1a7e2d9e559','index':62,'guid':'739c3d38-200d-4531-84d8-4e7c39ae5b8c','isActive':true,'balance':'$3,679.01','picture':'http://placehold.it/32x32','age':31,'eyeColor':'brown','name':{'first':'Rosalyn','last':'Heath'},'company':'ZAYA','email':'rosalyn.heath@zaya.com','phone':'+1 (865) 403-3520','address':'303 Henderson Walk, Hoehne, District Of Columbia, 4306','about':'Sint occaecat nulla mollit sint fugiat eu proident dolor labore consequat. Occaecat tempor excepteur do fugiat incididunt Lorem in ullamco dolore laborum. Cillum mollit aliquip excepteur aliquip sint sunt minim non irure irure. Cillum fugiat aliqua enim dolore. Nulla culpa culpa nostrud ad. Eiusmod culpa proident proident non est cupidatat eu sunt sit incididunt id nisi.','registered':'Wednesday, April 22, 2015 12:35 PM','latitude':'33.628504','longitude':'110.772802','tags':['consequat','ut','ex','labore','consectetur'],'greeting':'Hello, Rosalyn! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5274c01d353d0c5','index':63,'guid':'8815fe55-8af1-4708-a62a-d554dbd74a4a','isActive':true,'balance':'$2,126.01','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Queen','last':'Harper'},'company':'TRI@TRIBALOG','email':'queen.harper@tri@tribalog.org','phone':'+1 (903) 592-3145','address':'926 Heath Place, Wawona, Maine, 7340','about':'Laborum cupidatat commodo aliquip reprehenderit. Excepteur eu labore duis minim minim voluptate aute nostrud deserunt ut velit ullamco. Adipisicing nisi occaecat laborum proident. Id reprehenderit eiusmod cupidatat qui aute consequat amet enim commodo duis non ipsum. Amet ut aliqua magna qui proident mollit aute.','registered':'Saturday, April 9, 2016 5:12 AM','latitude':'51.814216','longitude':'177.348115','tags':['cillum','ut','dolor','do','nisi'],'greeting':'Hello, Queen! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed126298b6ce62ed56','index':64,'guid':'001c87fe-182f-450f-903b-2e29a9bb0322','isActive':true,'balance':'$3,578.29','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Pauline','last':'Mills'},'company':'CRUSTATIA','email':'pauline.mills@crustatia.net','phone':'+1 (984) 582-3899','address':'899 Revere Place, Welch, Iowa, 216','about':'Tempor eu exercitation ut id. Deserunt ex reprehenderit veniam nisi. Aute laborum veniam velit dolore ut deserunt Lorem sit esse quis dolor ex do nisi. In dolor tempor officia id. Velit nisi culpa nostrud laborum officia incididunt laborum velit non quis id exercitation exercitation. Anim elit ullamco in enim Lorem culpa aliqua Lorem.','registered':'Monday, June 2, 2014 2:03 PM','latitude':'56.427576','longitude':'172.183669','tags':['pariatur','pariatur','pariatur','fugiat','Lorem'],'greeting':'Hello, Pauline! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e332ad9e8a178d8','index':65,'guid':'5ad7292b-feef-4a7e-b485-142cadfbe8ea','isActive':false,'balance':'$3,916.54','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Garrett','last':'Richmond'},'company':'XYQAG','email':'garrett.richmond@xyqag.name','phone':'+1 (952) 584-3794','address':'233 Grove Street, Summerfield, Virginia, 4735','about':'Nostrud quis pariatur occaecat laborum laboris aliqua ut fugiat dolor. Commodo tempor excepteur enim nostrud Lorem. Aute elit nulla labore ad pariatur cupidatat Lorem qui cupidatat velit deserunt excepteur esse. Excepteur nulla et nostrud quis labore est veniam enim nisi laboris ut enim. Ea esse nulla anim excepteur reprehenderit deserunt voluptate minim qui labore adipisicing amet eu enim.','registered':'Wednesday, March 5, 2014 4:35 PM','latitude':'68.665041','longitude':'148.799524','tags':['irure','reprehenderit','minim','ea','do'],'greeting':'Hello, Garrett! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed541aa2ec47466ace','index':66,'guid':'9cda6f3c-c9ab-451c-bb19-2e4c8463d011','isActive':true,'balance':'$3,352.52','picture':'http://placehold.it/32x32','age':30,'eyeColor':'brown','name':{'first':'Cobb','last':'Whitley'},'company':'UNIA','email':'cobb.whitley@unia.us','phone':'+1 (888) 490-3342','address':'864 Belmont Avenue, Needmore, Massachusetts, 8286','about':'Nisi aliquip fugiat ipsum nisi ullamco minim pariatur labore. Sint labore anim do ad ad esse eu nostrud nulla commodo anim. Cillum anim enim duis cillum non do nisi aliquip veniam voluptate commodo aliqua laborum. Exercitation in do eu qui sint aliquip. Esse adipisicing deserunt deserunt qui anim aliqua occaecat et nostrud elit ea in anim cillum. Tempor mollit proident tempor sunt est sint laborum ullamco incididunt non. Velit aliqua sunt excepteur nisi qui eiusmod ipsum dolore aliquip velit ullamco ullamco.','registered':'Friday, May 23, 2014 7:11 PM','latitude':'-32.950581','longitude':'147.772494','tags':['mollit','adipisicing','irure','ad','minim'],'greeting':'Hello, Cobb! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8186c3d6f34c2be3','index':67,'guid':'fee98f6d-d68a-4189-8180-b6cb337e537e','isActive':false,'balance':'$1,698.42','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Brennan','last':'Tyler'},'company':'PODUNK','email':'brennan.tyler@podunk.biz','phone':'+1 (867) 498-2727','address':'599 Harkness Avenue, Gorst, American Samoa, 322','about':'Reprehenderit id sit qui id qui aute ea sit magna in qui proident. Excepteur ad nostrud do nostrud in incididunt voluptate adipisicing sint anim. Ullamco consequat minim nulla irure ex est irure reprehenderit deserunt voluptate dolore anim sunt. 
Occaecat dolore voluptate voluptate elit commodo nulla laborum ad do irure.','registered':'Friday, February 9, 2018 5:40 PM','latitude':'11.150893','longitude':'-85.298004','tags':['quis','minim','deserunt','cillum','laboris'],'greeting':'Hello, Brennan! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed075c9c4f7439818d','index':68,'guid':'1ef76b18-6b8d-4c3c-aca3-9fa2b43f0242','isActive':false,'balance':'$2,091.17','picture':'http://placehold.it/32x32','age':26,'eyeColor':'brown','name':{'first':'Neal','last':'Stephenson'},'company':'OTHERSIDE','email':'neal.stephenson@otherside.ca','phone':'+1 (820) 496-3344','address':'867 Wilson Street, Kidder, Colorado, 4599','about':'Do laboris enim proident in qui velit adipisicing magna anim. Amet proident non exercitation ipsum aliqua excepteur nostrud. Enim esse non sit in nostrud deserunt id laborum cillum deserunt consequat. Anim velit exercitation qui sit voluptate. Irure duis non veniam velit mollit exercitation id exercitation.','registered':'Thursday, November 13, 2014 11:00 PM','latitude':'54.809693','longitude':'1.877241','tags':['anim','duis','in','officia','sint'],'greeting':'Hello, Neal! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0eda0a2dc24db64b638','index':69,'guid':'194744fd-089b-40b6-a290-98a6ec30a415','isActive':false,'balance':'$3,191.67','picture':'http://placehold.it/32x32','age':24,'eyeColor':'brown','name':{'first':'Shields','last':'Hubbard'},'company':'MIRACULA','email':'shields.hubbard@miracula.info','phone':'+1 (885) 582-2001','address':'529 Eagle Street, Guilford, Nevada, 1460','about':'Eiusmod exercitation ut incididunt veniam commodo culpa ullamco mollit id adipisicing exercitation ad sint. Nostrud excepteur amet aliqua mollit incididunt laborum voluptate id anim. Nulla sint laboris dolor esse cupidatat laborum ex sint. 
Ex non sunt sit nulla.','registered':'Monday, February 13, 2017 6:22 AM','latitude':'-69.145209','longitude':'-40.69755','tags':['tempor','enim','qui','velit','elit'],'greeting':'Hello, Shields! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf939c130177e074d','index':70,'guid':'303b176c-7803-4ed2-a35f-3e3c831793ef','isActive':false,'balance':'$2,359.09','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Coleen','last':'Knight'},'company':'BLEEKO','email':'coleen.knight@bleeko.tv','phone':'+1 (867) 423-3146','address':'527 Broadway , Bonanza, Marshall Islands, 4988','about':'Laboris nulla pariatur laborum ad aute excepteur sunt pariatur exercitation. Do nostrud qui ipsum ullamco et sint do Lorem cillum ullamco do. Exercitation labore excepteur commodo incididunt eiusmod proident consectetur adipisicing nostrud aute voluptate laboris. Commodo anim proident eiusmod pariatur est ea laborum incididunt qui tempor reprehenderit ullamco id. Eiusmod commodo nisi consectetur ut qui quis aliqua sit minim nostrud sunt laborum eiusmod adipisicing.','registered':'Sunday, May 6, 2018 8:03 AM','latitude':'70.729041','longitude':'113.052761','tags':['Lorem','ullamco','nulla','ullamco','commodo'],'greeting':'Hello, Coleen! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edae8b1ce688b61223','index':71,'guid':'7d6f3b1a-c367-4068-9e8e-1717d513ece3','isActive':false,'balance':'$2,911.07','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Clark','last':'Ryan'},'company':'ECLIPSENT','email':'clark.ryan@eclipsent.co.uk','phone':'+1 (938) 562-2740','address':'500 Lewis Avenue, Rockbridge, North Dakota, 5133','about':'Adipisicing exercitation officia sit excepteur excepteur sunt sint amet. Aliqua ipsum sint laboris eiusmod esse culpa elit sunt. Dolore est consectetur est quis quis magna. Aliquip nostrud dolore ex pariatur. Anim nostrud duis exercitation ut magna magna culpa. 
Nisi irure id mollit labore non sit mollit occaecat Lorem est ipsum. Nulla est fugiat cillum nisi aliqua consectetur amet nulla nostrud esse.','registered':'Friday, July 24, 2015 9:28 AM','latitude':'-68.055815','longitude':'-50.926966','tags':['deserunt','ad','ad','ut','id'],'greeting':'Hello, Clark! You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5d1e8df45d8ab4db','index':72,'guid':'ce85db37-7d04-4f4c-a4b0-78003533e5c6','isActive':false,'balance':'$1,127.43','picture':'http://placehold.it/32x32','age':21,'eyeColor':'green','name':{'first':'Dillon','last':'Hooper'},'company':'MEDESIGN','email':'dillon.hooper@medesign.io','phone':'+1 (929) 600-3797','address':'652 Mill Avenue, Elliston, Mississippi, 2958','about':'Dolore culpa qui exercitation nostrud do. Irure duis in ad ipsum aliqua aliquip nulla sit veniam officia quis occaecat est. Magna qui eiusmod pariatur aliquip minim commodo. Qui ex dolor excepteur consequat eiusmod occaecat. In officia ipsum do Lorem excepteur proident pariatur labore.','registered':'Monday, May 26, 2014 2:38 AM','latitude':'-36.032189','longitude':'86.865529','tags':['non','ut','ex','Lorem','quis'],'greeting':'Hello, Dillon! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb84814579c3121b3','index':73,'guid':'d7303901-5186-4595-a759-22306f67d0a3','isActive':true,'balance':'$2,326.59','picture':'http://placehold.it/32x32','age':33,'eyeColor':'green','name':{'first':'Moreno','last':'Hull'},'company':'ZEAM','email':'moreno.hull@zeam.me','phone':'+1 (984) 586-3738','address':'265 Pine Street, Talpa, North Carolina, 6041','about':'Fugiat exercitation est ullamco anim. Exercitation proident id sunt culpa Lorem amet. Consectetur anim consectetur pariatur consequat consectetur amet excepteur voluptate ea velit duis eiusmod proident. In sint laborum cupidatat ea amet ex. 
Reprehenderit amet sunt dolor ullamco est ex deserunt.','registered':'Wednesday, January 24, 2018 8:52 PM','latitude':'84.956857','longitude':'113.210051','tags':['est','excepteur','anim','Lorem','dolor'],'greeting':'Hello, Moreno! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda4eb9dcb92c82d06','index':74,'guid':'8ee28651-802e-4523-b676-c713f6e874b8','isActive':true,'balance':'$3,783.97','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Tracie','last':'Price'},'company':'ICOLOGY','email':'tracie.price@icology.com','phone':'+1 (897) 403-3768','address':'487 Sheffield Avenue, Vallonia, Wyoming, 276','about':'Voluptate laboris laborum aute ex sint voluptate officia proident. Sit esse nostrud cupidatat in veniam sit duis est. Do mollit elit exercitation aliqua id irure ex. Lorem reprehenderit do ullamco sint ea ad nisi ad ut.','registered':'Saturday, December 10, 2016 9:44 AM','latitude':'77.770464','longitude':'151.392903','tags':['incididunt','labore','aliquip','anim','minim'],'greeting':'Hello, Tracie! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed68ab1a55d1c35e6c','index':75,'guid':'deedd26a-8928-4064-9666-5c59ea8144b4','isActive':true,'balance':'$2,848.08','picture':'http://placehold.it/32x32','age':32,'eyeColor':'brown','name':{'first':'Montgomery','last':'Bruce'},'company':'CYTREK','email':'montgomery.bruce@cytrek.org','phone':'+1 (824) 414-2731','address':'397 Beach Place, Ellerslie, South Carolina, 967','about':'Mollit minim excepteur magna velit cillum excepteur exercitation anim id labore deserunt do. Fugiat ex et id ad. Duis excepteur laboris est nulla do id irure quis eiusmod do esse ut culpa in.','registered':'Tuesday, August 25, 2015 6:42 AM','latitude':'79.722631','longitude':'-7.516885','tags':['Lorem','sint','voluptate','proident','incididunt'],'greeting':'Hello, Montgomery! 
You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd90e0abb1cc2b0aa','index':76,'guid':'a072159d-12db-4747-9c2a-e2486a53d043','isActive':false,'balance':'$2,723.54','picture':'http://placehold.it/32x32','age':40,'eyeColor':'green','name':{'first':'Zelma','last':'Salinas'},'company':'IMAGEFLOW','email':'zelma.salinas@imageflow.net','phone':'+1 (964) 555-3856','address':'584 Reeve Place, Nord, Georgia, 7473','about':'Aliqua proident excepteur duis cupidatat cillum amet esse esse consectetur ea. Officia sunt consequat nostrud minim enim dolore dolor duis cillum. Esse labore veniam sint laborum excepteur sint tempor do ad cupidatat aliquip laboris elit id. Velit reprehenderit ullamco velit ullamco adipisicing velit esse irure velit et.','registered':'Thursday, February 25, 2016 8:18 PM','latitude':'-32.880524','longitude':'115.180489','tags':['id','nulla','reprehenderit','consequat','reprehenderit'],'greeting':'Hello, Zelma! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed98d836c8da283bb2','index':77,'guid':'838bebad-cc20-44e9-9eb7-902a8ca25efb','isActive':false,'balance':'$3,488.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Shaw','last':'Parsons'},'company':'PEARLESEX','email':'shaw.parsons@pearlesex.name','phone':'+1 (912) 567-3580','address':'606 Ocean Avenue, Tyro, Northern Mariana Islands, 3367','about':'Laborum labore occaecat culpa pariatur nisi non adipisicing esse consectetur officia officia. Deserunt velit eu enim consectetur ut cillum aliqua occaecat dolor qui esse. Incididunt ad est ex eu culpa anim aliquip laborum. Aliqua consectetur velit exercitation magna minim nulla do ut excepteur enim aliquip et. Nostrud enim sunt amet amet proident aliqua velit dolore. Consectetur ipsum fugiat proident id est reprehenderit tempor irure commodo. 
Sit excepteur fugiat occaecat nulla Lorem et cillum.','registered':'Thursday, April 19, 2018 1:41 AM','latitude':'69.715573','longitude':'-118.481237','tags':['laboris','adipisicing','magna','voluptate','id'],'greeting':'Hello, Shaw! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1101734633c6ebba','index':78,'guid':'8fd0c52a-9d74-4984-a608-d612ecd8ddf0','isActive':true,'balance':'$3,820.02','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Jaime','last':'Beard'},'company':'IZZBY','email':'jaime.beard@izzby.us','phone':'+1 (820) 412-3806','address':'362 Hudson Avenue, Delco, New Jersey, 5684','about':'Ut cupidatat veniam nulla magna commodo sit duis veniam consectetur cupidatat elit quis tempor. Duis officia ullamco proident sunt non mollit excepteur. Nisi ex amet laboris proident duis reprehenderit et est aliqua mollit amet ad. Enim eu elit excepteur eu exercitation duis consequat culpa. Adipisicing reprehenderit duis Lorem reprehenderit dolor aliqua incididunt eiusmod consequat ad occaecat fugiat do laborum. Qui ad aliquip ex do sunt. Fugiat non ut fugiat eu.','registered':'Sunday, March 9, 2014 3:41 PM','latitude':'17.926318','longitude':'108.985996','tags':['ut','voluptate','veniam','non','commodo'],'greeting':'Hello, Jaime! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edcd125a89dcf18e0d','index':79,'guid':'eccaa4ca-0fa7-4b00-a1e3-fe7953403894','isActive':true,'balance':'$1,521.33','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Terra','last':'Sullivan'},'company':'ZANITY','email':'terra.sullivan@zanity.biz','phone':'+1 (995) 498-2714','address':'346 Congress Street, Tuttle, Maryland, 3152','about':'Incididunt enim veniam ut veniam quis dolore pariatur culpa ex. Cillum laboris dolor exercitation officia. Officia irure magna aliqua veniam officia ullamco culpa. Cillum enim velit ea sint sint officia labore ea adipisicing culpa laboris. 
Anim aute sint commodo culpa ex quis minim ut laborum.','registered':'Sunday, June 1, 2014 5:38 AM','latitude':'-4.655435','longitude':'5.851803','tags':['anim','non','anim','laborum','pariatur'],'greeting':'Hello, Terra! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed9b9fc3041a674c87','index':80,'guid':'9f95fa36-4e45-4c3f-9362-3d4d809bf57f','isActive':true,'balance':'$3,403.16','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Sharpe','last':'Berger'},'company':'ZILLAN','email':'sharpe.berger@zillan.ca','phone':'+1 (913) 498-3005','address':'277 Bragg Street, Faywood, Texas, 6487','about':'Dolor duis id aute ea veniam amet ullamco id. Culpa deserunt irure mollit tempor dolore veniam culpa officia culpa laborum eiusmod. Ullamco tempor qui aliqua cupidatat veniam cillum eu ut ex minim eu in. Quis exercitation anim eiusmod tempor esse mollit exercitation cillum ipsum reprehenderit. Sint voluptate ipsum officia sint magna nulla tempor eiusmod eiusmod veniam. Consectetur non ad veniam exercitation voluptate non nostrud.','registered':'Tuesday, June 27, 2017 12:58 AM','latitude':'-0.54085','longitude':'106.258693','tags':['proident','eiusmod','commodo','excepteur','pariatur'],'greeting':'Hello, Sharpe! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1a1866757bf675e0','index':81,'guid':'1b944a01-01d3-4846-94e3-630f4d0e51a3','isActive':true,'balance':'$2,038.61','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Blanchard','last':'Ewing'},'company':'CONJURICA','email':'blanchard.ewing@conjurica.info','phone':'+1 (859) 593-3212','address':'252 Beaver Street, Kiskimere, Utah, 3255','about':'Labore magna aute adipisicing ut dolor sit ea. Officia culpa aute occaecat sit ex ullamco aliquip ad sit culpa. Ex in enim dolore ex est sit. Do irure nulla magna sint aliquip in duis aute. 
Magna ullamco sit labore ea tempor voluptate.','registered':'Monday, May 4, 2015 10:50 AM','latitude':'76.207595','longitude':'0.672563','tags':['proident','pariatur','officia','in','culpa'],'greeting':'Hello, Blanchard! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed987d82f4e22d939c','index':82,'guid':'97a90aee-3cee-4678-819e-24fb94279dc1','isActive':false,'balance':'$1,201.55','picture':'http://placehold.it/32x32','age':28,'eyeColor':'blue','name':{'first':'Wells','last':'Solomon'},'company':'CORPULSE','email':'wells.solomon@corpulse.tv','phone':'+1 (840) 539-3349','address':'159 Radde Place, Linganore, Idaho, 230','about':'Consequat dolore mollit sit irure cupidatat commodo. Incididunt cillum reprehenderit ullamco sit proident cupidatat occaecat reprehenderit officia. Ad anim Lorem elit in officia minim proident nisi commodo eiusmod ea Lorem dolore voluptate. Dolor aliquip est commodo Lorem dolor ut aliquip ut. Sit anim officia dolore excepteur aute enim cillum.','registered':'Friday, January 6, 2017 1:59 PM','latitude':'70.020883','longitude':'14.503588','tags':['mollit','aute','officia','nostrud','laboris'],'greeting':'Hello, Wells! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf7a904ea0d0bc2a','index':83,'guid':'fe639a0c-7517-43e6-b0da-cd9ca5b9e267','isActive':false,'balance':'$3,664.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'blue','name':{'first':'Natalia','last':'Brown'},'company':'SYNTAC','email':'natalia.brown@syntac.co.uk','phone':'+1 (952) 595-3513','address':'332 Lenox Road, Springville, Alabama, 8406','about':'Nulla consequat officia commodo ea sunt irure anim velit aliquip aliquip. Labore ullamco occaecat proident voluptate cillum labore minim nostrud excepteur. Qui fugiat nostrud cillum fugiat ullamco id commodo aliqua voluptate mollit id id laboris. Cillum qui duis duis sit adipisicing elit ut aliqua eu. 
Anim nisi aliqua sit mollit.','registered':'Sunday, July 30, 2017 1:02 PM','latitude':'31.937613','longitude':'-9.957927','tags':['magna','adipisicing','exercitation','tempor','consectetur'],'greeting':'Hello, Natalia! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed8823fa385cad4aa3','index':84,'guid':'5cf280da-f5f0-4cc6-9063-e9d5863c8c89','isActive':false,'balance':'$1,624.17','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Greene','last':'Waller'},'company':'ISOTRACK','email':'greene.waller@isotrack.io','phone':'+1 (838) 406-3608','address':'362 Albemarle Road, Gardiner, Michigan, 2764','about':'Ut nisi sit sint nulla dolor magna. Culpa occaecat adipisicing veniam proident excepteur tempor quis ex. Fugiat tempor laborum dolor adipisicing irure anim cupidatat ut exercitation ex sit. Cupidatat exercitation commodo sunt ex irure fugiat eu esse do ullamco mollit dolore cupidatat. Cupidatat magna incididunt officia dolore esse voluptate deserunt in laborum dolor. Sit fugiat Lorem eu ullamco. Laboris veniam quis cillum tempor ex fugiat cillum cupidatat.','registered':'Sunday, June 10, 2018 10:32 PM','latitude':'0.256921','longitude':'-96.141941','tags':['magna','dolore','deserunt','aliquip','cillum'],'greeting':'Hello, Greene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda7c905c2d24c7d31','index':85,'guid':'aa30a9fb-8a16-48eb-8bb7-1307d1e1f191','isActive':false,'balance':'$1,974.04','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Carlene','last':'Hanson'},'company':'DIGIRANG','email':'carlene.hanson@digirang.me','phone':'+1 (981) 417-3209','address':'435 Clark Street, Choctaw, Oregon, 9888','about':'Amet labore esse cillum irure laborum consectetur occaecat non aliquip aliquip proident. Nisi magna nulla officia duis labore aute nulla laborum duis tempor minim. 
Velit elit reprehenderit nisi exercitation officia incididunt amet cupidatat excepteur proident consectetur.','registered':'Thursday, April 20, 2017 6:13 AM','latitude':'68.529086','longitude':'68.802409','tags':['pariatur','nulla','qui','amet','labore'],'greeting':'Hello, Carlene! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed6fbee12ce9e55dbf','index':86,'guid':'0fce89aa-3310-48df-862a-68bd3d776644','isActive':false,'balance':'$3,909.64','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Doris','last':'Collins'},'company':'ZIORE','email':'doris.collins@ziore.com','phone':'+1 (914) 405-2360','address':'301 Lorraine Street, Stouchsburg, Minnesota, 7476','about':'Nisi deserunt aliquip et deserunt ipsum ad consectetur est non ullamco. Dolore do ut voluptate do eiusmod. Culpa ad in eiusmod nisi cillum do. Officia magna cillum sint aliqua reprehenderit amet est ipsum. Eiusmod deserunt commodo proident consequat. Amet minim dolor consequat aliquip aliquip culpa non exercitation non.','registered':'Wednesday, February 25, 2015 9:15 PM','latitude':'-57.364906','longitude':'130.766587','tags':['nulla','deserunt','cillum','eiusmod','adipisicing'],'greeting':'Hello, Doris! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edede9402476c398c0','index':87,'guid':'60cf0aa6-bc6d-4305-8842-d27e6af1306f','isActive':false,'balance':'$2,817.53','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Cline','last':'Hayden'},'company':'ECRAZE','email':'cline.hayden@ecraze.org','phone':'+1 (965) 507-2138','address':'352 Rutland Road, Ebro, Connecticut, 1196','about':'Dolor eiusmod enim anim sit enim ea tempor. Tempor amet consectetur aliquip culpa do ex excepteur deserunt. Dolor commodo veniam culpa sint. 
Commodo consectetur pariatur irure nisi deserunt cillum est dolor ipsum ea.','registered':'Thursday, September 29, 2016 5:58 AM','latitude':'62.50713','longitude':'86.247286','tags':['enim','tempor','anim','veniam','proident'],'greeting':'Hello, Cline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edeb72f151994a551b','index':88,'guid':'dbb49c62-86b1-409f-b8b8-f609c709d2a8','isActive':false,'balance':'$3,122.56','picture':'http://placehold.it/32x32','age':39,'eyeColor':'green','name':{'first':'Janelle','last':'Rutledge'},'company':'TERRAGEN','email':'janelle.rutledge@terragen.net','phone':'+1 (914) 581-3749','address':'170 Falmouth Street, Alderpoint, West Virginia, 642','about':'Laboris proident cillum sunt qui ea sunt. Officia adipisicing exercitation dolore magna reprehenderit amet anim id. Laboris commodo sit irure irure. Excepteur est mollit fugiat incididunt consectetur veniam irure ea mollit. Cillum enim consequat sunt sunt nisi incididunt tempor enim.','registered':'Monday, February 16, 2015 5:46 AM','latitude':'-46.392023','longitude':'32.054562','tags':['eu','eu','nisi','labore','deserunt'],'greeting':'Hello, Janelle! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edc9c2604846ff9a0d','index':89,'guid':'c4d7a365-f1d3-4584-b78e-008394c219f7','isActive':true,'balance':'$1,807.19','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Abby','last':'Lopez'},'company':'GRAINSPOT','email':'abby.lopez@grainspot.name','phone':'+1 (917) 442-3955','address':'488 Kensington Walk, Winston, Hawaii, 9109','about':'Incididunt deserunt Lorem proident magna tempor enim quis duis eu ut adipisicing in. Ex mollit non irure aliqua officia. Fugiat id ipsum consequat irure id ullamco culpa quis nulla enim aliquip consequat et. Dolor ut anim velit irure consequat cillum eu. 
Aute occaecat laborum est aliqua.','registered':'Sunday, April 1, 2018 11:28 PM','latitude':'-10.177041','longitude':'-165.756718','tags':['est','laborum','culpa','non','quis'],'greeting':'Hello, Abby! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed03237438b158af9e','index':90,'guid':'36c4a19f-2d00-4e40-bd49-155fd2ce0a6c','isActive':false,'balance':'$2,757.86','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Whitney','last':'Sheppard'},'company':'ANACHO','email':'whitney.sheppard@anacho.us','phone':'+1 (922) 437-2383','address':'951 Beekman Place, Homeworth, New York, 6088','about':'Sint minim nisi minim non minim aliqua pariatur ullamco do sint qui labore. Aute elit reprehenderit ad do fugiat est amet. In incididunt tempor commodo cillum tempor est labore anim.','registered':'Tuesday, September 13, 2016 6:43 PM','latitude':'-49.732527','longitude':'-171.846715','tags':['exercitation','veniam','sunt','est','proident'],'greeting':'Hello, Whitney! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edb99dd3aa53d2cb7f','index':91,'guid':'17afd430-f37f-4d55-958c-72f35cdb5997','isActive':false,'balance':'$3,683.86','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Ilene','last':'Blackwell'},'company':'ENQUILITY','email':'ilene.blackwell@enquility.biz','phone':'+1 (817) 555-2616','address':'950 Varanda Place, Belgreen, Virgin Islands, 1765','about':'Id eiusmod deserunt eiusmod adipisicing adipisicing est enim pariatur esse duis. Qui velit duis irure magna consectetur dolore reprehenderit. Cillum dolore minim consectetur irure non qui velit cillum veniam adipisicing incididunt. Deserunt veniam excepteur veniam velit aliquip labore quis exercitation magna do non dolor. Aliquip occaecat minim adipisicing deserunt fugiat nulla occaecat proident irure consectetur eiusmod irure. 
Enim Lorem deserunt amet Lorem commodo eiusmod reprehenderit occaecat adipisicing dolor voluptate cillum.','registered':'Thursday, February 1, 2018 8:39 AM','latitude':'57.393644','longitude':'-3.704258','tags':['adipisicing','dolor','commodo','Lorem','Lorem'],'greeting':'Hello, Ilene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed353f4deb62c3342a','index':92,'guid':'9953e285-2095-4f1c-978b-9ece2a867e9d','isActive':false,'balance':'$1,202.44','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Dawson','last':'Herman'},'company':'BITENDREX','email':'dawson.herman@bitendrex.ca','phone':'+1 (843) 522-2655','address':'471 Channel Avenue, Denio, Alaska, 5040','about':'Nisi occaecat mollit reprehenderit nisi minim Lorem mollit. Ea proident irure cillum quis. Deserunt consectetur consectetur consequat quis enim minim ea ipsum proident nisi ad non aliquip. Veniam aute minim consequat irure voluptate aute amet excepteur exercitation cillum duis quis adipisicing nostrud.','registered':'Tuesday, December 8, 2015 5:40 PM','latitude':'-55.602721','longitude':'-26.683234','tags':['qui','dolor','deserunt','eiusmod','labore'],'greeting':'Hello, Dawson! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5464bc50a5310ad','index':93,'guid':'724b2434-4dbd-417d-aa07-6065715f434f','isActive':false,'balance':'$1,595.98','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Alice','last':'Christian'},'company':'ZENOLUX','email':'alice.christian@zenolux.info','phone':'+1 (954) 466-2650','address':'875 Gerritsen Avenue, Townsend, Kentucky, 6568','about':'Nulla labore occaecat ex culpa magna. Commodo occaecat et in consequat cillum laborum magna adipisicing excepteur. Do ut Lorem esse voluptate officia ea aliquip proident amet veniam minim nulla adipisicing. Enim consectetur incididunt laborum voluptate tempor deserunt non laboris. 
Aliquip deserunt aute irure dolore magna anim aliquip sint magna Lorem. Officia laboris nulla officia sint labore nisi. Do Lorem id in est esse adipisicing id fugiat enim esse laborum.','registered':'Wednesday, October 3, 2018 9:26 PM','latitude':'-88.790637','longitude':'138.817328','tags':['duis','ea','magna','ea','incididunt'],'greeting':'Hello, Alice! You have 8 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda01886247b6a4f3d','index':94,'guid':'17c9f4d3-7d72-44e3-8f7c-08d7de920f46','isActive':false,'balance':'$3,173.29','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Schwartz','last':'Mccormick'},'company':'EVIDENDS','email':'schwartz.mccormick@evidends.tv','phone':'+1 (924) 531-2802','address':'160 Midwood Street, Indio, Palau, 4241','about':'Anim reprehenderit et et adipisicing voluptate consequat elit. Sint Lorem laboris Lorem minim nostrud aute reprehenderit elit aute quis nulla. Officia aute eiusmod mollit cillum eu aliquip non enim ea occaecat quis fugiat occaecat officia. Eiusmod culpa exercitation dolor aliqua enim occaecat nisi cupidatat duis ex dolore id. Id consequat aliqua cupidatat ut. Sit nisi est sunt culpa ullamco excepteur sunt pariatur incididunt amet. Ut tempor duis velit eu ut id culpa aute anim occaecat labore.','registered':'Thursday, March 2, 2017 5:57 PM','latitude':'38.618587','longitude':'-165.142529','tags':['ad','reprehenderit','magna','elit','mollit'],'greeting':'Hello, Schwartz! 
You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51be4df456ec2bc9','index':95,'guid':'44f68f65-959b-4ec2-bd2a-1f30035f76fc','isActive':false,'balance':'$3,242.24','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Bonita','last':'Stevens'},'company':'SLOFAST','email':'bonita.stevens@slofast.co.uk','phone':'+1 (886) 473-2105','address':'459 Bushwick Court, Kilbourne, Rhode Island, 9450','about':'Consequat reprehenderit qui reprehenderit nisi sit est in qui aliquip amet. Ex deserunt cupidatat amet cillum eiusmod irure anim in amet proident voluptate. Ad officia culpa in non incididunt do.','registered':'Saturday, August 22, 2015 5:23 AM','latitude':'60.013542','longitude':'58.242132','tags':['aute','adipisicing','in','cillum','officia'],'greeting':'Hello, Bonita! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed50a55e3587993f68','index':96,'guid':'652e434f-221e-4899-af12-38dca5c9621d','isActive':false,'balance':'$2,720.06','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Charmaine','last':'Jackson'},'company':'FLUM','email':'charmaine.jackson@flum.io','phone':'+1 (947) 573-2692','address':'788 Windsor Place, Highland, Arkansas, 8869','about':'Dolore reprehenderit irure excepteur eu reprehenderit sint Lorem ut amet in. Consequat anim elit sunt aliquip incididunt. Culpa consequat do exercitation dolor enim dolor sunt sit excepteur ad anim. Dolor aute elit velit mollit minim eu.','registered':'Wednesday, April 6, 2016 7:54 PM','latitude':'25.756553','longitude':'-5.482531','tags':['amet','sint','consequat','est','ex'],'greeting':'Hello, Charmaine! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed213621949bbdd5d3','index':97,'guid':'7d7d93d8-3e37-4b4a-9fa2-591fb7d153ce','isActive':true,'balance':'$1,370.63','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Petersen','last':'Cooley'},'company':'ROTODYNE','email':'petersen.cooley@rotodyne.me','phone':'+1 (929) 563-3339','address':'338 Pioneer Street, Carbonville, Missouri, 3030','about':'Cillum elit dolore labore aute. Cillum ea incididunt cupidatat consequat sint eu mollit. Excepteur commodo eiusmod ex Lorem enim velit minim.','registered':'Friday, December 8, 2017 5:53 AM','latitude':'-10.576254','longitude':'-111.176861','tags':['veniam','eu','eiusmod','dolore','voluptate'],'greeting':'Hello, Petersen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e938138d58ed453','index':98,'guid':'d6fea4a3-03f6-46ee-90b9-8ec51a585e29','isActive':true,'balance':'$1,216.54','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Rosanne','last':'Terry'},'company':'EXTREMO','email':'rosanne.terry@extremo.com','phone':'+1 (812) 496-2691','address':'368 Rockaway Avenue, Gloucester, Illinois, 7913','about':'Duis et nostrud duis quis minim eiusmod culpa do ea ad pariatur tempor. Velit veniam aliqua aliquip est enim ex et culpa dolor ullamco culpa officia. Eu id occaecat aute cillum aute sit aute laboris ipsum voluptate ex. Amet tempor minim tempor Lorem quis dolore. Pariatur consequat dolore nulla veniam dolor exercitation consequat nulla laboris incididunt do. Dolore do tempor deserunt exercitation incididunt officia incididunt ut do reprehenderit do eiusmod nulla.','registered':'Sunday, August 6, 2017 12:46 PM','latitude':'-43.257964','longitude':'-45.147686','tags':['et','incididunt','esse','commodo','ipsum'],'greeting':'Hello, Rosanne! 
You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed632b1a1d65501d6b','index':99,'guid':'bf8c6ac1-ee18-48ee-ae94-ea515a53c951','isActive':true,'balance':'$2,905.58','picture':'http://placehold.it/32x32','age':21,'eyeColor':'blue','name':{'first':'Irene','last':'Castro'},'company':'POLARIA','email':'irene.castro@polaria.org','phone':'+1 (818) 417-3761','address':'901 Dupont Street, Sperryville, Oklahoma, 953','about':'Pariatur minim laboris aliqua dolor aliquip consequat ea do duis voluptate id Lorem. In reprehenderit et adipisicing anim elit incididunt velit in laborum laborum. Qui minim magna et amet sit do voluptate reprehenderit ea sit sint velit.','registered':'Tuesday, August 18, 2015 10:48 AM','latitude':'-7.004055','longitude':'116.052433','tags':['sit','proident','enim','ullamco','non'],'greeting':'Hello, Irene! You have 10 unread messages.','favoriteFruit':'apple'}]" + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/plan-no-changes/main.tf b/pkg/backend/remote/testdata/plan-no-changes/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-no-changes/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-no-changes/plan.log b/pkg/backend/remote/testdata/plan-no-changes/plan.log new file mode 100644 index 00000000000..70416815133 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-no-changes/plan.log @@ -0,0 +1,17 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. 
+ +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. diff --git a/pkg/backend/remote/testdata/plan-no-changes/policy.log b/pkg/backend/remote/testdata/plan-no-changes/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-no-changes/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/plan-policy-hard-failed/main.tf b/pkg/backend/remote/testdata/plan-policy-hard-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-policy-hard-failed/plan.log b/pkg/backend/remote/testdata/plan-policy-hard-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/plan-policy-hard-failed/policy.log b/pkg/backend/remote/testdata/plan-policy-hard-failed/policy.log new file mode 100644 index 00000000000..5d6e6935b93 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/plan-policy-passed/main.tf b/pkg/backend/remote/testdata/plan-policy-passed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-policy-passed/plan.log b/pkg/backend/remote/testdata/plan-policy-passed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/plan-policy-passed/policy.log b/pkg/backend/remote/testdata/plan-policy-passed/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/plan-policy-soft-failed/main.tf b/pkg/backend/remote/testdata/plan-policy-soft-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-policy-soft-failed/plan.log b/pkg/backend/remote/testdata/plan-policy-soft-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/plan-policy-soft-failed/policy.log b/pkg/backend/remote/testdata/plan-policy-soft-failed/policy.log new file mode 100644 index 00000000000..3e4ebedf617 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/backend/remote/testdata/plan-variables/main.tf b/pkg/backend/remote/testdata/plan-variables/main.tf new file mode 100644 index 00000000000..955e8b4c09a --- /dev/null +++ b/pkg/backend/remote/testdata/plan-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-variables/plan.log b/pkg/backend/remote/testdata/plan-variables/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/backend/remote/testdata/plan-with-error/main.tf b/pkg/backend/remote/testdata/plan-with-error/main.tf new file mode 100644 index 00000000000..bc45f28f563 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/pkg/backend/remote/testdata/plan-with-error/plan.log b/pkg/backend/remote/testdata/plan-with-error/plan.log new file mode 100644 index 00000000000..4344a372290 --- /dev/null +++ b/pkg/backend/remote/testdata/plan-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/pkg/backend/remote/testdata/plan-with-working-directory/tofu/main.tf b/pkg/backend/remote/testdata/plan-with-working-directory/tofu/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan-with-working-directory/tofu/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan-with-working-directory/tofu/plan.log b/pkg/backend/remote/testdata/plan-with-working-directory/tofu/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan-with-working-directory/tofu/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/plan/main.tf b/pkg/backend/remote/testdata/plan/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/backend/remote/testdata/plan/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/backend/remote/testdata/plan/plan.log b/pkg/backend/remote/testdata/plan/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/backend/remote/testdata/plan/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/backend/remote/testdata/variables/main.tf b/pkg/backend/remote/testdata/variables/main.tf new file mode 100644 index 00000000000..9e1a0a40ff6 --- /dev/null +++ b/pkg/backend/remote/testdata/variables/main.tf @@ -0,0 +1,8 @@ +variable "key1" { +} + +variable "key2" { +} + +variable "key3" { +} diff --git a/pkg/backend/remote/testing.go b/pkg/backend/remote/testing.go new file mode 100644 index 00000000000..74508f370b5 --- /dev/null +++ b/pkg/backend/remote/testing.go @@ -0,0 +1,330 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "path" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +const ( + testCred = "test-auth-token" +) + +var ( + mockedBackendHost = "app.example.com" + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + svchost.Hostname(mockedBackendHost): {"token": testCred}, + }) +) + +// mockInput is a mock implementation of tofu.UIInput. 
+type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(ctx context.Context, opts *tofu.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + if v == "wait-for-external-update" { + select { + case <-ctx.Done(): + case <-time.After(time.Minute): + } + } + delete(m.answers, opts.Id) + return v, nil +} + +func testInput(t *testing.T, answers map[string]string) *mockInput { + return &mockInput{answers: answers} +} + +func testBackendDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoOperations(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(mockedBackendHost), + "organization": cty.StringVal("no-operations"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testRemoteClient(t *testing.T) remote.Client { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + raw, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("error: %v", err) + } + + return 
raw.(*remote.State).Client +} + +func testBackend(t *testing.T, obj cty.Value) (*Remote, func()) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Configure the backend so the client is created. + newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatal(valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatal(confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := cloud.NewMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.RunEvents = mc.RunEvents + b.client.StateVersions = mc.StateVersions + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + + ctx := context.Background() + + // Create the organization. + _, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. + if b.workspace != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.workspace), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b, s.Close +} + +func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { + b := backendLocal.NewWithBackend(remote, nil) + + // Add a test provider to the local backend. 
+ p := backendLocal.TestLocalProvider(t, b, "null", providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "null_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + +// testServer returns a *httptest.Server used for local testing. +func testServer(t *testing.T) *httptest.Server { + mux := http.NewServeMux() + + // Respond to service discovery calls. + mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{ + "state.v2": "/api/v2/", + "tfe.v2.1": "/api/v2/", + "versions.v1": "/v1/versions/" +}`) + }) + + // Respond to service version constraints calls. + mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, fmt.Sprintf(`{ + "service": "%s", + "product": "terraform", + "minimum": "0.1.0", + "maximum": "10.0.0" +}`, path.Base(r.URL.Path))) + }) + + // Respond to pings to get the API version header. + mux.HandleFunc("/api/v2/ping", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }) + + // Respond to the initial query to read the hashicorp org entitlements. 
+ mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-GExadygjSbKP8hsY", + "type": "entitlement-sets", + "attributes": { + "operations": true, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // Respond to the initial query to read the no-operations org entitlements. + mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-ufxa3y8jSbKP8hsT", + "type": "entitlement-sets", + "attributes": { + "operations": false, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. + mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }) + + return httptest.NewServer(mux) +} + +// testDisco returns a *disco.Disco mapping to mockedBackendHost and +// localhost to a local test server. 
+func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "state.v2": fmt.Sprintf("%s/api/v2/", s.URL), + "tfe.v2.1": fmt.Sprintf("%s/api/v2/", s.URL), + "versions.v1": fmt.Sprintf("%s/v1/versions/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + d.SetUserAgent(httpclient.OpenTofuUserAgent(version.String())) + + d.ForceHostServices(svchost.Hostname(mockedBackendHost), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + return d +} + +type unparsedVariableValue struct { + value string + source tofu.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + return &tofu.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. +func testVariables(s tofu.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/pkg/backend/testing.go b/pkg/backend/testing.go new file mode 100644 index 00000000000..40ec5182eba --- /dev/null +++ b/pkg/backend/testing.go @@ -0,0 +1,430 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package backend + +import ( + "reflect" + "sort" + "testing" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// TestBackendConfig validates and configures the backend with the +// given configuration. +func TestBackendConfig(t *testing.T, b Backend, c hcl.Body) Backend { + t.Helper() + + t.Logf("TestBackendConfig on %T with %#v", b, c) + + var diags tfdiags.Diagnostics + + // To make things easier for test authors, we'll allow a nil body here + // (even though that's not normally valid) and just treat it as an empty + // body. + if c == nil { + c = hcl.EmptyBody() + } + + schema := b.ConfigSchema() + spec := schema.DecoderSpec() + obj, decDiags := hcldec.Decode(c, spec, nil) + diags = diags.Append(decDiags) + + newObj, valDiags := b.PrepareConfig(obj) + diags = diags.Append(valDiags.InConfigBody(c, "")) + + // it's valid for a Backend to have warnings (e.g. a Deprecation) as such we should only raise on errors + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + confDiags = confDiags.InConfigBody(c, "") + t.Fatal(confDiags.ErrWithWarnings()) + } + + return b +} + +// TestWrapConfig takes a raw data structure and converts it into a +// synthetic hcl.Body to use for testing. +// +// The given structure should only include values that can be accepted by +// hcl2shim.HCL2ValueFromConfigValue. If incompatible values are given, +// this function will panic. 
+func TestWrapConfig(raw map[string]interface{}) hcl.Body { + obj := hcl2shim.HCL2ValueFromConfigValue(raw) + return configs.SynthBody("", obj.AsValueMap()) +} + +// TestBackend will test the functionality of a Backend. The backend is +// assumed to already be configured. This will test state functionality. +// If the backend reports it doesn't support multi-state by returning the +// error ErrWorkspacesNotSupported, then it will not test that. +func TestBackendStates(t *testing.T, b Backend) { + t.Helper() + + noDefault := false + if _, err := b.StateMgr(DefaultStateName); err != nil { + if err == ErrDefaultWorkspaceNotSupported { + noDefault = true + } else { + t.Fatalf("error: %v", err) + } + } + + workspaces, err := b.Workspaces() + if err != nil { + if err == ErrWorkspacesNotSupported { + t.Logf("TestBackend: workspaces not supported in %T, skipping", b) + return + } + t.Fatalf("error: %v", err) + } + + // Test it starts with only the default + if !noDefault && (len(workspaces) != 1 || workspaces[0] != DefaultStateName) { + t.Fatalf("should only have the default workspace to start: %#v", workspaces) + } + + // Create a couple states + foo, err := b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + + bar, err := b.StateMgr("bar") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := bar.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + + // Verify they are distinct states that can be read back from storage + { + // We'll use two distinct states here and verify that changing one + // does not also change the other. 
+ fooState := states.NewState() + barState := states.NewState() + + // write a known state to foo + if err := foo.WriteState(fooState); err != nil { + t.Fatal("error writing foo state:", err) + } + if err := foo.PersistState(nil); err != nil { + t.Fatal("error persisting foo state:", err) + } + + // We'll make "bar" different by adding a fake resource state to it. + barState.SyncWrapper().SetResourceInstanceCurrent( + addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }, + }.Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte("{}"), + Status: states.ObjectReady, + SchemaVersion: 0, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // write a distinct known state to bar + if err := bar.WriteState(barState); err != nil { + t.Fatalf("bad: %s", err) + } + if err := bar.PersistState(nil); err != nil { + t.Fatalf("bad: %s", err) + } + + // verify that foo is unchanged with the existing state manager + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource to bar, foo now has resources too") + } + + // fetch foo again from the backend + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource to bar and re-reading foo, foo now has resources too") + } + + // fetch the bar again from the backend + bar, err = b.StateMgr("bar") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatal("error refreshing bar:", err) + } + barState = bar.State() + if 
!barState.HasManagedResourceInstanceObjects() { + t.Fatal("after writing a resource instance object to bar and re-reading it, the object has vanished") + } + } + + // Verify we can now list them + { + // we determined that named stated are supported earlier + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"bar", "default", "foo"} + if noDefault { + expected = []string{"bar", "foo"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } + + // Delete some workspaces + if err := b.DeleteWorkspace("foo", true); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the default state can't be deleted + if err := b.DeleteWorkspace(DefaultStateName, true); err == nil { + t.Fatal("expected error") + } + + // Create and delete the foo workspace again. + // Make sure that there are no leftover artifacts from a deleted state + // preventing re-creation. + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasManagedResourceInstanceObjects() { + t.Fatalf("should be empty: %s", v) + } + // and delete it again + if err := b.DeleteWorkspace("foo", true); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify deletion + { + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"bar", "default"} + if noDefault { + expected = []string{"bar"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } +} + +// TestBackendStateLocks will test the locking functionality of the remote +// state backend. 
+func TestBackendStateLocks(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, false) +} + +// TestBackendStateForceUnlock verifies that the lock error is the expected +// type, and the lock can be unlocked using the ID reported in the error. +// Remote state backends that support -force-unlock should call this in at +// least one of the acceptance tests. +func TestBackendStateForceUnlock(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, true) +} + +// TestBackendStateLocksInWS will test the locking functionality of the remote +// state backend. +func TestBackendStateLocksInWS(t *testing.T, b1, b2 Backend, ws string) { + t.Helper() + testLocksInWorkspace(t, b1, b2, false, ws) +} + +// TestBackendStateForceUnlockInWS verifies that the lock error is the expected +// type, and the lock can be unlocked using the ID reported in the error. +// Remote state backends that support -force-unlock should call this in at +// least one of the acceptance tests. +func TestBackendStateForceUnlockInWS(t *testing.T, b1, b2 Backend, ws string) { + t.Helper() + testLocksInWorkspace(t, b1, b2, true, ws) +} + +func testLocks(t *testing.T, b1, b2 Backend, testForceUnlock bool) { + testLocksInWorkspace(t, b1, b2, testForceUnlock, DefaultStateName) +} + +func testLocksInWorkspace(t *testing.T, b1, b2 Backend, testForceUnlock bool, workspace string) { + t.Helper() + + // Get the default state for each + b1StateMgr, err := b1.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b1StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Fast exit if this doesn't support locking at all + if _, ok := b1StateMgr.(statemgr.Locker); !ok { + t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1) + return + } + + t.Logf("TestBackend: testing state locking for %T", b1) + + b2StateMgr, err := b2.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := 
b2StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Reassign so its obvious whats happening + lockerA := b1StateMgr.(statemgr.Locker) + lockerB := b2StateMgr.(statemgr.Locker) + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // Make sure we can still get the statemgr.Full from another instance even + // when locked. This should only happen when a state is loaded via the + // backend, and as a remote state. + _, err = b2.StateMgr(DefaultStateName) + if err != nil { + t.Errorf("failed to read locked state from another backend instance: %s", err) + } + + // If the lock ID is blank, assume locking is disabled + if lockIDA == "" { + t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) + return + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Errorf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // test the equivalent of -force-unlock, by using the id from the error + // output. 
+ if !testForceUnlock { + return + } + + // get a new ID + infoA.ID, err = uuid.GenerateUUID() + if err != nil { + panic(err) + } + + lockIDA, err = lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get re lock A:", err) + } + unlock := func() { + err := lockerA.Unlock(lockIDA) + if err != nil { + t.Fatal(err) + } + } + + _, err = lockerB.Lock(infoB) + if err == nil { + unlock() + t.Fatal("client B obtained lock while held by client A") + } + + infoErr, ok := err.(*statemgr.LockError) + if !ok { + unlock() + t.Fatalf("expected type *statemgr.LockError, got : %#v", err) + } + + // try to unlock with the second unlocker, using the ID from the error + if err := lockerB.Unlock(infoErr.Info.ID); err != nil { + unlock() + t.Fatalf("could not unlock with the reported ID %q: %s", infoErr.Info.ID, err) + } +} diff --git a/pkg/backend/unparsed_value.go b/pkg/backend/unparsed_value.go new file mode 100644 index 00000000000..9fa00448bf6 --- /dev/null +++ b/pkg/backend/unparsed_value.go @@ -0,0 +1,217 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backend + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +// UnparsedVariableValue represents a variable value provided by the caller +// whose parsing must be deferred until configuration is available. +// +// This exists to allow processing of variable-setting arguments (e.g. in the +// command package) to be separated from parsing (in the backend package). +type UnparsedVariableValue interface { + // ParseVariableValue information in the provided variable configuration + // to parse (if necessary) and return the variable value encapsulated in + // the receiver. 
+ // + // If error diagnostics are returned, the resulting value may be invalid + // or incomplete. + ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) +} + +// ParseUndeclaredVariableValues processes a map of unparsed variable values +// and returns an input values map of the ones not declared in the specified +// declaration map along with detailed diagnostics about values of undeclared +// variables being present, depending on the source of these values. If more +// than two undeclared values are present in file form (config, auto, -var-file) +// the remaining errors are summarized to avoid a massive list of errors. +func ParseUndeclaredVariableValues(vv map[string]UnparsedVariableValue, decls map[string]*configs.Variable) (tofu.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := make(tofu.InputValues, len(vv)) + seenUndeclaredInFile := 0 + + for name, rv := range vv { + if _, declared := decls[name]; declared { + // Only interested in parsing undeclared variables + continue + } + + val, valDiags := rv.ParseVariableValue(configs.VariableParseLiteral) + if valDiags.HasErrors() { + continue + } + + ret[name] = val + + switch val.SourceType { + case tofu.ValueFromConfig, tofu.ValueFromAutoFile, tofu.ValueFromNamedFile: + // We allow undeclared names for variable values from files and warn in case + // users have forgotten a variable {} declaration or have a typo in their var name. + // Some users will actively ignore this warning because they use a .tfvars file + // across multiple configurations. + if seenUndeclaredInFile < 2 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Value for undeclared variable", + fmt.Sprintf("The root module does not declare a variable named %q but a value was found in file %q. If you meant to use this value, add a \"variable\" block to the configuration.\n\nTo silence these warnings, use TF_VAR_... 
environment variables to provide certain \"global\" settings to all configurations in your organization. To reduce the verbosity of these warnings, use the -compact-warnings option.", name, val.SourceRange.Filename), + )) + } + seenUndeclaredInFile++ + + case tofu.ValueFromEnvVar: + // We allow and ignore undeclared names for environment + // variables, because users will often set these globally + // when they are used across many (but not necessarily all) + // configurations. + case tofu.ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value for undeclared variable", + fmt.Sprintf("A variable named %q was assigned on the command line, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name), + )) + default: + // For all other source types we are more vague, but other situations + // don't generally crop up at this layer in practice. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value for undeclared variable", + fmt.Sprintf("A variable named %q was assigned a value, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name), + )) + } + } + + if seenUndeclaredInFile > 2 { + extras := seenUndeclaredInFile - 2 + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Values for undeclared variables", + Detail: fmt.Sprintf("In addition to the other similar warnings shown, %d other variable(s) defined without being declared.", extras), + }) + } + + return ret, diags +} + +// ParseDeclaredVariableValues processes a map of unparsed variable values +// and returns an input values map of the ones declared in the specified +// variable declaration mapping. Diagnostics will be populating with +// any variable parsing errors encountered within this collection. 
+func ParseDeclaredVariableValues(vv map[string]UnparsedVariableValue, decls map[string]*configs.Variable) (tofu.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := make(tofu.InputValues, len(vv)) + + for name, rv := range vv { + var mode configs.VariableParsingMode + config, declared := decls[name] + + if declared { + mode = config.ParsingMode + } else { + // Only interested in parsing declared variables + continue + } + + val, valDiags := rv.ParseVariableValue(mode) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + continue + } + + ret[name] = val + } + + return ret, diags +} + +// Checks all given tofu.InputValues variable maps for the existance of +// a named variable +func isDefinedAny(name string, maps ...tofu.InputValues) bool { + for _, m := range maps { + if _, defined := m[name]; defined { + return true + } + } + return false +} + +// ParseVariableValues processes a map of unparsed variable values by +// correlating each one with the given variable declarations which should +// be from a root module. +// +// The map of unparsed variable values should include variables from all +// possible root module declarations sources such that it is as complete as +// it can possibly be for the current operation. If any declared variables +// are not included in the map, ParseVariableValues will either substitute +// a configured default value or produce an error. +// +// If this function returns without any errors in the diagnostics, the +// resulting input values map is guaranteed to be valid and ready to pass +// to tofu.NewContext. If the diagnostics contains errors, the returned +// InputValues may be incomplete but will include the subset of variables +// that were successfully processed, allowing for careful analysis of the +// partial result. 
func ParseVariableValues(vv map[string]UnparsedVariableValue, decls map[string]*configs.Variable) (tofu.InputValues, tfdiags.Diagnostics) {
	// Parse declared and undeclared values independently; both sets of
	// diagnostics are combined below.
	ret, diags := ParseDeclaredVariableValues(vv, decls)
	undeclared, diagsUndeclared := ParseUndeclaredVariableValues(vv, decls)

	diags = diags.Append(diagsUndeclared)

	// By this point we should've gathered all the required root module
	// variables from one of the many possible sources. We'll now populate
	// any we haven't gathered as unset placeholders which OpenTofu Core
	// can then react to.
	for name, vc := range decls {
		if isDefinedAny(name, ret, undeclared) {
			continue
		}

		// This check is redundant with a check made in OpenTofu Core when
		// processing undeclared variables, but allows us to generate a more
		// specific error message which mentions -var and -var-file command
		// line options, whereas the one in OpenTofu Core is more general
		// due to supporting both root and child module variables.
		if vc.Required() {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "No value for required variable",
				Detail:   fmt.Sprintf("The root module input variable %q is not set, and has no default value. Use a -var or -var-file command line argument to provide a value for this variable.", name),
				Subject:  vc.DeclRange.Ptr(),
			})

			// We'll include a placeholder value anyway, just so that our
			// result is complete for any calling code that wants to cautiously
			// analyze it for diagnostic purposes. Since our diagnostics now
			// includes an error, normal processing will ignore this result.
			ret[name] = &tofu.InputValue{
				Value:       cty.DynamicVal,
				SourceType:  tofu.ValueFromConfig,
				SourceRange: tfdiags.SourceRangeFromHCL(vc.DeclRange),
			}
		} else {
			// We're still required to put an entry for this variable
			// in the mapping to be explicit to OpenTofu Core that we
			// visited it, but its value will be cty.NilVal to represent
			// that it wasn't set at all at this layer, and so OpenTofu Core
			// should substitute a default if available, or generate an error
			// if not.
			ret[name] = &tofu.InputValue{
				Value:       cty.NilVal,
				SourceType:  tofu.ValueFromConfig,
				SourceRange: tfdiags.SourceRangeFromHCL(vc.DeclRange),
			}
		}
	}

	return ret, diags
}
diff --git a/pkg/backend/unparsed_value_test.go b/pkg/backend/unparsed_value_test.go
new file mode 100644
index 00000000000..5fc42a6ff60
--- /dev/null
+++ b/pkg/backend/unparsed_value_test.go
@@ -0,0 +1,239 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package backend + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestUnparsedValue(t *testing.T) { + vv := map[string]UnparsedVariableValue{ + "undeclared0": testUnparsedVariableValue("0"), + "undeclared1": testUnparsedVariableValue("1"), + "undeclared2": testUnparsedVariableValue("2"), + "undeclared3": testUnparsedVariableValue("3"), + "undeclared4": testUnparsedVariableValue("4"), + "declared1": testUnparsedVariableValue("5"), + } + decls := map[string]*configs.Variable{ + "declared1": { + Name: "declared1", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "missing1": { + Name: "missing1", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 3, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 3, Column: 1, Byte: 0}, + }, + }, + "missing2": { + Name: "missing1", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + Default: cty.StringVal("default for missing2"), + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 4, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 0}, + }, + }, + } + + const undeclSingular = `Value for undeclared variable` + const undeclPlural = `Values for undeclared variables` + + t.Run("ParseDeclaredVariableValues", func(t *testing.T) { + gotVals, diags := ParseDeclaredVariableValues(vv, decls) + + if got, want := len(diags), 0; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, want) + 
		}

		wantVals := tofu.InputValues{
			"declared1": {
				Value:      cty.StringVal("5"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
					End:      tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
				},
			},
		}

		if diff := cmp.Diff(wantVals, gotVals, cmp.Comparer(cty.Value.RawEquals)); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}
	})

	t.Run("ParseUndeclaredVariableValues", func(t *testing.T) {
		gotVals, diags := ParseUndeclaredVariableValues(vv, decls)

		// Expect two per-variable warnings plus one summary warning for the
		// remaining three undeclared file-sourced values.
		if got, want := len(diags), 3; got != want {
			t.Fatalf("wrong number of diagnostics %d; want %d", got, want)
		}

		if got, want := diags[0].Description().Summary, undeclSingular; got != want {
			t.Errorf("wrong summary for diagnostic 0\ngot: %s\nwant: %s", got, want)
		}

		if got, want := diags[1].Description().Summary, undeclSingular; got != want {
			t.Errorf("wrong summary for diagnostic 1\ngot: %s\nwant: %s", got, want)
		}

		if got, want := diags[2].Description().Summary, undeclPlural; got != want {
			t.Errorf("wrong summary for diagnostic 2\ngot: %s\nwant: %s", got, want)
		}

		wantVals := tofu.InputValues{
			"undeclared0": {
				Value:      cty.StringVal("0"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1},
					End:      tfdiags.SourcePos{Line: 1, Column: 1},
				},
			},
			"undeclared1": {
				Value:      cty.StringVal("1"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1},
					End:      tfdiags.SourcePos{Line: 1, Column: 1},
				},
			},
			"undeclared2": {
				Value:      cty.StringVal("2"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1},
					End:      tfdiags.SourcePos{Line: 1, Column: 1},
				},
			},
			"undeclared3": {
				Value:      cty.StringVal("3"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1},
					End:      tfdiags.SourcePos{Line: 1, Column: 1},
				},
			},
			"undeclared4": {
				Value:      cty.StringVal("4"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1},
					End:      tfdiags.SourcePos{Line: 1, Column: 1},
				},
			},
		}
		if diff := cmp.Diff(wantVals, gotVals, cmp.Comparer(cty.Value.RawEquals)); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}
	})

	t.Run("ParseVariableValues", func(t *testing.T) {
		gotVals, diags := ParseVariableValues(vv, decls)
		for _, diag := range diags {
			t.Logf("%s: %s", diag.Description().Summary, diag.Description().Detail)
		}
		// Combined: 2 singular warnings + 1 summary warning + 1 error for the
		// required-but-unset "missing1".
		if got, want := len(diags), 4; got != want {
			t.Fatalf("wrong number of diagnostics %d; want %d", got, want)
		}

		const missingRequired = `No value for required variable`

		if got, want := diags[0].Description().Summary, undeclSingular; got != want {
			t.Errorf("wrong summary for diagnostic 0\ngot: %s\nwant: %s", got, want)
		}
		if got, want := diags[1].Description().Summary, undeclSingular; got != want {
			t.Errorf("wrong summary for diagnostic 1\ngot: %s\nwant: %s", got, want)
		}
		if got, want := diags[2].Description().Summary, undeclPlural; got != want {
			t.Errorf("wrong summary for diagnostic 2\ngot: %s\nwant: %s", got, want)
		}
		if got, want := diags[2].Description().Detail, "3 other variable(s)"; !strings.Contains(got, want) {
			t.Errorf("wrong detail for diagnostic 2\ngot: %s\nmust contain: %s", got, want)
		}
		if got, want := diags[3].Description().Summary, missingRequired; got != want {
			t.Errorf("wrong summary for diagnostic 3\ngot: %s\nwant: %s", got, want)
		}

		wantVals := tofu.InputValues{
			"declared1": {
				Value:      cty.StringVal("5"),
				SourceType: tofu.ValueFromNamedFile,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tfvars",
					Start:    tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
					End:      tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
				},
			},
			"missing1": {
				Value:      cty.DynamicVal,
				SourceType: tofu.ValueFromConfig,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tf",
					Start:    tfdiags.SourcePos{Line: 3, Column: 1, Byte: 0},
					End:      tfdiags.SourcePos{Line: 3, Column: 1, Byte: 0},
				},
			},
			"missing2": {
				Value:      cty.NilVal, // OpenTofu Core handles substituting the default
				SourceType: tofu.ValueFromConfig,
				SourceRange: tfdiags.SourceRange{
					Filename: "fake.tf",
					Start:    tfdiags.SourcePos{Line: 4, Column: 1, Byte: 0},
					End:      tfdiags.SourcePos{Line: 4, Column: 1, Byte: 0},
				},
			},
		}
		if diff := cmp.Diff(wantVals, gotVals, cmp.Comparer(cty.Value.RawEquals)); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}
	})
}

// testUnparsedVariableValue is a test double: its ParseVariableValue returns
// the string itself as a named-file-sourced value with a fixed fake range.
type testUnparsedVariableValue string

func (v testUnparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) {
	return &tofu.InputValue{
		Value:      cty.StringVal(string(v)),
		SourceType: tofu.ValueFromNamedFile,
		SourceRange: tfdiags.SourceRange{
			Filename: "fake.tfvars",
			Start:    tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
			End:      tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
		},
	}, nil
}
diff --git a/pkg/builtin/providers/README b/pkg/builtin/providers/README
new file mode 100644
index 00000000000..00ffa71458c
--- /dev/null
+++ b/pkg/builtin/providers/README
@@ -0,0 +1 @@
+providers moved to github.com/terraform-providers
diff --git a/pkg/builtin/providers/tf/data_source_state.go b/pkg/builtin/providers/tf/data_source_state.go
new file mode 100644
index 00000000000..6d8facfb56f
--- /dev/null
+++ b/pkg/builtin/providers/tf/data_source_state.go
@@ -0,0 +1,269 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tf

import (
	"fmt"
	"log"

	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/backend/remote"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/encryption"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/zclconf/go-cty/cty"

	backendInit "github.com/kubegems/opentofu/pkg/backend/init"
)

// dataSourceRemoteStateGetSchema returns the configuration schema for the
// terraform_remote_state data source: required "backend", optional "config",
// "defaults" and "workspace", and the computed "outputs" attribute.
func dataSourceRemoteStateGetSchema() providers.Schema {
	return providers.Schema{
		Block: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"backend": {
					Type:            cty.String,
					Description:     "The remote backend to use, e.g. `remote` or `http`.",
					DescriptionKind: configschema.StringMarkdown,
					Required:        true,
				},
				"config": {
					Type: cty.DynamicPseudoType,
					Description: "The configuration of the remote backend. " +
						"Although this is optional, most backends require " +
						"some configuration.\n\n" +
						"The object can use any arguments that would be valid " +
						"in the equivalent `terraform { backend \"\" { ... } }` " +
						"block.",
					DescriptionKind: configschema.StringMarkdown,
					Optional:        true,
				},
				"defaults": {
					Type: cty.DynamicPseudoType,
					Description: "Default values for outputs, in case " +
						"the state file is empty or lacks a required output.",
					DescriptionKind: configschema.StringMarkdown,
					Optional:        true,
				},
				"outputs": {
					Type: cty.DynamicPseudoType,
					Description: "An object containing every root-level " +
						"output in the remote state.",
					DescriptionKind: configschema.StringMarkdown,
					Computed:        true,
				},
				"workspace": {
					Type: cty.String,
					Description: "The OpenTofu workspace to use, if " +
						"the backend supports workspaces.",
					DescriptionKind: configschema.StringMarkdown,
					Optional:        true,
				},
			},
		},
	}
}

// dataSourceRemoteStateValidate type-checks the data source configuration.
// When the config is wholly known it instantiates the backend (which
// validates the backend-specific config); otherwise it only checks that
// "config" and "defaults" are object-like values.
func dataSourceRemoteStateValidate(cfg cty.Value) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	// Getting the backend implicitly validates the configuration for it,
	// but we can only do that if it's all known already.
	if cfg.GetAttr("config").IsWhollyKnown() && cfg.GetAttr("backend").IsKnown() {
		_, _, moreDiags := getBackend(cfg, nil) // Don't need the encryption for validation here
		diags = diags.Append(moreDiags)
	} else {
		// Otherwise we'll just type-check the config object itself.
		configTy := cfg.GetAttr("config").Type()
		if configTy != cty.DynamicPseudoType && !(configTy.IsObjectType() || configTy.IsMapType()) {
			diags = diags.Append(tfdiags.AttributeValue(
				tfdiags.Error,
				"Invalid backend configuration",
				"The configuration must be an object value.",
				cty.GetAttrPath("config"),
			))
		}
	}

	{
		defaultsTy := cfg.GetAttr("defaults").Type()
		if defaultsTy != cty.DynamicPseudoType && !(defaultsTy.IsObjectType() || defaultsTy.IsMapType()) {
			diags = diags.Append(tfdiags.AttributeValue(
				tfdiags.Error,
				"Invalid default values",
				"Defaults must be given in an object value.",
				cty.GetAttrPath("defaults"),
			))
		}
	}

	return diags
}

// dataSourceRemoteStateRead configures the requested backend, loads the state
// for the selected workspace, and returns the data source's object value,
// merging "defaults" under the remote state's root-module outputs.
func dataSourceRemoteStateRead(d cty.Value, enc encryption.StateEncryption) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	b, cfg, moreDiags := getBackend(d, enc)
	diags = diags.Append(moreDiags)
	if moreDiags.HasErrors() {
		return cty.NilVal, diags
	}

	configureDiags := b.Configure(cfg)
	if configureDiags.HasErrors() {
		diags = diags.Append(configureDiags.Err())
		return cty.NilVal, diags
	}

	newState := make(map[string]cty.Value)
	newState["backend"] = d.GetAttr("backend")
	newState["config"] = d.GetAttr("config")

	workspaceVal := d.GetAttr("workspace")
	// This attribute is not computed, so we always have to store the state
	// value, even if we implicitly use a default.
	newState["workspace"] = workspaceVal

	workspaceName := backend.DefaultStateName
	if !workspaceVal.IsNull() {
		workspaceName = workspaceVal.AsString()
	}

	state, err := b.StateMgr(workspaceName)
	if err != nil {
		// NOTE(review): the summary "Error loading state error" repeats the
		// word "error" — looks like a wording slip, kept as-is because it is
		// a user-visible string.
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Error loading state error",
			fmt.Sprintf("error loading the remote state: %s", err),
			cty.Path(nil).GetAttr("backend"),
		))
		return cty.NilVal, diags
	}

	if err := state.RefreshState(); err != nil {
		diags = diags.Append(err)
		return cty.NilVal, diags
	}

	outputs := make(map[string]cty.Value)

	// Defaults are applied first so that real remote outputs override them.
	if defaultsVal := d.GetAttr("defaults"); !defaultsVal.IsNull() {
		newState["defaults"] = defaultsVal
		it := defaultsVal.ElementIterator()
		for it.Next() {
			k, v := it.Element()
			outputs[k.AsString()] = v
		}
	} else {
		newState["defaults"] = cty.NullVal(cty.DynamicPseudoType)
	}

	remoteState := state.State()
	if remoteState == nil {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Unable to find remote state",
			"No stored state was found for the given workspace in the given backend.",
			cty.Path(nil).GetAttr("workspace"),
		))
		newState["outputs"] = cty.EmptyObjectVal
		return cty.ObjectVal(newState), diags
	}
	mod := remoteState.RootModule()
	if mod != nil { // should always have a root module in any valid state
		for k, os := range mod.OutputValues {
			outputs[k] = os.Value
		}
	}

	newState["outputs"] = cty.ObjectVal(outputs)

	return cty.ObjectVal(newState), diags
}

// getBackend resolves the "backend" attribute to a backend implementation and
// coerces/validates the "config" attribute against that backend's schema.
// Returns the backend, the prepared config value, and diagnostics.
func getBackend(cfg cty.Value, enc encryption.StateEncryption) (backend.Backend, cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	backendType := cfg.GetAttr("backend").AsString()

	// Don't break people using the old _local syntax - but note warning above
	if backendType == "_local" {
		log.Println(`[INFO] Switching old (unsupported) backend "_local" to "local"`)
		backendType = "local"
	}

	// Create the client to access our remote state
	log.Printf("[DEBUG] Initializing remote state backend: %s", backendType)
	f := getBackendFactory(backendType)
	if f == nil {
		detail := fmt.Sprintf("There is no backend type named %q.", backendType)
		if msg, removed := backendInit.RemovedBackends[backendType]; removed {
			// Prefer the curated removal message when the backend was
			// deliberately removed rather than never existing.
			detail = msg
		}

		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Invalid backend configuration",
			detail,
			cty.Path(nil).GetAttr("backend"),
		))
		return nil, cty.NilVal, diags
	}
	b := f(enc)

	config := cfg.GetAttr("config")
	if config.IsNull() {
		// We'll treat this as an empty configuration and see if the backend's
		// schema and validation code will accept it.
		config = cty.EmptyObjectVal
	}

	if config.Type().IsMapType() { // The code below expects an object type, so we'll convert
		config = cty.ObjectVal(config.AsValueMap())
	}

	schema := b.ConfigSchema()
	// Try to coerce the provided value into the desired configuration type.
	configVal, err := schema.CoerceValue(config)
	if err != nil {
		diags = diags.Append(tfdiags.AttributeValue(
			tfdiags.Error,
			"Invalid backend configuration",
			fmt.Sprintf("The given configuration is not valid for backend %q: %s.", backendType,
				tfdiags.FormatError(err)),
			cty.Path(nil).GetAttr("config"),
		))
		return nil, cty.NilVal, diags
	}

	newVal, validateDiags := b.PrepareConfig(configVal)
	diags = diags.Append(validateDiags)
	if validateDiags.HasErrors() {
		return nil, cty.NilVal, diags
	}

	// If this is the enhanced remote backend, we want to disable the version
	// check, because this is a read-only operation
	if rb, ok := b.(*remote.Remote); ok {
		rb.IgnoreVersionConflict()
	}

	return b, newVal, diags
}

// overrideBackendFactories allows test cases to control the set of available
// backends to allow for more self-contained tests. This should never be set
// in non-test code.
var overrideBackendFactories map[string]backend.InitFn

// getBackendFactory returns the initialization function for the named
// backend, consulting the test-only override table first.
func getBackendFactory(backendType string) backend.InitFn {
	if len(overrideBackendFactories) > 0 {
		// Tests may override the set of backend factories.
		return overrideBackendFactories[backendType]
	}

	return backendInit.Backend(backendType)
}
diff --git a/pkg/builtin/providers/tf/data_source_state_test.go b/pkg/builtin/providers/tf/data_source_state_test.go
new file mode 100644
index 00000000000..b6e4135b1f0
--- /dev/null
+++ b/pkg/builtin/providers/tf/data_source_state_test.go
@@ -0,0 +1,377 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tf

import (
	"fmt"
	"log"
	"testing"

	"github.com/apparentlymart/go-dump/dump"
	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/encryption"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/zclconf/go-cty/cty"
)

// TestResource sanity-checks that the data source schema is internally valid.
func TestResource(t *testing.T) {
	if err := dataSourceRemoteStateGetSchema().Block.InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}

// TestState_basic is a table test covering validation and reading of state
// snapshots through the terraform_remote_state data source.
func TestState_basic(t *testing.T) {
	var tests = map[string]struct {
		Config cty.Value
		Want   cty.Value
		Err    bool
	}{
		"basic": {
			cty.ObjectVal(map[string]cty.Value{
				"backend": cty.StringVal("local"),
				"config": cty.ObjectVal(map[string]cty.Value{
					"path": cty.StringVal("./testdata/basic.tfstate"),
				}),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"backend": cty.StringVal("local"),
				"config": cty.ObjectVal(map[string]cty.Value{
					"path": cty.StringVal("./testdata/basic.tfstate"),
				}),
				"outputs": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("bar"),
				}),
				"defaults":  cty.NullVal(cty.DynamicPseudoType),
				"workspace": cty.NullVal(cty.String),
			}),
			false,
		},
		"workspace": {
cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "workspace": cty.StringVal(backend.DefaultStateName), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "workspace": cty.StringVal(backend.DefaultStateName), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + "outputs": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "defaults": cty.NullVal(cty.DynamicPseudoType), + }), + false, + }, + "_local": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("_local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("_local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + "outputs": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "defaults": cty.NullVal(cty.DynamicPseudoType), + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "complex outputs": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/complex_outputs.tfstate"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/complex_outputs.tfstate"), + }), + "outputs": cty.ObjectVal(map[string]cty.Value{ + "computed_map": cty.MapVal(map[string]cty.Value{ + "key1": cty.StringVal("value1"), + }), + "computed_set": cty.ListVal([]cty.Value{ + cty.StringVal("setval1"), + cty.StringVal("setval2"), + }), + "map": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("test"), + "test": cty.StringVal("test"), + }), + "set": 
cty.ListVal([]cty.Value{ + cty.StringVal("test1"), + cty.StringVal("test2"), + }), + }), + "defaults": cty.NullVal(cty.DynamicPseudoType), + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "null outputs": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/null_outputs.tfstate"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/null_outputs.tfstate"), + }), + "outputs": cty.ObjectVal(map[string]cty.Value{ + "map": cty.NullVal(cty.Map(cty.String)), + "list": cty.NullVal(cty.List(cty.String)), + }), + "defaults": cty.NullVal(cty.DynamicPseudoType), + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "defaults": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/empty.tfstate"), + }), + "defaults": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/empty.tfstate"), + }), + "defaults": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "outputs": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "missing": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/missing.tfstate"), // intentionally not present on disk + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/missing.tfstate"), + }), + 
"defaults": cty.NullVal(cty.DynamicPseudoType), + "outputs": cty.EmptyObjectVal, + "workspace": cty.NullVal(cty.String), + }), + true, + }, + "wrong type for config": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.StringVal("nope"), + }), + cty.NilVal, + true, + }, + "wrong type for config with unknown backend": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.UnknownVal(cty.String), + "config": cty.StringVal("nope"), + }), + cty.NilVal, + true, + }, + "wrong type for config with unknown config": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.UnknownVal(cty.String), + }), + cty.NilVal, + true, + }, + "wrong type for defaults": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + "defaults": cty.StringVal("nope"), + }), + cty.NilVal, + true, + }, + "config as map": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.MapVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/empty.tfstate"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.MapVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/empty.tfstate"), + }), + "defaults": cty.NullVal(cty.DynamicPseudoType), + "outputs": cty.EmptyObjectVal, + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "defaults as map": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + "defaults": cty.MapValEmpty(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + "defaults": cty.MapValEmpty(cty.String), + 
"outputs": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "workspace": cty.NullVal(cty.String), + }), + false, + }, + "nonexistent backend": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("nonexistent"), + "config": cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal("./testdata/basic.tfstate"), + }), + }), + cty.NilVal, + true, + }, + "null config": { + cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("local"), + "config": cty.NullVal(cty.DynamicPseudoType), + }), + cty.NilVal, + true, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + schema := dataSourceRemoteStateGetSchema().Block + config, err := schema.CoerceValue(test.Config) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + diags := dataSourceRemoteStateValidate(config) + + var got cty.Value + if !diags.HasErrors() && config.IsWhollyKnown() { + var moreDiags tfdiags.Diagnostics + got, moreDiags = dataSourceRemoteStateRead(config, encryption.StateEncryptionDisabled()) + diags = diags.Append(moreDiags) + } + + if test.Err { + if !diags.HasErrors() { + t.Fatal("succeeded; want error") + } + } else if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if test.Want != cty.NilVal && !test.Want.RawEquals(got) { + t.Errorf("wrong result\nconfig: %sgot: %swant: %s", dump.Value(config), dump.Value(got), dump.Value(test.Want)) + } + }) + } +} + +func TestState_validation(t *testing.T) { + // The main test TestState_basic covers both validation and reading of + // state snapshots, so this additional test is here only to verify that + // the validation step in isolation does not attempt to configure + // the backend. 
+ overrideBackendFactories = map[string]backend.InitFn{ + "failsconfigure": func(enc encryption.StateEncryption) backend.Backend { + return backendFailsConfigure{} + }, + } + defer func() { + // undo our overrides so we won't affect other tests + overrideBackendFactories = nil + }() + + schema := dataSourceRemoteStateGetSchema().Block + config, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "backend": cty.StringVal("failsconfigure"), + "config": cty.EmptyObjectVal, + })) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + diags := dataSourceRemoteStateValidate(config) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } +} + +type backendFailsConfigure struct{} + +func (b backendFailsConfigure) ConfigSchema() *configschema.Block { + log.Printf("[TRACE] backendFailsConfigure.ConfigSchema") + return &configschema.Block{} // intentionally empty configuration schema +} + +func (b backendFailsConfigure) PrepareConfig(given cty.Value) (cty.Value, tfdiags.Diagnostics) { + // No special actions to take here + return given, nil +} + +func (b backendFailsConfigure) Configure(config cty.Value) tfdiags.Diagnostics { + log.Printf("[TRACE] backendFailsConfigure.Configure(%#v)", config) + var diags tfdiags.Diagnostics + diags = diags.Append(fmt.Errorf("Configure should never be called")) + return diags +} + +func (b backendFailsConfigure) StateMgr(workspace string) (statemgr.Full, error) { + return nil, fmt.Errorf("StateMgr not implemented") +} + +func (b backendFailsConfigure) DeleteWorkspace(name string, _ bool) error { + return fmt.Errorf("DeleteWorkspace not implemented") +} + +func (b backendFailsConfigure) Workspaces() ([]string, error) { + return nil, fmt.Errorf("Workspaces not implemented") +} diff --git a/pkg/builtin/providers/tf/provider.go b/pkg/builtin/providers/tf/provider.go new file mode 100644 index 00000000000..aaf8848d1e3 --- /dev/null +++ b/pkg/builtin/providers/tf/provider.go @@ -0,0 +1,174 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf + +import ( + "fmt" + "log" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/providers" +) + +// Provider is an implementation of providers.Interface +type Provider struct{} + +// NewProvider returns a new tofu provider +func NewProvider() providers.Interface { + return &Provider{} +} + +// GetSchema returns the complete schema for the provider. +func (p *Provider) GetProviderSchema() providers.GetProviderSchemaResponse { + return providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "terraform_remote_state": dataSourceRemoteStateGetSchema(), + }, + ResourceTypes: map[string]providers.Schema{ + "terraform_data": dataStoreResourceSchema(), + }, + } +} + +// ValidateProviderConfig is used to validate the configuration values. +func (p *Provider) ValidateProviderConfig(req providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + // At this moment there is nothing to configure for the tofu provider, + // so we will happily return without taking any action + var res providers.ValidateProviderConfigResponse + res.PreparedConfig = req.Config + return res +} + +// ValidateDataResourceConfig is used to validate the data source configuration values. +func (p *Provider) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + // FIXME: move the backend configuration validate call that's currently + // inside the read method into here so that we can catch provider configuration + // errors in tofu validate as well as during tofu plan. 
+ var res providers.ValidateDataResourceConfigResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + diags := dataSourceRemoteStateValidate(req.Config) + res.Diagnostics = diags + + return res +} + +// Configure configures and initializes the provider. +func (p *Provider) ConfigureProvider(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.ConfigureProviderResponse + return res +} + +// ReadDataSource returns the data source's current state. +func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + panic("Should not be called directly, special case for terraform_remote_state") +} + +func (p *Provider) ReadDataSourceEncrypted(req providers.ReadDataSourceRequest, path addrs.AbsResourceInstance, enc encryption.Encryption) providers.ReadDataSourceResponse { + // call function + var res providers.ReadDataSourceResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + // These string manipulations are kind of funky + key := path.String() + + // data.terraform_remote_state.foo[4] -> foo[4] + // module.submod[1].data.terraform_remote_state.bar -> module.submod[1].bar + key = strings.Replace(key, "data.terraform_remote_state.", "", 1) + + // module.submod[1].bar -> submod[1].bar + key = strings.TrimPrefix(key, "module.") + + log.Printf("[DEBUG] accessing remote state at %s", key) + + newState, diags := dataSourceRemoteStateRead(req.Config, enc.RemoteState(key)) + + if diags.HasErrors() { + diags = diags.Append(fmt.Errorf("%s: Unable to read remote state", path.String())) + } 
+ + res.State = newState + res.Diagnostics = diags + + return res +} + +// Stop is called when the provider should halt any in-flight actions. +func (p *Provider) Stop() error { + log.Println("[DEBUG] terraform provider cannot Stop") + return nil +} + +// All the Resource-specific functions are below. +// The terraform provider supplies a single data source, `terraform_remote_state` +// and no resources. + +// UpgradeResourceState is called when the state loader encounters an +// instance state whose schema version is less than the one reported by the +// currently-used version of the corresponding provider, and the upgraded +// result is used for any further processing. +func (p *Provider) UpgradeResourceState(req providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + return upgradeDataStoreResourceState(req) +} + +// ReadResource refreshes a resource and returns its current state. +func (p *Provider) ReadResource(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return readDataStoreResourceState(req) +} + +// PlanResourceChange takes the current state and proposed state of a +// resource, and returns the planned final state. +func (p *Provider) PlanResourceChange(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return planDataStoreResourceChange(req) +} + +// ApplyResourceChange takes the planned state for a resource, which may +// yet contain unknown computed values, and applies the changes returning +// the final state. +func (p *Provider) ApplyResourceChange(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return applyDataStoreResourceChange(req) +} + +// ImportResourceState requests that the given resource be imported. 
+func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + if req.TypeName == "terraform_data" { + return importDataStore(req) + } + + panic("unimplemented - terraform_remote_state has no resources") +} + +// ValidateResourceConfig is used to validate the resource configuration values. +func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return validateDataStoreResourceConfig(req) +} + +func (p *Provider) GetFunctions() providers.GetFunctionsResponse { + panic("unimplemented - terraform provider has no functions") +} + +func (p *Provider) CallFunction(r providers.CallFunctionRequest) providers.CallFunctionResponse { + panic("unimplemented - terraform provider has no functions") +} + +// Close is a noop for this provider, since it's run in-process. +func (p *Provider) Close() error { + return nil +} diff --git a/pkg/builtin/providers/tf/provider_test.go b/pkg/builtin/providers/tf/provider_test.go new file mode 100644 index 00000000000..f4d327a4d78 --- /dev/null +++ b/pkg/builtin/providers/tf/provider_test.go @@ -0,0 +1,15 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf + +import ( + backendInit "github.com/kubegems/opentofu/pkg/backend/init" +) + +func init() { + // Initialize the backends + backendInit.Init(nil) +} diff --git a/pkg/builtin/providers/tf/resource_data.go b/pkg/builtin/providers/tf/resource_data.go new file mode 100644 index 00000000000..43b8d347e8b --- /dev/null +++ b/pkg/builtin/providers/tf/resource_data.go @@ -0,0 +1,174 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tf + +import ( + "fmt" + + "github.com/hashicorp/go-uuid" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func dataStoreResourceSchema() providers.Schema { + return providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "input": {Type: cty.DynamicPseudoType, Optional: true}, + "output": {Type: cty.DynamicPseudoType, Computed: true}, + "triggers_replace": {Type: cty.DynamicPseudoType, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + } +} + +func validateDataStoreResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + if req.Config.IsNull() { + return resp + } + + // Core does not currently validate computed values are not set in the + // configuration. 
+ for _, attr := range []string{"id", "output"} { + if !req.Config.GetAttr(attr).IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf(`%q attribute is read-only`, attr)) + } + } + return resp +} + +func upgradeDataStoreResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + ty := dataStoreResourceSchema().Block.ImpliedType() + val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.UpgradedState = val + return resp +} + +func readDataStoreResourceState(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = req.PriorState + return resp +} + +func planDataStoreResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + if req.ProposedNewState.IsNull() { + // destroy op + resp.PlannedState = req.ProposedNewState + return resp + } + + planned := req.ProposedNewState.AsValueMap() + + input := req.ProposedNewState.GetAttr("input") + trigger := req.ProposedNewState.GetAttr("triggers_replace") + + switch { + case req.PriorState.IsNull(): + // Create + // Set the id value to unknown. + planned["id"] = cty.UnknownVal(cty.String).RefineNotNull() + + // Output type must always match the input, even when it's null. + if input.IsNull() { + planned["output"] = input + } else { + planned["output"] = cty.UnknownVal(input.Type()) + } + + resp.PlannedState = cty.ObjectVal(planned) + return resp + + case !req.PriorState.GetAttr("triggers_replace").RawEquals(trigger): + // trigger changed, so we need to replace the entire instance + resp.RequiresReplace = append(resp.RequiresReplace, cty.GetAttrPath("triggers_replace")) + planned["id"] = cty.UnknownVal(cty.String).RefineNotNull() + + // We need to check the input for the replacement instance to compute a + // new output. 
+ if input.IsNull() { + planned["output"] = input + } else { + planned["output"] = cty.UnknownVal(input.Type()) + } + + case !req.PriorState.GetAttr("input").RawEquals(input): + // only input changed, so we only need to re-compute output + planned["output"] = cty.UnknownVal(input.Type()) + } + + resp.PlannedState = cty.ObjectVal(planned) + return resp +} + +var testUUIDHook func() string + +func applyDataStoreResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + resp.NewState = req.PlannedState + return resp + } + + newState := req.PlannedState.AsValueMap() + + if !req.PlannedState.GetAttr("output").IsKnown() { + newState["output"] = req.PlannedState.GetAttr("input") + } + + if !req.PlannedState.GetAttr("id").IsKnown() { + idString, err := uuid.GenerateUUID() + // OpenTofu would probably never get this far without a good random + // source, but catch the error anyway. + if err != nil { + diag := tfdiags.AttributeValue( + tfdiags.Error, + "Error generating id", + err.Error(), + cty.GetAttrPath("id"), + ) + + resp.Diagnostics = resp.Diagnostics.Append(diag) + } + + if testUUIDHook != nil { + idString = testUUIDHook() + } + + newState["id"] = cty.StringVal(idString) + } + + resp.NewState = cty.ObjectVal(newState) + + return resp +} + +// TODO: This isn't very useful even for examples, because terraform_data has +// no way to refresh the full resource value from only the import ID. This +// minimal implementation allows the import to succeed, and can be extended +// once the configuration is available during import. 
+func importDataStore(req providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + schema := dataStoreResourceSchema() + v := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal(req.ID), + }) + state, err := schema.Block.CoerceValue(v) + resp.Diagnostics = resp.Diagnostics.Append(err) + + resp.ImportedResources = []providers.ImportedResource{ + { + TypeName: req.TypeName, + State: state, + }, + } + return resp +} diff --git a/pkg/builtin/providers/tf/resource_data_test.go b/pkg/builtin/providers/tf/resource_data_test.go new file mode 100644 index 00000000000..4979f7f0ff8 --- /dev/null +++ b/pkg/builtin/providers/tf/resource_data_test.go @@ -0,0 +1,387 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tf + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestManagedDataValidate(t *testing.T) { + cfg := map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + } + + // empty + req := providers.ValidateResourceConfigRequest{ + TypeName: "terraform_data", + Config: cty.ObjectVal(cfg), + } + + resp := validateDataStoreResourceConfig(req) + if resp.Diagnostics.HasErrors() { + t.Error("empty config error:", resp.Diagnostics.ErrWithWarnings()) + } + + // invalid computed values + cfg["output"] = cty.StringVal("oops") + req.Config = cty.ObjectVal(cfg) + + resp = validateDataStoreResourceConfig(req) + if !resp.Diagnostics.HasErrors() { + t.Error("expected error") + } + + msg := resp.Diagnostics.Err().Error() + if !strings.Contains(msg, "attribute is read-only") { + t.Error("unexpected error", msg) + } +} + +func TestManagedDataUpgradeState(t *testing.T) 
{ + schema := dataStoreResourceSchema() + ty := schema.Block.ImpliedType() + + state := cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + "id": cty.StringVal("not-quite-unique"), + }) + + jsState, err := ctyjson.Marshal(state, ty) + if err != nil { + t.Fatal(err) + } + + // empty + req := providers.UpgradeResourceStateRequest{ + TypeName: "terraform_data", + RawStateJSON: jsState, + } + + resp := upgradeDataStoreResourceState(req) + if resp.Diagnostics.HasErrors() { + t.Error("upgrade state error:", resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.UpgradedState.RawEquals(state) { + t.Errorf("prior state was:\n%#v\nupgraded state is:\n%#v\n", state, resp.UpgradedState) + } +} + +func TestManagedDataRead(t *testing.T) { + req := providers.ReadResourceRequest{ + TypeName: "terraform_data", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + } + + resp := readDataStoreResourceState(req) + if resp.Diagnostics.HasErrors() { + t.Fatal("unexpected error", resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.NewState.RawEquals(req.PriorState) { + t.Errorf("prior state was:\n%#v\nnew state is:\n%#v\n", req.PriorState, resp.NewState) + } +} + +func TestManagedDataPlan(t *testing.T) { + schema := dataStoreResourceSchema().Block + ty := schema.ImpliedType() + + for name, tc := range map[string]struct { + prior cty.Value + proposed cty.Value + planned cty.Value + }{ + "create": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": 
cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }, + + "create-typed-null-input": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.String), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.String), + "output": cty.NullVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }, + + "create-output": { + prior: cty.NullVal(ty), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.NullVal(cty.String), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }, + + "update-input": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.UnknownVal(cty.List(cty.String)), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.UnknownVal(cty.List(cty.String)), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": 
cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }, + + "update-input-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + proposed: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }, + } { + t.Run("plan-"+name, func(t *testing.T) { + req := providers.PlanResourceChangeRequest{ + TypeName: "terraform_data", + PriorState: tc.prior, + ProposedNewState: tc.proposed, + } + + resp := planDataStoreResourceChange(req) + if resp.Diagnostics.HasErrors() { + 
t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.PlannedState.RawEquals(tc.planned) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.planned, resp.PlannedState) + } + }) + } +} + +func TestManagedDataApply(t *testing.T) { + testUUIDHook = func() string { + return "not-quite-unique" + } + defer func() { + testUUIDHook = nil + }() + + schema := dataStoreResourceSchema().Block + ty := schema.ImpliedType() + + for name, tc := range map[string]struct { + prior cty.Value + planned cty.Value + state cty.Value + }{ + "create": { + prior: cty.NullVal(ty), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.NullVal(cty.DynamicPseudoType), + "output": cty.NullVal(cty.DynamicPseudoType), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "create-output": { + prior: cty.NullVal(ty), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-input": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": 
cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.NullVal(cty.DynamicPseudoType), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.UnknownVal(cty.String), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.StringVal("new-value"), + "id": cty.StringVal("not-quite-unique"), + }), + }, + + "update-input-trigger": { + prior: cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("input"), + "output": cty.StringVal("input"), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + planned: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.UnknownVal(cty.List(cty.String)), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.UnknownVal(cty.String), + }), + state: cty.ObjectVal(map[string]cty.Value{ + "input": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "output": cty.ListVal([]cty.Value{cty.StringVal("new-input")}), + "triggers_replace": cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("new value"), + }), + "id": cty.StringVal("not-quite-unique"), + }), + }, + } { + 
t.Run("apply-"+name, func(t *testing.T) { + req := providers.ApplyResourceChangeRequest{ + TypeName: "terraform_data", + PriorState: tc.prior, + PlannedState: tc.planned, + } + + resp := applyDataStoreResourceChange(req) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } + + if !resp.NewState.RawEquals(tc.state) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", tc.state, resp.NewState) + } + }) + } +} diff --git a/pkg/builtin/providers/tf/testdata/basic.tfstate b/pkg/builtin/providers/tf/testdata/basic.tfstate new file mode 100644 index 00000000000..d41a2d1f65e --- /dev/null +++ b/pkg/builtin/providers/tf/testdata/basic.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 0, + "lineage": "", + "outputs": { + "foo": { + "value": "bar", + "type": "string" + } + } +} diff --git a/pkg/builtin/providers/tf/testdata/complex_outputs.tfstate b/pkg/builtin/providers/tf/testdata/complex_outputs.tfstate new file mode 100644 index 00000000000..e67d838043b --- /dev/null +++ b/pkg/builtin/providers/tf/testdata/complex_outputs.tfstate @@ -0,0 +1,70 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 0, + "lineage": "", + "outputs": { + "computed_map": { + "sensitive": false, + "type": [ + "map", + "string" + ], + "value": { + "key1": "value1" + } + }, + "computed_set": { + "sensitive": false, + "type": [ + "list", + "string" + ], + "value": [ + "setval1", + "setval2" + ] + }, + "map": { + "sensitive": false, + "type": [ + "map", + "string" + ], + "value": { + "key": "test", + "test": "test" + } + }, + "set": { + "sensitive": false, + "type": [ + "list", + "string" + ], + "value": [ + "test1", + "test2" + ] + } + }, + "resources": [ + { + "mode": "managed", + "type": "test_resource", + "name": "main", + "each": "list", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "testId" + }, + "private": 
"bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/builtin/providers/tf/testdata/empty.tfstate b/pkg/builtin/providers/tf/testdata/empty.tfstate new file mode 100644 index 00000000000..14610e8188c --- /dev/null +++ b/pkg/builtin/providers/tf/testdata/empty.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 0, + "lineage": "" +} diff --git a/pkg/builtin/providers/tf/testdata/null_outputs.tfstate b/pkg/builtin/providers/tf/testdata/null_outputs.tfstate new file mode 100644 index 00000000000..8b0cfdf2f0b --- /dev/null +++ b/pkg/builtin/providers/tf/testdata/null_outputs.tfstate @@ -0,0 +1,22 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 0, + "lineage": "", + "outputs": { + "map": { + "value": null, + "type": [ + "map", + "string" + ] + }, + "list": { + "value": null, + "type": [ + "list", + "string" + ] + } + } +} diff --git a/pkg/builtin/provisioners/file/resource_provisioner.go b/pkg/builtin/provisioners/file/resource_provisioner.go new file mode 100644 index 00000000000..bcae9dd422d --- /dev/null +++ b/pkg/builtin/provisioners/file/resource_provisioner.go @@ -0,0 +1,211 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/kubegems/opentofu/pkg/communicator" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. 
+ ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "source": { + Type: cty.String, + Optional: true, + }, + + "content": { + Type: cty.String, + Optional: true, + }, + + "destination": { + Type: cty.String, + Required: true, + }, + }, + } + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + + source := cfg.GetAttr("source") + content := cfg.GetAttr("content") + + switch { + case !source.IsNull() && !content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Cannot set both 'source' and 'content'")) + return resp + case source.IsNull() && content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Must provide one of 'source' or 'content'")) + return resp + } + + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + if req.Connection.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + "Missing connection configuration for provisioner.", + )) + return resp + } + + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } + + // Get the source + src, deleteSource, err := getSrc(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } 
+ if deleteSource { + defer os.Remove(src) + } + + // Begin the file copy + dst := req.Config.GetAttr("destination").AsString() + if err := copyFiles(p.ctx, comm, src, dst); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "file provisioner error", + err.Error(), + )) + return resp + } + + return resp +} + +// getSrc returns the file to use as source +func getSrc(v cty.Value) (string, bool, error) { + content := v.GetAttr("content") + src := v.GetAttr("source") + + switch { + case !content.IsNull(): + file, err := os.CreateTemp("", "tf-file-content") + if err != nil { + return "", true, err + } + + if _, err = file.WriteString(content.AsString()); err != nil { + return "", true, err + } + + return file.Name(), true, nil + + case !src.IsNull(): + expansion, err := homedir.Expand(src.AsString()) + return expansion, false, err + + default: + return "", false, errors.New("source and content cannot both be null") + } +} + +// copyFiles is used to copy the files from a source to a destination +func copyFiles(ctx context.Context, comm communicator.Communicator, src, dst string) error { + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(nil) + }) + if err != nil { + return err + } + + // disconnect when the context is canceled, which will close this after + // Apply as well. + go func() { + <-ctx.Done() + comm.Disconnect() + }() + + info, err := os.Stat(src) + if err != nil { + return err + } + + // If we're uploading a directory, short circuit and do that + if info.IsDir() { + if err := comm.UploadDir(dst, src); err != nil { + return fmt.Errorf("Upload failed: %w", err) + } + return nil + } + + // We're uploading a file... 
+ f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + err = comm.Upload(dst, f) + if err != nil { + return fmt.Errorf("Upload failed: %w", err) + } + + return err +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} diff --git a/pkg/builtin/provisioners/file/resource_provisioner_test.go b/pkg/builtin/provisioners/file/resource_provisioner_test.go new file mode 100644 index 00000000000..b76d6365439 --- /dev/null +++ b/pkg/builtin/provisioners/file/resource_provisioner_test.go @@ -0,0 +1,148 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Validate_good_source(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("/tmp/foo"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_good_content(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.StringVal("value to copy"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_good_unknown_variable_value(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.UnknownVal(cty.String), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + 
 Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_bad_not_destination(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +func TestResourceProvider_Validate_bad_no_source(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +func TestResourceProvider_Validate_bad_to_many_src(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), + "content": cty.StringVal("value to copy"), + "destination": cty.StringVal("/tmp/bar"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") + } +} + +// Validate that Stop and Close can be called even when not provisioning. 
+func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_connectionRequired(t *testing.T) { + p := New() + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) + if !resp.Diagnostics.HasErrors() { + t.Fatal("expected error") + } + + got := resp.Diagnostics.Err().Error() + if !strings.Contains(got, "Missing connection") { + t.Fatalf("expected 'Missing connection' error: got %q", got) + } +} + +func TestResourceProvisioner_nullSrcVars(t *testing.T) { + conn := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal(""), + "host": cty.StringVal("localhost"), + }) + config := cty.ObjectVal(map[string]cty.Value{ + "source": cty.NilVal, + "content": cty.NilVal, + "destination": cty.StringVal("/tmp/bar"), + }) + p := New() + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Connection: conn, + Config: config, + }) + if !resp.Diagnostics.HasErrors() { + t.Fatal("expected error") + } + + got := resp.Diagnostics.Err().Error() + if !strings.Contains(got, "file provisioner error: source and content cannot both be null") { + t.Fatalf("file provisioner error: source and content cannot both be null' error: got %q", got) + } +} diff --git a/pkg/builtin/provisioners/local-exec/resource_provisioner.go b/pkg/builtin/provisioners/local-exec/resource_provisioner.go new file mode 100644 index 00000000000..c046524215a --- /dev/null +++ b/pkg/builtin/provisioners/local-exec/resource_provisioner.go @@ -0,0 +1,226 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package localexec + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "runtime" + + "github.com/armon/circbuf" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/mitchellh/go-linereader" + "github.com/zclconf/go-cty/cty" +) + +const ( + // maxBufSize limits how much output we collect from a local + // invocation. This is to prevent TF memory usage from growing + // to an enormous amount due to a faulty process. + maxBufSize = 8 * 1024 +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "command": { + Type: cty.String, + Required: true, + }, + "interpreter": { + Type: cty.List(cty.String), + Optional: true, + }, + "working_dir": { + Type: cty.String, + Optional: true, + }, + "environment": { + Type: cty.Map(cty.String), + Optional: true, + }, + "quiet": { + Type: cty.Bool, + Optional: true, + }, + }, + } + + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + if _, err := p.GetSchema().Provisioner.CoerceValue(req.Config); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid local-exec provisioner configuration", + err.Error(), + )) + } + return resp +} + +func (p *provisioner) ProvisionResource(req 
provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + command := req.Config.GetAttr("command").AsString() + if command == "" { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid local-exec provisioner command", + "The command must be a non-empty string.", + )) + return resp + } + + envVal := req.Config.GetAttr("environment") + var env []string + + if !envVal.IsNull() { + for k, v := range envVal.AsValueMap() { + if !v.IsNull() { + entry := fmt.Sprintf("%s=%s", k, v.AsString()) + env = append(env, entry) + } + } + } + + // Execute the command using a shell + intrVal := req.Config.GetAttr("interpreter") + + var cmdargs []string + if !intrVal.IsNull() && intrVal.LengthInt() > 0 { + for _, v := range intrVal.AsValueSlice() { + if !v.IsNull() { + cmdargs = append(cmdargs, v.AsString()) + } + } + } else { + if runtime.GOOS == "windows" { + cmdargs = []string{"cmd", "/C"} + } else { + cmdargs = []string{"/bin/sh", "-c"} + } + } + + cmdargs = append(cmdargs, command) + + workingdir := "" + if wdVal := req.Config.GetAttr("working_dir"); !wdVal.IsNull() { + workingdir = wdVal.AsString() + } + + // Set up the reader that will read the output from the command. + // We use an os.Pipe so that the *os.File can be passed directly to the + // process, and not rely on goroutines copying the data which may block. + // See golang.org/issue/18874 + pr, pw, err := os.Pipe() + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "local-exec provisioner error", + fmt.Sprintf("Failed to initialize pipe for output: %s", err), + )) + return resp + } + + var cmdEnv []string + cmdEnv = os.Environ() + cmdEnv = append(cmdEnv, env...) + + // Set up the command + cmd := exec.CommandContext(p.ctx, cmdargs[0], cmdargs[1:]...) + cmd.Stderr = pw + cmd.Stdout = pw + // Dir specifies the working directory of the command. 
+ // If Dir is the empty string (this is default), runs the command + // in the calling process's current directory. + cmd.Dir = workingdir + // Env specifies the environment of the command. + // By default will use the calling process's environment + cmd.Env = cmdEnv + + output, _ := circbuf.NewBuffer(maxBufSize) + + // Write everything we read from the pipe to the output buffer too + tee := io.TeeReader(pr, output) + + // copy the teed output to the UI output + copyDoneCh := make(chan struct{}) + go copyUIOutput(req.UIOutput, tee, copyDoneCh) + + // Output what we're about to run + if quietVal := req.Config.GetAttr("quiet"); !quietVal.IsNull() && quietVal.True() { + req.UIOutput.Output("local-exec: Executing: Suppressed by quiet=true") + } else { + req.UIOutput.Output(fmt.Sprintf("Executing: %q", cmdargs)) + } + + // Start the command + err = cmd.Start() + if err == nil { + err = cmd.Wait() + } + + // Close the write-end of the pipe so that the goroutine mirroring output + // ends properly. + pw.Close() + + // Cancelling the command may block the pipe reader if the file descriptor + // was passed to a child process which hasn't closed it. In this case the + // copyOutput goroutine will just hang out until exit. + select { + case <-copyDoneCh: + case <-p.ctx.Done(): + } + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "local-exec provisioner error", + fmt.Sprintf("Error running command '%s': %v. 
Output: %s", command, err, output.Bytes()), + )) + return resp + } + + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} + +func copyUIOutput(o provisioners.UIOutput, r io.Reader, doneCh chan<- struct{}) { + defer close(doneCh) + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/pkg/builtin/provisioners/local-exec/resource_provisioner_test.go b/pkg/builtin/provisioners/local-exec/resource_provisioner_test.go new file mode 100644 index 00000000000..383eb499c9f --- /dev/null +++ b/pkg/builtin/provisioners/local-exec/resource_provisioner_test.go @@ -0,0 +1,271 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package localexec + +import ( + "fmt" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Apply(t *testing.T) { + defer os.Remove("test_out") + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo foo > test_out"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatalf("err: %v", resp.Diagnostics.Err()) + } + + // Check the file + raw, err := os.ReadFile("test_out") + if err != nil { + t.Fatalf("err: %v", err) + } + + actual := strings.TrimSpace(string(raw)) + expected := "foo" + if actual != expected { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceProvider_stop(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := 
schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + // bash/zsh/ksh will exec a single command in the same process. This + // makes certain there's a subprocess in the shell. + "command": cty.StringVal("sleep 30; sleep 30"), + })) + if err != nil { + t.Fatal(err) + } + + doneCh := make(chan struct{}) + startTime := time.Now() + go func() { + defer close(doneCh) + // The functionality of p.Apply is tested in TestResourceProvider_Apply. + // Because p.Apply is called in a goroutine, trying to t.Fatal() on its + // result would be ignored or would cause a panic if the parent goroutine + // has already completed. + _ = p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + }() + + mustExceed := (50 * time.Millisecond) + select { + case <-doneCh: + t.Fatalf("expected to finish sometime after %s finished in %s", mustExceed, time.Since(startTime)) + case <-time.After(mustExceed): + t.Logf("correctly took longer than %s", mustExceed) + } + + // Stop it + stopTime := time.Now() + p.Stop() + + maxTempl := "expected to finish under %s, finished in %s" + finishWithin := (2 * time.Second) + select { + case <-doneCh: + t.Logf(maxTempl, finishWithin, time.Since(stopTime)) + case <-time.After(finishWithin): + t.Fatalf(maxTempl, finishWithin, time.Since(stopTime)) + } +} + +func TestResourceProvider_ApplyCustomInterpreter(t *testing.T) { + output := cli.NewMockUi() + p := New() + + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "interpreter": cty.ListVal([]cty.Value{cty.StringVal("echo"), cty.StringVal("is")}), + "command": cty.StringVal("not really an interpreter"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := `Executing: 
["echo" "is" "not really an interpreter"] +is not really an interpreter` + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func TestResourceProvider_ApplyCustomWorkingDirectory(t *testing.T) { + testdir := "working_dir_test" + os.Mkdir(testdir, 0755) + defer os.Remove(testdir) + + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + command := "echo `pwd`" + if runtime.GOOS == "windows" { + command = "echo %cd%" + } + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "working_dir": cty.StringVal(testdir), + "command": cty.StringVal(command), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + dir, err := os.Getwd() + if err != nil { + t.Fatalf("err: %v", err) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := "Executing: [\"/bin/sh\" \"-c\" \"echo `pwd`\"]\n" + dir + "/" + testdir + if runtime.GOOS == "windows" { + want = "Executing: [\"cmd\" \"/C\" \"echo %cd%\"]\n" + dir + "\\" + testdir + } + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func TestResourceProvider_ApplyCustomEnv(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + command := "echo $FOO $BAR $BAZ" + if runtime.GOOS == "windows" { + command = "echo %FOO% %BAR% %BAZ%" + } + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal(command), + "environment": cty.MapVal(map[string]cty.Value{ + "FOO": cty.StringVal("BAR"), + "BAR": cty.StringVal("1"), + "BAZ": cty.StringVal("true"), + }), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } 
+ + got := strings.TrimSpace(output.OutputWriter.String()) + + want := "Executing: [\"/bin/sh\" \"-c\" \"echo $FOO $BAR $BAZ\"]\nBAR 1 true" + if runtime.GOOS == "windows" { + want = "Executing: [\"cmd\" \"/C\" \"echo %FOO% %BAR% %BAZ%\"]\nBAR 1 true" + } + + if got != want { + t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +// Validate that Stop can Close can be called even when not provisioning. +func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_nullsInOptionals(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + for i, cfg := range []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "environment": cty.MapVal(map[string]cty.Value{ + "FOO": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "environment": cty.NullVal(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "interpreter": cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "interpreter": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo OK"), + "working_dir": cty.NullVal(cty.String), + }), + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + cfg, err := schema.CoerceValue(cfg) + if err != nil { + t.Fatal(err) + } + + // verifying there are no panics + p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: cfg, + UIOutput: output, + }) + }) + } +} diff --git a/pkg/builtin/provisioners/remote-exec/resource_provisioner.go b/pkg/builtin/provisioners/remote-exec/resource_provisioner.go new file mode 100644 index 00000000000..2025f29fcc9 --- /dev/null +++ b/pkg/builtin/provisioners/remote-exec/resource_provisioner.go @@ -0,0 +1,298 @@ +// Copyright 
(c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remoteexec + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "os" + "strings" + + "github.com/kubegems/opentofu/pkg/communicator" + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/mitchellh/go-linereader" + "github.com/zclconf/go-cty/cty" +) + +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, + } +} + +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} + +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "inline": { + Type: cty.List(cty.String), + Optional: true, + }, + "script": { + Type: cty.String, + Optional: true, + }, + "scripts": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + } + + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid remote-exec provisioner configuration", + err.Error(), + )) + return resp + } + + inline := cfg.GetAttr("inline") + script := cfg.GetAttr("script") + scripts := cfg.GetAttr("scripts") + + set := 0 + if !inline.IsNull() { + set++ + } + if !script.IsNull() { + set++ + } + if 
!scripts.IsNull() { + set++ + } + if set != 1 { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Invalid remote-exec provisioner configuration", + `Only one of "inline", "script", or "scripts" must be set`, + )) + } + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + if req.Connection.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + "Missing connection configuration for provisioner.", + )) + return resp + } + + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + + // Collect the scripts + scripts, err := collectScripts(req.Config) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + for _, s := range scripts { + defer s.Close() + } + + // Copy and execute each script + if err := runScripts(p.ctx, req.UIOutput, comm, scripts); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "remote-exec provisioner error", + err.Error(), + )) + return resp + } + + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} + +// generateScripts takes the configuration and creates a script from each inline config +func generateScripts(inline cty.Value) ([]string, error) { + var lines []string + for _, l := range inline.AsValueSlice() { + if l.IsNull() { + return nil, errors.New("invalid null string in 'scripts'") + } + + s := l.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 
'scripts'") + } + lines = append(lines, s) + } + lines = append(lines, "") + + return []string{strings.Join(lines, "\n")}, nil +} + +// collectScripts is used to collect all the scripts we need +// to execute in preparation for copying them. +func collectScripts(v cty.Value) ([]io.ReadCloser, error) { + // Check if inline + if inline := v.GetAttr("inline"); !inline.IsNull() { + scripts, err := generateScripts(inline) + if err != nil { + return nil, err + } + + var r []io.ReadCloser + for _, script := range scripts { + r = append(r, io.NopCloser(bytes.NewReader([]byte(script)))) + } + + return r, nil + } + + // Collect scripts + var scripts []string + if script := v.GetAttr("script"); !script.IsNull() { + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") + } + scripts = append(scripts, s) + } + + if scriptList := v.GetAttr("scripts"); !scriptList.IsNull() { + for _, script := range scriptList.AsValueSlice() { + if script.IsNull() { + return nil, errors.New("invalid null string in 'script'") + } + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") + } + scripts = append(scripts, s) + } + } + + // Open all the scripts + var fhs []io.ReadCloser + for _, s := range scripts { + fh, err := os.Open(s) + if err != nil { + for _, fh := range fhs { + fh.Close() + } + return nil, fmt.Errorf("Failed to open script '%s': %w", s, err) + } + fhs = append(fhs, fh) + } + + // Done, return the file handles + return fhs, nil +} + +// runScripts is used to copy and execute a set of scripts +func runScripts(ctx context.Context, o provisioners.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(o) + }) + if err != nil { + return err + } + + // Wait for the 
context to end and then disconnect + go func() { + <-ctx.Done() + comm.Disconnect() + }() + + for _, script := range scripts { + var cmd *remote.Cmd + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + defer outW.Close() + defer errW.Close() + + go copyUIOutput(o, outR) + go copyUIOutput(o, errR) + + remotePath := comm.ScriptPath() + + if err := comm.UploadScript(remotePath, script); err != nil { + return fmt.Errorf("Failed to upload script: %w", err) + } + + cmd = &remote.Cmd{ + Command: remotePath, + Stdout: outW, + Stderr: errW, + } + if err := comm.Start(cmd); err != nil { + return fmt.Errorf("Error starting script: %w", err) + } + + if err := cmd.Wait(); err != nil { + return err + } + + // Upload a blank follow up file in the same path to prevent residual + // script contents from remaining on remote machine + empty := bytes.NewReader([]byte("")) + if err := comm.Upload(remotePath, empty); err != nil { + // This feature is best-effort. + log.Printf("[WARN] Failed to upload empty follow up script: %v", err) + } + } + + return nil +} + +func copyUIOutput(o provisioners.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/pkg/builtin/provisioners/remote-exec/resource_provisioner_test.go b/pkg/builtin/provisioners/remote-exec/resource_provisioner_test.go new file mode 100644 index 00000000000..de3c89ed1df --- /dev/null +++ b/pkg/builtin/provisioners/remote-exec/resource_provisioner_test.go @@ -0,0 +1,333 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remoteexec + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "testing" + "time" + + "strings" + + "github.com/kubegems/opentofu/pkg/communicator" + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceProvider_Validate_good(t *testing.T) { + c := cty.ObjectVal(map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo foo")}), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) + } +} + +func TestResourceProvider_Validate_bad(t *testing.T) { + c := cty.ObjectVal(map[string]cty.Value{ + "invalid": cty.StringVal("nope"), + }) + + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if !resp.Diagnostics.HasErrors() { + t.Fatalf("Should have errors") + } +} + +var expectedScriptOut = `cd /tmp +wget http://foobar +exit 0 +` + +func TestResourceProvider_generateScript(t *testing.T) { + inline := cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }) + + out, err := generateScripts(inline) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(out) != 1 { + t.Fatal("expected 1 out") + } + + if out[0] != expectedScriptOut { + t.Fatalf("bad: %v", out) + } +} + +func TestResourceProvider_generateScriptEmptyInline(t *testing.T) { + inline := cty.ListVal([]cty.Value{cty.StringVal("")}) + + _, err := generateScripts(inline) + if err == nil { + t.Fatal("expected error, got none") + } + + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("expected empty string error, got: %s", err) + } +} + +func TestResourceProvider_CollectScripts_inline(t *testing.T) { + conf := map[string]cty.Value{ + 
"inline": cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }), + } + + scripts, err := collectScripts(cty.ObjectVal(conf)) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 1 { + t.Fatalf("bad: %v", scripts) + } + + var out bytes.Buffer + _, err = io.Copy(&out, scripts[0]) + if err != nil { + t.Fatalf("err: %v", err) + } + + if out.String() != expectedScriptOut { + t.Fatalf("bad: %v", out.String()) + } +} + +func TestResourceProvider_CollectScripts_script(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + t.Fatal(err) + } + + scripts, err := collectScripts(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 1 { + t.Fatalf("bad: %v", scripts) + } + + var out bytes.Buffer + _, err = io.Copy(&out, scripts[0]) + if err != nil { + t.Fatalf("err: %v", err) + } + + expectedOutput := normaliseNewlines(expectedScriptOut) + actualOutput := normaliseNewlines(out.String()) + if actualOutput != expectedOutput { + t.Fatalf("bad: %v", out.String()) + } +} + +func TestResourceProvider_CollectScripts_scripts(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + log.Fatal(err) + } + + scripts, err := collectScripts(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(scripts) != 3 { + t.Fatalf("bad: %v", scripts) + } + + for idx := range scripts { + var out bytes.Buffer + _, err = io.Copy(&out, scripts[idx]) + if err != nil { + t.Fatalf("err: %v", err) + } + + expectedOutput := 
normaliseNewlines(expectedScriptOut) + actualOutput := normaliseNewlines(out.String()) + if actualOutput != expectedOutput { + t.Fatalf("bad: %v", out.String()) + } + } +} + +func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) { + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{cty.StringVal("")}), + })) + if err != nil { + t.Fatal(err) + } + + _, err = collectScripts(conf) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("Expected empty string error, got: %s", err) + } +} + +func TestProvisionerTimeout(t *testing.T) { + o := cli.NewMockUi() + c := new(communicator.MockCommunicator) + + disconnected := make(chan struct{}) + c.DisconnectFunc = func() error { + close(disconnected) + return nil + } + + completed := make(chan struct{}) + c.CommandFunc = func(cmd *remote.Cmd) error { + defer close(completed) + cmd.Init() + time.Sleep(2 * time.Second) + cmd.SetExitStatus(0, nil) + return nil + } + c.ConnTimeout = time.Second + c.UploadScripts = map[string]string{"hello": "echo hello"} + c.RemoteScriptPath = "hello" + + conf := map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo hello")}), + } + + scripts, err := collectScripts(cty.ObjectVal(conf)) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + done := make(chan struct{}) + + var runErr error + go func() { + defer close(done) + runErr = runScripts(ctx, o, c, scripts) + }() + + select { + case <-disconnected: + t.Fatal("communicator disconnected before command completed") + case <-completed: + } + + <-done + if runErr != nil { + t.Fatal(err) + } +} + +// Validate that Stop can Close can be called even when not provisioning. 
+func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} + +func TestResourceProvisioner_connectionRequired(t *testing.T) { + p := New() + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{}) + if !resp.Diagnostics.HasErrors() { + t.Fatal("expected error") + } + + got := resp.Diagnostics.Err().Error() + if !strings.Contains(got, "Missing connection") { + t.Fatalf("expected 'Missing connection' error: got %q", got) + } +} + +func TestResourceProvisioner_nullsInOptionals(t *testing.T) { + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + for i, cfg := range []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "script": cty.StringVal("echo"), + "inline": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "script": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.NullVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + } { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + cfg, err := schema.CoerceValue(cfg) + if err != nil { + t.Fatal(err) + } + + // verifying there are no panics + p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: cfg, + UIOutput: output, + }) + }) + } +} + +func normaliseNewlines(input string) string { + return strings.ReplaceAll(input, "\r\n", "\n") +} diff --git a/pkg/builtin/provisioners/remote-exec/testdata/script1.sh b/pkg/builtin/provisioners/remote-exec/testdata/script1.sh new file mode 100755 index 00000000000..81b3d5af86a --- /dev/null +++ b/pkg/builtin/provisioners/remote-exec/testdata/script1.sh @@ -0,0 +1,3 @@ +cd /tmp +wget http://foobar +exit 0 diff --git a/pkg/checks/doc.go b/pkg/checks/doc.go new file mode 100644 index 00000000000..15c51a12529 --- 
/dev/null +++ b/pkg/checks/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package checks contains the models for representing various kinds of +// declarative condition checks that can be defined in a OpenTofu module +// and then evaluated and reported by OpenTofu Core during plan and apply +// operations. +package checks diff --git a/pkg/checks/state.go b/pkg/checks/state.go new file mode 100644 index 00000000000..aa94026867b --- /dev/null +++ b/pkg/checks/state.go @@ -0,0 +1,294 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package checks + +import ( + "fmt" + "sort" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// State is a container for state tracking of all of the checks declared in +// a particular OpenTofu configuration and their current statuses. +// +// A State object is mutable during plan and apply operations but should +// otherwise be treated as a read-only snapshot of the status of checks +// at a particular moment. +// +// The checks State tracks a few different concepts: +// - configuration objects: items in the configuration which statically +// declare some checks associated with zero or more checkable objects. +// - checkable objects: dynamically-determined objects that are each +// associated with one configuration object. +// - checks: a single check that is declared as part of a configuration +// object and then resolved once for each of its associated checkable +// objects. +// - check statuses: the current state of a particular check associated +// with a particular checkable object. +// +// This container type is concurrency-safe for both reads and writes through +// its various methods. 
+type State struct { + mu sync.Mutex + statuses addrs.Map[addrs.ConfigCheckable, *configCheckableState] + failureMsgs addrs.Map[addrs.CheckRule, string] +} + +// configCheckableState is an internal part of type State that represents +// the evaluation status for a particular addrs.ConfigCheckable address. +// +// Its initial state, at the beginning of a run, is that it doesn't even know +// how many checkable objects will be dynamically-declared yet. OpenTofu Core +// will notify the State object of the associated Checkables once +// it has decided the appropriate expansion of that configuration object, +// and then will gradually report the results of each check once the graph +// walk reaches it. +// +// This must be accessed only while holding the mutex inside the associated +// State object. +type configCheckableState struct { + // checkTypes captures the expected number of checks of each type + // associated with object declared by this configuration construct. Since + // checks are statically declared (even though the checkable objects + // aren't) we can compute this only from the configuration. + checkTypes map[addrs.CheckRuleType]int + + // objects represents the set of dynamic checkable objects associated + // with this configuration construct. This is initially nil to represent + // that we don't know the objects yet, and is replaced by a non-nil map + // once OpenTofu Core reports the expansion of this configuration + // construct. + // + // The leaf Status values will initially be StatusUnknown + // and then gradually updated by OpenTofu Core as it visits the + // individual checkable objects and reports their status. + objects addrs.Map[addrs.Checkable, map[addrs.CheckRuleType][]Status] +} + +// NOTE: For the "Report"-prefixed methods that we use to gradually update +// the structure with results during a plan or apply operation, see the +// state_report.go file also in this package. 
+ +// NewState returns a new State object representing the check statuses of +// objects declared in the given configuration. +// +// The configuration determines which configuration objects and associated +// checks we'll be expecting to see, so that we can seed their statuses as +// all unknown until we see affirmative reports sent by the Report-prefixed +// methods on Checks. +func NewState(config *configs.Config) *State { + return &State{ + statuses: initialStatuses(config), + } +} + +// ConfigHasChecks returns true if and only if the given address refers to +// a configuration object that this State object is expecting to receive +// statuses for. +// +// Other methods of Checks will typically panic if given a config address +// that would not have returned true from ConfigHasChecks. +func (c *State) ConfigHasChecks(addr addrs.ConfigCheckable) bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.statuses.Has(addr) +} + +// AllConfigAddrs returns all of the addresses of all configuration objects +// that could potentially produce checkable objects at runtime. +// +// This is a good starting point for reporting on the outcome of all of the +// configured checks at the configuration level of granularity, e.g. for +// automated testing reports where we want to report the status of all +// configured checks even if the graph walk aborted before we reached any +// of their objects. +func (c *State) AllConfigAddrs() addrs.Set[addrs.ConfigCheckable] { + c.mu.Lock() + defer c.mu.Unlock() + return c.statuses.Keys() +} + +// ObjectAddrs returns the addresses of individual checkable objects belonging +// to the configuration object with the given address. +// +// This will panic if the given address isn't a known configuration object +// that has checks. 
+func (c *State) ObjectAddrs(configAddr addrs.ConfigCheckable) addrs.Set[addrs.Checkable] { + c.mu.Lock() + defer c.mu.Unlock() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("unknown configuration object %s", configAddr)) + } + + ret := addrs.MakeSet[addrs.Checkable]() + for _, elem := range st.objects.Elems { + ret.Add(elem.Key) + } + return ret + +} + +// AggregateCheckStatus returns a summarization of all of the check results +// for a particular configuration object into a single status. +// +// The given address must refer to an object within the configuration that +// this Checks was instantiated from, or this method will panic. +func (c *State) AggregateCheckStatus(addr addrs.ConfigCheckable) Status { + c.mu.Lock() + defer c.mu.Unlock() + + st, ok := c.statuses.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown configuration object %s", addr)) + } + + if st.objects.Elems == nil { + // If we don't even know how many objects we have for this + // configuration construct then that summarizes as unknown. + // (Note: this is different than Elems being a non-nil empty map, + // which means that we know there are zero objects and therefore + // the aggregate result will fall through to pass below.) + return StatusUnknown + } + + // Otherwise, our result depends on how many of our known objects are + // in each status. + errorCount := 0 + failCount := 0 + unknownCount := 0 + + for _, objects := range st.objects.Elems { + for _, checks := range objects.Value { + for _, status := range checks { + switch status { + case StatusPass: + // ok + case StatusFail: + failCount++ + case StatusError: + errorCount++ + default: + unknownCount++ + } + } + } + } + + return summarizeCheckStatuses(errorCount, failCount, unknownCount) +} + +// ObjectCheckStatus returns a summarization of all of the check results +// for a particular checkable object into a single status. 
+// +// The given address must refer to a checkable object that OpenTofu Core +// previously reported while doing a graph walk, or this method will panic. +func (c *State) ObjectCheckStatus(addr addrs.Checkable) Status { + c.mu.Lock() + defer c.mu.Unlock() + + configAddr := addr.ConfigCheckable() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + if st.objects.Elems == nil { + panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) + } + checks, ok := st.objects.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + + errorCount := 0 + failCount := 0 + unknownCount := 0 + for _, statuses := range checks { + for _, status := range statuses { + switch status { + case StatusPass: + // ok + case StatusFail: + failCount++ + case StatusError: + errorCount++ + default: + unknownCount++ + } + } + } + return summarizeCheckStatuses(errorCount, failCount, unknownCount) +} + +// ObjectFailureMessages returns the zero or more failure messages reported +// for the object with the given address. +// +// Failure messages are recorded only for checks whose status is StatusFail, +// but since this aggregates together the results of all of the checks +// on the given object it's possible for there to be a mixture of failures +// and errors at the same time, which would aggregate as StatusError in +// ObjectCheckStatus's result because errors are defined as "stronger" +// than failures. 
+func (c *State) ObjectFailureMessages(addr addrs.Checkable) []string { + var ret []string + + configAddr := addr.ConfigCheckable() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + if st.objects.Elems == nil { + panic(fmt.Sprintf("request for status of %s before establishing the checkable objects for %s", addr, configAddr)) + } + checksByType, ok := st.objects.GetOk(addr) + if !ok { + panic(fmt.Sprintf("request for status of unknown object %s", addr)) + } + + for checkType, checks := range checksByType { + for i, status := range checks { + if status == StatusFail { + checkAddr := addrs.NewCheckRule(addr, checkType, i) + msg := c.failureMsgs.Get(checkAddr) + if msg != "" { + ret = append(ret, msg) + } + } + } + } + + // We always return the messages in a lexical sort order just so that + // it'll be consistent between runs if we still have the same problems. + sort.Strings(ret) + + return ret +} + +func summarizeCheckStatuses(errorCount, failCount, unknownCount int) Status { + switch { + case errorCount > 0: + // If we saw any errors then we'll treat the whole thing as errored. + return StatusError + case failCount > 0: + // If anything failed then this whole configuration construct failed. + return StatusFail + case unknownCount > 0: + // If nothing failed but we still have unknowns then our outcome isn't + // known yet. + return StatusUnknown + default: + // If we have no failures and no unknowns then either we have all + // passes or no checkable objects at all, both of which summarize as + // a pass. + return StatusPass + } +} diff --git a/pkg/checks/state_init.go b/pkg/checks/state_init.go new file mode 100644 index 00000000000..a75cbebdd2a --- /dev/null +++ b/pkg/checks/state_init.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package checks + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +func initialStatuses(cfg *configs.Config) addrs.Map[addrs.ConfigCheckable, *configCheckableState] { + ret := addrs.MakeMap[addrs.ConfigCheckable, *configCheckableState]() + if cfg == nil { + // This should not happen in normal use, but can arise in some + // unit tests that are not working with a full configuration and + // don't care about checks. + return ret + } + + collectInitialStatuses(ret, cfg) + + return ret +} + +func collectInitialStatuses(into addrs.Map[addrs.ConfigCheckable, *configCheckableState], cfg *configs.Config) { + moduleAddr := cfg.Path + + for _, rc := range cfg.Module.ManagedResources { + addr := rc.Addr().InModule(moduleAddr) + collectInitialStatusForResource(into, addr, rc) + } + for _, rc := range cfg.Module.DataResources { + addr := rc.Addr().InModule(moduleAddr) + collectInitialStatusForResource(into, addr, rc) + } + + for _, oc := range cfg.Module.Outputs { + addr := oc.Addr().InModule(moduleAddr) + + ct := len(oc.Preconditions) + if ct == 0 { + // We just ignore output values that don't declare any checks. 
+ continue + } + + st := &configCheckableState{} + + st.checkTypes = map[addrs.CheckRuleType]int{ + addrs.OutputPrecondition: ct, + } + + into.Put(addr, st) + } + + for _, c := range cfg.Module.Checks { + addr := c.Addr().InModule(moduleAddr) + + st := &configCheckableState{ + checkTypes: map[addrs.CheckRuleType]int{ + addrs.CheckAssertion: len(c.Asserts), + }, + } + + if c.DataResource != nil { + st.checkTypes[addrs.CheckDataResource] = 1 + } + + into.Put(addr, st) + } + + for _, v := range cfg.Module.Variables { + addr := v.Addr().InModule(moduleAddr) + + vs := len(v.Validations) + if vs == 0 { + continue + } + + st := &configCheckableState{} + st.checkTypes = map[addrs.CheckRuleType]int{ + addrs.InputValidation: vs, + } + + into.Put(addr, st) + } + + // Must also visit child modules to collect everything + for _, child := range cfg.Children { + collectInitialStatuses(into, child) + } +} + +func collectInitialStatusForResource(into addrs.Map[addrs.ConfigCheckable, *configCheckableState], addr addrs.ConfigResource, rc *configs.Resource) { + if (len(rc.Preconditions) + len(rc.Postconditions)) == 0 { + // Don't bother with any resource that doesn't have at least + // one condition. + return + } + + st := &configCheckableState{ + checkTypes: make(map[addrs.CheckRuleType]int), + } + + if ct := len(rc.Preconditions); ct > 0 { + st.checkTypes[addrs.ResourcePrecondition] = ct + } + if ct := len(rc.Postconditions); ct > 0 { + st.checkTypes[addrs.ResourcePostcondition] = ct + } + + into.Put(addr, st) +} diff --git a/pkg/checks/state_report.go b/pkg/checks/state_report.go new file mode 100644 index 00000000000..4698a9afee9 --- /dev/null +++ b/pkg/checks/state_report.go @@ -0,0 +1,121 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package checks + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// These are the "Report"-prefixed methods of Checks used by OpenTofu Core +// to gradually signal the results of checks during a plan or apply operation. + +// ReportCheckableObjects is the interface by which OpenTofu Core should +// tell the State object which specific checkable objects were declared +// by the given configuration object. +// +// This method will panic if the given configuration address isn't one known +// by this Checks to have pending checks, and if any of the given object +// addresses don't belong to the given configuration address. +func (c *State) ReportCheckableObjects(configAddr addrs.ConfigCheckable, objectAddrs addrs.Set[addrs.Checkable]) { + c.mu.Lock() + defer c.mu.Unlock() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("checkable objects report for unknown configuration object %s", configAddr)) + } + if st.objects.Elems != nil { + // Can only report checkable objects once per configuration object + // This is not a problem as the result is already cached. + return + } + + // At this point we pre-populate all of the check results as StatusUnknown, + // so that even if we never hear from OpenTofu Core again we'll still + // remember that these results were all pending. 
+ st.objects = addrs.MakeMap[addrs.Checkable, map[addrs.CheckRuleType][]Status]() + for _, objectAddr := range objectAddrs { + if gotConfigAddr := objectAddr.ConfigCheckable(); !addrs.Equivalent(configAddr, gotConfigAddr) { + // All of the given object addresses must belong to the specified configuration address + panic(fmt.Sprintf("%s belongs to %s, not %s", objectAddr, gotConfigAddr, configAddr)) + } + + checks := make(map[addrs.CheckRuleType][]Status, len(st.checkTypes)) + for checkType, count := range st.checkTypes { + // NOTE: This is intentionally a slice of count of the zero value + // of Status, which is StatusUnknown to represent that we don't + // yet have a report for that particular check. + checks[checkType] = make([]Status, count) + } + + st.objects.Put(objectAddr, checks) + } +} + +// ReportCheckResult is the interface by which OpenTofu Core should tell the +// State object the result of a specific check for an object that was +// previously registered with ReportCheckableObjects. +// +// If the given object address doesn't match a previously-reported object, +// or if the check index is out of bounds for the number of checks expected +// of the given type, this method will panic to indicate a bug in the caller. +// +// This method will also panic if the specified check already had a known +// status; each check should have its result reported only once. +func (c *State) ReportCheckResult(objectAddr addrs.Checkable, checkType addrs.CheckRuleType, index int, status Status) { + c.mu.Lock() + defer c.mu.Unlock() + + c.reportCheckResult(objectAddr, checkType, index, status) +} + +// ReportCheckFailure is a more specialized version of ReportCheckResult which +// captures a failure outcome in particular, giving the opportunity to capture +// an author-specified error message string along with the failure. +// +// This always records the given check as having StatusFail. 
Don't use this for +// situations where the check condition was itself invalid, because that +// should be represented by StatusError instead, and the error signalled via +// diagnostics as normal. +func (c *State) ReportCheckFailure(objectAddr addrs.Checkable, checkType addrs.CheckRuleType, index int, errorMessage string) { + c.mu.Lock() + defer c.mu.Unlock() + + c.reportCheckResult(objectAddr, checkType, index, StatusFail) + if c.failureMsgs.Elems == nil { + c.failureMsgs = addrs.MakeMap[addrs.CheckRule, string]() + } + checkAddr := addrs.NewCheckRule(objectAddr, checkType, index) + c.failureMsgs.Put(checkAddr, errorMessage) +} + +// reportCheckResult is shared between both ReportCheckResult and +// ReportCheckFailure, and assumes its caller already holds the mutex. +func (c *State) reportCheckResult(objectAddr addrs.Checkable, checkType addrs.CheckRuleType, index int, status Status) { + configAddr := objectAddr.ConfigCheckable() + + st, ok := c.statuses.GetOk(configAddr) + if !ok { + panic(fmt.Sprintf("checkable object status report for unknown configuration object %s", configAddr)) + } + + checks, ok := st.objects.GetOk(objectAddr) + if !ok { + panic(fmt.Sprintf("checkable object status report for unexpected checkable object %s", objectAddr)) + } + + if index >= len(checks[checkType]) { + panic(fmt.Sprintf("%s index %d out of range for %s", checkType, index, objectAddr)) + } + if checks[checkType][index] != StatusUnknown { + panic(fmt.Sprintf("duplicate status report for %s %s %d", objectAddr, checkType, index)) + } + + checks[checkType][index] = status + +} diff --git a/pkg/checks/state_test.go b/pkg/checks/state_test.go new file mode 100644 index 00000000000..e2698612414 --- /dev/null +++ b/pkg/checks/state_test.go @@ -0,0 +1,231 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package checks + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/initwd" +) + +func TestChecksHappyPath(t *testing.T) { + const fixtureDir = "testdata/happypath" + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, nil) + _, instDiags := inst.InstallModules(context.Background(), fixtureDir, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + ///////////////////////////////////////////////////////////////////////// + + cfg, hclDiags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + if hclDiags.HasErrors() { + t.Fatalf("invalid configuration: %s", hclDiags.Error()) + } + + resourceA := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "a", + }.InModule(addrs.RootModule) + resourceNoChecks := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "no_checks", + }.InModule(addrs.RootModule) + resourceNonExist := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "nonexist", + }.InModule(addrs.RootModule) + rootOutput := addrs.OutputValue{ + Name: "a", + }.InModule(addrs.RootModule) + moduleChild := addrs.RootModule.Child("child") + resourceB := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "b", + }.InModule(moduleChild) + resourceC := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "c", + }.InModule(moduleChild) + childOutput := 
addrs.OutputValue{ + Name: "b", + }.InModule(moduleChild) + checkBlock := addrs.Check{ + Name: "check", + }.InModule(addrs.RootModule) + + // First some consistency checks to make sure our configuration is the + // shape we are relying on it to be. + if addr := resourceA; cfg.Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceB; cfg.Children["child"].Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceNoChecks; cfg.Module.ResourceByAddr(addr.Resource) == nil { + t.Fatalf("configuration does not include %s", addr) + } + if addr := resourceNonExist; cfg.Module.ResourceByAddr(addr.Resource) != nil { + t.Fatalf("configuration includes %s, which is not supposed to exist", addr) + } + if addr := checkBlock; cfg.Module.Checks[addr.Check.Name] == nil { + t.Fatalf("configuration does not include %s", addr) + } + + ///////////////////////////////////////////////////////////////////////// + + checks := NewState(cfg) + + missing := 0 + if addr := resourceA; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceB; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceC; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := rootOutput; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := childOutput; !checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if addr := resourceNoChecks; checks.ConfigHasChecks(addr) { + t.Errorf("checks detected for %s, even though it has none", addr) + } + if addr := resourceNonExist; checks.ConfigHasChecks(addr) { + t.Errorf("checks detected for %s, even though it doesn't exist", addr) + } + if addr := checkBlock; 
!checks.ConfigHasChecks(addr) { + t.Errorf("checks not detected for %s", addr) + missing++ + } + if missing > 0 { + t.Fatalf("missing some configuration objects we'd need for subsequent testing") + } + + ///////////////////////////////////////////////////////////////////////// + + // Everything should start with status unknown. + + { + wantConfigAddrs := addrs.MakeSet[addrs.ConfigCheckable]( + resourceA, + resourceB, + resourceC, + rootOutput, + childOutput, + checkBlock, + ) + gotConfigAddrs := checks.AllConfigAddrs() + if diff := cmp.Diff(wantConfigAddrs, gotConfigAddrs); diff != "" { + t.Errorf("wrong detected config addresses\n%s", diff) + } + + for _, configAddr := range gotConfigAddrs { + if got, want := checks.AggregateCheckStatus(configAddr), StatusUnknown; got != want { + t.Errorf("incorrect initial aggregate check status for %s: %s, but want %s", configAddr, got, want) + } + } + } + + ///////////////////////////////////////////////////////////////////////// + + // The following are steps that would normally be done by OpenTofu Core + // as part of visiting checkable objects during the graph walk. We're + // simulating a likely sequence of calls here for testing purposes, but + // OpenTofu Core won't necessarily visit all of these in exactly the + // same order every time and so this is just one possible valid ordering + // of calls. 
+ + resourceInstA := resourceA.Resource.Absolute(addrs.RootModuleInstance).Instance(addrs.NoKey) + rootOutputInst := rootOutput.OutputValue.Absolute(addrs.RootModuleInstance) + moduleChildInst := addrs.RootModuleInstance.Child("child", addrs.NoKey) + resourceInstB := resourceB.Resource.Absolute(moduleChildInst).Instance(addrs.NoKey) + resourceInstC0 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(0)) + resourceInstC1 := resourceC.Resource.Absolute(moduleChildInst).Instance(addrs.IntKey(1)) + childOutputInst := childOutput.OutputValue.Absolute(moduleChildInst) + checkBlockInst := checkBlock.Check.Absolute(addrs.RootModuleInstance) + + checks.ReportCheckableObjects(resourceA, addrs.MakeSet[addrs.Checkable](resourceInstA)) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 0, StatusPass) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePrecondition, 1, StatusPass) + checks.ReportCheckResult(resourceInstA, addrs.ResourcePostcondition, 0, StatusPass) + + checks.ReportCheckableObjects(resourceB, addrs.MakeSet[addrs.Checkable](resourceInstB)) + checks.ReportCheckResult(resourceInstB, addrs.ResourcePrecondition, 0, StatusPass) + + checks.ReportCheckableObjects(resourceC, addrs.MakeSet[addrs.Checkable](resourceInstC0, resourceInstC1)) + checks.ReportCheckResult(resourceInstC0, addrs.ResourcePostcondition, 0, StatusPass) + checks.ReportCheckResult(resourceInstC1, addrs.ResourcePostcondition, 0, StatusPass) + + checks.ReportCheckableObjects(childOutput, addrs.MakeSet[addrs.Checkable](childOutputInst)) + checks.ReportCheckResult(childOutputInst, addrs.OutputPrecondition, 0, StatusPass) + + checks.ReportCheckableObjects(rootOutput, addrs.MakeSet[addrs.Checkable](rootOutputInst)) + checks.ReportCheckResult(rootOutputInst, addrs.OutputPrecondition, 0, StatusPass) + + checks.ReportCheckableObjects(checkBlock, addrs.MakeSet[addrs.Checkable](checkBlockInst)) + checks.ReportCheckResult(checkBlockInst, addrs.CheckAssertion, 0, 
StatusPass) + + ///////////////////////////////////////////////////////////////////////// + + // This "section" is simulating what we might do to report the results + // of the checks after a run completes. + + { + configCount := 0 + for _, configAddr := range checks.AllConfigAddrs() { + configCount++ + if got, want := checks.AggregateCheckStatus(configAddr), StatusPass; got != want { + t.Errorf("incorrect final aggregate check status for %s: %s, but want %s", configAddr, got, want) + } + } + if got, want := configCount, 6; got != want { + t.Errorf("incorrect number of known config addresses %d; want %d", got, want) + } + } + + { + objAddrs := addrs.MakeSet[addrs.Checkable]( + resourceInstA, + rootOutputInst, + resourceInstB, + resourceInstC0, + resourceInstC1, + childOutputInst, + checkBlockInst, + ) + for _, addr := range objAddrs { + if got, want := checks.ObjectCheckStatus(addr), StatusPass; got != want { + t.Errorf("incorrect final check status for object %s: %s, but want %s", addr, got, want) + } + } + } +} diff --git a/pkg/checks/status.go b/pkg/checks/status.go new file mode 100644 index 00000000000..c8aee18c47b --- /dev/null +++ b/pkg/checks/status.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package checks + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// Status represents the status of an individual check associated with a +// checkable object. +type Status rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Status + +const ( + // StatusUnknown represents that there is not yet a conclusive result + // for the check, either because we haven't yet visited its associated + // object or because the check condition itself depends on a value not + // yet known during planning. + StatusUnknown Status = 0 + // NOTE: Our implementation relies on StatusUnknown being the zero value + // of Status. 
+ + // StatusPass represents that OpenTofu Core has evaluated the check's + // condition and it returned true, indicating success. + StatusPass Status = 'P' + + // StatusFail represents that OpenTofu Core has evaluated the check's + // condition and it returned false, indicating failure. + StatusFail Status = 'F' + + // StatusError represents that OpenTofu Core tried to evaluate the check's + // condition but encountered an error while evaluating the check expression. + // + // This is different than StatusFail because StatusFail indicates that + // the condition was valid and returned false, whereas StatusError + // indicates that the condition was not valid at all. + StatusError Status = 'E' +) + +// StatusForCtyValue returns the Status value corresponding to the given +// cty Value, which must be one of either cty.True, cty.False, or +// cty.UnknownVal(cty.Bool) or else this function will panic. +// +// The current behavior of this function is: +// +// cty.True StatusPass +// cty.False StatusFail +// cty.UnknownVal(cty.Bool) StatusUnknown +// +// Any other input will panic. Note that there's no value that can produce +// StatusError, because in case of a condition error there will not typically +// be a result value at all. +func StatusForCtyValue(v cty.Value) Status { + if !v.Type().Equals(cty.Bool) { + panic(fmt.Sprintf("cannot use %s as check status", v.Type().FriendlyName())) + } + if v.IsNull() { + panic("cannot use null as check status") + } + + switch { + case v == cty.True: + return StatusPass + case v == cty.False: + return StatusFail + case !v.IsKnown(): + return StatusUnknown + default: + // Should be impossible to get here unless something particularly + // weird is going on, like a marked condition result. 
+ panic(fmt.Sprintf("cannot use %#v as check status", v)) + } +} diff --git a/pkg/checks/status_string.go b/pkg/checks/status_string.go new file mode 100644 index 00000000000..3cee235aa93 --- /dev/null +++ b/pkg/checks/status_string.go @@ -0,0 +1,39 @@ +// Code generated by "stringer -type=Status"; DO NOT EDIT. + +package checks + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StatusUnknown-0] + _ = x[StatusPass-80] + _ = x[StatusFail-70] + _ = x[StatusError-69] +} + +const ( + _Status_name_0 = "StatusUnknown" + _Status_name_1 = "StatusErrorStatusFail" + _Status_name_2 = "StatusPass" +) + +var ( + _Status_index_1 = [...]uint8{0, 11, 21} +) + +func (i Status) String() string { + switch { + case i == 0: + return _Status_name_0 + case 69 <= i && i <= 70: + i -= 69 + return _Status_name_1[_Status_index_1[i]:_Status_index_1[i+1]] + case i == 80: + return _Status_name_2 + default: + return "Status(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/checks/testdata/happypath/checks-happypath.tf b/pkg/checks/testdata/happypath/checks-happypath.tf new file mode 100644 index 00000000000..a9cd055b7ee --- /dev/null +++ b/pkg/checks/testdata/happypath/checks-happypath.tf @@ -0,0 +1,39 @@ +resource "null_resource" "a" { + lifecycle { + precondition { + condition = null_resource.no_checks == "" + error_message = "Impossible." + } + precondition { + condition = null_resource.no_checks == "" + error_message = "Also impossible." + } + postcondition { + condition = null_resource.no_checks == "" + error_message = "Definitely not possible." + } + } +} + +resource "null_resource" "no_checks" { +} + +module "child" { + source = "./child" +} + +output "a" { + value = null_resource.a.id + + precondition { + condition = null_resource.a.id != "" + error_message = "A has no id." 
+ } +} + +check "check" { + assert { + condition = null_resource.a.id != "" + error_message = "check block: A has no id" + } +} diff --git a/pkg/checks/testdata/happypath/child/checks-happypath-child.tf b/pkg/checks/testdata/happypath/child/checks-happypath-child.tf new file mode 100644 index 00000000000..d067bc2aa03 --- /dev/null +++ b/pkg/checks/testdata/happypath/child/checks-happypath-child.tf @@ -0,0 +1,29 @@ +resource "null_resource" "b" { + lifecycle { + precondition { + condition = self.id == "" + error_message = "Impossible." + } + } +} + +resource "null_resource" "c" { + count = 2 + + lifecycle { + postcondition { + condition = self.id == "" + error_message = "Impossible." + } + } +} + +output "b" { + value = null_resource.b.id + + precondition { + condition = null_resource.b.id != "" + error_message = "B has no id." + } +} + diff --git a/pkg/cloud/backend.go b/pkg/cloud/backend.go new file mode 100644 index 00000000000..00b85b6d7ff --- /dev/null +++ b/pkg/cloud/backend.go @@ -0,0 +1,1330 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + tfversion "github.com/kubegems/opentofu/version" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +const ( + defaultParallelism = 10 + tfeServiceID = "tfe.v2" + headerSourceKey = "X-Terraform-Integration" + headerSourceValue = "cloud" + genericHostname = "localterraform.com" +) + +// Cloud is an implementation of EnhancedBackend in service of the cloud backend +// integration for OpenTofu CLI. This backend is not intended to be surfaced at the user level and +// is instead an implementation detail of cloud.Cloud. +type Cloud struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ContextOpts are the base context options to set when initializing a + // new OpenTofu context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *tofu.ContextOpts + + // client is the cloud backend API client. 
+ client *tfe.Client + + // lastRetry is set to the last time a request was retried. + lastRetry time.Time + + // hostname of cloud backend + hostname string + + // token for cloud backend + token string + + // organization is the organization that contains the target workspaces. + organization string + + // WorkspaceMapping contains strategies for mapping CLI workspaces in the working directory + // to remote Terraform Cloud workspaces. + WorkspaceMapping WorkspaceMapping + + // services is used for service discovery + services *disco.Disco + + // renderer is used for rendering JSON plan output and streamed logs. + renderer *jsonformat.Renderer + + // local allows local operations, where Terraform Cloud serves as a state storage backend. + local backend.Enhanced + + // forceLocal, if true, will force the use of the local backend. + forceLocal bool + + // opLock locks operations + opLock sync.Mutex + + // ignoreVersionConflict, if true, will disable the requirement that the + // local OpenTofu version matches the remote workspace's configured + // version. This will also cause VerifyWorkspaceTerraformVersion to return + // a warning diagnostic instead of an error. + ignoreVersionConflict bool + + runningInAutomation bool + + // input stores the value of the -input flag, since it will be used + // to determine whether or not to ask the user for approval of a run. + input bool + + encryption encryption.StateEncryption +} + +var _ backend.Backend = (*Cloud)(nil) +var _ backend.Enhanced = (*Cloud)(nil) +var _ backend.Local = (*Cloud)(nil) + +// New creates a new initialized cloud backend. +func New(services *disco.Disco, enc encryption.StateEncryption) *Cloud { + return &Cloud{ + services: services, + encryption: enc, + } +} + +// ConfigSchema implements backend.Enhanced. 
+func (b *Cloud) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "hostname": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionHostname, + }, + "organization": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionOrganization, + }, + "token": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionToken, + }, + }, + + BlockTypes: map[string]*configschema.NestedBlock{ + "workspaces": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionName, + }, + "project": { + Type: cty.String, + Optional: true, + Description: schemaDescriptionProject, + }, + "tags": { + Type: cty.Set(cty.String), + Optional: true, + Description: schemaDescriptionTags, + }, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + } +} + +// PrepareConfig implements backend.Backend. +func (b *Cloud) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return obj, diags + } + + // check if organization is specified in the config. 
+ if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" { + // organization is specified in the config but is invalid, so + // we'll fallback on TF_CLOUD_ORGANIZATION + if val := os.Getenv("TF_CLOUD_ORGANIZATION"); val == "" { + diags = diags.Append(missingConfigAttributeAndEnvVar("organization", "TF_CLOUD_ORGANIZATION")) + } + } + + // Consider preserving the state in the receiver because it's instantiated twice, see b.setConfigurationFields + WorkspaceMapping := newWorkspacesMappingFromFields(obj) + + if diag := reconcileWorkspaceMappingEnvVars(&WorkspaceMapping); diag != nil { + diags = diags.Append(diag) + } + + switch WorkspaceMapping.Strategy() { + // Make sure have a workspace mapping strategy present + case WorkspaceNoneStrategy: + diags = diags.Append(invalidWorkspaceConfigMissingValues) + // Make sure that a workspace name is configured. + case WorkspaceInvalidStrategy: + diags = diags.Append(invalidWorkspaceConfigMisconfiguration) + } + + return obj, diags +} + +func newWorkspacesMappingFromFields(obj cty.Value) WorkspaceMapping { + mapping := WorkspaceMapping{} + + config := obj.GetAttr("workspaces") + if config.IsNull() { + return mapping + } + + workspaceName := config.GetAttr("name") + if !workspaceName.IsNull() { + mapping.Name = workspaceName.AsString() + } + + workspaceTags := config.GetAttr("tags") + if !workspaceTags.IsNull() { + err := gocty.FromCtyValue(workspaceTags, &mapping.Tags) + if err != nil { + log.Panicf("An unexpected error occurred: %s", err) + } + } + + projectName := config.GetAttr("project") + if !projectName.IsNull() && projectName.AsString() != "" { + mapping.Project = projectName.AsString() + } + + return mapping +} + +func (b *Cloud) ServiceDiscoveryAliases() ([]backend.HostAlias, error) { + aliasHostname, err := svchost.ForComparison(genericHostname) + if err != nil { + // This should never happen because the hostname is statically defined. 
+ return nil, fmt.Errorf("failed to create backend alias from alias %q. The hostname is not in the correct format. This is a bug in the backend", genericHostname) + } + + targetHostname, err := svchost.ForComparison(b.hostname) + if err != nil { + // This should never happen because the 'to' alias is the backend host, which has + // already been evaluated for validity. + return nil, fmt.Errorf("failed to create backend alias to target %q. The hostname is not in the correct format.", b.hostname) + } + + return []backend.HostAlias{ + { + From: aliasHostname, + To: targetHostname, + }, + }, nil +} + +// Configure implements backend.Enhanced. +func (b *Cloud) Configure(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if obj.IsNull() { + return diags + } + + diagErr := b.setConfigurationFields(obj) + if diagErr.HasErrors() { + return diagErr + } + + // Discover the service URL to confirm that it provides the cloud backend API + service, err := b.discover() + + // Check for errors before we continue. + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + "", // no description is needed here, the error is clear + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + + // First we'll retrieve the token from the configuration + var token string + if val := obj.GetAttr("token"); !val.IsNull() { + token = val.AsString() + } + + // Get the token from the CLI Config File in the credentials section + // if no token was set in the configuration + if token == "" { + token, err = b.cliConfigToken() + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + "", // no description is needed here, the error is clear + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + } + + // Return an error if we still don't have a token at this point. 
+ if token == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required token could not be found", + fmt.Sprintf( + "Run the following command to generate a token for %s:\n %s", + b.hostname, + fmt.Sprintf("tofu login %s", b.hostname), + ), + )) + return diags + } + + b.token = token + + if b.client == nil { + cfg := &tfe.Config{ + Address: service.String(), + BasePath: service.Path, + Token: token, + Headers: make(http.Header), + RetryLogHook: b.retryLogHook, + } + + // Set the version header to the current version. + cfg.Headers.Set(tfversion.Header, tfversion.Version) + cfg.Headers.Set(headerSourceKey, headerSourceValue) + + // Update user-agent from 'go-tfe' to opentofu + cfg.Headers.Set("User-Agent", httpclient.OpenTofuUserAgent(tfversion.String())) + + // Create the TFC/E API client. + b.client, err = tfe.NewClient(cfg) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create the cloud backend client", + fmt.Sprintf( + `Encountered an unexpected error while creating the `+ + `cloud backend client: %s.`, err, + ), + )) + return diags + } + } + + // Check if the organization exists by reading its entitlements. 
+ entitlements, err := b.client.Organizations.ReadEntitlements(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + err = fmt.Errorf("organization %q at host %s not found.\n\n"+ + "Please ensure that the organization and hostname are correct "+ + "and that your API token for %s is valid.", + b.organization, b.hostname, b.hostname) + } + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + fmt.Sprintf("Failed to read organization %q at host %s", b.organization, b.hostname), + fmt.Sprintf("Encountered an unexpected error while reading the "+ + "organization settings: %s", err), + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + return diags + } + + if ws, ok := os.LookupEnv("TF_WORKSPACE"); ok { + if ws == b.WorkspaceMapping.Name || b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + diag := b.validWorkspaceEnvVar(context.Background(), b.organization, ws) + if diag != nil { + diags = diags.Append(diag) + return diags + } + } + } + + // Check for the minimum version of Terraform Enterprise required. + // + // For API versions prior to 2.3, RemoteAPIVersion will return an empty string, + // so if there's an error when parsing the RemoteAPIVersion, it's handled as + // equivalent to an API version < 2.3. + currentAPIVersion, parseErr := version.NewVersion(b.client.RemoteAPIVersion()) + desiredAPIVersion, _ := version.NewVersion("2.5") + + if parseErr != nil || currentAPIVersion.LessThan(desiredAPIVersion) { + log.Printf("[TRACE] API version check failed; want: >= %s, got: %s", desiredAPIVersion.Original(), currentAPIVersion) + if b.runningInAutomation { + // It should never be possible for this OpenTofu process to be mistakenly + // used internally within an unsupported Terraform Enterprise install - but + // just in case it happens, give an actionable error. 
+ diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Unsupported cloud backend version", + cloudIntegrationUsedInUnsupportedTFE, + ), + ) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported cloud backend version", + `The 'cloud' option is not supported with this version of the cloud backend.`, + ), + ) + } + } + + // Configure a local backend for when we need to run operations locally. + b.local = backendLocal.NewWithBackend(b, b.encryption) + b.forceLocal = b.forceLocal || !entitlements.Operations + + // Enable retries for server errors as the backend is now fully configured. + b.client.RetryServerErrors(true) + + return diags +} + +func (b *Cloud) setConfigurationFields(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // Get the hostname. + b.hostname = os.Getenv("TF_CLOUD_HOSTNAME") + if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { + b.hostname = val.AsString() + } else if b.hostname == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Hostname is required for the cloud backend", + `OpenTofu does not provide a default "hostname" attribute, so it must be set to the hostname of the cloud backend.`, + )) + + return diags + } + + // We can have two options, setting the organization via the config + // or using TF_CLOUD_ORGANIZATION. Since PrepareConfig() validates that one of these + // values must exist, we'll initially set it to the env var and override it if + // specified in the configuration. + b.organization = os.Getenv("TF_CLOUD_ORGANIZATION") + + // Check if the organization is present and valid in the config. 
+ if val := obj.GetAttr("organization"); !val.IsNull() && val.AsString() != "" { + b.organization = val.AsString() + } + + // Initially, set workspaces from the configuration + b.WorkspaceMapping = newWorkspacesMappingFromFields(obj) + + // Overwrite workspaces config from env variable + if diag := reconcileWorkspaceMappingEnvVars(&b.WorkspaceMapping); diag != nil { + return diags.Append(diag) + } + + // Determine if we are forced to use the local backend. + b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" + + return diags +} + +func reconcileWorkspaceMappingEnvVars(w *WorkspaceMapping) tfdiags.Diagnostic { + if v := os.Getenv("TF_WORKSPACE"); v != "" { + if w.Name != "" && w.Name != v { + return invalidWorkspaceConfigInconsistentNameAndEnvVar() + } + + // If we don't have workspaces name or tags set in config, we can get the name from the TF_WORKSPACE env var + if w.Strategy() == WorkspaceNoneStrategy { + w.Name = v + } + } + + if v := os.Getenv("TF_CLOUD_PROJECT"); v != "" && w.Project == "" { + w.Project = v + } + + return nil +} + +// discover the TFC/E API service URL and version constraints. +func (b *Cloud) discover() (*url.URL, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return nil, err + } + + host, err := b.services.Discover(hostname) + if err != nil { + var serviceDiscoErr *disco.ErrServiceDiscoveryNetworkRequest + + switch { + case errors.As(err, &serviceDiscoErr): + err = fmt.Errorf("a network issue prevented cloud configuration; %w", err) + return nil, err + default: + return nil, err + } + } + + service, err := host.ServiceURL(tfeServiceID) + // Return the error, unless its a disco.ErrVersionNotSupported error. + if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil { + return nil, err + } + + return service, err +} + +// cliConfigToken returns the token for this host as configured in the credentials +// section of the CLI Config File. 
If no token was configured, an empty +// string will be returned instead. +func (b *Cloud) cliConfigToken() (string, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// retryLogHook is invoked each time a request is retried allowing the +// backend to log any connection issues to prevent data loss. +func (b *Cloud) retryLogHook(attemptNum int, resp *http.Response) { + if b.CLI != nil { + // Ignore the first retry to make sure any delayed output will + // be written to the console before we start logging retries. + // + // The retry logic in the TFE client will retry both rate limited + // requests and server errors, but in the cloud backend we only + // care about server errors so we ignore rate limit (429) errors. + if attemptNum == 0 || (resp != nil && resp.StatusCode == 429) { + // Reset the last retry time. + b.lastRetry = time.Now() + return + } + + if attemptNum == 1 { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) + } else { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace( + fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) + } + } +} + +// Workspaces implements backend.Enhanced, returning a filtered list of workspace names according to +// the workspace mapping strategy configured. +func (b *Cloud) Workspaces() ([]string, error) { + // Create a slice to contain all the names. + var names []string + + // If configured for a single workspace, return that exact name only. The StateMgr for this + // backend will automatically create the remote workspace if it does not yet exist. 
+ if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { + names = append(names, b.WorkspaceMapping.Name) + return names, nil + } + + // Otherwise, multiple workspaces are being mapped. Query Terraform Cloud for all the remote + // workspaces by the provided mapping strategy. + options := &tfe.WorkspaceListOptions{} + if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + taglist := strings.Join(b.WorkspaceMapping.Tags, ",") + options.Tags = taglist + } + + if b.WorkspaceMapping.Project != "" { + listOpts := &tfe.ProjectListOptions{ + Name: b.WorkspaceMapping.Project, + } + projects, err := b.client.Projects.List(context.Background(), b.organization, listOpts) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("failed to retrieve project %s: %w", listOpts.Name, err) + } + for _, p := range projects.Items { + if p.Name == b.WorkspaceMapping.Project { + options.ProjectID = p.ID + break + } + } + } + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + names = append(names, w.Name) + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil +} + +// DeleteWorkspace implements backend.Enhanced. 
+func (b *Cloud) DeleteWorkspace(name string, force bool) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + + if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy { + return backend.ErrWorkspacesNotSupported + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err == tfe.ErrResourceNotFound { + return nil // If the workspace does not exist, succeed + } + + if err != nil { + return fmt.Errorf("failed to retrieve workspace %s: %w", name, err) + } + + // Configure the remote workspace name. + State := &State{tfeClient: b.client, organization: b.organization, workspace: workspace, enableIntermediateSnapshots: false, encryption: b.encryption} + return State.Delete(force) +} + +// StateMgr implements backend.Enhanced. +func (b *Cloud) StateMgr(name string) (statemgr.Full, error) { + var remoteTFVersion string + + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + + if b.WorkspaceMapping.Strategy() == WorkspaceNameStrategy && name != b.WorkspaceMapping.Name { + return nil, backend.ErrWorkspacesNotSupported + } + + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("Failed to retrieve workspace %s: %w", name, err) + } + if workspace != nil { + remoteTFVersion = workspace.TerraformVersion + } + + var configuredProject *tfe.Project + + // Attempt to find project if configured + if b.WorkspaceMapping.Project != "" { + listOpts := &tfe.ProjectListOptions{ + Name: b.WorkspaceMapping.Project, + } + projects, err := b.client.Projects.List(context.Background(), b.organization, listOpts) + if err != nil && err != tfe.ErrResourceNotFound { + // This is a failure to make an API request, fail to initialize + return nil, fmt.Errorf("Attempted to find configured project %s but was unable to.", b.WorkspaceMapping.Project) + } + 
for _, p := range projects.Items { + if p.Name == b.WorkspaceMapping.Project { + configuredProject = p + break + } + } + + if configuredProject == nil { + // We were able to read project, but were unable to find the configured project + // This is not fatal as we may attempt to create the project if we need to create + // the workspace + log.Printf("[TRACE] cloud: Attempted to find configured project %s but was unable to.", b.WorkspaceMapping.Project) + } + } + + if err == tfe.ErrResourceNotFound { + // Create workspace if it was not found + + // Workspace Create Options + workspaceCreateOptions := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + Tags: b.WorkspaceMapping.tfeTags(), + Project: configuredProject, + } + + // Create project if not exists, otherwise use it + if workspaceCreateOptions.Project == nil && b.WorkspaceMapping.Project != "" { + // If we didn't find the project, try to create it + if workspaceCreateOptions.Project == nil { + createOpts := tfe.ProjectCreateOptions{ + Name: b.WorkspaceMapping.Project, + } + // didn't find project, create it instead + log.Printf("[TRACE] cloud: Creating cloud backend project %s/%s", b.organization, b.WorkspaceMapping.Project) + project, err := b.client.Projects.Create(context.Background(), b.organization, createOpts) + if err != nil && err != tfe.ErrResourceNotFound { + return nil, fmt.Errorf("failed to create project %s: %w", b.WorkspaceMapping.Project, err) + } + configuredProject = project + workspaceCreateOptions.Project = configuredProject + } + } + + // Create a workspace + log.Printf("[TRACE] cloud: Creating cloud backend workspace %s/%s", b.organization, name) + workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, workspaceCreateOptions) + if err != nil { + return nil, fmt.Errorf("error creating workspace %s: %w", name, err) + } + + remoteTFVersion = workspace.TerraformVersion + + // Attempt to set the new workspace to use this version of OpenTofu. 
This + // can fail if there's no enabled tool_version whose name matches our + // version string, but that's expected sometimes -- just warn and continue. + versionOptions := tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(tfversion.String()), + } + _, err := b.client.Workspaces.UpdateByID(context.Background(), workspace.ID, versionOptions) + if err == nil { + remoteTFVersion = tfversion.String() + } else { + // TODO: Ideally we could rely on the client to tell us what the actual + // problem was, but we currently can't get enough context from the error + // object to do a nicely formatted message, so we're just assuming the + // issue was that the version wasn't available since that's probably what + // happened. + log.Printf("[TRACE] cloud: Attempted to select version %s for cloud backend workspace; unavailable, so %s will be used instead.", tfversion.String(), workspace.TerraformVersion) + if b.CLI != nil { + versionUnavailable := fmt.Sprintf(unavailableTerraformVersion, tfversion.String(), workspace.TerraformVersion) + b.CLI.Output(b.Colorize().Color(versionUnavailable)) + } + } + } + + if b.workspaceTagsRequireUpdate(workspace, b.WorkspaceMapping) { + options := tfe.WorkspaceAddTagsOptions{ + Tags: b.WorkspaceMapping.tfeTags(), + } + log.Printf("[TRACE] cloud: Adding tags for cloud backend workspace %s/%s", b.organization, name) + err = b.client.Workspaces.AddTags(context.Background(), workspace.ID, options) + if err != nil { + return nil, fmt.Errorf("Error updating workspace %s: %w", name, err) + } + } + + // This is a fallback error check. Most code paths should use other + // mechanisms to check the version, then set the ignoreVersionConflict + // field to true. This check is only in place to ensure that we don't + // accidentally upgrade state with a new code path, and the version check + // logic is coarser and simpler. 
+ if !b.ignoreVersionConflict { + // Explicitly ignore the pseudo-version "latest" here, as it will cause + // plan and apply to always fail. + if remoteTFVersion != tfversion.String() && remoteTFVersion != "latest" { + return nil, fmt.Errorf("Remote workspace TF version %q does not match local OpenTofu version %q", remoteTFVersion, tfversion.String()) + } + } + + return &State{tfeClient: b.client, organization: b.organization, workspace: workspace, enableIntermediateSnapshots: false, encryption: b.encryption}, nil +} + +// Operation implements backend.Enhanced. +func (b *Cloud) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + // Retrieve the workspace for this operation. + w, err := b.fetchWorkspace(ctx, b.organization, op.Workspace) + if err != nil { + return nil, err + } + + // Terraform remote version conflicts are not a concern for operations. We + // are in one of three states: + // + // - Running remotely, in which case the local version is irrelevant; + // - Workspace configured for local operations, in which case the remote + // version is meaningless; + // - Forcing local operations, which should only happen in the Terraform Cloud worker, in + // which case the Terraform versions by definition match. + b.IgnoreVersionConflict() + + // Check if we need to use the local backend to run the operation. + if b.forceLocal || isLocalExecutionMode(w.ExecutionMode) { + // Record that we're forced to run operations locally to allow the + // command package UI to operate correctly + b.forceLocal = true + return b.local.Operation(ctx, op) + } + + // Set the remote workspace name. 
+ op.Workspace = w.Name + + // Determine the function to call for our operation + var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) + switch op.Type { + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + case backend.OperationTypeRefresh: + // The `tofu refresh` command has been deprecated in favor of `tofu apply -refresh-state`. + // Rather than respond with an error telling the user to run the other command we can just run + // that command instead. We will tell the user what we are doing, and then do it. + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(refreshToApplyRefresh) + "\n")) + } + op.PlanMode = plans.RefreshOnlyMode + op.PlanRefresh = true + op.AutoApprove = true + f = b.opApply + default: + return nil, fmt.Errorf( + "\n\nThe cloud backend does not support the %q operation.", op.Type) + } + + // Lock + b.opLock.Lock() + + // Build our running operation + // the runningCtx is only used to block until the operation returns. + runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + // Do it. 
+ go func() { + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op, w) + if opErr != nil && opErr != context.Canceled { + var diags tfdiags.Diagnostics + diags = diags.Append(opErr) + op.ReportResult(runningOp, diags) + return + } + + if r == nil && opErr == context.Canceled { + runningOp.Result = backend.OperationFailure + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + + // Record if there are any changes. + runningOp.PlanEmpty = !r.HasChanges + + if opErr == context.Canceled { + if err := b.cancel(cancelCtx, op, r); err != nil { + var diags tfdiags.Diagnostics + diags = diags.Append(generalError("Failed to retrieve run", err)) + op.ReportResult(runningOp, diags) + return + } + } + + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + runningOp.Result = backend.OperationFailure + } + } + }() + + // Return the running operation. + return runningOp, nil +} + +func (b *Cloud) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.Actions.IsCancelable { + // Only ask if the remote operation should be canceled + // if the auto approve flag is not set. + if !op.AutoApprove { + v, err := op.UIIn.Input(cancelCtx, &tofu.InputOpts{ + Id: "cancel", + Query: "\nDo you want to cancel the remote operation?", + Description: "Only 'yes' will be accepted to cancel.", + }) + if err != nil { + return generalError("Failed asking to cancel", err) + } + if v != "yes" { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) + } + return nil + } + } else { + if b.CLI != nil { + // Insert a blank line to separate the outputs. + b.CLI.Output("") + } + } + + // Try to cancel the remote operation. 
+ err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) + if err != nil { + return generalError("Failed to cancel run", err) + } + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) + } + } + + return nil +} + +// IgnoreVersionConflict allows commands to disable the fall-back check that +// the local OpenTofu version matches the remote workspace's configured +// OpenTofu version. This should be called by commands where this check is +// unnecessary, such as those performing remote operations, or read-only +// operations. It will also be called if the user uses a command-line flag to +// override this check. +func (b *Cloud) IgnoreVersionConflict() { + b.ignoreVersionConflict = true +} + +// VerifyWorkspaceTerraformVersion compares the local OpenTofu version against +// the workspace's configured OpenTofu version. If they are compatible, this +// means that there are no state compatibility concerns, so it returns no +// diagnostics. +// +// If the versions aren't compatible, it returns an error (or, if +// b.ignoreVersionConflict is set, a warning). +func (b *Cloud) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) + if err != nil { + // If the workspace doesn't exist, there can be no compatibility + // problem, so we can return. This is most likely to happen when + // migrating state from a local backend to a new workspace. + if err == tfe.ErrResourceNotFound { + return nil + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Workspace read failed: %s", err), + )) + return diags + } + + // If the workspace has the pseudo-version "latest", all bets are off. We + // cannot reasonably determine what the intended OpenTofu version is, so + // we'll skip version verification. 
+ if workspace.TerraformVersion == "latest" { + return nil + } + + // If the workspace has execution-mode set to local, the remote OpenTofu + // version is effectively meaningless, so we'll skip version verification. + if isLocalExecutionMode(workspace.ExecutionMode) { + return nil + } + + remoteConstraint, err := version.NewConstraint(workspace.TerraformVersion) + if err != nil { + message := fmt.Sprintf( + "The remote workspace specified an invalid TF version or constraint (%s), "+ + "and it isn't possible to determine whether the local OpenTofu version (%s) is compatible.", + workspace.TerraformVersion, + tfversion.String(), + ) + diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) + return diags + } + + remoteVersion, _ := version.NewSemver(workspace.TerraformVersion) + + // We can use a looser version constraint if the workspace specifies a + // literal Terraform version, and it is not a prerelease. The latter + // restriction is because we cannot compare prerelease versions with any + // operator other than simple equality. + if remoteVersion != nil && remoteVersion.Prerelease() == "" { + v014 := version.Must(version.NewSemver("0.14.0")) + v130 := version.Must(version.NewSemver("1.3.0")) + + // Versions from 0.14 through the early 1.x series should be compatible + // (though we don't know about 1.3 yet). + if remoteVersion.GreaterThanOrEqual(v014) && remoteVersion.LessThan(v130) { + early1xCompatible, err := version.NewConstraint(fmt.Sprintf(">= 0.14.0, < %s", v130.String())) + if err != nil { + panic(err) + } + remoteConstraint = early1xCompatible + } + + // Any future new state format will require at least a minor version + // increment, so x.y.* will always be compatible with each other. 
+ if remoteVersion.GreaterThanOrEqual(v130) { + rwvs := remoteVersion.Segments64() + if len(rwvs) >= 3 { + // ~> x.y.0 + minorVersionCompatible, err := version.NewConstraint(fmt.Sprintf("~> %d.%d.0", rwvs[0], rwvs[1])) + if err != nil { + panic(err) + } + remoteConstraint = minorVersionCompatible + } + } + } + + // Re-parsing tfversion.String because tfversion.SemVer omits the prerelease + // prefix, and we want to allow constraints like `~> 1.2.0-beta1`. + fullTfversion := version.Must(version.NewSemver(tfversion.String())) + + if remoteConstraint.Check(fullTfversion) { + return diags + } + + message := fmt.Sprintf( + "The local OpenTofu version (%s) does not meet the version requirements for remote workspace %s/%s (%s).", + tfversion.String(), + b.organization, + workspace.Name, + remoteConstraint, + ) + diags = diags.Append(incompatibleWorkspaceTerraformVersion(message, b.ignoreVersionConflict)) + return diags +} + +func (b *Cloud) IsLocalOperations() bool { + return b.forceLocal +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so useful +// as a helper to wrap any potentially colored strings. +// +// TODO SvH: Rename this back to Colorize as soon as we can pass -no-color. 
+// +//lint:ignore U1000 see above todo +func (b *Cloud) cliColorize() *colorstring.Colorize { + if b.CLIColor != nil { + return b.CLIColor + } + + return &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } +} + +func (b *Cloud) workspaceTagsRequireUpdate(workspace *tfe.Workspace, workspaceMapping WorkspaceMapping) bool { + if workspaceMapping.Strategy() != WorkspaceTagsStrategy { + return false + } + + existingTags := map[string]struct{}{} + for _, t := range workspace.TagNames { + existingTags[t] = struct{}{} + } + + for _, tag := range workspaceMapping.Tags { + if _, ok := existingTags[tag]; !ok { + return true + } + } + + return false +} + +type WorkspaceMapping struct { + Name string + Project string + Tags []string +} + +type workspaceStrategy string + +const ( + WorkspaceTagsStrategy workspaceStrategy = "tags" + WorkspaceNameStrategy workspaceStrategy = "name" + WorkspaceNoneStrategy workspaceStrategy = "none" + WorkspaceInvalidStrategy workspaceStrategy = "invalid" +) + +func (wm WorkspaceMapping) Strategy() workspaceStrategy { + switch { + case len(wm.Tags) > 0 && wm.Name == "": + return WorkspaceTagsStrategy + case len(wm.Tags) == 0 && wm.Name != "": + return WorkspaceNameStrategy + case len(wm.Tags) == 0 && wm.Name == "": + return WorkspaceNoneStrategy + default: + // Any other combination is invalid as each strategy is mutually exclusive + return WorkspaceInvalidStrategy + } +} + +func isLocalExecutionMode(execMode string) bool { + return execMode == "local" +} + +func (b *Cloud) fetchWorkspace(ctx context.Context, organization string, workspace string) (*tfe.Workspace, error) { + // Retrieve the workspace for this operation. 
+ w, err := b.client.Workspaces.Read(ctx, organization, workspace) + if err != nil { + switch err { + case context.Canceled: + return nil, err + case tfe.ErrResourceNotFound: + return nil, fmt.Errorf( + "workspace %s not found\n\n"+ + "For security, cloud backends return '404 Not Found' responses for resources\n"+ + "for resources that a user doesn't have access to, in addition to resources that\n"+ + "do not exist. If the resource does exist, please check the permissions of the provided token.", + workspace, + ) + default: + err := fmt.Errorf( + "Cloud backend returned an unexpected error:\n\n%w", + err, + ) + return nil, err + } + } + + return w, nil +} + +// validWorkspaceEnvVar ensures we have selected a valid workspace using TF_WORKSPACE: +// First, it ensures the workspace specified by TF_WORKSPACE exists in the organization +// Second, if tags are specified in the configuration, it ensures TF_WORKSPACE belongs to the set +// of available workspaces with those given tags. +func (b *Cloud) validWorkspaceEnvVar(ctx context.Context, organization, workspace string) tfdiags.Diagnostic { + // first ensure the workspace exists + _, err := b.client.Workspaces.Read(ctx, organization, workspace) + if err != nil && err != tfe.ErrResourceNotFound { + return tfdiags.Sourceless( + tfdiags.Error, + "Cloud backend returned an unexpected error", + err.Error(), + ) + } + + if err == tfe.ErrResourceNotFound { + return tfdiags.Sourceless( + tfdiags.Error, + "Invalid workspace selection", + fmt.Sprintf(`OpenTofu failed to find workspace %q in organization %s.`, workspace, organization), + ) + } + + // if the configuration has specified tags, we need to ensure TF_WORKSPACE + // is a valid member + if b.WorkspaceMapping.Strategy() == WorkspaceTagsStrategy { + opts := &tfe.WorkspaceListOptions{} + opts.Tags = strings.Join(b.WorkspaceMapping.Tags, ",") + + for { + wl, err := b.client.Workspaces.List(ctx, b.organization, opts) + if err != nil { + return tfdiags.Sourceless( + 
tfdiags.Error, + "Cloud backend returned an unexpected error", + err.Error(), + ) + } + + for _, ws := range wl.Items { + if ws.Name == workspace { + return nil + } + } + + if wl.CurrentPage >= wl.TotalPages { + break + } + + opts.PageNumber = wl.NextPage + } + + return tfdiags.Sourceless( + tfdiags.Error, + "Invalid workspace selection", + fmt.Sprintf( + "OpenTofu failed to find workspace %q with the tags specified in your configuration:\n[%s]", + workspace, + strings.ReplaceAll(opts.Tags, ",", ", "), + ), + ) + } + + return nil +} + +func (wm WorkspaceMapping) tfeTags() []*tfe.Tag { + var tags []*tfe.Tag + + if wm.Strategy() != WorkspaceTagsStrategy { + return tags + } + + for _, tag := range wm.Tags { + t := tfe.Tag{Name: tag} + tags = append(tags, &t) + } + + return tags +} + +func generalError(msg string, err error) error { + var diags tfdiags.Diagnostics + + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + + switch err { + case context.Canceled: + return err + case tfe.ErrResourceNotFound: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + "For security, cloud backends returns '404 Not Found' responses for resources\n"+ + "for resources that a user doesn't have access to, in addition to resources that\n"+ + "do not exist. If the resource does exist, please check the permissions of the provided token.", + )) + return diags.Err() + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `Cloud backend returned an unexpected error. Sometimes `+ + `this is caused by network connection problems, in which case you could retry `+ + `the command. If the issue persists please open a support ticket to get help `+ + `resolving the problem.`, + )) + return diags.Err() + } +} + +// The newline in this error is to make it look good in the CLI! +const initialRetryError = ` +[reset][yellow]There was an error connecting to the cloud backend. 
Please do not exit
OpenTofu to prevent data loss! Trying to restore the connection...
[reset]
`

// repeatedRetryError is emitted periodically while the connection to the
// cloud backend is still being restored; %s is the elapsed time.
const repeatedRetryError = `
[reset][yellow]Still trying to restore the connection... (%s elapsed)[reset]
`

// operationCanceled is printed once a remote operation has been cancelled.
const operationCanceled = `
[reset][red]The remote operation was successfully cancelled.[reset]
`

// operationNotCanceled is printed when a cancellation attempt did not take effect.
const operationNotCanceled = `
[reset][red]The remote operation was not cancelled.[reset]
`

// refreshToApplyRefresh tells the user a refresh is run as a refresh-only apply.
const refreshToApplyRefresh = `[bold][yellow]Proceeding with 'tofu apply -refresh-only -auto-approve'.[reset]`

// unavailableTerraformVersion: first %s is the local version, second %s is the
// version the new workspace will fall back to.
const unavailableTerraformVersion = `
[reset][yellow]The local OpenTofu version (%s) is not available in the cloud backend, or your
organization does not have access to it. The new workspace will use %s. You can
change this later in the workspace settings.[reset]`

// cloudIntegrationUsedInUnsupportedTFE is shown when the remote platform asks
// for a state mechanism this backend build does not implement.
const cloudIntegrationUsedInUnsupportedTFE = `
This version of cloud backend does not support the state mechanism
attempting to be used by the platform. This should never happen.

Please reach out to OpenTofu Support to resolve this issue.`

var (
	// workspaceConfigurationHelp summarizes the two workspace mapping
	// strategies, reusing the schema descriptions below.
	workspaceConfigurationHelp = fmt.Sprintf(
		`The 'workspaces' block configures how OpenTofu CLI maps its workspaces for this single
configuration to workspaces within a cloud backend organization. Two strategies are available:

[bold]tags[reset] - %s

[bold]name[reset] - %s`, schemaDescriptionTags, schemaDescriptionName)

	// schemaDescription* document the attributes of the cloud block's schema.
	schemaDescriptionHostname = `The cloud backend hostname to connect to.`

	schemaDescriptionOrganization = `The name of the organization containing the targeted workspace(s).`

	schemaDescriptionToken = `The token used to authenticate with the cloud backend. Typically this argument should not
be set, and 'tofu login' used instead; your credentials will then be fetched from your CLI
configuration file or configured credential helper.`

	schemaDescriptionTags = `A set of tags used to select remote cloud backend workspaces to be used for this single
configuration. 
New workspaces will automatically be tagged with these tag values. Generally, this +is the primary and recommended strategy to use. This option conflicts with "name".` + + schemaDescriptionName = `The name of a single cloud backend workspace to be used with this configuration. +When configured, only the specified workspace can be used. This option conflicts with "tags".` + + schemaDescriptionProject = `The name of a project that resulting workspace(s) will be created in.` +) diff --git a/pkg/cloud/backend_apply.go b/pkg/cloud/backend_apply.go new file mode 100644 index 00000000000..5e43d4a5928 --- /dev/null +++ b/pkg/cloud/backend_apply.go @@ -0,0 +1,336 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "log" + "strings" + + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func (b *Cloud) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] cloud: starting Apply operation") + + var diags tfdiags.Diagnostics + + // We should remove the `CanUpdate` part of this test, but for now + // (to remain compatible with tfe.v2.1) we'll leave it in here. + if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. 
In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `Cloud backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile.IsLocal() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved local plan is not supported", + `Cloud backend can apply a saved cloud plan, or create a new plan when `+ + `configuration is present. It cannot apply a saved local plan.`, + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Apply requires configuration to be present. Applying without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run 'tofu destroy' which `+ + `does not require any configuration files.`, + )) + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + var r *tfe.Run + var err error + + if cp, ok := op.PlanFile.Cloud(); ok { + log.Printf("[TRACE] Loading saved cloud plan for apply") + // Check hostname first, for a more actionable error than a generic 404 later + if cp.Hostname != b.hostname { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan is for a different hostname", + fmt.Sprintf("The given saved plan refers to a run on %s, but the currently configured cloud backend instance is %s.", cp.Hostname, b.hostname), + )) + return r, diags.Err() + } + // Fetch the run referenced in the saved plan bookmark. + r, err = b.client.Runs.ReadWithOptions(stopCtx, cp.RunID, &tfe.RunReadOptions{ + Include: []tfe.RunIncludeOpt{tfe.RunWorkspace}, + }) + + if err != nil { + return r, err + } + + if r.Workspace.ID != w.ID { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan is for a different workspace", + fmt.Sprintf("The given saved plan does not refer to a run in the current workspace (%s/%s), so it cannot currently be applied. For more details, view this run in a browser at:\n%s", w.Organization.Name, w.Name, runURL(b.hostname, r.Workspace.Organization.Name, r.Workspace.Name, r.ID)), + )) + return r, diags.Err() + } + + if !r.Actions.IsConfirmable { + url := runURL(b.hostname, b.organization, op.Workspace, r.ID) + return r, unusableSavedPlanError(r.Status, url) + } + + // Since we're not calling plan(), we need to print a run header ourselves: + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(applySavedHeader) + "\n")) + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, r.Workspace.Name, r.ID)) + "\n")) + } + } else { + log.Printf("[TRACE] Running new cloud plan for apply") + // Run the plan phase. 
+ r, err = b.plan(stopCtx, cancelCtx, op, w) + + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. + if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. + if !op.AutoApprove && !r.Actions.IsConfirmable { + return r, nil + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove + + if mustConfirm && b.input { + opts := &tofu.InputOpts{Id: "approve"} + + if op.PlanMode == plans.DestroyMode { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "OpenTofu will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "OpenTofu will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + err = b.confirm(stopCtx, op, opts, r, "yes") + if err != nil && err != errRunApproved { + return r, err + } + } else if mustConfirm && !b.input { + return r, errApplyNeedsUIConfirmation + } else { + // If we don't need to ask for confirmation, insert a blank + // line to separate the ouputs. + if b.CLI != nil { + b.CLI.Output("") + } + } + } + + // Do the apply! + if !op.AutoApprove && err != errRunApproved { + if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + + // Retrieve the run to get task stages. 
+ // Task Stages are calculated upfront so we only need to call this once for the run. + taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) + if err != nil { + return r, err + } + + if stage, ok := taskStages[tfe.PreApply]; ok { + if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-apply Tasks"); err != nil { + return r, err + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + err = b.renderApplyLogs(stopCtx, r) + if err != nil { + return r, err + } + + return r, nil +} + +func (b *Cloud) renderApplyLogs(ctx context.Context, run *tfe.Run) error { + logs, err := b.client.Applies.Logs(ctx, run.Apply.ID) + if err != nil { + return err + } + + if b.CLI != nil { + reader := bufio.NewReaderSize(logs, 64*1024) + skip := 0 + + for next := true; next; { + var l, line []byte + var err error + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + + line = append(line, l...) + } + + // Apply logs show the same Terraform info logs as shown in the plan logs + // (which contain version and os/arch information), we therefore skip to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + + if next || len(line) > 0 { + log := &jsonformat.JSONLog{} + if err := json.Unmarshal(line, log); err != nil { + // If we can not parse the line as JSON, we will simply + // print the line. This maintains backwards compatibility for + // users who do not wish to enable structured output in their + // workspace. 
+ b.CLI.Output(string(line)) + continue + } + + if b.renderer != nil { + // Otherwise, we will print the log + err := b.renderer.RenderLog(log) + if err != nil { + return err + } + } + } + } + } + + return nil +} + +func runURL(hostname, orgName, wsName, runID string) string { + return fmt.Sprintf("https://%s/app/%s/%s/runs/%s", hostname, orgName, wsName, runID) +} + +func unusableSavedPlanError(status tfe.RunStatus, url string) error { + var diags tfdiags.Diagnostics + var summary, reason string + + switch status { + case tfe.RunApplied: + summary = "Saved plan is already applied" + reason = "The given plan file was already successfully applied, and cannot be applied again." + case tfe.RunApplying, tfe.RunApplyQueued, tfe.RunConfirmed: + summary = "Saved plan is already confirmed" + reason = "The given plan file is already being applied, and cannot be applied again." + case tfe.RunCanceled: + summary = "Saved plan is canceled" + reason = "The given plan file can no longer be applied because the run was canceled via the cloud backend UI or API." + case tfe.RunDiscarded: + summary = "Saved plan is discarded" + reason = "The given plan file can no longer be applied; either another run was applied first, or a user discarded it via the cloud backend UI or API." + case tfe.RunErrored: + summary = "Saved plan is errored" + reason = "The given plan file refers to a plan that had errors and did not complete successfully. It cannot be applied." + case tfe.RunPlannedAndFinished: + // Note: planned and finished can also indicate a plan-only run, but + // tofu plan can't create a saved plan for a plan-only run, so we + // know it's no-changes in this case. + summary = "Saved plan has no changes" + reason = "The given plan file contains no changes, so it cannot be applied." 
+ case tfe.RunPolicyOverride: + summary = "Saved plan requires policy override" + reason = "The given plan file has soft policy failures, and cannot be applied until a user with appropriate permissions overrides the policy check." + default: + summary = "Saved plan cannot be applied" + reason = "Cloud backend cannot apply the given plan file. This may mean the plan and checks have not yet completed, or may indicate another problem." + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summary, + fmt.Sprintf("%s For more details, view this run in a browser at:\n%s", reason, url), + )) + return diags.Err() +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in cloud backend. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if it's still pending. If the apply started it +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... +` + +const applySavedHeader = ` +[reset][yellow]Running apply in cloud backend. Output will stream here. Pressing Ctrl-C +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... +` diff --git a/pkg/cloud/backend_apply_test.go b/pkg/cloud/backend_apply_test.go new file mode 100644 index 00000000000..a6c6f8c4025 --- /dev/null +++ b/pkg/cloud/backend_apply_test.go @@ -0,0 +1,1972 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + gomock "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + tfe "github.com/hashicorp/go-tfe" + mocks "github.com/hashicorp/go-tfe/mocks" + version "github.com/hashicorp/go-version" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + tfversion "github.com/kubegems/opentofu/version" +) + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationApplyWithTimeout(t, configDir, 0) +} + +func testOperationApplyWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + // Many of our tests use an overridden "null" provider that's just in-memory + // inside the test process, not a separate plugin on disk. 
+ depLocks := depsfile.NewLocks() + depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/null")) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeApply, + View: operationView, + DependencyLocks: depLocks, + }, configCleanup, done +} + +func TestCloud_applyBasic(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONBasic(t 
*testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONWithOutputs(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-outputs") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": 
"yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + expectedSimpleOutput := `simple = [ + "some", + "list", + ]` + expectedSensitiveOutput := `secret = (sensitive value)` + expectedComplexOutput := `complex = { + keyA = { + someList = [ + 1, + 2, + 3, + ] + } + keyB = { + someBool = true + someStr = "hello" + } + }` + + if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", gotOut) + } + if !strings.Contains(gotOut, "Outputs:") { + t.Fatalf("expected output header: %s", gotOut) + } + if !strings.Contains(gotOut, expectedSimpleOutput) { + t.Fatalf("expected output: %s, got: %s", expectedSimpleOutput, gotOut) + } + if !strings.Contains(gotOut, expectedSensitiveOutput) { + t.Fatalf("expected output: %s, got: %s", expectedSensitiveOutput, gotOut) + } + if !strings.Contains(gotOut, expectedComplexOutput) { + t.Fatalf("expected output: %s, got: %s", expectedComplexOutput, gotOut) + } + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyCanceled(t *testing.T) { + b, bCleanup := 
testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Stop the run to simulate a Ctrl-C. + run.Stop() + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after cancelling apply: %s", err.Error()) + } +} + +func TestCloud_applyWithoutPermissions(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + // Create a named workspace without permissions. + w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueApply = false + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Insufficient rights to apply changes") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestCloud_applyWithVCS(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + // Create a named workspace with a VCS. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + VCSRepo: &tfe.VCSRepoOptions{}, + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { + t.Fatalf("expected a VCS error, got: %v", errOutput) + } +} + +func TestCloud_applyWithParallelism(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + if b.ContextOpts == nil { + b.ContextOpts = &tofu.ContextOpts{} + } + b.ContextOpts.Parallelism = 3 + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +// Apply with local plan file should fail. 
+func TestCloud_applyWithLocalPlan(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + op.PlanFile = planfile.NewWrappedLocal(&planfile.Reader{}) + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "saved local plan is not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +// Apply with bookmark to an existing cloud plan that's in a confirmable state +// should work. +func TestCloud_applyWithCloudPlan(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json") + defer configCleanup() + defer done(t) + + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + // Perform the plan before trying to apply it + ws, err := b.client.Workspaces.Read(context.Background(), b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("Couldn't read workspace: %s", err) + } + + planRun, err := b.plan(context.Background(), context.Background(), op, ws) + if err != nil { + t.Fatalf("Couldn't perform plan: %s", err) + } + + // Synthesize a cloud plan file with the plan's run ID + pf := &cloudplan.SavedPlanBookmark{ + RemotePlanFormat: 1, + RunID: planRun.ID, + Hostname: b.hostname, + } + op.PlanFile = planfile.NewWrappedCloud(pf) + + // Start spying on the apply output (now that the plan's done) + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: 
stream, + Colorize: mockColorize(), + } + + // Try apply + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := close(t) + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to not be empty") + } + + gotOut := output.Stdout() + if !strings.Contains(gotOut, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyWithoutRefresh(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanRefresh = false + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh set + // to false. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(false, run.Refresh); diff != "" { + t.Errorf("wrong Refresh setting in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithRefreshOnly(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has refresh-only set + // to true. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff(true, run.RefreshOnly); diff != "" { + t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithTarget(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // target address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" { + t.Errorf("wrong TargetAddrs in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithReplace(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo") + + op.ForceReplace = []addrs.AbsResourceInstance{addr} + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + if run.PlanEmpty { + t.Fatalf("expected plan to be non-empty") + } + + // We should find a run inside the mock client that has the same + // refresh address we requested above. 
+ runsAPI := b.client.Runs.(*MockRuns) + if got, want := len(runsAPI.Runs), 1; got != want { + t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want) + } + for _, run := range runsAPI.Runs { + if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" { + t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff) + } + } +} + +func TestCloud_applyWithRequiredVariables(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-variables") + defer configCleanup() + defer done(t) + + op.Variables = testVariables(tofu.ValueFromNamedFile, "foo") // "bar" variable value missing + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + // The usual error of a required variable being missing is deferred and the operation + // is successful + if run.Result != backend.OperationSuccess { + t.Fatal("expected plan operation to succeed") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("unexpected TFC header in output: %s", output) + } +} + +func TestCloud_applyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected 
configuration files error, got: %v", errOutput) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after failed apply: %s", err.Error()) + } +} + +func TestCloud_applyNoChanges(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-no-changes") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. 
Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summery: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } +} + +func TestCloud_applyNoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "Apply discarded") { + t.Fatalf("expected an apply discarded error, got: %v", errOutput) + } +} + +func TestCloud_applyAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyApprovedExternally(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. 
+ time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Apply(context.Background(), rl.Items[0].ID, tfe.RunApplyOptions{}) + if err != nil { + t.Fatalf("unexpected error approving run: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "approved using the UI or API") { + t.Fatalf("expected external approval in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyDiscardedExternally(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "wait-for-external-update", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + ctx := context.Background() + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting 
operation: %v", err) + } + + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) + + wl, err := b.client.Workspaces.List( + ctx, + b.organization, + nil, + ) + if err != nil { + t.Fatalf("unexpected error listing workspaces: %v", err) + } + if len(wl.Items) != 1 { + t.Fatalf("expected 1 workspace, got %d workspaces", len(wl.Items)) + } + + rl, err := b.client.Runs.List(ctx, wl.Items[0].ID, nil) + if err != nil { + t.Fatalf("unexpected error listing runs: %v", err) + } + if len(rl.Items) != 1 { + t.Fatalf("expected 1 run, got %d runs", len(rl.Items)) + } + + err = b.client.Runs.Discard(context.Background(), rl.Items[0].ID, tfe.RunDiscardOptions{}) + if err != nil { + t.Fatalf("unexpected error discarding run: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "discarded using the UI or API") { + t.Fatalf("expected external discard output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyWithAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + op.AutoApprove = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use + // the local backend with itself as embedded backend. 
+ t.Setenv("TF_FORCE_LOCAL_BACKEND", "1") + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestCloud_applyWorkspaceWithoutOperations(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "no-operations" + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("unexpected TFC header in output: %s", output) + } + if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } + if !run.State.HasManagedResourceInstanceObjects() { + t.Fatalf("expected resources in state") + } +} + +func TestCloud_applyLockTimeout(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. 
+ c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationApplyWithTimeout(t, "./testdata/apply", 50*time.Millisecond) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. + signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summery in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyDestroy(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, 
"./testdata/apply-destroy") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup, done := testOperationApply(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } +} 
+ +func TestCloud_applyJSONWithProvisioner(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner") + defer configCleanup() + defer done(t) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + outp := close(t) + gotOut := outp.Stdout() + if !strings.Contains(gotOut, "null_resource.foo: Provisioning with 'local-exec'") { + t.Fatalf("expected provisioner local-exec start in logs: %s", gotOut) + } + + if !strings.Contains(gotOut, "null_resource.foo: (local-exec):") { + t.Fatalf("expected provisioner local-exec progress in logs: %s", gotOut) + } + + if !strings.Contains(gotOut, "Hello World!") { + t.Fatalf("expected provisioner local-exec output in logs: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} + +func TestCloud_applyJSONWithProvisionerError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = 
&jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-provisioner-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "local-exec provisioner error") { + t.Fatalf("unexpected error in apply logs: %s", gotOut) + } +} + +func TestCloud_applyPolicyPass(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-passed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery 
in output: %s", output) + } +} + +func TestCloud_applyPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-hard-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestCloud_applyPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.AutoApprove = false + op.UIIn = input + op.UIOut = b.CLI + 
op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyPolicySoftFailAutoApproveSuccess(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + policyCheckMock := mocks.NewMockPolicyChecks(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. 
+ logs := strings.NewReader(fmt.Sprintf("%s\n%s", sentinelSoftFail, applySuccessOneResourceAdded)) + + pc := &tfe.PolicyCheck{ + ID: "pc-1", + Actions: &tfe.PolicyActions{ + IsOverridable: true, + }, + Permissions: &tfe.PolicyPermissions{ + CanOverride: true, + }, + Scope: tfe.PolicyScopeOrganization, + Status: tfe.PolicySoftFailed, + } + policyCheckMock.EXPECT().Read(gomock.Any(), gomock.Any()).Return(pc, nil) + policyCheckMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + policyCheckMock.EXPECT().Override(gomock.Any(), gomock.Any()).Return(nil, nil) + b.client.PolicyChecks = policyCheckMock + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. + logs = strings.NewReader("\n\n\n1 added, 0 changed, 0 destroyed") + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{}) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result != backend.OperationSuccess { + t.Fatal("expected apply operation to success due to auto-approve") + } + + if run.PlanEmpty { + t.Fatalf("expected plan to not be empty, plan opertion completed without error") + } + + if len(input.answers) != 0 { + t.Fatalf("expected no answers, got: %v", input.answers) + } + + errOutput := viewOutput.Stderr() + if strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected no policy check errors, instead got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Sentinel Result: false") 
{ + t.Fatalf("expected policy check to be false, insead got: %s", output) + } + if !strings.Contains(output, "Apply complete!") { + t.Fatalf("expected apply to be complete, instead got: %s", output) + } + + if !strings.Contains(output, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected resources, instead got: %s", output) + } +} + +func TestCloud_applyPolicySoftFailAutoApprove(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + ctrl := gomock.NewController(t) + + applyMock := mocks.NewMockApplies(ctrl) + // This needs three new lines because we check for a minimum of three lines + // in the parsing of logs in `opApply` function. + logs := strings.NewReader(applySuccessOneResourceAdded) + applyMock.EXPECT().Logs(gomock.Any(), gomock.Any()).Return(logs, nil) + b.client.Applies = applyMock + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-policy-soft-failed") + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + op.AutoApprove = true + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 2 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in cloud backend") { + 
t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestCloud_applyWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected apply error in output: %s", output) + } +} + +func TestCloud_applyJSONWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationApply(t, "./testdata/apply-json-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if 
run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "Unsupported block type") { + t.Fatalf("unexpected plan error in output: %s", gotOut) + } +} + +func TestCloud_applyVersionCheck(t *testing.T) { + testCases := map[string]struct { + localVersion string + remoteVersion string + forceLocal bool + executionMode string + wantErr string + }{ + "versions can be different for remote apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "remote", + }, + "versions can be different for local apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + executionMode: "local", + }, + "force local with remote operations and different versions is acceptable": { + localVersion: "0.14.0", + remoteVersion: "0.14.0-acme-provider-bundle", + forceLocal: true, + executionMode: "remote", + }, + "no error if versions are identical": { + localVersion: "0.14.0", + remoteVersion: "0.14.0", + forceLocal: true, + executionMode: "remote", + }, + "no error if force local but workspace has remote operations disabled": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + forceLocal: true, + executionMode: "local", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // SETUP: Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // SETUP: Set local version for the test case + tfversion.Prerelease = "" + tfversion.Version = tc.localVersion + tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion)) + + // SETUP: Set force local for the test case + b.forceLocal = tc.forceLocal + + ctx := context.Background() + + // SETUP: set the operations and Terraform 
Version fields on the + // remote workspace + _, err := b.client.Workspaces.Update( + ctx, + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: tfe.String(tc.executionMode), + TerraformVersion: tfe.String(tc.remoteVersion), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + // RUN: prepare the apply operation and run it + op, configCleanup, opDone := testOperationApply(t, "./testdata/apply") + defer configCleanup() + defer opDone(t) + + streams, done := terminal.StreamsForTesting(t) + view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams)) + op.View = view + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // RUN: wait for completion + <-run.Done() + output := done(t) + + if tc.wantErr != "" { + // ASSERT: if the test case wants an error, check for failure + // and the error message + if run.Result != backend.OperationFailure { + t.Fatalf("expected run to fail, but result was %#v", run.Result) + } + errOutput := output.Stderr() + if !strings.Contains(errOutput, tc.wantErr) { + t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) + } + } else { + // ASSERT: otherwise, check for success and appropriate output + // based on whether the run should be local or remote + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + output := b.CLI.(*cli.MockUi).OutputWriter.String() + hasRemote := strings.Contains(output, "Running apply in cloud backend") + hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") + hasResources := run.State.HasManagedResourceInstanceObjects() + if !tc.forceLocal && !isLocalExecutionMode(tc.executionMode) { + if !hasRemote { + 
t.Errorf("missing TFC header in output: %s", output) + } + if !hasSummary { + t.Errorf("expected apply summary in output: %s", output) + } + } else { + if hasRemote { + t.Errorf("unexpected TFC header in output: %s", output) + } + if !hasResources { + t.Errorf("expected resources in state") + } + } + } + }) + } +} + +const applySuccessOneResourceAdded = ` +OpenTofu v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. +` + +const sentinelSoftFail = ` +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" +` diff --git a/pkg/cloud/backend_cli.go b/pkg/cloud/backend_cli.go new file mode 100644 index 00000000000..e76e725e496 --- /dev/null +++ b/pkg/cloud/backend_cli.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package cloud

import (
	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/command/jsonformat"
)

// CLIInit implements backend.CLI. It copies the CLI-related settings from
// opts onto the Cloud backend and, when the wrapped local backend also
// implements backend.CLI, initializes that backend first with the same
// options. Returns the local backend's initialization error, if any.
func (b *Cloud) CLIInit(opts *backend.CLIOpts) error {
	// Initialize the underlying local backend first, if it supports CLI
	// initialization; abort on the first failure so b is left untouched.
	if cli, ok := b.local.(backend.CLI); ok {
		if err := cli.CLIInit(opts); err != nil {
			return err
		}
	}

	b.CLI = opts.CLI
	b.CLIColor = opts.CLIColor
	b.ContextOpts = opts.ContextOpts
	b.runningInAutomation = opts.RunningInAutomation
	b.input = opts.Input
	// The renderer drives the structured (JSON) run output path.
	b.renderer = &jsonformat.Renderer{
		Streams:  opts.Streams,
		Colorize: opts.CLIColor,
	}

	return nil
}
diff --git a/pkg/cloud/backend_colorize.go b/pkg/cloud/backend_colorize.go
new file mode 100644
index 00000000000..434acc38d53
--- /dev/null
+++ b/pkg/cloud/backend_colorize.go
@@ -0,0 +1,55 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package cloud

import (
	"regexp"

	"github.com/mitchellh/colorstring"
)

// TODO SvH: This file should be deleted and the type cliColorize should be
// renamed back to Colorize as soon as we can pass -no-color to the backend.

// colorsRe is used to find ANSI escaped color codes.
var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m")

// Colorer is the interface that must be implemented to colorize strings.
type Colorer interface {
	Color(v string) string
}

// Colorize is used to print output when the -no-color flag is used. It will
// strip all ANSI escaped color codes which are set while the operation was
// executed in Terraform Enterprise.
//
// When Terraform Enterprise supports run specific variables, this code can be
// removed as we can then pass the CLI flag to the backend and prevent the color
// codes from being written to the output.
type Colorize struct {
	cliColor *colorstring.Colorize
}

// Color will strip all ANSI escaped color codes and return an uncolored string.
func (c *Colorize) Color(v string) string {
	return colorsRe.ReplaceAllString(c.cliColor.Color(v), "")
}

// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so is useful
// as a helper to wrap any potentially colored strings.
func (b *Cloud) Colorize() Colorer {
	// Colors enabled: pass through the configured colorizer unchanged.
	if b.CLIColor != nil && !b.CLIColor.Disable {
		return b.CLIColor
	}
	// Colors disabled: wrap the colorizer so remote ANSI codes get stripped.
	if b.CLIColor != nil {
		return &Colorize{cliColor: b.CLIColor}
	}
	// No colorizer configured at all: fall back to a disabled default one.
	return &Colorize{cliColor: &colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true,
	}}
}
diff --git a/pkg/cloud/backend_common.go b/pkg/cloud/backend_common.go
new file mode 100644
index 00000000000..7f9cd02712a
--- /dev/null
+++ b/pkg/cloud/backend_common.go
@@ -0,0 +1,648 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package cloud

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/go-retryablehttp"
	tfe "github.com/hashicorp/go-tfe"
	"github.com/hashicorp/jsonapi"
	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/command/jsonformat"
	"github.com/kubegems/opentofu/pkg/logging"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/tofu"
)

var (
	// backoffMin/backoffMax bound the poll backoff, in milliseconds.
	backoffMin = 1000.0
	backoffMax = 3000.0

	runPollInterval = 3 * time.Second
)

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
+func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (b *Cloud) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return r, stopCtx.Err() + case <-cancelCtx.Done(): + return r, cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + // Timer up, show status + } + + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is no longer pending. + if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { + if i == 0 && opType == "plan" && b.CLI != nil { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) + } + if i > 0 && b.CLI != nil { + // Insert a blank line to separate the ouputs. + b.CLI.Output("") + } + return r, nil + } + + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + position := 0 + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + + // Retrieve the workspace used to run this operation in. + w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // If the workspace is locked the run will not be queued and we can + // update the status without making any expensive calls. 
+ if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run. + if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := &tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. + if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.ReadRunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.ReadRunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. 
+ for _, item := range rq.Items { + if r.ID == item.ID { + position = item.PositionInQueue + break search + } + } + + // Exit the loop when we've seen all pages. + if rq.CurrentPage >= rq.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rq.NextPage + } + + if position > 0 { + c, err := b.client.Organizations.ReadCapacity(stopCtx, b.organization) + if err != nil { + return r, generalError("Failed to retrieve capacity", err) + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d queued run(s) to finish before starting...%s", + position-c.Running, + elapsed, + ))) + continue + } + + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for the %s to start...%s", opType, elapsed))) + } + } +} + +func (b *Cloud) waitTaskStage(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run, stageID string, outputTitle string) error { + integration := &IntegrationContext{ + B: b, + StopContext: stopCtx, + CancelContext: cancelCtx, + Op: op, + Run: r, + } + return b.runTaskStage(integration, integration.BeginOutput(outputTitle), stageID) +} + +func (b *Cloud) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.CostEstimate == nil { + return nil + } + + msgPrefix := "Cost Estimation" + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return stopCtx.Err() + case <-cancelCtx.Done(): + return cancelCtx.Err() + case <-time.After(backoff(backoffMin, backoffMax, i)): + } + + // Retrieve the cost estimate to get its current status. + ce, err := b.client.CostEstimates.Read(stopCtx, r.CostEstimate.ID) + if err != nil { + return generalError("Failed to retrieve cost estimate", err) + } + + // If the run is canceled or errored, but the cost-estimate still has + // no result, there is nothing further to render. 
+ if ce.Status != tfe.CostEstimateFinished { + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return nil + } + } + + // checking if i == 0 so as to avoid printing this starting horizontal-rule + // every retry, and that it only prints it on the first (i=0) attempt. + if b.CLI != nil && i == 0 { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + + switch ce.Status { + case tfe.CostEstimateFinished: + delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) + if err != nil { + return generalError("Unexpected error", err) + } + + sign := "+" + if delta < 0 { + sign = "-" + } + + deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) + b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) + + if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + return nil + case tfe.CostEstimatePending, tfe.CostEstimateQueued: + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) + } + continue + case tfe.CostEstimateSkippedDueToTargeting: + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + b.CLI.Output("Not available for this plan, because it was created with the -target option.") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateErrored: + b.CLI.Output(msgPrefix + " errored.\n") + b.CLI.Output("\n------------------------------------------------------------------------") + return nil + case tfe.CostEstimateCanceled: + return fmt.Errorf(msgPrefix + " canceled.") + default: + return fmt.Errorf("Unknown or unexpected cost estimate state: %s", ce.Status) + } + } +} + +func (b *Cloud) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + // Read the policy check logs. This is a blocking call that will only + // return once the policy check is complete. + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + // Retrieve the policy check to get its current status. + pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + // If the run is canceled or errored, but the policy check still has + // no result, there is nothing further to render. 
+ if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + switch pc.Status { + case tfe.PolicyPending, tfe.PolicyQueued, tfe.PolicyUnreachable: + continue + } + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization Policy Check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace Policy Check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color("[bold]" + msgPrefix + ":\n")) + } + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + switch pc.Status { + case tfe.PolicyPasses: + if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + runUrl := fmt.Sprintf(runHeader, b.hostname, b.organization, op.Workspace, r.ID) + + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " soft failed.\n" + runUrl) + } + + if op.AutoApprove { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else if !b.input { + return errPolicyOverrideNeedsUIConfirmation + } else { + opts := &tofu.InputOpts{ + Id: "override", + Query: "\nDo you want to 
override the soft failed policy check?", + Description: "Only 'override' will be accepted to override.", + } + err = b.confirm(stopCtx, op, opts, r, "override") + if err != nil && err != errRunOverridden { + return fmt.Errorf( + fmt.Sprintf("Failed to override: %s\n%s\n", err.Error(), runUrl), + ) + } + + if err != errRunOverridden { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } + } else { + b.CLI.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) + } + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) + } + } + + return nil +} + +func (b *Cloud) confirm(stopCtx context.Context, op *backend.Operation, opts *tofu.InputOpts, r *tfe.Run, keyword string) error { + doneCtx, cancel := context.WithCancel(stopCtx) + result := make(chan error, 2) + + go func() { + // Make sure we cancel doneCtx before we return + // so the input command is also canceled. + defer cancel() + + for { + select { + case <-doneCtx.Done(): + return + case <-stopCtx.Done(): + return + case <-time.After(runPollInterval): + // Retrieve the run again to get its current status. 
+ r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + result <- generalError("Failed to retrieve run", err) + return + } + + switch keyword { + case "override": + if r.Status != tfe.RunPolicyOverride && r.Status != tfe.RunPostPlanAwaitingDecision { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunOverridden + } + } + case "yes": + if !r.Actions.IsConfirmable { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunApproved + } + } + } + + if err != nil { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) + } + + if err == errRunDiscarded { + err = errApplyDiscarded + if op.PlanMode == plans.DestroyMode { + err = errDestroyDiscarded + } + } + + result <- err + return + } + } + } + }() + + result <- func() error { + v, err := op.UIIn.Input(doneCtx, opts) + if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { + return fmt.Errorf("Error asking %s: %w", opts.Id, err) + } + + // We return the error of our parent channel as we don't + // care about the error of the doneCtx which is only used + // within this function. So if the doneCtx was canceled + // because stopCtx was canceled, this will properly return + // a context.Canceled error and otherwise it returns nil. + if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { + return stopCtx.Err() + } + + // Make sure we cancel the context here so the loop that + // checks for external changes to the run is ended before + // we start to make changes ourselves. + cancel() + + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. 
+ if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.PlanMode == plans.DestroyMode { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was discarded successfully, we still + // return an error as the apply command was canceled. + if op.PlanMode == plans.DestroyMode { + return errDestroyDiscarded + } + return errApplyDiscarded + } + + return nil + }() + + return <-result +} + +// This method will fetch the redacted plan output as a byte slice, mirroring +// the behavior of the similar client.Plans.ReadJSONOutput method. +// +// Note: Apologies for the lengthy definition, this is a result of not being +// able to mock receiver methods +var readRedactedPlan func(context.Context, url.URL, string, string) ([]byte, error) = func(ctx context.Context, baseURL url.URL, token string, planID string) ([]byte, error) { + client := retryablehttp.NewClient() + client.RetryMax = 10 + client.RetryWaitMin = 100 * time.Millisecond + client.RetryWaitMax = 400 * time.Millisecond + client.Logger = logging.HCLogger() + + u, err := baseURL.Parse(fmt.Sprintf( + "plans/%s/json-output-redacted", url.QueryEscape(planID))) + if err != nil { + return nil, err + } + + req, err := retryablehttp.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Accept", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err = checkResponseCode(resp); err != nil { + return nil, err + } + + return io.ReadAll(resp.Body) +} + +// decodeRedactedPlan unmarshals a downloaded redacted plan into a struct the +// jsonformat.Renderer expects. 
+func decodeRedactedPlan(jsonBytes []byte) (*jsonformat.Plan, error) { + r := bytes.NewReader(jsonBytes) + p := &jsonformat.Plan{} + if err := json.NewDecoder(r).Decode(p); err != nil { + return nil, err + } + return p, nil +} + +func checkResponseCode(r *http.Response) error { + if r.StatusCode >= 200 && r.StatusCode <= 299 { + return nil + } + + var errs []string + var err error + + switch r.StatusCode { + case 401: + return tfe.ErrUnauthorized + case 404: + return tfe.ErrResourceNotFound + } + + errs, err = decodeErrorPayload(r) + if err != nil { + return err + } + + return errors.New(strings.Join(errs, "\n")) +} + +func decodeErrorPayload(r *http.Response) ([]string, error) { + // Decode the error payload. + var errs []string + errPayload := &jsonapi.ErrorsPayload{} + err := json.NewDecoder(r.Body).Decode(errPayload) + if err != nil || len(errPayload.Errors) == 0 { + return errs, errors.New(r.Status) + } + + // Parse and format the errors. + for _, e := range errPayload.Errors { + if e.Detail == "" { + errs = append(errs, e.Title) + } else { + errs = append(errs, fmt.Sprintf("%s\n\n%s", e.Title, e.Detail)) + } + } + + return errs, nil +} diff --git a/pkg/cloud/backend_context.go b/pkg/cloud/backend_context.go new file mode 100644 index 00000000000..10f6a3ca7a4 --- /dev/null +++ b/pkg/cloud/backend_context.go @@ -0,0 +1,298 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +// LocalRun implements backend.Local +func (b *Cloud) LocalRun(op *backend.Operation) (*backend.LocalRun, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := &backend.LocalRun{ + PlanOpts: &tofu.PlanOpts{ + Mode: op.PlanMode, + Targets: op.Targets, + }, + } + + op.StateLocker = op.StateLocker.WithContext(context.Background()) + + // Get the remote workspace name. + remoteWorkspaceName := b.getRemoteWorkspaceName(op.Workspace) + + // Get the latest state. + log.Printf("[TRACE] cloud: requesting state manager for workspace %q", remoteWorkspaceName) + stateMgr, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] cloud: requesting state lock for workspace %q", remoteWorkspaceName) + if diags := op.StateLocker.Lock(stateMgr, op.Type.String()); diags.HasErrors() { + return nil, nil, diags + } + + defer func() { + // If we're returning with errors, and thus not producing a valid + // context, we'll want to avoid leaving the remote workspace locked. 
+ if diags.HasErrors() { + diags = diags.Append(op.StateLocker.Unlock()) + } + }() + + log.Printf("[TRACE] cloud: reading remote state for workspace %q", remoteWorkspaceName) + if err := stateMgr.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("error loading state: %w", err)) + return nil, nil, diags + } + + // Initialize our context options + var opts tofu.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.UIInput = op.UIIn + opts.Encryption = op.Encryption + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] cloud: retrieving remote state snapshot for workspace %q", remoteWorkspaceName) + ret.InputState = stateMgr.State() + + log.Printf("[TRACE] cloud: loading configuration for the current working directory") + config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir, op.RootCall) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + ret.Config = config + + if op.AllowUnsetVariables { + // If we're not going to use the variables in an operation we'll be + // more lax about them, stubbing out any unset ones as unknown. + // This gives us enough information to produce a consistent context, + // but not enough information to run a real operation (plan, apply, etc) + ret.PlanOpts.SetVariables = stubAllVariables(op.Variables, config.Module.Variables) + } else { + // The underlying API expects us to use the opaque workspace id to request + // variables, so we'll need to look that up using our organization name + // and workspace name. 
+ remoteWorkspaceID, err := b.getRemoteWorkspaceID(context.Background(), op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error finding remote workspace: %w", err)) + return nil, nil, diags + } + w, err := b.fetchWorkspace(context.Background(), b.organization, op.Workspace) + if err != nil { + diags = diags.Append(fmt.Errorf("error loading workspace: %w", err)) + return nil, nil, diags + } + + if isLocalExecutionMode(w.ExecutionMode) { + log.Printf("[TRACE] skipping retrieving variables from workspace %s/%s (%s), workspace is in Local Execution mode", remoteWorkspaceName, b.organization, remoteWorkspaceID) + } else { + log.Printf("[TRACE] cloud: retrieving variables from workspace %s/%s (%s)", remoteWorkspaceName, b.organization, remoteWorkspaceID) + tfeVariables, err := b.client.Variables.List(context.Background(), remoteWorkspaceID, nil) + if err != nil && err != tfe.ErrResourceNotFound { + diags = diags.Append(fmt.Errorf("error loading variables: %w", err)) + return nil, nil, diags + } + + if tfeVariables != nil { + if op.Variables == nil { + op.Variables = make(map[string]backend.UnparsedVariableValue) + } + + for _, v := range tfeVariables.Items { + if v.Category == tfe.CategoryTerraform { + if _, ok := op.Variables[v.Key]; !ok { + op.Variables[v.Key] = &remoteStoredVariableValue{ + definition: v, + } + } + } + } + } + } + + if op.Variables != nil { + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } + ret.PlanOpts.SetVariables = variables + } + } + + tfCtx, ctxDiags := tofu.NewContext(&opts) + diags = diags.Append(ctxDiags) + ret.Core = tfCtx + + log.Printf("[TRACE] cloud: finished building tofu.Context") + + return ret, stateMgr, diags +} + +func (b *Cloud) getRemoteWorkspaceName(localWorkspaceName string) string { + switch { + case localWorkspaceName == backend.DefaultStateName: + // The default workspace name is a 
special case + return b.WorkspaceMapping.Name + default: + return localWorkspaceName + } +} + +func (b *Cloud) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { + remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) + + log.Printf("[TRACE] cloud: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) + remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + return nil, err + } + + return remoteWorkspace, nil +} + +func (b *Cloud) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { + remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) + if err != nil { + return "", err + } + + return remoteWorkspace.ID, nil +} + +func stubAllVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) tofu.InputValues { + ret := make(tofu.InputValues, len(decls)) + + for name, cfg := range decls { + raw, exists := vv[name] + if !exists { + ret[name] = &tofu.InputValue{ + Value: cty.UnknownVal(cfg.Type), + SourceType: tofu.ValueFromConfig, + } + continue + } + + val, diags := raw.ParseVariableValue(cfg.ParsingMode) + if diags.HasErrors() { + ret[name] = &tofu.InputValue{ + Value: cty.UnknownVal(cfg.Type), + SourceType: tofu.ValueFromConfig, + } + continue + } + ret[name] = val + } + + return ret +} + +// remoteStoredVariableValue is a backend.UnparsedVariableValue implementation +// that translates from the go-tfe representation of stored variables into +// the Terraform Core backend representation of variables. 
+type remoteStoredVariableValue struct { + definition *tfe.Variable +} + +var _ backend.UnparsedVariableValue = (*remoteStoredVariableValue)(nil) + +func (v *remoteStoredVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var val cty.Value + + switch { + case v.definition.Sensitive: + // If it's marked as sensitive then it's not available for use in + // local operations. We'll use an unknown value as a placeholder for + // it so that operations that don't need it might still work, but + // we'll also produce a warning about it to add context for any + // errors that might result here. + val = cty.DynamicVal + if !v.definition.HCL { + // If it's not marked as HCL then we at least know that the + // value must be a string, so we'll set that in case it allows + // us to do some more precise type checking. + val = cty.UnknownVal(cty.String) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Value for var.%s unavailable", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked as sensitive in the remote workspace. This operation always runs locally, so the value for that variable is not available.", v.definition.Key), + )) + + case v.definition.HCL: + // If the variable value is marked as being in HCL syntax, we need to + // parse it the same way as it would be interpreted in a .tfvars + // file because that is how it would get passed to Terraform CLI for + // a remote operation and we want to mimic that result as closely as + // possible. + var exprDiags hcl.Diagnostics + expr, exprDiags := hclsyntax.ParseExpression([]byte(v.definition.Value), "", hcl.Pos{Line: 1, Column: 1}) + if expr != nil { + var moreDiags hcl.Diagnostics + val, moreDiags = expr.Value(nil) + exprDiags = append(exprDiags, moreDiags...) + } else { + // We'll have already put some errors in exprDiags above, so we'll + // just stub out the value here. 
+ val = cty.DynamicVal + } + + // We don't have sufficient context to return decent error messages + // for syntax errors in the remote values, so we'll just return a + // generic message instead for now. + // (More complete error messages will still result from true remote + // operations, because they'll run on the remote system where we've + // materialized the values into a tfvars file we can report from.) + if exprDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid expression for var.%s", v.definition.Key), + fmt.Sprintf("The value of variable %q is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.", v.definition.Key), + )) + } + + default: + // A variable value _not_ marked as HCL is always be a string, given + // literally. + val = cty.StringVal(v.definition.Value) + } + + return &tofu.InputValue{ + Value: val, + + // We mark these as "from input" with the rationale that entering + // variable values into the Terraform Cloud or Enterprise UI is, + // roughly speaking, a similar idea to entering variable values at + // the interactive CLI prompts. It's not a perfect correspondance, + // but it's closer than the other options. + SourceType: tofu.ValueFromInput, + }, diags +} diff --git a/pkg/cloud/backend_context_test.go b/pkg/cloud/backend_context_test.go new file mode 100644 index 00000000000..35fb21297f6 --- /dev/null +++ b/pkg/cloud/backend_context_test.go @@ -0,0 +1,461 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "reflect" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestRemoteStoredVariableValue(t *testing.T) { + tests := map[string]struct { + Def *tfe.Variable + Want cty.Value + WantError string + }{ + "string literal": { + &tfe.Variable{ + Key: "test", + Value: "foo", + HCL: false, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "string HCL": { + &tfe.Variable{ + Key: "test", + Value: `"foo"`, + HCL: true, + Sensitive: false, + }, + cty.StringVal("foo"), + ``, + }, + "list HCL": { + &tfe.Variable{ + Key: "test", + Value: `[]`, + HCL: true, + Sensitive: false, + }, + cty.EmptyTupleVal, + ``, + }, + "null HCL": { + &tfe.Variable{ + Key: "test", + Value: `null`, + HCL: true, + Sensitive: false, + }, + cty.NullVal(cty.DynamicPseudoType), + ``, + }, + "literal sensitive": { + &tfe.Variable{ + Key: "test", + HCL: false, + Sensitive: true, + }, + cty.UnknownVal(cty.String), + ``, + }, + "HCL sensitive": { + &tfe.Variable{ + Key: "test", + HCL: true, + Sensitive: true, + }, + cty.DynamicVal, + ``, + }, + "HCL computation": { + // This (stored expressions containing computation) is not a case + // we intentionally supported, but it became possible for remote + // operations in Terraform 0.12 (due to Terraform Cloud/Enterprise + // just writing the HCL verbatim into generated `.tfvars` files). 
+ // We support it here for consistency, and we continue to support + // it in both places for backward-compatibility. In practice, + // there's little reason to do computation in a stored variable + // value because references are not supported. + &tfe.Variable{ + Key: "test", + Value: `[for v in ["a"] : v]`, + HCL: true, + Sensitive: false, + }, + cty.TupleVal([]cty.Value{cty.StringVal("a")}), + ``, + }, + "HCL syntax error": { + &tfe.Variable{ + Key: "test", + Value: `[`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + "HCL with references": { + &tfe.Variable{ + Key: "test", + Value: `foo.bar`, + HCL: true, + Sensitive: false, + }, + cty.DynamicVal, + `Invalid expression for var.test: The value of variable "test" is marked in the remote workspace as being specified in HCL syntax, but the given value is not valid HCL. Stored variable values must be valid literal expressions and may not contain references to other variables or calls to functions.`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + v := &remoteStoredVariableValue{ + definition: test.Def, + } + // This ParseVariableValue implementation ignores the parsing mode, + // so we'll just always parse literal here. (The parsing mode is + // selected by the remote server, not by our local configuration.) 
+ gotIV, diags := v.ParseVariableValue(configs.VariableParseLiteral) + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + got := gotIV.Value + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} + +func TestRemoteContextWithVars(t *testing.T) { + catTerraform := tfe.CategoryTerraform + catEnv := tfe.CategoryEnv + + tests := map[string]struct { + Opts *tfe.VariableCreateOptions + WantError string + }{ + "Terraform variable": { + &tfe.VariableCreateOptions{ + Category: &catTerraform, + }, + `Value for undeclared variable: A variable named "key" was assigned a value, but the root module does not declare a variable of that name. 
To use this value, add a "variable" block to the configuration.`, + }, + "environment variable": { + &tfe.VariableCreateOptions{ + Category: &catEnv, + }, + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/empty" + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: testBackendSingleWorkspaceName, + } + + v := test.Opts + if v.Key == nil { + key := "key" + v.Key = &key + } + b.client.Variables.Create(context.TODO(), workspaceID, *v) + + _, _, diags := b.LocalRun(op) + + if test.WantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", test.WantError) + } + errStr := diags.Err().Error() + if errStr != test.WantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, test.WantError) + } + // When Context() returns an error, it should unlock the state, + // so re-locking it is expected to succeed. 
+ stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state: %s", err.Error()) + } + } else { + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + t.Fatal("unexpected success locking state after Context") + } + } + }) + } +} + +func TestRemoteVariablesDoNotOverride(t *testing.T) { + catTerraform := tfe.CategoryTerraform + + varName1 := "key1" + varName2 := "key2" + varName3 := "key3" + + varValue1 := "value1" + varValue2 := "value2" + varValue3 := "value3" + + tests := map[string]struct { + localVariables map[string]backend.UnparsedVariableValue + remoteVariables []*tfe.VariableCreateOptions + expectedVariables tofu.InputValues + }{ + "no local variables": { + map[string]backend.UnparsedVariableValue{}, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, + { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: 
tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + }, + }, + "single conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.StringVal(varValue3)}, + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, { + Key: &varName3, + Value: &varValue3, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: tofu.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + "no conflicting local variable": { + map[string]backend.UnparsedVariableValue{ + varName3: testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.StringVal(varValue3)}, + }, + []*tfe.VariableCreateOptions{ + { + Key: &varName1, + Value: &varValue1, + Category: &catTerraform, + }, { + Key: &varName2, + Value: &varValue2, + Category: &catTerraform, + }, + }, + tofu.InputValues{ + varName1: &tofu.InputValue{ + Value: cty.StringVal(varValue1), + SourceType: 
tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName2: &tofu.InputValue{ + Value: cty.StringVal(varValue2), + SourceType: tofu.ValueFromInput, + SourceRange: tfdiags.SourceRange{ + Filename: "", + Start: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + End: tfdiags.SourcePos{Line: 0, Column: 0, Byte: 0}, + }, + }, + varName3: &tofu.InputValue{ + Value: cty.StringVal(varValue3), + SourceType: tofu.ValueFromNamedFile, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configDir := "./testdata/variables" + + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + defer configCleanup() + + workspaceID, err := b.getRemoteWorkspaceID(context.Background(), testBackendSingleWorkspaceName) + if err != nil { + t.Fatal(err) + } + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewStateLocker(arguments.ViewHuman, views.NewView(streams)) + + op := &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + StateLocker: clistate.NewLocker(0, view), + Workspace: testBackendSingleWorkspaceName, + Variables: test.localVariables, + } + + for _, v := range test.remoteVariables { + b.client.Variables.Create(context.TODO(), workspaceID, *v) + } + + lr, _, diags := b.LocalRun(op) + + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s\nwant: ", diags.Err().Error()) + } + // When Context() succeeds, this should fail w/ "workspace already locked" + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err == nil { + 
t.Fatal("unexpected success locking state after Context") + } + + actual := lr.PlanOpts.SetVariables + expected := test.expectedVariables + + for expectedKey := range expected { + actualValue := actual[expectedKey] + expectedValue := expected[expectedKey] + + if !reflect.DeepEqual(*actualValue, *expectedValue) { + t.Fatalf("unexpected variable '%s'\ngot: %v\nwant: %v", expectedKey, actualValue, expectedValue) + } + } + }) + } +} diff --git a/pkg/cloud/backend_plan.go b/pkg/cloud/backend_plan.go new file mode 100644 index 00000000000..f5ecad65aca --- /dev/null +++ b/pkg/cloud/backend_plan.go @@ -0,0 +1,640 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/genconfig" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +var planConfigurationVersionsPollInterval = 500 * time.Millisecond + +func (b *Cloud) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] cloud: starting Plan operation") + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. 
In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if b.ContextOpts != nil && b.ContextOpts.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `Cloud backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `Cloud backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if !op.HasConfig() && op.PlanMode != plans.DestroyMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a OpenTofu configuration file in the path being executed and try again.`, + )) + } + + if len(op.GenerateConfigOut) > 0 { + diags = diags.Append(genconfig.ValidateTargetFile(op.GenerateConfigOut)) + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + // If the run errored, exit before checking whether to save a plan file + run, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return nil, err + } + + // Save plan file if -out was specified + if op.PlanOutPath != "" { + bookmark := cloudplan.NewSavedPlanBookmark(run.ID, b.hostname) + err = bookmark.Save(op.PlanOutPath) + if err != nil { + return nil, err + } + } + + // Everything succeded, so display next steps + op.View.PlanNextStep(op.PlanOutPath, op.GenerateConfigOut) + + return run, nil +} + +func (b *Cloud) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply || op.Type == backend.OperationTypeRefresh { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) + } + + // Plan-only means they ran tofu plan without -out. + provisional := op.PlanOutPath != "" + planOnly := op.Type == backend.OperationTypePlan && !provisional + + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(planOnly), + Provisional: tfe.Bool(provisional), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // De-normalize the configuration directory path. + configDir, err = filepath.Abs(op.ConfigDir) + if err != nil { + return nil, generalError( + "Failed to get absolute path of the configuration directory: %v", err) + } + + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. 
+ configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(configDir), + filepath.Clean(w.WorkingDirectory), + )) + + // If the workspace has a subdirectory as its working directory then + // our configDir will be some parent directory of the current working + // directory. Users are likely to find that surprising, so we'll + // produce an explicit message about it to be transparent about what + // we are doing and why. + if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory { + if b.CLI != nil { + b.CLI.Output(fmt.Sprintf(strings.TrimSpace(` +The remote workspace is configured to work with configuration at +%s relative to the target repository. + +OpenTofu will upload the contents of the following directory, +excluding files or directories as defined by a .terraformignore file +at %s/.terraformignore (if it is present), +in order to capture the filesystem context the remote workspace expects: + %s +`), w.WorkingDirectory, configDir, configDir) + "\n") + } + } + + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = os.MkdirTemp("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. 
+ err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(planConfigurationVersionsPollInterval): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + ConfigurationVersion: cv, + Refresh: tfe.Bool(op.PlanRefresh), + Workspace: w, + AutoApply: tfe.Bool(op.AutoApprove), + SavePlan: tfe.Bool(op.PlanOutPath != ""), + } + + switch op.PlanMode { + case plans.NormalMode: + // okay, but we don't need to do anything special for this + case plans.RefreshOnlyMode: + runOptions.RefreshOnly = tfe.Bool(true) + case plans.DestroyMode: + runOptions.IsDestroy = tfe.Bool(true) + default: + // Shouldn't get here because we should update this for each new + // plan mode we add, mapping it to the corresponding RunCreateOptions + // field. 
+ return nil, generalError( + "Invalid plan mode", + fmt.Errorf("Cloud backend doesn't support %s", op.PlanMode), + ) + } + + if len(op.Targets) != 0 { + runOptions.TargetAddrs = make([]string, 0, len(op.Targets)) + for _, addr := range op.Targets { + runOptions.TargetAddrs = append(runOptions.TargetAddrs, addr.String()) + } + } + + if len(op.ForceReplace) != 0 { + runOptions.ReplaceAddrs = make([]string, 0, len(op.ForceReplace)) + for _, addr := range op.ForceReplace { + runOptions.ReplaceAddrs = append(runOptions.ReplaceAddrs, addr.String()) + } + } + + config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir, op.RootCall) + if configDiags.HasErrors() { + return nil, fmt.Errorf("error loading config with snapshot: %w", configDiags.Errs()[0]) + } + + variables, varDiags := ParseCloudRunVariables(op.Variables, config.Module.Variables) + + if varDiags.HasErrors() { + return nil, varDiags.Err() + } + + runVariables := make([]*tfe.RunVariable, 0, len(variables)) + for name, value := range variables { + runVariables = append(runVariables, &tfe.RunVariable{ + Key: name, + Value: value, + }) + } + runOptions.Variables = runVariables + + if len(op.GenerateConfigOut) > 0 { + runOptions.AllowConfigGeneration = tfe.Bool(true) + } + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + // When the lock timeout is set, if the run is still pending and + // cancellable after that period, we attempt to cancel it. + if lockTimeout := op.StateLocker.Timeout(); lockTimeout > 0 { + go func() { + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(lockTimeout): + // Retrieve the run to get its current status. 
+ r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto aprove flag to indicate that we do not + // want to ask if the remote operation should be canceled. + op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + // Render any warnings that were raised during run creation + if err := b.renderRunWarnings(stopCtx, b.client, r.ID); err != nil { + return r, err + } + + // Retrieve the run to get task stages. + // Task Stages are calculated upfront so we only need to call this once for the run. + taskStages, err := b.runTaskStages(stopCtx, b.client, r.ID) + if err != nil { + return r, err + } + + if stage, ok := taskStages[tfe.PrePlan]; ok { + if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Pre-plan Tasks"); err != nil { + return r, err + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + err = b.renderPlanLogs(stopCtx, op, r) + if err != nil { + return r, err + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // If the run is canceled or errored, we still continue to the + // cost-estimation and policy check phases to ensure we render any + // results available. 
In the case of a hard-failed policy check, the
+	// status of the run will be "errored", but there is still policy
+	// information which should be shown.
+
+	// Run any post-plan task stages before surfacing cost estimates and
+	// policy check results.
+	if stage, ok := taskStages[tfe.PostPlan]; ok {
+		if err := b.waitTaskStage(stopCtx, cancelCtx, op, r, stage.ID, "Post-plan Tasks"); err != nil {
+			return r, err
+		}
+	}
+
+	// Show any cost estimation output.
+	if r.CostEstimate != nil {
+		err = b.costEstimate(stopCtx, cancelCtx, op, r)
+		if err != nil {
+			return r, err
+		}
+	}
+
+	// Check any configured sentinel policies.
+	if len(r.PolicyChecks) > 0 {
+		err = b.checkPolicy(stopCtx, cancelCtx, op, r)
+		if err != nil {
+			return r, err
+		}
+	}
+
+	return r, nil
+}
+
+// AssertImportCompatible errors if the user is attempting to use configuration-
+// driven import and the version of the agent or API is too low to support it.
+func (b *Cloud) AssertImportCompatible(config *configs.Config) error {
+	// Check TFC_RUN_ID is populated, indicating we are running in a remote TFC
+	// execution environment. Outside of that environment (or when the config
+	// has no import blocks) there is nothing to check.
+	if len(config.Module.Import) > 0 && os.Getenv("TFC_RUN_ID") != "" {
+		// First, check the remote API version is high enough.
+		currentAPIVersion, err := version.NewVersion(b.client.RemoteAPIVersion())
+		if err != nil {
+			return fmt.Errorf("Error parsing remote API version. To proceed, please remove any import blocks from your config. Please report the following error to the OpenTofu team: %w", err)
+		}
+		desiredAPIVersion, _ := version.NewVersion("2.6")
+		if currentAPIVersion.LessThan(desiredAPIVersion) {
+			return fmt.Errorf("Import blocks are not supported in this version of the cloud backend. Please remove any import blocks from your config or upgrade the cloud backend.")
+		}
+
+		// Second, check the agent version is high enough.
+		agentEnv, isSet := os.LookupEnv("TFC_AGENT_VERSION")
+		if !isSet {
+			return fmt.Errorf("Error reading TFC agent version. To proceed, please remove any import blocks from your config. Please report the following error to the OpenTofu team: TFC_AGENT_VERSION not present.")
+		}
+		currentAgentVersion, err := version.NewVersion(agentEnv)
+		if err != nil {
+			return fmt.Errorf("Error parsing TFC agent version. To proceed, please remove any import blocks from your config. Please report the following error to the OpenTofu team: %w", err)
+		}
+		desiredAgentVersion, _ := version.NewVersion("1.10")
+		if currentAgentVersion.LessThan(desiredAgentVersion) {
+			return fmt.Errorf("Import blocks are not supported in this version of the cloud backend Agent. You are using agent version %s, but this feature requires version %s. Please remove any import blocks from your config or upgrade your agent.", currentAgentVersion, desiredAgentVersion)
+		}
+	}
+	return nil
+}
+
+// renderPlanLogs reads the streamed plan JSON logs and calls the JSON Plan renderer (jsonformat.RenderPlan) to
+// render the plan output. The plan output is fetched from the redacted output endpoint.
+func (b *Cloud) renderPlanLogs(ctx context.Context, op *backend.Operation, run *tfe.Run) error {
+	logs, err := b.client.Plans.Logs(ctx, run.Plan.ID)
+	if err != nil {
+		return err
+	}
+
+	if b.CLI != nil {
+		reader := bufio.NewReaderSize(logs, 64*1024)
+
+		for next := true; next; {
+			var l, line []byte
+			var err error
+
+			// ReadLine returns over-long lines in fragments (isPrefix set);
+			// stitch the fragments back together into a single logical line.
+			for isPrefix := true; isPrefix; {
+				l, isPrefix, err = reader.ReadLine()
+				if err != nil {
+					if err != io.EOF {
+						return generalError("Failed to read logs", err)
+					}
+					// EOF: finish processing any final partial line, then stop.
+					next = false
+				}
+
+				line = append(line, l...)
+			}
+
+			if next || len(line) > 0 {
+				log := &jsonformat.JSONLog{}
+				if err := json.Unmarshal(line, log); err != nil {
+					// If we can not parse the line as JSON, we will simply
+					// print the line. This maintains backwards compatibility for
+					// users who do not wish to enable structured output in their
+					// workspace.
+					b.CLI.Output(string(line))
+					continue
+				}
+
+				// We will ignore plan output, change summary or outputs logs
+				// during the plan phase.
+				if log.Type == jsonformat.LogOutputs ||
+					log.Type == jsonformat.LogChangeSummary ||
+					log.Type == jsonformat.LogPlannedChange {
+					continue
+				}
+
+				if b.renderer != nil {
+					// Otherwise, we will print the log
+					err := b.renderer.RenderLog(log)
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+
+	// Get the run's current status and include the workspace and plan. We will check if
+	// the run has errored, if structured output is enabled, and if the plan
+	run, err = b.client.Runs.ReadWithOptions(ctx, run.ID, &tfe.RunReadOptions{
+		Include: []tfe.RunIncludeOpt{tfe.RunWorkspace, tfe.RunPlan},
+	})
+	if err != nil {
+		return err
+	}
+
+	// If the run was errored, canceled, or discarded we will not resume the rest
+	// of this logic and attempt to render the plan, except in certain special circumstances
+	// where the plan errored but successfully generated configuration during an
+	// import operation. In that case, we need to keep going so we can load the JSON plan
+	// and use it to write the generated config to the specified output file.
+	shouldGenerateConfig := shouldGenerateConfig(op.GenerateConfigOut, run)
+	shouldRenderPlan := shouldRenderPlan(run)
+	if !shouldRenderPlan && !shouldGenerateConfig {
+		// We won't return an error here since we need to resume the logic that
+		// follows after rendering the logs (run tasks, cost estimation, etc.)
+		return nil
+	}
+
+	// Fetch the redacted JSON plan if we need it for either rendering the plan
+	// or writing out generated configuration.
+	var redactedPlan *jsonformat.Plan
+	renderSRO, err := b.shouldRenderStructuredRunOutput(run)
+	if err != nil {
+		return err
+	}
+	if renderSRO || shouldGenerateConfig {
+		jsonBytes, err := readRedactedPlan(ctx, b.client.BaseURL(), b.token, run.Plan.ID)
+		if err != nil {
+			return generalError("Failed to read JSON plan", err)
+		}
+		redactedPlan, err = decodeRedactedPlan(jsonBytes)
+		if err != nil {
+			return generalError("Failed to decode JSON plan", err)
+		}
+	}
+
+	// Write any generated config before rendering the plan, so we can stop in case of errors
+	if shouldGenerateConfig {
+		diags := maybeWriteGeneratedConfig(redactedPlan, op.GenerateConfigOut)
+		if diags.HasErrors() {
+			return diags.Err()
+		}
+	}
+
+	// Only generate the human readable output from the plan if structured run output is
+	// enabled. Otherwise we risk duplicate plan output since plan output may also be
+	// shown in the streamed logs.
+	if shouldRenderPlan && renderSRO {
+		b.renderer.RenderHumanPlan(*redactedPlan, op.PlanMode)
+	}
+
+	return nil
+}
+
+// maybeWriteGeneratedConfig attempts to write any generated configuration from the JSON plan
+// to the specified output file, if generated configuration exists and the correct flag was
+// passed to the plan command.
+func maybeWriteGeneratedConfig(plan *jsonformat.Plan, out string) (diags tfdiags.Diagnostics) {
+	if genconfig.ShouldWriteConfig(out) {
+		diags := genconfig.ValidateTargetFile(out)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// writer starts nil and is threaded through successive
+		// MaybeWriteConfig calls; presumably it is opened lazily on the
+		// first change that actually has generated config — confirm in genconfig.
+		var writer io.Writer
+		for _, c := range plan.ResourceChanges {
+			change := genconfig.Change{
+				Addr:            c.Address,
+				GeneratedConfig: c.Change.GeneratedConfig,
+			}
+			if c.Change.Importing != nil {
+				change.ImportID = c.Change.Importing.ID
+			}
+
+			var moreDiags tfdiags.Diagnostics
+			writer, _, moreDiags = change.MaybeWriteConfig(writer, out)
+			if moreDiags.HasErrors() {
+				return diags.Append(moreDiags)
+			}
+		}
+	}
+
+	return diags
+}
+
+// shouldRenderStructuredRunOutput ensures the remote workspace has structured
+// run output enabled and, if using Terraform Enterprise, ensures it is a release
+// that supports enabling SRO for CLI-driven runs. The plan output will have
+// already been rendered when the logs were read if this wasn't the case.
+func (b *Cloud) shouldRenderStructuredRunOutput(run *tfe.Run) (bool, error) {
+	if b.renderer == nil || !run.Workspace.StructuredRunOutputEnabled {
+		return false, nil
+	}
+
+	// If the cloud backend is configured against TFC, we only require that
+	// the workspace has structured run output enabled.
+	if b.client.IsCloud() && run.Workspace.StructuredRunOutputEnabled {
+		return true, nil
+	}
+
+	// If the cloud backend is configured against TFE, ensure the release version
+	// supports enabling SRO for CLI runs.
+	if b.client.IsEnterprise() {
+		tfeVersion := b.client.RemoteTFEVersion()
+		if tfeVersion != "" {
+			// Remote TFE versions look like "v202302-1": drop the leading
+			// "v" and compare the YYYYMM date component.
+			v := strings.Split(tfeVersion[1:], "-")
+			releaseDate, err := strconv.Atoi(v[0])
+			if err != nil {
+				return false, err
+			}
+
+			// Any release older than 202302-1 will not support enabling SRO for
+			// CLI-driven runs
+			if releaseDate < 202302 {
+				return false, nil
+			} else if run.Workspace.StructuredRunOutputEnabled {
+				return true, nil
+			}
+		}
+	}
+
+	// Version of TFE is unknowable
+	return false, nil
+}
+
+// shouldRenderPlan reports whether the run finished in a state whose plan is
+// worth rendering (i.e. it was not errored, canceled, or discarded).
+func shouldRenderPlan(run *tfe.Run) bool {
+	return !(run.Status == tfe.RunErrored || run.Status == tfe.RunCanceled ||
+		run.Status == tfe.RunDiscarded)
+}
+
+// shouldGenerateConfig reports whether generated configuration from the plan
+// should be written to the output file `out` (requested via -generate-config-out).
+func shouldGenerateConfig(out string, run *tfe.Run) bool {
+	return (run.Plan.Status == tfe.PlanErrored || run.Plan.Status == tfe.PlanFinished) &&
+		run.Plan.GeneratedConfiguration && len(out) > 0
+}
+
+const planDefaultHeader = `
+[reset][yellow]Running plan in cloud backend. Output will stream here. Pressing Ctrl-C
+will stop streaming the logs, but will not stop the plan running remotely.[reset]
+
+Preparing the remote plan...
+`
+
+const runHeader = `
+[reset][yellow]To view this run in a browser, visit:
+https://%s/app/%s/%s/runs/%s[reset]
+`
+
+// The newline in this error is to make it look good in the CLI!
+const lockTimeoutErr = `
+[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation.
+[reset]
+`
diff --git a/pkg/cloud/backend_plan_test.go b/pkg/cloud/backend_plan_test.go
new file mode 100644
index 00000000000..c1fea299228
--- /dev/null
+++ b/pkg/cloud/backend_plan_test.go
@@ -0,0 +1,1548 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cloud
+
+import (
+	"context"
+	"net/http"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	tfe "github.com/hashicorp/go-tfe"
+	"github.com/mitchellh/cli"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/cloud/cloudplan"
+	"github.com/kubegems/opentofu/pkg/command/arguments"
+	"github.com/kubegems/opentofu/pkg/command/clistate"
+	"github.com/kubegems/opentofu/pkg/command/jsonformat"
+	"github.com/kubegems/opentofu/pkg/command/views"
+	"github.com/kubegems/opentofu/pkg/depsfile"
+	"github.com/kubegems/opentofu/pkg/initwd"
+	"github.com/kubegems/opentofu/pkg/plans"
+	"github.com/kubegems/opentofu/pkg/plans/planfile"
+	"github.com/kubegems/opentofu/pkg/states/statemgr"
+	"github.com/kubegems/opentofu/pkg/terminal"
+	"github.com/kubegems/opentofu/pkg/tofu"
+)
+
+// testOperationPlan is a convenience wrapper around
+// testOperationPlanWithTimeout with no state-lock timeout.
+func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) {
+	t.Helper()
+
+	return testOperationPlanWithTimeout(t, configDir, 0)
+}
+
+// testOperationPlanWithTimeout builds a plan Operation for configDir and
+// returns it together with a config-cleanup func and a func that closes the
+// test terminal and returns its captured output.
+func testOperationPlanWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) {
+	t.Helper()
+
+	_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests")
+
+	streams, done := terminal.StreamsForTesting(t)
+	view := views.NewView(streams)
+	stateLockerView := views.NewStateLocker(arguments.ViewHuman, view)
+	operationView := views.NewOperation(arguments.ViewHuman, false, view)
+
+	// Many of our tests use an overridden "null" provider that's just in-memory
+	// inside the test process, not a separate plugin on disk.
+	depLocks := depsfile.NewLocks()
+	depLocks.SetProviderOverridden(addrs.MustParseProviderSourceString("registry.opentofu.org/hashicorp/null"))
+
+	return &backend.Operation{
+		ConfigDir:       configDir,
+		ConfigLoader:    configLoader,
+		PlanRefresh:     true,
+		StateLocker:     clistate.NewLocker(timeout, stateLockerView),
+		Type:            backend.OperationTypePlan,
+		View:            operationView,
+		DependencyLocks: depLocks,
+	}, configCleanup, done
+}
+
+func TestCloud_planBasic(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("expected TFC header in output: %s", output)
+	}
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+
+	stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName)
+	// An error suggests that the state was not unlocked after the operation finished
+	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
+		t.Fatalf("unexpected error locking state after successful plan: %s", err.Error())
+	}
+}
+
+func TestCloud_planJSONBasic(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	stream, close := terminal.StreamsForTesting(t)
+
+	b.renderer = &jsonformat.Renderer{
+		Streams:  stream,
+		Colorize: mockColorize(),
+	}
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-basic")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	mockSROWorkspace(t, b, op.Workspace)
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	outp := close(t)
+	gotOut := outp.Stdout()
+
+	if !strings.Contains(gotOut, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", gotOut)
+	}
+
+	stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName)
+	// An error suggests that the state was not unlocked after the operation finished
+	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
+		t.Fatalf("unexpected error locking state after successful plan: %s", err.Error())
+	}
+}
+
+func TestCloud_planCanceled(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	// Stop the run to simulate a Ctrl-C.
+	run.Stop()
+
+	<-run.Done()
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+
+	stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName)
+	// An error suggests that the state was not unlocked after the operation finished
+	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
+		t.Fatalf("unexpected error locking state after cancelled plan: %s", err.Error())
+	}
+}
+
+// TestCloud_planLongLine plans a config (testdata/plan-long-line) whose log
+// output presumably contains very long lines — exercising the backend's
+// buffered log reader. TODO confirm against the testdata fixture.
+func TestCloud_planLongLine(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan-long-line")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("expected TFC header in output: %s", output)
+	}
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+}
+
+func TestCloud_planJSONFull(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	stream, close := terminal.StreamsForTesting(t)
+
+	b.renderer = &jsonformat.Renderer{
+		Streams:  stream,
+		Colorize: mockColorize(),
+	}
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-full")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	mockSROWorkspace(t, b, op.Workspace)
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	outp := close(t)
+	gotOut := outp.Stdout()
+
+	if !strings.Contains(gotOut, "tfcoremock_simple_resource.example: Refreshing state... [id=my-simple-resource]") {
+		t.Fatalf("expected plan log: %s", gotOut)
+	}
+
+	if !strings.Contains(gotOut, "2 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", gotOut)
+	}
+
+	stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName)
+	// An error suggests that the state was not unlocked after the operation finished
+	if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
+		t.Fatalf("unexpected error locking state after successful plan: %s", err.Error())
+	}
+}
+
+func TestCloud_planWithoutPermissions(t *testing.T) {
+	b, bCleanup := testBackendWithTags(t)
+	defer bCleanup()
+
+	// Create a named workspace without permissions.
+	w, err := b.client.Workspaces.Create(
+		context.Background(),
+		b.organization,
+		tfe.WorkspaceCreateOptions{
+			Name: tfe.String("prod"),
+		},
+	)
+	if err != nil {
+		t.Fatalf("error creating named workspace: %v", err)
+	}
+	w.Permissions.CanQueueRun = false
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+
+	op.Workspace = "prod"
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	output := done(t)
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+
+	errOutput := output.Stderr()
+	if !strings.Contains(errOutput, "Insufficient rights to generate a plan") {
+		t.Fatalf("expected a permissions error, got: %v", errOutput)
+	}
+}
+
+func TestCloud_planWithParallelism(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+
+	if b.ContextOpts == nil {
+		b.ContextOpts = &tofu.ContextOpts{}
+	}
+	b.ContextOpts.Parallelism = 3
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	output := done(t)
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+
+	errOutput := output.Stderr()
+	if !strings.Contains(errOutput, "parallelism values are currently not supported") {
+		t.Fatalf("expected a parallelism error, got: %v", errOutput)
+	}
+}
+
+func TestCloud_planWithPlan(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+
+	op.PlanFile = planfile.NewWrappedLocal(&planfile.Reader{})
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	output := done(t)
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+	if !run.PlanEmpty {
+		t.Fatalf("expected plan to be empty")
+	}
+
+	errOutput := output.Stderr()
+	if !strings.Contains(errOutput, "saved plan is currently not supported") {
+		t.Fatalf("expected a saved plan error, got: %v", errOutput)
+	}
+}
+
+func TestCloud_planWithPath(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	tmpDir := t.TempDir()
+	pfPath := tmpDir + "/plan.tfplan"
+	op.PlanOutPath = pfPath
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("expected TFC header in output: %s", output)
+	}
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+
+	plan, err := cloudplan.LoadSavedPlanBookmark(pfPath)
+	if err != nil {
+		t.Fatalf("error loading cloud plan file: %v", err)
+	}
+	if !strings.Contains(plan.RunID, "run-") || plan.Hostname != tfeHost {
+		t.Fatalf("unexpected contents in saved cloud plan: %v", plan)
+	}
+
+	// We should find a run inside the mock client that has a provisional, non-speculative
+	// configuration version
+	configVersionsAPI := b.client.ConfigurationVersions.(*MockConfigurationVersions)
+	if got, want := len(configVersionsAPI.configVersions), 1; got != want {
+		t.Fatalf("wrong number of configuration versions in the mock client %d; want %d", got, want)
+	}
+	for _, configVersion := range configVersionsAPI.configVersions {
+		if configVersion.Provisional != true {
+			t.Errorf("wrong Provisional setting in the created configuration version\ngot %v, expected %v", configVersion.Provisional, true)
+		}
+
+		if configVersion.Speculative != false {
+			t.Errorf("wrong Speculative setting in the created configuration version\ngot %v, expected %v", configVersion.Speculative, false)
+		}
+	}
+}
+
+func TestCloud_planWithoutRefresh(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.PlanRefresh = false
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	// We should find a run inside the mock client that has refresh set
+	// to false.
+	runsAPI := b.client.Runs.(*MockRuns)
+	if got, want := len(runsAPI.Runs), 1; got != want {
+		t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want)
+	}
+	for _, run := range runsAPI.Runs {
+		if diff := cmp.Diff(false, run.Refresh); diff != "" {
+			t.Errorf("wrong Refresh setting in the created run\n%s", diff)
+		}
+	}
+}
+
+func TestCloud_planWithRefreshOnly(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.PlanMode = plans.RefreshOnlyMode
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatal("expected a non-empty plan")
+	}
+
+	// We should find a run inside the mock client that has refresh-only set
+	// to true.
+	runsAPI := b.client.Runs.(*MockRuns)
+	if got, want := len(runsAPI.Runs), 1; got != want {
+		t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want)
+	}
+	for _, run := range runsAPI.Runs {
+		if diff := cmp.Diff(true, run.RefreshOnly); diff != "" {
+			t.Errorf("wrong RefreshOnly setting in the created run\n%s", diff)
+		}
+	}
+}
+
+func TestCloud_planWithTarget(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	// When the backend code creates a new run, we'll tweak it so that it
+	// has a cost estimation object with the "skipped_due_to_targeting" status,
+	// emulating how a real server is expected to behave in that case.
+	b.client.Runs.(*MockRuns).ModifyNewRun = func(client *MockClient, options tfe.RunCreateOptions, run *tfe.Run) {
+		const fakeID = "fake"
+		// This is the cost estimate object embedded in the run itself which
+		// the backend will use to learn the ID to request from the cost
+		// estimates endpoint. It's pending to simulate what a freshly-created
+		// run is likely to look like.
+		run.CostEstimate = &tfe.CostEstimate{
+			ID:     fakeID,
+			Status: "pending",
+		}
+		// The backend will then use the main cost estimation API to retrieve
+		// the same ID indicated in the object above, where we'll then return
+		// the status "skipped_due_to_targeting" to trigger the special skip
+		// message in the backend output.
+		client.CostEstimates.Estimations[fakeID] = &tfe.CostEstimate{
+			ID:     fakeID,
+			Status: "skipped_due_to_targeting",
+		}
+	}
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	// The parse error is ignored here; the address literal is known-valid.
+	addr, _ := addrs.ParseAbsResourceStr("null_resource.foo")
+
+	op.Targets = []addrs.Targetable{addr}
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatal("expected plan operation to succeed")
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected plan to be non-empty")
+	}
+
+	// testBackendDefault above attached a "mock UI" to our backend, so we
+	// can retrieve its non-error output via the OutputWriter in-memory buffer.
+	gotOutput := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if wantOutput := "Not available for this plan, because it was created with the -target option."; !strings.Contains(gotOutput, wantOutput) {
+		t.Errorf("missing message about skipped cost estimation\ngot:\n%s\nwant substring: %s", gotOutput, wantOutput)
+	}
+
+	// We should find a run inside the mock client that has the same
+	// target address we requested above.
+	runsAPI := b.client.Runs.(*MockRuns)
+	if got, want := len(runsAPI.Runs), 1; got != want {
+		t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want)
+	}
+	for _, run := range runsAPI.Runs {
+		if diff := cmp.Diff([]string{"null_resource.foo"}, run.TargetAddrs); diff != "" {
+			t.Errorf("wrong TargetAddrs in the created run\n%s", diff)
+		}
+	}
+}
+
+func TestCloud_planWithReplace(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	addr, _ := addrs.ParseAbsResourceInstanceStr("null_resource.foo")
+
+	op.ForceReplace = []addrs.AbsResourceInstance{addr}
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatal("expected plan operation to succeed")
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected plan to be non-empty")
+	}
+
+	// We should find a run inside the mock client that has the same
+	// refresh address we requested above.
+	runsAPI := b.client.Runs.(*MockRuns)
+	if got, want := len(runsAPI.Runs), 1; got != want {
+		t.Fatalf("wrong number of runs in the mock client %d; want %d", got, want)
+	}
+	for _, run := range runsAPI.Runs {
+		if diff := cmp.Diff([]string{"null_resource.foo"}, run.ReplaceAddrs); diff != "" {
+			t.Errorf("wrong ReplaceAddrs in the created run\n%s", diff)
+		}
+	}
+}
+
+func TestCloud_planWithRequiredVariables(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan-variables")
+	defer configCleanup()
+	defer done(t)
+
+	op.Variables = testVariables(tofu.ValueFromCLIArg, "foo") // "bar" variable defined in config is missing
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	// The usual error of a required variable being missing is deferred and the operation
+	// is successful.
+	if run.Result != backend.OperationSuccess {
+		t.Fatal("expected plan operation to succeed")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("unexpected TFC header in output: %s", output)
+	}
+}
+
+func TestCloud_planNoConfig(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/empty")
+	defer configCleanup()
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	output := done(t)
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+	if !run.PlanEmpty {
+		t.Fatalf("expected plan to be empty")
+	}
+
+	errOutput := output.Stderr()
+	if !strings.Contains(errOutput, "configuration files found") {
+		t.Fatalf("expected configuration files error, got: %v", errOutput)
+	}
+}
+
+func TestCloud_planNoChanges(t *testing.T) {
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan-no-changes")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if !run.PlanEmpty {
+		t.Fatalf("expected plan to be empty")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") {
+		t.Fatalf("expected no changes in plan summary: %s", output)
+	}
+	if !strings.Contains(output, "Sentinel Result: true") {
+		t.Fatalf("expected policy check result in output: %s", output)
+	}
+}
+
+func TestCloud_planForceLocal(t *testing.T) {
+	// Set TF_FORCE_LOCAL_BACKEND so the cloud backend will use
+	// the local backend with itself as embedded backend.
+	t.Setenv("TF_FORCE_LOCAL_BACKEND", "1")
+
+	b, bCleanup := testBackendWithName(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	streams, done := terminal.StreamsForTesting(t)
+	view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams))
+	op.View = view
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("unexpected TFC header in output: %s", output)
+	}
+	if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+}
+
+func TestCloud_planWithoutOperationsEntitlement(t *testing.T) {
+	b, bCleanup := testBackendNoOperations(t)
+	defer bCleanup()
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = testBackendSingleWorkspaceName
+
+	streams, done := terminal.StreamsForTesting(t)
+	view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams))
+	op.View = view
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("unexpected TFC header in output: %s", output)
+	}
+	if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+}
+
+func TestCloud_planWorkspaceWithoutOperations(t *testing.T) {
+	b, bCleanup := testBackendWithTags(t)
+	defer bCleanup()
+
+	ctx := context.Background()
+
+	// Create a named workspace that doesn't allow operations.
+	_, err := b.client.Workspaces.Create(
+		ctx,
+		b.organization,
+		tfe.WorkspaceCreateOptions{
+			Name: tfe.String("no-operations"),
+		},
+	)
+	if err != nil {
+		t.Fatalf("error creating named workspace: %v", err)
+	}
+
+	op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	defer done(t)
+
+	op.Workspace = "no-operations"
+
+	streams, done := terminal.StreamsForTesting(t)
+	view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams))
+	op.View = view
+
+	run, err := b.Operation(ctx, op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if strings.Contains(output, "Running plan in cloud backend") {
+		t.Fatalf("unexpected TFC header in output: %s", output)
+	}
+	if output := done(t).Stdout(); !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s",
output) + } +} + +func TestCloud_planLockTimeout(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationPlanWithTimeout(t, "./testdata/plan", 50) + defer configCleanup() + defer done(t) + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = testBackendSingleWorkspaceName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. 
+ signal.Stop(sigint) + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("expected lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summary in output: %s", output) + } +} + +func TestCloud_planDestroy(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func TestCloud_planDestroyNoConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/empty") + defer configCleanup() + defer done(t) + + op.PlanMode = plans.DestroyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } +} + +func 
TestCloud_planWithWorkingDirectory(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("tofu"), + } + + // Configure the workspace to use a custom working directory. + _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-working-directory/tofu") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "The remote workspace is configured to work with configuration") { + t.Fatalf("expected working directory warning: %s", output) + } + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWithWorkingDirectoryFromCurrentPath(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + options := tfe.WorkspaceUpdateOptions{ + WorkingDirectory: tfe.String("tofu"), + } + + // Configure the workspace to use a custom working directory. 
+ _, err := b.client.Workspaces.Update(context.Background(), b.organization, b.WorkspaceMapping.Name, options) + if err != nil { + t.Fatalf("error configuring working directory: %v", err) + } + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting current working directory: %v", err) + } + + // We need to change into the configuration directory to make sure + // the logic to upload the correct slug is working as expected. + if err := os.Chdir("./testdata/plan-with-working-directory/tofu"); err != nil { + t.Fatalf("error changing directory: %v", err) + } + defer os.Chdir(wd) // Make sure we change back again when were done. + + // For this test we need to give our current directory instead of the + // full path to the configuration as we already changed directories. + op, configCleanup, done := testOperationPlan(t, ".") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planCostEstimation(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cost-estimation") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } 
+ + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Resources: 1 of 1 estimated") { + t.Fatalf("expected cost estimate result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicyPass(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-passed") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicyHardFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-hard-failed") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName 
+ + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planPolicySoftFail(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-policy-soft-failed") + defer configCleanup() + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + viewOutput := done(t) + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := viewOutput.Stderr() + if !strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("expected policy check result in output: %s", output) + } + 
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", output) + } +} + +func TestCloud_planWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-with-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in cloud backend") { + t.Fatalf("expected TFC header in output: %s", output) + } + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("expected plan error in output: %s", output) + } +} + +func TestCloud_planJSONWithRemoteError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + // Initialize the plan renderer + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-json-error") + defer configCleanup() + defer done(t) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + outp := close(t) + gotOut := outp.Stdout() + + if 
!strings.Contains(gotOut, "Unsupported block type") { + t.Fatalf("unexpected plan error in output: %s", gotOut) + } +} + +func TestCloud_planOtherError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + op.Workspace = "network-error" // custom error response in backend_mock.go + + _, err := b.Operation(context.Background(), op) + if err == nil { + t.Errorf("expected error, got success") + } + + if !strings.Contains(err.Error(), + "Cloud backend returned an unexpected error:\n\nI'm a little teacup") { + t.Fatalf("expected error message, got: %s", err.Error()) + } +} + +func TestCloud_planImportConfigGeneration(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-import-config-gen") + defer configCleanup() + defer done(t) + + genPath := filepath.Join(op.ConfigDir, "generated.tf") + op.GenerateConfigOut = genPath + defer os.Remove(genPath) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "1 to import, 0 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summary in output: %s", gotOut) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after the operation finished + if _, err := 
stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after successful plan: %s", err.Error()) + } + + testFileEquals(t, genPath, filepath.Join(op.ConfigDir, "generated.tf.expected")) +} + +func TestCloud_planImportGenerateInvalidConfig(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + stream, close := terminal.StreamsForTesting(t) + + b.renderer = &jsonformat.Renderer{ + Streams: stream, + Colorize: mockColorize(), + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-import-config-gen-validation-error") + defer configCleanup() + defer done(t) + + genPath := filepath.Join(op.ConfigDir, "generated.tf") + op.GenerateConfigOut = genPath + defer os.Remove(genPath) + + op.Workspace = testBackendSingleWorkspaceName + + mockSROWorkspace(t, b, op.Workspace) + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationFailure { + t.Fatalf("expected operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + outp := close(t) + gotOut := outp.Stdout() + + if !strings.Contains(gotOut, "Conflicting configuration arguments") { + t.Fatalf("Expected error in output: %s", gotOut) + } + + testFileEquals(t, genPath, filepath.Join(op.ConfigDir, "generated.tf.expected")) +} + +func TestCloud_planInvalidGenConfigOutPath(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan-import-config-gen-exists") + defer configCleanup() + + genPath := filepath.Join(op.ConfigDir, "generated.tf") + op.GenerateConfigOut = genPath + + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + output := done(t) + 
if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := output.Stderr() + if !strings.Contains(errOutput, "generated file already exists") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } +} + +func TestCloud_planShouldRenderSRO(t *testing.T) { + t.Run("when instance is TFC", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Cloud") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + t.Run("and SRO is enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, true) + }) + + t.Run("and SRO is not enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: false, + }, + } + assertSRORendered(t, b, r, false) + }) + + }) + + t.Run("when instance is TFE and version supports CLI SRO", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Enterprise") + w.Header().Set("X-TFE-Version", "v202303-1") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + t.Run("and SRO is enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, true) + }) + + t.Run("and SRO is not enabled", func(t *testing.T) { + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + 
StructuredRunOutputEnabled: false, + }, + } + assertSRORendered(t, b, r, false) + }) + }) + + t.Run("when instance is a known unsupported TFE release", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Enterprise") + w.Header().Set("X-TFE-Version", "v202208-1") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, false) + }) + + t.Run("when instance is an unknown TFE release", func(t *testing.T) { + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + }, + } + b, bCleanup := testBackendWithHandlers(t, handlers) + t.Cleanup(bCleanup) + b.renderer = &jsonformat.Renderer{} + + r := &tfe.Run{ + Workspace: &tfe.Workspace{ + StructuredRunOutputEnabled: true, + }, + } + assertSRORendered(t, b, r, false) + }) + +} + +func assertSRORendered(t *testing.T, b *Cloud, r *tfe.Run, shouldRender bool) { + got, err := b.shouldRenderStructuredRunOutput(r) + if err != nil { + t.Fatalf("expected no error: %v", err) + } + if shouldRender != got { + t.Fatalf("expected SRO to be rendered: %t, got %t", shouldRender, got) + } +} + +func testFileEquals(t *testing.T, got, want string) { + t.Helper() + + actual, err := os.ReadFile(got) + if err != nil { + t.Fatalf("error reading %s", got) + } + + expected, err := os.ReadFile(want) + if err != nil { + t.Fatalf("error reading %s", want) + } + + if diff := cmp.Diff(string(actual), string(expected)); len(diff) > 0 { + 
t.Fatalf("got:\n%s\nwant:\n%s\ndiff:\n%s", actual, expected, diff) + } +} diff --git a/pkg/cloud/backend_refresh_test.go b/pkg/cloud/backend_refresh_test.go new file mode 100644 index 00000000000..9bff9d5657e --- /dev/null +++ b/pkg/cloud/backend_refresh_test.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" +) + +func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + return testOperationRefreshWithTimeout(t, configDir, 0) +} + +func testOperationRefreshWithTimeout(t *testing.T, configDir string, timeout time.Duration) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) { + t.Helper() + + _, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir, "tests") + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + stateLockerView := views.NewStateLocker(arguments.ViewHuman, view) + operationView := views.NewOperation(arguments.ViewHuman, false, view) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + PlanRefresh: true, + StateLocker: clistate.NewLocker(timeout, stateLockerView), + Type: backend.OperationTypeRefresh, + View: operationView, + }, configCleanup, done +} + +func TestCloud_refreshBasicActuallyRunsApplyRefresh(t *testing.T) { + b, bCleanup := 
testBackendWithName(t) + defer bCleanup() + + op, configCleanup, done := testOperationRefresh(t, "./testdata/refresh") + defer configCleanup() + defer done(t) + + op.UIOut = b.CLI + b.CLIColor = b.cliColorize() + op.PlanMode = plans.RefreshOnlyMode + op.Workspace = testBackendSingleWorkspaceName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Proceeding with 'tofu apply -refresh-only -auto-approve'") { + t.Fatalf("expected TFC header in output: %s", output) + } + + stateMgr, _ := b.StateMgr(testBackendSingleWorkspaceName) + // An error suggests that the state was not unlocked after apply + if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil { + t.Fatalf("unexpected error locking state after apply: %s", err.Error()) + } +} diff --git a/pkg/cloud/backend_run_warning.go b/pkg/cloud/backend_run_warning.go new file mode 100644 index 00000000000..187349ac15e --- /dev/null +++ b/pkg/cloud/backend_run_warning.go @@ -0,0 +1,51 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "strings" + + tfe "github.com/hashicorp/go-tfe" +) + +const ( + changedPolicyEnforcementAction = "changed_policy_enforcements" + changedTaskEnforcementAction = "changed_task_enforcements" + ignoredPolicySetAction = "ignored_policy_sets" +) + +func (b *Cloud) renderRunWarnings(ctx context.Context, client *tfe.Client, runId string) error { + if b.CLI == nil { + return nil + } + + result, err := client.RunEvents.List(ctx, runId, nil) + if err != nil { + return err + } + if result == nil { + return nil + } + + // We don't have to worry about paging as the API doesn't support it yet + for _, re := range result.Items { + switch re.Action { + case changedPolicyEnforcementAction, changedTaskEnforcementAction, ignoredPolicySetAction: + if re.Description != "" { + b.CLI.Warn(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runWarningHeader, re.Description)) + "\n")) + } + } + } + + return nil +} + +const runWarningHeader = ` +[reset][yellow]Warning:[reset] %s +` diff --git a/pkg/cloud/backend_run_warning_test.go b/pkg/cloud/backend_run_warning_test.go new file mode 100644 index 00000000000..af687d442ec --- /dev/null +++ b/pkg/cloud/backend_run_warning_test.go @@ -0,0 +1,158 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/go-tfe" + tfemocks "github.com/hashicorp/go-tfe/mocks" + "github.com/mitchellh/cli" +) + +func MockAllRunEvents(t *testing.T, client *tfe.Client) (fullRunID string, emptyRunID string) { + ctrl := gomock.NewController(t) + + fullRunID = "run-full" + emptyRunID = "run-empty" + + mockRunEventsAPI := tfemocks.NewMockRunEvents(ctrl) + + emptyList := tfe.RunEventList{ + Items: []*tfe.RunEvent{}, + } + fullList := tfe.RunEventList{ + Items: []*tfe.RunEvent{ + { + Action: "created", + CreatedAt: time.Now(), + Description: "", + }, + { + Action: "changed_task_enforcements", + CreatedAt: time.Now(), + Description: "The enforcement level for task 'MockTask' was changed to 'advisory' because the run task limit was exceeded.", + }, + { + Action: "changed_policy_enforcements", + CreatedAt: time.Now(), + Description: "The enforcement level for policy 'MockPolicy' was changed to 'advisory' because the policy limit was exceeded.", + }, + { + Action: "ignored_policy_sets", + CreatedAt: time.Now(), + Description: "The policy set 'MockPolicySet' was ignored because the versioned policy set limit was exceeded.", + }, + { + Action: "queued", + CreatedAt: time.Now(), + Description: "", + }, + }, + } + // Mock Full Request + mockRunEventsAPI. + EXPECT(). + List(gomock.Any(), fullRunID, gomock.Any()). + Return(&fullList, nil). + AnyTimes() + + // Mock Full Request + mockRunEventsAPI. + EXPECT(). + List(gomock.Any(), emptyRunID, gomock.Any()). + Return(&emptyList, nil). + AnyTimes() + + // Mock a bad Read response + mockRunEventsAPI. + EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, tfe.ErrInvalidRunID). 
+ AnyTimes() + + // Wire up the mock interfaces + client.RunEvents = mockRunEventsAPI + return +} + +func TestRunEventWarningsAll(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + fullRunID, _ := MockAllRunEvents(t, client) + + ctx := context.TODO() + + err := b.renderRunWarnings(ctx, client, fullRunID) + if err != nil { + t.Fatalf("Expected to not error but received %s", err) + } + + output := b.CLI.(*cli.MockUi).ErrorWriter.String() + testString := "The enforcement level for task 'MockTask'" + if !strings.Contains(output, testString) { + t.Fatalf("Expected %q to contain %q but it did not", output, testString) + } + testString = "The enforcement level for policy 'MockPolicy'" + if !strings.Contains(output, testString) { + t.Fatalf("Expected %q to contain %q but it did not", output, testString) + } + testString = "The policy set 'MockPolicySet'" + if !strings.Contains(output, testString) { + t.Fatalf("Expected %q to contain %q but it did not", output, testString) + } +} + +func TestRunEventWarningsEmpty(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + _, emptyRunID := MockAllRunEvents(t, client) + + ctx := context.TODO() + + err := b.renderRunWarnings(ctx, client, emptyRunID) + if err != nil { + t.Fatalf("Expected to not error but received %s", err) + } + + output := b.CLI.(*cli.MockUi).ErrorWriter.String() + if output != "" { + t.Fatalf("Expected %q to be empty but it was not", output) + } +} + +func TestRunEventWarningsWithError(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + MockAllRunEvents(t, client) + + ctx := context.TODO() + + err := b.renderRunWarnings(ctx, client, "bad run id") + + if err == nil { + 
t.Error("Expected to error but did not") + } +} diff --git a/pkg/cloud/backend_show.go b/pkg/cloud/backend_show.go new file mode 100644 index 00000000000..527ef8712f5 --- /dev/null +++ b/pkg/cloud/backend_show.go @@ -0,0 +1,116 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "strings" + + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/plans" +) + +// ShowPlanForRun downloads the JSON plan output for the specified cloud run +// (either the redacted or unredacted format, per the caller's request), and +// returns it in a cloudplan.RemotePlanJSON wrapper struct (along with various +// metadata required by tofu show). It's intended for use by the tofu +// show command, in order to format and display a saved cloud plan. +func (b *Cloud) ShowPlanForRun(ctx context.Context, runID, runHostname string, redacted bool) (*cloudplan.RemotePlanJSON, error) { + var jsonBytes []byte + mode := plans.NormalMode + var opts []plans.Quality + + // Bail early if wrong hostname + if runHostname != b.hostname { + return nil, fmt.Errorf("hostname for run (%s) does not match the configured cloud integration (%s)", runHostname, b.hostname) + } + + // Get run and plan + r, err := b.client.Runs.ReadWithOptions(ctx, runID, &tfe.RunReadOptions{Include: []tfe.RunIncludeOpt{tfe.RunPlan, tfe.RunWorkspace}}) + if err == tfe.ErrResourceNotFound { + return nil, fmt.Errorf("couldn't read information for cloud run %s; make sure you've run `tofu login` and that you have permission to view the run", runID) + } else if err != nil { + return nil, fmt.Errorf("couldn't read information for cloud run %s: %w", runID, err) + } + + // Sort out the run mode + if r.IsDestroy { + mode = plans.DestroyMode + } else if r.RefreshOnly { + mode = plans.RefreshOnlyMode + } + + // Check that 
the plan actually finished + switch r.Plan.Status { + case tfe.PlanErrored: + // Errored plans might still be displayable, but we want to mention it to the renderer. + opts = append(opts, plans.Errored) + case tfe.PlanFinished: + // Good to go, but alert the renderer if it has no changes. + if !r.Plan.HasChanges { + opts = append(opts, plans.NoChanges) + } + default: + // Bail, we can't use this. + err = fmt.Errorf("can't display a cloud plan that is currently %s", r.Plan.Status) + return nil, err + } + + // Fetch the json plan! + if redacted { + jsonBytes, err = readRedactedPlan(ctx, b.client.BaseURL(), b.token, r.Plan.ID) + } else { + jsonBytes, err = b.client.Plans.ReadJSONOutput(ctx, r.Plan.ID) + } + if err == tfe.ErrResourceNotFound { + if redacted { + return nil, fmt.Errorf("couldn't read plan data for cloud run %s; make sure you've run `tofu login` and that you have permission to view the run", runID) + } else { + return nil, fmt.Errorf("couldn't read unredacted JSON plan data for cloud run %s; make sure you've run `tofu login` and that you have admin permissions on the workspace", runID) + } + } else if err != nil { + return nil, fmt.Errorf("couldn't read plan data for cloud run %s: %w", runID, err) + } + + // Format a run header and footer + header := strings.TrimSpace(fmt.Sprintf(runHeader, b.hostname, b.organization, r.Workspace.Name, r.ID)) + footer := strings.TrimSpace(statusFooter(r.Status, r.Actions.IsConfirmable, r.Workspace.Locked)) + + out := &cloudplan.RemotePlanJSON{ + JSONBytes: jsonBytes, + Redacted: redacted, + Mode: mode, + Qualities: opts, + RunHeader: header, + RunFooter: footer, + } + + return out, nil +} + +func statusFooter(status tfe.RunStatus, isConfirmable, locked bool) string { + statusText := strings.ReplaceAll(string(status), "_", " ") + statusColor := "red" + statusNote := "not confirmable" + if isConfirmable { + statusColor = "green" + statusNote = "confirmable" + } + lockedColor := "green" + lockedText := "unlocked" + if locked 
{ + lockedColor = "red" + lockedText = "locked" + } + return fmt.Sprintf(statusFooterText, statusColor, statusText, statusNote, lockedColor, lockedText) +} + +const statusFooterText = ` +[reset][%s]Run status: %s (%s)[reset] +[%s]Workspace is %s[reset] +` diff --git a/pkg/cloud/backend_show_test.go b/pkg/cloud/backend_show_test.go new file mode 100644 index 00000000000..842bc8cfcc9 --- /dev/null +++ b/pkg/cloud/backend_show_test.go @@ -0,0 +1,219 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "path/filepath" + "strings" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/plans" +) + +// A brief discourse on the theory of testing for this feature. Doing +// `tofu show cloudplan.tfplan` relies on the correctness of the following +// behaviors: +// +// 1. TFC API returns redacted or unredacted plan JSON on request, if permission +// requirements are met and the run is in a condition where that JSON exists. +// 2. Cloud.ShowPlanForRun() makes correct API calls, calculates metadata +// properly given a tfe.Run, and returns either a cloudplan.RemotePlanJSON or an err. +// 3. The Show command instantiates Cloud backend when given a cloud planfile, +// calls .ShowPlanForRun() on it, and passes result to Display() impls. +// 4. Display() impls yield the correct output when given a cloud plan json biscuit. +// +// 1 is axiomatic and outside our domain. 3 is regrettably totally untestable +// unless we refactor the Meta command to enable stubbing out a backend factory +// or something, which seems inadvisable at this juncture. 4 is exercised over +// in internal/command/views/show_test.go. And thus, this file only cares about +// item 2. 
+ +// 404 on run: special error message +func TestCloud_showMissingRun(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + mockSROWorkspace(t, b, testBackendSingleWorkspaceName) + + absentRunID := "run-WwwwXxxxYyyyZzzz" + _, err := b.ShowPlanForRun(context.Background(), absentRunID, tfeHost, true) + if !strings.Contains(err.Error(), "tofu login") { + t.Fatalf("expected error message to suggest checking your login status, instead got: %s", err) + } +} + +// If redacted json is available but unredacted is not +func TestCloud_showMissingUnredactedJson(t *testing.T) { + b, mc, bCleanup := testBackendAndMocksWithName(t) + defer bCleanup() + mockSROWorkspace(t, b, testBackendSingleWorkspaceName) + + ctx := context.Background() + + runID, err := testCloudRunForShow(mc, "./testdata/plan-json-basic-no-unredacted", tfe.RunPlannedAndSaved, tfe.PlanFinished) + if err != nil { + t.Fatalf("failed to init test data: %s", err) + } + // Showing the human-formatted plan should still work as expected! + redacted, err := b.ShowPlanForRun(ctx, runID, tfeHost, true) + if err != nil { + t.Fatalf("failed to show plan for human, even though redacted json should be present: %s", err) + } + if !strings.Contains(string(redacted.JSONBytes), `"plan_format_version":`) { + t.Fatalf("show for human doesn't include expected redacted json content") + } + // Should be marked as containing changes and non-errored + canNotApply := false + errored := false + for _, opt := range redacted.Qualities { + if opt == plans.NoChanges { + canNotApply = true + } + if opt == plans.Errored { + errored = true + } + } + if canNotApply || errored { + t.Fatalf("expected neither errored nor can't-apply in opts, instead got: %#v", redacted.Qualities) + } + + // But show -json should result in a special error. 
+ _, err = b.ShowPlanForRun(ctx, runID, tfeHost, false) + if err == nil { + t.Fatalf("unexpected success: reading unredacted json without admin permissions should have errored") + } + if !strings.Contains(err.Error(), "admin") { + t.Fatalf("expected error message to suggest your permissions are wrong, instead got: %s", err) + } +} + +// If both kinds of json are available, both kinds of show should work +func TestCloud_showIncludesUnredactedJson(t *testing.T) { + b, mc, bCleanup := testBackendAndMocksWithName(t) + defer bCleanup() + mockSROWorkspace(t, b, testBackendSingleWorkspaceName) + + ctx := context.Background() + + runID, err := testCloudRunForShow(mc, "./testdata/plan-json-basic", tfe.RunPlannedAndSaved, tfe.PlanFinished) + if err != nil { + t.Fatalf("failed to init test data: %s", err) + } + // Showing the human-formatted plan should work as expected: + redacted, err := b.ShowPlanForRun(ctx, runID, tfeHost, true) + if err != nil { + t.Fatalf("failed to show plan for human, even though redacted json should be present: %s", err) + } + if !strings.Contains(string(redacted.JSONBytes), `"plan_format_version":`) { + t.Fatalf("show for human doesn't include expected redacted json content") + } + // Showing the external json plan format should work as expected: + unredacted, err := b.ShowPlanForRun(ctx, runID, tfeHost, false) + if err != nil { + t.Fatalf("failed to show plan for robot, even though unredacted json should be present: %s", err) + } + if !strings.Contains(string(unredacted.JSONBytes), `"format_version":`) { + t.Fatalf("show for robot doesn't include expected unredacted json content") + } +} + +func TestCloud_showNoChanges(t *testing.T) { + b, mc, bCleanup := testBackendAndMocksWithName(t) + defer bCleanup() + mockSROWorkspace(t, b, testBackendSingleWorkspaceName) + + ctx := context.Background() + + runID, err := testCloudRunForShow(mc, "./testdata/plan-json-no-changes", tfe.RunPlannedAndSaved, tfe.PlanFinished) + if err != nil { + t.Fatalf("failed to 
init test data: %s", err) + } + // Showing the human-formatted plan should work as expected: + redacted, err := b.ShowPlanForRun(ctx, runID, tfeHost, true) + if err != nil { + t.Fatalf("failed to show plan for human, even though redacted json should be present: %s", err) + } + // Should be marked as no changes + canNotApply := false + for _, opt := range redacted.Qualities { + if opt == plans.NoChanges { + canNotApply = true + } + } + if !canNotApply { + t.Fatalf("expected opts to include CanNotApply, instead got: %#v", redacted.Qualities) + } +} + +func TestCloud_showFooterNotConfirmable(t *testing.T) { + b, mc, bCleanup := testBackendAndMocksWithName(t) + defer bCleanup() + mockSROWorkspace(t, b, testBackendSingleWorkspaceName) + + ctx := context.Background() + + runID, err := testCloudRunForShow(mc, "./testdata/plan-json-full", tfe.RunDiscarded, tfe.PlanFinished) + if err != nil { + t.Fatalf("failed to init test data: %s", err) + } + + // A little more custom run tweaking: + mc.Runs.Runs[runID].Actions.IsConfirmable = false + + // Showing the human-formatted plan should work as expected: + redacted, err := b.ShowPlanForRun(ctx, runID, tfeHost, true) + if err != nil { + t.Fatalf("failed to show plan for human, even though redacted json should be present: %s", err) + } + + // Footer should mention that you can't apply it: + if !strings.Contains(redacted.RunFooter, "not confirmable") { + t.Fatalf("footer should call out that run isn't confirmable, instead got: %s", redacted.RunFooter) + } +} + +func testCloudRunForShow(mc *MockClient, configDir string, runStatus tfe.RunStatus, planStatus tfe.PlanStatus) (string, error) { + ctx := context.Background() + + // get workspace ID + wsID := mc.Workspaces.workspaceNames[testBackendSingleWorkspaceName].ID + // create and upload config version + cvOpts := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(false), + } + cv, err := mc.ConfigurationVersions.Create(ctx, wsID, cvOpts) 
+ if err != nil { + return "", err + } + absDir, err := filepath.Abs(configDir) + if err != nil { + return "", err + } + err = mc.ConfigurationVersions.Upload(ctx, cv.UploadURL, absDir) + if err != nil { + return "", err + } + // create run + rOpts := tfe.RunCreateOptions{ + PlanOnly: tfe.Bool(false), + IsDestroy: tfe.Bool(false), + RefreshOnly: tfe.Bool(false), + ConfigurationVersion: cv, + Workspace: &tfe.Workspace{ID: wsID}, + } + r, err := mc.Runs.Create(ctx, rOpts) + if err != nil { + return "", err + } + // mess with statuses (this is what requires full access to mock client) + mc.Runs.Runs[r.ID].Status = runStatus + mc.Plans.plans[r.Plan.ID].Status = planStatus + + // return the ID + return r.ID, nil +} diff --git a/pkg/cloud/backend_taskStage_policyEvaluation.go b/pkg/cloud/backend_taskStage_policyEvaluation.go new file mode 100644 index 00000000000..3e66cf089a7 --- /dev/null +++ b/pkg/cloud/backend_taskStage_policyEvaluation.go @@ -0,0 +1,162 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-tfe" +) + +type policyEvaluationSummary struct { + unreachable bool + pending int + failed int + passed int +} + +type Symbol rune + +const ( + Tick Symbol = '\u2713' + Cross Symbol = '\u00d7' + Warning Symbol = '\u24be' + Arrow Symbol = '\u2192' + DownwardArrow Symbol = '\u21b3' +) + +type policyEvaluationSummarizer struct { + finished bool + cloud *Cloud + counter int +} + +func newPolicyEvaluationSummarizer(b *Cloud, ts *tfe.TaskStage) taskStageSummarizer { + if len(ts.PolicyEvaluations) == 0 { + return nil + } + return &policyEvaluationSummarizer{ + finished: false, + cloud: b, + } +} + +func (pes *policyEvaluationSummarizer) Summarize(context *IntegrationContext, output IntegrationOutputWriter, ts *tfe.TaskStage) (bool, *string, error) { + if pes.counter == 0 { + output.Output("[bold]OPA Policy Evaluation\n") + pes.counter++ + } + + if pes.finished { + return false, nil, nil + } + + counts := summarizePolicyEvaluationResults(ts.PolicyEvaluations) + + if counts.pending != 0 { + pendingMessage := "Evaluating ... 
" + return true, &pendingMessage, nil + } + + if counts.unreachable { + output.Output("Skipping policy evaluation.") + output.End() + return false, nil, nil + } + + // Print out the summary + if err := pes.taskStageWithPolicyEvaluation(context, output, ts.PolicyEvaluations); err != nil { + return false, nil, err + } + // Mark as finished + pes.finished = true + + return false, nil, nil +} + +func summarizePolicyEvaluationResults(policyEvaluations []*tfe.PolicyEvaluation) *policyEvaluationSummary { + var pendingCount, errCount, passedCount int + for _, policyEvaluation := range policyEvaluations { + switch policyEvaluation.Status { + case "unreachable": + return &policyEvaluationSummary{ + unreachable: true, + } + case "running", "pending", "queued": + pendingCount++ + case "passed": + passedCount++ + default: + // Everything else is a failure + errCount++ + } + } + + return &policyEvaluationSummary{ + unreachable: false, + pending: pendingCount, + failed: errCount, + passed: passedCount, + } +} + +func (pes *policyEvaluationSummarizer) taskStageWithPolicyEvaluation(context *IntegrationContext, output IntegrationOutputWriter, policyEvaluation []*tfe.PolicyEvaluation) error { + var result, message string + // Currently only one policy evaluation supported : OPA + for _, polEvaluation := range policyEvaluation { + if polEvaluation.Status == tfe.PolicyEvaluationPassed { + message = "[dim] This result means that all OPA policies passed and the protected behavior is allowed" + result = fmt.Sprintf("[green]%s", strings.ToUpper(string(tfe.PolicyEvaluationPassed))) + if polEvaluation.ResultCount.AdvisoryFailed > 0 { + result += " (with advisory)" + } + } else { + message = "[dim] This result means that one or more OPA policies failed. 
More than likely, this was due to the discovery of violations by the main rule and other sub rules" + result = fmt.Sprintf("[red]%s", strings.ToUpper(string(tfe.PolicyEvaluationFailed))) + } + + output.Output(fmt.Sprintf("[bold]%c%c Overall Result: %s", Arrow, Arrow, result)) + + output.Output(message) + + total := getPolicyCount(polEvaluation.ResultCount) + + output.Output(fmt.Sprintf("%d policies evaluated\n", total)) + + policyOutcomes, err := pes.cloud.client.PolicySetOutcomes.List(context.StopContext, polEvaluation.ID, nil) + if err != nil { + return err + } + + for i, out := range policyOutcomes.Items { + output.Output(fmt.Sprintf("%c Policy set %d: [bold]%s (%d)", Arrow, i+1, out.PolicySetName, len(out.Outcomes))) + for _, outcome := range out.Outcomes { + output.Output(fmt.Sprintf(" %c Policy name: [bold]%s", DownwardArrow, outcome.PolicyName)) + switch outcome.Status { + case "passed": + output.Output(fmt.Sprintf(" | [green][bold]%c Passed", Tick)) + case "failed": + if outcome.EnforcementLevel == tfe.EnforcementAdvisory { + output.Output(fmt.Sprintf(" | [blue][bold]%c Advisory", Warning)) + } else { + output.Output(fmt.Sprintf(" | [red][bold]%c Failed", Cross)) + } + } + if outcome.Description != "" { + output.Output(fmt.Sprintf(" | [dim]%s", outcome.Description)) + } else { + output.Output(" | [dim]No description available") + } + } + } + } + return nil +} + +func getPolicyCount(resultCount *tfe.PolicyResultCount) int { + return resultCount.AdvisoryFailed + resultCount.MandatoryFailed + resultCount.Errored + resultCount.Passed +} diff --git a/pkg/cloud/backend_taskStage_policyEvaluation_test.go b/pkg/cloud/backend_taskStage_policyEvaluation_test.go new file mode 100644 index 00000000000..b024cb96c6b --- /dev/null +++ b/pkg/cloud/backend_taskStage_policyEvaluation_test.go @@ -0,0 +1,102 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "strings" + "testing" + + "github.com/hashicorp/go-tfe" +) + +func TestCloud_runTaskStageWithPolicyEvaluation(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + integrationContext, writer := newMockIntegrationContext(b, t) + + cases := map[string]struct { + taskStage func() *tfe.TaskStage + context *IntegrationContext + writer *testIntegrationOutput + expectedOutputs []string + isError bool + }{ + "all-succeeded": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.PolicyEvaluations = []*tfe.PolicyEvaluation{ + {ID: "pol-pass", ResultCount: &tfe.PolicyResultCount{Passed: 1}, Status: "passed"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"│ [bold]OPA Policy Evaluation\n\n│ [bold]→→ Overall Result: [green]PASSED\n│ [dim] This result means that all OPA policies passed and the protected behavior is allowed\n│ 1 policies evaluated\n\n│ → Policy set 1: [bold]policy-set-that-passes (1)\n│ ↳ Policy name: [bold]policy-pass\n│ | [green][bold]✓ Passed\n│ | [dim]This policy will pass\n"}, + isError: false, + }, + "mandatory-failed": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.PolicyEvaluations = []*tfe.PolicyEvaluation{ + {ID: "pol-fail", ResultCount: &tfe.PolicyResultCount{MandatoryFailed: 1}, Status: "failed"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"│ [bold]→→ Overall Result: [red]FAILED\n│ [dim] This result means that one or more OPA policies failed. 
More than likely, this was due to the discovery of violations by the main rule and other sub rules\n│ 1 policies evaluated\n\n│ → Policy set 1: [bold]policy-set-that-fails (1)\n│ ↳ Policy name: [bold]policy-fail\n│ | [red][bold]× Failed\n│ | [dim]This policy will fail"}, + isError: true, + }, + "advisory-failed": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.PolicyEvaluations = []*tfe.PolicyEvaluation{ + {ID: "adv-fail", ResultCount: &tfe.PolicyResultCount{AdvisoryFailed: 1}, Status: "failed"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"│ [bold]OPA Policy Evaluation\n\n│ [bold]→→ Overall Result: [red]FAILED\n│ [dim] This result means that one or more OPA policies failed. More than likely, this was due to the discovery of violations by the main rule and other sub rules\n│ 1 policies evaluated\n\n│ → Policy set 1: [bold]policy-set-that-fails (1)\n│ ↳ Policy name: [bold]policy-fail\n│ | [blue][bold]Ⓘ Advisory\n│ | [dim]This policy will fail"}, + isError: false, + }, + "unreachable": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.PolicyEvaluations = []*tfe.PolicyEvaluation{ + {ID: "adv-fail", ResultCount: &tfe.PolicyResultCount{Errored: 1}, Status: "unreachable"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"Skipping policy evaluation."}, + isError: false, + }, + } + + for _, c := range cases { + c.writer.output.Reset() + trs := policyEvaluationSummarizer{ + cloud: b, + } + c.context.Poll(0, 0, func(i int) (bool, error) { + cont, _, _ := trs.Summarize(c.context, c.writer, c.taskStage()) + if cont { + return true, nil + } + + output := c.writer.output.String() + for _, expected := range c.expectedOutputs { + if !strings.Contains(output, expected) { + t.Fatalf("Expected output to contain '%s' but it was:\n\n%s", expected, output) + } + } + return false, nil + }) + } +} diff --git 
a/pkg/cloud/backend_taskStage_taskResults.go b/pkg/cloud/backend_taskStage_taskResults.go new file mode 100644 index 00000000000..987adf48194 --- /dev/null +++ b/pkg/cloud/backend_taskStage_taskResults.go @@ -0,0 +1,152 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-tfe" +) + +type taskResultSummary struct { + unreachable bool + pending int + failed int + failedMandatory int + passed int +} + +type taskResultSummarizer struct { + finished bool + cloud *Cloud + counter int +} + +func newTaskResultSummarizer(b *Cloud, ts *tfe.TaskStage) taskStageSummarizer { + if len(ts.TaskResults) == 0 { + return nil + } + return &taskResultSummarizer{ + finished: false, + cloud: b, + } +} + +func (trs *taskResultSummarizer) Summarize(context *IntegrationContext, output IntegrationOutputWriter, ts *tfe.TaskStage) (bool, *string, error) { + if trs.finished { + return false, nil, nil + } + trs.counter++ + + counts := summarizeTaskResults(ts.TaskResults) + + if counts.pending != 0 { + pendingMessage := "%d tasks still pending, %d passed, %d failed ... 
" + message := fmt.Sprintf(pendingMessage, counts.pending, counts.passed, counts.failed) + return true, &message, nil + } + if counts.unreachable { + output.Output("Skipping task results.") + output.End() + return false, nil, nil + } + + // Print out the summary + trs.runTasksWithTaskResults(output, ts.TaskResults, counts) + + // Mark as finished + trs.finished = true + + return false, nil, nil +} + +func summarizeTaskResults(taskResults []*tfe.TaskResult) *taskResultSummary { + var pendingCount, errCount, errMandatoryCount, passedCount int + for _, task := range taskResults { + if task.Status == tfe.TaskUnreachable { + return &taskResultSummary{ + unreachable: true, + } + } else if task.Status == tfe.TaskRunning || task.Status == tfe.TaskPending { + pendingCount++ + } else if task.Status == tfe.TaskPassed { + passedCount++ + } else { + // Everything else is a failure + errCount++ + if task.WorkspaceTaskEnforcementLevel == tfe.Mandatory { + errMandatoryCount++ + } + } + } + + return &taskResultSummary{ + unreachable: false, + pending: pendingCount, + failed: errCount, + failedMandatory: errMandatoryCount, + passed: passedCount, + } +} + +func (trs *taskResultSummarizer) runTasksWithTaskResults(output IntegrationOutputWriter, taskResults []*tfe.TaskResult, count *taskResultSummary) { + // Track the first task name that is a mandatory enforcement level breach. + var firstMandatoryTaskFailed *string = nil + + if trs.counter == 0 { + output.Output(fmt.Sprintf("All tasks completed! %d passed, %d failed", count.passed, count.failed)) + } else { + output.OutputElapsed(fmt.Sprintf("All tasks completed! 
%d passed, %d failed", count.passed, count.failed), 50) + } + + output.Output("") + + for _, t := range taskResults { + capitalizedStatus := string(t.Status) + capitalizedStatus = strings.ToUpper(capitalizedStatus[:1]) + capitalizedStatus[1:] + + status := "[green]" + capitalizedStatus + if t.Status != "passed" { + level := string(t.WorkspaceTaskEnforcementLevel) + level = strings.ToUpper(level[:1]) + level[1:] + status = fmt.Sprintf("[red]%s (%s)", capitalizedStatus, level) + + if t.WorkspaceTaskEnforcementLevel == "mandatory" && firstMandatoryTaskFailed == nil { + firstMandatoryTaskFailed = &t.TaskName + } + } + + title := fmt.Sprintf(`%s ⸺ %s`, t.TaskName, status) + output.SubOutput(title) + + if len(t.Message) > 0 { + output.SubOutput(fmt.Sprintf("[dim]%s", t.Message)) + } + if len(t.URL) > 0 { + output.SubOutput(fmt.Sprintf("[dim]Details: %s", t.URL)) + } + output.SubOutput("") + } + + // If a mandatory enforcement level is breached, return an error. + var overall string = "[green]Passed" + if firstMandatoryTaskFailed != nil { + overall = "[red]Failed" + if count.failedMandatory > 1 { + output.Output(fmt.Sprintf("[reset][bold][red]Error:[reset][bold]the run failed because %d mandatory tasks are required to succeed", count.failedMandatory)) + } else { + output.Output(fmt.Sprintf("[reset][bold][red]Error: [reset][bold]the run failed because the run task, %s, is required to succeed", *firstMandatoryTaskFailed)) + } + } else if count.failed > 0 { // we have failures but none of them mandatory + overall = "[green]Passed with advisory failures" + } + + output.SubOutput("") + output.SubOutput("[bold]Overall Result: " + overall) + + output.End() +} diff --git a/pkg/cloud/backend_taskStage_taskResults_test.go b/pkg/cloud/backend_taskStage_taskResults_test.go new file mode 100644 index 00000000000..39a927ee0ba --- /dev/null +++ b/pkg/cloud/backend_taskStage_taskResults_test.go @@ -0,0 +1,174 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 
+// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/go-tfe" +) + +type testIntegrationOutput struct { + ctx *IntegrationContext + output *strings.Builder + t *testing.T +} + +var _ IntegrationOutputWriter = (*testIntegrationOutput)(nil) // Compile time check + +func (s *testIntegrationOutput) End() { + s.output.WriteString("END\n") +} + +func (s *testIntegrationOutput) SubOutput(str string) { + s.output.WriteString(s.ctx.B.Colorize().Color("[reset]│ "+str) + "\n") +} + +func (s *testIntegrationOutput) Output(str string) { + s.output.WriteString(s.ctx.B.Colorize().Color("[reset]│ ") + str + "\n") +} + +func (s *testIntegrationOutput) OutputElapsed(message string, maxMessage int) { + s.output.WriteString("PENDING MESSAGE: " + message) +} + +func newMockIntegrationContext(b *Cloud, t *testing.T) (*IntegrationContext, *testIntegrationOutput) { + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.WorkspaceMapping.Name) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. 
+ r, err := b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup, done := testOperationPlan(t, "./testdata/plan") + defer configCleanup() + defer done(t) + + integrationContext := &IntegrationContext{ + B: b, + StopContext: ctx, + CancelContext: ctx, + Op: op, + Run: r, + } + + return integrationContext, &testIntegrationOutput{ + ctx: integrationContext, + output: &strings.Builder{}, + t: t, + } +} + +func TestCloud_runTasksWithTaskResults(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + integrationContext, writer := newMockIntegrationContext(b, t) + + cases := map[string]struct { + taskStage func() *tfe.TaskStage + context *IntegrationContext + writer *testIntegrationOutput + expectedOutputs []string + isError bool + }{ + "all-succeeded": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.TaskResults = []*tfe.TaskResult{ + {ID: "1", TaskName: "Mandatory", Message: "A-OK", Status: "passed", WorkspaceTaskEnforcementLevel: "mandatory"}, + {ID: "2", TaskName: "Advisory", Message: "A-OK", Status: "passed", WorkspaceTaskEnforcementLevel: "advisory"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"Overall Result: Passed\n"}, + isError: false, + }, + "mandatory-failed": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.TaskResults = []*tfe.TaskResult{ + {ID: "1", TaskName: "Mandatory", Message: "500 Error", Status: "failed", WorkspaceTaskEnforcementLevel: "mandatory"}, + {ID: "2", TaskName: "Advisory", Message: "A-OK", Status: "passed", WorkspaceTaskEnforcementLevel: "advisory"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"Passed\n", "A-OK\n", "Overall Result: Failed\n"}, + isError: true, + }, + "advisory-failed": { + taskStage: func() *tfe.TaskStage { + ts := 
&tfe.TaskStage{} + ts.TaskResults = []*tfe.TaskResult{ + {ID: "1", TaskName: "Mandatory", Message: "A-OK", Status: "passed", WorkspaceTaskEnforcementLevel: "mandatory"}, + {ID: "2", TaskName: "Advisory", Message: "500 Error", Status: "failed", WorkspaceTaskEnforcementLevel: "advisory"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"Failed (Advisory)", "Overall Result: Passed with advisory failure"}, + isError: false, + }, + "unreachable": { + taskStage: func() *tfe.TaskStage { + ts := &tfe.TaskStage{} + ts.TaskResults = []*tfe.TaskResult{ + {ID: "1", TaskName: "Mandatory", Message: "", Status: "unreachable", WorkspaceTaskEnforcementLevel: "mandatory"}, + {ID: "2", TaskName: "Advisory", Message: "", Status: "unreachable", WorkspaceTaskEnforcementLevel: "advisory"}, + } + return ts + }, + writer: writer, + context: integrationContext, + expectedOutputs: []string{"Skipping"}, + isError: false, + }, + } + + for _, c := range cases { + c.writer.output.Reset() + trs := taskResultSummarizer{ + cloud: b, + } + c.context.Poll(0, 0, func(i int) (bool, error) { + cont, _, _ := trs.Summarize(c.context, c.writer, c.taskStage()) + if cont { + return true, nil + } + + output := c.writer.output.String() + for _, expected := range c.expectedOutputs { + if !strings.Contains(output, expected) { + t.Fatalf("Expected output to contain '%s' but it was:\n\n%s", expected, output) + } + } + return false, nil + }) + } +} diff --git a/pkg/cloud/backend_taskStages.go b/pkg/cloud/backend_taskStages.go new file mode 100644 index 00000000000..082e04618e9 --- /dev/null +++ b/pkg/cloud/backend_taskStages.go @@ -0,0 +1,195 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-multierror" + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/tofu" +) + +type taskStages map[tfe.Stage]*tfe.TaskStage + +const ( + taskStageBackoffMin = 4000.0 + taskStageBackoffMax = 12000.0 +) + +const taskStageHeader = ` +To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s +` + +type taskStageSummarizer interface { + // Summarize takes an IntegrationContext, IntegrationOutputWriter for + // writing output and a pointer to a tfe.TaskStage object as arguments. + // This function summarizes and outputs the results of the task stage. + // It returns a boolean which signifies whether we should continue polling + // for results, an optional message string to print while it is polling + // and an error if any. + Summarize(*IntegrationContext, IntegrationOutputWriter, *tfe.TaskStage) (bool, *string, error) +} + +func (b *Cloud) runTaskStages(ctx context.Context, client *tfe.Client, runId string) (taskStages, error) { + taskStages := make(taskStages, 0) + result, err := client.Runs.ReadWithOptions(ctx, runId, &tfe.RunReadOptions{ + Include: []tfe.RunIncludeOpt{tfe.RunTaskStages}, + }) + if err == nil { + for _, t := range result.TaskStages { + if t != nil { + taskStages[t.Stage] = t + } + } + } else { + // This error would be expected for older versions of TFE that do not allow + // fetching task_stages. 
+ if !strings.HasSuffix(err.Error(), "Invalid include parameter") { + return taskStages, generalError("Failed to retrieve run", err) + } + } + + return taskStages, nil +} + +func (b *Cloud) getTaskStageWithAllOptions(ctx *IntegrationContext, stageID string) (*tfe.TaskStage, error) { + options := tfe.TaskStageReadOptions{ + Include: []tfe.TaskStageIncludeOpt{tfe.TaskStageTaskResults, tfe.PolicyEvaluationsTaskResults}, + } + stage, err := b.client.TaskStages.Read(ctx.StopContext, stageID, &options) + if err != nil { + return nil, generalError("Failed to retrieve task stage", err) + } else { + return stage, nil + } +} + +func (b *Cloud) runTaskStage(ctx *IntegrationContext, output IntegrationOutputWriter, stageID string) error { + var errs *multierror.Error + + // Create our summarizers + summarizers := make([]taskStageSummarizer, 0) + ts, err := b.getTaskStageWithAllOptions(ctx, stageID) + if err != nil { + return err + } + + if s := newTaskResultSummarizer(b, ts); s != nil { + summarizers = append(summarizers, s) + } + + if s := newPolicyEvaluationSummarizer(b, ts); s != nil { + summarizers = append(summarizers, s) + } + + return ctx.Poll(taskStageBackoffMin, taskStageBackoffMax, func(i int) (bool, error) { + options := tfe.TaskStageReadOptions{ + Include: []tfe.TaskStageIncludeOpt{tfe.TaskStageTaskResults, tfe.PolicyEvaluationsTaskResults}, + } + stage, err := b.client.TaskStages.Read(ctx.StopContext, stageID, &options) + if err != nil { + return false, generalError("Failed to retrieve task stage", err) + } + + switch stage.Status { + case tfe.TaskStagePending: + // Waiting for it to start + return true, nil + case tfe.TaskStageRunning: + if _, e := processSummarizers(ctx, output, stage, summarizers, errs); e != nil { + errs = e + } + // not a terminal status so we continue to poll + return true, nil + // Note: Terminal statuses need to print out one last time just in case + case tfe.TaskStagePassed: + ok, e := processSummarizers(ctx, output, stage, summarizers, 
errs) + if e != nil { + errs = e + } + if ok { + return true, nil + } + case tfe.TaskStageCanceled, tfe.TaskStageErrored, tfe.TaskStageFailed: + ok, e := processSummarizers(ctx, output, stage, summarizers, errs) + if e != nil { + errs = e + } + if ok { + return true, nil + } + return false, fmt.Errorf("Task Stage %s.", stage.Status) + case tfe.TaskStageAwaitingOverride: + ok, e := processSummarizers(ctx, output, stage, summarizers, errs) + if e != nil { + errs = e + } + if ok { + return true, nil + } + cont, err := b.processStageOverrides(ctx, output, stage.ID) + if err != nil { + errs = multierror.Append(errs, err) + } else { + return cont, nil + } + case tfe.TaskStageUnreachable: + return false, nil + default: + return false, fmt.Errorf("Invalid Task stage status: %s ", stage.Status) + } + return false, errs.ErrorOrNil() + }) +} + +func processSummarizers(ctx *IntegrationContext, output IntegrationOutputWriter, stage *tfe.TaskStage, summarizers []taskStageSummarizer, errs *multierror.Error) (bool, *multierror.Error) { + for _, s := range summarizers { + cont, msg, err := s.Summarize(ctx, output, stage) + if err != nil { + errs = multierror.Append(errs, err) + break + } + + if !cont { + continue + } + + // cont is true and we must continue to poll + if msg != nil { + output.OutputElapsed(*msg, len(*msg)) // Up to 2 digits are allowed by the max message allocation + } + return true, nil + } + return false, errs +} + +func (b *Cloud) processStageOverrides(context *IntegrationContext, output IntegrationOutputWriter, taskStageID string) (bool, error) { + opts := &tofu.InputOpts{ + Id: fmt.Sprintf("%c%c [bold]Override", Arrow, Arrow), + Query: "\nDo you want to override the failed policy check?", + Description: "Only 'override' will be accepted to override.", + } + runUrl := fmt.Sprintf(taskStageHeader, b.hostname, b.organization, context.Op.Workspace, context.Run.ID) + err := b.confirm(context.StopContext, context.Op, opts, context.Run, "override") + if err != nil && 
err != errRunOverridden { + return false, fmt.Errorf("Failed to override: %w\n%s\n", err, runUrl) + } + + if err != errRunOverridden { + if _, err = b.client.TaskStages.Override(context.StopContext, taskStageID, tfe.TaskStageOverrideOptions{}); err != nil { + return false, generalError(fmt.Sprintf("Failed to override policy check.\n%s", runUrl), err) + } else { + return true, nil + } + } else { + output.Output(fmt.Sprintf("The run needs to be manually overridden or discarded.\n%s\n", runUrl)) + } + return false, nil +} diff --git a/pkg/cloud/backend_taskStages_test.go b/pkg/cloud/backend_taskStages_test.go new file mode 100644 index 00000000000..e82ae37d58a --- /dev/null +++ b/pkg/cloud/backend_taskStages_test.go @@ -0,0 +1,280 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/hashicorp/go-tfe" + tfemocks "github.com/hashicorp/go-tfe/mocks" +) + +func MockAllTaskStages(t *testing.T, client *tfe.Client) (RunID string) { + ctrl := gomock.NewController(t) + + RunID = "run-all_task_stages" + + mockRunsAPI := tfemocks.NewMockRuns(ctrl) + + goodRun := tfe.Run{ + TaskStages: []*tfe.TaskStage{ + { + Stage: tfe.PrePlan, + }, + { + Stage: tfe.PostPlan, + }, + { + Stage: tfe.PreApply, + }, + }, + } + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), RunID, gomock.Any()). + Return(&goodRun, nil). + AnyTimes() + + // Mock a bad Read response + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, tfe.ErrInvalidOrg). 
+ AnyTimes() + + // Wire up the mock interfaces + client.Runs = mockRunsAPI + return +} + +func MockPrePlanTaskStage(t *testing.T, client *tfe.Client) (RunID string) { + ctrl := gomock.NewController(t) + + RunID = "run-pre_plan_task_stage" + + mockRunsAPI := tfemocks.NewMockRuns(ctrl) + + goodRun := tfe.Run{ + TaskStages: []*tfe.TaskStage{ + { + Stage: tfe.PrePlan, + }, + }, + } + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), RunID, gomock.Any()). + Return(&goodRun, nil). + AnyTimes() + + // Mock a bad Read response + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, tfe.ErrInvalidOrg). + AnyTimes() + + // Wire up the mock interfaces + client.Runs = mockRunsAPI + return +} + +func MockTaskStageUnsupported(t *testing.T, client *tfe.Client) (RunID string) { + ctrl := gomock.NewController(t) + + RunID = "run-unsupported_task_stage" + + mockRunsAPI := tfemocks.NewMockRuns(ctrl) + + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), RunID, gomock.Any()). + Return(nil, errors.New("Invalid include parameter")). + AnyTimes() + + mockRunsAPI. + EXPECT(). + ReadWithOptions(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil, tfe.ErrInvalidOrg). 
+ AnyTimes() + + client.Runs = mockRunsAPI + return +} + +func TestTaskStagesWithAllStages(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + runID := MockAllTaskStages(t, client) + + ctx := context.TODO() + taskStages, err := b.runTaskStages(ctx, client, runID) + + if err != nil { + t.Fatalf("Expected to not error but received %s", err) + } + + for _, stageName := range []tfe.Stage{ + tfe.PrePlan, + tfe.PostPlan, + tfe.PreApply, + } { + if stage, ok := taskStages[stageName]; ok { + if stage.Stage != stageName { + t.Errorf("Expected task stage indexed by %s to find a Task Stage with the same index, but receieved %s", stageName, stage.Stage) + } + } else { + t.Errorf("Expected task stage indexed by %s to exist, but it did not", stageName) + } + } +} + +func TestTaskStagesWithOneStage(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + runID := MockPrePlanTaskStage(t, client) + + ctx := context.TODO() + taskStages, err := b.runTaskStages(ctx, client, runID) + + if err != nil { + t.Fatalf("Expected to not error but received %s", err) + } + + if _, ok := taskStages[tfe.PrePlan]; !ok { + t.Errorf("Expected task stage indexed by %s to exist, but it did not", tfe.PrePlan) + } + + for _, stageName := range []tfe.Stage{ + tfe.PostPlan, + tfe.PreApply, + } { + if _, ok := taskStages[stageName]; ok { + t.Errorf("Expected task stage indexed by %s to not exist, but it did", stageName) + } + } +} + +func TestTaskStagesWithOldTFC(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + runID := MockTaskStageUnsupported(t, client) + + ctx := context.TODO() + taskStages, err := b.runTaskStages(ctx, client, runID) + + if err != nil { + 
t.Fatalf("Expected to not error but received %s", err) + } + + if len(taskStages) != 0 { + t.Errorf("Expected task stage to be empty, but found %d stages", len(taskStages)) + } +} + +func TestTaskStagesWithErrors(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + config := &tfe.Config{ + Token: "not-a-token", + } + client, _ := tfe.NewClient(config) + MockTaskStageUnsupported(t, client) + + ctx := context.TODO() + _, err := b.runTaskStages(ctx, client, "this run ID will not exist is invalid anyway") + + if err == nil { + t.Error("Expected to error but did not") + } +} + +func TestTaskStageOverride(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + integrationContext, writer := newMockIntegrationContext(b, t) + + integrationContext.Op.UIOut = b.CLI + + cases := map[string]struct { + taskStageID string + isError bool + errMsg string + input *mockInput + cont bool + }{ + "override-pass": { + taskStageID: "ts-pass", + isError: false, + input: testInput(t, map[string]string{ + "→→ [bold]Override": "override", + }), + errMsg: "", + cont: true, + }, + "override-fail": { + taskStageID: "ts-err", + isError: true, + input: testInput(t, map[string]string{ + "→→ [bold]Override": "override", + }), + errMsg: "", + cont: false, + }, + "skip-override": { + taskStageID: "ts-err", + isError: true, + errMsg: "Failed to override: Apply discarded.", + input: testInput(t, map[string]string{ + "→→ [bold]Override": "no", + }), + cont: false, + }, + } + for _, c := range cases { + integrationContext.Op.UIIn = c.input + cont, err := b.processStageOverrides(integrationContext, writer, c.taskStageID) + if c.isError { + if err == nil { + t.Fatalf("Expected to fail with some error") + } + if c.errMsg != "" { + if !strings.Contains(err.Error(), c.errMsg) { + t.Fatalf("Expected: %s, got: %s", c.errMsg, err.Error()) + } + } + + } else { + if err != nil { + t.Fatalf("Expected to pass, got err: %s", err) + } + } + if c.cont != cont { + 
t.Fatalf("expected polling continue: %t, got: %t", c.cont, cont) + } + } +} diff --git a/pkg/cloud/backend_test.go b/pkg/cloud/backend_test.go new file mode 100644 index 00000000000..281b14c0a2c --- /dev/null +++ b/pkg/cloud/backend_test.go @@ -0,0 +1,1335 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "net/http" + "strings" + "testing" + + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +func TestCloud(t *testing.T) { + var _ backend.Enhanced = New(nil, encryption.StateEncryptionDisabled()) + var _ backend.CLI = New(nil, encryption.StateEncryptionDisabled()) +} + +func TestCloud_backendWithName(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("error: %v", err) + } + + if len(workspaces) != 1 || workspaces[0] != testBackendSingleWorkspaceName { + t.Fatalf("should only have a single configured workspace matching the configured 'name' strategy, but got: %#v", workspaces) + } + + if _, err := b.StateMgr("foo"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected fetching a state which is NOT the single configured workspace to have an ErrWorkspacesNotSupported error, but got: %v", err) + } + + if err := b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected deleting the single configured workspace name to result in an error, but got: %v", err) + } + + if err := b.DeleteWorkspace("foo", true); err != 
backend.ErrWorkspacesNotSupported { + t.Fatalf("expected deleting a workspace which is NOT the configured workspace name to result in an error, but got: %v", err) + } +} + +func TestCloud_backendWithoutHost(t *testing.T) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }) + + // Configure the backend so the client is created. + newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatalf("testBackend: backend.PrepareConfig() failed: %s", valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + + if !confDiags.HasErrors() { + t.Fatalf("testBackend: backend.Configure() should have failed") + } + + if !strings.Contains(confDiags.Err().Error(), "Hostname is required for the cloud backend") { + t.Fatalf("testBackend: backend.Configure() should have failed with missing hostname error") + } +} + +func TestCloud_backendWithTags(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + backend.TestBackendStates(t, b) + + // Test pagination works + for i := 0; i < 25; i++ { + _, err := b.StateMgr(fmt.Sprintf("foo-%d", i+1)) + if err != nil { + t.Fatalf("error: %s", err) + } + } + + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("error: %s", err) + } + actual := len(workspaces) + if actual != 26 { + t.Errorf("expected 26 workspaces (over one standard paginated response), got %d", actual) + } +} + +func TestCloud_PrepareConfig(t *testing.T) { + cases := map[string]struct { + config cty.Value + expectedErr string + }{ + "null organization": { + config: cty.ObjectVal(map[string]cty.Value{ + 
"organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, + }, + "null workspace": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.NullVal(cty.String), + }), + expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. Either workspace "tags" or "name" is required.`, + }, + "workspace: empty tags, name": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + expectedErr: `Invalid workspaces configuration: Missing workspace mapping strategy. 
Either workspace "tags" or "name" is required.`, + }, + "workspace: name present": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, + }, + "workspace: name and tags present": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }), + expectedErr: `Invalid workspaces configuration: Only one of workspace "tags" or "name" is allowed.`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if valDiags.Err() != nil && tc.expectedErr != "" { + actualErr := valDiags.Err().Error() + if !strings.Contains(actualErr, tc.expectedErr) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + } + }) + } +} + +func TestCloud_PrepareConfigWithEnvVars(t *testing.T) { + cases := map[string]struct { + config cty.Value + vars map[string]string + expectedErr string + }{ + "with no organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "example-org", + }, + }, + "with no organization attribute or env var": { + config: cty.ObjectVal(map[string]cty.Value{ + 
"organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + vars: map[string]string{}, + expectedErr: `Invalid or missing required argument: "organization" must be set in the cloud configuration or as an environment variable: TF_CLOUD_ORGANIZATION.`, + }, + "null workspace": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("hashicorp"), + "workspaces": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "TF_WORKSPACE": "my-workspace", + }, + }, + "organization and workspace and project env var": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "workspaces": cty.NullVal(cty.String), + }), + vars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "hashicorp", + "TF_WORKSPACE": "my-workspace", + "TF_CLOUD_PROJECT": "example-project", + }, + }, + "with no project": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("organization"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + }, + "with null project": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("organization"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + vars: map[string]string{ + "TF_CLOUD_PROJECT": "example-project", + }, + }, + "with project env var overwrite config value": { + config: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("organization"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.StringVal("project-name"), + }), + 
}), + vars: map[string]string{ + "TF_CLOUD_PROJECT": "example-project", + }, + }, + "with workspace defined by tags overwritten by TF_WORKSPACE": { + // see https://github.com/kubegems/opentofu/issues/814 for context + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("foo"), + "organization": cty.StringVal("bar"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "project": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{cty.StringVal("baz"), cty.StringVal("qux")}), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "qux", + }, + }, + "with TF_WORKSPACE value outside of the tags set": { + // see https://github.com/kubegems/opentofu/issues/814 for context + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("foo"), + "organization": cty.StringVal("bar"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "project": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{cty.StringVal("baz"), cty.StringVal("qux")}), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "quxx", + }, + expectedErr: `Invalid workspaces configuration: The workspace defined using the environment variable "TF_WORKSPACE" does not belong to "tags".`, + }, + "with workspace block w/o attributes, TF_WORKSPACE defined": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("foo"), + "organization": cty.StringVal("bar"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + vars: map[string]string{ + "TF_WORKSPACE": "qux", + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + for k, v := range tc.vars { + t.Setenv(k, v) + } + + _, valDiags := b.PrepareConfig(tc.config) + if (valDiags.Err() == 
nil) != (tc.expectedErr == "") { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + if valDiags.Err() != nil { + if !strings.Contains(valDiags.Err().Error(), tc.expectedErr) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + } + }) + } +} + +func TestCloud_config(t *testing.T) { + cases := map[string]struct { + config cty.Value + confErr string + valErr string + envVars map[string]string + }{ + "with_a_non_tfe_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nontfe.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + confErr: "Host nontfe.local does not provide a tfe service", + }, + // localhost advertises TFE services, but has no token in the credentials + "without_a_token": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("localhost"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + confErr: "tofu login localhost", + }, + "with_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }), + }, + "with_a_name": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": 
cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + }, + "without_a_name_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + valErr: `Missing workspace mapping strategy.`, + }, + "with_both_a_name_and_tags": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }), + valErr: `Only one of workspace "tags" or "name" is allowed.`, + }, + "null config": { + config: cty.NullVal(cty.EmptyObject), + }, + "with_tags_and_TF_WORKSPACE_env_var_not_matching_tags": { //TODO: once we have proper e2e backend testing we should also add the opposite test - with_tags_and_TF_WORKSPACE_env_var_matching_tags + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("opentofu"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_WORKSPACE": "my-workspace", + }, + confErr: `OpenTofu failed to find workspace my-workspace with the tags specified in your configuration`, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + for k, v := range tc.envVars { + t.Setenv(k, v) + } + + b, 
cleanup := testUnconfiguredBackend(t) + t.Cleanup(cleanup) + + // Validate + _, valDiags := b.PrepareConfig(tc.config) + if (valDiags.Err() != nil || tc.valErr != "") && + (valDiags.Err() == nil || !strings.Contains(valDiags.Err().Error(), tc.valErr)) { + t.Fatalf("unexpected validation result: %v", valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() != nil || tc.confErr != "") && + (confDiags.Err() == nil || !strings.Contains(confDiags.Err().Error(), tc.confErr)) { + t.Fatalf("unexpected configure result: %v", confDiags.Err()) + } + }) + } +} + +func TestCloud_configVerifyMinimumTFEVersion(t *testing.T) { + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }) + + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }, + } + s := testServerWithHandlers(handlers) + + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + confDiags := b.Configure(config) + if confDiags.Err() == nil { + t.Fatalf("expected configure to error") + } + + expected := `The 'cloud' option is not supported with this version of the cloud backend.` + if !strings.Contains(confDiags.Err().Error(), expected) { + t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) + } +} + +func TestCloud_configVerifyMinimumTFEVersionInAutomation(t *testing.T) { + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + 
"workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }) + + handlers := map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.4") + }, + } + s := testServerWithHandlers(handlers) + + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + b.runningInAutomation = true + + confDiags := b.Configure(config) + if confDiags.Err() == nil { + t.Fatalf("expected configure to error") + } + + expected := `This version of cloud backend does not support the state mechanism +attempting to be used by the platform. This should never happen.` + if !strings.Contains(confDiags.Err().Error(), expected) { + t.Fatalf("expected configure to error with %q, got %q", expected, confDiags.Err().Error()) + } +} + +func TestCloud_setUnavailableTerraformVersion(t *testing.T) { + // go-tfe returns an error IRL if you try to set a Terraform version that's + // not available in your TFC instance. To test this, tfe_client_mock errors if + // you try to set any Terraform version for this specific workspace name. + workspaceName := "unavailable-terraform-version" + + config := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("sometag"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }) + + b, _, bCleanup := testBackend(t, config, nil) + defer bCleanup() + + // Make sure the workspace doesn't exist yet -- otherwise, we can't test what + // happens when a workspace gets created. 
This is why we can't use "name" in + // the backend config above, btw: if you do, testBackend() creates the default + // workspace before we get a chance to do anything. + _, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) + if err != tfe.ErrResourceNotFound { + t.Fatalf("the workspace we were about to try and create (%s/%s) already exists in the mocks somehow, so this test isn't trustworthy anymore", b.organization, workspaceName) + } + + _, err = b.StateMgr(workspaceName) + if err != nil { + t.Fatalf("expected no error from StateMgr, despite not being able to set remote TF version: %#v", err) + } + // Make sure the workspace was created: + workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, workspaceName) + if err != nil { + t.Fatalf("b.StateMgr() didn't actually create the desired workspace") + } + // Make sure our mocks still error as expected, using the same update function b.StateMgr() would call: + _, err = b.client.Workspaces.UpdateByID( + context.Background(), + workspace.ID, + tfe.WorkspaceUpdateOptions{TerraformVersion: tfe.String("1.1.0")}, + ) + if err == nil { + t.Fatalf("the mocks aren't emulating a nonexistent remote TF version correctly, so this test isn't trustworthy anymore") + } +} + +func TestCloud_setConfigurationFieldsHappyPath(t *testing.T) { + cases := map[string]struct { + obj cty.Value + envVars map[string]string + expectedHostname string + expectedOrganization string + expectedWorkspaceName string + expectedProjectName string + expectedWorkspaceTags map[string]struct{} + expectedForceLocal bool + }{ + "with hostname, organization and tags set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.StringVal("opentofu"), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{cty.StringVal("foo"), cty.StringVal("bar")}), + "project": 
cty.NullVal(cty.String), + }), + }), + expectedHostname: "opentofu.org", + expectedOrganization: "opentofu", + expectedWorkspaceTags: map[string]struct{}{"foo": {}, "bar": {}}, + }, + "with hostname and workspace name set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + expectedHostname: "opentofu.org", + expectedWorkspaceName: "prod", + }, + "with hostname and project name set": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.StringVal("my-project"), + }), + }), + expectedHostname: "opentofu.org", + expectedProjectName: "my-project", + }, + "with hostname and force local set (env var)": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + expectedHostname: "opentofu.org", + envVars: map[string]string{ + "TF_FORCE_LOCAL_BACKEND": "1", + }, + expectedForceLocal: true, + }, + "with hostname and workspace tags set, then tags should not be overwritten by TF_WORKSPACE": { + // see: https://github.com/kubegems/opentofu/issues/814 + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal([]cty.Value{cty.StringVal("foo"), cty.StringVal("bar")}), + 
"project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_WORKSPACE": "foo", + }, + expectedHostname: "opentofu.org", + expectedWorkspaceName: "", + expectedWorkspaceTags: map[string]struct{}{"foo": {}, "bar": {}}, + }, + "with hostname and workspace name set, and workspace name the same as provided TF_WORKSPACE": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("my-workspace"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_WORKSPACE": "my-workspace", + }, + expectedHostname: "opentofu.org", + expectedWorkspaceName: "my-workspace", + }, + "with hostname and project set, and project overwritten by TF_CLOUD_PROJECT": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.StringVal("old"), + }), + }), + envVars: map[string]string{ + "TF_CLOUD_PROJECT": "new", + }, + expectedHostname: "opentofu.org", + expectedProjectName: "old", + }, + "with hostname set, and project specified by TF_CLOUD_PROJECT": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_CLOUD_PROJECT": "new", + }, + expectedHostname: "opentofu.org", + expectedProjectName: "new", + }, + "with hostname set, and organization specified by TF_CLOUD_ORGANIZATION": { + obj: cty.ObjectVal(map[string]cty.Value{ + "hostname": 
cty.StringVal("opentofu.org"), + "token": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_CLOUD_ORGANIZATION": "my-org", + }, + expectedHostname: "opentofu.org", + expectedOrganization: "my-org", + }, + "with hostname set, and TF_CLOUD_HOSTNAME defined": { + obj: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("opentofu.org"), + "token": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_CLOUD_HOSTNAME": "new", + }, + expectedHostname: "opentofu.org", + }, + "with hostname specified by TF_CLOUD_HOSTNAME": { + obj: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "token": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_CLOUD_HOSTNAME": "new", + }, + expectedHostname: "new", + }, + "with nothing set, all configured using env vars": { + obj: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.NullVal(cty.String), + "workspaces": cty.NullVal(cty.String), + }), + envVars: map[string]string{ + "TF_CLOUD_HOSTNAME": "opentofu.org", + "TF_CLOUD_ORGANIZATION": "opentofu", + "TF_WORKSPACE": "foo", + "TF_CLOUD_PROJECT": "bar", + }, + expectedHostname: "opentofu.org", + expectedOrganization: "opentofu", + expectedWorkspaceName: "foo", + expectedProjectName: "bar", + }, + } + + for name, tc := 
range cases { + t.Run(name, func(t *testing.T) { + for k, v := range tc.envVars { + t.Setenv(k, v) + } + + b := &Cloud{} + errDiags := b.setConfigurationFields(tc.obj) + + if errDiags.HasErrors() { + t.Fatalf("%s: unexpected validation result: %v", name, errDiags.Err()) + } + if b.hostname != tc.expectedHostname { + t.Fatalf("%s: expected hostname %s to match configured hostname %s", name, b.hostname, tc.expectedHostname) + } + if b.organization != tc.expectedOrganization { + t.Fatalf("%s: expected organization (%s) to match configured organization (%s)", name, b.organization, tc.expectedOrganization) + } + if b.WorkspaceMapping.Name != tc.expectedWorkspaceName { + t.Fatalf("%s: expected workspace name mapping (%s) to match configured workspace name (%s)", name, b.WorkspaceMapping.Name, tc.expectedWorkspaceName) + } + if b.forceLocal != tc.expectedForceLocal { + t.Fatalf("%s: expected force local backend to be set to %v", name, tc.expectedForceLocal) + } + if b.WorkspaceMapping.Project != tc.expectedProjectName { + t.Fatalf("%s: expected project name mapping (%s) to match configured project name (%s)", name, b.WorkspaceMapping.Project, tc.expectedProjectName) + } + + // read map of configured tags + gotTags := map[string]struct{}{} + for _, v := range b.WorkspaceMapping.Tags { + gotTags[v] = struct{}{} + } + + if len(gotTags) != len(tc.expectedWorkspaceTags) { + t.Fatalf("%s: unordered workspace tags (%v) don't match configuration (%v)", name, gotTags, tc.expectedWorkspaceTags) + } + + for k := range tc.expectedWorkspaceTags { + if _, ok := gotTags[k]; !ok { + t.Fatalf("%s: unordered workspace tags (%v) don't match configuration (%v)", name, gotTags, tc.expectedWorkspaceTags) + } + } + }) + } +} + +func TestCloud_setConfigurationFieldsUnhappyPath(t *testing.T) { + cases := map[string]struct { + obj cty.Value + envVars map[string]string + wantSummary string + wantDetail string + }{ + "cloud block is not configured": { + obj: cty.ObjectVal(map[string]cty.Value{ + 
"organization": cty.NullVal(cty.String), + "hostname": cty.NullVal(cty.String), + "workspaces": cty.NullVal(cty.String), + }), + wantSummary: "Hostname is required for the cloud backend", + wantDetail: `OpenTofu does not provide a default "hostname" attribute, so it must be set to the hostname of the cloud backend.`, + }, + "with hostname and workspace name set, and workspace name is not the same as provided TF_WORKSPACE": { + obj: cty.ObjectVal(map[string]cty.Value{ + "organization": cty.NullVal(cty.String), + "hostname": cty.StringVal("opentofu.org"), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("my-workspace"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }), + envVars: map[string]string{ + "TF_WORKSPACE": "qux", + }, + wantSummary: invalidWorkspaceConfigInconsistentNameAndEnvVar().Description().Summary, + wantDetail: invalidWorkspaceConfigInconsistentNameAndEnvVar().Description().Detail, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + for k, v := range tc.envVars { + t.Setenv(k, v) + } + + b := &Cloud{} + errDiags := b.setConfigurationFields(tc.obj) + if (tc.wantDetail != "" || tc.wantSummary != "") != errDiags.HasErrors() { + t.Fatalf("%s error expected", name) + } + + gotSummary := errDiags[0].Description().Summary + if gotSummary != tc.wantSummary { + t.Fatalf("%s diagnostic summary mismatch, want: %s, got: %s", name, tc.wantSummary, gotSummary) + } + + gotDetail := errDiags[0].Description().Detail + if gotDetail != tc.wantDetail { + t.Fatalf("%s diagnostic details mismatch, want: %s, got: %s", name, tc.wantDetail, gotDetail) + } + }) + } +} + +func TestCloud_localBackend(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + local, ok := b.local.(*backendLocal.Local) + if !ok { + t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) + } + + cloud, ok := local.Backend.(*Cloud) + if !ok { + t.Fatalf("expected 
local.Backend to be *cloud.Cloud, got: %T", cloud) + } +} + +func TestCloud_addAndRemoveWorkspacesDefault(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if err := b.DeleteWorkspace(testBackendSingleWorkspaceName, true); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } +} + +func TestCloud_StateMgr_versionCheck(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // Some fixed versions for testing with. This logic is a simple string + // comparison, so we don't need many test cases. + v0135 := version.Must(version.NewSemver("0.13.5")) + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the mock remote workspace Terraform version to match the local + // Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0140.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Now change the remote workspace to a different Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: 
tfe.String(v0135.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should fail + want := `Remote workspace TF version "0.13.5" does not match local OpenTofu version "0.14.0"` + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err.Error() != want { + t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) + } +} + +func TestCloud_StateMgr_versionCheckLatest(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the remote workspace to the pseudo-version "latest" + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("latest"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed despite not being a string match + if _, err := b.StateMgr(testBackendSingleWorkspaceName); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion(t *testing.T) { + testCases := []struct { + local string + remote string + executionMode string + wantErr bool + }{ + {"0.13.5", "0.13.5", "agent", false}, + {"0.14.0", "0.13.5", "remote", true}, + {"0.14.0", "0.13.5", "local", false}, + {"0.14.0", "0.14.1", "remote", false}, + {"0.14.0", "1.0.99", "remote", false}, + {"0.14.0", "1.1.0", "remote", false}, + {"0.14.0", "1.3.0", "remote", true}, + {"1.2.0", "1.2.99", "remote", false}, + {"1.2.0", "1.3.0", "remote", true}, + {"0.15.0", "latest", "remote", false}, + {"1.1.5", 
"~> 1.1.1", "remote", false}, + {"1.1.5", "> 1.1.0, < 1.3.0", "remote", false}, + {"1.1.5", "~> 1.0.1", "remote", true}, + // pre-release versions are comparable within their pre-release stage (dev, + // alpha, beta), but not comparable to different stages and not comparable + // to final releases. + {"1.1.0-beta1", "1.1.0-beta1", "remote", false}, + {"1.1.0-beta1", "~> 1.1.0-beta", "remote", false}, + {"1.1.0", "~> 1.1.0-beta", "remote", true}, + {"1.1.0-beta1", "~> 1.1.0-dev", "remote", true}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + local := version.Must(version.NewSemver(tc.local)) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + ExecutionMode: &tc.executionMode, + TerraformVersion: tfe.String(tc.remote), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if tc.wantErr { + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Incompatible TF version") { + t.Fatalf("unexpected error: %s", got) + } + } else { + if len(diags) != 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + } + }) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { + b, bCleanup := testBackendWithName(t) 
+ defer bCleanup() + + // Attempting to check the version against a workspace which doesn't exist + // should result in no errors + diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") + if len(diags) != 0 { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + // Use a special workspace ID to trigger a 500 error, which should result + // in a failed check + diags = b.VerifyWorkspaceTerraformVersion("network-error") + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { + t.Fatalf("unexpected error: %s", got) + } + + // Update the mock remote workspace Terraform version to an invalid version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("1.0.cheetarah"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Incompatible TF version: The remote workspace specified") { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestCloud_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + // If the ignore flag is set, the behaviour changes + b.IgnoreVersionConflict() + + // Different local & remote versions to cause an error + local := version.Must(version.NewSemver("0.14.0")) + remote := version.Must(version.NewSemver("0.13.5")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + 
tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.WorkspaceMapping.Name, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(remote.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong severity: got %#v, want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "Incompatible TF version"; got != want { + t.Errorf("wrong summary: got %s, want %s", got, want) + } + wantDetail := "The local OpenTofu version (0.14.0) does not meet the version requirements for remote workspace hashicorp/app-prod (0.13.5)." + if got := diags[0].Description().Detail; got != wantDetail { + t.Errorf("wrong summary: got %s, want %s", got, wantDetail) + } +} + +func TestCloudBackend_DeleteWorkspace_SafeAndForce(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + safeDeleteWorkspaceName := "safe-delete-workspace" + forceDeleteWorkspaceName := "force-delete-workspace" + + _, err := b.StateMgr(safeDeleteWorkspaceName) + if err != nil { + t.Fatalf("error: %s", err) + } + + _, err = b.StateMgr(forceDeleteWorkspaceName) + if err != nil { + t.Fatalf("error: %s", err) + } + + // sanity check that the mock now contains two workspaces + wl, err := b.Workspaces() + if err != nil { + t.Fatalf("error fetching workspace names: %v", err) + } + if len(wl) != 2 { + t.Fatalf("expected 2 workspaced but got %d", len(wl)) + } + + c := context.Background() + safeDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, safeDeleteWorkspaceName) + if err != nil { + t.Fatalf("error fetching workspace: %v", err) + } + + 
// Lock a workspace so that it should fail to be safe deleted + _, err = b.client.Workspaces.Lock(context.Background(), safeDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) + if err != nil { + t.Fatalf("error locking workspace: %v", err) + } + err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) + if err == nil { + t.Fatalf("workspace should have failed to safe delete") + } + + // unlock the workspace and confirm that safe-delete now works + _, err = b.client.Workspaces.Unlock(context.Background(), safeDeleteWorkspace.ID) + if err != nil { + t.Fatalf("error unlocking workspace: %v", err) + } + err = b.DeleteWorkspace(safeDeleteWorkspaceName, false) + if err != nil { + t.Fatalf("error safe deleting workspace: %v", err) + } + + // lock a workspace and then confirm that force deleting it works + forceDeleteWorkspace, err := b.client.Workspaces.Read(c, b.organization, forceDeleteWorkspaceName) + if err != nil { + t.Fatalf("error fetching workspace: %v", err) + } + _, err = b.client.Workspaces.Lock(context.Background(), forceDeleteWorkspace.ID, tfe.WorkspaceLockOptions{Reason: tfe.String("test")}) + if err != nil { + t.Fatalf("error locking workspace: %v", err) + } + err = b.DeleteWorkspace(forceDeleteWorkspaceName, true) + if err != nil { + t.Fatalf("error force deleting workspace: %v", err) + } +} + +func TestCloudBackend_DeleteWorkspace_DoesNotExist(t *testing.T) { + b, bCleanup := testBackendWithTags(t) + defer bCleanup() + + err := b.DeleteWorkspace("non-existent-workspace", false) + if err != nil { + t.Fatalf("expected deleting a workspace which does not exist to succeed") + } +} + +func TestCloud_ServiceDiscoveryAliases(t *testing.T) { + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + diag := b.Configure(cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": 
cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + })) + if diag.HasErrors() { + t.Fatalf("expected no diagnostic errors, got %s", diag.Err()) + } + + aliases, err := b.ServiceDiscoveryAliases() + if err != nil { + t.Fatalf("expected no errors, got %s", err) + } + if len(aliases) != 1 { + t.Fatalf("expected 1 alias but got %d", len(aliases)) + } +} diff --git a/pkg/cloud/cloud_integration.go b/pkg/cloud/cloud_integration.go new file mode 100644 index 00000000000..c4ce24e5f8b --- /dev/null +++ b/pkg/cloud/cloud_integration.go @@ -0,0 +1,117 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/mitchellh/cli" +) + +// IntegrationOutputWriter is an interface used to to write output tailored for +// Terraform Cloud integrations +type IntegrationOutputWriter interface { + End() + OutputElapsed(message string, maxMessage int) + Output(str string) + SubOutput(str string) +} + +// IntegrationContext is a set of data that is useful when performing Terraform Cloud integration operations +type IntegrationContext struct { + B *Cloud + StopContext context.Context + CancelContext context.Context + Op *backend.Operation + Run *tfe.Run +} + +// integrationCLIOutput implements IntegrationOutputWriter +type integrationCLIOutput struct { + CLI cli.Ui + Colorizer Colorer + started time.Time +} + +var _ IntegrationOutputWriter = (*integrationCLIOutput)(nil) // Compile time check + +func (s *IntegrationContext) Poll(backoffMinInterval float64, backoffMaxInterval float64, every func(i int) (bool, error)) error { + for i := 0; ; i++ { + select { + case <-s.StopContext.Done(): + return s.StopContext.Err() + case 
<-s.CancelContext.Done(): + return s.CancelContext.Err() + case <-time.After(backoff(backoffMinInterval, backoffMaxInterval, i)): + // blocks for a time between min and max + } + + cont, err := every(i) + if !cont { + return err + } + } +} + +// BeginOutput writes a preamble to the CLI and creates a new IntegrationOutputWriter interface +// to write the remaining CLI output to. Use IntegrationOutputWriter.End() to complete integration +// output +func (s *IntegrationContext) BeginOutput(name string) IntegrationOutputWriter { + var result IntegrationOutputWriter = &integrationCLIOutput{ + CLI: s.B.CLI, + Colorizer: s.B.Colorize(), + started: time.Now(), + } + + result.Output("\n[bold]" + name + ":\n") + + return result +} + +// End writes the termination output for the integration +func (s *integrationCLIOutput) End() { + if s.CLI == nil { + return + } + + s.CLI.Output("\n------------------------------------------------------------------------\n") +} + +// Output writes a string after colorizing it using any [colorstrings](https://github.com/mitchellh/colorstring) it contains +func (s *integrationCLIOutput) Output(str string) { + if s.CLI == nil { + return + } + s.CLI.Output(s.Colorizer.Color(str)) +} + +// SubOutput writes a string prefixed by a "│ " after colorizing it using any [colorstrings](https://github.com/mitchellh/colorstring) it contains +func (s *integrationCLIOutput) SubOutput(str string) { + if s.CLI == nil { + return + } + s.CLI.Output(s.Colorizer.Color(fmt.Sprintf("[reset]│ %s", str))) +} + +// OutputElapsed writes a string followed by the amount of time that has elapsed since calling BeginOutput. +// Example pending output; the variable spacing (50 chars) allows up to 99 tasks (two digits) in each category: +// --------------- +// 13 tasks still pending, 0 passed, 0 failed ... +// 13 tasks still pending, 0 passed, 0 failed ... (8s elapsed) +// 13 tasks still pending, 0 passed, 0 failed ... 
(19s elapsed) +// 13 tasks still pending, 0 passed, 0 failed ... (33s elapsed) +func (s *integrationCLIOutput) OutputElapsed(message string, maxMessage int) { + if s.CLI == nil { + return + } + elapsed := time.Since(s.started).Truncate(1 * time.Second) + s.CLI.Output(fmt.Sprintf("%-"+strconv.FormatInt(int64(maxMessage), 10)+"s", message) + s.Colorizer.Color(fmt.Sprintf("[dim](%s elapsed)", elapsed))) +} diff --git a/pkg/cloud/cloud_variables.go b/pkg/cloud/cloud_variables.go new file mode 100644 index 00000000000..ad341160010 --- /dev/null +++ b/pkg/cloud/cloud_variables.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func allowedSourceType(source tofu.ValueSourceType) bool { + return source == tofu.ValueFromNamedFile || source == tofu.ValueFromCLIArg || source == tofu.ValueFromEnvVar +} + +// ParseCloudRunVariables accepts a mapping of unparsed values and a mapping of variable +// declarations and returns a name/value variable map appropriate for an API run context, +// that is, containing variables only sourced from non-file inputs like CLI args +// and environment variables. However, all variable parsing diagnostics are returned +// in order to allow callers to short circuit cloud runs that contain variable +// declaration or parsing errors. The only exception is that missing required values are not +// considered errors because they may be defined within the cloud workspace. 
+func ParseCloudRunVariables(vv map[string]backend.UnparsedVariableValue, decls map[string]*configs.Variable) (map[string]string, tfdiags.Diagnostics) { + declared, diags := backend.ParseDeclaredVariableValues(vv, decls) + _, undedeclaredDiags := backend.ParseUndeclaredVariableValues(vv, decls) + diags = diags.Append(undedeclaredDiags) + + ret := make(map[string]string, len(declared)) + + // Even if there are parsing or declaration errors, populate the return map with the + // variables that could be used for cloud runs + for name, v := range declared { + if !allowedSourceType(v.SourceType) { + continue + } + + // RunVariables are always expressed as HCL strings + tokens := hclwrite.TokensForValue(v.Value) + ret[name] = string(tokens.Bytes()) + } + + return ret, diags +} diff --git a/pkg/cloud/cloud_variables_test.go b/pkg/cloud/cloud_variables_test.go new file mode 100644 index 00000000000..c683314bf33 --- /dev/null +++ b/pkg/cloud/cloud_variables_test.go @@ -0,0 +1,188 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +func TestParseCloudRunVariables(t *testing.T) { + t.Run("populates variables from allowed sources", func(t *testing.T) { + vv := map[string]backend.UnparsedVariableValue{ + "undeclared": testUnparsedVariableValue{source: tofu.ValueFromCLIArg, value: cty.StringVal("0")}, + "declaredFromConfig": testUnparsedVariableValue{source: tofu.ValueFromConfig, value: cty.StringVal("1")}, + "declaredFromNamedFileMapString": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("bar")})}, + "declaredFromNamedFileBool": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.BoolVal(true)}, + "declaredFromNamedFileNumber": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.NumberIntVal(2)}, + "declaredFromNamedFileListString": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.ListVal([]cty.Value{cty.StringVal("2a"), cty.StringVal("2b")})}, + "declaredFromNamedFileNull": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.NullVal(cty.String)}, + "declaredFromNamedMapComplex": testUnparsedVariableValue{source: tofu.ValueFromNamedFile, value: cty.MapVal(map[string]cty.Value{"foo": cty.ObjectVal(map[string]cty.Value{"qux": cty.ListVal([]cty.Value{cty.BoolVal(true), cty.BoolVal(false)})})})}, + "declaredFromCLIArg": testUnparsedVariableValue{source: tofu.ValueFromCLIArg, value: cty.StringVal("3")}, + "declaredFromEnvVar": testUnparsedVariableValue{source: tofu.ValueFromEnvVar, value: cty.StringVal("4")}, + } + + decls := map[string]*configs.Variable{ + "declaredFromConfig": { + Name: 
"declaredFromConfig", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedFileMapString": { + Name: "declaredFromNamedFileMapString", + Type: cty.Map(cty.String), + ConstraintType: cty.Map(cty.String), + ParsingMode: configs.VariableParseHCL, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedFileBool": { + Name: "declaredFromNamedFileBool", + Type: cty.Bool, + ConstraintType: cty.Bool, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedFileNumber": { + Name: "declaredFromNamedFileNumber", + Type: cty.Number, + ConstraintType: cty.Number, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedFileListString": { + Name: "declaredFromNamedFileListString", + Type: cty.List(cty.String), + ConstraintType: cty.List(cty.String), + ParsingMode: configs.VariableParseHCL, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedFileNull": { + Name: "declaredFromNamedFileNull", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseHCL, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromNamedMapComplex": { + Name: "declaredFromNamedMapComplex", + Type: cty.DynamicPseudoType, + 
ConstraintType: cty.DynamicPseudoType, + ParsingMode: configs.VariableParseHCL, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromCLIArg": { + Name: "declaredFromCLIArg", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "declaredFromEnvVar": { + Name: "declaredFromEnvVar", + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + "missing": { + Name: "missing", + Type: cty.String, + ConstraintType: cty.String, + Default: cty.StringVal("2"), + ParsingMode: configs.VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: "fake.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 0}, + }, + }, + } + wantVals := make(map[string]string) + wantVals["declaredFromNamedFileBool"] = "true" + wantVals["declaredFromNamedFileNumber"] = "2" + wantVals["declaredFromNamedFileListString"] = `["2a", "2b"]` + wantVals["declaredFromNamedFileNull"] = "null" + wantVals["declaredFromNamedFileMapString"] = "{\n foo = \"bar\"\n}" + wantVals["declaredFromNamedMapComplex"] = "{\n foo = {\n qux = [true, false]\n }\n}" + wantVals["declaredFromCLIArg"] = `"3"` + wantVals["declaredFromEnvVar"] = `"4"` + + gotVals, diags := ParseCloudRunVariables(vv, decls) + if diff := cmp.Diff(wantVals, gotVals, cmp.Comparer(cty.Value.RawEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + + if got, want := len(diags), 1; got != want { + t.Fatalf("expected 1 variable error: %v, got %v", diags.Err(), want) + } + + if got, want := diags[0].Description().Summary, "Value 
for undeclared variable"; got != want { + t.Errorf("wrong summary for diagnostic 0\ngot: %s\nwant: %s", got, want) + } + }) +} + +type testUnparsedVariableValue struct { + source tofu.ValueSourceType + value cty.Value +} + +func (v testUnparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + return &tofu.InputValue{ + Value: v.value, + SourceType: v.source, + SourceRange: tfdiags.SourceRange{ + Filename: "fake.tfvars", + Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + End: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0}, + }, + }, nil +} diff --git a/pkg/cloud/cloudplan/remote_plan_json.go b/pkg/cloud/cloudplan/remote_plan_json.go new file mode 100644 index 00000000000..f0d9daa040c --- /dev/null +++ b/pkg/cloud/cloudplan/remote_plan_json.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudplan + +import ( + "github.com/kubegems/opentofu/pkg/plans" +) + +// RemotePlanJSON is a wrapper struct that associates a pre-baked JSON plan with +// several pieces of metadata that can't be derived directly from the JSON +// contents and must instead be discovered from a tfe.Run or tfe.Plan. The +// wrapper is useful for moving data between the Cloud backend (which is the +// only thing able to fetch the JSON and determine values for the metadata) and +// the command.ShowCommand and views.Show interface (which need to have all of +// this information together). +type RemotePlanJSON struct { + // The raw bytes of json we got from the API. + JSONBytes []byte + // Indicates whether the json bytes are the "redacted json plan" format, or + // the unredacted stable "external json plan" format. 
These formats are + // actually very different under the hood; the redacted one can be decoded + // directly into a jsonformat.Plan struct and is intended for formatting a + // plan for human consumption, while the unredacted one matches what is + // returned by the jsonplan.Marshal() function, cannot be directly decoded + // into a public type (it's actually a jsonplan.plan struct), and will + // generally be spat back out verbatim. + Redacted bool + // Normal/destroy/refresh. Required by (jsonformat.Renderer).RenderHumanPlan. + Mode plans.Mode + // Unchanged/errored. Required by (jsonformat.Renderer).RenderHumanPlan. + Qualities []plans.Quality + // A human-readable header with a link to view the associated run in the + // Terraform Cloud UI. + RunHeader string + // A human-readable footer with information relevant to the likely next + // actions for this plan. + RunFooter string +} diff --git a/pkg/cloud/cloudplan/saved_plan.go b/pkg/cloud/cloudplan/saved_plan.go new file mode 100644 index 00000000000..c8a50869d1d --- /dev/null +++ b/pkg/cloud/cloudplan/saved_plan.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package cloudplan + +import ( + "encoding/json" + "errors" + "io" + "os" + "strings" +) + +var ErrInvalidRemotePlanFormat = errors.New("invalid remote plan format, must be 1") +var ErrInvalidRunID = errors.New("invalid run ID") +var ErrInvalidHostname = errors.New("invalid hostname") + +type SavedPlanBookmark struct { + RemotePlanFormat int `json:"remote_plan_format"` + RunID string `json:"run_id"` + Hostname string `json:"hostname"` +} + +func NewSavedPlanBookmark(runID, hostname string) SavedPlanBookmark { + return SavedPlanBookmark{ + RemotePlanFormat: 1, + RunID: runID, + Hostname: hostname, + } +} + +func LoadSavedPlanBookmark(filepath string) (SavedPlanBookmark, error) { + bookmark := SavedPlanBookmark{} + + file, err := os.Open(filepath) + if err != nil { + return bookmark, err + } + defer file.Close() + + data, err := io.ReadAll(file) + if err != nil { + return bookmark, err + } + + err = json.Unmarshal(data, &bookmark) + if err != nil { + return bookmark, err + } + + // Note that these error cases are somewhat ambiguous, but they *likely* + // mean we're not looking at a saved plan bookmark at all. Since we're not + // certain about the format at this point, it doesn't quite make sense to + // emit a "known file type but bad" error struct the way we do over in the + // planfile and statefile packages. 
+ if bookmark.RemotePlanFormat != 1 { + return bookmark, ErrInvalidRemotePlanFormat + } else if bookmark.Hostname == "" { + return bookmark, ErrInvalidHostname + } else if bookmark.RunID == "" || !strings.HasPrefix(bookmark.RunID, "run-") { + return bookmark, ErrInvalidRunID + } + + return bookmark, err +} + +func (s *SavedPlanBookmark) Save(filepath string) error { + data, _ := json.Marshal(s) + + err := os.WriteFile(filepath, data, 0644) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/cloud/cloudplan/saved_plan_test.go b/pkg/cloud/cloudplan/saved_plan_test.go new file mode 100644 index 00000000000..10d510f6d8c --- /dev/null +++ b/pkg/cloud/cloudplan/saved_plan_test.go @@ -0,0 +1,101 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudplan + +import ( + "errors" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" +) + +func TestCloud_loadBasic(t *testing.T) { + bookmark := SavedPlanBookmark{ + RemotePlanFormat: 1, + RunID: "run-GXfuHMkbyHccAGUg", + Hostname: "app.terraform.io", + } + + file := "./testdata/plan-bookmark/bookmark.json" + result, err := LoadSavedPlanBookmark(file) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(bookmark, result, cmp.Comparer(cty.Value.RawEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestCloud_loadCheckRunID(t *testing.T) { + // Run ID must never be empty + file := "./testdata/plan-bookmark/empty_run_id.json" + _, err := LoadSavedPlanBookmark(file) + if !errors.Is(err, ErrInvalidRunID) { + t.Fatalf("expected %s but got %s", ErrInvalidRunID, err) + } +} + +func TestCloud_loadCheckHostname(t *testing.T) { + // Hostname must never be empty + file := "./testdata/plan-bookmark/empty_hostname.json" + _, err := LoadSavedPlanBookmark(file) + if !errors.Is(err, ErrInvalidHostname) { + t.Fatalf("expected %s but 
got %s", ErrInvalidHostname, err) + } +} + +func TestCloud_loadCheckVersionNumberBasic(t *testing.T) { + // remote_plan_format must be set to 1 + // remote_plan_format and format version number are used interchangeably + file := "./testdata/plan-bookmark/invalid_version.json" + _, err := LoadSavedPlanBookmark(file) + if !errors.Is(err, ErrInvalidRemotePlanFormat) { + t.Fatalf("expected %s but got %s", ErrInvalidRemotePlanFormat, err) + } +} + +func TestCloud_saveWhenFileExistsBasic(t *testing.T) { + tmpDir := t.TempDir() + tmpFile, err := os.Create(filepath.Join(tmpDir, "saved-bookmark.json")) + if err != nil { + t.Fatal("File could not be created.", err) + } + defer tmpFile.Close() + + // verify the created path exists + // os.Stat() wants path to file + _, error := os.Stat(tmpFile.Name()) + if error != nil { + t.Fatal("Path to file does not exist.", error) + } else { + b := &SavedPlanBookmark{ + RemotePlanFormat: 1, + RunID: "run-GXfuHMkbyHccAGUg", + Hostname: "app.terraform.io", + } + err := b.Save(tmpFile.Name()) + if err != nil { + t.Fatal(err) + } + } +} + +func TestCloud_saveWhenFileDoesNotExistBasic(t *testing.T) { + tmpDir := t.TempDir() + b := &SavedPlanBookmark{ + RemotePlanFormat: 1, + RunID: "run-GXfuHMkbyHccAGUg", + Hostname: "app.terraform.io", + } + err := b.Save(filepath.Join(tmpDir, "create-new-file.txt")) + if err != nil { + t.Fatal(err) + } +} diff --git a/pkg/cloud/cloudplan/testdata/plan-bookmark/bookmark.json b/pkg/cloud/cloudplan/testdata/plan-bookmark/bookmark.json new file mode 100644 index 00000000000..0a1c73302a2 --- /dev/null +++ b/pkg/cloud/cloudplan/testdata/plan-bookmark/bookmark.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "app.terraform.io" +} diff --git a/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_hostname.json b/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_hostname.json new file mode 100644 index 00000000000..990267294f7 --- /dev/null +++ 
b/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_hostname.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "" +} diff --git a/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_run_id.json b/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_run_id.json new file mode 100644 index 00000000000..712581aeaea --- /dev/null +++ b/pkg/cloud/cloudplan/testdata/plan-bookmark/empty_run_id.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "", + "hostname": "app.terraform.io" +} diff --git a/pkg/cloud/cloudplan/testdata/plan-bookmark/invalid_version.json b/pkg/cloud/cloudplan/testdata/plan-bookmark/invalid_version.json new file mode 100644 index 00000000000..59a89d92313 --- /dev/null +++ b/pkg/cloud/cloudplan/testdata/plan-bookmark/invalid_version.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 11, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "app.terraform.io" +} diff --git a/pkg/cloud/configchangemode_string.go b/pkg/cloud/configchangemode_string.go new file mode 100644 index 00000000000..b60692be15f --- /dev/null +++ b/pkg/cloud/configchangemode_string.go @@ -0,0 +1,37 @@ +// Code generated by "stringer -type ConfigChangeMode"; DO NOT EDIT. + +package cloud + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ConfigMigrationIn-8600] + _ = x[ConfigMigrationOut-8598] + _ = x[ConfigChangeInPlace-8635] + _ = x[ConfigChangeIrrelevant-129335] +} + +const ( + _ConfigChangeMode_name_0 = "ConfigMigrationOut" + _ConfigChangeMode_name_1 = "ConfigMigrationIn" + _ConfigChangeMode_name_2 = "ConfigChangeInPlace" + _ConfigChangeMode_name_3 = "ConfigChangeIrrelevant" +) + +func (i ConfigChangeMode) String() string { + switch { + case i == 8598: + return _ConfigChangeMode_name_0 + case i == 8600: + return _ConfigChangeMode_name_1 + case i == 8635: + return _ConfigChangeMode_name_2 + case i == 129335: + return _ConfigChangeMode_name_3 + default: + return "ConfigChangeMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/cloud/e2e/README.md b/pkg/cloud/e2e/README.md new file mode 100644 index 00000000000..c29ec295a22 --- /dev/null +++ b/pkg/cloud/e2e/README.md @@ -0,0 +1,24 @@ +# How to run tests + +To run them, use: +``` +TFE_TOKEN= TFE_HOSTNAME= TF_ACC=1 go test ./pkg/cloud/e2e/... -ldflags "-X \"github.com/kubegems/opentofu/version.Prerelease=\"" +``` + +Required flags +* `TF_ACC=1`. This variable is used as part of tofu for tests that make + external network calls. This is needed to run these tests. Without it, the + tests do not run. +* `TFE_TOKEN=` and `TFE_HOSTNAME=`. The helpers +for these tests require admin access to a TFC/TFE instance. +* `-timeout=30m`. Some of these tests take longer than the default 10m timeout for `go test`. + +### Flags + +* Use the `-v` flag for normal verbose mode. +* Use the `-tfoutput` flag to print the tofu output to standard out. +* Use `-ldflags` to change the version Prerelease to match a version +available remotely. Some behaviors rely on the exact local version of OpenTofu +being available in TFC/TFE, and manipulating the Prerelease during build is +often the only way to ensure this. 
+[(More on `-ldflags`.)](https://www.digitalocean.com/community/tutorials/using-ldflags-to-set-version-information-for-go-applications) diff --git a/pkg/cloud/e2e/apply_auto_approve_test.go b/pkg/cloud/e2e/apply_auto_approve_test.go new file mode 100644 index 00000000000..93acbc10c73 --- /dev/null +++ b/pkg/cloud/e2e/apply_auto_approve_test.go @@ -0,0 +1,183 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "testing" + + tfe "github.com/hashicorp/go-tfe" + tfversion "github.com/kubegems/opentofu/version" +) + +func Test_terraform_apply_autoApprove(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := testCases{ + "workspace manual apply, terraform apply without auto-approve, expect prompt": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + AutoApply: tfe.Bool(false), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply"}, + expectedCmdOutput: `Do you want to perform these actions in workspace "app"?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + workspace, err := tfeClient.Workspaces.ReadWithOptions(ctx, orgName, "app", &tfe.WorkspaceReadOptions{Include: []tfe.WSIncludeOpt{tfe.WSCurrentRun}}) + if err != nil { + t.Fatal(err) + } + if workspace.CurrentRun == nil { + t.Fatal("Expected workspace to have run, but got nil") + 
} + if workspace.CurrentRun.Status != tfe.RunApplied { + t.Fatalf("Expected run status to be `applied`, but is %s", workspace.CurrentRun.Status) + } + }, + }, + "workspace auto apply, terraform apply without auto-approve, expect prompt": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + AutoApply: tfe.Bool(true), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply"}, + expectedCmdOutput: `Do you want to perform these actions in workspace "app"?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + workspace, err := tfeClient.Workspaces.ReadWithOptions(ctx, orgName, "app", &tfe.WorkspaceReadOptions{Include: []tfe.WSIncludeOpt{tfe.WSCurrentRun}}) + if err != nil { + t.Fatal(err) + } + if workspace.CurrentRun == nil { + t.Fatal("Expected workspace to have run, but got nil") + } + if workspace.CurrentRun.Status != tfe.RunApplied { + t.Fatalf("Expected run status to be `applied`, but is %s", workspace.CurrentRun.Status) + } + }, + }, + "workspace manual apply, terraform apply with auto-approve, no prompt": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + AutoApply: tfe.Bool(false), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: 
`Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + expectedCmdOutput: `Apply complete!`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + workspace, err := tfeClient.Workspaces.ReadWithOptions(ctx, orgName, "app", &tfe.WorkspaceReadOptions{Include: []tfe.WSIncludeOpt{tfe.WSCurrentRun}}) + if err != nil { + t.Fatal(err) + } + if workspace.CurrentRun == nil { + t.Fatal("Expected workspace to have run, but got nil") + } + if workspace.CurrentRun.Status != tfe.RunApplied { + t.Fatalf("Expected run status to be `applied`, but is %s", workspace.CurrentRun.Status) + } + }, + }, + "workspace auto apply, terraform apply with auto-approve, no prompt": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + AutoApply: tfe.Bool(true), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + expectedCmdOutput: `Apply complete!`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + workspace, err := tfeClient.Workspaces.ReadWithOptions(ctx, orgName, "app", &tfe.WorkspaceReadOptions{Include: []tfe.WSIncludeOpt{tfe.WSCurrentRun}}) + if err != nil { + t.Fatal(err) + } + if workspace.CurrentRun == nil { + t.Fatal("Expected workspace to have run, but got nil") + } + if workspace.CurrentRun.Status != tfe.RunApplied { + t.Fatalf("Expected run status to be `applied`, but is %s", workspace.CurrentRun.Status) + } + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/apply_no_input_flag_test.go b/pkg/cloud/e2e/apply_no_input_flag_test.go new 
file mode 100644 index 00000000000..059df422335 --- /dev/null +++ b/pkg/cloud/e2e/apply_no_input_flag_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "testing" +) + +func Test_apply_no_input_flag(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + cases := testCases{ + "terraform apply with -input=false": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-input=false"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized`, + }, + { + command: []string{"apply", "-input=false"}, + expectedCmdOutput: `Cannot confirm apply due to -input=false. Please handle run confirmation in the UI.`, + expectError: true, + }, + }, + }, + }, + }, + "terraform apply with auto approve and -input=false": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "cloud-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-input=false"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized`, + }, + { + command: []string{"apply", "-auto-approve", "-input=false"}, + expectedCmdOutput: `Apply complete!`, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/backend_apply_before_init_test.go b/pkg/cloud/e2e/backend_apply_before_init_test.go new file mode 100644 index 00000000000..eb6b3e5ffa9 --- /dev/null +++ b/pkg/cloud/e2e/backend_apply_before_init_test.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "testing" +) + +func Test_backend_apply_before_init(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + cases := testCases{ + "terraform apply with cloud block - blank state": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"apply"}, + expectedCmdOutput: `Terraform Cloud initialization required: please run "terraform init"`, + expectError: true, + }, + }, + }, + }, + }, + "terraform apply with cloud block - local state": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"apply"}, + expectedCmdOutput: `Terraform Cloud initialization required: please run "terraform init"`, + expectError: true, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/env_variables_test.go b/pkg/cloud/e2e/env_variables_test.go new file mode 100644 index 00000000000..e9e67a84771 --- /dev/null +++ b/pkg/cloud/e2e/env_variables_test.go @@ -0,0 +1,269 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-tfe" +) + +func Test_cloud_organization_env_var(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + ctx := context.Background() + org, cleanup := createOrganization(t) + t.Cleanup(cleanup) + + cases := testCases{ + "with TF_CLOUD_ORGANIZATION set": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + remoteWorkspace := "cloud-workspace" + tfBlock := terraformConfigCloudBackendOmitOrg(remoteWorkspace) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "cloud-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, org.Name, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + } + + testRunner(t, cases, 0, fmt.Sprintf("TF_CLOUD_ORGANIZATION=%s", org.Name)) +} + +func Test_cloud_workspace_name_env_var(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + org, orgCleanup := createOrganization(t) + t.Cleanup(orgCleanup) + + wk := createWorkspace(t, org.Name, tfe.WorkspaceCreateOptions{ + Name: tfe.String("cloud-workspace"), + }) + + validCases := testCases{ + "a workspace that exists": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendOmitWorkspaces(org.Name) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: 
[]string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendOmitWorkspaces(org.Name) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: wk.Name, + }, + }, + }, + }, + }, + } + + errCases := testCases{ + "a workspace that doesn't exist": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendOmitWorkspaces(org.Name) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectError: true, + }, + }, + }, + }, + }, + } + + testRunner(t, validCases, 0, fmt.Sprintf(`TF_WORKSPACE=%s`, wk.Name)) + testRunner(t, errCases, 0, fmt.Sprintf(`TF_WORKSPACE=%s`, "the-fires-of-mt-doom")) +} + +func Test_cloud_workspace_tags_env_var(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + org, orgCleanup := createOrganization(t) + t.Cleanup(orgCleanup) + + wkValid := createWorkspace(t, org.Name, tfe.WorkspaceCreateOptions{ + Name: tfe.String("cloud-workspace"), + Tags: []*tfe.Tag{ + {Name: "cloud"}, + }, + }) + + // this will be a workspace that won't have a tag listed in our test configuration + wkInvalid := createWorkspace(t, org.Name, tfe.WorkspaceCreateOptions{ + Name: tfe.String("cloud-workspace-2"), + }) + + validCases := testCases{ + "a workspace with valid tag": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendTags(org.Name, wkValid.TagNames[0]) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + 
}, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendTags(org.Name, wkValid.TagNames[0]) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: wkValid.Name, + }, + }, + }, + }, + }, + } + + errCases := testCases{ + "a workspace not specified by tags": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendTags(org.Name, wkValid.TagNames[0]) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectError: true, + }, + }, + }, + }, + }, + } + + testRunner(t, validCases, 0, fmt.Sprintf(`TF_WORKSPACE=%s`, wkValid.Name)) + testRunner(t, errCases, 0, fmt.Sprintf(`TF_WORKSPACE=%s`, wkInvalid.Name)) +} + +func Test_cloud_null_config(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + org, cleanup := createOrganization(t) + t.Cleanup(cleanup) + + wk := createWorkspace(t, org.Name, tfe.WorkspaceCreateOptions{ + Name: tfe.String("cloud-workspace"), + }) + + cases := testCases{ + "with all env vars set": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendOmitConfig() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigCloudBackendOmitConfig() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: 
[]string{"workspace", "show"}, + expectedCmdOutput: wk.Name, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1, + fmt.Sprintf(`TF_CLOUD_ORGANIZATION=%s`, org.Name), + fmt.Sprintf(`TF_CLOUD_HOSTNAME=%s`, tfeHostname), + fmt.Sprintf(`TF_WORKSPACE=%s`, wk.Name)) +} diff --git a/pkg/cloud/e2e/helper_test.go b/pkg/cloud/e2e/helper_test.go new file mode 100644 index 00000000000..2225418cfdc --- /dev/null +++ b/pkg/cloud/e2e/helper_test.go @@ -0,0 +1,306 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + expect "github.com/Netflix/go-expect" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/go-uuid" + goversion "github.com/hashicorp/go-version" + tfversion "github.com/kubegems/opentofu/version" +) + +const ( + // We need to give the console enough time to hear back. + // 1 minute was too short in some cases, so this gives it ample time. 
+ expectConsoleTimeout = 3 * time.Minute +) + +type tfCommand struct { + command []string + expectedCmdOutput string + expectError bool + userInput []string + postInputOutput []string +} + +type operationSets struct { + commands []tfCommand + prep func(t *testing.T, orgName, dir string) +} + +type testCases map[string]struct { + operations []operationSets + validations func(t *testing.T, orgName string) +} + +func defaultOpts() []expect.ConsoleOpt { + opts := []expect.ConsoleOpt{ + expect.WithDefaultTimeout(expectConsoleTimeout), + } + if verboseMode { + opts = append(opts, expect.WithStdout(os.Stdout)) + } + return opts +} + +func createOrganization(t *testing.T) (*tfe.Organization, func()) { + ctx := context.Background() + org, err := tfeClient.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String("tst-" + randomString(t)), + Email: tfe.String(fmt.Sprintf("%s@tfe.local", randomString(t))), + CostEstimationEnabled: tfe.Bool(false), + }) + if err != nil { + t.Fatal(err) + } + + _, err = tfeClient.Admin.Organizations.Update(ctx, org.Name, tfe.AdminOrganizationUpdateOptions{ + AccessBetaTools: tfe.Bool(true), + }) + if err != nil { + t.Fatal(err) + } + + return org, func() { + if err := tfeClient.Organizations.Delete(ctx, org.Name); err != nil { + t.Errorf("Error destroying organization! WARNING: Dangling resources\n"+ + "may exist! 
The full error is shown below.\n\n"+ + "Organization: %s\nError: %s", org.Name, err) + } + } +} + +func createWorkspace(t *testing.T, orgName string, wOpts tfe.WorkspaceCreateOptions) *tfe.Workspace { + ctx := context.Background() + w, err := tfeClient.Workspaces.Create(ctx, orgName, wOpts) + if err != nil { + t.Fatal(err) + } + + return w +} + +func getWorkspace(workspaces []*tfe.Workspace, workspace string) (*tfe.Workspace, bool) { + for _, ws := range workspaces { + if ws.Name == workspace { + return ws, false + } + } + return nil, true +} + +func randomString(t *testing.T) string { + v, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + return v +} + +func terraformConfigLocalBackend() string { + return ` +terraform { + backend "local" { + } +} + +output "val" { + value = "${terraform.workspace}" +} +` +} + +func terraformConfigRemoteBackendName(org, name string) string { + return fmt.Sprintf(` +terraform { + backend "remote" { + hostname = "%s" + organization = "%s" + + workspaces { + name = "%s" + } + } +} + +output "val" { + value = "${terraform.workspace}" +} +`, tfeHostname, org, name) +} + +func terraformConfigRemoteBackendPrefix(org, prefix string) string { + return fmt.Sprintf(` +terraform { + backend "remote" { + hostname = "%s" + organization = "%s" + + workspaces { + prefix = "%s" + } + } +} + +output "val" { + value = "${terraform.workspace}" +} +`, tfeHostname, org, prefix) +} + +func terraformConfigCloudBackendTags(org, tag string) string { + return fmt.Sprintf(` +terraform { + cloud { + hostname = "%s" + organization = "%s" + + workspaces { + tags = ["%s"] + } + } +} + +output "tag_val" { + value = "%s" +} +`, tfeHostname, org, tag, tag) +} + +func terraformConfigCloudBackendName(org, name string) string { + return fmt.Sprintf(` +terraform { + cloud { + hostname = "%s" + organization = "%s" + + workspaces { + name = "%s" + } + } +} + +output "val" { + value = "${terraform.workspace}" +} +`, tfeHostname, org, name) +} + +func 
terraformConfigCloudBackendOmitOrg(workspaceName string) string { + return fmt.Sprintf(` +terraform { + cloud { + hostname = "%s" + + workspaces { + name = "%s" + } + } +} + +output "val" { + value = "${terraform.workspace}" +} +`, tfeHostname, workspaceName) +} + +func terraformConfigCloudBackendOmitWorkspaces(orgName string) string { + return fmt.Sprintf(` +terraform { + cloud { + hostname = "%s" + organization = "%s" + } +} + +output "val" { + value = "${terraform.workspace}" +} +`, tfeHostname, orgName) +} + +func terraformConfigCloudBackendOmitConfig() string { + return ` +terraform { + cloud {} +} + +output "val" { + value = "${terraform.workspace}" +} +` +} + +func writeMainTF(t *testing.T, block string, dir string) { + f, err := os.Create(fmt.Sprintf("%s/main.tf", dir)) + if err != nil { + t.Fatal(err) + } + _, err = f.WriteString(block) + if err != nil { + t.Fatal(err) + } + f.Close() +} + +// The e2e tests rely on the fact that the terraform version in TFC/E is able to +// run the `cloud` configuration block, which is available in 1.1 and will +// continue to be available in later versions. So this function checks that +// there is a version that is >= 1.1. +func skipWithoutRemoteTerraformVersion(t *testing.T) { + version := tfversion.Version + baseVersion, err := goversion.NewVersion(version) + if err != nil { + t.Fatalf(fmt.Sprintf("Error instantiating go-version for %s", version)) + } + opts := &tfe.AdminTerraformVersionsListOptions{ + ListOptions: tfe.ListOptions{ + PageNumber: 1, + PageSize: 100, + }, + } + hasVersion := false + +findTfVersion: + for { + // TODO: update go-tfe Read() to retrieve a tofu version by name. + // Currently you can only retrieve by ID. 
+ tfVersionList, err := tfeClient.Admin.TerraformVersions.List(context.Background(), opts) + if err != nil { + t.Fatalf("Could not retrieve list of terraform versions: %v", err) + } + for _, item := range tfVersionList.Items { + availableVersion, err := goversion.NewVersion(item.Version) + if err != nil { + t.Logf("Error instantiating go-version for %s", item.Version) + continue + } + if availableVersion.Core().GreaterThanOrEqual(baseVersion.Core()) { + hasVersion = true + break findTfVersion + } + } + + // Exit the loop when we've seen all pages. + if tfVersionList.CurrentPage >= tfVersionList.TotalPages { + break + } + + // Update the page number to get the next page. + opts.PageNumber = tfVersionList.NextPage + } + + if !hasVersion { + t.Skipf("Skipping test because TFC/E does not have current Terraform version to test with (%s)", version) + } +} diff --git a/pkg/cloud/e2e/init_with_empty_tags_test.go b/pkg/cloud/e2e/init_with_empty_tags_test.go new file mode 100644 index 00000000000..44812d2c978 --- /dev/null +++ b/pkg/cloud/e2e/init_with_empty_tags_test.go @@ -0,0 +1,40 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "testing" +) + +func Test_init_with_empty_tags(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + cases := testCases{ + "terraform init with cloud block - no tagged workspaces exist yet": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsTag := "emptytag" + tfBlock := terraformConfigCloudBackendTags(orgName, wsTag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `There are no workspaces with the configured tags`, + userInput: []string{"emptytag-prod"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/main_test.go b/pkg/cloud/e2e/main_test.go new file mode 100644 index 00000000000..ff5f8cc46a2 --- /dev/null +++ b/pkg/cloud/e2e/main_test.go @@ -0,0 +1,256 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + expect "github.com/Netflix/go-expect" + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/e2e" + tfversion "github.com/kubegems/opentofu/version" +) + +var tofuBin string +var cliConfigFileEnv string + +var tfeClient *tfe.Client +var tfeHostname string +var tfeToken string +var verboseMode bool + +func TestMain(m *testing.M) { + teardown := setup() + code := m.Run() + teardown() + + os.Exit(code) +} + +func accTest() bool { + // TF_ACC is set when we want to run acceptance tests, meaning it relies on + // network access. 
+ return os.Getenv("TF_ACC") != "" +} + +func hasHostname() bool { + return os.Getenv("TFE_HOSTNAME") != "" +} + +func hasToken() bool { + return os.Getenv("TFE_TOKEN") != "" +} + +func hasRequiredEnvVars() bool { + return accTest() && hasHostname() && hasToken() +} + +func skipIfMissingEnvVar(t *testing.T) { + if !hasRequiredEnvVars() { + t.Skip("Skipping test, required environment variables missing. Use `TF_ACC`, `TFE_HOSTNAME`, `TFE_TOKEN`") + } +} + +func setup() func() { + tfOutput := flag.Bool("tfoutput", false, "This flag produces the terraform output from tests.") + flag.Parse() + verboseMode = *tfOutput + + setTfeClient() + teardown := setupBinary() + + return func() { + teardown() + } +} +func testRunner(t *testing.T, cases testCases, orgCount int, tfEnvFlags ...string) { + for name, tc := range cases { + tc := tc // rebind tc into this lexical scope + t.Run(name, func(subtest *testing.T) { + subtest.Parallel() + + orgNames := []string{} + for i := 0; i < orgCount; i++ { + organization, cleanup := createOrganization(t) + t.Cleanup(cleanup) + orgNames = append(orgNames, organization.Name) + } + + exp, err := expect.NewConsole(defaultOpts()...) + if err != nil { + subtest.Fatal(err) + } + defer exp.Close() + + tmpDir := t.TempDir() + + tf := e2e.NewBinary(t, tofuBin, tmpDir) + tfEnvFlags = append(tfEnvFlags, "TF_LOG=INFO") + tfEnvFlags = append(tfEnvFlags, cliConfigFileEnv) + for _, env := range tfEnvFlags { + tf.AddEnv(env) + } + + var orgName string + for index, op := range tc.operations { + switch orgCount { + case 0: + orgName = "" + case 1: + orgName = orgNames[0] + default: + orgName = orgNames[index] + } + + op.prep(t, orgName, tf.WorkDir()) + for _, tfCmd := range op.commands { + cmd := tf.Cmd(tfCmd.command...) 
+ cmd.Stdin = exp.Tty() + cmd.Stdout = exp.Tty() + cmd.Stderr = exp.Tty() + + err = cmd.Start() + if err != nil { + subtest.Fatal(err) + } + + if tfCmd.expectedCmdOutput != "" { + got, err := exp.ExpectString(tfCmd.expectedCmdOutput) + if err != nil { + subtest.Fatalf("error while waiting for output\nwant: %s\nerror: %s\noutput\n%s", tfCmd.expectedCmdOutput, err, got) + } + } + + lenInput := len(tfCmd.userInput) + lenInputOutput := len(tfCmd.postInputOutput) + if lenInput > 0 { + for i := 0; i < lenInput; i++ { + input := tfCmd.userInput[i] + exp.SendLine(input) + // use the index to find the corresponding + // output that matches the input. + if lenInputOutput-1 >= i { + output := tfCmd.postInputOutput[i] + _, err := exp.ExpectString(output) + if err != nil { + subtest.Fatal(err) + } + } + } + } + + err = cmd.Wait() + if err != nil && !tfCmd.expectError { + subtest.Fatal(err) + } + } + } + + if tc.validations != nil { + tc.validations(t, orgName) + } + }) + } +} + +func setTfeClient() { + tfeHostname = os.Getenv("TFE_HOSTNAME") + tfeToken = os.Getenv("TFE_TOKEN") + + cfg := &tfe.Config{ + Address: fmt.Sprintf("https://%s", tfeHostname), + Token: tfeToken, + } + + if tfeHostname != "" && tfeToken != "" { + // Create a new TFE client. 
+ client, err := tfe.NewClient(cfg) + if err != nil { + fmt.Printf("Could not create new tfe client: %v\n", err) + os.Exit(1) + } + tfeClient = client + } +} + +func setupBinary() func() { + log.Println("Setting up terraform binary") + tmpTerraformBinaryDir, err := os.MkdirTemp("", "terraform-test") + if err != nil { + fmt.Printf("Could not create temp directory: %v\n", err) + os.Exit(1) + } + log.Println(tmpTerraformBinaryDir) + currentDir, err := os.Getwd() + defer os.Chdir(currentDir) + if err != nil { + fmt.Printf("Could not change directories: %v\n", err) + os.Exit(1) + } + // Getting top level dir + newDir := filepath.ToSlash(currentDir) + dirPaths := strings.Split(newDir, "/") + log.Println(currentDir) + topLevel := len(dirPaths) - 3 + topDir := strings.Join(dirPaths[0:topLevel], "/") + + if err := os.Chdir(topDir); err != nil { + fmt.Printf("Could not change directories: %v\n", err) + os.Exit(1) + } + + cmd := exec.Command( + "go", + "build", + "-o", tmpTerraformBinaryDir, + "-ldflags", fmt.Sprintf("-X \"github.com/kubegems/opentofu/version.Prerelease=%s\"", tfversion.Prerelease), + "./cmd/tofu", + ) + err = cmd.Run() + if err != nil { + fmt.Printf("Could not run exec command: %v\n", err) + os.Exit(1) + } + + credFile := fmt.Sprintf("%s/dev.tfrc", tmpTerraformBinaryDir) + writeCredRC(credFile) + + tofuBin = fmt.Sprintf("%s/terraform", tmpTerraformBinaryDir) + cliConfigFileEnv = fmt.Sprintf("TF_CLI_CONFIG_FILE=%s", credFile) + + return func() { + os.RemoveAll(tmpTerraformBinaryDir) + } +} + +func writeCredRC(file string) { + creds := credentialBlock() + f, err := os.Create(file) + if err != nil { + fmt.Printf("Could not create file: %v\n", err) + os.Exit(1) + } + _, err = f.WriteString(creds) + if err != nil { + fmt.Printf("Could not write credentials: %v\n", err) + os.Exit(1) + } + f.Close() +} + +func credentialBlock() string { + return fmt.Sprintf(` +credentials "%s" { + token = "%s" +}`, tfeHostname, tfeToken) +} diff --git 
a/pkg/cloud/e2e/migrate_state_multi_to_tfc_test.go b/pkg/cloud/e2e/migrate_state_multi_to_tfc_test.go new file mode 100644 index 00000000000..4744cf87329 --- /dev/null +++ b/pkg/cloud/e2e/migrate_state_multi_to_tfc_test.go @@ -0,0 +1,445 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "testing" + + tfe "github.com/hashicorp/go-tfe" + tfversion "github.com/kubegems/opentofu/version" +) + +func Test_migrate_multi_to_tfc_cloud_name_strategy(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := testCases{ + "migrating multiple workspaces to cloud using name strategy; current workspace is 'default'": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "prod"}, + expectedCmdOutput: `Created and switched to workspace "prod"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "select", "default"}, + expectedCmdOutput: `Switched to workspace "default".`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Do you want to copy only your current workspace?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Terraform Cloud has been 
successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `new-workspace`, // this comes from the `prep` function + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "default"`, // this was the output of the current workspace selected before migration + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 1 { + t.Fatalf("Expected the number of workspaces to be 1, but got %d", len(wsList.Items)) + } + ws := wsList.Items[0] + // this workspace name is what exists in the cloud backend configuration block + if ws.Name != "new-workspace" { + t.Fatalf("Expected workspace to be `new-workspace`, but is %s", ws.Name) + } + }, + }, + "migrating multiple workspaces to cloud using name strategy; current workspace is 'prod'": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "prod"}, + expectedCmdOutput: `Created and switched to workspace "prod"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Do you want to copy only your current workspace?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + 
command: []string{"workspace", "list"}, + expectedCmdOutput: `new-workspace`, // this comes from the `prep` function + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "prod"`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + ws := wsList.Items[0] + // this workspace name is what exists in the cloud backend configuration block + if ws.Name != "new-workspace" { + t.Fatalf("Expected workspace to be `new-workspace`, but is %s", ws.Name) + } + }, + }, + "migrating multiple workspaces to cloud using name strategy; 'default' workspace is empty": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"workspace", "new", "workspace1"}, + expectedCmdOutput: `Created and switched to workspace "workspace1"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "workspace2"}, + expectedCmdOutput: `Created and switched to workspace "workspace2"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Do you want to copy only your current workspace?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "select", "default"}, + expectError: true, + }, + { + 
command: []string{"output"}, + expectedCmdOutput: `val = "workspace2"`, // this was the output of the current workspace selected before migration + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 1 { + t.Fatalf("Expected the number of workspaces to be 1, but got %d", len(wsList.Items)) + } + ws := wsList.Items[0] + // this workspace name is what exists in the cloud backend configuration block + if ws.Name != "new-workspace" { + t.Fatalf("Expected workspace to be `new-workspace`, but is %s", ws.Name) + } + }, + }, + } + + testRunner(t, cases, 1) +} + +func Test_migrate_multi_to_tfc_cloud_tags_strategy(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := map[string]struct { + operations []operationSets + validations func(t *testing.T, orgName string) + }{ + "migrating multiple workspaces to cloud using tags strategy; pattern is using prefix `app-*`": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "prod"}, + expectedCmdOutput: `Created and switched to workspace "prod"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "select", "default"}, + expectedCmdOutput: `Switched to workspace "default".`, + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "default"`, + }, + { + command: []string{"workspace", "select", "prod"}, + 
expectedCmdOutput: `Switched to workspace "prod".`, + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "prod"`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud requires all workspaces to be given an explicit name.`, + userInput: []string{"dev", "1", "app-*"}, + postInputOutput: []string{ + `Would you like to rename your workspaces?`, + "How would you like to rename your workspaces?", + "Terraform Cloud has been successfully initialized!"}, + }, + { + command: []string{"workspace", "select", "app-dev"}, + expectedCmdOutput: `Switched to workspace "app-dev".`, + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "default"`, + }, + { + command: []string{"workspace", "select", "app-prod"}, + expectedCmdOutput: `Switched to workspace "app-prod".`, + }, + { + command: []string{"output"}, + expectedCmdOutput: `val = "prod"`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 2 { + t.Fatalf("Expected the number of workspaecs to be 2, but got %d", len(wsList.Items)) + } + expectedWorkspaceNames := []string{"app-prod", "app-dev"} + for _, ws := range wsList.Items { + hasName := false + for _, expectedNames := range expectedWorkspaceNames { + if expectedNames == ws.Name { + hasName = true + } + } + if !hasName { + t.Fatalf("Worksapce %s is not in the expected list of workspaces", ws.Name) + } + } + }, + }, + "migrating multiple workspaces to cloud using tags strategy; existing workspaces": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, 
tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "identity"}, + expectedCmdOutput: `Created and switched to workspace "identity"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "new", "billing"}, + expectedCmdOutput: `Created and switched to workspace "billing"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "select", "default"}, + expectedCmdOutput: `Switched to workspace "default".`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("identity"), + TerraformVersion: tfe.String(tfversion.String()), + }) + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("billing"), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud requires all workspaces to be given an explicit name.`, + userInput: []string{"dev", "1", "app-*"}, + postInputOutput: []string{ + `Would you like to rename your workspaces?`, + "How would you like to rename your workspaces?", + "Terraform Cloud has been successfully initialized!"}, + }, + { + command: []string{"workspace", "select", "app-billing"}, + expectedCmdOutput: `Switched to workspace "app-billing".`, + }, + { + command: []string{"workspace", "select", "app-identity"}, + expectedCmdOutput: `Switched to workspace "app-identity".`, + }, + { + command: 
[]string{"workspace", "select", "app-dev"}, + expectedCmdOutput: `Switched to workspace "app-dev".`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 3 { + t.Fatalf("Expected the number of workspaecs to be 3, but got %d", len(wsList.Items)) + } + expectedWorkspaceNames := []string{"app-billing", "app-dev", "app-identity"} + for _, ws := range wsList.Items { + hasName := false + for _, expectedNames := range expectedWorkspaceNames { + if expectedNames == ws.Name { + hasName = true + } + } + if !hasName { + t.Fatalf("Worksapce %s is not in the expected list of workspaces", ws.Name) + } + } + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go b/pkg/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go new file mode 100644 index 00000000000..219814a76f5 --- /dev/null +++ b/pkg/cloud/e2e/migrate_state_remote_backend_to_tfc_test.go @@ -0,0 +1,528 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "testing" + + tfe "github.com/hashicorp/go-tfe" +) + +func Test_migrate_remote_backend_single_org(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + cases := testCases{ + "migrate remote backend name to tfc name": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + remoteWorkspace := "remote-workspace" + tfBlock := terraformConfigRemoteBackendName(orgName, remoteWorkspace) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "remote"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + expectedCmdOutput: `Apply complete!`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "cloud-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `cloud-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "cloud-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + "migrate remote backend name to tfc same name": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + remoteWorkspace := "remote-workspace" + tfBlock := 
terraformConfigRemoteBackendName(orgName, remoteWorkspace) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "remote"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "remote-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `remote-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "remote-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + "migrate remote backend name to tfc tags": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + remoteWorkspace := "remote-workspace" + tfBlock := terraformConfigRemoteBackendName(orgName, remoteWorkspace) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "remote"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `default`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := 
terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "cloud-workspace", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud requires all workspaces to be given an explicit name.`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `cloud-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 1 { + t.Fatalf("Expected number of workspaces to be 1, but got %d", len(wsList.Items)) + } + ws := wsList.Items[0] + if ws.Name != "cloud-workspace" { + t.Fatalf("Expected workspace to be `cloud-workspace`, but is %s", ws.Name) + } + }, + }, + "migrate remote backend prefix to tfc name strategy single workspace": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-one")}) + prefix := "app-" + tfBlock := terraformConfigRemoteBackendPrefix(orgName, prefix) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "cloud-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating 
from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `cloud-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "cloud-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + "migrate remote backend prefix to tfc name strategy multi workspace": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-one")}) + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-two")}) + prefix := "app-" + tfBlock := terraformConfigRemoteBackendPrefix(orgName, prefix) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `The currently selected workspace (default) does not exist.`, + userInput: []string{"1"}, + postInputOutput: []string{`Terraform has been successfully initialized!`}, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "list"}, + expectedCmdOutput: "* one", // app name retrieved via prefix + }, + { + command: []string{"workspace", "select", "two"}, + expectedCmdOutput: `Switched to workspace "two".`, // app name retrieved via prefix + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "cloud-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + 
expectedCmdOutput: `Do you want to copy only your current workspace?`, + userInput: []string{"yes"}, + postInputOutput: []string{ + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `cloud-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "cloud-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 3 { + t.Fatalf("expected number of workspaces in this org to be 3, but got %d", len(wsList.Items)) + } + _, empty := getWorkspace(wsList.Items, "cloud-workspace") + if empty { + t.Fatalf("expected workspaces to include 'cloud-workspace' but didn't.") + } + _, empty = getWorkspace(wsList.Items, "app-one") + if empty { + t.Fatalf("expected workspaces to include 'app-one' but didn't.") + } + _, empty = getWorkspace(wsList.Items, "app-two") + if empty { + t.Fatalf("expected workspaces to include 'app-two' but didn't.") + } + }, + }, + "migrate remote backend prefix to tfc tags strategy single workspace": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-one")}) + prefix := "app-" + tfBlock := terraformConfigRemoteBackendPrefix(orgName, prefix) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + 
writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "cloud-workspace", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud requires all workspaces to be given an explicit name.`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "list"}, + expectedCmdOutput: `cloud-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "cloud-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + "migrate remote backend prefix to tfc tags strategy multi workspace": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-one")}) + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{Name: tfe.String("app-two")}) + prefix := "app-" + tfBlock := terraformConfigRemoteBackendPrefix(orgName, prefix) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `The currently selected workspace (default) does not exist.`, + userInput: []string{"1"}, + postInputOutput: []string{`Terraform has been successfully initialized!`}, + }, + { + command: []string{"apply"}, + expectedCmdOutput: `Do you want to perform these actions in workspace "app-one"?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "select", "two"}, + }, + { + command: []string{"apply"}, + expectedCmdOutput: `Do you want to perform these actions in workspace "app-two"?`, + userInput: []string{"yes"}, + 
postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Do you wish to proceed?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: "app-two", + }, + { + command: []string{"workspace", "select", "app-one"}, + expectedCmdOutput: `Switched to workspace "app-one".`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 2 { + t.Logf("Expected the number of workspaces to be 2, but got %d", len(wsList.Items)) + } + ws, empty := getWorkspace(wsList.Items, "app-one") + if empty { + t.Fatalf("expected workspaces to include 'app-one' but didn't.") + } + if len(ws.TagNames) == 0 { + t.Fatalf("expected workspaces 'one' to have tags.") + } + ws, empty = getWorkspace(wsList.Items, "app-two") + if empty { + t.Fatalf("expected workspaces to include 'app-two' but didn't.") + } + if len(ws.TagNames) == 0 { + t.Fatalf("expected workspaces 'app-two' to have tags.") + } + }, + }, + } + + testRunner(t, cases, 1) +} + +func Test_migrate_remote_backend_multi_org(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + cases := testCases{ + "migrate remote backend name to tfc name": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + remoteWorkspace := "remote-workspace" + tfBlock := terraformConfigRemoteBackendName(orgName, remoteWorkspace) + writeMainTF(t, tfBlock, dir) + }, + commands: 
[]tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "remote"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "remote-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Migrating from backend "remote" to Terraform Cloud.`, + userInput: []string{"yes", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `remote-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + expectedName := "remote-workspace" + ws, err := tfeClient.Workspaces.Read(ctx, orgName, expectedName) + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatalf("Expected workspace %s to be present, but is not.", expectedName) + } + }, + }, + } + + testRunner(t, cases, 2) +} diff --git a/pkg/cloud/e2e/migrate_state_single_to_tfc_test.go b/pkg/cloud/e2e/migrate_state_single_to_tfc_test.go new file mode 100644 index 00000000000..a490a01f7df --- /dev/null +++ b/pkg/cloud/e2e/migrate_state_single_to_tfc_test.go @@ -0,0 +1,131 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "testing" + + tfe "github.com/hashicorp/go-tfe" +) + +func Test_migrate_single_to_tfc(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := testCases{ + "migrate using cloud workspace name strategy": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Migrating from backend "local" to Terraform Cloud.`, + userInput: []string{"yes", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "list"}, + expectedCmdOutput: `new-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + ws := wsList.Items[0] + if ws.Name != "new-workspace" { + t.Fatalf("Expected workspace to be `new-workspace`, but is %s", ws.Name) + } + }, + }, + "migrate using cloud workspace tags strategy": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Successfully configured the 
backend "local"!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Migrating from backend "local" to Terraform Cloud.`, + userInput: []string{"yes", "new-workspace", "yes"}, + postInputOutput: []string{ + `Should Terraform migrate your existing state?`, + `Terraform Cloud requires all workspaces to be given an explicit name.`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "list"}, + expectedCmdOutput: `new-workspace`, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + ws := wsList.Items[0] + if ws.Name != "new-workspace" { + t.Fatalf("Expected workspace to be `new-workspace`, but is %s", ws.Name) + } + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/migrate_state_tfc_to_other_test.go b/pkg/cloud/e2e/migrate_state_tfc_to_other_test.go new file mode 100644 index 00000000000..a212c19fc78 --- /dev/null +++ b/pkg/cloud/e2e/migrate_state_tfc_to_other_test.go @@ -0,0 +1,50 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "testing" +) + +func Test_migrate_tfc_to_other(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + + cases := testCases{ + "migrate from cloud to local backend": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tfBlock := terraformConfigLocalBackend() + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Migrating state from Terraform Cloud to another backend is not yet implemented.`, + expectError: true, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/migrate_state_tfc_to_tfc_test.go b/pkg/cloud/e2e/migrate_state_tfc_to_tfc_test.go new file mode 100644 index 00000000000..f059ba91bd0 --- /dev/null +++ b/pkg/cloud/e2e/migrate_state_tfc_to_tfc_test.go @@ -0,0 +1,374 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "testing" + + tfe "github.com/hashicorp/go-tfe" + tfversion "github.com/kubegems/opentofu/version" +) + +func Test_migrate_tfc_to_tfc_single_workspace(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := testCases{ + "migrating from name to name": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "prod" + // Creating the workspace here instead of it being created + // dynamically in the Cloud StateMgr because we want to ensure that + // the tofu version selected for the workspace matches the + // tofu version of this current branch. + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `prod`, // this comes from the `prep` function + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "dev" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `dev`, // this comes from the `prep` function + }, + }, + }, + }, + 
validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, nil) + if err != nil { + t.Fatal(err) + } + // this workspace name is what exists in the cloud backend configuration block + if len(wsList.Items) != 2 { + t.Fatal("Expected number of workspaces to be 2") + } + }, + }, + "migrating from name to tags": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "prod" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `There are no workspaces with the configured tags (app)`, + userInput: []string{"new-workspace"}, + postInputOutput: []string{ + `Terraform can create a properly tagged workspace for you now.`, + `Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `new-workspace`, // this comes from the `prep` function + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "app", + }) + if err != nil { + t.Fatal(err) + } + // this workspace name is what exists in the cloud backend configuration block + if len(wsList.Items) != 1 { + t.Fatal("Expected number of workspaces to be 1") + } + }, + }, + 
"migrating from name to tags without ignore-version flag": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "prod" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("prod"), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendName(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + // This is only here to ensure that the updated tofu version is + // present in the workspace, and it does not default to a lower + // version that does not support `cloud`. + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("new-workspace"), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `There are no workspaces with the configured tags (app)`, + userInput: []string{"new-workspace"}, + postInputOutput: []string{ + `Terraform can create a properly tagged workspace for you now.`, + `Terraform Cloud has been successfully initialized!`}, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + // We created the workspace, so it will be there. We could not complete the state migration, + // though, so the workspace should be empty. 
+ ws, err := tfeClient.Workspaces.ReadWithOptions(ctx, orgName, "new-workspace", &tfe.WorkspaceReadOptions{Include: []tfe.WSIncludeOpt{tfe.WSCurrentRun}}) + if err != nil { + t.Fatal(err) + } + if ws.CurrentRun != nil { + t.Fatal("Expected to workspace be empty") + } + }, + }, + } + + testRunner(t, cases, 1) +} + +func Test_migrate_tfc_to_tfc_multiple_workspace(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + ctx := context.Background() + + cases := testCases{ + "migrating from multiple workspaces via tags to name": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("app-prod"), + Tags: []*tfe.Tag{{Name: tag}}, + TerraformVersion: tfe.String(tfversion.String()), + }) + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("app-staging"), + Tags: []*tfe.Tag{{Name: tag}}, + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `The currently selected workspace (default) does not exist.`, + userInput: []string{"1"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"apply"}, + expectedCmdOutput: `Do you want to perform these actions in workspace "app-prod"?`, + userInput: []string{"yes"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"workspace", "select", "app-staging"}, + expectedCmdOutput: `Switched to workspace "app-staging".`, + }, + { + command: []string{"apply", "-auto-approve"}, + postInputOutput: []string{`Apply complete!`}, + }, + { + command: []string{"output"}, + expectedCmdOutput: `tag_val = "app"`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + name := "service" + 
// Doing this here instead of relying on dynamic workspace creation + // because we want to set the tofu version here so that it is + // using the right version for post init operations. + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendName(orgName, name) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + postInputOutput: []string{`tag_val = "service"`}, + }, + { + command: []string{"workspace", "show"}, + expectedCmdOutput: `service`, // this comes from the `prep` function + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + ws, err := tfeClient.Workspaces.Read(ctx, orgName, "service") + if err != nil { + t.Fatal(err) + } + if ws == nil { + t.Fatal("Expected to workspace not be empty") + } + }, + }, + "migrating from multiple workspaces via tags to other tags": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + tag := "app" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("app-prod"), + Tags: []*tfe.Tag{{Name: tag}}, + TerraformVersion: tfe.String(tfversion.String()), + }) + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String("app-staging"), + Tags: []*tfe.Tag{{Name: tag}}, + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `The currently selected workspace (default) does not exist.`, + userInput: []string{"1"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + { + command: []string{"apply", "-auto-approve"}, + expectedCmdOutput: `Apply complete!`, + }, + 
{ + command: []string{"workspace", "select", "app-staging"}, + expectedCmdOutput: `Switched to workspace "app-staging".`, + }, + { + command: []string{"apply", "-auto-approve"}, + expectedCmdOutput: `Apply complete!`, + }, + }, + }, + { + prep: func(t *testing.T, orgName, dir string) { + tag := "billing" + tfBlock := terraformConfigCloudBackendTags(orgName, tag) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init", "-ignore-remote-version"}, + expectedCmdOutput: `There are no workspaces with the configured tags (billing)`, + userInput: []string{"new-app-prod"}, + postInputOutput: []string{`Terraform Cloud has been successfully initialized!`}, + }, + }, + }, + }, + validations: func(t *testing.T, orgName string) { + wsList, err := tfeClient.Workspaces.List(ctx, orgName, &tfe.WorkspaceListOptions{ + Tags: "billing", + }) + if err != nil { + t.Fatal(err) + } + if len(wsList.Items) != 1 { + t.Logf("Expected the number of workspaces to be 1, but got %d", len(wsList.Items)) + } + _, empty := getWorkspace(wsList.Items, "new-app-prod") + if empty { + t.Fatalf("expected workspaces to include 'new-app-prod' but didn't.") + } + }, + }, + } + + testRunner(t, cases, 1) +} diff --git a/pkg/cloud/e2e/run_variables_test.go b/pkg/cloud/e2e/run_variables_test.go new file mode 100644 index 00000000000..de401f4b4fe --- /dev/null +++ b/pkg/cloud/e2e/run_variables_test.go @@ -0,0 +1,86 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "fmt" + "testing" + + tfe "github.com/hashicorp/go-tfe" + tfversion "github.com/kubegems/opentofu/version" +) + +func terraformConfigRequiredVariable(org, name string) string { + return fmt.Sprintf(` +terraform { + cloud { + hostname = "%s" + organization = "%s" + + workspaces { + name = "%s" + } + } +} + +variable "foo" { + type = string +} + +variable "baz" { + type = string +} + +output "test_cli" { + value = var.foo +} + +output "test_env" { + value = var.baz +} + +`, tfeHostname, org, name) +} + +func Test_cloud_run_variables(t *testing.T) { + t.Parallel() + skipIfMissingEnvVar(t) + skipWithoutRemoteTerraformVersion(t) + + cases := testCases{ + "run variables from CLI arg": { + operations: []operationSets{ + { + prep: func(t *testing.T, orgName, dir string) { + wsName := "new-workspace" + _ = createWorkspace(t, orgName, tfe.WorkspaceCreateOptions{ + Name: tfe.String(wsName), + TerraformVersion: tfe.String(tfversion.String()), + }) + tfBlock := terraformConfigRequiredVariable(orgName, wsName) + writeMainTF(t, tfBlock, dir) + }, + commands: []tfCommand{ + { + command: []string{"init"}, + expectedCmdOutput: `Terraform Cloud has been successfully initialized!`, + }, + { + command: []string{"plan", "-var", "foo=bar"}, + expectedCmdOutput: ` + test_cli = "bar"`, + }, + { + command: []string{"plan", "-var", "foo=bar"}, + expectedCmdOutput: ` + test_env = "qux"`, + }, + }, + }, + }, + }, + } + + testRunner(t, cases, 1, "TF_CLI_ARGS=-no-color", "TF_VAR_baz=qux") +} diff --git a/pkg/cloud/errored.tfstate b/pkg/cloud/errored.tfstate new file mode 100644 index 00000000000..0e31ca62859 --- /dev/null +++ b/pkg/cloud/errored.tfstate @@ -0,0 +1,25 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 1, + "lineage": "30a4d634-f765-186a-f411-7dfa798a767e", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider": 
"provider[\"registry.opentofu.org/hashicorp/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "yes" + }, + "sensitive_attributes": [] + } + ] + } + ], + "check_results": null +} diff --git a/pkg/cloud/errors.go b/pkg/cloud/errors.go new file mode 100644 index 00000000000..47096feb981 --- /dev/null +++ b/pkg/cloud/errors.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "errors" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// String based errors +var ( + errApplyDiscarded = errors.New("Apply discarded.") + errDestroyDiscarded = errors.New("Destroy discarded.") + errRunApproved = errors.New("approved using the UI or API") + errRunDiscarded = errors.New("discarded using the UI or API") + errRunOverridden = errors.New("overridden using the UI or API") + errApplyNeedsUIConfirmation = errors.New("Cannot confirm apply due to -input=false. Please handle run confirmation in the UI.") + errPolicyOverrideNeedsUIConfirmation = errors.New("Cannot override soft failed policy checks when -input=false. Please open the run in the UI to override.") +) + +// Diagnostic error messages +var ( + invalidWorkspaceConfigMissingValues = tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + fmt.Sprintf("Missing workspace mapping strategy. 
Either workspace \"tags\" or \"name\" is required.\n\n%s", workspaceConfigurationHelp), + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + ) + + invalidWorkspaceConfigMisconfiguration = tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + fmt.Sprintf("Only one of workspace \"tags\" or \"name\" is allowed.\n\n%s", workspaceConfigurationHelp), + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + ) +) + +const ignoreRemoteVersionHelp = "If you're sure you want to upgrade the state, you can force OpenTofu to continue using the -ignore-remote-version flag. This may result in an unusable workspace." + +func missingConfigAttributeAndEnvVar(attribute string, envVar string) tfdiags.Diagnostic { + detail := strings.TrimSpace(fmt.Sprintf("\"%s\" must be set in the cloud configuration or as an environment variable: %s.\n", attribute, envVar)) + return tfdiags.AttributeValue( + tfdiags.Error, + "Invalid or missing required argument", + detail, + cty.Path{cty.GetAttrStep{Name: attribute}}) +} + +func incompatibleWorkspaceTerraformVersion(message string, ignoreVersionConflict bool) tfdiags.Diagnostic { + severity := tfdiags.Error + suggestion := ignoreRemoteVersionHelp + if ignoreVersionConflict { + severity = tfdiags.Warning + suggestion = "" + } + description := strings.TrimSpace(fmt.Sprintf("%s\n\n%s", message, suggestion)) + return tfdiags.Sourceless(severity, "Incompatible TF version", description) +} + +func invalidWorkspaceConfigInconsistentNameAndEnvVar() tfdiags.Diagnostic { + return tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + fmt.Sprintf("The workspace defined using the environment variable \"TF_WORKSPACE\" is not consistent with the workspace \"name\" in the configuration.\n\n%s", workspaceConfigurationHelp), + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + ) +} diff --git a/pkg/cloud/migration.go b/pkg/cloud/migration.go new file mode 100644 index 00000000000..d8c5403c560 --- /dev/null +++ 
b/pkg/cloud/migration.go @@ -0,0 +1,111 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "github.com/kubegems/opentofu/pkg/configs" + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// Most of the logic for migrating into and out of "cloud mode" actually lives +// in the "command" package as part of the general backend init mechanisms, +// but we have some cloud-specific helper functionality here. + +// ConfigChangeMode is a rough way to think about different situations that +// our backend change and state migration codepaths need to distinguish in +// the context of Cloud integration mode. +type ConfigChangeMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type ConfigChangeMode + +const ( + // ConfigMigrationIn represents when the configuration calls for using + // Cloud mode but the working directory state disagrees. + ConfigMigrationIn ConfigChangeMode = '↘' + + // ConfigMigrationOut represents when the working directory state calls + // for using Cloud mode but the configuration disagrees. + ConfigMigrationOut ConfigChangeMode = '↖' + + // ConfigChangeInPlace represents when both the working directory state + // and the config call for using Cloud mode, and so there might be + // (but won't necessarily be) cloud settings changing, but we don't + // need to do any actual migration. + ConfigChangeInPlace ConfigChangeMode = '↻' + + // ConfigChangeIrrelevant represents when the config and working directory + // state disagree but neither calls for using Cloud mode, and so the + // Cloud integration is not involved in dealing with this. 
+ ConfigChangeIrrelevant ConfigChangeMode = '🤷' +) + +// DetectConfigChangeType encapsulates the fiddly logic for deciding what kind +// of Cloud configuration change we seem to be making, based on the existing +// working directory state (if any) and the current configuration. +// +// This is a pretty specialized sort of thing focused on finicky details of +// the way we currently model working directory settings and config, so its +// signature probably won't survive any non-trivial refactoring of how +// the CLI layer thinks about backends/state storage. +func DetectConfigChangeType(wdState *legacy.BackendState, config *configs.Backend, haveLocalStates bool) ConfigChangeMode { + // Although externally the cloud integration isn't really a "backend", + // internally we treat it a bit like one just to preserve all of our + // existing interfaces that assume backends. "cloud" is the placeholder + // name we use for it, even though that isn't a backend that's actually + // available for selection in the usual way. + wdIsCloud := wdState != nil && wdState.Type == "cloud" + configIsCloud := config != nil && config.Type == "cloud" + + // "uninit" here means that the working directory is totally uninitialized, + // even taking into account the possibility of implied local state that + // therefore doesn't typically require explicit "tofu init". + wdIsUninit := wdState == nil && !haveLocalStates + + switch { + case configIsCloud: + switch { + case wdIsCloud || wdIsUninit: + // If config has cloud and the working directory is completely + // uninitialized then we assume we're doing the initial activation + // of this working directory for an already-migrated-to-cloud + // remote state. + return ConfigChangeInPlace + default: + // Otherwise, we seem to be migrating into cloud mode from a backend. 
+ return ConfigMigrationIn + } + default: + switch { + case wdIsCloud: + // If working directory is already cloud but config isn't, we're + // migrating away from cloud to a backend. + return ConfigMigrationOut + default: + // Otherwise, this situation seems to be something unrelated to + // cloud mode and so outside of our scope here. + return ConfigChangeIrrelevant + } + } + +} + +func (m ConfigChangeMode) InvolvesCloud() bool { + switch m { + case ConfigMigrationIn, ConfigMigrationOut, ConfigChangeInPlace: + return true + default: + return false + } +} + +func (m ConfigChangeMode) IsCloudMigration() bool { + switch m { + case ConfigMigrationIn, ConfigMigrationOut: + return true + default: + return false + } +} diff --git a/pkg/cloud/migration_test.go b/pkg/cloud/migration_test.go new file mode 100644 index 00000000000..70618e134b8 --- /dev/null +++ b/pkg/cloud/migration_test.go @@ -0,0 +1,143 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/configs" + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestDetectConfigChangeType(t *testing.T) { + tests := map[string]struct { + stateType string + configType string + localStates bool + want ConfigChangeMode + wantInvolvesCloud bool + wantIsCloudMigration bool + }{ + "init cloud": { + ``, `cloud`, false, + ConfigChangeInPlace, + true, false, + }, + "reinit cloud": { + `cloud`, `cloud`, false, + ConfigChangeInPlace, + true, false, + }, + "migrate default local to cloud with existing local state": { + ``, `cloud`, true, + ConfigMigrationIn, + true, true, + }, + "migrate local to cloud": { + `local`, `cloud`, false, + ConfigMigrationIn, + true, true, + }, + "migrate remote to cloud": { + `remote`, `cloud`, false, + ConfigMigrationIn, + true, true, + }, + "migrate cloud to local": { + `cloud`, `local`, false, + ConfigMigrationOut, + true, true, + }, + "migrate cloud to remote": { + `cloud`, `remote`, false, + ConfigMigrationOut, + true, true, + }, + "migrate cloud to default local": { + `cloud`, ``, false, + ConfigMigrationOut, + true, true, + }, + + // Various other cases can potentially be valid (decided by the + // Terraform CLI layer) but are irrelevant for Cloud mode purposes. 
+ "init default local": { + ``, ``, false, + ConfigChangeIrrelevant, + false, false, + }, + "init default local with existing local state": { + ``, ``, true, + ConfigChangeIrrelevant, + false, false, + }, + "init remote backend": { + ``, `remote`, false, + ConfigChangeIrrelevant, + false, false, + }, + "init remote backend with existing local state": { + ``, `remote`, true, + ConfigChangeIrrelevant, + false, false, + }, + "reinit remote backend": { + `remote`, `remote`, false, + ConfigChangeIrrelevant, + false, false, + }, + "migrate local to remote backend": { + `local`, `remote`, false, + ConfigChangeIrrelevant, + false, false, + }, + "migrate remote to default local": { + `remote`, ``, false, + ConfigChangeIrrelevant, + false, false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var state *legacy.BackendState + var config *configs.Backend + if test.stateType != "" { + state = &legacy.BackendState{ + Type: test.stateType, + // everything else is irrelevant for our purposes here + } + } + if test.configType != "" { + config = &configs.Backend{ + Type: test.configType, + // everything else is irrelevant for our purposes here + } + } + got := DetectConfigChangeType(state, config, test.localStates) + + if got != test.want { + t.Errorf( + "wrong result\nstate type: %s\nconfig type: %s\nlocal states: %t\n\ngot: %s\nwant: %s", + test.stateType, test.configType, test.localStates, + got, test.want, + ) + } + if got, want := got.InvolvesCloud(), test.wantInvolvesCloud; got != want { + t.Errorf( + "wrong InvolvesCloud result\ngot: %t\nwant: %t", + got, want, + ) + } + if got, want := got.IsCloudMigration(), test.wantIsCloudMigration; got != want { + t.Errorf( + "wrong IsCloudMigration result\ngot: %t\nwant: %t", + got, want, + ) + } + }) + } +} diff --git a/pkg/cloud/remote_test.go b/pkg/cloud/remote_test.go new file mode 100644 index 00000000000..574e180e330 --- /dev/null +++ b/pkg/cloud/remote_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 
The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "flag" + "os" + "testing" + "time" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + + // Make sure TF_FORCE_LOCAL_BACKEND is unset + os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + // Reduce delays to make tests run faster + backoffMin = 1.0 + backoffMax = 1.0 + planConfigurationVersionsPollInterval = 1 * time.Millisecond + runPollInterval = 1 * time.Millisecond + + os.Exit(m.Run()) +} diff --git a/pkg/cloud/state.go b/pkg/cloud/state.go new file mode 100644 index 00000000000..d0f38818862 --- /dev/null +++ b/pkg/cloud/state.go @@ -0,0 +1,639 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + tfe "github.com/hashicorp/go-tfe" + uuid "github.com/hashicorp/go-uuid" + + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/remote" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +const ( + // HeaderSnapshotInterval is the header key that controls the snapshot interval + HeaderSnapshotInterval = "x-terraform-snapshot-interval" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state to TFC. 
This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. +type State struct { + // We track two pieces of meta data in addition to the state itself: + // + // lineage - the state's unique ID + // serial - the monotonic counter of "versions" of the state + // + // Both of these (along with state) have a sister field + // that represents the values read in from an existing source. + // All three of these values are used to determine if the new + // state has changed from an existing state we read in. + lineage, readLineage string + serial, readSerial uint64 + mu sync.Mutex + state, readState *states.State + disableLocks bool + tfeClient *tfe.Client + organization string + workspace *tfe.Workspace + stateUploadErr bool + forcePush bool + lockInfo *statemgr.LockInfo + + // The server can optionally return an X-Terraform-Snapshot-Interval header + // in its response to the "Create State Version" operation, which specifies + // a number of seconds the server would prefer us to wait before trying + // to write a new snapshot. If this is non-zero then we'll wait at least + // this long before allowing another intermediate snapshot. This does + // not affect final snapshots after an operation, which will always + // be written to the remote API. + stateSnapshotInterval time.Duration + // If the header X-Terraform-Snapshot-Interval is present then + // we will enable snapshots + enableIntermediateSnapshots bool + + encryption encryption.StateEncryption +} + +var ErrStateVersionUnauthorizedUpgradeState = errors.New(strings.TrimSpace(` +You are not authorized to read the full state version containing outputs. +State versions created by tofu v1.3.0 and newer do not require this level +of authorization and therefore this error can usually be fixed by upgrading the +remote state version. 
+`)) + +var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) +var _ local.IntermediateStateConditionalPersister = (*State)(nil) + +// statemgr.Reader impl. +func (s *State) State() *states.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + +// WriteStateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !force { + checkFile := statefile.New(s.state, s.lineage, s.serial) + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + s.forcePush = force + + return nil +} + +// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended +// to be called during initialization of a state manager and should not be +// called after any of the statemgr.Full interface methods have been called. +func (s *State) DisableLocks() { + s.disableLocks = true +} + +// StateSnapshotMeta returns the metadata from the most recently persisted +// or refreshed persistent state snapshot. +// +// This is an implementation of statemgr.PersistentMeta. +func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { + return statemgr.SnapshotMeta{ + Lineage: s.lineage, + Serial: s.serial, + } +} + +// statemgr.Writer impl. 
+func (s *State) WriteState(state *states.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = state.DeepCopy() + s.forcePush = false + + return nil +} + +// PersistState uploads a snapshot of the latest state as a StateVersion to Terraform Cloud +func (s *State) PersistState(schemas *tofu.Schemas) error { + s.mu.Lock() + defer s.mu.Unlock() + + log.Printf("[DEBUG] cloud/state: state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] cloud/state: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.readState != nil { + lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage + serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial + stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) + if stateUnchanged && lineageUnchanged && serialUnchanged { + // If the state, lineage or serial haven't changed at all then we have nothing to do. + return nil + } + s.serial++ + } else { + // We might be writing a new state altogether, but before we do that + // we'll check to make sure there isn't already a snapshot present + // that we ought to be updating. 
+ err := s.refreshState() + if err != nil { + return fmt.Errorf("failed checking for existing remote state: %w", err) + } + log.Printf("[DEBUG] cloud/state: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] cloud/state: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.lineage == "" { // indicates that no state snapshot is present yet + lineage, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate initial lineage: %w", err) + } + s.lineage = lineage + s.serial++ + } + } + + f := statefile.New(s.state, s.lineage, s.serial) + + var buf bytes.Buffer + err := statefile.Write(f, &buf, s.encryption) + if err != nil { + return err + } + + var jsonState []byte + if schemas != nil { + jsonState, err = jsonstate.Marshal(f, schemas) + if err != nil { + return err + } + } + + stateFile, err := statefile.Read(bytes.NewReader(buf.Bytes()), s.encryption) + if err != nil { + return fmt.Errorf("failed to read state: %w", err) + } + + ov, err := jsonstate.MarshalOutputs(stateFile.State.RootModule().OutputValues) + if err != nil { + return fmt.Errorf("failed to translate outputs: %w", err) + } + jsonStateOutputs, err := json.Marshal(ov) + if err != nil { + return fmt.Errorf("failed to marshal outputs to json: %w", err) + } + + err = s.uploadState(s.lineage, s.serial, s.forcePush, buf.Bytes(), jsonState, jsonStateOutputs) + if err != nil { + s.stateUploadErr = true + return fmt.Errorf("error uploading state: %w", err) + } + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. + // We've potentially overwritten (via force) the state, lineage + // and / or serial (and serial was incremented) so we copy over all + // three fields so everything matches the new state and a subsequent + // operation would correctly detect no changes to the lineage, serial or state. 
+ s.readState = s.state.DeepCopy() + s.readLineage = s.lineage + s.readSerial = s.serial + + return nil +} + +// ShouldPersistIntermediateState implements local.IntermediateStateConditionalPersister +func (s *State) ShouldPersistIntermediateState(info *local.IntermediateStatePersistInfo) bool { + if info.ForcePersist { + return true + } + + // This value is controlled by a x-terraform-snapshot-interval header intercepted during + // state-versions API responses + if !s.enableIntermediateSnapshots { + return false + } + + // Our persist interval is the largest of either the caller's requested + // interval or the server's requested interval. + wantInterval := info.RequestedPersistInterval + if s.stateSnapshotInterval > wantInterval { + wantInterval = s.stateSnapshotInterval + } + + currentInterval := time.Since(info.LastPersist) + return currentInterval >= wantInterval +} + +func (s *State) uploadStateFallback(ctx context.Context, lineage string, serial uint64, isForcePush bool, state, jsonState, jsonStateOutputs []byte) error { + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(lineage), + Serial: tfe.Int64(int64(serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + Force: tfe.Bool(isForcePush), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + JSONState: tfe.String(base64.StdEncoding.EncodeToString(jsonState)), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(jsonStateOutputs)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + runID := os.Getenv("TFE_RUN_ID") + if runID != "" { + options.Run = &tfe.Run{ID: runID} + } + + // Create the new state. 
+ _, err := s.tfeClient.StateVersions.Create(ctx, s.workspace.ID, options) + return err +} + +func (s *State) uploadState(lineage string, serial uint64, isForcePush bool, state, jsonState, jsonStateOutputs []byte) error { + ctx := context.Background() + + options := tfe.StateVersionUploadOptions{ + StateVersionCreateOptions: tfe.StateVersionCreateOptions{ + Lineage: tfe.String(lineage), + Serial: tfe.Int64(int64(serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + Force: tfe.Bool(isForcePush), + JSONStateOutputs: tfe.String(base64.StdEncoding.EncodeToString(jsonStateOutputs)), + }, + RawState: state, + RawJSONState: jsonState, + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + runID := os.Getenv("TFE_RUN_ID") + if runID != "" { + options.StateVersionCreateOptions.Run = &tfe.Run{ID: runID} + } + + // The server is allowed to dynamically request a different time interval + // than we'd normally use, for example if it's currently under heavy load + // and needs clients to backoff for a while. + ctx = tfe.ContextWithResponseHeaderHook(ctx, s.readSnapshotIntervalHeader) + + // Create the new state. + _, err := s.tfeClient.StateVersions.Upload(ctx, s.workspace.ID, options) + if errors.Is(err, tfe.ErrStateVersionUploadNotSupported) { + // Create the new state with content included in the request (Terraform Enterprise v202306-1 and below) + log.Println("[INFO] Detected that state version upload is not supported. Retrying using compatibility state upload.") + return s.uploadStateFallback(ctx, lineage, serial, isForcePush, state, jsonState, jsonStateOutputs) + } + + return err +} + +// Lock calls the Client's Lock method if it's implemented. 
+func (s *State) Lock(info *statemgr.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return "", nil + } + ctx := context.Background() + + lockErr := &statemgr.LockError{Info: s.lockInfo} + + // Lock the workspace. + _, err := s.tfeClient.Workspaces.Lock(ctx, s.workspace.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by OpenTofu"), + }) + if err != nil { + if err == tfe.ErrWorkspaceLocked { + lockErr.Info = info + err = fmt.Errorf("%w (lock ID: \"%s/%s\")", err, s.organization, s.workspace.Name) + } + lockErr.Err = err + return "", lockErr + } + + s.lockInfo = info + + return s.lockInfo.ID, nil +} + +// statemgr.Refresher impl. +func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.refreshState() +} + +// refreshState is the main implementation of RefreshState, but split out so +// that we can make internal calls to it from methods that are already holding +// the s.mu lock. +func (s *State) refreshState() error { + payload, err := s.getStatePayload() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + s.readState = nil + s.lineage = "" + s.serial = 0 + return nil + } + + stateFile, err := statefile.Read(bytes.NewReader(payload.Data), s.encryption) + if err != nil { + return err + } + + s.lineage = stateFile.Lineage + s.serial = stateFile.Serial + s.state = stateFile.State + + // Properties from the remote must be separate so we can + // track changes as lineage, serial and/or state are mutated + s.readLineage = stateFile.Lineage + s.readSerial = stateFile.Serial + s.readState = s.state.DeepCopy() + return nil +} + +func (s *State) getStatePayload() (*remote.Payload, error) { + ctx := context.Background() + + // Check the x-terraform-snapshot-interval header to see if it has a non-empty + // value which would indicate snapshots are enabled + ctx = tfe.ContextWithResponseHeaderHook(ctx, s.readSnapshotIntervalHeader) + + sv, err := 
s.tfeClient.StateVersions.ReadCurrent(ctx, s.workspace.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. + return nil, nil + } + return nil, fmt.Errorf("error retrieving state: %w", err) + } + + state, err := s.tfeClient.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("error downloading state: %w", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +// Unlock calls the Client's Unlock method if it's implemented. +func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return nil + } + + ctx := context.Background() + + // We first check if there was an error while uploading the latest + // state. If so, we will not unlock the workspace to prevent any + // changes from being applied until the correct state is uploaded. + if s.stateUploadErr { + return nil + } + + lockErr := &statemgr.LockError{Info: s.lockInfo} + + // With lock info this should be treated as a normal unlock. + if s.lockInfo != nil { + // Verify the expected lock ID. + if s.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Unlock the workspace. + _, err := s.tfeClient.Workspaces.Unlock(ctx, s.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil + } + + // Verify the optional force-unlock lock ID. + if s.organization+"/"+s.workspace.Name != id { + lockErr.Err = fmt.Errorf( + "lock ID %q does not match existing lock ID \"%s/%s\"", + id, + s.organization, + s.workspace.Name, + ) + return lockErr + } + + // Force unlock the workspace. 
+ _, err := s.tfeClient.Workspaces.ForceUnlock(ctx, s.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} + +// Delete the remote state. +func (s *State) Delete(force bool) error { + + var err error + + isSafeDeleteSupported := s.workspace.Permissions.CanForceDelete != nil + if force || !isSafeDeleteSupported { + err = s.tfeClient.Workspaces.Delete(context.Background(), s.organization, s.workspace.Name) + } else { + err = s.tfeClient.Workspaces.SafeDelete(context.Background(), s.organization, s.workspace.Name) + } + + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("error deleting workspace %s: %w", s.workspace.Name, err) + } + + return nil +} + +// GetRootOutputValues fetches output values from Terraform Cloud +func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { + ctx := context.Background() + + so, err := s.tfeClient.StateVersionOutputs.ReadCurrent(ctx, s.workspace.ID) + + if err != nil { + return nil, fmt.Errorf("could not read state version outputs: %w", err) + } + + result := make(map[string]*states.OutputValue) + + for _, output := range so.Items { + if output.DetailedType == nil { + // If there is no detailed type information available, this state was probably created + // with a version of terraform < 1.3.0. In this case, we'll eject completely from this + // function and fall back to the old behavior of reading the entire state file, which + // requires a higher level of authorization. + log.Printf("[DEBUG] falling back to reading full state") + + if err := s.RefreshState(); err != nil { + return nil, fmt.Errorf("failed to load state: %w", err) + } + + state := s.State() + if state == nil { + // We know that there is supposed to be state (and this is not simply a new workspace + // without state) because the fallback is only invoked when outputs are present but + // detailed types are not available. 
+ return nil, ErrStateVersionUnauthorizedUpgradeState + } + + return state.RootModule().OutputValues, nil + } + + if output.Sensitive { + // Since this is a sensitive value, the output must be requested explicitly in order to + // read its value, which is assumed to be present by callers + sensitiveOutput, err := s.tfeClient.StateVersionOutputs.Read(ctx, output.ID) + if err != nil { + return nil, fmt.Errorf("could not read state version output %s: %w", output.ID, err) + } + output.Value = sensitiveOutput.Value + } + + cval, err := tfeOutputToCtyValue(*output) + if err != nil { + return nil, fmt.Errorf("could not decode output %s (ID %s)", output.Name, output.ID) + } + + result[output.Name] = &states.OutputValue{ + Value: cval, + Sensitive: output.Sensitive, + } + } + + return result, nil +} + +func clamp(val, min, max int64) int64 { + if val < min { + return min + } else if val > max { + return max + } + return val +} + +func (s *State) readSnapshotIntervalHeader(status int, header http.Header) { + // Only proceed if this came from tfe.v2 API + contentType := header.Get("Content-Type") + if !strings.Contains(contentType, tfe.ContentTypeJSONAPI) { + log.Printf("[TRACE] Skipping intermediate state interval because Content-Type was %q", contentType) + return + } + + intervalStr := header.Get(HeaderSnapshotInterval) + + if intervalSecs, err := strconv.ParseInt(intervalStr, 10, 64); err == nil { + // More than an hour is an unreasonable delay, so we'll just + // limit to one hour max. + intervalSecs = clamp(intervalSecs, 0, 3600) + s.stateSnapshotInterval = time.Duration(intervalSecs) * time.Second + } else { + // If the header field is either absent or invalid then we'll + // just choose zero, which effectively means that we'll just use + // the caller's requested interval instead. If the caller has no + // requested interval, or it is zero, then we will disable snapshots. 
+ s.stateSnapshotInterval = time.Duration(0) + } + + // We will only enable snapshots for intervals greater than zero + log.Printf("[TRACE] Intermediate state interval is set by header to %v", s.stateSnapshotInterval) + s.enableIntermediateSnapshots = s.stateSnapshotInterval > 0 +} + +// tfeOutputToCtyValue decodes a combination of TFE output value and detailed-type to create a +// cty value that is suitable for use in tofu. +func tfeOutputToCtyValue(output tfe.StateVersionOutput) (cty.Value, error) { + var result cty.Value + bufType, err := json.Marshal(output.DetailedType) + if err != nil { + return result, fmt.Errorf("could not marshal output %s type: %w", output.ID, err) + } + + var ctype cty.Type + err = ctype.UnmarshalJSON(bufType) + if err != nil { + return result, fmt.Errorf("could not interpret output %s type: %w", output.ID, err) + } + + result, err = gocty.ToCtyValue(output.Value, ctype) + if err != nil { + return result, fmt.Errorf("could not interpret value %v as type %s for output %s: %w", result, ctype.FriendlyName(), output.ID, err) + } + + return result, nil +} diff --git a/pkg/cloud/state_test.go b/pkg/cloud/state_test.go new file mode 100644 index 00000000000..bc23f16d28f --- /dev/null +++ b/pkg/cloud/state_test.go @@ -0,0 +1,428 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bytes" + "context" + "os" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/zclconf/go-cty/cty" +) + +func TestState_impl(t *testing.T) { + var _ statemgr.Reader = new(State) + var _ statemgr.Writer = new(State) + var _ statemgr.Persister = new(State) + var _ statemgr.Refresher = new(State) + var _ statemgr.OutputReader = new(State) + var _ statemgr.Locker = new(State) +} + +type ExpectedOutput struct { + Name string + Sensitive bool + IsNull bool +} + +func TestState_GetRootOutputValues(t *testing.T) { + b, bCleanup := testBackendWithOutputs(t) + defer bCleanup() + + state := &State{tfeClient: b.client, organization: b.organization, workspace: &tfe.Workspace{ + ID: "ws-abcd", + }, encryption: encryption.StateEncryptionDisabled()} + outputs, err := state.GetRootOutputValues() + + if err != nil { + t.Fatalf("error returned from GetRootOutputValues: %s", err) + } + + cases := []ExpectedOutput{ + { + Name: "sensitive_output", + Sensitive: true, + IsNull: false, + }, + { + Name: "nonsensitive_output", + Sensitive: false, + IsNull: false, + }, + { + Name: "object_output", + Sensitive: false, + IsNull: false, + }, + { + Name: "list_output", + Sensitive: false, + IsNull: false, + }, + } + + if len(outputs) != len(cases) { + t.Errorf("Expected %d item but %d were returned", len(cases), len(outputs)) + } + + for _, testCase := range cases { + so, ok := outputs[testCase.Name] + if !ok { + t.Fatalf("Expected key %s but it was not found", testCase.Name) + } + if so.Value.IsNull() != testCase.IsNull { + t.Errorf("Key %s does not match null expectation %v", testCase.Name, testCase.IsNull) + } + if 
so.Sensitive != testCase.Sensitive { + t.Errorf("Key %s does not match sensitive expectation %v", testCase.Name, testCase.Sensitive) + } + } +} + +func TestState(t *testing.T) { + var buf bytes.Buffer + s := statemgr.TestFullInitialState() + sf := statefile.New(s, "stub-lineage", 2) + err := statefile.Write(sf, &buf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + state := testCloudState(t) + + jsonState, err := os.ReadFile("../command/testdata/show-json-state/sensitive-variables/output.json") + if err != nil { + t.Fatal(err) + } + + jsonStateOutputs := []byte(` +{ + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +}`) + + if err := state.uploadState(state.lineage, state.serial, state.forcePush, data, jsonState, jsonStateOutputs); err != nil { + t.Fatalf("put: %s", err) + } + + payload, err := state.getStatePayload() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(payload.Data, data) { + t.Fatalf("expected full state %q\n\ngot: %q", string(payload.Data), string(data)) + } + + if err := state.Delete(true); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err := state.getStatePayload() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("expected empty state, got: %q", string(p.Data)) + } +} + +func TestCloudLocks(t *testing.T) { + back, bCleanup := testBackendWithName(t) + defer bCleanup() + + a, err := back.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + b, err := back.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + lockerA, ok := a.(statemgr.Locker) + if !ok { + t.Fatal("client A not a statemgr.Locker") + } + + lockerB, ok := b.(statemgr.Locker) + if !ok { + t.Fatal("client B not a statemgr.Locker") + } + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := 
statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + if _, ok := err.(*statemgr.LockError); !ok { + t.Errorf("expected a LockError, but was %t: %s", err, err) + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } +} + +func TestDelete_SafeDeleteNotSupported(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = nil + state.workspace.ResourceCount = 5 + + // Typically delete(false) should safe-delete a cloud workspace, which should fail on this workspace with resources + // However, since we have set the workspace canForceDelete permission to nil, we should fall back to force delete + if err := state.Delete(false); err != nil { + t.Fatalf("delete: %s", err) + } + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestDelete_ForceDelete(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = tfe.Bool(true) + state.workspace.ResourceCount = 5 + + if err := state.Delete(true); err != nil { + t.Fatalf("delete: %s", err) + } + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + 
t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestDelete_SafeDelete(t *testing.T) { + state := testCloudState(t) + workspaceId := state.workspace.ID + state.workspace.Permissions.CanForceDelete = tfe.Bool(false) + state.workspace.ResourceCount = 5 + + // safe-deleting a workspace with resources should fail + err := state.Delete(false) + if err == nil { + t.Fatalf("workspace should have failed to safe delete") + } + + // safe-deleting a workspace with resources should succeed once it has no resources + state.workspace.ResourceCount = 0 + if err = state.Delete(false); err != nil { + t.Fatalf("workspace safe-delete err: %s", err) + } + + workspace, err := state.tfeClient.Workspaces.ReadByID(context.Background(), workspaceId) + if workspace != nil || err != tfe.ErrResourceNotFound { + t.Fatalf("workspace %s not deleted", workspaceId) + } +} + +func TestState_PersistState(t *testing.T) { + t.Run("Initial PersistState", func(t *testing.T) { + cloudState := testCloudState(t) + + if cloudState.readState != nil { + t.Fatal("expected nil initial readState") + } + + err := cloudState.PersistState(nil) + if err != nil { + t.Fatalf("expected no error, got %q", err) + } + + var expectedSerial uint64 = 1 + if cloudState.readSerial != expectedSerial { + t.Fatalf("expected initial state readSerial to be %d, got %d", expectedSerial, cloudState.readSerial) + } + }) + + t.Run("Snapshot Interval Backpressure Header", func(t *testing.T) { + // The "Create a State Version" API is allowed to return a special + // HTTP response header X-Terraform-Snapshot-Interval, in which case + // we should remember the number of seconds it specifies and delay + // creating any more intermediate state snapshots for that many seconds. 
+ + cloudState := testCloudState(t) + + if cloudState.stateSnapshotInterval != 0 { + t.Error("state manager already has a nonzero snapshot interval") + } + + if cloudState.enableIntermediateSnapshots { + t.Error("expected state manager to have disabled snapshots") + } + + // For this test we'll use a real client talking to a fake server, + // since HTTP-level concerns like headers are out of scope for the + // mock client we typically use in other tests in this package, which + // aim to abstract away HTTP altogether. + + // Didn't want to repeat myself here + for _, testCase := range []struct { + expectedInterval time.Duration + snapshotsEnabled bool + }{ + { + expectedInterval: 300 * time.Second, + snapshotsEnabled: true, + }, + { + expectedInterval: 0 * time.Second, + snapshotsEnabled: false, + }, + } { + server := testServerWithSnapshotsEnabled(t, testCase.snapshotsEnabled) + + defer server.Close() + cfg := &tfe.Config{ + Address: server.URL, + BasePath: "api", + Token: "placeholder", + } + client, err := tfe.NewClient(cfg) + if err != nil { + t.Fatal(err) + } + cloudState.tfeClient = client + + err = cloudState.RefreshState() + if err != nil { + t.Fatal(err) + } + cloudState.WriteState(states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "boop"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("beep"), false, + ) + })) + + err = cloudState.PersistState(nil) + if err != nil { + t.Fatal(err) + } + + // The PersistState call above should have sent a request to the test + // server and got back the x-terraform-snapshot-interval header, whose + // value should therefore now be recorded in the relevant field. 
+ if got := cloudState.stateSnapshotInterval; got != testCase.expectedInterval { + t.Errorf("wrong state snapshot interval after PersistState\ngot: %s\nwant: %s", got, testCase.expectedInterval) + } + + if got, want := cloudState.enableIntermediateSnapshots, testCase.snapshotsEnabled; got != want { + t.Errorf("expected disable intermediate snapshots to be\ngot: %t\nwant: %t", got, want) + } + } + }) +} + +func TestState_ShouldPersistIntermediateState(t *testing.T) { + cloudState := testCloudState(t) + + testCases := []struct { + Enabled bool + LastPersist time.Time + Interval time.Duration + Expected bool + Force bool + Description string + }{ + { + Interval: 20 * time.Second, + Enabled: true, + Expected: true, + Description: "Not persisted yet", + }, + { + Interval: 20 * time.Second, + Enabled: false, + Expected: false, + Description: "Intermediate snapshots not enabled", + }, + { + Interval: 20 * time.Second, + Enabled: false, + Force: true, + Expected: true, + Description: "Force persist", + }, + { + Interval: 20 * time.Second, + LastPersist: time.Now().Add(-15 * time.Second), + Enabled: true, + Expected: false, + Description: "Last persisted 15s ago", + }, + { + Interval: 20 * time.Second, + LastPersist: time.Now().Add(-25 * time.Second), + Enabled: true, + Expected: true, + Description: "Last persisted 25s ago", + }, + { + Interval: 5 * time.Second, + LastPersist: time.Now().Add(-15 * time.Second), + Enabled: true, + Expected: true, + Description: "Last persisted 15s ago, but interval is 5s", + }, + } + + for _, testCase := range testCases { + cloudState.enableIntermediateSnapshots = testCase.Enabled + cloudState.stateSnapshotInterval = testCase.Interval + + actual := cloudState.ShouldPersistIntermediateState(&local.IntermediateStatePersistInfo{ + LastPersist: testCase.LastPersist, + ForcePersist: testCase.Force, + }) + if actual != testCase.Expected { + t.Errorf("%s: expected %v but got %v", testCase.Description, testCase.Expected, actual) + } + } +} diff 
--git a/pkg/cloud/testdata/.gitignore b/pkg/cloud/testdata/.gitignore new file mode 100644 index 00000000000..15498bbfb82 --- /dev/null +++ b/pkg/cloud/testdata/.gitignore @@ -0,0 +1 @@ +!*.log diff --git a/pkg/cloud/testdata/apply-destroy/apply.log b/pkg/cloud/testdata/apply-destroy/apply.log new file mode 100644 index 00000000000..d126547d95c --- /dev/null +++ b/pkg/cloud/testdata/apply-destroy/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Destroying... (ID: 8657651096157629581) +null_resource.hello: Destruction complete after 0s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. diff --git a/pkg/cloud/testdata/apply-destroy/main.tf b/pkg/cloud/testdata/apply-destroy/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-destroy/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-destroy/plan.log b/pkg/cloud/testdata/apply-destroy/plan.log new file mode 100644 index 00000000000..1d38d416892 --- /dev/null +++ b/pkg/cloud/testdata/apply-destroy/plan.log @@ -0,0 +1,22 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + - null_resource.hello + + +Plan: 0 to add, 0 to change, 1 to destroy. 
diff --git a/pkg/cloud/testdata/apply-json-with-error/main.tf b/pkg/cloud/testdata/apply-json-with-error/main.tf new file mode 100644 index 00000000000..6fa9534f484 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers = { + random = "${guid()}" + } +} diff --git a/pkg/cloud/testdata/apply-json-with-error/plan-redacted.json b/pkg/cloud/testdata/apply-json-with-error/plan-redacted.json new file mode 100644 index 00000000000..0967ef424bc --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-error/plan-redacted.json @@ -0,0 +1 @@ +{} diff --git a/pkg/cloud/testdata/apply-json-with-error/plan.log b/pkg/cloud/testdata/apply-json-with-error/plan.log new file mode 100644 index 00000000000..b877f1e5834 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-error/plan.log @@ -0,0 +1,2 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T12:12:25.477403-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"error","@message":"Error: Unsupported block type","@module":"terraform.ui","@timestamp":"2023-01-20T12:12:25.615995-05:00","diagnostic":{"severity":"error","summary":"Unsupported block type","detail":"Blocks of type \"triggers\" are not expected here. Did you mean to define argument \"triggers\"? 
If so, use the equals sign to assign it a value.","range":{"filename":"main.tf","start":{"line":2,"column":3,"byte":35},"end":{"line":2,"column":11,"byte":43}},"snippet":{"context":"resource \"null_resource\" \"foo\"","code":" triggers {","start_line":2,"highlight_start_offset":2,"highlight_end_offset":10,"values":[]}},"type":"diagnostic"} diff --git a/pkg/cloud/testdata/apply-json-with-outputs/apply.log b/pkg/cloud/testdata/apply-json-with-outputs/apply.log new file mode 100644 index 00000000000..fe26806895f --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-outputs/apply.log @@ -0,0 +1,5 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:14.916732Z","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Creating...","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:16.390332Z","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Creation complete after 0s [id=7091618264040236234]","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:16.391654Z","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create","id_key":"id","id_value":"7091618264040236234","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"Apply complete! 
Resources: 1 added, 0 changed, 0 destroyed.","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:16.992073Z","changes":{"add":1,"change":0,"remove":0,"operation":"apply"},"type":"change_summary"} +{"@level":"info","@message":"Outputs: 3","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:16.992183Z","outputs":{"complex":{"sensitive":false,"type":["object",{"keyA":["object",{"someList":["tuple",["number","number","number"]]}],"keyB":["object",{"someBool":"bool","someStr":"string"}]}],"value":{"keyA":{"someList":[1,2,3]},"keyB":{"someBool":true,"someStr":"hello"}}},"secret":{"sensitive":true,"type":"string","value":"my-secret"},"simple":{"sensitive":false,"type":["tuple",["string","string"]],"value":["some","list"]}},"type":"outputs"} diff --git a/pkg/cloud/testdata/apply-json-with-outputs/main.tf b/pkg/cloud/testdata/apply-json-with-outputs/main.tf new file mode 100644 index 00000000000..d801668459d --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-outputs/main.tf @@ -0,0 +1,22 @@ +resource "null_resource" "foo" {} + +output "simple" { + value = ["some", "list"] +} + +output "secret" { + value = "my-secret" + sensitive = true +} + +output "complex" { + value = { + keyA = { + someList = [1, 2, 3] + } + keyB = { + someBool = true + someStr = "hello" + } + } +} diff --git a/pkg/cloud/testdata/apply-json-with-outputs/plan-redacted.json b/pkg/cloud/testdata/apply-json-with-outputs/plan-redacted.json new file mode 100644 index 00000000000..49551af229c --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-outputs/plan-redacted.json @@ -0,0 +1 @@ 
+{"plan_format_version":"1.1","resource_drift":[],"resource_changes":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","change":{"actions":["create"],"before":null,"after":{"triggers":null},"after_unknown":{"id":true},"before_sensitive":false,"after_sensitive":{}}}],"relevant_attributes":[],"output_changes":{"complex":{"actions":["create"],"before":null,"after":{"keyA":{"someList":[1,2,3]},"keyB":{"someBool":true,"someStr":"hello"}},"after_unknown":false,"before_sensitive":false,"after_sensitive":false},"secret":{"actions":["create"],"before":null,"after":"8517896e47af3c9ca19a694ea0d6cc30b0dccf08598f33d93e583721fd5f3032","after_unknown":false,"before_sensitive":true,"after_sensitive":true},"simple":{"actions":["create"],"before":null,"after":["some","list"],"after_unknown":false,"before_sensitive":false,"after_sensitive":false}},"provider_schemas":{"registry.opentofu.org/hashicorp/null":{"provider":{"version":0,"block":{"description_kind":"plain"}},"resource_schemas":{"null_resource":{"version":0,"block":{"attributes":{"id":{"type":"string","description":"This is set to a random value at create time.","description_kind":"plain","computed":true},"triggers":{"type":["map","string"],"description":"A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.","description_kind":"plain","optional":true}},"description":"The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.","description_kind":"plain"}}},"data_source_schemas":{"null_data_source":{"version":0,"block":{"attributes":{"has_computed_default":{"type":"string","description":"If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. 
This argument exists primarily for testing and has little practical use.","description_kind":"plain","optional":true,"computed":true},"id":{"type":"string","description":"This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.","description_kind":"plain","deprecated":true,"computed":true},"inputs":{"type":["map","string"],"description":"A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.","description_kind":"plain","optional":true},"outputs":{"type":["map","string"],"description":"After the data source is \"read\", a copy of the `inputs` map.","description_kind":"plain","computed":true},"random":{"type":"string","description":"A random value. This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.opentofu.org/providers/hashicorp/random) for more practical random number use-cases.","description_kind":"plain","computed":true}},"description":"The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. 
The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n","description_kind":"plain","deprecated":true}}}}},"provider_format_version":"1.0"} \ No newline at end of file diff --git a/pkg/cloud/testdata/apply-json-with-outputs/plan.log b/pkg/cloud/testdata/apply-json-with-outputs/plan.log new file mode 100644 index 00000000000..357586ac3e3 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-outputs/plan.log @@ -0,0 +1,6 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:02.177699Z","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to create","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:03.842915Z","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:03.842951Z","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} +{"@level":"info","@message":"Outputs: 3","@module":"terraform.ui","@timestamp":"2023-01-20T21:13:03.842965Z","outputs":{"complex":{"sensitive":false,"action":"create"},"secret":{"sensitive":true,"action":"create"},"simple":{"sensitive":false,"action":"create"}},"type":"outputs"} + + diff --git a/pkg/cloud/testdata/apply-json-with-provisioner-error/apply.log b/pkg/cloud/testdata/apply-json-with-provisioner-error/apply.log new file mode 100644 index 00000000000..64f949fb8e6 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner-error/apply.log @@ -0,0 +1,9 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.623068-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} 
+{"@level":"info","@message":"null_resource.foo: Destroying... [id=5383176453498935794]","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.725584-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"delete","id_key":"id","id_value":"5383176453498935794"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Destruction complete after 0s","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.728526-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"delete","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"null_resource.foo: Creating...","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.745016-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Provisioning with 'local-exec'...","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.748796-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec"},"type":"provision_start"} +{"@level":"info","@message":"null_resource.foo: (local-exec): Executing: [\"/bin/sh\" \"-c\" \"exit 
125\"]","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.749082-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec","output":"Executing: [\"/bin/sh\" \"-c\" \"exit 125\"]"},"type":"provision_progress"} +{"@level":"info","@message":"null_resource.foo: (local-exec) Provisioning errored","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.751770-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec"},"type":"provision_errored"} +{"@level":"info","@message":"null_resource.foo: Creation errored after 0s","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.752082-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create","elapsed_seconds":0},"type":"apply_errored"} +{"@level":"error","@message":"Error: local-exec provisioner error","@module":"terraform.ui","@timestamp":"2023-02-16T10:13:14.761681-05:00","diagnostic":{"severity":"error","summary":"local-exec provisioner error","detail":"Error running command 'exit 125': exit status 125. 
Output: ","address":"null_resource.foo","range":{"filename":"main.tf","start":{"line":2,"column":28,"byte":60},"end":{"line":2,"column":29,"byte":61}},"snippet":{"context":"resource \"null_resource\" \"foo\"","code":" provisioner \"local-exec\" {","start_line":2,"highlight_start_offset":27,"highlight_end_offset":28,"values":[]}},"type":"diagnostic"} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner-error/main.tf b/pkg/cloud/testdata/apply-json-with-provisioner-error/main.tf new file mode 100644 index 00000000000..fb1ce036c00 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + provisioner "local-exec" { + command = "exit 125" + } +} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json b/pkg/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner-error/plan-redacted.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + 
], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. 
This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner-error/plan.log b/pkg/cloud/testdata/apply-json-with-provisioner-error/plan.log new file mode 100644 index 00000000000..26d39210b73 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner-error/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.623068-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to create","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822722-05:00","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822787-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner/apply.log b/pkg/cloud/testdata/apply-json-with-provisioner/apply.log new file mode 100644 index 
00000000000..78acd7862e4 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner/apply.log @@ -0,0 +1,10 @@ +{"@level":"info","@message":"null_resource.foo: Destroying... [id=102500065134967380]","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.614616-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"delete","id_key":"id","id_value":"102500065134967380"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Destruction complete after 0s","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.615777-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"delete","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"null_resource.foo: Creating...","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.621975-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Provisioning with 'local-exec'...","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.622630-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec"},"type":"provision_start"} +{"@level":"info","@message":"null_resource.foo: (local-exec): Executing: [\"/bin/sh\" \"-c\" \"echo Hello 
World!\"]","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.622702-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec","output":"Executing: [\"/bin/sh\" \"-c\" \"echo Hello World!\"]"},"type":"provision_progress"} +{"@level":"info","@message":"null_resource.foo: (local-exec): Hello World!","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.623236-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec","output":"Hello World!"},"type":"provision_progress"} +{"@level":"info","@message":"null_resource.foo: (local-exec) Provisioning complete","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.623275-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"provisioner":"local-exec"},"type":"provision_complete"} +{"@level":"info","@message":"null_resource.foo: Creation complete after 0s [id=7836952171100801169]","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.623320-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create","id_key":"id","id_value":"7836952171100801169","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"Apply complete! 
Resources: 1 added, 0 changed, 1 destroyed.","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.631098-05:00","changes":{"add":1,"change":0,"remove":1,"operation":"apply"},"type":"change_summary"} +{"@level":"info","@message":"Outputs: 0","@module":"terraform.ui","@timestamp":"2023-02-16T10:15:39.631112-05:00","outputs":{},"type":"outputs"} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner/main.tf b/pkg/cloud/testdata/apply-json-with-provisioner/main.tf new file mode 100644 index 00000000000..20bf745ad4d --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + provisioner "local-exec" { + command = "echo Hello World!" + } +} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner/plan-redacted.json b/pkg/cloud/testdata/apply-json-with-provisioner/plan-redacted.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner/plan-redacted.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map 
of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. 
This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/apply-json-with-provisioner/plan.log b/pkg/cloud/testdata/apply-json-with-provisioner/plan.log new file mode 100644 index 00000000000..26d39210b73 --- /dev/null +++ b/pkg/cloud/testdata/apply-json-with-provisioner/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.623068-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to create","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822722-05:00","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822787-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/cloud/testdata/apply-json/apply.log b/pkg/cloud/testdata/apply-json/apply.log new file mode 100644 index 00000000000..1238b4c9ff5 --- /dev/null +++ 
b/pkg/cloud/testdata/apply-json/apply.log @@ -0,0 +1,5 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.623068-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Creating...","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.874882-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"apply_start"} +{"@level":"info","@message":"null_resource.foo: Creation complete after 0s [id=3573948886993018026]","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.878389-05:00","hook":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create","id_key":"id","id_value":"3573948886993018026","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"Apply complete! 
Resources: 1 added, 0 changed, 0 destroyed.","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.887223-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"apply"},"type":"change_summary"} +{"@level":"info","@message":"Outputs: 0","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.887259-05:00","outputs":{},"type":"outputs"} diff --git a/pkg/cloud/testdata/apply-json/main.tf b/pkg/cloud/testdata/apply-json/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-json/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-json/plan-redacted.json b/pkg/cloud/testdata/apply-json/plan-redacted.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/cloud/testdata/apply-json/plan-redacted.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + 
"optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. 
This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/apply-json/plan.log b/pkg/cloud/testdata/apply-json/plan.log new file mode 100644 index 00000000000..3ac5e43fcf9 --- /dev/null +++ b/pkg/cloud/testdata/apply-json/plan.log @@ -0,0 +1,4 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.623068-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to create","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822722-05:00","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-20T15:50:04.822787-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} + diff --git a/pkg/cloud/testdata/apply-no-changes/main.tf b/pkg/cloud/testdata/apply-no-changes/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-no-changes/main.tf @@ -0,0 +1 @@ +resource 
"null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-no-changes/plan.log b/pkg/cloud/testdata/apply-no-changes/plan.log new file mode 100644 index 00000000000..70416815133 --- /dev/null +++ b/pkg/cloud/testdata/apply-no-changes/plan.log @@ -0,0 +1,17 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. + +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. diff --git a/pkg/cloud/testdata/apply-no-changes/policy.log b/pkg/cloud/testdata/apply-no-changes/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/cloud/testdata/apply-no-changes/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. 
+ +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/apply-policy-hard-failed/main.tf b/pkg/cloud/testdata/apply-policy-hard-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-policy-hard-failed/plan.log b/pkg/cloud/testdata/apply-policy-hard-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/apply-policy-hard-failed/policy.log b/pkg/cloud/testdata/apply-policy-hard-failed/policy.log new file mode 100644 index 00000000000..5d6e6935b93 --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. 
+ +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/apply-policy-passed/apply.log b/pkg/cloud/testdata/apply-policy-passed/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-passed/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/pkg/cloud/testdata/apply-policy-passed/main.tf b/pkg/cloud/testdata/apply-policy-passed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-policy-passed/plan.log b/pkg/cloud/testdata/apply-policy-passed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/apply-policy-passed/policy.log b/pkg/cloud/testdata/apply-policy-passed/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/apply-policy-soft-failed/apply.log b/pkg/cloud/testdata/apply-policy-soft-failed/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-soft-failed/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/pkg/cloud/testdata/apply-policy-soft-failed/main.tf b/pkg/cloud/testdata/apply-policy-soft-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-policy-soft-failed/plan.log b/pkg/cloud/testdata/apply-policy-soft-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/apply-policy-soft-failed/policy.log b/pkg/cloud/testdata/apply-policy-soft-failed/policy.log new file mode 100644 index 00000000000..3e4ebedf617 --- /dev/null +++ b/pkg/cloud/testdata/apply-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/apply-variables/apply.log b/pkg/cloud/testdata/apply-variables/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/cloud/testdata/apply-variables/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/pkg/cloud/testdata/apply-variables/main.tf b/pkg/cloud/testdata/apply-variables/main.tf new file mode 100644 index 00000000000..955e8b4c09a --- /dev/null +++ b/pkg/cloud/testdata/apply-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply-variables/plan.log b/pkg/cloud/testdata/apply-variables/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/apply-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/apply-with-error/main.tf b/pkg/cloud/testdata/apply-with-error/main.tf new file mode 100644 index 00000000000..bc45f28f563 --- /dev/null +++ b/pkg/cloud/testdata/apply-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/pkg/cloud/testdata/apply-with-error/plan.log b/pkg/cloud/testdata/apply-with-error/plan.log new file mode 100644 index 00000000000..4344a372290 --- /dev/null +++ b/pkg/cloud/testdata/apply-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/pkg/cloud/testdata/apply/apply.log b/pkg/cloud/testdata/apply/apply.log new file mode 100644 index 00000000000..901994838f2 --- /dev/null +++ b/pkg/cloud/testdata/apply/apply.log @@ -0,0 +1,7 @@ +Terraform v0.11.10 + +Initializing plugins and modules... +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
diff --git a/pkg/cloud/testdata/apply/main.tf b/pkg/cloud/testdata/apply/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/apply/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/apply/plan.log b/pkg/cloud/testdata/apply/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/apply/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/empty/.gitignore b/pkg/cloud/testdata/empty/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/cloud/testdata/plan-bookmark/bookmark.json b/pkg/cloud/testdata/plan-bookmark/bookmark.json new file mode 100644 index 00000000000..0a1c73302a2 --- /dev/null +++ b/pkg/cloud/testdata/plan-bookmark/bookmark.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "app.terraform.io" +} diff --git a/pkg/cloud/testdata/plan-cost-estimation/ce.log b/pkg/cloud/testdata/plan-cost-estimation/ce.log new file mode 100644 index 00000000000..e51fef1edc6 --- /dev/null +++ b/pkg/cloud/testdata/plan-cost-estimation/ce.log @@ -0,0 +1,6 @@ ++---------+------+-----+-------------+----------------------+ +| PRODUCT | NAME | SKU | DESCRIPTION | DELTA | ++---------+------+-----+-------------+----------------------+ ++---------+------+-----+-------------+----------------------+ +| TOTAL | $0.000 USD / 720 HRS | ++---------+------+-----+-------------+----------------------+ diff --git a/pkg/cloud/testdata/plan-cost-estimation/cost-estimate.log b/pkg/cloud/testdata/plan-cost-estimation/cost-estimate.log new file mode 100644 index 00000000000..67a50928cec --- /dev/null +++ b/pkg/cloud/testdata/plan-cost-estimation/cost-estimate.log @@ -0,0 +1,5 @@ +Cost estimation: + +Waiting for cost estimation to complete... 
+Resources: 1 of 1 estimated + $25.488/mo +$25.488 \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-cost-estimation/main.tf b/pkg/cloud/testdata/plan-cost-estimation/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-cost-estimation/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-cost-estimation/plan.log b/pkg/cloud/testdata/plan-cost-estimation/plan.log new file mode 100644 index 00000000000..fae287f4529 --- /dev/null +++ b/pkg/cloud/testdata/plan-cost-estimation/plan.log @@ -0,0 +1,20 @@ +Terraform v0.12.9 +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/plan-import-config-gen-exists/generated.tf b/pkg/cloud/testdata/plan-import-config-gen-exists/generated.tf new file mode 100644 index 00000000000..1efdb231aff --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-exists/generated.tf @@ -0,0 +1,8 @@ +# __generated__ by Terraform +# Please review these resources and move them into your main configuration files. 
+ +# __generated__ by Terraform from "bar" +resource "terraform_data" "foo" { + input = null + triggers_replace = null +} diff --git a/pkg/cloud/testdata/plan-import-config-gen-exists/main.tf b/pkg/cloud/testdata/plan-import-config-gen-exists/main.tf new file mode 100644 index 00000000000..8257ac5af63 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-exists/main.tf @@ -0,0 +1,4 @@ +import { + id = "bar" + to = terraform_data.foo +} diff --git a/pkg/cloud/testdata/plan-import-config-gen-validation-error/generated.tf.expected b/pkg/cloud/testdata/plan-import-config-gen-validation-error/generated.tf.expected new file mode 100644 index 00000000000..c7628258303 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-validation-error/generated.tf.expected @@ -0,0 +1,8 @@ +# __generated__ by OpenTofu +# Please review these resources and move them into your main configuration files. + +# __generated__ by OpenTofu from "bar" +resource "terraform_data" "foo" { + input = null + triggers_replace = null +} diff --git a/pkg/cloud/testdata/plan-import-config-gen-validation-error/main.tf b/pkg/cloud/testdata/plan-import-config-gen-validation-error/main.tf new file mode 100644 index 00000000000..8257ac5af63 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-validation-error/main.tf @@ -0,0 +1,4 @@ +import { + id = "bar" + to = terraform_data.foo +} diff --git a/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan-redacted.json b/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan-redacted.json new file mode 100644 index 00000000000..9e24e22517c --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan-redacted.json @@ -0,0 +1,127 @@ +{ + "format_version": "1.2", + "terraform_version": "1.5.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": "terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", 
+ "schema_version": 0, + "values": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": "terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", + "change": { + "actions": [ + "no-op" + ], + "before": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "after": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "after_unknown": {}, + "before_sensitive": {}, + "after_sensitive": {}, + "importing": { + "id": "bar" + }, + "generated_config": "resource \"terraform_data\" \"foo\" {\n input = null\n triggers_replace = null\n}" + } + } + ], + "prior_state": { + "format_version": "1.0", + "terraform_version": "1.6.0", + "values": { + "root_module": { + "resources": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": "terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", + "schema_version": 0, + "values": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "terraform": { + "name": "terraform", + "full_name": "terraform.io/builtin/terraform" + } + }, + "root_module": {} + }, + "provider_schemas": { + "terraform.io/builtin/terraform": { + "resource_schemas": { + "terraform_data": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "input": { + "type": "dynamic", + "description_kind": "plain", + "optional": true + }, + "output": { + "type": "dynamic", + "description_kind": "plain", + "computed": true + }, + "triggers_replace": { + "type": "dynamic", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + } + } + }, + 
"timestamp": "2023-05-30T03:34:55Z" +} \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan.log b/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan.log new file mode 100644 index 00000000000..192b2b801aa --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen-validation-error/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.5.0","@module":"terraform.ui","@timestamp":"2023-05-29T21:30:07.206963-07:00","terraform":"1.5.0","type":"version","ui":"1.1"} +{"@level":"info","@message":"Plan: 0 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-05-29T21:30:08.302799-07:00","changes":{"add":0,"change":0,"import":0,"remove":0,"operation":"plan"},"type":"change_summary"} +{"@level":"error","@message":"Error: Conflicting configuration arguments","@module":"terraform.ui","@timestamp":"2023-05-29T21:30:08.302847-07:00","diagnostic":{"severity":"error","summary":"Conflicting configuration arguments","detail":"Not allowed","address":"terraform_data.foo","range":{"filename":"generated.tf","start":{"line":22,"column":33,"byte":867},"end":{"line":22,"column":35,"byte":869}}},"type":"diagnostic"} \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-import-config-gen/generated.tf.expected b/pkg/cloud/testdata/plan-import-config-gen/generated.tf.expected new file mode 100644 index 00000000000..c7628258303 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen/generated.tf.expected @@ -0,0 +1,8 @@ +# __generated__ by OpenTofu +# Please review these resources and move them into your main configuration files. 
+ +# __generated__ by OpenTofu from "bar" +resource "terraform_data" "foo" { + input = null + triggers_replace = null +} diff --git a/pkg/cloud/testdata/plan-import-config-gen/main.tf b/pkg/cloud/testdata/plan-import-config-gen/main.tf new file mode 100644 index 00000000000..8257ac5af63 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen/main.tf @@ -0,0 +1,4 @@ +import { + id = "bar" + to = terraform_data.foo +} diff --git a/pkg/cloud/testdata/plan-import-config-gen/plan-redacted.json b/pkg/cloud/testdata/plan-import-config-gen/plan-redacted.json new file mode 100644 index 00000000000..9e24e22517c --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen/plan-redacted.json @@ -0,0 +1,127 @@ +{ + "format_version": "1.2", + "terraform_version": "1.5.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": "terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", + "schema_version": 0, + "values": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": "terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", + "change": { + "actions": [ + "no-op" + ], + "before": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "after": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "after_unknown": {}, + "before_sensitive": {}, + "after_sensitive": {}, + "importing": { + "id": "bar" + }, + "generated_config": "resource \"terraform_data\" \"foo\" {\n input = null\n triggers_replace = null\n}" + } + } + ], + "prior_state": { + "format_version": "1.0", + "terraform_version": "1.6.0", + "values": { + "root_module": { + "resources": [ + { + "address": "terraform_data.foo", + "mode": "managed", + "type": 
"terraform_data", + "name": "foo", + "provider_name": "terraform.io/builtin/terraform", + "schema_version": 0, + "values": { + "id": "bar", + "input": null, + "output": null, + "triggers_replace": null + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "terraform": { + "name": "terraform", + "full_name": "terraform.io/builtin/terraform" + } + }, + "root_module": {} + }, + "provider_schemas": { + "terraform.io/builtin/terraform": { + "resource_schemas": { + "terraform_data": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description_kind": "plain", + "computed": true + }, + "input": { + "type": "dynamic", + "description_kind": "plain", + "optional": true + }, + "output": { + "type": "dynamic", + "description_kind": "plain", + "computed": true + }, + "triggers_replace": { + "type": "dynamic", + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + } + } + }, + "timestamp": "2023-05-30T03:34:55Z" +} \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-import-config-gen/plan.log b/pkg/cloud/testdata/plan-import-config-gen/plan.log new file mode 100644 index 00000000000..27713055678 --- /dev/null +++ b/pkg/cloud/testdata/plan-import-config-gen/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.5.0","@module":"terraform.ui","@timestamp":"2023-05-29T20:30:14.113797-07:00","terraform":"1.5.0","type":"version","ui":"1.1"} +{"@level":"info","@message":"terraform_data.foo: Plan to import","@module":"terraform.ui","@timestamp":"2023-05-29T20:30:14.130354-07:00","change":{"resource":{"addr":"terraform_data.foo","module":"","resource":"terraform_data.foo","implied_provider":"terraform","resource_type":"terraform_data","resource_name":"foo","resource_key":null},"action":"import","importing":{"id":"bar"},"generated_config":"resource \"terraform_data\" \"foo\" {\n input = null\n triggers_replace = null\n}"},"type":"planned_change"} 
+{"@level":"info","@message":"Plan: 1 to import, 0 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-05-29T20:30:14.130392-07:00","changes":{"add":0,"change":0,"import":1,"remove":0,"operation":"plan"},"type":"change_summary"} \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-json-basic-no-unredacted/main.tf b/pkg/cloud/testdata/plan-json-basic-no-unredacted/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic-no-unredacted/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan-redacted.json b/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan-redacted.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan-redacted.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated 
provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. 
This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan.log b/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan.log new file mode 100644 index 00000000000..6e7352ed449 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic-no-unredacted/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.409143-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to create","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.605841-05:00","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.605906-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/cloud/testdata/plan-json-basic/main.tf b/pkg/cloud/testdata/plan-json-basic/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ 
b/pkg/cloud/testdata/plan-json-basic/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-json-basic/plan-redacted.json b/pkg/cloud/testdata/plan-json-basic/plan-redacted.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic/plan-redacted.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its 
literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. 
The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/plan-json-basic/plan-unredacted.json b/pkg/cloud/testdata/plan-json-basic/plan-unredacted.json new file mode 100644 index 00000000000..9ae22186ae3 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic/plan-unredacted.json @@ -0,0 +1 @@ +{"format_version":"1.1","terraform_version":"1.4.4","planned_values":{"root_module":{"resources":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","schema_version":0,"values":{"triggers":null},"sensitive_values":{}}]}},"resource_changes":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","change":{"actions":["create"],"before":null,"after":{"triggers":null},"after_unknown":{"id":true},"before_sensitive":false,"after_sensitive":{}}}],"configuration":{"provider_config":{"null":{"name":"null","full_name":"registry.opentofu.org/hashicorp/null"}},"root_module":{"resources":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_config_key":"null","schema_version":0}]}}} diff --git a/pkg/cloud/testdata/plan-json-basic/plan.log b/pkg/cloud/testdata/plan-json-basic/plan.log new file mode 100644 index 00000000000..6e7352ed449 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-basic/plan.log @@ -0,0 +1,3 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.409143-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"null_resource.foo: Plan to 
create","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.605841-05:00","change":{"resource":{"addr":"null_resource.foo","module":"","resource":"null_resource.foo","implied_provider":"null","resource_type":"null_resource","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.605906-05:00","changes":{"add":1,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/cloud/testdata/plan-json-error/main.tf b/pkg/cloud/testdata/plan-json-error/main.tf new file mode 100644 index 00000000000..bc45f28f563 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/pkg/cloud/testdata/plan-json-error/plan-redacted.json b/pkg/cloud/testdata/plan-json-error/plan-redacted.json new file mode 100644 index 00000000000..0967ef424bc --- /dev/null +++ b/pkg/cloud/testdata/plan-json-error/plan-redacted.json @@ -0,0 +1 @@ +{} diff --git a/pkg/cloud/testdata/plan-json-error/plan.log b/pkg/cloud/testdata/plan-json-error/plan.log new file mode 100644 index 00000000000..b877f1e5834 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-error/plan.log @@ -0,0 +1,2 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-20T12:12:25.477403-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"error","@message":"Error: Unsupported block type","@module":"terraform.ui","@timestamp":"2023-01-20T12:12:25.615995-05:00","diagnostic":{"severity":"error","summary":"Unsupported block type","detail":"Blocks of type \"triggers\" are not expected here. Did you mean to define argument \"triggers\"? 
If so, use the equals sign to assign it a value.","range":{"filename":"main.tf","start":{"line":2,"column":3,"byte":35},"end":{"line":2,"column":11,"byte":43}},"snippet":{"context":"resource \"null_resource\" \"foo\"","code":" triggers {","start_line":2,"highlight_start_offset":2,"highlight_end_offset":10,"values":[]}},"type":"diagnostic"} diff --git a/pkg/cloud/testdata/plan-json-full/main.tf b/pkg/cloud/testdata/plan-json-full/main.tf new file mode 100644 index 00000000000..d6e43c5521e --- /dev/null +++ b/pkg/cloud/testdata/plan-json-full/main.tf @@ -0,0 +1,82 @@ +provider "tfcoremock" {} + +# In order to generate the JSON logs contained in plan.log +# First ONLY apply tfcoremock_simple_resource.example (set the bool attribute +# to true). Make sure the complex_resource is commented out. +# Once applied, change the bool attribute to false and uncomment the complex +# resource. + +resource "tfcoremock_simple_resource" "example" { + id = "my-simple-resource" + bool = false + number = 0 + string = "Hello, world!" + float = 0 + integer = 0 +} + +resource "tfcoremock_complex_resource" "example" { + id = "my-complex-resource" + + bool = true + number = 0 + string = "Hello, world!" 
+ float = 0 + integer = 0 + + list = [ + { + string = "list.one" + }, + { + string = "list.two" + } + ] + + set = [ + { + string = "set.one" + }, + { + string = "set.two" + } + ] + + map = { + "one" : { + string = "map.one" + }, + "two" : { + string = "map.two" + } + } + + object = { + + string = "nested object" + + object = { + string = "nested nested object" + } + } + + list_block { + string = "list_block.one" + } + + list_block { + string = "list_block.two" + } + + list_block { + string = "list_block.three" + } + + set_block { + string = "set_block.one" + } + + set_block { + string = "set_block.two" + } +} diff --git a/pkg/cloud/testdata/plan-json-full/plan-redacted.json b/pkg/cloud/testdata/plan-json-full/plan-redacted.json new file mode 100644 index 00000000000..a08e1181ae8 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-full/plan-redacted.json @@ -0,0 +1 @@ +{"plan_format_version":"1.1","resource_drift":[{"address":"tfcoremock_simple_resource.example","mode":"managed","type":"tfcoremock_simple_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","change":{"actions":["delete"],"before":{"bool":true,"float":0,"id":"my-simple-resource","integer":0,"number":0,"string":"Hello, 
world!"},"after":null,"after_unknown":{},"before_sensitive":{},"after_sensitive":false}}],"resource_changes":[{"address":"tfcoremock_complex_resource.example","mode":"managed","type":"tfcoremock_complex_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","change":{"actions":["create"],"before":null,"after":{"bool":true,"float":0,"id":"my-complex-resource","integer":0,"list":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.two"}],"list_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.two"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.three"}],"map":{"one":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.one"},"two":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.two"}},"number":0,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"nested nested object"},"set":null,"string":"nested 
object"},"set":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.two"}],"set_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.two"}],"string":"Hello, world!"},"after_unknown":{},"before_sensitive":false,"after_sensitive":{"list":[{},{}],"list_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}],"map":{"one":{},"two":{}},"object":{"object":{}},"set":[{},{}],"set_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}]}}},{"address":"tfcoremock_simple_resource.example","mode":"managed","type":"tfcoremock_simple_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","change":{"actions":["create"],"before":null,"after":{"bool":false,"float":0,"id":"my-simple-resource","integer":0,"number":0,"string":"Hello, world!"},"after_unknown":{},"before_sensitive":false,"after_sensitive":{}}}],"relevant_attributes":[],"output_changes":{},"provider_schemas":{"registry.opentofu.org/hashicorp/tfcoremock":{"provider":{"version":0,"block":{"attributes":{"data_directory":{"type":"string","description":"The directory that the provider should use to read the human-readable JSON files for each requested data source. Defaults to `data.resource`.","description_kind":"markdown","optional":true},"resource_directory":{"type":"string","description":"The directory that the provider should use to write the human-readable JSON files for each managed resource. If `use_only_state` is set to `true` then this value does not matter. 
Defaults to `terraform.resource`.","description_kind":"markdown","optional":true},"use_only_state":{"type":"bool","description":"If set to true the provider will rely only on the Terraform state file to load managed resources and will not write anything to disk. Defaults to `false`.","description_kind":"markdown","optional":true}},"description":"The `tfcoremock` provider is intended to aid with testing the Terraform core libraries and the Terraform CLI. This provider should allow users to define all possible Terraform configurations and run them through the Terraform core platform.\n\nThe provider supplies two static resources:\n\n- `tfcoremock_simple_resource`\n- `tfcoremock_complex_resource`\n \nUsers can then define additional dynamic resources by supplying a `dynamic_resources.json` file alongside their root Terraform configuration. These dynamic resources can be used to model any Terraform configuration not covered by the provided static resources.\n\nBy default, all resources created by the provider are then converted into a human-readable JSON format and written out to the resource directory. This behaviour can be disabled by turning on the `use_only_state` flag in the provider schema (this is useful when running the provider in a Terraform Cloud environment). The resource directory defaults to `terraform.resource`.\n\nAll resources supplied by the provider (including the simple and complex resource as well as any dynamic resources) are duplicated into data sources. The data sources should be supplied in the JSON format that resources are written into. The provider looks into the data directory, which defaults to `terraform.data`.\n\nFinally, all resources (and data sources) supplied by the provider have an `id` attribute that is generated if not set by the configuration. Dynamic resources cannot define an `id` attribute as the provider will create one for them. 
The `id` attribute is used as name of the human-readable JSON files held in the resource and data directories.","description_kind":"markdown"}},"resource_schemas":{"tfcoremock_complex_resource":{"version":0,"block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, 
can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map 
attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list 
attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for 
nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional 
integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional 
boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested 
collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested 
collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an 
integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional 
boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object 
attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list 
attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number 
attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list 
attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number 
attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A complex resource that contains five basic attributes, four complex attributes, and two nested blocks.\n\nThe five basic attributes are `boolean`, `number`, `string`, `float`, and `integer` (as with the `tfcoremock_simple_resource`).\n\nThe complex attributes are a `map`, a `list`, a `set`, and an `object`. The `object` type contains the same set of attributes as the schema itself, making a recursive structure. The `list`, `set` and `map` all contain objects which are also recursive. Blocks cannot go into attributes, so the complex attributes do not recurse on the block types.\n\nThe blocks are a nested `list_block` and a nested `set_block`. The blocks contain the same set of attributes and blocks as the schema itself, also making a recursive structure. 
Note, blocks contain both attributes and more blocks so the block types are fully recursive.\n\nThe complex and block types are nested 3 times, at the leaf level of recursion the complex attributes and blocks only contain the simple (ie. non-recursive) attributes. This prevents a potentially infinite level of recursion.","description_kind":"markdown"}},"tfcoremock_simple_resource":{"version":0,"block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","optional":true,"computed":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A simple resource that holds optional attributes for the five basic types: `bool`, `number`, `string`, `float`, and `integer`.","description_kind":"markdown"}}},"data_source_schemas":{"tfcoremock_complex_resource":{"version":0,"block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","required":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object 
attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to 
be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains 
objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and 
objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string 
attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, 
can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested 
collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested 
collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an 
integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the 
same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer 
attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or 
false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an 
integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float 
attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be 
modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A list block that contains the 
same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"list":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"list"},"description":"A list attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"map":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"map"},"description":"A map attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"object":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"single"},"description":"An object attribute that matches the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"set":{"nested_type":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"nesting_mode":"set"},"description":"A set attribute that contains objects that match the root schema, allowing for nested collections and objects to be modelled.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"block_types":{"list_block":{"nesting_mode":"list","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A list block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}},"set_block":{"nesting_mode":"set","block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a 
float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A set block that contains the same attributes and blocks as the root schema, allowing nested blocks and objects to be modelled.","description_kind":"markdown"}}},"description":"A complex resource that contains five basic attributes, four complex attributes, and two nested blocks.\n\nThe five basic attributes are `boolean`, `number`, `string`, `float`, and `integer` (as with the `tfcoremock_simple_resource`).\n\nThe complex attributes are a `map`, a `list`, a `set`, and an `object`. The `object` type contains the same set of attributes as the schema itself, making a recursive structure. The `list`, `set` and `map` all contain objects which are also recursive. Blocks cannot go into attributes, so the complex attributes do not recurse on the block types.\n\nThe blocks are a nested `list_block` and a nested `set_block`. The blocks contain the same set of attributes and blocks as the schema itself, also making a recursive structure. Note, blocks contain both attributes and more blocks so the block types are fully recursive.\n\nThe complex and block types are nested 3 times, at the leaf level of recursion the complex attributes and blocks only contain the simple (ie. non-recursive) attributes. 
This prevents a potentially infinite level of recursion.","description_kind":"markdown"}},"tfcoremock_simple_resource":{"version":0,"block":{"attributes":{"bool":{"type":"bool","description":"An optional boolean attribute, can be true or false.","description_kind":"markdown","optional":true},"float":{"type":"number","description":"An optional float attribute.","description_kind":"markdown","optional":true},"id":{"type":"string","description_kind":"plain","required":true},"integer":{"type":"number","description":"An optional integer attribute.","description_kind":"markdown","optional":true},"number":{"type":"number","description":"An optional number attribute, can be an integer or a float.","description_kind":"markdown","optional":true},"string":{"type":"string","description":"An optional string attribute.","description_kind":"markdown","optional":true}},"description":"A simple resource that holds optional attributes for the five basic types: `bool`, `number`, `string`, `float`, and `integer`.","description_kind":"markdown"}}}}},"provider_format_version":"1.0"} \ No newline at end of file diff --git a/pkg/cloud/testdata/plan-json-full/plan-unredacted.json b/pkg/cloud/testdata/plan-json-full/plan-unredacted.json new file mode 100644 index 00000000000..47da5a7084a --- /dev/null +++ b/pkg/cloud/testdata/plan-json-full/plan-unredacted.json @@ -0,0 +1 @@ 
+{"format_version":"1.1","terraform_version":"1.4.4","planned_values":{"root_module":{"resources":[{"address":"tfcoremock_complex_resource.example","mode":"managed","type":"tfcoremock_complex_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","schema_version":0,"values":{"bool":true,"float":0,"id":"my-complex-resource","integer":0,"list":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.two"}],"list_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.two"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.three"}],"map":{"one":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.one"},"two":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.two"}},"number":0,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"nested nested object"},"set":null,"string":"nested 
object"},"set":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.two"}],"set_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.two"}],"string":"Hello, world!"},"sensitive_values":{"list":[{},{}],"list_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}],"map":{"one":{},"two":{}},"object":{"object":{}},"set":[{},{}],"set_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}]}},{"address":"tfcoremock_simple_resource.example","mode":"managed","type":"tfcoremock_simple_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","schema_version":0,"values":{"bool":false,"float":0,"id":"my-simple-resource","integer":0,"number":0,"string":"Hello, 
world!"},"sensitive_values":{}}]}},"resource_changes":[{"address":"tfcoremock_complex_resource.example","mode":"managed","type":"tfcoremock_complex_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","change":{"actions":["create"],"before":null,"after":{"bool":true,"float":0,"id":"my-complex-resource","integer":0,"list":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"list.two"}],"list_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.two"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"list_block.three"}],"map":{"one":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.one"},"two":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"map.two"}},"number":0,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"nested nested object"},"set":null,"string":"nested 
object"},"set":[{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.one"},{"bool":null,"float":null,"integer":null,"list":null,"map":null,"number":null,"object":null,"set":null,"string":"set.two"}],"set_block":[{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.one"},{"bool":null,"float":null,"integer":null,"list":null,"list_block":[],"map":null,"number":null,"object":null,"set":null,"set_block":[],"string":"set_block.two"}],"string":"Hello, world!"},"after_unknown":{},"before_sensitive":false,"after_sensitive":{"list":[{},{}],"list_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}],"map":{"one":{},"two":{}},"object":{"object":{}},"set":[{},{}],"set_block":[{"list_block":[],"set_block":[]},{"list_block":[],"set_block":[]}]}}},{"address":"tfcoremock_simple_resource.example","mode":"managed","type":"tfcoremock_simple_resource","name":"example","provider_name":"registry.opentofu.org/hashicorp/tfcoremock","change":{"actions":["create"],"before":null,"after":{"bool":false,"float":0,"id":"my-simple-resource","integer":0,"number":0,"string":"Hello, 
world!"},"after_unknown":{},"before_sensitive":false,"after_sensitive":{}}}],"configuration":{"provider_config":{"tfcoremock":{"name":"tfcoremock","full_name":"registry.opentofu.org/hashicorp/tfcoremock"}},"root_module":{"resources":[{"address":"tfcoremock_complex_resource.example","mode":"managed","type":"tfcoremock_complex_resource","name":"example","provider_config_key":"tfcoremock","expressions":{"bool":{"constant_value":true},"float":{"constant_value":0},"id":{"constant_value":"my-complex-resource"},"integer":{"constant_value":0},"list":{"constant_value":[{"string":"list.one"},{"string":"list.two"}]},"list_block":[{"string":{"constant_value":"list_block.one"}},{"string":{"constant_value":"list_block.two"}},{"string":{"constant_value":"list_block.three"}}],"map":{"constant_value":{"one":{"string":"map.one"},"two":{"string":"map.two"}}},"number":{"constant_value":0},"object":{"constant_value":{"object":{"string":"nested nested object"},"string":"nested object"}},"set":{"constant_value":[{"string":"set.one"},{"string":"set.two"}]},"set_block":[{"string":{"constant_value":"set_block.one"}},{"string":{"constant_value":"set_block.two"}}],"string":{"constant_value":"Hello, world!"}},"schema_version":0},{"address":"tfcoremock_simple_resource.example","mode":"managed","type":"tfcoremock_simple_resource","name":"example","provider_config_key":"tfcoremock","expressions":{"bool":{"constant_value":false},"float":{"constant_value":0},"id":{"constant_value":"my-simple-resource"},"integer":{"constant_value":0},"number":{"constant_value":0},"string":{"constant_value":"Hello, world!"}},"schema_version":0}]}}} diff --git a/pkg/cloud/testdata/plan-json-full/plan.log b/pkg/cloud/testdata/plan-json-full/plan.log new file mode 100644 index 00000000000..59fa3cb32c6 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-full/plan.log @@ -0,0 +1,6 @@ +{"@level":"info","@message":"Terraform 
1.3.7","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.004160-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"tfcoremock_simple_resource.example: Refreshing state... [id=my-simple-resource]","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.232274-05:00","hook":{"resource":{"addr":"tfcoremock_simple_resource.example","module":"","resource":"tfcoremock_simple_resource.example","implied_provider":"tfcoremock","resource_type":"tfcoremock_simple_resource","resource_name":"example","resource_key":null},"id_key":"id","id_value":"my-simple-resource"},"type":"refresh_start"} +{"@level":"info","@message":"tfcoremock_simple_resource.example: Refresh complete [id=my-simple-resource]","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.232882-05:00","hook":{"resource":{"addr":"tfcoremock_simple_resource.example","module":"","resource":"tfcoremock_simple_resource.example","implied_provider":"tfcoremock","resource_type":"tfcoremock_simple_resource","resource_name":"example","resource_key":null},"id_key":"id","id_value":"my-simple-resource"},"type":"refresh_complete"} +{"@level":"info","@message":"tfcoremock_simple_resource.example: Plan to update","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.289259-05:00","change":{"resource":{"addr":"tfcoremock_simple_resource.example","module":"","resource":"tfcoremock_simple_resource.example","implied_provider":"tfcoremock","resource_type":"tfcoremock_simple_resource","resource_name":"example","resource_key":null},"action":"update"},"type":"planned_change"} +{"@level":"info","@message":"tfcoremock_complex_resource.example: Plan to 
create","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.289320-05:00","change":{"resource":{"addr":"tfcoremock_complex_resource.example","module":"","resource":"tfcoremock_complex_resource.example","implied_provider":"tfcoremock","resource_type":"tfcoremock_complex_resource","resource_name":"example","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 1 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-19T13:28:29.289330-05:00","changes":{"add":1,"change":1,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/cloud/testdata/plan-json-no-changes/main.tf b/pkg/cloud/testdata/plan-json-no-changes/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-json-no-changes/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-json-no-changes/plan-redacted.json b/pkg/cloud/testdata/plan-json-no-changes/plan-redacted.json new file mode 100644 index 00000000000..01a31a81762 --- /dev/null +++ b/pkg/cloud/testdata/plan-json-no-changes/plan-redacted.json @@ -0,0 +1,118 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "no-op" + ], + "before": { + "id": "3549869958859575216", + "triggers": null + }, + "after": { + "id": "3549869958859575216", + "triggers": null + }, + "after_unknown": {}, + "before_sensitive": {}, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + 
"type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + } + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. 
This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/cloud/testdata/plan-json-no-changes/plan-unredacted.json b/pkg/cloud/testdata/plan-json-no-changes/plan-unredacted.json new file mode 100644 index 00000000000..96566db11cb --- /dev/null +++ b/pkg/cloud/testdata/plan-json-no-changes/plan-unredacted.json @@ -0,0 +1 @@ 
+{"format_version":"1.1","terraform_version":"1.4.4","planned_values":{"root_module":{"resources":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","schema_version":0,"values":{"id":"3549869958859575216","triggers":null},"sensitive_values":{}}]}},"resource_changes":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","change":{"actions":["no-op"],"before":{"id":"3549869958859575216","triggers":null},"after":{"id":"3549869958859575216","triggers":null},"after_unknown":{},"before_sensitive":{},"after_sensitive":{}}}],"prior_state":{"format_version":"1.0","terraform_version":"1.4.4","values":{"root_module":{"resources":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_name":"registry.opentofu.org/hashicorp/null","schema_version":0,"values":{"id":"3549869958859575216","triggers":null},"sensitive_values":{}}]}}},"configuration":{"provider_config":{"null":{"name":"null","full_name":"registry.opentofu.org/hashicorp/null"}},"root_module":{"resources":[{"address":"null_resource.foo","mode":"managed","type":"null_resource","name":"foo","provider_config_key":"null","schema_version":0}]}}} diff --git a/pkg/cloud/testdata/plan-json-no-changes/plan.log b/pkg/cloud/testdata/plan-json-no-changes/plan.log new file mode 100644 index 00000000000..7b10d42020d --- /dev/null +++ b/pkg/cloud/testdata/plan-json-no-changes/plan.log @@ -0,0 +1,2 @@ +{"@level":"info","@message":"Terraform 1.3.7","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.409143-05:00","terraform":"1.3.7","type":"version","ui":"1.0"} +{"@level":"info","@message":"Plan: 0 to add, 0 to change, 0 to destroy.","@module":"terraform.ui","@timestamp":"2023-01-19T10:47:27.605906-05:00","changes":{"add":0,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git 
a/pkg/cloud/testdata/plan-long-line/main.tf b/pkg/cloud/testdata/plan-long-line/main.tf new file mode 100644 index 00000000000..0a8d623a9d6 --- /dev/null +++ b/pkg/cloud/testdata/plan-long-line/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + long_line = "[{'_id':'5c5ab0ed7de45e993ffb9eeb','index':0,'guid':'e734d772-6b5a-4cb0-805c-91cd5e560e20','isActive':false,'balance':'$1,472.03','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Darlene','last':'Garza'},'company':'GEEKOSIS','email':'darlene.garza@geekosis.io','phone':'+1 (850) 506-3347','address':'165 Kiely Place, Como, New Mexico, 4335','about':'Officia ullamco et sunt magna voluptate culpa cupidatat ea tempor laboris cupidatat ea anim laboris. Minim enim quis enim esse laborum est veniam. Lorem excepteur elit Lorem cupidatat elit ea anim irure fugiat fugiat sunt mollit. Consectetur ad nulla dolor amet esse occaecat aliquip sit. Magna sit elit adipisicing ut reprehenderit anim exercitation sit quis ea pariatur Lorem magna dolore.','registered':'Wednesday, March 11, 2015 12:58 PM','latitude':'20.729127','longitude':'-127.343593','tags':['minim','in','deserunt','occaecat','fugiat'],'greeting':'Hello, Darlene! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda9117d15f1c1f112','index':1,'guid':'f0d1eed2-c6a9-4535-8800-d4bd53fe7eee','isActive':true,'balance':'$2,901.90','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Flora','last':'Short'},'company':'SIGNITY','email':'flora.short@signity.me','phone':'+1 (840) 520-2666','address':'636 Johnson Avenue, Gerber, Wisconsin, 9139','about':'Veniam dolore deserunt Lorem aliqua qui eiusmod. Amet tempor fugiat duis incididunt amet adipisicing. Id ea nisi veniam eiusmod.','registered':'Wednesday, May 2, 2018 5:59 AM','latitude':'-63.267612','longitude':'4.224102','tags':['veniam','incididunt','id','aliqua','reprehenderit'],'greeting':'Hello, Flora! 
You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed83fd574d8041fa16','index':2,'guid':'29499a07-414a-436f-ba62-6634ca16bdcc','isActive':true,'balance':'$2,781.28','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Trevino','last':'Marks'},'company':'KEGULAR','email':'trevino.marks@kegular.com','phone':'+1 (843) 571-2269','address':'200 Alabama Avenue, Grenelefe, Florida, 7963','about':'Occaecat nisi exercitation Lorem mollit laborum magna adipisicing culpa dolor proident dolore. Non consequat ea amet et id mollit incididunt minim anim amet nostrud labore tempor. Proident eu sint commodo nisi consequat voluptate do fugiat proident. Laboris eiusmod veniam non et elit nulla nisi labore incididunt Lorem consequat consectetur voluptate.','registered':'Saturday, January 25, 2014 5:56 AM','latitude':'65.044005','longitude':'-127.454864','tags':['anim','duis','velit','pariatur','enim'],'greeting':'Hello, Trevino! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed784eb6e350ff0a07','index':3,'guid':'40ed47e2-1747-4665-ab59-cdb3630a7642','isActive':true,'balance':'$2,000.78','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Solis','last':'Mckinney'},'company':'QABOOS','email':'solis.mckinney@qaboos.org','phone':'+1 (924) 405-2560','address':'712 Herkimer Court, Klondike, Ohio, 8133','about':'Minim ad anim minim tempor mollit magna tempor et non commodo amet. Nisi cupidatat labore culpa consectetur exercitation laborum adipisicing fugiat officia adipisicing consequat non. Qui voluptate tempor laboris exercitation qui non adipisicing occaecat voluptate sunt do nostrud velit. Consequat tempor officia laboris tempor irure cupidatat aliquip voluptate nostrud velit ex nulla tempor laboris. Qui pariatur pariatur enim aliquip velit. 
Officia mollit ullamco laboris velit velit eiusmod enim amet incididunt consectetur sunt.','registered':'Wednesday, April 12, 2017 6:59 AM','latitude':'-25.055596','longitude':'-140.126525','tags':['ipsum','adipisicing','amet','nulla','dolore'],'greeting':'Hello, Solis! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed02ce1ea9a2155d51','index':4,'guid':'1b5fb7d3-3b9a-4382-81b5-9ab01a27e74b','isActive':true,'balance':'$1,373.67','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Janell','last':'Battle'},'company':'GEEKMOSIS','email':'janell.battle@geekmosis.net','phone':'+1 (810) 591-3014','address':'517 Onderdonk Avenue, Shrewsbury, District Of Columbia, 2335','about':'Reprehenderit ad proident do anim qui officia magna magna duis cillum esse minim est. Excepteur ipsum anim ad laboris. In occaecat dolore nulla ea Lorem tempor et culpa in sint. Officia eu eu incididunt sit amet. Culpa duis id reprehenderit ut anim sit sunt. Duis dolore proident velit incididunt adipisicing pariatur fugiat incididunt eiusmod eu veniam irure.','registered':'Thursday, February 8, 2018 1:44 AM','latitude':'-33.254864','longitude':'-154.145885','tags':['aute','deserunt','ipsum','eiusmod','laborum'],'greeting':'Hello, Janell! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edab58604bd7d3dd1c','index':5,'guid':'6354c035-af22-44c9-8be9-b2ea9decc24d','isActive':true,'balance':'$3,535.68','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Combs','last':'Kirby'},'company':'LUXURIA','email':'combs.kirby@luxuria.name','phone':'+1 (900) 498-3266','address':'377 Kingsland Avenue, Ruckersville, Maine, 9916','about':'Lorem duis ipsum pariatur aliquip sunt. Commodo esse laborum incididunt mollit quis est laboris ea ea quis fugiat. 
Enim elit ullamco velit et fugiat veniam irure deserunt aliqua ad irure veniam.','registered':'Tuesday, February 21, 2017 4:04 PM','latitude':'-70.20591','longitude':'162.546871','tags':['reprehenderit','est','enim','aute','ad'],'greeting':'Hello, Combs! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf7fafeffc6357c51','index':6,'guid':'02523e0b-cc90-4309-b6b2-f493dc6076f6','isActive':false,'balance':'$3,754.30','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Macias','last':'Calderon'},'company':'AMTAP','email':'macias.calderon@amtap.us','phone':'+1 (996) 569-3667','address':'305 Royce Street, Glidden, Iowa, 9248','about':'Exercitation nulla deserunt pariatur adipisicing. In commodo deserunt incididunt ut velit minim qui ut quis. Labore elit ullamco eiusmod voluptate in eu do est fugiat aute mollit deserunt. Eu duis proident velit fugiat velit ut. Ut non esse amet laborum nisi tempor in nulla.','registered':'Thursday, October 23, 2014 10:28 PM','latitude':'32.371629','longitude':'60.155135','tags':['commodo','elit','velit','excepteur','aliqua'],'greeting':'Hello, Macias! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0e8a6109e7fabf17','index':7,'guid':'675ff6b6-197b-4154-9775-813d661df822','isActive':false,'balance':'$2,850.62','picture':'http://placehold.it/32x32','age':37,'eyeColor':'green','name':{'first':'Stefanie','last':'Rivers'},'company':'RECRITUBE','email':'stefanie.rivers@recritube.biz','phone':'+1 (994) 591-3551','address':'995 Campus Road, Abrams, Virginia, 3251','about':'Esse aute non laborum Lorem nulla irure. Veniam elit aute ut et dolor non deserunt laboris tempor. Ipsum quis cupidatat laborum laboris voluptate esse duis eiusmod excepteur consectetur commodo ullamco qui occaecat. 
Culpa velit cillum occaecat minim nisi.','registered':'Thursday, June 9, 2016 3:40 PM','latitude':'-18.526825','longitude':'149.670782','tags':['occaecat','sunt','reprehenderit','ipsum','magna'],'greeting':'Hello, Stefanie! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf7d9bc2db4e476e3','index':8,'guid':'adaefc55-f6ea-4bd1-a147-0e31c3ce7a21','isActive':true,'balance':'$2,555.13','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Hillary','last':'Lancaster'},'company':'OLUCORE','email':'hillary.lancaster@olucore.ca','phone':'+1 (964) 474-3018','address':'232 Berriman Street, Kaka, Massachusetts, 6792','about':'Veniam ad laboris quis reprehenderit aliquip nisi sunt excepteur ea aute laborum excepteur incididunt. Nisi exercitation aliquip do culpa commodo ex officia ut enim mollit in deserunt in amet. Anim eu deserunt dolore non cupidatat ut enim incididunt aute dolore voluptate. Do cillum mollit laborum non incididunt occaecat aute voluptate nisi irure.','registered':'Thursday, June 4, 2015 9:45 PM','latitude':'88.075919','longitude':'-148.951368','tags':['reprehenderit','veniam','ad','aute','anim'],'greeting':'Hello, Hillary! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed7b7192ad6a0f267c','index':9,'guid':'0ca9b8ea-f671-474e-be26-4a49cae4838a','isActive':true,'balance':'$3,684.51','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Jill','last':'Conner'},'company':'EXOZENT','email':'jill.conner@exozent.info','phone':'+1 (887) 467-2168','address':'751 Thames Street, Juarez, American Samoa, 8386','about':'Enim voluptate et non est in magna laborum aliqua enim aliqua est non nostrud. Tempor est nulla ipsum consectetur esse nostrud est id. Consequat do voluptate cupidatat eu fugiat et fugiat velit id. 
Sint dolore ad qui tempor anim eu amet consectetur do elit aute adipisicing consequat ex.','registered':'Sunday, October 22, 2017 7:35 AM','latitude':'84.384911','longitude':'40.305648','tags':['tempor','sint','irure','et','ex'],'greeting':'Hello, Jill! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed713fe676575aa72b','index':10,'guid':'c28023cf-cc57-4c2e-8d91-dfbe6bafadcd','isActive':false,'balance':'$2,792.45','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Hurley','last':'George'},'company':'ZAJ','email':'hurley.george@zaj.tv','phone':'+1 (984) 547-3284','address':'727 Minna Street, Lacomb, Colorado, 2557','about':'Ex velit cupidatat veniam culpa. Eiusmod ut fugiat adipisicing incididunt consectetur exercitation Lorem exercitation ex. Incididunt anim aute incididunt fugiat cupidatat qui eu non reprehenderit. Eiusmod dolor nisi culpa excepteur ut velit minim dolor voluptate amet commodo culpa in.','registered':'Thursday, February 16, 2017 6:41 AM','latitude':'25.989949','longitude':'10.200053','tags':['minim','ut','sunt','consequat','ullamco'],'greeting':'Hello, Hurley! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1e56732746c70d8b','index':11,'guid':'e9766f13-766c-4450-b4d2-8b04580f60b7','isActive':true,'balance':'$3,874.26','picture':'http://placehold.it/32x32','age':35,'eyeColor':'green','name':{'first':'Leticia','last':'Pace'},'company':'HONOTRON','email':'leticia.pace@honotron.co.uk','phone':'+1 (974) 536-3322','address':'365 Goodwin Place, Savage, Nevada, 9191','about':'Nisi Lorem aliqua esse eiusmod magna. Ad minim incididunt proident ut Lorem cupidatat qui velit aliqua ullamco et ipsum in. Aliquip elit consectetur pariatur esse exercitation et officia quis. Occaecat tempor proident cillum anim ad commodo velit ut voluptate. 
Tempor et occaecat sit sint aliquip tempor nulla velit magna nisi proident exercitation Lorem id.','registered':'Saturday, August 4, 2018 5:05 AM','latitude':'70.620386','longitude':'-86.335813','tags':['occaecat','velit','labore','laboris','esse'],'greeting':'Hello, Leticia! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed941337fe42f47426','index':12,'guid':'6d390762-17ea-4b58-9a36-b0c9a8748a42','isActive':true,'balance':'$1,049.61','picture':'http://placehold.it/32x32','age':38,'eyeColor':'green','name':{'first':'Rose','last':'Humphrey'},'company':'MYOPIUM','email':'rose.humphrey@myopium.io','phone':'+1 (828) 426-3086','address':'389 Sapphire Street, Saticoy, Marshall Islands, 1423','about':'Aliquip enim excepteur adipisicing ex. Consequat aliqua consequat nostrud do occaecat deserunt excepteur sit et ipsum sunt dolor eu. Dolore laborum commodo excepteur tempor ad adipisicing proident excepteur magna non Lorem proident consequat aute. Fugiat minim consequat occaecat voluptate esse velit officia laboris nostrud nisi ut voluptate.','registered':'Monday, April 16, 2018 12:38 PM','latitude':'-47.083742','longitude':'109.022423','tags':['aute','non','sit','adipisicing','mollit'],'greeting':'Hello, Rose! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd0c02fc3fdc01a40','index':13,'guid':'07755618-6fdf-4b33-af50-364c18909227','isActive':true,'balance':'$1,823.61','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Judith','last':'Hale'},'company':'COLLAIRE','email':'judith.hale@collaire.me','phone':'+1 (922) 508-2843','address':'193 Coffey Street, Castleton, North Dakota, 3638','about':'Minim non ullamco ad anim nostrud dolore nostrud veniam consequat id eiusmod veniam laboris. Lorem irure esse mollit non velit aute id cupidatat est mollit occaecat magna excepteur. Adipisicing tempor nisi sit aliquip tempor pariatur tempor eu consectetur nulla amet nulla. 
Quis nisi nisi ea incididunt culpa et do. Esse officia eu pariatur velit sunt quis proident amet consectetur consequat. Nisi excepteur culpa nulla sit dolor deserunt excepteur dolor consequat elit cillum tempor Lorem.','registered':'Wednesday, August 24, 2016 12:29 AM','latitude':'-80.15514','longitude':'39.91007','tags':['consectetur','incididunt','aliquip','dolor','consequat'],'greeting':'Hello, Judith! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3e1e29caa4f728b','index':14,'guid':'2c6617a2-e7a9-4ff7-a8b9-e99554fe70fe','isActive':true,'balance':'$1,971.00','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Estes','last':'Sweet'},'company':'GEEKKO','email':'estes.sweet@geekko.com','phone':'+1 (866) 448-3032','address':'847 Cove Lane, Kula, Mississippi, 9178','about':'Veniam consectetur occaecat est excepteur consequat ipsum cillum sit consectetur. Ut cupidatat et reprehenderit dolore enim do cillum qui pariatur ad laborum incididunt esse. Fugiat sunt dolor veniam laboris ipsum deserunt proident reprehenderit laboris non nostrud. Magna excepteur sint magna laborum tempor sit exercitation ipsum labore est ullamco ullamco. Cillum voluptate cillum ea laborum Lorem. Excepteur sint ut nisi est esse non. Minim excepteur ullamco velit nisi ut in elit exercitation ut dolore.','registered':'Sunday, August 12, 2018 5:06 PM','latitude':'-9.57771','longitude':'-159.94577','tags':['culpa','dolor','velit','anim','pariatur'],'greeting':'Hello, Estes! 
You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edbcf088c6fd593091','index':15,'guid':'2cc79958-1b40-4e2c-907a-433903fd3da9','isActive':false,'balance':'$3,751.53','picture':'http://placehold.it/32x32','age':34,'eyeColor':'brown','name':{'first':'Kemp','last':'Spence'},'company':'EXOBLUE','email':'kemp.spence@exoblue.org','phone':'+1 (864) 487-2992','address':'217 Clay Street, Monument, North Carolina, 1460','about':'Nostrud duis cillum sint non commodo dolor aute aliqua adipisicing ad nulla non excepteur proident. Fugiat labore elit tempor cillum veniam reprehenderit laboris consectetur dolore amet qui cupidatat. Amet aliqua elit anim et consequat commodo excepteur officia anim aliqua ea eu labore cillum. Et ex dolor duis dolore commodo veniam et nisi.','registered':'Monday, October 29, 2018 5:23 AM','latitude':'-70.304222','longitude':'83.582371','tags':['velit','duis','consequat','incididunt','duis'],'greeting':'Hello, Kemp! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed6400479feb3de505','index':16,'guid':'91ccae6d-a3ea-43cf-bb00-3f2729256cc9','isActive':false,'balance':'$2,477.79','picture':'http://placehold.it/32x32','age':40,'eyeColor':'blue','name':{'first':'Ronda','last':'Burris'},'company':'EQUITOX','email':'ronda.burris@equitox.net','phone':'+1 (817) 553-3228','address':'708 Lawton Street, Deputy, Wyoming, 8598','about':'Excepteur voluptate aliquip consequat cillum est duis sit cillum eu eiusmod et laborum ullamco. Et minim reprehenderit aute voluptate amet ullamco. Amet sit enim ad irure deserunt nostrud anim veniam consequat dolor commodo. Consequat do occaecat do exercitation ullamco dolor ut. Id laboris consequat est dolor dolore tempor ullamco anim do ut nulla deserunt labore. Mollit ex Lorem ullamco mollit.','registered':'Monday, April 23, 2018 5:27 PM','latitude':'-31.227208','longitude':'0.63785','tags':['ipsum','magna','consectetur','sit','irure'],'greeting':'Hello, Ronda! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddbeab2e53e04d563','index':17,'guid':'a86d4eb6-6bd8-48c2-a8fc-1c933c835852','isActive':false,'balance':'$3,709.03','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Rosario','last':'Dillard'},'company':'BARKARAMA','email':'rosario.dillard@barkarama.name','phone':'+1 (933) 525-3898','address':'730 Chauncey Street, Forbestown, South Carolina, 6894','about':'Est eu fugiat aliquip ea ad qui ad mollit ad tempor voluptate et incididunt reprehenderit. Incididunt fugiat commodo minim adipisicing culpa consectetur duis eu ut commodo consequat voluptate labore. Nostrud irure labore adipisicing irure quis magna consequat dolor Lorem sint enim. Sint excepteur eu dolore elit ut do mollit sunt enim est. Labore id nostrud sint Lorem esse nostrud.','registered':'Friday, December 25, 2015 8:59 PM','latitude':'37.440827','longitude':'44.580474','tags':['Lorem','sit','ipsum','ea','ut'],'greeting':'Hello, Rosario! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf8e9b9c031d04e8','index':18,'guid':'a96f997c-daf8-40d4-92e1-be07e2cf0f60','isActive':false,'balance':'$1,878.37','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Sondra','last':'Gonzales'},'company':'XUMONK','email':'sondra.gonzales@xumonk.us','phone':'+1 (838) 560-2255','address':'230 Cox Place, Geyserville, Georgia, 6805','about':'Laborum sunt voluptate ea laboris nostrud. Amet deserunt aliqua Lorem voluptate velit deserunt occaecat minim ullamco. Lorem occaecat sit labore adipisicing ad magna mollit labore ullamco proident. Ea velit do proident fugiat esse commodo ex nostrud eu mollit pariatur. Labore laborum qui voluptate quis proident reprehenderit tempor dolore duis deserunt esse aliqua aliquip. Non veniam enim pariatur cupidatat ipsum dolore est reprehenderit. 
Non exercitation adipisicing proident magna elit occaecat non magna.','registered':'Sunday, June 26, 2016 4:02 AM','latitude':'62.247742','longitude':'-44.90666','tags':['ea','aute','in','voluptate','magna'],'greeting':'Hello, Sondra! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed2c1bcd06781f677e','index':19,'guid':'6ac47a16-eed4-4460-92ee-e0dd33c1fbb5','isActive':false,'balance':'$3,730.64','picture':'http://placehold.it/32x32','age':20,'eyeColor':'brown','name':{'first':'Anastasia','last':'Vega'},'company':'FIREWAX','email':'anastasia.vega@firewax.biz','phone':'+1 (867) 493-3698','address':'803 Arlington Avenue, Rosburg, Northern Mariana Islands, 8769','about':'Sint ex nisi tempor sunt voluptate non et eiusmod irure. Aute reprehenderit dolor mollit aliqua Lorem voluptate occaecat. Sint laboris deserunt Lorem incididunt nulla cupidatat do.','registered':'Friday, March 18, 2016 12:02 PM','latitude':'-32.010216','longitude':'-87.874753','tags':['aliquip','mollit','mollit','ad','laborum'],'greeting':'Hello, Anastasia! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed727fd645854bbf43','index':20,'guid':'67bd8cdb-ce6b-455c-944c-a80e17c6fa75','isActive':true,'balance':'$2,868.06','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Lucinda','last':'Cox'},'company':'ENDIPINE','email':'lucinda.cox@endipine.ca','phone':'+1 (990) 428-3002','address':'412 Thatford Avenue, Lafferty, New Jersey, 5271','about':'Esse nulla sunt ut consequat aute mollit. Est occaecat sunt nisi irure id anim est commodo. Elit mollit amet dolore sunt adipisicing ea laborum quis ea reprehenderit non consequat dolore. Minim sunt occaecat quis aute commodo dolore quis commodo proident. Sunt sint duis ullamco sit ea esse Lorem. 
Consequat pariatur eiusmod laboris adipisicing labore in laboris adipisicing adipisicing consequat aute ea et.','registered':'Friday, May 1, 2015 10:16 PM','latitude':'-14.200957','longitude':'-82.211386','tags':['do','sit','qui','officia','aliquip'],'greeting':'Hello, Lucinda! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed5a97284eb2cbd3a8','index':21,'guid':'f9fc999d-515c-4fc4-b339-76300e1b4bf2','isActive':true,'balance':'$1,172.57','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Conrad','last':'Bradley'},'company':'FUELWORKS','email':'conrad.bradley@fuelworks.info','phone':'+1 (956) 561-3226','address':'685 Fenimore Street, Esmont, Maryland, 7523','about':'Labore reprehenderit anim nisi sunt do nisi in. Est anim cillum id minim exercitation ullamco voluptate ipsum eu. Elit culpa consequat reprehenderit laborum in eu. Laboris amet voluptate laboris qui voluptate duis minim reprehenderit. Commodo sunt irure dolore sunt occaecat velit nisi eu minim minim.','registered':'Wednesday, January 18, 2017 11:13 PM','latitude':'31.665993','longitude':'38.868968','tags':['excepteur','exercitation','est','nisi','mollit'],'greeting':'Hello, Conrad! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edc4eaf6f760c38218','index':22,'guid':'8794ef5f-da2f-46f0-a755-c18a16409fd5','isActive':false,'balance':'$3,594.73','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Marquez','last':'Vargas'},'company':'MALATHION','email':'marquez.vargas@malathion.tv','phone':'+1 (976) 438-3126','address':'296 Hall Street, National, Texas, 2067','about':'Proident cillum aute minim fugiat sunt aliqua non occaecat est duis id id tempor. Qui deserunt nisi amet pariatur proident eu laboris esse adipisicing magna. Anim anim mollit aute non magna nisi aute magna labore ullamco reprehenderit voluptate et ad. Proident adipisicing aute eiusmod nostrud nostrud deserunt culpa. 
Elit eu ullamco nisi aliqua dolor sint pariatur excepteur sit consectetur tempor. Consequat Lorem ullamco commodo veniam qui sint magna. Sit mollit ad aliquip est id eu officia id adipisicing duis ad.','registered':'Tuesday, November 17, 2015 6:16 PM','latitude':'-36.443667','longitude':'22.336776','tags':['aliquip','veniam','ipsum','Lorem','ex'],'greeting':'Hello, Marquez! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edd7c718518ee0466a','index':23,'guid':'ad8781a2-059e-4288-9879-309d53a99bf5','isActive':true,'balance':'$3,570.68','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Snider','last':'Frost'},'company':'ZILODYNE','email':'snider.frost@zilodyne.co.uk','phone':'+1 (913) 485-3275','address':'721 Lincoln Road, Richmond, Utah, 672','about':'Minim enim Lorem esse incididunt do reprehenderit velit laborum ullamco. In aute eiusmod esse aliqua et labore tempor sunt ex mollit veniam tempor. Nulla elit cillum qui ullamco dolore amet deserunt magna amet laborum.','registered':'Saturday, August 23, 2014 12:58 AM','latitude':'-88.682554','longitude':'74.063179','tags':['nulla','ea','sint','aliquip','duis'],'greeting':'Hello, Snider! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf026fece8e2c0970','index':24,'guid':'1b7d81e1-1dba-4322-bb1a-eaa6a24cccea','isActive':false,'balance':'$2,037.91','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Snyder','last':'Fletcher'},'company':'COMTEST','email':'snyder.fletcher@comtest.io','phone':'+1 (830) 538-3860','address':'221 Lewis Place, Zortman, Idaho, 572','about':'Elit anim enim esse dolore exercitation. Laboris esse sint adipisicing fugiat sint do occaecat ut voluptate sint nulla. 
Ad sint ut reprehenderit nostrud irure id consectetur officia velit consequat.','registered':'Sunday, January 1, 2017 1:13 AM','latitude':'-54.742604','longitude':'69.534932','tags':['exercitation','commodo','in','id','aliqua'],'greeting':'Hello, Snyder! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b9a7f83da6d2dfd','index':25,'guid':'0b2cc6b6-0044-4b1c-aa31-bd72963457a0','isActive':false,'balance':'$1,152.76','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Regina','last':'James'},'company':'TELPOD','email':'regina.james@telpod.me','phone':'+1 (989) 455-3228','address':'688 Essex Street, Clayville, Alabama, 2772','about':'Eiusmod elit culpa reprehenderit ea veniam. Officia irure culpa duis aute ut. Irure duis cillum officia ea pariatur velit ut dolor incididunt reprehenderit ex elit laborum. Est pariatur veniam ad irure. Labore velit sunt esse laboris aliqua velit deserunt deserunt sit. Elit eiusmod ad laboris aliquip minim irure excepteur enim quis. Quis incididunt adipisicing ut magna cupidatat sit amet culpa.','registered':'Tuesday, April 25, 2017 10:16 PM','latitude':'-75.088027','longitude':'47.209828','tags':['elit','nisi','est','voluptate','proident'],'greeting':'Hello, Regina! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed10884f32f779f2bf','index':26,'guid':'1f6fb522-0002-46ff-8dac-451247f28168','isActive':true,'balance':'$1,948.79','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Collins','last':'Mcpherson'},'company':'DIGIGEN','email':'collins.mcpherson@digigen.com','phone':'+1 (991) 519-2334','address':'317 Merit Court, Sanford, Michigan, 6468','about':'Magna qui culpa dolor officia labore mollit ex excepteur duis eiusmod. Ea cupidatat ex ipsum mollit do minim duis. Nisi eiusmod minim tempor id esse commodo sunt sunt ullamco ut do laborum ullamco magna. Aliquip laborum dolor officia officia eu nostrud velit minim est anim. 
Ex elit laborum sunt magna exercitation nisi cillum sunt aute qui ea ullamco. Cupidatat ea sunt aute dolor duis nisi Lorem ullamco eiusmod. Sit ea velit ad veniam aliqua ad elit cupidatat ut magna in.','registered':'Friday, June 10, 2016 4:38 PM','latitude':'25.513996','longitude':'14.911124','tags':['exercitation','non','sit','velit','officia'],'greeting':'Hello, Collins! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8a575110efb15c6c','index':27,'guid':'2a904c82-068b-4ded-9ae6-cfeb6d7e62c9','isActive':true,'balance':'$3,427.91','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Mckay','last':'Barrera'},'company':'COMVEYER','email':'mckay.barrera@comveyer.org','phone':'+1 (853) 470-2560','address':'907 Glenwood Road, Churchill, Oregon, 8583','about':'In voluptate esse dolore enim sint quis dolor do exercitation sint et labore nisi. Eiusmod tempor exercitation dolore elit sit velit sint et. Sit magna adipisicing eiusmod do anim velit deserunt laboris ad ea pariatur. Irure nisi anim mollit elit commodo nulla. Aute eiusmod sit nulla eiusmod. Eiusmod est officia commodo mollit laboris do deserunt eu do nisi amet. Proident ad duis eiusmod laboris Lorem ut culpa pariatur Lorem reprehenderit minim aliquip irure sunt.','registered':'Saturday, December 19, 2015 2:49 PM','latitude':'-55.243287','longitude':'138.035406','tags':['non','quis','laboris','enim','nisi'],'greeting':'Hello, Mckay! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edcd49ab6a73ff7f32','index':28,'guid':'5d3e0dae-3f58-437f-b12d-de24667a904d','isActive':true,'balance':'$3,270.52','picture':'http://placehold.it/32x32','age':35,'eyeColor':'blue','name':{'first':'Mabel','last':'Leonard'},'company':'QUADEEBO','email':'mabel.leonard@quadeebo.net','phone':'+1 (805) 432-2356','address':'965 Underhill Avenue, Falconaire, Minnesota, 4450','about':'Cupidatat amet sunt est ipsum occaecat sit fugiat excepteur Lorem Lorem ex ea ipsum. 
Ad incididunt est irure magna excepteur occaecat nostrud. Minim dolor id anim ipsum qui nostrud ullamco aute ex Lorem magna deserunt excepteur Lorem.','registered':'Saturday, March 28, 2015 5:55 AM','latitude':'27.388359','longitude':'156.408728','tags':['quis','velit','deserunt','dolore','sit'],'greeting':'Hello, Mabel! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edde16ac2dc2fbb6c1','index':29,'guid':'d50c2233-70fc-4748-8ebf-02d45ac2a446','isActive':false,'balance':'$3,100.70','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Pace','last':'Duke'},'company':'SEQUITUR','email':'pace.duke@sequitur.name','phone':'+1 (983) 568-3119','address':'895 Melrose Street, Reno, Connecticut, 6259','about':'Ex veniam aliquip exercitation mollit elit est minim veniam aliqua labore deserunt. Dolor sunt sint cillum Lorem nisi ea irure cupidatat. Velit ut culpa cupidatat consequat cillum. Sint voluptate quis laboris qui incididunt do elit Lorem qui ullamco ut eu pariatur occaecat.','registered':'Saturday, August 18, 2018 2:18 PM','latitude':'31.930443','longitude':'-129.494784','tags':['culpa','est','nostrud','quis','aliquip'],'greeting':'Hello, Pace! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb908d85642ba77e8','index':30,'guid':'3edb6e42-367a-403d-a511-eb78bcc11f60','isActive':true,'balance':'$1,912.07','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Cohen','last':'Morrison'},'company':'POWERNET','email':'cohen.morrison@powernet.us','phone':'+1 (888) 597-2141','address':'565 Troutman Street, Idledale, West Virginia, 3196','about':'Ullamco voluptate duis commodo amet occaecat consequat et occaecat dolore nulla eu. Do aliqua sunt deserunt occaecat laboris labore voluptate cupidatat ullamco exercitation aliquip elit voluptate anim. Occaecat deserunt in labore cillum aute deserunt ea excepteur laboris sunt. 
Officia irure sint incididunt labore sint ipsum ullamco ea elit. Fugiat nostrud sunt ut officia mollit proident sunt dolor fugiat esse tempor do.','registered':'Friday, January 1, 2016 5:42 AM','latitude':'-20.01215','longitude':'26.361552','tags':['consectetur','sunt','nulla','reprehenderit','dolore'],'greeting':'Hello, Cohen! You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed91c77aa25a64a757','index':31,'guid':'8999a97b-0035-4f19-b555-91dd69aaa9b8','isActive':false,'balance':'$3,097.67','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Stout','last':'Valdez'},'company':'UPLINX','email':'stout.valdez@uplinx.biz','phone':'+1 (854) 480-3633','address':'880 Chestnut Avenue, Lowgap, Hawaii, 1537','about':'Cupidatat enim dolore non voluptate. Aliqua ut non Lorem in exercitation reprehenderit voluptate. Excepteur deserunt tempor laboris quis.','registered':'Wednesday, March 16, 2016 6:53 AM','latitude':'50.328393','longitude':'-25.990308','tags':['ea','fugiat','duis','consectetur','enim'],'greeting':'Hello, Stout! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0f52176c8c3e1bed','index':32,'guid':'743abcbd-1fab-4aed-8cb7-3c935eb64c74','isActive':false,'balance':'$1,118.54','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Ortega','last':'Joseph'},'company':'APEXIA','email':'ortega.joseph@apexia.ca','phone':'+1 (872) 596-3024','address':'304 Canda Avenue, Mulino, New York, 8721','about':'Ipsum elit id cupidatat minim nisi minim. Ea ex amet ea ipsum Lorem deserunt. Occaecat cupidatat magna cillum aliquip sint id quis amet nostrud officia enim laborum. Aliqua deserunt amet commodo laboris labore mollit est. Officia voluptate Lorem esse mollit aliquip laboris cupidatat minim et. 
Labore esse incididunt officia nostrud pariatur reprehenderit.','registered':'Tuesday, January 31, 2017 6:06 AM','latitude':'43.861714','longitude':'33.771783','tags':['ut','Lorem','esse','quis','fugiat'],'greeting':'Hello, Ortega! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed2c00cdd101b6cd52','index':33,'guid':'4f6f99cf-f692-4d03-b23a-26f2b27273bd','isActive':true,'balance':'$1,682.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Sampson','last':'Taylor'},'company':'GEOFORMA','email':'sampson.taylor@geoforma.info','phone':'+1 (911) 482-2993','address':'582 Kent Street, Umapine, Virgin Islands, 5300','about':'Voluptate laboris occaecat laboris tempor cillum quis cupidatat qui pariatur. Lorem minim commodo mollit adipisicing Lorem ut dolor consectetur ipsum. Sint sit voluptate labore aliqua ex labore velit. Ullamco tempor consectetur voluptate deserunt voluptate minim enim. Cillum commodo duis reprehenderit eu duis.','registered':'Thursday, November 9, 2017 11:24 PM','latitude':'24.949379','longitude':'155.034468','tags':['Lorem','cupidatat','elit','reprehenderit','commodo'],'greeting':'Hello, Sampson! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b7210ba0bc0d508','index':34,'guid':'73fd415f-f8cf-43e0-a86c-e725d000abd4','isActive':false,'balance':'$1,289.37','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Shari','last':'Melendez'},'company':'DIGIPRINT','email':'shari.melendez@digiprint.tv','phone':'+1 (914) 475-3995','address':'950 Wolf Place, Enetai, Alaska, 693','about':'Dolor incididunt et est commodo aliquip labore ad ullamco. Velit ex cillum nulla elit ex esse. Consectetur mollit fugiat cillum proident elit sunt non officia cillum ex laboris sint eu. Esse nulla eu officia in Lorem sint minim esse velit. Est Lorem ipsum enim aute. 
Elit minim eiusmod officia reprehenderit officia ut irure Lorem.','registered':'Wednesday, August 23, 2017 11:12 PM','latitude':'-70.347863','longitude':'94.812072','tags':['ea','ex','fugiat','duis','eu'],'greeting':'Hello, Shari! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed85ac364619d892ef','index':35,'guid':'c1905f34-14ff-4bd8-b683-02cac4d52623','isActive':false,'balance':'$2,538.50','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Santiago','last':'Joyner'},'company':'BRAINCLIP','email':'santiago.joyner@brainclip.co.uk','phone':'+1 (835) 405-2676','address':'554 Rose Street, Muir, Kentucky, 7752','about':'Quis culpa dolore fugiat magna culpa non deserunt consectetur elit. Id cupidatat occaecat duis irure ullamco elit in labore magna pariatur cillum est. Mollit dolore velit ipsum anim aliqua culpa sint. Occaecat aute anim ut sunt eu.','registered':'Thursday, January 18, 2018 4:49 PM','latitude':'57.057918','longitude':'-50.472596','tags':['ullamco','ullamco','sunt','voluptate','irure'],'greeting':'Hello, Santiago! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1763f56b1121fa88','index':36,'guid':'a7f50659-4ae3-4f3e-a9d8-087e05334b51','isActive':false,'balance':'$1,435.16','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Adeline','last':'Hoffman'},'company':'BITREX','email':'adeline.hoffman@bitrex.io','phone':'+1 (823) 488-3201','address':'221 Corbin Place, Edmund, Palau, 193','about':'Magna ullamco consectetur velit adipisicing cillum ea. Est qui incididunt est ullamco ex aute exercitation irure. Cupidatat consectetur proident qui fugiat do. Labore magna aliqua consectetur fugiat. Excepteur deserunt sit qui dolor fugiat aute sunt anim ipsum magna ea commodo qui. Minim eu adipisicing ut irure excepteur eiusmod aliqua. 
Voluptate nisi ad consequat qui.','registered':'Tuesday, June 14, 2016 9:26 AM','latitude':'-53.123355','longitude':'88.180776','tags':['non','est','commodo','ut','aliquip'],'greeting':'Hello, Adeline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed945d079f63e3185e','index':37,'guid':'1f4619e0-9289-4bea-a9db-a75f4cba1138','isActive':true,'balance':'$2,019.54','picture':'http://placehold.it/32x32','age':36,'eyeColor':'blue','name':{'first':'Porter','last':'Morse'},'company':'COMVOY','email':'porter.morse@comvoy.me','phone':'+1 (933) 562-3220','address':'416 India Street, Bourg, Rhode Island, 2266','about':'Et sint anim et sunt. Non mollit sunt cillum veniam sunt sint amet non mollit. Fugiat ea ullamco pariatur deserunt ex do minim irure irure.','registered':'Saturday, July 16, 2016 10:03 PM','latitude':'-81.782545','longitude':'69.783509','tags':['irure','consequat','veniam','nulla','velit'],'greeting':'Hello, Porter! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed411dd0f06c66bba6','index':38,'guid':'93c900f0-54c0-4c4c-b21d-d59d8d7c6177','isActive':true,'balance':'$3,764.84','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Fitzgerald','last':'Logan'},'company':'UTARIAN','email':'fitzgerald.logan@utarian.com','phone':'+1 (815) 461-2709','address':'498 Logan Street, Tonopah, Arkansas, 6652','about':'Quis Lorem sit est et dolor est esse in veniam. Mollit anim nostrud laboris consequat voluptate qui ad ipsum sint laborum exercitation quis ipsum. Incididunt cupidatat esse ea amet deserunt consequat eu proident duis adipisicing pariatur. Amet deserunt mollit aliquip mollit consequat sunt quis labore laboris quis. Magna cillum fugiat anim velit Lorem duis. Lorem duis amet veniam occaecat est excepteur ut ea velit esse non pariatur. 
Do veniam quis eu consequat ad duis incididunt minim dolore sit non minim adipisicing et.','registered':'Wednesday, August 9, 2017 9:20 PM','latitude':'24.480657','longitude':'-108.693421','tags':['dolore','ad','occaecat','quis','labore'],'greeting':'Hello, Fitzgerald! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb6f14559d8a7b28','index':39,'guid':'9434f48b-70a0-4161-8d06-c53bf8b9df94','isActive':true,'balance':'$3,713.47','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Mcconnell','last':'Nash'},'company':'TETAK','email':'mcconnell.nash@tetak.org','phone':'+1 (956) 477-3586','address':'853 Turnbull Avenue, Clarence, Missouri, 1599','about':'Culpa excepteur minim anim magna dolor dolore ad ex eu. In cupidatat cillum elit dolore in est minim dolore consectetur reprehenderit voluptate laborum. Deserunt id velit ad dolor mollit.','registered':'Saturday, November 10, 2018 9:27 AM','latitude':'1.691589','longitude':'143.704377','tags':['ut','deserunt','sit','cupidatat','ea'],'greeting':'Hello, Mcconnell! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed1a87ea0390733ffa','index':40,'guid':'ec8a55f7-7114-4787-b1ff-4e631731bc2c','isActive':true,'balance':'$2,200.71','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Kitty','last':'Meyers'},'company':'FIBEROX','email':'kitty.meyers@fiberox.net','phone':'+1 (864) 458-3826','address':'537 Georgia Avenue, Thermal, Illinois, 7930','about':'Non excepteur laboris Lorem magna adipisicing exercitation. Anim esse in pariatur minim ipsum qui voluptate irure. Pariatur Lorem pariatur esse commodo aute adipisicing anim commodo. Exercitation nostrud aliqua duis et amet amet tempor.','registered':'Tuesday, September 13, 2016 8:16 PM','latitude':'19.59506','longitude':'-57.814297','tags':['duis','ullamco','velit','sint','consequat'],'greeting':'Hello, Kitty! 
You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed4dc76717bf1217b3','index':41,'guid':'40521cde-f835-4620-902b-af7abf185d8d','isActive':false,'balance':'$2,907.02','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Klein','last':'Goodwin'},'company':'PLASTO','email':'klein.goodwin@plasto.name','phone':'+1 (950) 563-3104','address':'764 Devoe Street, Lindcove, Oklahoma, 458','about':'Amet aliqua magna ea veniam non aliquip irure esse id ipsum cillum sint tempor dolor. Ullamco deserunt fugiat amet pariatur culpa nostrud commodo commodo. Ad occaecat magna adipisicing voluptate. Minim ad adipisicing cupidatat elit nostrud eu irure. Cupidatat occaecat aute magna consectetur dolore anim et. Ex voluptate velit exercitation laborum ad ullamco ad. Aliquip nulla ipsum dolore cillum qui nostrud eu adipisicing amet tempor do.','registered':'Tuesday, February 13, 2018 3:56 PM','latitude':'-27.168725','longitude':'-29.499285','tags':['minim','labore','do','deserunt','dolor'],'greeting':'Hello, Klein! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1ac77396b29aee9e','index':42,'guid':'7cfc03e3-30e9-4ae1-a1f5-f6c3223ca770','isActive':true,'balance':'$2,986.47','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Isabelle','last':'Bishop'},'company':'GEEKNET','email':'isabelle.bishop@geeknet.us','phone':'+1 (908) 418-2642','address':'729 Willmohr Street, Aguila, Montana, 7510','about':'In nulla commodo nostrud sint. Elit et occaecat et aliqua aliquip magna esse commodo duis Lorem dolor magna enim deserunt. Ipsum pariatur reprehenderit ipsum adipisicing mollit incididunt ut. Sunt in consequat ex ut minim non qui anim labore. Deserunt minim voluptate in nulla occaecat.','registered':'Monday, September 15, 2014 6:22 AM','latitude':'-81.686947','longitude':'38.409291','tags':['proident','est','aliqua','veniam','anim'],'greeting':'Hello, Isabelle! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3a070c9469a4893','index':43,'guid':'3dec76b4-0b55-4765-a2fd-b8dbd9c82f8f','isActive':true,'balance':'$2,501.24','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Josefina','last':'Turner'},'company':'COMSTAR','email':'josefina.turner@comstar.biz','phone':'+1 (908) 566-3029','address':'606 Schenck Place, Brutus, Vermont, 8681','about':'Enim consectetur pariatur sint dolor nostrud est deserunt nulla quis pariatur sit. Ad aute incididunt nisi excepteur duis est velit voluptate ullamco occaecat magna reprehenderit aliquip. Proident deserunt consectetur non et exercitation elit dolore enim aliqua incididunt anim amet. Ex esse sint commodo minim aliqua ut irure. Proident ex culpa voluptate fugiat nisi. Sint commodo laboris excepteur minim ipsum labore tempor quis magna.','registered':'Saturday, December 31, 2016 6:38 AM','latitude':'35.275088','longitude':'24.30485','tags':['minim','ut','irure','Lorem','veniam'],'greeting':'Hello, Josefina! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1aa7d74128ee3d0f','index':44,'guid':'10599279-c367-46c4-9f7a-744c2e4bf6c9','isActive':true,'balance':'$1,753.06','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Lily','last':'Haynes'},'company':'KIOSK','email':'lily.haynes@kiosk.ca','phone':'+1 (872) 451-2301','address':'509 Balfour Place, Grazierville, New Hampshire, 2750','about':'Nisi aliquip occaecat nostrud do sint qui nisi officia Lorem. Ad et et laboris nisi dolore aliqua eu. Aliqua veniam quis eu pariatur incididunt mollit id deserunt officia eiusmod. Consequat adipisicing do nisi voluptate eiusmod minim pariatur minim nisi nostrud culpa cupidatat. Irure consectetur id consequat adipisicing ullamco occaecat do. Ex proident ea quis nulla incididunt sunt excepteur incididunt. 
Aliquip minim nostrud non anim Lorem.','registered':'Tuesday, November 20, 2018 9:28 AM','latitude':'-12.677798','longitude':'114.506787','tags':['culpa','amet','elit','officia','irure'],'greeting':'Hello, Lily! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed74c76f2e84e201ce','index':45,'guid':'ec0a68d4-629e-46c9-9af7-f6ea867f02ba','isActive':true,'balance':'$1,477.93','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Shauna','last':'Pitts'},'company':'SPACEWAX','email':'shauna.pitts@spacewax.info','phone':'+1 (841) 406-2360','address':'348 Tabor Court, Westwood, Puerto Rico, 8297','about':'Aliquip irure officia magna ea magna mollit ea non amet deserunt. Veniam mollit labore culpa magna aliqua quis consequat est consectetur ea reprehenderit nostrud consequat aliqua. Mollit do ipsum mollit eiusmod.','registered':'Thursday, October 2, 2014 2:48 AM','latitude':'-55.17388','longitude':'-13.370494','tags':['anim','consectetur','cillum','veniam','duis'],'greeting':'Hello, Shauna! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed419e718484b16722','index':46,'guid':'b2d6101d-5646-43f4-8207-284494e5a990','isActive':false,'balance':'$2,006.96','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Lawrence','last':'Boyer'},'company':'SKYPLEX','email':'lawrence.boyer@skyplex.tv','phone':'+1 (953) 548-2618','address':'464 Pilling Street, Blandburg, Arizona, 5531','about':'Culpa sit minim pariatur mollit cupidatat sunt duis. Nisi ea proident veniam exercitation adipisicing Lorem aliquip amet dolor voluptate in nisi. Non commodo anim sunt est fugiat laborum nisi aliqua non Lorem exercitation dolor. Laboris dolore do minim ut eiusmod enim magna cillum laborum consectetur aliquip minim enim Lorem. 
Veniam ex veniam occaecat aliquip elit aliquip est eiusmod minim minim adipisicing.','registered':'Wednesday, July 30, 2014 2:17 AM','latitude':'-78.681255','longitude':'139.960626','tags':['consequat','Lorem','incididunt','dolor','esse'],'greeting':'Hello, Lawrence! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed08a9024998292c70','index':47,'guid':'277de142-ebeb-4828-906a-7fd8bc0a738a','isActive':true,'balance':'$1,273.19','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Sonya','last':'Stafford'},'company':'AQUACINE','email':'sonya.stafford@aquacine.co.uk','phone':'+1 (824) 581-3927','address':'641 Bowery Street, Hillsboro, Delaware, 7893','about':'Culpa labore ex reprehenderit mollit cupidatat dolore et ut quis in. Sint esse culpa enim culpa tempor exercitation veniam minim consectetur. Sunt est laboris minim quis incididunt exercitation laboris cupidatat fugiat ad. Deserunt ipsum do dolor cillum excepteur incididunt.','registered':'Thursday, March 26, 2015 1:10 PM','latitude':'-84.750592','longitude':'165.493533','tags':['minim','officia','dolore','ipsum','est'],'greeting':'Hello, Sonya! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5037f2c79ecde68','index':48,'guid':'2dc6532f-9a26-49aa-b444-8923896db89c','isActive':false,'balance':'$3,168.93','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Marguerite','last':'Stuart'},'company':'ACCUFARM','email':'marguerite.stuart@accufarm.io','phone':'+1 (848) 535-2253','address':'301 Menahan Street, Sunnyside, Nebraska, 4809','about':'Deserunt sint labore voluptate amet anim culpa nostrud adipisicing enim cupidatat ullamco exercitation fugiat est. Magna dolor aute incididunt ea ad adipisicing. 
Do cupidatat ut officia officia culpa sit do.','registered':'Thursday, May 8, 2014 1:25 PM','latitude':'21.82277','longitude':'-7.368347','tags':['labore','nulla','ullamco','irure','adipisicing'],'greeting':'Hello, Marguerite! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb26d315635818dae','index':49,'guid':'083a5eda-0a70-4f89-87f7-2cd386c0f22a','isActive':false,'balance':'$2,576.25','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Louella','last':'Holloway'},'company':'BEDDER','email':'louella.holloway@bedder.me','phone':'+1 (801) 425-3761','address':'545 Lafayette Avenue, Caledonia, Louisiana, 2816','about':'Qui exercitation occaecat dolore mollit. Fugiat cupidatat proident culpa fugiat quis. In cupidatat commodo elit ea enim occaecat esse exercitation nostrud occaecat veniam laboris fugiat. Nisi sunt reprehenderit aliqua reprehenderit tempor id dolore ullamco pariatur reprehenderit et eu ex pariatur.','registered':'Wednesday, November 5, 2014 1:10 AM','latitude':'36.385637','longitude':'77.949423','tags':['eu','irure','velit','non','aliquip'],'greeting':'Hello, Louella! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed77cd60a1abc1ecce','index':50,'guid':'2887c3c1-3eba-4237-a0db-1977eed94554','isActive':true,'balance':'$1,633.51','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Bates','last':'Carrillo'},'company':'ZOMBOID','email':'bates.carrillo@zomboid.com','phone':'+1 (934) 405-2006','address':'330 Howard Alley, Troy, Kansas, 4881','about':'Voluptate esse est ullamco anim tempor ea reprehenderit. Occaecat pariatur deserunt cillum laboris labore id exercitation esse ipsum ipsum ex aliquip. Sunt non elit est ea occaecat. Magna deserunt commodo aliqua ipsum est cillum dolor nisi. Ex duis est tempor tempor laboris do do quis id magna. 
Dolor do est elit eu laborum ullamco culpa consequat velit eiusmod tempor.','registered':'Saturday, May 28, 2016 3:56 AM','latitude':'83.310134','longitude':'-105.862836','tags':['est','commodo','ea','commodo','sunt'],'greeting':'Hello, Bates! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5ec0ec299b471fb5','index':51,'guid':'512b5e67-f785-492e-9d94-e43ef8b399b8','isActive':false,'balance':'$3,032.22','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Floyd','last':'Yang'},'company':'FRENEX','email':'floyd.yang@frenex.org','phone':'+1 (924) 566-3304','address':'418 Quay Street, Chumuckla, Guam, 7743','about':'Irure sit velit exercitation dolore est nisi incididunt ut quis consectetur incididunt est dolor. Aute nisi enim esse aliquip enim culpa commodo consectetur. Duis laborum magna ad duis ipsum aliqua eiusmod cillum. Consectetur et duis eiusmod irure ad est nisi incididunt eiusmod labore. Pariatur proident in Lorem adipisicing mollit proident excepteur nulla do nostrud mollit eiusmod. Duis ad dolore irure fugiat anim laboris ipsum et sit duis ipsum voluptate. Lorem non aute exercitation qui ullamco officia minim sint pariatur ut dolor.','registered':'Wednesday, January 18, 2017 2:01 AM','latitude':'45.888721','longitude':'-41.232793','tags':['elit','in','esse','ea','officia'],'greeting':'Hello, Floyd! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51e26ca89e5caf49','index':52,'guid':'4e0907f6-facc-46df-8952-73561a53fe33','isActive':true,'balance':'$3,767.41','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Gardner','last':'Carey'},'company':'KLUGGER','email':'gardner.carey@klugger.net','phone':'+1 (876) 481-3502','address':'131 Utica Avenue, Cannondale, Federated States Of Micronesia, 610','about':'Amet ad pariatur excepteur anim ex officia commodo proident aliqua occaecat consequat Lorem officia sit. 
Id minim velit nisi laboris nisi nulla incididunt eiusmod velit. Deserunt labore quis et tempor. Et labore exercitation laborum officia ullamco nostrud adipisicing laboris esse laborum aute anim elit. Sunt ad officia tempor esse et quis aliquip irure pariatur laborum id quis ex. Eu consequat nisi deserunt id eu proident ex minim aute nulla tempor ex.','registered':'Friday, February 21, 2014 6:42 AM','latitude':'-54.740231','longitude':'15.01484','tags':['commodo','laboris','occaecat','aliquip','adipisicing'],'greeting':'Hello, Gardner! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed52e3c9407105093a','index':53,'guid':'1d3b9e7a-1bc3-40ea-b808-1c33f0d48c70','isActive':true,'balance':'$1,113.30','picture':'http://placehold.it/32x32','age':26,'eyeColor':'blue','name':{'first':'Herman','last':'Rogers'},'company':'TALENDULA','email':'herman.rogers@talendula.name','phone':'+1 (818) 521-2005','address':'541 Norman Avenue, Winfred, Tennessee, 447','about':'Culpa ex laborum non ad ullamco officia. Nisi mollit mollit voluptate sit sint ullamco. Lorem exercitation nulla anim eiusmod deserunt magna sint. Officia sunt eiusmod aliqua reprehenderit sunt mollit sit cupidatat sint.','registered':'Wednesday, July 11, 2018 1:05 AM','latitude':'-20.708105','longitude':'-151.294563','tags':['exercitation','minim','officia','qui','enim'],'greeting':'Hello, Herman! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edfcb123d545b6edb4','index':54,'guid':'c0e0c669-4eed-43ee-bdd0-78fe6e9ca4d5','isActive':true,'balance':'$3,309.64','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Whitley','last':'Stark'},'company':'MUSAPHICS','email':'whitley.stark@musaphics.us','phone':'+1 (803) 476-2151','address':'548 Cobek Court, Chamizal, Indiana, 204','about':'Adipisicing veniam dolor ex sint sit id eu voluptate. Excepteur veniam proident exercitation id eu et sunt pariatur. 
Qui occaecat culpa aliqua nisi excepteur minim veniam. Est duis nulla laborum excepteur cillum pariatur sint incididunt. Velit commodo eu incididunt voluptate. Amet laboris laboris id adipisicing labore eiusmod consequat minim cillum et.','registered':'Thursday, March 27, 2014 9:10 AM','latitude':'71.219596','longitude':'51.012855','tags':['reprehenderit','mollit','laborum','voluptate','aliquip'],'greeting':'Hello, Whitley! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed81510dfc61602fcf','index':55,'guid':'7ec5c24d-f169-4399-a2a3-300c0f45e52e','isActive':false,'balance':'$3,721.04','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Gretchen','last':'Wade'},'company':'EWEVILLE','email':'gretchen.wade@eweville.biz','phone':'+1 (977) 598-3700','address':'721 Colonial Road, Brookfield, South Dakota, 3888','about':'Fugiat consequat sint ut ut et ullamco eiusmod deserunt pariatur. Veniam eiusmod esse fugiat mollit. Proident laboris minim qui do ipsum excepteur exercitation irure anim. Aliqua labore quis eu fugiat dolore ullamco velit Lorem voluptate ipsum nostrud eiusmod laborum proident.','registered':'Friday, October 12, 2018 10:59 AM','latitude':'41.937653','longitude':'63.378531','tags':['aute','cillum','ea','ex','aute'],'greeting':'Hello, Gretchen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf78f77d4a7d557bb','index':56,'guid':'8718ada7-6fd0-49ef-a405-29850503948b','isActive':false,'balance':'$3,341.33','picture':'http://placehold.it/32x32','age':32,'eyeColor':'blue','name':{'first':'Naomi','last':'Frye'},'company':'MAZUDA','email':'naomi.frye@mazuda.ca','phone':'+1 (825) 427-2255','address':'741 Coyle Street, Comptche, Pennsylvania, 8441','about':'Aliqua fugiat laborum quis ullamco cupidatat sit dolor nulla dolore. Do Lorem et ipsum culpa irure sit do dolor qui sit laboris aliqua. 
Ex consectetur irure in veniam reprehenderit amet do elit eiusmod est magna.','registered':'Thursday, January 9, 2014 7:18 AM','latitude':'41.078645','longitude':'-50.241966','tags':['do','aliquip','eiusmod','velit','id'],'greeting':'Hello, Naomi! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbf45db2e072a48b4','index':57,'guid':'c158ebf7-fb8b-4ea8-adbf-8c51c6486715','isActive':true,'balance':'$2,811.55','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Lamb','last':'Johns'},'company':'DOGTOWN','email':'lamb.johns@dogtown.info','phone':'+1 (946) 530-3057','address':'559 Malbone Street, Kennedyville, California, 2052','about':'Eiusmod dolor labore cillum ad veniam elit voluptate voluptate pariatur est cupidatat. Laboris ut qui in cillum sunt dolore ut enim. Minim nostrud ex qui quis reprehenderit magna ipsum cupidatat irure minim laboris veniam irure. Fugiat velit deserunt aliquip in esse proident excepteur labore reprehenderit excepteur sunt in cupidatat exercitation. Ex pariatur irure mollit tempor non magna ex.','registered':'Friday, April 21, 2017 1:51 AM','latitude':'-61.403599','longitude':'-93.447102','tags':['aliquip','tempor','sint','enim','ipsum'],'greeting':'Hello, Lamb! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb9c88190cb59cf2','index':58,'guid':'f0de5ac5-eb28-491b-81c5-76d447c9055e','isActive':true,'balance':'$1,611.99','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Lynette','last':'Cleveland'},'company':'ARTWORLDS','email':'lynette.cleveland@artworlds.tv','phone':'+1 (889) 596-3723','address':'439 Montauk Avenue, Felt, New Mexico, 9681','about':'Incididunt aliquip est aliquip est ullamco do consectetur dolor. Lorem mollit mollit dolor et ipsum ut qui veniam aute ea. Adipisicing reprehenderit culpa velit laborum adipisicing amet consectetur velit nisi. Ut qui proident ad cillum excepteur adipisicing quis labore. 
Duis velit culpa et excepteur eiusmod ex labore in nisi nostrud. Et ullamco minim excepteur ut enim reprehenderit consequat eiusmod laboris Lorem commodo exercitation qui laborum.','registered':'Wednesday, August 26, 2015 12:53 PM','latitude':'49.861336','longitude':'86.865926','tags':['reprehenderit','minim','in','minim','nostrud'],'greeting':'Hello, Lynette! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5b760ddde7295fa8','index':59,'guid':'f8180d3f-c5c0-48b2-966e-a0b2a80f8e84','isActive':true,'balance':'$3,376.75','picture':'http://placehold.it/32x32','age':32,'eyeColor':'green','name':{'first':'Obrien','last':'Page'},'company':'GLASSTEP','email':'obrien.page@glasstep.co.uk','phone':'+1 (902) 583-3086','address':'183 Ridgewood Avenue, Vicksburg, Wisconsin, 7430','about':'Aute excepteur cillum exercitation duis Lorem irure labore elit. Labore magna cupidatat velit consectetur minim do Lorem in excepteur commodo ea consequat ullamco laborum. Ut in id occaecat eu quis duis id ea deserunt veniam.','registered':'Wednesday, March 29, 2017 12:13 AM','latitude':'-40.156154','longitude':'72.76301','tags':['excepteur','non','anim','nulla','anim'],'greeting':'Hello, Obrien! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed52985d3d8901d653','index':60,'guid':'d2e14fa1-8c54-4bcb-8a58-eb2e6f8d0e45','isActive':true,'balance':'$1,659.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'brown','name':{'first':'Knowles','last':'Goodman'},'company':'CENTREE','email':'knowles.goodman@centree.io','phone':'+1 (862) 563-3692','address':'504 Lott Street, Allensworth, Florida, 7148','about':'Do aliquip voluptate aliqua nostrud. Eu dolore ex occaecat pariatur aute laborum aute nulla aute amet. Excepteur sit laboris ad non anim ut officia ut ad exercitation officia dolore laboris. Esse voluptate minim deserunt nostrud exercitation laborum voluptate exercitation id laborum fugiat proident cupidatat proident. 
Nulla nostrud est sint adipisicing incididunt exercitation dolor sit et elit tempor occaecat sint culpa. Pariatur occaecat laboris pariatur laboris ad pariatur in cillum fugiat est fugiat. Proident eu id irure excepteur esse aute cillum adipisicing.','registered':'Wednesday, October 15, 2014 6:17 PM','latitude':'-15.73863','longitude':'87.422009','tags':['consequat','sint','tempor','veniam','culpa'],'greeting':'Hello, Knowles! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda00b73bdb7ea54e9','index':61,'guid':'c8a064db-0ec6-4832-9820-7280a0333709','isActive':true,'balance':'$3,701.14','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Shepherd','last':'Todd'},'company':'ECRATIC','email':'shepherd.todd@ecratic.me','phone':'+1 (881) 444-3389','address':'450 Frank Court, Temperanceville, Ohio, 7006','about':'Voluptate cillum ad fugiat velit adipisicing sint consequat veniam Lorem reprehenderit. Cillum sit non deserunt consequat. Amet sunt pariatur non mollit ullamco proident sint dolore anim elit cupidatat anim do ullamco. Lorem Lorem incididunt ea elit consequat laboris enim duis quis Lorem id aute veniam consequat. Cillum veniam cillum sint qui Lorem fugiat culpa consequat. Est sint duis ut qui fugiat. Laborum pariatur velit et sunt mollit eiusmod excepteur culpa ex et officia.','registered':'Tuesday, October 10, 2017 2:01 AM','latitude':'82.951563','longitude':'-4.866954','tags':['eu','qui','proident','esse','ex'],'greeting':'Hello, Shepherd! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed0e51d1a7e2d9e559','index':62,'guid':'739c3d38-200d-4531-84d8-4e7c39ae5b8c','isActive':true,'balance':'$3,679.01','picture':'http://placehold.it/32x32','age':31,'eyeColor':'brown','name':{'first':'Rosalyn','last':'Heath'},'company':'ZAYA','email':'rosalyn.heath@zaya.com','phone':'+1 (865) 403-3520','address':'303 Henderson Walk, Hoehne, District Of Columbia, 4306','about':'Sint occaecat nulla mollit sint fugiat eu proident dolor labore consequat. Occaecat tempor excepteur do fugiat incididunt Lorem in ullamco dolore laborum. Cillum mollit aliquip excepteur aliquip sint sunt minim non irure irure. Cillum fugiat aliqua enim dolore. Nulla culpa culpa nostrud ad. Eiusmod culpa proident proident non est cupidatat eu sunt sit incididunt id nisi.','registered':'Wednesday, April 22, 2015 12:35 PM','latitude':'33.628504','longitude':'110.772802','tags':['consequat','ut','ex','labore','consectetur'],'greeting':'Hello, Rosalyn! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5274c01d353d0c5','index':63,'guid':'8815fe55-8af1-4708-a62a-d554dbd74a4a','isActive':true,'balance':'$2,126.01','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Queen','last':'Harper'},'company':'TRI@TRIBALOG','email':'queen.harper@tri@tribalog.org','phone':'+1 (903) 592-3145','address':'926 Heath Place, Wawona, Maine, 7340','about':'Laborum cupidatat commodo aliquip reprehenderit. Excepteur eu labore duis minim minim voluptate aute nostrud deserunt ut velit ullamco. Adipisicing nisi occaecat laborum proident. Id reprehenderit eiusmod cupidatat qui aute consequat amet enim commodo duis non ipsum. Amet ut aliqua magna qui proident mollit aute.','registered':'Saturday, April 9, 2016 5:12 AM','latitude':'51.814216','longitude':'177.348115','tags':['cillum','ut','dolor','do','nisi'],'greeting':'Hello, Queen! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed126298b6ce62ed56','index':64,'guid':'001c87fe-182f-450f-903b-2e29a9bb0322','isActive':true,'balance':'$3,578.29','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Pauline','last':'Mills'},'company':'CRUSTATIA','email':'pauline.mills@crustatia.net','phone':'+1 (984) 582-3899','address':'899 Revere Place, Welch, Iowa, 216','about':'Tempor eu exercitation ut id. Deserunt ex reprehenderit veniam nisi. Aute laborum veniam velit dolore ut deserunt Lorem sit esse quis dolor ex do nisi. In dolor tempor officia id. Velit nisi culpa nostrud laborum officia incididunt laborum velit non quis id exercitation exercitation. Anim elit ullamco in enim Lorem culpa aliqua Lorem.','registered':'Monday, June 2, 2014 2:03 PM','latitude':'56.427576','longitude':'172.183669','tags':['pariatur','pariatur','pariatur','fugiat','Lorem'],'greeting':'Hello, Pauline! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e332ad9e8a178d8','index':65,'guid':'5ad7292b-feef-4a7e-b485-142cadfbe8ea','isActive':false,'balance':'$3,916.54','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Garrett','last':'Richmond'},'company':'XYQAG','email':'garrett.richmond@xyqag.name','phone':'+1 (952) 584-3794','address':'233 Grove Street, Summerfield, Virginia, 4735','about':'Nostrud quis pariatur occaecat laborum laboris aliqua ut fugiat dolor. Commodo tempor excepteur enim nostrud Lorem. Aute elit nulla labore ad pariatur cupidatat Lorem qui cupidatat velit deserunt excepteur esse. Excepteur nulla et nostrud quis labore est veniam enim nisi laboris ut enim. Ea esse nulla anim excepteur reprehenderit deserunt voluptate minim qui labore adipisicing amet eu enim.','registered':'Wednesday, March 5, 2014 4:35 PM','latitude':'68.665041','longitude':'148.799524','tags':['irure','reprehenderit','minim','ea','do'],'greeting':'Hello, Garrett! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed541aa2ec47466ace','index':66,'guid':'9cda6f3c-c9ab-451c-bb19-2e4c8463d011','isActive':true,'balance':'$3,352.52','picture':'http://placehold.it/32x32','age':30,'eyeColor':'brown','name':{'first':'Cobb','last':'Whitley'},'company':'UNIA','email':'cobb.whitley@unia.us','phone':'+1 (888) 490-3342','address':'864 Belmont Avenue, Needmore, Massachusetts, 8286','about':'Nisi aliquip fugiat ipsum nisi ullamco minim pariatur labore. Sint labore anim do ad ad esse eu nostrud nulla commodo anim. Cillum anim enim duis cillum non do nisi aliquip veniam voluptate commodo aliqua laborum. Exercitation in do eu qui sint aliquip. Esse adipisicing deserunt deserunt qui anim aliqua occaecat et nostrud elit ea in anim cillum. Tempor mollit proident tempor sunt est sint laborum ullamco incididunt non. Velit aliqua sunt excepteur nisi qui eiusmod ipsum dolore aliquip velit ullamco ullamco.','registered':'Friday, May 23, 2014 7:11 PM','latitude':'-32.950581','longitude':'147.772494','tags':['mollit','adipisicing','irure','ad','minim'],'greeting':'Hello, Cobb! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8186c3d6f34c2be3','index':67,'guid':'fee98f6d-d68a-4189-8180-b6cb337e537e','isActive':false,'balance':'$1,698.42','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Brennan','last':'Tyler'},'company':'PODUNK','email':'brennan.tyler@podunk.biz','phone':'+1 (867) 498-2727','address':'599 Harkness Avenue, Gorst, American Samoa, 322','about':'Reprehenderit id sit qui id qui aute ea sit magna in qui proident. Excepteur ad nostrud do nostrud in incididunt voluptate adipisicing sint anim. Ullamco consequat minim nulla irure ex est irure reprehenderit deserunt voluptate dolore anim sunt. 
Occaecat dolore voluptate voluptate elit commodo nulla laborum ad do irure.','registered':'Friday, February 9, 2018 5:40 PM','latitude':'11.150893','longitude':'-85.298004','tags':['quis','minim','deserunt','cillum','laboris'],'greeting':'Hello, Brennan! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed075c9c4f7439818d','index':68,'guid':'1ef76b18-6b8d-4c3c-aca3-9fa2b43f0242','isActive':false,'balance':'$2,091.17','picture':'http://placehold.it/32x32','age':26,'eyeColor':'brown','name':{'first':'Neal','last':'Stephenson'},'company':'OTHERSIDE','email':'neal.stephenson@otherside.ca','phone':'+1 (820) 496-3344','address':'867 Wilson Street, Kidder, Colorado, 4599','about':'Do laboris enim proident in qui velit adipisicing magna anim. Amet proident non exercitation ipsum aliqua excepteur nostrud. Enim esse non sit in nostrud deserunt id laborum cillum deserunt consequat. Anim velit exercitation qui sit voluptate. Irure duis non veniam velit mollit exercitation id exercitation.','registered':'Thursday, November 13, 2014 11:00 PM','latitude':'54.809693','longitude':'1.877241','tags':['anim','duis','in','officia','sint'],'greeting':'Hello, Neal! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0eda0a2dc24db64b638','index':69,'guid':'194744fd-089b-40b6-a290-98a6ec30a415','isActive':false,'balance':'$3,191.67','picture':'http://placehold.it/32x32','age':24,'eyeColor':'brown','name':{'first':'Shields','last':'Hubbard'},'company':'MIRACULA','email':'shields.hubbard@miracula.info','phone':'+1 (885) 582-2001','address':'529 Eagle Street, Guilford, Nevada, 1460','about':'Eiusmod exercitation ut incididunt veniam commodo culpa ullamco mollit id adipisicing exercitation ad sint. Nostrud excepteur amet aliqua mollit incididunt laborum voluptate id anim. Nulla sint laboris dolor esse cupidatat laborum ex sint. 
Ex non sunt sit nulla.','registered':'Monday, February 13, 2017 6:22 AM','latitude':'-69.145209','longitude':'-40.69755','tags':['tempor','enim','qui','velit','elit'],'greeting':'Hello, Shields! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf939c130177e074d','index':70,'guid':'303b176c-7803-4ed2-a35f-3e3c831793ef','isActive':false,'balance':'$2,359.09','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Coleen','last':'Knight'},'company':'BLEEKO','email':'coleen.knight@bleeko.tv','phone':'+1 (867) 423-3146','address':'527 Broadway , Bonanza, Marshall Islands, 4988','about':'Laboris nulla pariatur laborum ad aute excepteur sunt pariatur exercitation. Do nostrud qui ipsum ullamco et sint do Lorem cillum ullamco do. Exercitation labore excepteur commodo incididunt eiusmod proident consectetur adipisicing nostrud aute voluptate laboris. Commodo anim proident eiusmod pariatur est ea laborum incididunt qui tempor reprehenderit ullamco id. Eiusmod commodo nisi consectetur ut qui quis aliqua sit minim nostrud sunt laborum eiusmod adipisicing.','registered':'Sunday, May 6, 2018 8:03 AM','latitude':'70.729041','longitude':'113.052761','tags':['Lorem','ullamco','nulla','ullamco','commodo'],'greeting':'Hello, Coleen! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edae8b1ce688b61223','index':71,'guid':'7d6f3b1a-c367-4068-9e8e-1717d513ece3','isActive':false,'balance':'$2,911.07','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Clark','last':'Ryan'},'company':'ECLIPSENT','email':'clark.ryan@eclipsent.co.uk','phone':'+1 (938) 562-2740','address':'500 Lewis Avenue, Rockbridge, North Dakota, 5133','about':'Adipisicing exercitation officia sit excepteur excepteur sunt sint amet. Aliqua ipsum sint laboris eiusmod esse culpa elit sunt. Dolore est consectetur est quis quis magna. Aliquip nostrud dolore ex pariatur. Anim nostrud duis exercitation ut magna magna culpa. 
Nisi irure id mollit labore non sit mollit occaecat Lorem est ipsum. Nulla est fugiat cillum nisi aliqua consectetur amet nulla nostrud esse.','registered':'Friday, July 24, 2015 9:28 AM','latitude':'-68.055815','longitude':'-50.926966','tags':['deserunt','ad','ad','ut','id'],'greeting':'Hello, Clark! You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5d1e8df45d8ab4db','index':72,'guid':'ce85db37-7d04-4f4c-a4b0-78003533e5c6','isActive':false,'balance':'$1,127.43','picture':'http://placehold.it/32x32','age':21,'eyeColor':'green','name':{'first':'Dillon','last':'Hooper'},'company':'MEDESIGN','email':'dillon.hooper@medesign.io','phone':'+1 (929) 600-3797','address':'652 Mill Avenue, Elliston, Mississippi, 2958','about':'Dolore culpa qui exercitation nostrud do. Irure duis in ad ipsum aliqua aliquip nulla sit veniam officia quis occaecat est. Magna qui eiusmod pariatur aliquip minim commodo. Qui ex dolor excepteur consequat eiusmod occaecat. In officia ipsum do Lorem excepteur proident pariatur labore.','registered':'Monday, May 26, 2014 2:38 AM','latitude':'-36.032189','longitude':'86.865529','tags':['non','ut','ex','Lorem','quis'],'greeting':'Hello, Dillon! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb84814579c3121b3','index':73,'guid':'d7303901-5186-4595-a759-22306f67d0a3','isActive':true,'balance':'$2,326.59','picture':'http://placehold.it/32x32','age':33,'eyeColor':'green','name':{'first':'Moreno','last':'Hull'},'company':'ZEAM','email':'moreno.hull@zeam.me','phone':'+1 (984) 586-3738','address':'265 Pine Street, Talpa, North Carolina, 6041','about':'Fugiat exercitation est ullamco anim. Exercitation proident id sunt culpa Lorem amet. Consectetur anim consectetur pariatur consequat consectetur amet excepteur voluptate ea velit duis eiusmod proident. In sint laborum cupidatat ea amet ex. 
Reprehenderit amet sunt dolor ullamco est ex deserunt.','registered':'Wednesday, January 24, 2018 8:52 PM','latitude':'84.956857','longitude':'113.210051','tags':['est','excepteur','anim','Lorem','dolor'],'greeting':'Hello, Moreno! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda4eb9dcb92c82d06','index':74,'guid':'8ee28651-802e-4523-b676-c713f6e874b8','isActive':true,'balance':'$3,783.97','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Tracie','last':'Price'},'company':'ICOLOGY','email':'tracie.price@icology.com','phone':'+1 (897) 403-3768','address':'487 Sheffield Avenue, Vallonia, Wyoming, 276','about':'Voluptate laboris laborum aute ex sint voluptate officia proident. Sit esse nostrud cupidatat in veniam sit duis est. Do mollit elit exercitation aliqua id irure ex. Lorem reprehenderit do ullamco sint ea ad nisi ad ut.','registered':'Saturday, December 10, 2016 9:44 AM','latitude':'77.770464','longitude':'151.392903','tags':['incididunt','labore','aliquip','anim','minim'],'greeting':'Hello, Tracie! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed68ab1a55d1c35e6c','index':75,'guid':'deedd26a-8928-4064-9666-5c59ea8144b4','isActive':true,'balance':'$2,848.08','picture':'http://placehold.it/32x32','age':32,'eyeColor':'brown','name':{'first':'Montgomery','last':'Bruce'},'company':'CYTREK','email':'montgomery.bruce@cytrek.org','phone':'+1 (824) 414-2731','address':'397 Beach Place, Ellerslie, South Carolina, 967','about':'Mollit minim excepteur magna velit cillum excepteur exercitation anim id labore deserunt do. Fugiat ex et id ad. Duis excepteur laboris est nulla do id irure quis eiusmod do esse ut culpa in.','registered':'Tuesday, August 25, 2015 6:42 AM','latitude':'79.722631','longitude':'-7.516885','tags':['Lorem','sint','voluptate','proident','incididunt'],'greeting':'Hello, Montgomery! 
You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd90e0abb1cc2b0aa','index':76,'guid':'a072159d-12db-4747-9c2a-e2486a53d043','isActive':false,'balance':'$2,723.54','picture':'http://placehold.it/32x32','age':40,'eyeColor':'green','name':{'first':'Zelma','last':'Salinas'},'company':'IMAGEFLOW','email':'zelma.salinas@imageflow.net','phone':'+1 (964) 555-3856','address':'584 Reeve Place, Nord, Georgia, 7473','about':'Aliqua proident excepteur duis cupidatat cillum amet esse esse consectetur ea. Officia sunt consequat nostrud minim enim dolore dolor duis cillum. Esse labore veniam sint laborum excepteur sint tempor do ad cupidatat aliquip laboris elit id. Velit reprehenderit ullamco velit ullamco adipisicing velit esse irure velit et.','registered':'Thursday, February 25, 2016 8:18 PM','latitude':'-32.880524','longitude':'115.180489','tags':['id','nulla','reprehenderit','consequat','reprehenderit'],'greeting':'Hello, Zelma! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed98d836c8da283bb2','index':77,'guid':'838bebad-cc20-44e9-9eb7-902a8ca25efb','isActive':false,'balance':'$3,488.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Shaw','last':'Parsons'},'company':'PEARLESEX','email':'shaw.parsons@pearlesex.name','phone':'+1 (912) 567-3580','address':'606 Ocean Avenue, Tyro, Northern Mariana Islands, 3367','about':'Laborum labore occaecat culpa pariatur nisi non adipisicing esse consectetur officia officia. Deserunt velit eu enim consectetur ut cillum aliqua occaecat dolor qui esse. Incididunt ad est ex eu culpa anim aliquip laborum. Aliqua consectetur velit exercitation magna minim nulla do ut excepteur enim aliquip et. Nostrud enim sunt amet amet proident aliqua velit dolore. Consectetur ipsum fugiat proident id est reprehenderit tempor irure commodo. 
Sit excepteur fugiat occaecat nulla Lorem et cillum.','registered':'Thursday, April 19, 2018 1:41 AM','latitude':'69.715573','longitude':'-118.481237','tags':['laboris','adipisicing','magna','voluptate','id'],'greeting':'Hello, Shaw! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1101734633c6ebba','index':78,'guid':'8fd0c52a-9d74-4984-a608-d612ecd8ddf0','isActive':true,'balance':'$3,820.02','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Jaime','last':'Beard'},'company':'IZZBY','email':'jaime.beard@izzby.us','phone':'+1 (820) 412-3806','address':'362 Hudson Avenue, Delco, New Jersey, 5684','about':'Ut cupidatat veniam nulla magna commodo sit duis veniam consectetur cupidatat elit quis tempor. Duis officia ullamco proident sunt non mollit excepteur. Nisi ex amet laboris proident duis reprehenderit et est aliqua mollit amet ad. Enim eu elit excepteur eu exercitation duis consequat culpa. Adipisicing reprehenderit duis Lorem reprehenderit dolor aliqua incididunt eiusmod consequat ad occaecat fugiat do laborum. Qui ad aliquip ex do sunt. Fugiat non ut fugiat eu.','registered':'Sunday, March 9, 2014 3:41 PM','latitude':'17.926318','longitude':'108.985996','tags':['ut','voluptate','veniam','non','commodo'],'greeting':'Hello, Jaime! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edcd125a89dcf18e0d','index':79,'guid':'eccaa4ca-0fa7-4b00-a1e3-fe7953403894','isActive':true,'balance':'$1,521.33','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Terra','last':'Sullivan'},'company':'ZANITY','email':'terra.sullivan@zanity.biz','phone':'+1 (995) 498-2714','address':'346 Congress Street, Tuttle, Maryland, 3152','about':'Incididunt enim veniam ut veniam quis dolore pariatur culpa ex. Cillum laboris dolor exercitation officia. Officia irure magna aliqua veniam officia ullamco culpa. Cillum enim velit ea sint sint officia labore ea adipisicing culpa laboris. 
Anim aute sint commodo culpa ex quis minim ut laborum.','registered':'Sunday, June 1, 2014 5:38 AM','latitude':'-4.655435','longitude':'5.851803','tags':['anim','non','anim','laborum','pariatur'],'greeting':'Hello, Terra! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed9b9fc3041a674c87','index':80,'guid':'9f95fa36-4e45-4c3f-9362-3d4d809bf57f','isActive':true,'balance':'$3,403.16','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Sharpe','last':'Berger'},'company':'ZILLAN','email':'sharpe.berger@zillan.ca','phone':'+1 (913) 498-3005','address':'277 Bragg Street, Faywood, Texas, 6487','about':'Dolor duis id aute ea veniam amet ullamco id. Culpa deserunt irure mollit tempor dolore veniam culpa officia culpa laborum eiusmod. Ullamco tempor qui aliqua cupidatat veniam cillum eu ut ex minim eu in. Quis exercitation anim eiusmod tempor esse mollit exercitation cillum ipsum reprehenderit. Sint voluptate ipsum officia sint magna nulla tempor eiusmod eiusmod veniam. Consectetur non ad veniam exercitation voluptate non nostrud.','registered':'Tuesday, June 27, 2017 12:58 AM','latitude':'-0.54085','longitude':'106.258693','tags':['proident','eiusmod','commodo','excepteur','pariatur'],'greeting':'Hello, Sharpe! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1a1866757bf675e0','index':81,'guid':'1b944a01-01d3-4846-94e3-630f4d0e51a3','isActive':true,'balance':'$2,038.61','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Blanchard','last':'Ewing'},'company':'CONJURICA','email':'blanchard.ewing@conjurica.info','phone':'+1 (859) 593-3212','address':'252 Beaver Street, Kiskimere, Utah, 3255','about':'Labore magna aute adipisicing ut dolor sit ea. Officia culpa aute occaecat sit ex ullamco aliquip ad sit culpa. Ex in enim dolore ex est sit. Do irure nulla magna sint aliquip in duis aute. 
Magna ullamco sit labore ea tempor voluptate.','registered':'Monday, May 4, 2015 10:50 AM','latitude':'76.207595','longitude':'0.672563','tags':['proident','pariatur','officia','in','culpa'],'greeting':'Hello, Blanchard! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed987d82f4e22d939c','index':82,'guid':'97a90aee-3cee-4678-819e-24fb94279dc1','isActive':false,'balance':'$1,201.55','picture':'http://placehold.it/32x32','age':28,'eyeColor':'blue','name':{'first':'Wells','last':'Solomon'},'company':'CORPULSE','email':'wells.solomon@corpulse.tv','phone':'+1 (840) 539-3349','address':'159 Radde Place, Linganore, Idaho, 230','about':'Consequat dolore mollit sit irure cupidatat commodo. Incididunt cillum reprehenderit ullamco sit proident cupidatat occaecat reprehenderit officia. Ad anim Lorem elit in officia minim proident nisi commodo eiusmod ea Lorem dolore voluptate. Dolor aliquip est commodo Lorem dolor ut aliquip ut. Sit anim officia dolore excepteur aute enim cillum.','registered':'Friday, January 6, 2017 1:59 PM','latitude':'70.020883','longitude':'14.503588','tags':['mollit','aute','officia','nostrud','laboris'],'greeting':'Hello, Wells! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf7a904ea0d0bc2a','index':83,'guid':'fe639a0c-7517-43e6-b0da-cd9ca5b9e267','isActive':false,'balance':'$3,664.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'blue','name':{'first':'Natalia','last':'Brown'},'company':'SYNTAC','email':'natalia.brown@syntac.co.uk','phone':'+1 (952) 595-3513','address':'332 Lenox Road, Springville, Alabama, 8406','about':'Nulla consequat officia commodo ea sunt irure anim velit aliquip aliquip. Labore ullamco occaecat proident voluptate cillum labore minim nostrud excepteur. Qui fugiat nostrud cillum fugiat ullamco id commodo aliqua voluptate mollit id id laboris. Cillum qui duis duis sit adipisicing elit ut aliqua eu. 
Anim nisi aliqua sit mollit.','registered':'Sunday, July 30, 2017 1:02 PM','latitude':'31.937613','longitude':'-9.957927','tags':['magna','adipisicing','exercitation','tempor','consectetur'],'greeting':'Hello, Natalia! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed8823fa385cad4aa3','index':84,'guid':'5cf280da-f5f0-4cc6-9063-e9d5863c8c89','isActive':false,'balance':'$1,624.17','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Greene','last':'Waller'},'company':'ISOTRACK','email':'greene.waller@isotrack.io','phone':'+1 (838) 406-3608','address':'362 Albemarle Road, Gardiner, Michigan, 2764','about':'Ut nisi sit sint nulla dolor magna. Culpa occaecat adipisicing veniam proident excepteur tempor quis ex. Fugiat tempor laborum dolor adipisicing irure anim cupidatat ut exercitation ex sit. Cupidatat exercitation commodo sunt ex irure fugiat eu esse do ullamco mollit dolore cupidatat. Cupidatat magna incididunt officia dolore esse voluptate deserunt in laborum dolor. Sit fugiat Lorem eu ullamco. Laboris veniam quis cillum tempor ex fugiat cillum cupidatat.','registered':'Sunday, June 10, 2018 10:32 PM','latitude':'0.256921','longitude':'-96.141941','tags':['magna','dolore','deserunt','aliquip','cillum'],'greeting':'Hello, Greene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda7c905c2d24c7d31','index':85,'guid':'aa30a9fb-8a16-48eb-8bb7-1307d1e1f191','isActive':false,'balance':'$1,974.04','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Carlene','last':'Hanson'},'company':'DIGIRANG','email':'carlene.hanson@digirang.me','phone':'+1 (981) 417-3209','address':'435 Clark Street, Choctaw, Oregon, 9888','about':'Amet labore esse cillum irure laborum consectetur occaecat non aliquip aliquip proident. Nisi magna nulla officia duis labore aute nulla laborum duis tempor minim. 
Velit elit reprehenderit nisi exercitation officia incididunt amet cupidatat excepteur proident consectetur.','registered':'Thursday, April 20, 2017 6:13 AM','latitude':'68.529086','longitude':'68.802409','tags':['pariatur','nulla','qui','amet','labore'],'greeting':'Hello, Carlene! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed6fbee12ce9e55dbf','index':86,'guid':'0fce89aa-3310-48df-862a-68bd3d776644','isActive':false,'balance':'$3,909.64','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Doris','last':'Collins'},'company':'ZIORE','email':'doris.collins@ziore.com','phone':'+1 (914) 405-2360','address':'301 Lorraine Street, Stouchsburg, Minnesota, 7476','about':'Nisi deserunt aliquip et deserunt ipsum ad consectetur est non ullamco. Dolore do ut voluptate do eiusmod. Culpa ad in eiusmod nisi cillum do. Officia magna cillum sint aliqua reprehenderit amet est ipsum. Eiusmod deserunt commodo proident consequat. Amet minim dolor consequat aliquip aliquip culpa non exercitation non.','registered':'Wednesday, February 25, 2015 9:15 PM','latitude':'-57.364906','longitude':'130.766587','tags':['nulla','deserunt','cillum','eiusmod','adipisicing'],'greeting':'Hello, Doris! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edede9402476c398c0','index':87,'guid':'60cf0aa6-bc6d-4305-8842-d27e6af1306f','isActive':false,'balance':'$2,817.53','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Cline','last':'Hayden'},'company':'ECRAZE','email':'cline.hayden@ecraze.org','phone':'+1 (965) 507-2138','address':'352 Rutland Road, Ebro, Connecticut, 1196','about':'Dolor eiusmod enim anim sit enim ea tempor. Tempor amet consectetur aliquip culpa do ex excepteur deserunt. Dolor commodo veniam culpa sint. 
Commodo consectetur pariatur irure nisi deserunt cillum est dolor ipsum ea.','registered':'Thursday, September 29, 2016 5:58 AM','latitude':'62.50713','longitude':'86.247286','tags':['enim','tempor','anim','veniam','proident'],'greeting':'Hello, Cline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edeb72f151994a551b','index':88,'guid':'dbb49c62-86b1-409f-b8b8-f609c709d2a8','isActive':false,'balance':'$3,122.56','picture':'http://placehold.it/32x32','age':39,'eyeColor':'green','name':{'first':'Janelle','last':'Rutledge'},'company':'TERRAGEN','email':'janelle.rutledge@terragen.net','phone':'+1 (914) 581-3749','address':'170 Falmouth Street, Alderpoint, West Virginia, 642','about':'Laboris proident cillum sunt qui ea sunt. Officia adipisicing exercitation dolore magna reprehenderit amet anim id. Laboris commodo sit irure irure. Excepteur est mollit fugiat incididunt consectetur veniam irure ea mollit. Cillum enim consequat sunt sunt nisi incididunt tempor enim.','registered':'Monday, February 16, 2015 5:46 AM','latitude':'-46.392023','longitude':'32.054562','tags':['eu','eu','nisi','labore','deserunt'],'greeting':'Hello, Janelle! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edc9c2604846ff9a0d','index':89,'guid':'c4d7a365-f1d3-4584-b78e-008394c219f7','isActive':true,'balance':'$1,807.19','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Abby','last':'Lopez'},'company':'GRAINSPOT','email':'abby.lopez@grainspot.name','phone':'+1 (917) 442-3955','address':'488 Kensington Walk, Winston, Hawaii, 9109','about':'Incididunt deserunt Lorem proident magna tempor enim quis duis eu ut adipisicing in. Ex mollit non irure aliqua officia. Fugiat id ipsum consequat irure id ullamco culpa quis nulla enim aliquip consequat et. Dolor ut anim velit irure consequat cillum eu. 
Aute occaecat laborum est aliqua.','registered':'Sunday, April 1, 2018 11:28 PM','latitude':'-10.177041','longitude':'-165.756718','tags':['est','laborum','culpa','non','quis'],'greeting':'Hello, Abby! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed03237438b158af9e','index':90,'guid':'36c4a19f-2d00-4e40-bd49-155fd2ce0a6c','isActive':false,'balance':'$2,757.86','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Whitney','last':'Sheppard'},'company':'ANACHO','email':'whitney.sheppard@anacho.us','phone':'+1 (922) 437-2383','address':'951 Beekman Place, Homeworth, New York, 6088','about':'Sint minim nisi minim non minim aliqua pariatur ullamco do sint qui labore. Aute elit reprehenderit ad do fugiat est amet. In incididunt tempor commodo cillum tempor est labore anim.','registered':'Tuesday, September 13, 2016 6:43 PM','latitude':'-49.732527','longitude':'-171.846715','tags':['exercitation','veniam','sunt','est','proident'],'greeting':'Hello, Whitney! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edb99dd3aa53d2cb7f','index':91,'guid':'17afd430-f37f-4d55-958c-72f35cdb5997','isActive':false,'balance':'$3,683.86','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Ilene','last':'Blackwell'},'company':'ENQUILITY','email':'ilene.blackwell@enquility.biz','phone':'+1 (817) 555-2616','address':'950 Varanda Place, Belgreen, Virgin Islands, 1765','about':'Id eiusmod deserunt eiusmod adipisicing adipisicing est enim pariatur esse duis. Qui velit duis irure magna consectetur dolore reprehenderit. Cillum dolore minim consectetur irure non qui velit cillum veniam adipisicing incididunt. Deserunt veniam excepteur veniam velit aliquip labore quis exercitation magna do non dolor. Aliquip occaecat minim adipisicing deserunt fugiat nulla occaecat proident irure consectetur eiusmod irure. 
Enim Lorem deserunt amet Lorem commodo eiusmod reprehenderit occaecat adipisicing dolor voluptate cillum.','registered':'Thursday, February 1, 2018 8:39 AM','latitude':'57.393644','longitude':'-3.704258','tags':['adipisicing','dolor','commodo','Lorem','Lorem'],'greeting':'Hello, Ilene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed353f4deb62c3342a','index':92,'guid':'9953e285-2095-4f1c-978b-9ece2a867e9d','isActive':false,'balance':'$1,202.44','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Dawson','last':'Herman'},'company':'BITENDREX','email':'dawson.herman@bitendrex.ca','phone':'+1 (843) 522-2655','address':'471 Channel Avenue, Denio, Alaska, 5040','about':'Nisi occaecat mollit reprehenderit nisi minim Lorem mollit. Ea proident irure cillum quis. Deserunt consectetur consectetur consequat quis enim minim ea ipsum proident nisi ad non aliquip. Veniam aute minim consequat irure voluptate aute amet excepteur exercitation cillum duis quis adipisicing nostrud.','registered':'Tuesday, December 8, 2015 5:40 PM','latitude':'-55.602721','longitude':'-26.683234','tags':['qui','dolor','deserunt','eiusmod','labore'],'greeting':'Hello, Dawson! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5464bc50a5310ad','index':93,'guid':'724b2434-4dbd-417d-aa07-6065715f434f','isActive':false,'balance':'$1,595.98','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Alice','last':'Christian'},'company':'ZENOLUX','email':'alice.christian@zenolux.info','phone':'+1 (954) 466-2650','address':'875 Gerritsen Avenue, Townsend, Kentucky, 6568','about':'Nulla labore occaecat ex culpa magna. Commodo occaecat et in consequat cillum laborum magna adipisicing excepteur. Do ut Lorem esse voluptate officia ea aliquip proident amet veniam minim nulla adipisicing. Enim consectetur incididunt laborum voluptate tempor deserunt non laboris. 
Aliquip deserunt aute irure dolore magna anim aliquip sint magna Lorem. Officia laboris nulla officia sint labore nisi. Do Lorem id in est esse adipisicing id fugiat enim esse laborum.','registered':'Wednesday, October 3, 2018 9:26 PM','latitude':'-88.790637','longitude':'138.817328','tags':['duis','ea','magna','ea','incididunt'],'greeting':'Hello, Alice! You have 8 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda01886247b6a4f3d','index':94,'guid':'17c9f4d3-7d72-44e3-8f7c-08d7de920f46','isActive':false,'balance':'$3,173.29','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Schwartz','last':'Mccormick'},'company':'EVIDENDS','email':'schwartz.mccormick@evidends.tv','phone':'+1 (924) 531-2802','address':'160 Midwood Street, Indio, Palau, 4241','about':'Anim reprehenderit et et adipisicing voluptate consequat elit. Sint Lorem laboris Lorem minim nostrud aute reprehenderit elit aute quis nulla. Officia aute eiusmod mollit cillum eu aliquip non enim ea occaecat quis fugiat occaecat officia. Eiusmod culpa exercitation dolor aliqua enim occaecat nisi cupidatat duis ex dolore id. Id consequat aliqua cupidatat ut. Sit nisi est sunt culpa ullamco excepteur sunt pariatur incididunt amet. Ut tempor duis velit eu ut id culpa aute anim occaecat labore.','registered':'Thursday, March 2, 2017 5:57 PM','latitude':'38.618587','longitude':'-165.142529','tags':['ad','reprehenderit','magna','elit','mollit'],'greeting':'Hello, Schwartz! 
You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51be4df456ec2bc9','index':95,'guid':'44f68f65-959b-4ec2-bd2a-1f30035f76fc','isActive':false,'balance':'$3,242.24','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Bonita','last':'Stevens'},'company':'SLOFAST','email':'bonita.stevens@slofast.co.uk','phone':'+1 (886) 473-2105','address':'459 Bushwick Court, Kilbourne, Rhode Island, 9450','about':'Consequat reprehenderit qui reprehenderit nisi sit est in qui aliquip amet. Ex deserunt cupidatat amet cillum eiusmod irure anim in amet proident voluptate. Ad officia culpa in non incididunt do.','registered':'Saturday, August 22, 2015 5:23 AM','latitude':'60.013542','longitude':'58.242132','tags':['aute','adipisicing','in','cillum','officia'],'greeting':'Hello, Bonita! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed50a55e3587993f68','index':96,'guid':'652e434f-221e-4899-af12-38dca5c9621d','isActive':false,'balance':'$2,720.06','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Charmaine','last':'Jackson'},'company':'FLUM','email':'charmaine.jackson@flum.io','phone':'+1 (947) 573-2692','address':'788 Windsor Place, Highland, Arkansas, 8869','about':'Dolore reprehenderit irure excepteur eu reprehenderit sint Lorem ut amet in. Consequat anim elit sunt aliquip incididunt. Culpa consequat do exercitation dolor enim dolor sunt sit excepteur ad anim. Dolor aute elit velit mollit minim eu.','registered':'Wednesday, April 6, 2016 7:54 PM','latitude':'25.756553','longitude':'-5.482531','tags':['amet','sint','consequat','est','ex'],'greeting':'Hello, Charmaine! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed213621949bbdd5d3','index':97,'guid':'7d7d93d8-3e37-4b4a-9fa2-591fb7d153ce','isActive':true,'balance':'$1,370.63','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Petersen','last':'Cooley'},'company':'ROTODYNE','email':'petersen.cooley@rotodyne.me','phone':'+1 (929) 563-3339','address':'338 Pioneer Street, Carbonville, Missouri, 3030','about':'Cillum elit dolore labore aute. Cillum ea incididunt cupidatat consequat sint eu mollit. Excepteur commodo eiusmod ex Lorem enim velit minim.','registered':'Friday, December 8, 2017 5:53 AM','latitude':'-10.576254','longitude':'-111.176861','tags':['veniam','eu','eiusmod','dolore','voluptate'],'greeting':'Hello, Petersen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e938138d58ed453','index':98,'guid':'d6fea4a3-03f6-46ee-90b9-8ec51a585e29','isActive':true,'balance':'$1,216.54','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Rosanne','last':'Terry'},'company':'EXTREMO','email':'rosanne.terry@extremo.com','phone':'+1 (812) 496-2691','address':'368 Rockaway Avenue, Gloucester, Illinois, 7913','about':'Duis et nostrud duis quis minim eiusmod culpa do ea ad pariatur tempor. Velit veniam aliqua aliquip est enim ex et culpa dolor ullamco culpa officia. Eu id occaecat aute cillum aute sit aute laboris ipsum voluptate ex. Amet tempor minim tempor Lorem quis dolore. Pariatur consequat dolore nulla veniam dolor exercitation consequat nulla laboris incididunt do. Dolore do tempor deserunt exercitation incididunt officia incididunt ut do reprehenderit do eiusmod nulla.','registered':'Sunday, August 6, 2017 12:46 PM','latitude':'-43.257964','longitude':'-45.147686','tags':['et','incididunt','esse','commodo','ipsum'],'greeting':'Hello, Rosanne! 
You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed632b1a1d65501d6b','index':99,'guid':'bf8c6ac1-ee18-48ee-ae94-ea515a53c951','isActive':true,'balance':'$2,905.58','picture':'http://placehold.it/32x32','age':21,'eyeColor':'blue','name':{'first':'Irene','last':'Castro'},'company':'POLARIA','email':'irene.castro@polaria.org','phone':'+1 (818) 417-3761','address':'901 Dupont Street, Sperryville, Oklahoma, 953','about':'Pariatur minim laboris aliqua dolor aliquip consequat ea do duis voluptate id Lorem. In reprehenderit et adipisicing anim elit incididunt velit in laborum laborum. Qui minim magna et amet sit do voluptate reprehenderit ea sit sint velit.','registered':'Tuesday, August 18, 2015 10:48 AM','latitude':'-7.004055','longitude':'116.052433','tags':['sit','proident','enim','ullamco','non'],'greeting':'Hello, Irene! You have 10 unread messages.','favoriteFruit':'apple'}]" + } +} diff --git a/pkg/cloud/testdata/plan-long-line/plan.log b/pkg/cloud/testdata/plan-long-line/plan.log new file mode 100644 index 00000000000..f34ed170c9a --- /dev/null +++ b/pkg/cloud/testdata/plan-long-line/plan.log @@ -0,0 +1,23 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + triggers.%: "1" + triggers.long_line: "[{'_id':'5c5ab0ed7de45e993ffb9eeb','index':0,'guid':'e734d772-6b5a-4cb0-805c-91cd5e560e20','isActive':false,'balance':'$1,472.03','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Darlene','last':'Garza'},'company':'GEEKOSIS','email':'darlene.garza@geekosis.io','phone':'+1 (850) 506-3347','address':'165 Kiely Place, Como, New Mexico, 4335','about':'Officia ullamco et sunt magna voluptate culpa cupidatat ea tempor laboris cupidatat ea anim laboris. Minim enim quis enim esse laborum est veniam. Lorem excepteur elit Lorem cupidatat elit ea anim irure fugiat fugiat sunt mollit. Consectetur ad nulla dolor amet esse occaecat aliquip sit. Magna sit elit adipisicing ut reprehenderit anim exercitation sit quis ea pariatur Lorem magna dolore.','registered':'Wednesday, March 11, 2015 12:58 PM','latitude':'20.729127','longitude':'-127.343593','tags':['minim','in','deserunt','occaecat','fugiat'],'greeting':'Hello, Darlene! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda9117d15f1c1f112','index':1,'guid':'f0d1eed2-c6a9-4535-8800-d4bd53fe7eee','isActive':true,'balance':'$2,901.90','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Flora','last':'Short'},'company':'SIGNITY','email':'flora.short@signity.me','phone':'+1 (840) 520-2666','address':'636 Johnson Avenue, Gerber, Wisconsin, 9139','about':'Veniam dolore deserunt Lorem aliqua qui eiusmod. Amet tempor fugiat duis incididunt amet adipisicing. Id ea nisi veniam eiusmod.','registered':'Wednesday, May 2, 2018 5:59 AM','latitude':'-63.267612','longitude':'4.224102','tags':['veniam','incididunt','id','aliqua','reprehenderit'],'greeting':'Hello, Flora! 
You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed83fd574d8041fa16','index':2,'guid':'29499a07-414a-436f-ba62-6634ca16bdcc','isActive':true,'balance':'$2,781.28','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Trevino','last':'Marks'},'company':'KEGULAR','email':'trevino.marks@kegular.com','phone':'+1 (843) 571-2269','address':'200 Alabama Avenue, Grenelefe, Florida, 7963','about':'Occaecat nisi exercitation Lorem mollit laborum magna adipisicing culpa dolor proident dolore. Non consequat ea amet et id mollit incididunt minim anim amet nostrud labore tempor. Proident eu sint commodo nisi consequat voluptate do fugiat proident. Laboris eiusmod veniam non et elit nulla nisi labore incididunt Lorem consequat consectetur voluptate.','registered':'Saturday, January 25, 2014 5:56 AM','latitude':'65.044005','longitude':'-127.454864','tags':['anim','duis','velit','pariatur','enim'],'greeting':'Hello, Trevino! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed784eb6e350ff0a07','index':3,'guid':'40ed47e2-1747-4665-ab59-cdb3630a7642','isActive':true,'balance':'$2,000.78','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Solis','last':'Mckinney'},'company':'QABOOS','email':'solis.mckinney@qaboos.org','phone':'+1 (924) 405-2560','address':'712 Herkimer Court, Klondike, Ohio, 8133','about':'Minim ad anim minim tempor mollit magna tempor et non commodo amet. Nisi cupidatat labore culpa consectetur exercitation laborum adipisicing fugiat officia adipisicing consequat non. Qui voluptate tempor laboris exercitation qui non adipisicing occaecat voluptate sunt do nostrud velit. Consequat tempor officia laboris tempor irure cupidatat aliquip voluptate nostrud velit ex nulla tempor laboris. Qui pariatur pariatur enim aliquip velit. 
Officia mollit ullamco laboris velit velit eiusmod enim amet incididunt consectetur sunt.','registered':'Wednesday, April 12, 2017 6:59 AM','latitude':'-25.055596','longitude':'-140.126525','tags':['ipsum','adipisicing','amet','nulla','dolore'],'greeting':'Hello, Solis! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed02ce1ea9a2155d51','index':4,'guid':'1b5fb7d3-3b9a-4382-81b5-9ab01a27e74b','isActive':true,'balance':'$1,373.67','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Janell','last':'Battle'},'company':'GEEKMOSIS','email':'janell.battle@geekmosis.net','phone':'+1 (810) 591-3014','address':'517 Onderdonk Avenue, Shrewsbury, District Of Columbia, 2335','about':'Reprehenderit ad proident do anim qui officia magna magna duis cillum esse minim est. Excepteur ipsum anim ad laboris. In occaecat dolore nulla ea Lorem tempor et culpa in sint. Officia eu eu incididunt sit amet. Culpa duis id reprehenderit ut anim sit sunt. Duis dolore proident velit incididunt adipisicing pariatur fugiat incididunt eiusmod eu veniam irure.','registered':'Thursday, February 8, 2018 1:44 AM','latitude':'-33.254864','longitude':'-154.145885','tags':['aute','deserunt','ipsum','eiusmod','laborum'],'greeting':'Hello, Janell! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edab58604bd7d3dd1c','index':5,'guid':'6354c035-af22-44c9-8be9-b2ea9decc24d','isActive':true,'balance':'$3,535.68','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Combs','last':'Kirby'},'company':'LUXURIA','email':'combs.kirby@luxuria.name','phone':'+1 (900) 498-3266','address':'377 Kingsland Avenue, Ruckersville, Maine, 9916','about':'Lorem duis ipsum pariatur aliquip sunt. Commodo esse laborum incididunt mollit quis est laboris ea ea quis fugiat. 
Enim elit ullamco velit et fugiat veniam irure deserunt aliqua ad irure veniam.','registered':'Tuesday, February 21, 2017 4:04 PM','latitude':'-70.20591','longitude':'162.546871','tags':['reprehenderit','est','enim','aute','ad'],'greeting':'Hello, Combs! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf7fafeffc6357c51','index':6,'guid':'02523e0b-cc90-4309-b6b2-f493dc6076f6','isActive':false,'balance':'$3,754.30','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Macias','last':'Calderon'},'company':'AMTAP','email':'macias.calderon@amtap.us','phone':'+1 (996) 569-3667','address':'305 Royce Street, Glidden, Iowa, 9248','about':'Exercitation nulla deserunt pariatur adipisicing. In commodo deserunt incididunt ut velit minim qui ut quis. Labore elit ullamco eiusmod voluptate in eu do est fugiat aute mollit deserunt. Eu duis proident velit fugiat velit ut. Ut non esse amet laborum nisi tempor in nulla.','registered':'Thursday, October 23, 2014 10:28 PM','latitude':'32.371629','longitude':'60.155135','tags':['commodo','elit','velit','excepteur','aliqua'],'greeting':'Hello, Macias! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0e8a6109e7fabf17','index':7,'guid':'675ff6b6-197b-4154-9775-813d661df822','isActive':false,'balance':'$2,850.62','picture':'http://placehold.it/32x32','age':37,'eyeColor':'green','name':{'first':'Stefanie','last':'Rivers'},'company':'RECRITUBE','email':'stefanie.rivers@recritube.biz','phone':'+1 (994) 591-3551','address':'995 Campus Road, Abrams, Virginia, 3251','about':'Esse aute non laborum Lorem nulla irure. Veniam elit aute ut et dolor non deserunt laboris tempor. Ipsum quis cupidatat laborum laboris voluptate esse duis eiusmod excepteur consectetur commodo ullamco qui occaecat. 
Culpa velit cillum occaecat minim nisi.','registered':'Thursday, June 9, 2016 3:40 PM','latitude':'-18.526825','longitude':'149.670782','tags':['occaecat','sunt','reprehenderit','ipsum','magna'],'greeting':'Hello, Stefanie! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf7d9bc2db4e476e3','index':8,'guid':'adaefc55-f6ea-4bd1-a147-0e31c3ce7a21','isActive':true,'balance':'$2,555.13','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Hillary','last':'Lancaster'},'company':'OLUCORE','email':'hillary.lancaster@olucore.ca','phone':'+1 (964) 474-3018','address':'232 Berriman Street, Kaka, Massachusetts, 6792','about':'Veniam ad laboris quis reprehenderit aliquip nisi sunt excepteur ea aute laborum excepteur incididunt. Nisi exercitation aliquip do culpa commodo ex officia ut enim mollit in deserunt in amet. Anim eu deserunt dolore non cupidatat ut enim incididunt aute dolore voluptate. Do cillum mollit laborum non incididunt occaecat aute voluptate nisi irure.','registered':'Thursday, June 4, 2015 9:45 PM','latitude':'88.075919','longitude':'-148.951368','tags':['reprehenderit','veniam','ad','aute','anim'],'greeting':'Hello, Hillary! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed7b7192ad6a0f267c','index':9,'guid':'0ca9b8ea-f671-474e-be26-4a49cae4838a','isActive':true,'balance':'$3,684.51','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Jill','last':'Conner'},'company':'EXOZENT','email':'jill.conner@exozent.info','phone':'+1 (887) 467-2168','address':'751 Thames Street, Juarez, American Samoa, 8386','about':'Enim voluptate et non est in magna laborum aliqua enim aliqua est non nostrud. Tempor est nulla ipsum consectetur esse nostrud est id. Consequat do voluptate cupidatat eu fugiat et fugiat velit id. 
Sint dolore ad qui tempor anim eu amet consectetur do elit aute adipisicing consequat ex.','registered':'Sunday, October 22, 2017 7:35 AM','latitude':'84.384911','longitude':'40.305648','tags':['tempor','sint','irure','et','ex'],'greeting':'Hello, Jill! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed713fe676575aa72b','index':10,'guid':'c28023cf-cc57-4c2e-8d91-dfbe6bafadcd','isActive':false,'balance':'$2,792.45','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Hurley','last':'George'},'company':'ZAJ','email':'hurley.george@zaj.tv','phone':'+1 (984) 547-3284','address':'727 Minna Street, Lacomb, Colorado, 2557','about':'Ex velit cupidatat veniam culpa. Eiusmod ut fugiat adipisicing incididunt consectetur exercitation Lorem exercitation ex. Incididunt anim aute incididunt fugiat cupidatat qui eu non reprehenderit. Eiusmod dolor nisi culpa excepteur ut velit minim dolor voluptate amet commodo culpa in.','registered':'Thursday, February 16, 2017 6:41 AM','latitude':'25.989949','longitude':'10.200053','tags':['minim','ut','sunt','consequat','ullamco'],'greeting':'Hello, Hurley! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1e56732746c70d8b','index':11,'guid':'e9766f13-766c-4450-b4d2-8b04580f60b7','isActive':true,'balance':'$3,874.26','picture':'http://placehold.it/32x32','age':35,'eyeColor':'green','name':{'first':'Leticia','last':'Pace'},'company':'HONOTRON','email':'leticia.pace@honotron.co.uk','phone':'+1 (974) 536-3322','address':'365 Goodwin Place, Savage, Nevada, 9191','about':'Nisi Lorem aliqua esse eiusmod magna. Ad minim incididunt proident ut Lorem cupidatat qui velit aliqua ullamco et ipsum in. Aliquip elit consectetur pariatur esse exercitation et officia quis. Occaecat tempor proident cillum anim ad commodo velit ut voluptate. 
Tempor et occaecat sit sint aliquip tempor nulla velit magna nisi proident exercitation Lorem id.','registered':'Saturday, August 4, 2018 5:05 AM','latitude':'70.620386','longitude':'-86.335813','tags':['occaecat','velit','labore','laboris','esse'],'greeting':'Hello, Leticia! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed941337fe42f47426','index':12,'guid':'6d390762-17ea-4b58-9a36-b0c9a8748a42','isActive':true,'balance':'$1,049.61','picture':'http://placehold.it/32x32','age':38,'eyeColor':'green','name':{'first':'Rose','last':'Humphrey'},'company':'MYOPIUM','email':'rose.humphrey@myopium.io','phone':'+1 (828) 426-3086','address':'389 Sapphire Street, Saticoy, Marshall Islands, 1423','about':'Aliquip enim excepteur adipisicing ex. Consequat aliqua consequat nostrud do occaecat deserunt excepteur sit et ipsum sunt dolor eu. Dolore laborum commodo excepteur tempor ad adipisicing proident excepteur magna non Lorem proident consequat aute. Fugiat minim consequat occaecat voluptate esse velit officia laboris nostrud nisi ut voluptate.','registered':'Monday, April 16, 2018 12:38 PM','latitude':'-47.083742','longitude':'109.022423','tags':['aute','non','sit','adipisicing','mollit'],'greeting':'Hello, Rose! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd0c02fc3fdc01a40','index':13,'guid':'07755618-6fdf-4b33-af50-364c18909227','isActive':true,'balance':'$1,823.61','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Judith','last':'Hale'},'company':'COLLAIRE','email':'judith.hale@collaire.me','phone':'+1 (922) 508-2843','address':'193 Coffey Street, Castleton, North Dakota, 3638','about':'Minim non ullamco ad anim nostrud dolore nostrud veniam consequat id eiusmod veniam laboris. Lorem irure esse mollit non velit aute id cupidatat est mollit occaecat magna excepteur. Adipisicing tempor nisi sit aliquip tempor pariatur tempor eu consectetur nulla amet nulla. 
Quis nisi nisi ea incididunt culpa et do. Esse officia eu pariatur velit sunt quis proident amet consectetur consequat. Nisi excepteur culpa nulla sit dolor deserunt excepteur dolor consequat elit cillum tempor Lorem.','registered':'Wednesday, August 24, 2016 12:29 AM','latitude':'-80.15514','longitude':'39.91007','tags':['consectetur','incididunt','aliquip','dolor','consequat'],'greeting':'Hello, Judith! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3e1e29caa4f728b','index':14,'guid':'2c6617a2-e7a9-4ff7-a8b9-e99554fe70fe','isActive':true,'balance':'$1,971.00','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Estes','last':'Sweet'},'company':'GEEKKO','email':'estes.sweet@geekko.com','phone':'+1 (866) 448-3032','address':'847 Cove Lane, Kula, Mississippi, 9178','about':'Veniam consectetur occaecat est excepteur consequat ipsum cillum sit consectetur. Ut cupidatat et reprehenderit dolore enim do cillum qui pariatur ad laborum incididunt esse. Fugiat sunt dolor veniam laboris ipsum deserunt proident reprehenderit laboris non nostrud. Magna excepteur sint magna laborum tempor sit exercitation ipsum labore est ullamco ullamco. Cillum voluptate cillum ea laborum Lorem. Excepteur sint ut nisi est esse non. Minim excepteur ullamco velit nisi ut in elit exercitation ut dolore.','registered':'Sunday, August 12, 2018 5:06 PM','latitude':'-9.57771','longitude':'-159.94577','tags':['culpa','dolor','velit','anim','pariatur'],'greeting':'Hello, Estes! 
You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edbcf088c6fd593091','index':15,'guid':'2cc79958-1b40-4e2c-907a-433903fd3da9','isActive':false,'balance':'$3,751.53','picture':'http://placehold.it/32x32','age':34,'eyeColor':'brown','name':{'first':'Kemp','last':'Spence'},'company':'EXOBLUE','email':'kemp.spence@exoblue.org','phone':'+1 (864) 487-2992','address':'217 Clay Street, Monument, North Carolina, 1460','about':'Nostrud duis cillum sint non commodo dolor aute aliqua adipisicing ad nulla non excepteur proident. Fugiat labore elit tempor cillum veniam reprehenderit laboris consectetur dolore amet qui cupidatat. Amet aliqua elit anim et consequat commodo excepteur officia anim aliqua ea eu labore cillum. Et ex dolor duis dolore commodo veniam et nisi.','registered':'Monday, October 29, 2018 5:23 AM','latitude':'-70.304222','longitude':'83.582371','tags':['velit','duis','consequat','incididunt','duis'],'greeting':'Hello, Kemp! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed6400479feb3de505','index':16,'guid':'91ccae6d-a3ea-43cf-bb00-3f2729256cc9','isActive':false,'balance':'$2,477.79','picture':'http://placehold.it/32x32','age':40,'eyeColor':'blue','name':{'first':'Ronda','last':'Burris'},'company':'EQUITOX','email':'ronda.burris@equitox.net','phone':'+1 (817) 553-3228','address':'708 Lawton Street, Deputy, Wyoming, 8598','about':'Excepteur voluptate aliquip consequat cillum est duis sit cillum eu eiusmod et laborum ullamco. Et minim reprehenderit aute voluptate amet ullamco. Amet sit enim ad irure deserunt nostrud anim veniam consequat dolor commodo. Consequat do occaecat do exercitation ullamco dolor ut. Id laboris consequat est dolor dolore tempor ullamco anim do ut nulla deserunt labore. Mollit ex Lorem ullamco mollit.','registered':'Monday, April 23, 2018 5:27 PM','latitude':'-31.227208','longitude':'0.63785','tags':['ipsum','magna','consectetur','sit','irure'],'greeting':'Hello, Ronda! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddbeab2e53e04d563','index':17,'guid':'a86d4eb6-6bd8-48c2-a8fc-1c933c835852','isActive':false,'balance':'$3,709.03','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Rosario','last':'Dillard'},'company':'BARKARAMA','email':'rosario.dillard@barkarama.name','phone':'+1 (933) 525-3898','address':'730 Chauncey Street, Forbestown, South Carolina, 6894','about':'Est eu fugiat aliquip ea ad qui ad mollit ad tempor voluptate et incididunt reprehenderit. Incididunt fugiat commodo minim adipisicing culpa consectetur duis eu ut commodo consequat voluptate labore. Nostrud irure labore adipisicing irure quis magna consequat dolor Lorem sint enim. Sint excepteur eu dolore elit ut do mollit sunt enim est. Labore id nostrud sint Lorem esse nostrud.','registered':'Friday, December 25, 2015 8:59 PM','latitude':'37.440827','longitude':'44.580474','tags':['Lorem','sit','ipsum','ea','ut'],'greeting':'Hello, Rosario! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf8e9b9c031d04e8','index':18,'guid':'a96f997c-daf8-40d4-92e1-be07e2cf0f60','isActive':false,'balance':'$1,878.37','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Sondra','last':'Gonzales'},'company':'XUMONK','email':'sondra.gonzales@xumonk.us','phone':'+1 (838) 560-2255','address':'230 Cox Place, Geyserville, Georgia, 6805','about':'Laborum sunt voluptate ea laboris nostrud. Amet deserunt aliqua Lorem voluptate velit deserunt occaecat minim ullamco. Lorem occaecat sit labore adipisicing ad magna mollit labore ullamco proident. Ea velit do proident fugiat esse commodo ex nostrud eu mollit pariatur. Labore laborum qui voluptate quis proident reprehenderit tempor dolore duis deserunt esse aliqua aliquip. Non veniam enim pariatur cupidatat ipsum dolore est reprehenderit. 
Non exercitation adipisicing proident magna elit occaecat non magna.','registered':'Sunday, June 26, 2016 4:02 AM','latitude':'62.247742','longitude':'-44.90666','tags':['ea','aute','in','voluptate','magna'],'greeting':'Hello, Sondra! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed2c1bcd06781f677e','index':19,'guid':'6ac47a16-eed4-4460-92ee-e0dd33c1fbb5','isActive':false,'balance':'$3,730.64','picture':'http://placehold.it/32x32','age':20,'eyeColor':'brown','name':{'first':'Anastasia','last':'Vega'},'company':'FIREWAX','email':'anastasia.vega@firewax.biz','phone':'+1 (867) 493-3698','address':'803 Arlington Avenue, Rosburg, Northern Mariana Islands, 8769','about':'Sint ex nisi tempor sunt voluptate non et eiusmod irure. Aute reprehenderit dolor mollit aliqua Lorem voluptate occaecat. Sint laboris deserunt Lorem incididunt nulla cupidatat do.','registered':'Friday, March 18, 2016 12:02 PM','latitude':'-32.010216','longitude':'-87.874753','tags':['aliquip','mollit','mollit','ad','laborum'],'greeting':'Hello, Anastasia! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed727fd645854bbf43','index':20,'guid':'67bd8cdb-ce6b-455c-944c-a80e17c6fa75','isActive':true,'balance':'$2,868.06','picture':'http://placehold.it/32x32','age':29,'eyeColor':'green','name':{'first':'Lucinda','last':'Cox'},'company':'ENDIPINE','email':'lucinda.cox@endipine.ca','phone':'+1 (990) 428-3002','address':'412 Thatford Avenue, Lafferty, New Jersey, 5271','about':'Esse nulla sunt ut consequat aute mollit. Est occaecat sunt nisi irure id anim est commodo. Elit mollit amet dolore sunt adipisicing ea laborum quis ea reprehenderit non consequat dolore. Minim sunt occaecat quis aute commodo dolore quis commodo proident. Sunt sint duis ullamco sit ea esse Lorem. 
Consequat pariatur eiusmod laboris adipisicing labore in laboris adipisicing adipisicing consequat aute ea et.','registered':'Friday, May 1, 2015 10:16 PM','latitude':'-14.200957','longitude':'-82.211386','tags':['do','sit','qui','officia','aliquip'],'greeting':'Hello, Lucinda! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed5a97284eb2cbd3a8','index':21,'guid':'f9fc999d-515c-4fc4-b339-76300e1b4bf2','isActive':true,'balance':'$1,172.57','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Conrad','last':'Bradley'},'company':'FUELWORKS','email':'conrad.bradley@fuelworks.info','phone':'+1 (956) 561-3226','address':'685 Fenimore Street, Esmont, Maryland, 7523','about':'Labore reprehenderit anim nisi sunt do nisi in. Est anim cillum id minim exercitation ullamco voluptate ipsum eu. Elit culpa consequat reprehenderit laborum in eu. Laboris amet voluptate laboris qui voluptate duis minim reprehenderit. Commodo sunt irure dolore sunt occaecat velit nisi eu minim minim.','registered':'Wednesday, January 18, 2017 11:13 PM','latitude':'31.665993','longitude':'38.868968','tags':['excepteur','exercitation','est','nisi','mollit'],'greeting':'Hello, Conrad! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edc4eaf6f760c38218','index':22,'guid':'8794ef5f-da2f-46f0-a755-c18a16409fd5','isActive':false,'balance':'$3,594.73','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Marquez','last':'Vargas'},'company':'MALATHION','email':'marquez.vargas@malathion.tv','phone':'+1 (976) 438-3126','address':'296 Hall Street, National, Texas, 2067','about':'Proident cillum aute minim fugiat sunt aliqua non occaecat est duis id id tempor. Qui deserunt nisi amet pariatur proident eu laboris esse adipisicing magna. Anim anim mollit aute non magna nisi aute magna labore ullamco reprehenderit voluptate et ad. Proident adipisicing aute eiusmod nostrud nostrud deserunt culpa. 
Elit eu ullamco nisi aliqua dolor sint pariatur excepteur sit consectetur tempor. Consequat Lorem ullamco commodo veniam qui sint magna. Sit mollit ad aliquip est id eu officia id adipisicing duis ad.','registered':'Tuesday, November 17, 2015 6:16 PM','latitude':'-36.443667','longitude':'22.336776','tags':['aliquip','veniam','ipsum','Lorem','ex'],'greeting':'Hello, Marquez! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edd7c718518ee0466a','index':23,'guid':'ad8781a2-059e-4288-9879-309d53a99bf5','isActive':true,'balance':'$3,570.68','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Snider','last':'Frost'},'company':'ZILODYNE','email':'snider.frost@zilodyne.co.uk','phone':'+1 (913) 485-3275','address':'721 Lincoln Road, Richmond, Utah, 672','about':'Minim enim Lorem esse incididunt do reprehenderit velit laborum ullamco. In aute eiusmod esse aliqua et labore tempor sunt ex mollit veniam tempor. Nulla elit cillum qui ullamco dolore amet deserunt magna amet laborum.','registered':'Saturday, August 23, 2014 12:58 AM','latitude':'-88.682554','longitude':'74.063179','tags':['nulla','ea','sint','aliquip','duis'],'greeting':'Hello, Snider! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf026fece8e2c0970','index':24,'guid':'1b7d81e1-1dba-4322-bb1a-eaa6a24cccea','isActive':false,'balance':'$2,037.91','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Snyder','last':'Fletcher'},'company':'COMTEST','email':'snyder.fletcher@comtest.io','phone':'+1 (830) 538-3860','address':'221 Lewis Place, Zortman, Idaho, 572','about':'Elit anim enim esse dolore exercitation. Laboris esse sint adipisicing fugiat sint do occaecat ut voluptate sint nulla. 
Ad sint ut reprehenderit nostrud irure id consectetur officia velit consequat.','registered':'Sunday, January 1, 2017 1:13 AM','latitude':'-54.742604','longitude':'69.534932','tags':['exercitation','commodo','in','id','aliqua'],'greeting':'Hello, Snyder! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b9a7f83da6d2dfd','index':25,'guid':'0b2cc6b6-0044-4b1c-aa31-bd72963457a0','isActive':false,'balance':'$1,152.76','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Regina','last':'James'},'company':'TELPOD','email':'regina.james@telpod.me','phone':'+1 (989) 455-3228','address':'688 Essex Street, Clayville, Alabama, 2772','about':'Eiusmod elit culpa reprehenderit ea veniam. Officia irure culpa duis aute ut. Irure duis cillum officia ea pariatur velit ut dolor incididunt reprehenderit ex elit laborum. Est pariatur veniam ad irure. Labore velit sunt esse laboris aliqua velit deserunt deserunt sit. Elit eiusmod ad laboris aliquip minim irure excepteur enim quis. Quis incididunt adipisicing ut magna cupidatat sit amet culpa.','registered':'Tuesday, April 25, 2017 10:16 PM','latitude':'-75.088027','longitude':'47.209828','tags':['elit','nisi','est','voluptate','proident'],'greeting':'Hello, Regina! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed10884f32f779f2bf','index':26,'guid':'1f6fb522-0002-46ff-8dac-451247f28168','isActive':true,'balance':'$1,948.79','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Collins','last':'Mcpherson'},'company':'DIGIGEN','email':'collins.mcpherson@digigen.com','phone':'+1 (991) 519-2334','address':'317 Merit Court, Sanford, Michigan, 6468','about':'Magna qui culpa dolor officia labore mollit ex excepteur duis eiusmod. Ea cupidatat ex ipsum mollit do minim duis. Nisi eiusmod minim tempor id esse commodo sunt sunt ullamco ut do laborum ullamco magna. Aliquip laborum dolor officia officia eu nostrud velit minim est anim. 
Ex elit laborum sunt magna exercitation nisi cillum sunt aute qui ea ullamco. Cupidatat ea sunt aute dolor duis nisi Lorem ullamco eiusmod. Sit ea velit ad veniam aliqua ad elit cupidatat ut magna in.','registered':'Friday, June 10, 2016 4:38 PM','latitude':'25.513996','longitude':'14.911124','tags':['exercitation','non','sit','velit','officia'],'greeting':'Hello, Collins! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8a575110efb15c6c','index':27,'guid':'2a904c82-068b-4ded-9ae6-cfeb6d7e62c9','isActive':true,'balance':'$3,427.91','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Mckay','last':'Barrera'},'company':'COMVEYER','email':'mckay.barrera@comveyer.org','phone':'+1 (853) 470-2560','address':'907 Glenwood Road, Churchill, Oregon, 8583','about':'In voluptate esse dolore enim sint quis dolor do exercitation sint et labore nisi. Eiusmod tempor exercitation dolore elit sit velit sint et. Sit magna adipisicing eiusmod do anim velit deserunt laboris ad ea pariatur. Irure nisi anim mollit elit commodo nulla. Aute eiusmod sit nulla eiusmod. Eiusmod est officia commodo mollit laboris do deserunt eu do nisi amet. Proident ad duis eiusmod laboris Lorem ut culpa pariatur Lorem reprehenderit minim aliquip irure sunt.','registered':'Saturday, December 19, 2015 2:49 PM','latitude':'-55.243287','longitude':'138.035406','tags':['non','quis','laboris','enim','nisi'],'greeting':'Hello, Mckay! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edcd49ab6a73ff7f32','index':28,'guid':'5d3e0dae-3f58-437f-b12d-de24667a904d','isActive':true,'balance':'$3,270.52','picture':'http://placehold.it/32x32','age':35,'eyeColor':'blue','name':{'first':'Mabel','last':'Leonard'},'company':'QUADEEBO','email':'mabel.leonard@quadeebo.net','phone':'+1 (805) 432-2356','address':'965 Underhill Avenue, Falconaire, Minnesota, 4450','about':'Cupidatat amet sunt est ipsum occaecat sit fugiat excepteur Lorem Lorem ex ea ipsum. 
Ad incididunt est irure magna excepteur occaecat nostrud. Minim dolor id anim ipsum qui nostrud ullamco aute ex Lorem magna deserunt excepteur Lorem.','registered':'Saturday, March 28, 2015 5:55 AM','latitude':'27.388359','longitude':'156.408728','tags':['quis','velit','deserunt','dolore','sit'],'greeting':'Hello, Mabel! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edde16ac2dc2fbb6c1','index':29,'guid':'d50c2233-70fc-4748-8ebf-02d45ac2a446','isActive':false,'balance':'$3,100.70','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Pace','last':'Duke'},'company':'SEQUITUR','email':'pace.duke@sequitur.name','phone':'+1 (983) 568-3119','address':'895 Melrose Street, Reno, Connecticut, 6259','about':'Ex veniam aliquip exercitation mollit elit est minim veniam aliqua labore deserunt. Dolor sunt sint cillum Lorem nisi ea irure cupidatat. Velit ut culpa cupidatat consequat cillum. Sint voluptate quis laboris qui incididunt do elit Lorem qui ullamco ut eu pariatur occaecat.','registered':'Saturday, August 18, 2018 2:18 PM','latitude':'31.930443','longitude':'-129.494784','tags':['culpa','est','nostrud','quis','aliquip'],'greeting':'Hello, Pace! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb908d85642ba77e8','index':30,'guid':'3edb6e42-367a-403d-a511-eb78bcc11f60','isActive':true,'balance':'$1,912.07','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Cohen','last':'Morrison'},'company':'POWERNET','email':'cohen.morrison@powernet.us','phone':'+1 (888) 597-2141','address':'565 Troutman Street, Idledale, West Virginia, 3196','about':'Ullamco voluptate duis commodo amet occaecat consequat et occaecat dolore nulla eu. Do aliqua sunt deserunt occaecat laboris labore voluptate cupidatat ullamco exercitation aliquip elit voluptate anim. Occaecat deserunt in labore cillum aute deserunt ea excepteur laboris sunt. 
Officia irure sint incididunt labore sint ipsum ullamco ea elit. Fugiat nostrud sunt ut officia mollit proident sunt dolor fugiat esse tempor do.','registered':'Friday, January 1, 2016 5:42 AM','latitude':'-20.01215','longitude':'26.361552','tags':['consectetur','sunt','nulla','reprehenderit','dolore'],'greeting':'Hello, Cohen! You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed91c77aa25a64a757','index':31,'guid':'8999a97b-0035-4f19-b555-91dd69aaa9b8','isActive':false,'balance':'$3,097.67','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Stout','last':'Valdez'},'company':'UPLINX','email':'stout.valdez@uplinx.biz','phone':'+1 (854) 480-3633','address':'880 Chestnut Avenue, Lowgap, Hawaii, 1537','about':'Cupidatat enim dolore non voluptate. Aliqua ut non Lorem in exercitation reprehenderit voluptate. Excepteur deserunt tempor laboris quis.','registered':'Wednesday, March 16, 2016 6:53 AM','latitude':'50.328393','longitude':'-25.990308','tags':['ea','fugiat','duis','consectetur','enim'],'greeting':'Hello, Stout! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed0f52176c8c3e1bed','index':32,'guid':'743abcbd-1fab-4aed-8cb7-3c935eb64c74','isActive':false,'balance':'$1,118.54','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Ortega','last':'Joseph'},'company':'APEXIA','email':'ortega.joseph@apexia.ca','phone':'+1 (872) 596-3024','address':'304 Canda Avenue, Mulino, New York, 8721','about':'Ipsum elit id cupidatat minim nisi minim. Ea ex amet ea ipsum Lorem deserunt. Occaecat cupidatat magna cillum aliquip sint id quis amet nostrud officia enim laborum. Aliqua deserunt amet commodo laboris labore mollit est. Officia voluptate Lorem esse mollit aliquip laboris cupidatat minim et. 
Labore esse incididunt officia nostrud pariatur reprehenderit.','registered':'Tuesday, January 31, 2017 6:06 AM','latitude':'43.861714','longitude':'33.771783','tags':['ut','Lorem','esse','quis','fugiat'],'greeting':'Hello, Ortega! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed2c00cdd101b6cd52','index':33,'guid':'4f6f99cf-f692-4d03-b23a-26f2b27273bd','isActive':true,'balance':'$1,682.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Sampson','last':'Taylor'},'company':'GEOFORMA','email':'sampson.taylor@geoforma.info','phone':'+1 (911) 482-2993','address':'582 Kent Street, Umapine, Virgin Islands, 5300','about':'Voluptate laboris occaecat laboris tempor cillum quis cupidatat qui pariatur. Lorem minim commodo mollit adipisicing Lorem ut dolor consectetur ipsum. Sint sit voluptate labore aliqua ex labore velit. Ullamco tempor consectetur voluptate deserunt voluptate minim enim. Cillum commodo duis reprehenderit eu duis.','registered':'Thursday, November 9, 2017 11:24 PM','latitude':'24.949379','longitude':'155.034468','tags':['Lorem','cupidatat','elit','reprehenderit','commodo'],'greeting':'Hello, Sampson! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed4b7210ba0bc0d508','index':34,'guid':'73fd415f-f8cf-43e0-a86c-e725d000abd4','isActive':false,'balance':'$1,289.37','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Shari','last':'Melendez'},'company':'DIGIPRINT','email':'shari.melendez@digiprint.tv','phone':'+1 (914) 475-3995','address':'950 Wolf Place, Enetai, Alaska, 693','about':'Dolor incididunt et est commodo aliquip labore ad ullamco. Velit ex cillum nulla elit ex esse. Consectetur mollit fugiat cillum proident elit sunt non officia cillum ex laboris sint eu. Esse nulla eu officia in Lorem sint minim esse velit. Est Lorem ipsum enim aute. 
Elit minim eiusmod officia reprehenderit officia ut irure Lorem.','registered':'Wednesday, August 23, 2017 11:12 PM','latitude':'-70.347863','longitude':'94.812072','tags':['ea','ex','fugiat','duis','eu'],'greeting':'Hello, Shari! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed85ac364619d892ef','index':35,'guid':'c1905f34-14ff-4bd8-b683-02cac4d52623','isActive':false,'balance':'$2,538.50','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Santiago','last':'Joyner'},'company':'BRAINCLIP','email':'santiago.joyner@brainclip.co.uk','phone':'+1 (835) 405-2676','address':'554 Rose Street, Muir, Kentucky, 7752','about':'Quis culpa dolore fugiat magna culpa non deserunt consectetur elit. Id cupidatat occaecat duis irure ullamco elit in labore magna pariatur cillum est. Mollit dolore velit ipsum anim aliqua culpa sint. Occaecat aute anim ut sunt eu.','registered':'Thursday, January 18, 2018 4:49 PM','latitude':'57.057918','longitude':'-50.472596','tags':['ullamco','ullamco','sunt','voluptate','irure'],'greeting':'Hello, Santiago! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1763f56b1121fa88','index':36,'guid':'a7f50659-4ae3-4f3e-a9d8-087e05334b51','isActive':false,'balance':'$1,435.16','picture':'http://placehold.it/32x32','age':37,'eyeColor':'blue','name':{'first':'Adeline','last':'Hoffman'},'company':'BITREX','email':'adeline.hoffman@bitrex.io','phone':'+1 (823) 488-3201','address':'221 Corbin Place, Edmund, Palau, 193','about':'Magna ullamco consectetur velit adipisicing cillum ea. Est qui incididunt est ullamco ex aute exercitation irure. Cupidatat consectetur proident qui fugiat do. Labore magna aliqua consectetur fugiat. Excepteur deserunt sit qui dolor fugiat aute sunt anim ipsum magna ea commodo qui. Minim eu adipisicing ut irure excepteur eiusmod aliqua. 
Voluptate nisi ad consequat qui.','registered':'Tuesday, June 14, 2016 9:26 AM','latitude':'-53.123355','longitude':'88.180776','tags':['non','est','commodo','ut','aliquip'],'greeting':'Hello, Adeline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed945d079f63e3185e','index':37,'guid':'1f4619e0-9289-4bea-a9db-a75f4cba1138','isActive':true,'balance':'$2,019.54','picture':'http://placehold.it/32x32','age':36,'eyeColor':'blue','name':{'first':'Porter','last':'Morse'},'company':'COMVOY','email':'porter.morse@comvoy.me','phone':'+1 (933) 562-3220','address':'416 India Street, Bourg, Rhode Island, 2266','about':'Et sint anim et sunt. Non mollit sunt cillum veniam sunt sint amet non mollit. Fugiat ea ullamco pariatur deserunt ex do minim irure irure.','registered':'Saturday, July 16, 2016 10:03 PM','latitude':'-81.782545','longitude':'69.783509','tags':['irure','consequat','veniam','nulla','velit'],'greeting':'Hello, Porter! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed411dd0f06c66bba6','index':38,'guid':'93c900f0-54c0-4c4c-b21d-d59d8d7c6177','isActive':true,'balance':'$3,764.84','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Fitzgerald','last':'Logan'},'company':'UTARIAN','email':'fitzgerald.logan@utarian.com','phone':'+1 (815) 461-2709','address':'498 Logan Street, Tonopah, Arkansas, 6652','about':'Quis Lorem sit est et dolor est esse in veniam. Mollit anim nostrud laboris consequat voluptate qui ad ipsum sint laborum exercitation quis ipsum. Incididunt cupidatat esse ea amet deserunt consequat eu proident duis adipisicing pariatur. Amet deserunt mollit aliquip mollit consequat sunt quis labore laboris quis. Magna cillum fugiat anim velit Lorem duis. Lorem duis amet veniam occaecat est excepteur ut ea velit esse non pariatur. 
Do veniam quis eu consequat ad duis incididunt minim dolore sit non minim adipisicing et.','registered':'Wednesday, August 9, 2017 9:20 PM','latitude':'24.480657','longitude':'-108.693421','tags':['dolore','ad','occaecat','quis','labore'],'greeting':'Hello, Fitzgerald! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb6f14559d8a7b28','index':39,'guid':'9434f48b-70a0-4161-8d06-c53bf8b9df94','isActive':true,'balance':'$3,713.47','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Mcconnell','last':'Nash'},'company':'TETAK','email':'mcconnell.nash@tetak.org','phone':'+1 (956) 477-3586','address':'853 Turnbull Avenue, Clarence, Missouri, 1599','about':'Culpa excepteur minim anim magna dolor dolore ad ex eu. In cupidatat cillum elit dolore in est minim dolore consectetur reprehenderit voluptate laborum. Deserunt id velit ad dolor mollit.','registered':'Saturday, November 10, 2018 9:27 AM','latitude':'1.691589','longitude':'143.704377','tags':['ut','deserunt','sit','cupidatat','ea'],'greeting':'Hello, Mcconnell! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed1a87ea0390733ffa','index':40,'guid':'ec8a55f7-7114-4787-b1ff-4e631731bc2c','isActive':true,'balance':'$2,200.71','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Kitty','last':'Meyers'},'company':'FIBEROX','email':'kitty.meyers@fiberox.net','phone':'+1 (864) 458-3826','address':'537 Georgia Avenue, Thermal, Illinois, 7930','about':'Non excepteur laboris Lorem magna adipisicing exercitation. Anim esse in pariatur minim ipsum qui voluptate irure. Pariatur Lorem pariatur esse commodo aute adipisicing anim commodo. Exercitation nostrud aliqua duis et amet amet tempor.','registered':'Tuesday, September 13, 2016 8:16 PM','latitude':'19.59506','longitude':'-57.814297','tags':['duis','ullamco','velit','sint','consequat'],'greeting':'Hello, Kitty! 
You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed4dc76717bf1217b3','index':41,'guid':'40521cde-f835-4620-902b-af7abf185d8d','isActive':false,'balance':'$2,907.02','picture':'http://placehold.it/32x32','age':26,'eyeColor':'green','name':{'first':'Klein','last':'Goodwin'},'company':'PLASTO','email':'klein.goodwin@plasto.name','phone':'+1 (950) 563-3104','address':'764 Devoe Street, Lindcove, Oklahoma, 458','about':'Amet aliqua magna ea veniam non aliquip irure esse id ipsum cillum sint tempor dolor. Ullamco deserunt fugiat amet pariatur culpa nostrud commodo commodo. Ad occaecat magna adipisicing voluptate. Minim ad adipisicing cupidatat elit nostrud eu irure. Cupidatat occaecat aute magna consectetur dolore anim et. Ex voluptate velit exercitation laborum ad ullamco ad. Aliquip nulla ipsum dolore cillum qui nostrud eu adipisicing amet tempor do.','registered':'Tuesday, February 13, 2018 3:56 PM','latitude':'-27.168725','longitude':'-29.499285','tags':['minim','labore','do','deserunt','dolor'],'greeting':'Hello, Klein! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1ac77396b29aee9e','index':42,'guid':'7cfc03e3-30e9-4ae1-a1f5-f6c3223ca770','isActive':true,'balance':'$2,986.47','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Isabelle','last':'Bishop'},'company':'GEEKNET','email':'isabelle.bishop@geeknet.us','phone':'+1 (908) 418-2642','address':'729 Willmohr Street, Aguila, Montana, 7510','about':'In nulla commodo nostrud sint. Elit et occaecat et aliqua aliquip magna esse commodo duis Lorem dolor magna enim deserunt. Ipsum pariatur reprehenderit ipsum adipisicing mollit incididunt ut. Sunt in consequat ex ut minim non qui anim labore. Deserunt minim voluptate in nulla occaecat.','registered':'Monday, September 15, 2014 6:22 AM','latitude':'-81.686947','longitude':'38.409291','tags':['proident','est','aliqua','veniam','anim'],'greeting':'Hello, Isabelle! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edb3a070c9469a4893','index':43,'guid':'3dec76b4-0b55-4765-a2fd-b8dbd9c82f8f','isActive':true,'balance':'$2,501.24','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Josefina','last':'Turner'},'company':'COMSTAR','email':'josefina.turner@comstar.biz','phone':'+1 (908) 566-3029','address':'606 Schenck Place, Brutus, Vermont, 8681','about':'Enim consectetur pariatur sint dolor nostrud est deserunt nulla quis pariatur sit. Ad aute incididunt nisi excepteur duis est velit voluptate ullamco occaecat magna reprehenderit aliquip. Proident deserunt consectetur non et exercitation elit dolore enim aliqua incididunt anim amet. Ex esse sint commodo minim aliqua ut irure. Proident ex culpa voluptate fugiat nisi. Sint commodo laboris excepteur minim ipsum labore tempor quis magna.','registered':'Saturday, December 31, 2016 6:38 AM','latitude':'35.275088','longitude':'24.30485','tags':['minim','ut','irure','Lorem','veniam'],'greeting':'Hello, Josefina! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed1aa7d74128ee3d0f','index':44,'guid':'10599279-c367-46c4-9f7a-744c2e4bf6c9','isActive':true,'balance':'$1,753.06','picture':'http://placehold.it/32x32','age':27,'eyeColor':'blue','name':{'first':'Lily','last':'Haynes'},'company':'KIOSK','email':'lily.haynes@kiosk.ca','phone':'+1 (872) 451-2301','address':'509 Balfour Place, Grazierville, New Hampshire, 2750','about':'Nisi aliquip occaecat nostrud do sint qui nisi officia Lorem. Ad et et laboris nisi dolore aliqua eu. Aliqua veniam quis eu pariatur incididunt mollit id deserunt officia eiusmod. Consequat adipisicing do nisi voluptate eiusmod minim pariatur minim nisi nostrud culpa cupidatat. Irure consectetur id consequat adipisicing ullamco occaecat do. Ex proident ea quis nulla incididunt sunt excepteur incididunt. 
Aliquip minim nostrud non anim Lorem.','registered':'Tuesday, November 20, 2018 9:28 AM','latitude':'-12.677798','longitude':'114.506787','tags':['culpa','amet','elit','officia','irure'],'greeting':'Hello, Lily! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed74c76f2e84e201ce','index':45,'guid':'ec0a68d4-629e-46c9-9af7-f6ea867f02ba','isActive':true,'balance':'$1,477.93','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Shauna','last':'Pitts'},'company':'SPACEWAX','email':'shauna.pitts@spacewax.info','phone':'+1 (841) 406-2360','address':'348 Tabor Court, Westwood, Puerto Rico, 8297','about':'Aliquip irure officia magna ea magna mollit ea non amet deserunt. Veniam mollit labore culpa magna aliqua quis consequat est consectetur ea reprehenderit nostrud consequat aliqua. Mollit do ipsum mollit eiusmod.','registered':'Thursday, October 2, 2014 2:48 AM','latitude':'-55.17388','longitude':'-13.370494','tags':['anim','consectetur','cillum','veniam','duis'],'greeting':'Hello, Shauna! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed419e718484b16722','index':46,'guid':'b2d6101d-5646-43f4-8207-284494e5a990','isActive':false,'balance':'$2,006.96','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Lawrence','last':'Boyer'},'company':'SKYPLEX','email':'lawrence.boyer@skyplex.tv','phone':'+1 (953) 548-2618','address':'464 Pilling Street, Blandburg, Arizona, 5531','about':'Culpa sit minim pariatur mollit cupidatat sunt duis. Nisi ea proident veniam exercitation adipisicing Lorem aliquip amet dolor voluptate in nisi. Non commodo anim sunt est fugiat laborum nisi aliqua non Lorem exercitation dolor. Laboris dolore do minim ut eiusmod enim magna cillum laborum consectetur aliquip minim enim Lorem. 
Veniam ex veniam occaecat aliquip elit aliquip est eiusmod minim minim adipisicing.','registered':'Wednesday, July 30, 2014 2:17 AM','latitude':'-78.681255','longitude':'139.960626','tags':['consequat','Lorem','incididunt','dolor','esse'],'greeting':'Hello, Lawrence! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed08a9024998292c70','index':47,'guid':'277de142-ebeb-4828-906a-7fd8bc0a738a','isActive':true,'balance':'$1,273.19','picture':'http://placehold.it/32x32','age':27,'eyeColor':'brown','name':{'first':'Sonya','last':'Stafford'},'company':'AQUACINE','email':'sonya.stafford@aquacine.co.uk','phone':'+1 (824) 581-3927','address':'641 Bowery Street, Hillsboro, Delaware, 7893','about':'Culpa labore ex reprehenderit mollit cupidatat dolore et ut quis in. Sint esse culpa enim culpa tempor exercitation veniam minim consectetur. Sunt est laboris minim quis incididunt exercitation laboris cupidatat fugiat ad. Deserunt ipsum do dolor cillum excepteur incididunt.','registered':'Thursday, March 26, 2015 1:10 PM','latitude':'-84.750592','longitude':'165.493533','tags':['minim','officia','dolore','ipsum','est'],'greeting':'Hello, Sonya! You have 8 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5037f2c79ecde68','index':48,'guid':'2dc6532f-9a26-49aa-b444-8923896db89c','isActive':false,'balance':'$3,168.93','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Marguerite','last':'Stuart'},'company':'ACCUFARM','email':'marguerite.stuart@accufarm.io','phone':'+1 (848) 535-2253','address':'301 Menahan Street, Sunnyside, Nebraska, 4809','about':'Deserunt sint labore voluptate amet anim culpa nostrud adipisicing enim cupidatat ullamco exercitation fugiat est. Magna dolor aute incididunt ea ad adipisicing. 
Do cupidatat ut officia officia culpa sit do.','registered':'Thursday, May 8, 2014 1:25 PM','latitude':'21.82277','longitude':'-7.368347','tags':['labore','nulla','ullamco','irure','adipisicing'],'greeting':'Hello, Marguerite! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb26d315635818dae','index':49,'guid':'083a5eda-0a70-4f89-87f7-2cd386c0f22a','isActive':false,'balance':'$2,576.25','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Louella','last':'Holloway'},'company':'BEDDER','email':'louella.holloway@bedder.me','phone':'+1 (801) 425-3761','address':'545 Lafayette Avenue, Caledonia, Louisiana, 2816','about':'Qui exercitation occaecat dolore mollit. Fugiat cupidatat proident culpa fugiat quis. In cupidatat commodo elit ea enim occaecat esse exercitation nostrud occaecat veniam laboris fugiat. Nisi sunt reprehenderit aliqua reprehenderit tempor id dolore ullamco pariatur reprehenderit et eu ex pariatur.','registered':'Wednesday, November 5, 2014 1:10 AM','latitude':'36.385637','longitude':'77.949423','tags':['eu','irure','velit','non','aliquip'],'greeting':'Hello, Louella! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed77cd60a1abc1ecce','index':50,'guid':'2887c3c1-3eba-4237-a0db-1977eed94554','isActive':true,'balance':'$1,633.51','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Bates','last':'Carrillo'},'company':'ZOMBOID','email':'bates.carrillo@zomboid.com','phone':'+1 (934) 405-2006','address':'330 Howard Alley, Troy, Kansas, 4881','about':'Voluptate esse est ullamco anim tempor ea reprehenderit. Occaecat pariatur deserunt cillum laboris labore id exercitation esse ipsum ipsum ex aliquip. Sunt non elit est ea occaecat. Magna deserunt commodo aliqua ipsum est cillum dolor nisi. Ex duis est tempor tempor laboris do do quis id magna. 
Dolor do est elit eu laborum ullamco culpa consequat velit eiusmod tempor.','registered':'Saturday, May 28, 2016 3:56 AM','latitude':'83.310134','longitude':'-105.862836','tags':['est','commodo','ea','commodo','sunt'],'greeting':'Hello, Bates! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5ec0ec299b471fb5','index':51,'guid':'512b5e67-f785-492e-9d94-e43ef8b399b8','isActive':false,'balance':'$3,032.22','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Floyd','last':'Yang'},'company':'FRENEX','email':'floyd.yang@frenex.org','phone':'+1 (924) 566-3304','address':'418 Quay Street, Chumuckla, Guam, 7743','about':'Irure sit velit exercitation dolore est nisi incididunt ut quis consectetur incididunt est dolor. Aute nisi enim esse aliquip enim culpa commodo consectetur. Duis laborum magna ad duis ipsum aliqua eiusmod cillum. Consectetur et duis eiusmod irure ad est nisi incididunt eiusmod labore. Pariatur proident in Lorem adipisicing mollit proident excepteur nulla do nostrud mollit eiusmod. Duis ad dolore irure fugiat anim laboris ipsum et sit duis ipsum voluptate. Lorem non aute exercitation qui ullamco officia minim sint pariatur ut dolor.','registered':'Wednesday, January 18, 2017 2:01 AM','latitude':'45.888721','longitude':'-41.232793','tags':['elit','in','esse','ea','officia'],'greeting':'Hello, Floyd! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51e26ca89e5caf49','index':52,'guid':'4e0907f6-facc-46df-8952-73561a53fe33','isActive':true,'balance':'$3,767.41','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Gardner','last':'Carey'},'company':'KLUGGER','email':'gardner.carey@klugger.net','phone':'+1 (876) 481-3502','address':'131 Utica Avenue, Cannondale, Federated States Of Micronesia, 610','about':'Amet ad pariatur excepteur anim ex officia commodo proident aliqua occaecat consequat Lorem officia sit. 
Id minim velit nisi laboris nisi nulla incididunt eiusmod velit. Deserunt labore quis et tempor. Et labore exercitation laborum officia ullamco nostrud adipisicing laboris esse laborum aute anim elit. Sunt ad officia tempor esse et quis aliquip irure pariatur laborum id quis ex. Eu consequat nisi deserunt id eu proident ex minim aute nulla tempor ex.','registered':'Friday, February 21, 2014 6:42 AM','latitude':'-54.740231','longitude':'15.01484','tags':['commodo','laboris','occaecat','aliquip','adipisicing'],'greeting':'Hello, Gardner! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed52e3c9407105093a','index':53,'guid':'1d3b9e7a-1bc3-40ea-b808-1c33f0d48c70','isActive':true,'balance':'$1,113.30','picture':'http://placehold.it/32x32','age':26,'eyeColor':'blue','name':{'first':'Herman','last':'Rogers'},'company':'TALENDULA','email':'herman.rogers@talendula.name','phone':'+1 (818) 521-2005','address':'541 Norman Avenue, Winfred, Tennessee, 447','about':'Culpa ex laborum non ad ullamco officia. Nisi mollit mollit voluptate sit sint ullamco. Lorem exercitation nulla anim eiusmod deserunt magna sint. Officia sunt eiusmod aliqua reprehenderit sunt mollit sit cupidatat sint.','registered':'Wednesday, July 11, 2018 1:05 AM','latitude':'-20.708105','longitude':'-151.294563','tags':['exercitation','minim','officia','qui','enim'],'greeting':'Hello, Herman! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edfcb123d545b6edb4','index':54,'guid':'c0e0c669-4eed-43ee-bdd0-78fe6e9ca4d5','isActive':true,'balance':'$3,309.64','picture':'http://placehold.it/32x32','age':22,'eyeColor':'green','name':{'first':'Whitley','last':'Stark'},'company':'MUSAPHICS','email':'whitley.stark@musaphics.us','phone':'+1 (803) 476-2151','address':'548 Cobek Court, Chamizal, Indiana, 204','about':'Adipisicing veniam dolor ex sint sit id eu voluptate. Excepteur veniam proident exercitation id eu et sunt pariatur. 
Qui occaecat culpa aliqua nisi excepteur minim veniam. Est duis nulla laborum excepteur cillum pariatur sint incididunt. Velit commodo eu incididunt voluptate. Amet laboris laboris id adipisicing labore eiusmod consequat minim cillum et.','registered':'Thursday, March 27, 2014 9:10 AM','latitude':'71.219596','longitude':'51.012855','tags':['reprehenderit','mollit','laborum','voluptate','aliquip'],'greeting':'Hello, Whitley! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed81510dfc61602fcf','index':55,'guid':'7ec5c24d-f169-4399-a2a3-300c0f45e52e','isActive':false,'balance':'$3,721.04','picture':'http://placehold.it/32x32','age':23,'eyeColor':'green','name':{'first':'Gretchen','last':'Wade'},'company':'EWEVILLE','email':'gretchen.wade@eweville.biz','phone':'+1 (977) 598-3700','address':'721 Colonial Road, Brookfield, South Dakota, 3888','about':'Fugiat consequat sint ut ut et ullamco eiusmod deserunt pariatur. Veniam eiusmod esse fugiat mollit. Proident laboris minim qui do ipsum excepteur exercitation irure anim. Aliqua labore quis eu fugiat dolore ullamco velit Lorem voluptate ipsum nostrud eiusmod laborum proident.','registered':'Friday, October 12, 2018 10:59 AM','latitude':'41.937653','longitude':'63.378531','tags':['aute','cillum','ea','ex','aute'],'greeting':'Hello, Gretchen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edf78f77d4a7d557bb','index':56,'guid':'8718ada7-6fd0-49ef-a405-29850503948b','isActive':false,'balance':'$3,341.33','picture':'http://placehold.it/32x32','age':32,'eyeColor':'blue','name':{'first':'Naomi','last':'Frye'},'company':'MAZUDA','email':'naomi.frye@mazuda.ca','phone':'+1 (825) 427-2255','address':'741 Coyle Street, Comptche, Pennsylvania, 8441','about':'Aliqua fugiat laborum quis ullamco cupidatat sit dolor nulla dolore. Do Lorem et ipsum culpa irure sit do dolor qui sit laboris aliqua. 
Ex consectetur irure in veniam reprehenderit amet do elit eiusmod est magna.','registered':'Thursday, January 9, 2014 7:18 AM','latitude':'41.078645','longitude':'-50.241966','tags':['do','aliquip','eiusmod','velit','id'],'greeting':'Hello, Naomi! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbf45db2e072a48b4','index':57,'guid':'c158ebf7-fb8b-4ea8-adbf-8c51c6486715','isActive':true,'balance':'$2,811.55','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Lamb','last':'Johns'},'company':'DOGTOWN','email':'lamb.johns@dogtown.info','phone':'+1 (946) 530-3057','address':'559 Malbone Street, Kennedyville, California, 2052','about':'Eiusmod dolor labore cillum ad veniam elit voluptate voluptate pariatur est cupidatat. Laboris ut qui in cillum sunt dolore ut enim. Minim nostrud ex qui quis reprehenderit magna ipsum cupidatat irure minim laboris veniam irure. Fugiat velit deserunt aliquip in esse proident excepteur labore reprehenderit excepteur sunt in cupidatat exercitation. Ex pariatur irure mollit tempor non magna ex.','registered':'Friday, April 21, 2017 1:51 AM','latitude':'-61.403599','longitude':'-93.447102','tags':['aliquip','tempor','sint','enim','ipsum'],'greeting':'Hello, Lamb! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edbb9c88190cb59cf2','index':58,'guid':'f0de5ac5-eb28-491b-81c5-76d447c9055e','isActive':true,'balance':'$1,611.99','picture':'http://placehold.it/32x32','age':37,'eyeColor':'brown','name':{'first':'Lynette','last':'Cleveland'},'company':'ARTWORLDS','email':'lynette.cleveland@artworlds.tv','phone':'+1 (889) 596-3723','address':'439 Montauk Avenue, Felt, New Mexico, 9681','about':'Incididunt aliquip est aliquip est ullamco do consectetur dolor. Lorem mollit mollit dolor et ipsum ut qui veniam aute ea. Adipisicing reprehenderit culpa velit laborum adipisicing amet consectetur velit nisi. Ut qui proident ad cillum excepteur adipisicing quis labore. 
Duis velit culpa et excepteur eiusmod ex labore in nisi nostrud. Et ullamco minim excepteur ut enim reprehenderit consequat eiusmod laboris Lorem commodo exercitation qui laborum.','registered':'Wednesday, August 26, 2015 12:53 PM','latitude':'49.861336','longitude':'86.865926','tags':['reprehenderit','minim','in','minim','nostrud'],'greeting':'Hello, Lynette! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5b760ddde7295fa8','index':59,'guid':'f8180d3f-c5c0-48b2-966e-a0b2a80f8e84','isActive':true,'balance':'$3,376.75','picture':'http://placehold.it/32x32','age':32,'eyeColor':'green','name':{'first':'Obrien','last':'Page'},'company':'GLASSTEP','email':'obrien.page@glasstep.co.uk','phone':'+1 (902) 583-3086','address':'183 Ridgewood Avenue, Vicksburg, Wisconsin, 7430','about':'Aute excepteur cillum exercitation duis Lorem irure labore elit. Labore magna cupidatat velit consectetur minim do Lorem in excepteur commodo ea consequat ullamco laborum. Ut in id occaecat eu quis duis id ea deserunt veniam.','registered':'Wednesday, March 29, 2017 12:13 AM','latitude':'-40.156154','longitude':'72.76301','tags':['excepteur','non','anim','nulla','anim'],'greeting':'Hello, Obrien! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed52985d3d8901d653','index':60,'guid':'d2e14fa1-8c54-4bcb-8a58-eb2e6f8d0e45','isActive':true,'balance':'$1,659.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'brown','name':{'first':'Knowles','last':'Goodman'},'company':'CENTREE','email':'knowles.goodman@centree.io','phone':'+1 (862) 563-3692','address':'504 Lott Street, Allensworth, Florida, 7148','about':'Do aliquip voluptate aliqua nostrud. Eu dolore ex occaecat pariatur aute laborum aute nulla aute amet. Excepteur sit laboris ad non anim ut officia ut ad exercitation officia dolore laboris. Esse voluptate minim deserunt nostrud exercitation laborum voluptate exercitation id laborum fugiat proident cupidatat proident. 
Nulla nostrud est sint adipisicing incididunt exercitation dolor sit et elit tempor occaecat sint culpa. Pariatur occaecat laboris pariatur laboris ad pariatur in cillum fugiat est fugiat. Proident eu id irure excepteur esse aute cillum adipisicing.','registered':'Wednesday, October 15, 2014 6:17 PM','latitude':'-15.73863','longitude':'87.422009','tags':['consequat','sint','tempor','veniam','culpa'],'greeting':'Hello, Knowles! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda00b73bdb7ea54e9','index':61,'guid':'c8a064db-0ec6-4832-9820-7280a0333709','isActive':true,'balance':'$3,701.14','picture':'http://placehold.it/32x32','age':35,'eyeColor':'brown','name':{'first':'Shepherd','last':'Todd'},'company':'ECRATIC','email':'shepherd.todd@ecratic.me','phone':'+1 (881) 444-3389','address':'450 Frank Court, Temperanceville, Ohio, 7006','about':'Voluptate cillum ad fugiat velit adipisicing sint consequat veniam Lorem reprehenderit. Cillum sit non deserunt consequat. Amet sunt pariatur non mollit ullamco proident sint dolore anim elit cupidatat anim do ullamco. Lorem Lorem incididunt ea elit consequat laboris enim duis quis Lorem id aute veniam consequat. Cillum veniam cillum sint qui Lorem fugiat culpa consequat. Est sint duis ut qui fugiat. Laborum pariatur velit et sunt mollit eiusmod excepteur culpa ex et officia.','registered':'Tuesday, October 10, 2017 2:01 AM','latitude':'82.951563','longitude':'-4.866954','tags':['eu','qui','proident','esse','ex'],'greeting':'Hello, Shepherd! 
You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed0e51d1a7e2d9e559','index':62,'guid':'739c3d38-200d-4531-84d8-4e7c39ae5b8c','isActive':true,'balance':'$3,679.01','picture':'http://placehold.it/32x32','age':31,'eyeColor':'brown','name':{'first':'Rosalyn','last':'Heath'},'company':'ZAYA','email':'rosalyn.heath@zaya.com','phone':'+1 (865) 403-3520','address':'303 Henderson Walk, Hoehne, District Of Columbia, 4306','about':'Sint occaecat nulla mollit sint fugiat eu proident dolor labore consequat. Occaecat tempor excepteur do fugiat incididunt Lorem in ullamco dolore laborum. Cillum mollit aliquip excepteur aliquip sint sunt minim non irure irure. Cillum fugiat aliqua enim dolore. Nulla culpa culpa nostrud ad. Eiusmod culpa proident proident non est cupidatat eu sunt sit incididunt id nisi.','registered':'Wednesday, April 22, 2015 12:35 PM','latitude':'33.628504','longitude':'110.772802','tags':['consequat','ut','ex','labore','consectetur'],'greeting':'Hello, Rosalyn! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5274c01d353d0c5','index':63,'guid':'8815fe55-8af1-4708-a62a-d554dbd74a4a','isActive':true,'balance':'$2,126.01','picture':'http://placehold.it/32x32','age':30,'eyeColor':'blue','name':{'first':'Queen','last':'Harper'},'company':'TRI@TRIBALOG','email':'queen.harper@tri@tribalog.org','phone':'+1 (903) 592-3145','address':'926 Heath Place, Wawona, Maine, 7340','about':'Laborum cupidatat commodo aliquip reprehenderit. Excepteur eu labore duis minim minim voluptate aute nostrud deserunt ut velit ullamco. Adipisicing nisi occaecat laborum proident. Id reprehenderit eiusmod cupidatat qui aute consequat amet enim commodo duis non ipsum. Amet ut aliqua magna qui proident mollit aute.','registered':'Saturday, April 9, 2016 5:12 AM','latitude':'51.814216','longitude':'177.348115','tags':['cillum','ut','dolor','do','nisi'],'greeting':'Hello, Queen! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed126298b6ce62ed56','index':64,'guid':'001c87fe-182f-450f-903b-2e29a9bb0322','isActive':true,'balance':'$3,578.29','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Pauline','last':'Mills'},'company':'CRUSTATIA','email':'pauline.mills@crustatia.net','phone':'+1 (984) 582-3899','address':'899 Revere Place, Welch, Iowa, 216','about':'Tempor eu exercitation ut id. Deserunt ex reprehenderit veniam nisi. Aute laborum veniam velit dolore ut deserunt Lorem sit esse quis dolor ex do nisi. In dolor tempor officia id. Velit nisi culpa nostrud laborum officia incididunt laborum velit non quis id exercitation exercitation. Anim elit ullamco in enim Lorem culpa aliqua Lorem.','registered':'Monday, June 2, 2014 2:03 PM','latitude':'56.427576','longitude':'172.183669','tags':['pariatur','pariatur','pariatur','fugiat','Lorem'],'greeting':'Hello, Pauline! You have 8 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e332ad9e8a178d8','index':65,'guid':'5ad7292b-feef-4a7e-b485-142cadfbe8ea','isActive':false,'balance':'$3,916.54','picture':'http://placehold.it/32x32','age':22,'eyeColor':'brown','name':{'first':'Garrett','last':'Richmond'},'company':'XYQAG','email':'garrett.richmond@xyqag.name','phone':'+1 (952) 584-3794','address':'233 Grove Street, Summerfield, Virginia, 4735','about':'Nostrud quis pariatur occaecat laborum laboris aliqua ut fugiat dolor. Commodo tempor excepteur enim nostrud Lorem. Aute elit nulla labore ad pariatur cupidatat Lorem qui cupidatat velit deserunt excepteur esse. Excepteur nulla et nostrud quis labore est veniam enim nisi laboris ut enim. Ea esse nulla anim excepteur reprehenderit deserunt voluptate minim qui labore adipisicing amet eu enim.','registered':'Wednesday, March 5, 2014 4:35 PM','latitude':'68.665041','longitude':'148.799524','tags':['irure','reprehenderit','minim','ea','do'],'greeting':'Hello, Garrett! 
You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed541aa2ec47466ace','index':66,'guid':'9cda6f3c-c9ab-451c-bb19-2e4c8463d011','isActive':true,'balance':'$3,352.52','picture':'http://placehold.it/32x32','age':30,'eyeColor':'brown','name':{'first':'Cobb','last':'Whitley'},'company':'UNIA','email':'cobb.whitley@unia.us','phone':'+1 (888) 490-3342','address':'864 Belmont Avenue, Needmore, Massachusetts, 8286','about':'Nisi aliquip fugiat ipsum nisi ullamco minim pariatur labore. Sint labore anim do ad ad esse eu nostrud nulla commodo anim. Cillum anim enim duis cillum non do nisi aliquip veniam voluptate commodo aliqua laborum. Exercitation in do eu qui sint aliquip. Esse adipisicing deserunt deserunt qui anim aliqua occaecat et nostrud elit ea in anim cillum. Tempor mollit proident tempor sunt est sint laborum ullamco incididunt non. Velit aliqua sunt excepteur nisi qui eiusmod ipsum dolore aliquip velit ullamco ullamco.','registered':'Friday, May 23, 2014 7:11 PM','latitude':'-32.950581','longitude':'147.772494','tags':['mollit','adipisicing','irure','ad','minim'],'greeting':'Hello, Cobb! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed8186c3d6f34c2be3','index':67,'guid':'fee98f6d-d68a-4189-8180-b6cb337e537e','isActive':false,'balance':'$1,698.42','picture':'http://placehold.it/32x32','age':20,'eyeColor':'blue','name':{'first':'Brennan','last':'Tyler'},'company':'PODUNK','email':'brennan.tyler@podunk.biz','phone':'+1 (867) 498-2727','address':'599 Harkness Avenue, Gorst, American Samoa, 322','about':'Reprehenderit id sit qui id qui aute ea sit magna in qui proident. Excepteur ad nostrud do nostrud in incididunt voluptate adipisicing sint anim. Ullamco consequat minim nulla irure ex est irure reprehenderit deserunt voluptate dolore anim sunt. 
Occaecat dolore voluptate voluptate elit commodo nulla laborum ad do irure.','registered':'Friday, February 9, 2018 5:40 PM','latitude':'11.150893','longitude':'-85.298004','tags':['quis','minim','deserunt','cillum','laboris'],'greeting':'Hello, Brennan! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed075c9c4f7439818d','index':68,'guid':'1ef76b18-6b8d-4c3c-aca3-9fa2b43f0242','isActive':false,'balance':'$2,091.17','picture':'http://placehold.it/32x32','age':26,'eyeColor':'brown','name':{'first':'Neal','last':'Stephenson'},'company':'OTHERSIDE','email':'neal.stephenson@otherside.ca','phone':'+1 (820) 496-3344','address':'867 Wilson Street, Kidder, Colorado, 4599','about':'Do laboris enim proident in qui velit adipisicing magna anim. Amet proident non exercitation ipsum aliqua excepteur nostrud. Enim esse non sit in nostrud deserunt id laborum cillum deserunt consequat. Anim velit exercitation qui sit voluptate. Irure duis non veniam velit mollit exercitation id exercitation.','registered':'Thursday, November 13, 2014 11:00 PM','latitude':'54.809693','longitude':'1.877241','tags':['anim','duis','in','officia','sint'],'greeting':'Hello, Neal! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0eda0a2dc24db64b638','index':69,'guid':'194744fd-089b-40b6-a290-98a6ec30a415','isActive':false,'balance':'$3,191.67','picture':'http://placehold.it/32x32','age':24,'eyeColor':'brown','name':{'first':'Shields','last':'Hubbard'},'company':'MIRACULA','email':'shields.hubbard@miracula.info','phone':'+1 (885) 582-2001','address':'529 Eagle Street, Guilford, Nevada, 1460','about':'Eiusmod exercitation ut incididunt veniam commodo culpa ullamco mollit id adipisicing exercitation ad sint. Nostrud excepteur amet aliqua mollit incididunt laborum voluptate id anim. Nulla sint laboris dolor esse cupidatat laborum ex sint. 
Ex non sunt sit nulla.','registered':'Monday, February 13, 2017 6:22 AM','latitude':'-69.145209','longitude':'-40.69755','tags':['tempor','enim','qui','velit','elit'],'greeting':'Hello, Shields! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edf939c130177e074d','index':70,'guid':'303b176c-7803-4ed2-a35f-3e3c831793ef','isActive':false,'balance':'$2,359.09','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Coleen','last':'Knight'},'company':'BLEEKO','email':'coleen.knight@bleeko.tv','phone':'+1 (867) 423-3146','address':'527 Broadway , Bonanza, Marshall Islands, 4988','about':'Laboris nulla pariatur laborum ad aute excepteur sunt pariatur exercitation. Do nostrud qui ipsum ullamco et sint do Lorem cillum ullamco do. Exercitation labore excepteur commodo incididunt eiusmod proident consectetur adipisicing nostrud aute voluptate laboris. Commodo anim proident eiusmod pariatur est ea laborum incididunt qui tempor reprehenderit ullamco id. Eiusmod commodo nisi consectetur ut qui quis aliqua sit minim nostrud sunt laborum eiusmod adipisicing.','registered':'Sunday, May 6, 2018 8:03 AM','latitude':'70.729041','longitude':'113.052761','tags':['Lorem','ullamco','nulla','ullamco','commodo'],'greeting':'Hello, Coleen! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edae8b1ce688b61223','index':71,'guid':'7d6f3b1a-c367-4068-9e8e-1717d513ece3','isActive':false,'balance':'$2,911.07','picture':'http://placehold.it/32x32','age':21,'eyeColor':'brown','name':{'first':'Clark','last':'Ryan'},'company':'ECLIPSENT','email':'clark.ryan@eclipsent.co.uk','phone':'+1 (938) 562-2740','address':'500 Lewis Avenue, Rockbridge, North Dakota, 5133','about':'Adipisicing exercitation officia sit excepteur excepteur sunt sint amet. Aliqua ipsum sint laboris eiusmod esse culpa elit sunt. Dolore est consectetur est quis quis magna. Aliquip nostrud dolore ex pariatur. Anim nostrud duis exercitation ut magna magna culpa. 
Nisi irure id mollit labore non sit mollit occaecat Lorem est ipsum. Nulla est fugiat cillum nisi aliqua consectetur amet nulla nostrud esse.','registered':'Friday, July 24, 2015 9:28 AM','latitude':'-68.055815','longitude':'-50.926966','tags':['deserunt','ad','ad','ut','id'],'greeting':'Hello, Clark! You have 7 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed5d1e8df45d8ab4db','index':72,'guid':'ce85db37-7d04-4f4c-a4b0-78003533e5c6','isActive':false,'balance':'$1,127.43','picture':'http://placehold.it/32x32','age':21,'eyeColor':'green','name':{'first':'Dillon','last':'Hooper'},'company':'MEDESIGN','email':'dillon.hooper@medesign.io','phone':'+1 (929) 600-3797','address':'652 Mill Avenue, Elliston, Mississippi, 2958','about':'Dolore culpa qui exercitation nostrud do. Irure duis in ad ipsum aliqua aliquip nulla sit veniam officia quis occaecat est. Magna qui eiusmod pariatur aliquip minim commodo. Qui ex dolor excepteur consequat eiusmod occaecat. In officia ipsum do Lorem excepteur proident pariatur labore.','registered':'Monday, May 26, 2014 2:38 AM','latitude':'-36.032189','longitude':'86.865529','tags':['non','ut','ex','Lorem','quis'],'greeting':'Hello, Dillon! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edb84814579c3121b3','index':73,'guid':'d7303901-5186-4595-a759-22306f67d0a3','isActive':true,'balance':'$2,326.59','picture':'http://placehold.it/32x32','age':33,'eyeColor':'green','name':{'first':'Moreno','last':'Hull'},'company':'ZEAM','email':'moreno.hull@zeam.me','phone':'+1 (984) 586-3738','address':'265 Pine Street, Talpa, North Carolina, 6041','about':'Fugiat exercitation est ullamco anim. Exercitation proident id sunt culpa Lorem amet. Consectetur anim consectetur pariatur consequat consectetur amet excepteur voluptate ea velit duis eiusmod proident. In sint laborum cupidatat ea amet ex. 
Reprehenderit amet sunt dolor ullamco est ex deserunt.','registered':'Wednesday, January 24, 2018 8:52 PM','latitude':'84.956857','longitude':'113.210051','tags':['est','excepteur','anim','Lorem','dolor'],'greeting':'Hello, Moreno! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda4eb9dcb92c82d06','index':74,'guid':'8ee28651-802e-4523-b676-c713f6e874b8','isActive':true,'balance':'$3,783.97','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Tracie','last':'Price'},'company':'ICOLOGY','email':'tracie.price@icology.com','phone':'+1 (897) 403-3768','address':'487 Sheffield Avenue, Vallonia, Wyoming, 276','about':'Voluptate laboris laborum aute ex sint voluptate officia proident. Sit esse nostrud cupidatat in veniam sit duis est. Do mollit elit exercitation aliqua id irure ex. Lorem reprehenderit do ullamco sint ea ad nisi ad ut.','registered':'Saturday, December 10, 2016 9:44 AM','latitude':'77.770464','longitude':'151.392903','tags':['incididunt','labore','aliquip','anim','minim'],'greeting':'Hello, Tracie! You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed68ab1a55d1c35e6c','index':75,'guid':'deedd26a-8928-4064-9666-5c59ea8144b4','isActive':true,'balance':'$2,848.08','picture':'http://placehold.it/32x32','age':32,'eyeColor':'brown','name':{'first':'Montgomery','last':'Bruce'},'company':'CYTREK','email':'montgomery.bruce@cytrek.org','phone':'+1 (824) 414-2731','address':'397 Beach Place, Ellerslie, South Carolina, 967','about':'Mollit minim excepteur magna velit cillum excepteur exercitation anim id labore deserunt do. Fugiat ex et id ad. Duis excepteur laboris est nulla do id irure quis eiusmod do esse ut culpa in.','registered':'Tuesday, August 25, 2015 6:42 AM','latitude':'79.722631','longitude':'-7.516885','tags':['Lorem','sint','voluptate','proident','incididunt'],'greeting':'Hello, Montgomery! 
You have 6 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd90e0abb1cc2b0aa','index':76,'guid':'a072159d-12db-4747-9c2a-e2486a53d043','isActive':false,'balance':'$2,723.54','picture':'http://placehold.it/32x32','age':40,'eyeColor':'green','name':{'first':'Zelma','last':'Salinas'},'company':'IMAGEFLOW','email':'zelma.salinas@imageflow.net','phone':'+1 (964) 555-3856','address':'584 Reeve Place, Nord, Georgia, 7473','about':'Aliqua proident excepteur duis cupidatat cillum amet esse esse consectetur ea. Officia sunt consequat nostrud minim enim dolore dolor duis cillum. Esse labore veniam sint laborum excepteur sint tempor do ad cupidatat aliquip laboris elit id. Velit reprehenderit ullamco velit ullamco adipisicing velit esse irure velit et.','registered':'Thursday, February 25, 2016 8:18 PM','latitude':'-32.880524','longitude':'115.180489','tags':['id','nulla','reprehenderit','consequat','reprehenderit'],'greeting':'Hello, Zelma! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed98d836c8da283bb2','index':77,'guid':'838bebad-cc20-44e9-9eb7-902a8ca25efb','isActive':false,'balance':'$3,488.91','picture':'http://placehold.it/32x32','age':20,'eyeColor':'green','name':{'first':'Shaw','last':'Parsons'},'company':'PEARLESEX','email':'shaw.parsons@pearlesex.name','phone':'+1 (912) 567-3580','address':'606 Ocean Avenue, Tyro, Northern Mariana Islands, 3367','about':'Laborum labore occaecat culpa pariatur nisi non adipisicing esse consectetur officia officia. Deserunt velit eu enim consectetur ut cillum aliqua occaecat dolor qui esse. Incididunt ad est ex eu culpa anim aliquip laborum. Aliqua consectetur velit exercitation magna minim nulla do ut excepteur enim aliquip et. Nostrud enim sunt amet amet proident aliqua velit dolore. Consectetur ipsum fugiat proident id est reprehenderit tempor irure commodo. 
Sit excepteur fugiat occaecat nulla Lorem et cillum.','registered':'Thursday, April 19, 2018 1:41 AM','latitude':'69.715573','longitude':'-118.481237','tags':['laboris','adipisicing','magna','voluptate','id'],'greeting':'Hello, Shaw! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1101734633c6ebba','index':78,'guid':'8fd0c52a-9d74-4984-a608-d612ecd8ddf0','isActive':true,'balance':'$3,820.02','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Jaime','last':'Beard'},'company':'IZZBY','email':'jaime.beard@izzby.us','phone':'+1 (820) 412-3806','address':'362 Hudson Avenue, Delco, New Jersey, 5684','about':'Ut cupidatat veniam nulla magna commodo sit duis veniam consectetur cupidatat elit quis tempor. Duis officia ullamco proident sunt non mollit excepteur. Nisi ex amet laboris proident duis reprehenderit et est aliqua mollit amet ad. Enim eu elit excepteur eu exercitation duis consequat culpa. Adipisicing reprehenderit duis Lorem reprehenderit dolor aliqua incididunt eiusmod consequat ad occaecat fugiat do laborum. Qui ad aliquip ex do sunt. Fugiat non ut fugiat eu.','registered':'Sunday, March 9, 2014 3:41 PM','latitude':'17.926318','longitude':'108.985996','tags':['ut','voluptate','veniam','non','commodo'],'greeting':'Hello, Jaime! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edcd125a89dcf18e0d','index':79,'guid':'eccaa4ca-0fa7-4b00-a1e3-fe7953403894','isActive':true,'balance':'$1,521.33','picture':'http://placehold.it/32x32','age':30,'eyeColor':'green','name':{'first':'Terra','last':'Sullivan'},'company':'ZANITY','email':'terra.sullivan@zanity.biz','phone':'+1 (995) 498-2714','address':'346 Congress Street, Tuttle, Maryland, 3152','about':'Incididunt enim veniam ut veniam quis dolore pariatur culpa ex. Cillum laboris dolor exercitation officia. Officia irure magna aliqua veniam officia ullamco culpa. Cillum enim velit ea sint sint officia labore ea adipisicing culpa laboris. 
Anim aute sint commodo culpa ex quis minim ut laborum.','registered':'Sunday, June 1, 2014 5:38 AM','latitude':'-4.655435','longitude':'5.851803','tags':['anim','non','anim','laborum','pariatur'],'greeting':'Hello, Terra! You have 5 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed9b9fc3041a674c87','index':80,'guid':'9f95fa36-4e45-4c3f-9362-3d4d809bf57f','isActive':true,'balance':'$3,403.16','picture':'http://placehold.it/32x32','age':39,'eyeColor':'brown','name':{'first':'Sharpe','last':'Berger'},'company':'ZILLAN','email':'sharpe.berger@zillan.ca','phone':'+1 (913) 498-3005','address':'277 Bragg Street, Faywood, Texas, 6487','about':'Dolor duis id aute ea veniam amet ullamco id. Culpa deserunt irure mollit tempor dolore veniam culpa officia culpa laborum eiusmod. Ullamco tempor qui aliqua cupidatat veniam cillum eu ut ex minim eu in. Quis exercitation anim eiusmod tempor esse mollit exercitation cillum ipsum reprehenderit. Sint voluptate ipsum officia sint magna nulla tempor eiusmod eiusmod veniam. Consectetur non ad veniam exercitation voluptate non nostrud.','registered':'Tuesday, June 27, 2017 12:58 AM','latitude':'-0.54085','longitude':'106.258693','tags':['proident','eiusmod','commodo','excepteur','pariatur'],'greeting':'Hello, Sharpe! You have 5 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed1a1866757bf675e0','index':81,'guid':'1b944a01-01d3-4846-94e3-630f4d0e51a3','isActive':true,'balance':'$2,038.61','picture':'http://placehold.it/32x32','age':28,'eyeColor':'brown','name':{'first':'Blanchard','last':'Ewing'},'company':'CONJURICA','email':'blanchard.ewing@conjurica.info','phone':'+1 (859) 593-3212','address':'252 Beaver Street, Kiskimere, Utah, 3255','about':'Labore magna aute adipisicing ut dolor sit ea. Officia culpa aute occaecat sit ex ullamco aliquip ad sit culpa. Ex in enim dolore ex est sit. Do irure nulla magna sint aliquip in duis aute. 
Magna ullamco sit labore ea tempor voluptate.','registered':'Monday, May 4, 2015 10:50 AM','latitude':'76.207595','longitude':'0.672563','tags':['proident','pariatur','officia','in','culpa'],'greeting':'Hello, Blanchard! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed987d82f4e22d939c','index':82,'guid':'97a90aee-3cee-4678-819e-24fb94279dc1','isActive':false,'balance':'$1,201.55','picture':'http://placehold.it/32x32','age':28,'eyeColor':'blue','name':{'first':'Wells','last':'Solomon'},'company':'CORPULSE','email':'wells.solomon@corpulse.tv','phone':'+1 (840) 539-3349','address':'159 Radde Place, Linganore, Idaho, 230','about':'Consequat dolore mollit sit irure cupidatat commodo. Incididunt cillum reprehenderit ullamco sit proident cupidatat occaecat reprehenderit officia. Ad anim Lorem elit in officia minim proident nisi commodo eiusmod ea Lorem dolore voluptate. Dolor aliquip est commodo Lorem dolor ut aliquip ut. Sit anim officia dolore excepteur aute enim cillum.','registered':'Friday, January 6, 2017 1:59 PM','latitude':'70.020883','longitude':'14.503588','tags':['mollit','aute','officia','nostrud','laboris'],'greeting':'Hello, Wells! You have 7 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eddf7a904ea0d0bc2a','index':83,'guid':'fe639a0c-7517-43e6-b0da-cd9ca5b9e267','isActive':false,'balance':'$3,664.47','picture':'http://placehold.it/32x32','age':33,'eyeColor':'blue','name':{'first':'Natalia','last':'Brown'},'company':'SYNTAC','email':'natalia.brown@syntac.co.uk','phone':'+1 (952) 595-3513','address':'332 Lenox Road, Springville, Alabama, 8406','about':'Nulla consequat officia commodo ea sunt irure anim velit aliquip aliquip. Labore ullamco occaecat proident voluptate cillum labore minim nostrud excepteur. Qui fugiat nostrud cillum fugiat ullamco id commodo aliqua voluptate mollit id id laboris. Cillum qui duis duis sit adipisicing elit ut aliqua eu. 
Anim nisi aliqua sit mollit.','registered':'Sunday, July 30, 2017 1:02 PM','latitude':'31.937613','longitude':'-9.957927','tags':['magna','adipisicing','exercitation','tempor','consectetur'],'greeting':'Hello, Natalia! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed8823fa385cad4aa3','index':84,'guid':'5cf280da-f5f0-4cc6-9063-e9d5863c8c89','isActive':false,'balance':'$1,624.17','picture':'http://placehold.it/32x32','age':25,'eyeColor':'blue','name':{'first':'Greene','last':'Waller'},'company':'ISOTRACK','email':'greene.waller@isotrack.io','phone':'+1 (838) 406-3608','address':'362 Albemarle Road, Gardiner, Michigan, 2764','about':'Ut nisi sit sint nulla dolor magna. Culpa occaecat adipisicing veniam proident excepteur tempor quis ex. Fugiat tempor laborum dolor adipisicing irure anim cupidatat ut exercitation ex sit. Cupidatat exercitation commodo sunt ex irure fugiat eu esse do ullamco mollit dolore cupidatat. Cupidatat magna incididunt officia dolore esse voluptate deserunt in laborum dolor. Sit fugiat Lorem eu ullamco. Laboris veniam quis cillum tempor ex fugiat cillum cupidatat.','registered':'Sunday, June 10, 2018 10:32 PM','latitude':'0.256921','longitude':'-96.141941','tags':['magna','dolore','deserunt','aliquip','cillum'],'greeting':'Hello, Greene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0eda7c905c2d24c7d31','index':85,'guid':'aa30a9fb-8a16-48eb-8bb7-1307d1e1f191','isActive':false,'balance':'$1,974.04','picture':'http://placehold.it/32x32','age':36,'eyeColor':'green','name':{'first':'Carlene','last':'Hanson'},'company':'DIGIRANG','email':'carlene.hanson@digirang.me','phone':'+1 (981) 417-3209','address':'435 Clark Street, Choctaw, Oregon, 9888','about':'Amet labore esse cillum irure laborum consectetur occaecat non aliquip aliquip proident. Nisi magna nulla officia duis labore aute nulla laborum duis tempor minim. 
Velit elit reprehenderit nisi exercitation officia incididunt amet cupidatat excepteur proident consectetur.','registered':'Thursday, April 20, 2017 6:13 AM','latitude':'68.529086','longitude':'68.802409','tags':['pariatur','nulla','qui','amet','labore'],'greeting':'Hello, Carlene! You have 10 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed6fbee12ce9e55dbf','index':86,'guid':'0fce89aa-3310-48df-862a-68bd3d776644','isActive':false,'balance':'$3,909.64','picture':'http://placehold.it/32x32','age':40,'eyeColor':'brown','name':{'first':'Doris','last':'Collins'},'company':'ZIORE','email':'doris.collins@ziore.com','phone':'+1 (914) 405-2360','address':'301 Lorraine Street, Stouchsburg, Minnesota, 7476','about':'Nisi deserunt aliquip et deserunt ipsum ad consectetur est non ullamco. Dolore do ut voluptate do eiusmod. Culpa ad in eiusmod nisi cillum do. Officia magna cillum sint aliqua reprehenderit amet est ipsum. Eiusmod deserunt commodo proident consequat. Amet minim dolor consequat aliquip aliquip culpa non exercitation non.','registered':'Wednesday, February 25, 2015 9:15 PM','latitude':'-57.364906','longitude':'130.766587','tags':['nulla','deserunt','cillum','eiusmod','adipisicing'],'greeting':'Hello, Doris! You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0edede9402476c398c0','index':87,'guid':'60cf0aa6-bc6d-4305-8842-d27e6af1306f','isActive':false,'balance':'$2,817.53','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Cline','last':'Hayden'},'company':'ECRAZE','email':'cline.hayden@ecraze.org','phone':'+1 (965) 507-2138','address':'352 Rutland Road, Ebro, Connecticut, 1196','about':'Dolor eiusmod enim anim sit enim ea tempor. Tempor amet consectetur aliquip culpa do ex excepteur deserunt. Dolor commodo veniam culpa sint. 
Commodo consectetur pariatur irure nisi deserunt cillum est dolor ipsum ea.','registered':'Thursday, September 29, 2016 5:58 AM','latitude':'62.50713','longitude':'86.247286','tags':['enim','tempor','anim','veniam','proident'],'greeting':'Hello, Cline! You have 9 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edeb72f151994a551b','index':88,'guid':'dbb49c62-86b1-409f-b8b8-f609c709d2a8','isActive':false,'balance':'$3,122.56','picture':'http://placehold.it/32x32','age':39,'eyeColor':'green','name':{'first':'Janelle','last':'Rutledge'},'company':'TERRAGEN','email':'janelle.rutledge@terragen.net','phone':'+1 (914) 581-3749','address':'170 Falmouth Street, Alderpoint, West Virginia, 642','about':'Laboris proident cillum sunt qui ea sunt. Officia adipisicing exercitation dolore magna reprehenderit amet anim id. Laboris commodo sit irure irure. Excepteur est mollit fugiat incididunt consectetur veniam irure ea mollit. Cillum enim consequat sunt sunt nisi incididunt tempor enim.','registered':'Monday, February 16, 2015 5:46 AM','latitude':'-46.392023','longitude':'32.054562','tags':['eu','eu','nisi','labore','deserunt'],'greeting':'Hello, Janelle! You have 9 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edc9c2604846ff9a0d','index':89,'guid':'c4d7a365-f1d3-4584-b78e-008394c219f7','isActive':true,'balance':'$1,807.19','picture':'http://placehold.it/32x32','age':24,'eyeColor':'green','name':{'first':'Abby','last':'Lopez'},'company':'GRAINSPOT','email':'abby.lopez@grainspot.name','phone':'+1 (917) 442-3955','address':'488 Kensington Walk, Winston, Hawaii, 9109','about':'Incididunt deserunt Lorem proident magna tempor enim quis duis eu ut adipisicing in. Ex mollit non irure aliqua officia. Fugiat id ipsum consequat irure id ullamco culpa quis nulla enim aliquip consequat et. Dolor ut anim velit irure consequat cillum eu. 
Aute occaecat laborum est aliqua.','registered':'Sunday, April 1, 2018 11:28 PM','latitude':'-10.177041','longitude':'-165.756718','tags':['est','laborum','culpa','non','quis'],'greeting':'Hello, Abby! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed03237438b158af9e','index':90,'guid':'36c4a19f-2d00-4e40-bd49-155fd2ce0a6c','isActive':false,'balance':'$2,757.86','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Whitney','last':'Sheppard'},'company':'ANACHO','email':'whitney.sheppard@anacho.us','phone':'+1 (922) 437-2383','address':'951 Beekman Place, Homeworth, New York, 6088','about':'Sint minim nisi minim non minim aliqua pariatur ullamco do sint qui labore. Aute elit reprehenderit ad do fugiat est amet. In incididunt tempor commodo cillum tempor est labore anim.','registered':'Tuesday, September 13, 2016 6:43 PM','latitude':'-49.732527','longitude':'-171.846715','tags':['exercitation','veniam','sunt','est','proident'],'greeting':'Hello, Whitney! You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0edb99dd3aa53d2cb7f','index':91,'guid':'17afd430-f37f-4d55-958c-72f35cdb5997','isActive':false,'balance':'$3,683.86','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Ilene','last':'Blackwell'},'company':'ENQUILITY','email':'ilene.blackwell@enquility.biz','phone':'+1 (817) 555-2616','address':'950 Varanda Place, Belgreen, Virgin Islands, 1765','about':'Id eiusmod deserunt eiusmod adipisicing adipisicing est enim pariatur esse duis. Qui velit duis irure magna consectetur dolore reprehenderit. Cillum dolore minim consectetur irure non qui velit cillum veniam adipisicing incididunt. Deserunt veniam excepteur veniam velit aliquip labore quis exercitation magna do non dolor. Aliquip occaecat minim adipisicing deserunt fugiat nulla occaecat proident irure consectetur eiusmod irure. 
Enim Lorem deserunt amet Lorem commodo eiusmod reprehenderit occaecat adipisicing dolor voluptate cillum.','registered':'Thursday, February 1, 2018 8:39 AM','latitude':'57.393644','longitude':'-3.704258','tags':['adipisicing','dolor','commodo','Lorem','Lorem'],'greeting':'Hello, Ilene! You have 6 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed353f4deb62c3342a','index':92,'guid':'9953e285-2095-4f1c-978b-9ece2a867e9d','isActive':false,'balance':'$1,202.44','picture':'http://placehold.it/32x32','age':38,'eyeColor':'blue','name':{'first':'Dawson','last':'Herman'},'company':'BITENDREX','email':'dawson.herman@bitendrex.ca','phone':'+1 (843) 522-2655','address':'471 Channel Avenue, Denio, Alaska, 5040','about':'Nisi occaecat mollit reprehenderit nisi minim Lorem mollit. Ea proident irure cillum quis. Deserunt consectetur consectetur consequat quis enim minim ea ipsum proident nisi ad non aliquip. Veniam aute minim consequat irure voluptate aute amet excepteur exercitation cillum duis quis adipisicing nostrud.','registered':'Tuesday, December 8, 2015 5:40 PM','latitude':'-55.602721','longitude':'-26.683234','tags':['qui','dolor','deserunt','eiusmod','labore'],'greeting':'Hello, Dawson! You have 7 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0edd5464bc50a5310ad','index':93,'guid':'724b2434-4dbd-417d-aa07-6065715f434f','isActive':false,'balance':'$1,595.98','picture':'http://placehold.it/32x32','age':25,'eyeColor':'brown','name':{'first':'Alice','last':'Christian'},'company':'ZENOLUX','email':'alice.christian@zenolux.info','phone':'+1 (954) 466-2650','address':'875 Gerritsen Avenue, Townsend, Kentucky, 6568','about':'Nulla labore occaecat ex culpa magna. Commodo occaecat et in consequat cillum laborum magna adipisicing excepteur. Do ut Lorem esse voluptate officia ea aliquip proident amet veniam minim nulla adipisicing. Enim consectetur incididunt laborum voluptate tempor deserunt non laboris. 
Aliquip deserunt aute irure dolore magna anim aliquip sint magna Lorem. Officia laboris nulla officia sint labore nisi. Do Lorem id in est esse adipisicing id fugiat enim esse laborum.','registered':'Wednesday, October 3, 2018 9:26 PM','latitude':'-88.790637','longitude':'138.817328','tags':['duis','ea','magna','ea','incididunt'],'greeting':'Hello, Alice! You have 8 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0eda01886247b6a4f3d','index':94,'guid':'17c9f4d3-7d72-44e3-8f7c-08d7de920f46','isActive':false,'balance':'$3,173.29','picture':'http://placehold.it/32x32','age':31,'eyeColor':'blue','name':{'first':'Schwartz','last':'Mccormick'},'company':'EVIDENDS','email':'schwartz.mccormick@evidends.tv','phone':'+1 (924) 531-2802','address':'160 Midwood Street, Indio, Palau, 4241','about':'Anim reprehenderit et et adipisicing voluptate consequat elit. Sint Lorem laboris Lorem minim nostrud aute reprehenderit elit aute quis nulla. Officia aute eiusmod mollit cillum eu aliquip non enim ea occaecat quis fugiat occaecat officia. Eiusmod culpa exercitation dolor aliqua enim occaecat nisi cupidatat duis ex dolore id. Id consequat aliqua cupidatat ut. Sit nisi est sunt culpa ullamco excepteur sunt pariatur incididunt amet. Ut tempor duis velit eu ut id culpa aute anim occaecat labore.','registered':'Thursday, March 2, 2017 5:57 PM','latitude':'38.618587','longitude':'-165.142529','tags':['ad','reprehenderit','magna','elit','mollit'],'greeting':'Hello, Schwartz! 
You have 10 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed51be4df456ec2bc9','index':95,'guid':'44f68f65-959b-4ec2-bd2a-1f30035f76fc','isActive':false,'balance':'$3,242.24','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Bonita','last':'Stevens'},'company':'SLOFAST','email':'bonita.stevens@slofast.co.uk','phone':'+1 (886) 473-2105','address':'459 Bushwick Court, Kilbourne, Rhode Island, 9450','about':'Consequat reprehenderit qui reprehenderit nisi sit est in qui aliquip amet. Ex deserunt cupidatat amet cillum eiusmod irure anim in amet proident voluptate. Ad officia culpa in non incididunt do.','registered':'Saturday, August 22, 2015 5:23 AM','latitude':'60.013542','longitude':'58.242132','tags':['aute','adipisicing','in','cillum','officia'],'greeting':'Hello, Bonita! You have 5 unread messages.','favoriteFruit':'banana'},{'_id':'5c5ab0ed50a55e3587993f68','index':96,'guid':'652e434f-221e-4899-af12-38dca5c9621d','isActive':false,'balance':'$2,720.06','picture':'http://placehold.it/32x32','age':28,'eyeColor':'green','name':{'first':'Charmaine','last':'Jackson'},'company':'FLUM','email':'charmaine.jackson@flum.io','phone':'+1 (947) 573-2692','address':'788 Windsor Place, Highland, Arkansas, 8869','about':'Dolore reprehenderit irure excepteur eu reprehenderit sint Lorem ut amet in. Consequat anim elit sunt aliquip incididunt. Culpa consequat do exercitation dolor enim dolor sunt sit excepteur ad anim. Dolor aute elit velit mollit minim eu.','registered':'Wednesday, April 6, 2016 7:54 PM','latitude':'25.756553','longitude':'-5.482531','tags':['amet','sint','consequat','est','ex'],'greeting':'Hello, Charmaine! 
You have 10 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed213621949bbdd5d3','index':97,'guid':'7d7d93d8-3e37-4b4a-9fa2-591fb7d153ce','isActive':true,'balance':'$1,370.63','picture':'http://placehold.it/32x32','age':36,'eyeColor':'brown','name':{'first':'Petersen','last':'Cooley'},'company':'ROTODYNE','email':'petersen.cooley@rotodyne.me','phone':'+1 (929) 563-3339','address':'338 Pioneer Street, Carbonville, Missouri, 3030','about':'Cillum elit dolore labore aute. Cillum ea incididunt cupidatat consequat sint eu mollit. Excepteur commodo eiusmod ex Lorem enim velit minim.','registered':'Friday, December 8, 2017 5:53 AM','latitude':'-10.576254','longitude':'-111.176861','tags':['veniam','eu','eiusmod','dolore','voluptate'],'greeting':'Hello, Petersen! You have 9 unread messages.','favoriteFruit':'apple'},{'_id':'5c5ab0ed3e938138d58ed453','index':98,'guid':'d6fea4a3-03f6-46ee-90b9-8ec51a585e29','isActive':true,'balance':'$1,216.54','picture':'http://placehold.it/32x32','age':39,'eyeColor':'blue','name':{'first':'Rosanne','last':'Terry'},'company':'EXTREMO','email':'rosanne.terry@extremo.com','phone':'+1 (812) 496-2691','address':'368 Rockaway Avenue, Gloucester, Illinois, 7913','about':'Duis et nostrud duis quis minim eiusmod culpa do ea ad pariatur tempor. Velit veniam aliqua aliquip est enim ex et culpa dolor ullamco culpa officia. Eu id occaecat aute cillum aute sit aute laboris ipsum voluptate ex. Amet tempor minim tempor Lorem quis dolore. Pariatur consequat dolore nulla veniam dolor exercitation consequat nulla laboris incididunt do. Dolore do tempor deserunt exercitation incididunt officia incididunt ut do reprehenderit do eiusmod nulla.','registered':'Sunday, August 6, 2017 12:46 PM','latitude':'-43.257964','longitude':'-45.147686','tags':['et','incididunt','esse','commodo','ipsum'],'greeting':'Hello, Rosanne! 
You have 6 unread messages.','favoriteFruit':'strawberry'},{'_id':'5c5ab0ed632b1a1d65501d6b','index':99,'guid':'bf8c6ac1-ee18-48ee-ae94-ea515a53c951','isActive':true,'balance':'$2,905.58','picture':'http://placehold.it/32x32','age':21,'eyeColor':'blue','name':{'first':'Irene','last':'Castro'},'company':'POLARIA','email':'irene.castro@polaria.org','phone':'+1 (818) 417-3761','address':'901 Dupont Street, Sperryville, Oklahoma, 953','about':'Pariatur minim laboris aliqua dolor aliquip consequat ea do duis voluptate id Lorem. In reprehenderit et adipisicing anim elit incididunt velit in laborum laborum. Qui minim magna et amet sit do voluptate reprehenderit ea sit sint velit.','registered':'Tuesday, August 18, 2015 10:48 AM','latitude':'-7.004055','longitude':'116.052433','tags':['sit','proident','enim','ullamco','non'],'greeting':'Hello, Irene! You have 10 unread messages.','favoriteFruit':'apple'}]" + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/plan-no-changes/main.tf b/pkg/cloud/testdata/plan-no-changes/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-no-changes/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-no-changes/plan.log b/pkg/cloud/testdata/plan-no-changes/plan.log new file mode 100644 index 00000000000..70416815133 --- /dev/null +++ b/pkg/cloud/testdata/plan-no-changes/plan.log @@ -0,0 +1,17 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. 
+ +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. diff --git a/pkg/cloud/testdata/plan-no-changes/policy.log b/pkg/cloud/testdata/plan-no-changes/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/cloud/testdata/plan-no-changes/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/plan-policy-hard-failed/main.tf b/pkg/cloud/testdata/plan-policy-hard-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-policy-hard-failed/plan.log b/pkg/cloud/testdata/plan-policy-hard-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/plan-policy-hard-failed/policy.log b/pkg/cloud/testdata/plan-policy-hard-failed/policy.log new file mode 100644 index 00000000000..5d6e6935b93 --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/plan-policy-passed/main.tf b/pkg/cloud/testdata/plan-policy-passed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-policy-passed/plan.log b/pkg/cloud/testdata/plan-policy-passed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/plan-policy-passed/policy.log b/pkg/cloud/testdata/plan-policy-passed/policy.log new file mode 100644 index 00000000000..b0cb1e59859 --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/plan-policy-soft-failed/main.tf b/pkg/cloud/testdata/plan-policy-soft-failed/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-policy-soft-failed/plan.log b/pkg/cloud/testdata/plan-policy-soft-failed/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/plan-policy-soft-failed/policy.log b/pkg/cloud/testdata/plan-policy-soft-failed/policy.log new file mode 100644 index 00000000000..3e4ebedf617 --- /dev/null +++ b/pkg/cloud/testdata/plan-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/pkg/cloud/testdata/plan-variables/main.tf b/pkg/cloud/testdata/plan-variables/main.tf new file mode 100644 index 00000000000..955e8b4c09a --- /dev/null +++ b/pkg/cloud/testdata/plan-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-variables/plan.log b/pkg/cloud/testdata/plan-variables/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/plan-with-error/main.tf b/pkg/cloud/testdata/plan-with-error/main.tf new file mode 100644 index 00000000000..bc45f28f563 --- /dev/null +++ b/pkg/cloud/testdata/plan-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/pkg/cloud/testdata/plan-with-error/plan.log b/pkg/cloud/testdata/plan-with-error/plan.log new file mode 100644 index 00000000000..4344a372290 --- /dev/null +++ b/pkg/cloud/testdata/plan-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/pkg/cloud/testdata/plan-with-working-directory/tofu/main.tf b/pkg/cloud/testdata/plan-with-working-directory/tofu/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan-with-working-directory/tofu/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan-with-working-directory/tofu/plan.log b/pkg/cloud/testdata/plan-with-working-directory/tofu/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan-with-working-directory/tofu/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/pkg/cloud/testdata/plan/main.tf b/pkg/cloud/testdata/plan/main.tf new file mode 100644 index 00000000000..3911a2a9b2d --- /dev/null +++ b/pkg/cloud/testdata/plan/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/pkg/cloud/testdata/plan/plan.log b/pkg/cloud/testdata/plan/plan.log new file mode 100644 index 00000000000..5849e57595e --- /dev/null +++ b/pkg/cloud/testdata/plan/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/pkg/cloud/testdata/refresh/main.tf b/pkg/cloud/testdata/refresh/main.tf new file mode 100644 index 00000000000..8d61d5f5180 --- /dev/null +++ b/pkg/cloud/testdata/refresh/main.tf @@ -0,0 +1,6 @@ +resource "random_pet" "always_new" { + keepers = { + uuid = uuid() # Force a new name each time + } + length = 3 +} diff --git a/pkg/cloud/testdata/variables/main.tf b/pkg/cloud/testdata/variables/main.tf new file mode 100644 index 00000000000..9e1a0a40ff6 --- /dev/null +++ b/pkg/cloud/testdata/variables/main.tf @@ -0,0 +1,8 @@ +variable "key1" { +} + +variable "key2" { +} + +variable "key3" { +} diff --git a/pkg/cloud/testing.go b/pkg/cloud/testing.go new file mode 100644 index 00000000000..e8ae479aecf --- /dev/null +++ b/pkg/cloud/testing.go @@ -0,0 +1,632 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path" + "strconv" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +const ( + testCred = "test-auth-token" +) + +var ( + tfeHost = "app.terraform.io" + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + svchost.Hostname(tfeHost): {"token": testCred}, + }) + testBackendSingleWorkspaceName = "app-prod" + defaultTFCPing = map[string]func(http.ResponseWriter, *http.Request){ + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + w.Header().Set("TFP-AppName", "Terraform Cloud") + }, + } +) + +func skipIfTFENotEnabled(t *testing.T) { + if os.Getenv("TF_TFC_TEST") == "" { + t.Skip("this test accesses " + tfeHost + "; set TF_TFC_TEST=1 to run it") + } +} + +// mockInput is a mock implementation of tofu.UIInput. 
+type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(ctx context.Context, opts *tofu.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + if v == "wait-for-external-update" { + select { + case <-ctx.Done(): + case <-time.After(time.Minute): + } + } + delete(m.answers, opts.Id) + return v, nil +} + +func testInput(t *testing.T, answers map[string]string) *mockInput { + skipIfTFENotEnabled(t) + return &mockInput{answers: answers} +} + +func testBackendWithName(t *testing.T) (*Cloud, func()) { + b, _, c := testBackendAndMocksWithName(t) + return b, c +} + +func testBackendAndMocksWithName(t *testing.T) (*Cloud, *MockClient, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj, defaultTFCPing) +} + +func testBackendWithTags(t *testing.T) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "tags": cty.SetVal( + []cty.Value{ + cty.StringVal("billing"), + }, + ), + "project": cty.NullVal(cty.String), + }), + }) + b, _, c := testBackend(t, obj, nil) + return b, c +} + +func testBackendNoOperations(t *testing.T) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("no-operations"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": 
cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }) + b, _, c := testBackend(t, obj, nil) + return b, c +} + +func testBackendWithHandlers(t *testing.T, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal(tfeHost), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal(testBackendSingleWorkspaceName), + "tags": cty.NullVal(cty.Set(cty.String)), + "project": cty.NullVal(cty.String), + }), + }) + b, _, c := testBackend(t, obj, handlers) + return b, c +} + +func testCloudState(t *testing.T) *State { + b, bCleanup := testBackendWithName(t) + defer bCleanup() + + raw, err := b.StateMgr(testBackendSingleWorkspaceName) + if err != nil { + t.Fatalf("error: %v", err) + } + + return raw.(*State) +} + +func testBackendWithOutputs(t *testing.T) (*Cloud, func()) { + b, cleanup := testBackendWithName(t) + + // Get a new mock client to use for adding outputs + mc := NewMockClient() + + mc.StateVersionOutputs.create("svo-abcd", &tfe.StateVersionOutput{ + ID: "svo-abcd", + Value: "foobar", + Sensitive: true, + Type: "string", + Name: "sensitive_output", + DetailedType: "string", + }) + + mc.StateVersionOutputs.create("svo-zyxw", &tfe.StateVersionOutput{ + ID: "svo-zyxw", + Value: "bazqux", + Type: "string", + Name: "nonsensitive_output", + DetailedType: "string", + }) + + var dt interface{} + var val interface{} + err := json.Unmarshal([]byte(`["object", {"foo":"string"}]`), &dt) + if err != nil { + t.Fatalf("could not unmarshal detailed type: %s", err) + } + err = json.Unmarshal([]byte(`{"foo":"bar"}`), &val) + if err != nil { + t.Fatalf("could not unmarshal value: %s", err) + } + mc.StateVersionOutputs.create("svo-efgh", &tfe.StateVersionOutput{ + ID: "svo-efgh", + Value: val, + Type: 
"object", + Name: "object_output", + DetailedType: dt, + }) + + err = json.Unmarshal([]byte(`["list", "bool"]`), &dt) + if err != nil { + t.Fatalf("could not unmarshal detailed type: %s", err) + } + err = json.Unmarshal([]byte(`[true, false, true, true]`), &val) + if err != nil { + t.Fatalf("could not unmarshal value: %s", err) + } + mc.StateVersionOutputs.create("svo-ijkl", &tfe.StateVersionOutput{ + ID: "svo-ijkl", + Value: val, + Type: "array", + Name: "list_output", + DetailedType: dt, + }) + + b.client.StateVersionOutputs = mc.StateVersionOutputs + + return b, cleanup +} + +func testBackend(t *testing.T, obj cty.Value, handlers map[string]func(http.ResponseWriter, *http.Request)) (*Cloud, *MockClient, func()) { + skipIfTFENotEnabled(t) + var s *httptest.Server + if handlers != nil { + s = testServerWithHandlers(handlers) + } else { + s = testServer(t) + } + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Configure the backend so the client is created. + newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatalf("testBackend: backend.PrepareConfig() failed: %s", valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatalf("testBackend: backend.Configure() failed: %s", confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := NewMockClient() + + // Replace the services we use with our mock services. 
+ b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.TaskStages = mc.TaskStages + b.client.PolicySetOutcomes = mc.PolicySetOutcomes + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.RunEvents = mc.RunEvents + b.client.StateVersions = mc.StateVersions + b.client.StateVersionOutputs = mc.StateVersionOutputs + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + b.input = true + + baseURL, err := url.Parse("https://" + tfeHost) + if err != nil { + t.Fatalf("testBackend: failed to parse base URL for client") + } + baseURL.Path = "/api/v2/" + + readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) ([]byte, error) { + return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) + } + + ctx := context.Background() + + // Create the organization. + _, err = b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. + if b.WorkspaceMapping.Name != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.WorkspaceMapping.Name), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b, mc, s.Close +} + +// testUnconfiguredBackend is used for testing the configuration of the backend +// with the mock client +func testUnconfiguredBackend(t *testing.T) (*Cloud, func()) { + skipIfTFENotEnabled(t) + + s := testServer(t) + b := New(testDisco(s), encryption.StateEncryptionDisabled()) + + // Normally, the client is created during configuration, but the configuration uses the + // client to read entitlements. 
+ var err error + b.client, err = tfe.NewClient(&tfe.Config{ + Token: "fake-token", + }) + if err != nil { + t.Fatal(err) + } + + // Get a new mock client. + mc := NewMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.CostEstimates = mc.CostEstimates + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicySetOutcomes = mc.PolicySetOutcomes + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.RunEvents = mc.RunEvents + b.client.StateVersions = mc.StateVersions + b.client.StateVersionOutputs = mc.StateVersionOutputs + b.client.Variables = mc.Variables + b.client.Workspaces = mc.Workspaces + + baseURL, err := url.Parse("https://" + tfeHost) + if err != nil { + t.Fatalf("testBackend: failed to parse base URL for client") + } + baseURL.Path = "/api/v2/" + + readRedactedPlan = func(ctx context.Context, baseURL url.URL, token, planID string) ([]byte, error) { + return mc.RedactedPlans.Read(ctx, baseURL.Hostname(), token, planID) + } + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + + return b, s.Close +} + +func testLocalBackend(t *testing.T, cloud *Cloud) backend.Enhanced { + skipIfTFENotEnabled(t) + + b := backendLocal.NewWithBackend(cloud, nil) + + // Add a test provider to the local backend. 
+ p := backendLocal.TestLocalProvider(t, b, "null", providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "null_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + +// testServer returns a started *httptest.Server used for local testing with the default set of +// request handlers. +func testServer(t *testing.T) *httptest.Server { + skipIfTFENotEnabled(t) + + return testServerWithHandlers(testDefaultRequestHandlers) +} + +// testServerWithHandlers returns a started *httptest.Server with the given set of request handlers +// overriding any default request handlers (testDefaultRequestHandlers). +func testServerWithHandlers(handlers map[string]func(http.ResponseWriter, *http.Request)) *httptest.Server { + mux := http.NewServeMux() + for route, handler := range handlers { + mux.HandleFunc(route, handler) + } + for route, handler := range testDefaultRequestHandlers { + if handlers[route] == nil { + mux.HandleFunc(route, handler) + } + } + + return httptest.NewServer(mux) +} + +func testServerWithSnapshotsEnabled(t *testing.T, enabled bool) *httptest.Server { + skipIfTFENotEnabled(t) + + var serverURL string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Log(r.Method, r.URL.String()) + + if r.URL.Path == "/state-json" { + t.Log("pretending to be Archivist") + fakeState := states.NewState() + fakeStateFile := statefile.New(fakeState, "boop", 1) + var buf bytes.Buffer + statefile.Write(fakeStateFile, &buf, encryption.StateEncryptionDisabled()) + respBody := buf.Bytes() + w.Header().Set("content-type", "application/json") + w.Header().Set("content-length", strconv.FormatInt(int64(len(respBody)), 10)) + 
w.WriteHeader(http.StatusOK) + w.Write(respBody) + return + } + + if r.URL.Path == "/api/ping" { + t.Log("pretending to be Ping") + w.WriteHeader(http.StatusNoContent) + return + } + + fakeBody := map[string]any{ + "data": map[string]any{ + "type": "state-versions", + "id": GenerateID("sv-"), + "attributes": map[string]any{ + "hosted-state-download-url": serverURL + "/state-json", + "hosted-state-upload-url": serverURL + "/state-json", + }, + }, + } + fakeBodyRaw, err := json.Marshal(fakeBody) + if err != nil { + t.Fatal(err) + } + + w.Header().Set("content-type", tfe.ContentTypeJSONAPI) + w.Header().Set("content-length", strconv.FormatInt(int64(len(fakeBodyRaw)), 10)) + + switch r.Method { + case "POST": + t.Log("pretending to be Create a State Version") + if enabled { + w.Header().Set("x-terraform-snapshot-interval", "300") + } + w.WriteHeader(http.StatusAccepted) + case "GET": + t.Log("pretending to be Fetch the Current State Version for a Workspace") + if enabled { + w.Header().Set("x-terraform-snapshot-interval", "300") + } + w.WriteHeader(http.StatusOK) + case "PUT": + t.Log("pretending to be Archivist") + default: + t.Fatal("don't know what API operation this was supposed to be") + } + + w.WriteHeader(http.StatusOK) + w.Write(fakeBodyRaw) + })) + serverURL = server.URL + return server +} + +// testDefaultRequestHandlers is a map of request handlers intended to be used in a request +// multiplexer for a test server. A caller may use testServerWithHandlers to start a server with +// this base set of routes, and override a particular route for whatever edge case is being tested. +var testDefaultRequestHandlers = map[string]func(http.ResponseWriter, *http.Request){ + // Respond to service discovery calls. + "/well-known/terraform.json": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{ + "tfe.v2": "/api/v2/", +}`) + }, + + // Respond to service version constraints calls. 
+ "/v1/versions/": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, fmt.Sprintf(`{ + "service": "%s", + "product": "terraform", + "minimum": "0.1.0", + "maximum": "10.0.0" +}`, path.Base(r.URL.Path))) + }, + + // Respond to pings to get the API version header. + "/api/v2/ping": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("TFP-API-Version", "2.5") + }, + + // Respond to the initial query to read the hashicorp org entitlements. + "/api/v2/organizations/hashicorp/entitlement-set": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-GExadygjSbKP8hsY", + "type": "entitlement-sets", + "attributes": { + "operations": true, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }, + + // Respond to the initial query to read the no-operations org entitlements. + "/api/v2/organizations/no-operations/entitlement-set": func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-ufxa3y8jSbKP8hsT", + "type": "entitlement-sets", + "attributes": { + "operations": false, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }, + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. 
+ "/api/v2/organizations/": func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }, +} + +func mockColorize() *colorstring.Colorize { + colors := make(map[string]string) + for k, v := range colorstring.DefaultColors { + colors[k] = v + } + colors["purple"] = "38;5;57" + + return &colorstring.Colorize{ + Colors: colors, + Disable: false, + Reset: true, + } +} + +func mockSROWorkspace(t *testing.T, b *Cloud, workspaceName string) { + _, err := b.client.Workspaces.Update(context.Background(), "hashicorp", workspaceName, tfe.WorkspaceUpdateOptions{ + StructuredRunOutputEnabled: tfe.Bool(true), + TerraformVersion: tfe.String("1.4.0"), + }) + if err != nil { + t.Fatalf("Error enabling SRO on workspace %s: %v", workspaceName, err) + } +} + +// testDisco returns a *disco.Disco mapping app.terraform.io and +// localhost to a local test server. +func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "tfe.v2": fmt.Sprintf("%s/api/v2/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + d.SetUserAgent(httpclient.OpenTofuUserAgent(version.String())) + + d.ForceHostServices(svchost.Hostname(tfeHost), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + d.ForceHostServices(svchost.Hostname("nontfe.local"), nil) + return d +} + +type unparsedVariableValue struct { + value string + source tofu.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + return &tofu.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. 
+func testVariables(s tofu.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/pkg/cloud/tfe_client_mock.go b/pkg/cloud/tfe_client_mock.go new file mode 100644 index 00000000000..472925c0803 --- /dev/null +++ b/pkg/cloud/tfe_client_mock.go @@ -0,0 +1,1946 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloud + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "strings" + "sync" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/mitchellh/copystructure" + + tfversion "github.com/kubegems/opentofu/version" +) + +type MockClient struct { + Applies *MockApplies + ConfigurationVersions *MockConfigurationVersions + CostEstimates *MockCostEstimates + Organizations *MockOrganizations + Plans *MockPlans + PolicySetOutcomes *MockPolicySetOutcomes + TaskStages *MockTaskStages + RedactedPlans *MockRedactedPlans + PolicyChecks *MockPolicyChecks + Projects *MockProjects + Runs *MockRuns + RunEvents *MockRunEvents + StateVersions *MockStateVersions + StateVersionOutputs *MockStateVersionOutputs + Variables *MockVariables + Workspaces *MockWorkspaces +} + +func NewMockClient() *MockClient { + c := &MockClient{} + c.Applies = newMockApplies(c) + c.ConfigurationVersions = newMockConfigurationVersions(c) + c.CostEstimates = newMockCostEstimates(c) + c.Organizations = newMockOrganizations(c) + c.Plans = newMockPlans(c) + c.TaskStages = newMockTaskStages(c) + c.PolicySetOutcomes = newMockPolicySetOutcomes(c) + c.PolicyChecks = newMockPolicyChecks(c) + c.Projects = newMockProjects(c) + c.Runs = newMockRuns(c) + c.RunEvents = newMockRunEvents(c) + c.StateVersions = 
newMockStateVersions(c) + c.StateVersionOutputs = newMockStateVersionOutputs(c) + c.Variables = newMockVariables(c) + c.Workspaces = newMockWorkspaces(c) + c.RedactedPlans = newMockRedactedPlans(c) + return c +} + +type MockApplies struct { + client *MockClient + applies map[string]*tfe.Apply + logs map[string]string +} + +func newMockApplies(client *MockClient) *MockApplies { + return &MockApplies{ + client: client, + applies: make(map[string]*tfe.Apply), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock apply that uses the configured +// working directory to find the logfile. +func (m *MockApplies) create(cvID, workspaceID string) (*tfe.Apply, error) { + c, ok := m.client.ConfigurationVersions.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if c.Speculative { + // Speculative means its plan-only so we don't create a Apply. + return nil, nil + } + + id := GenerateID("apply-") + url := fmt.Sprintf("https://%s/_archivist/%s", tfeHost, id) + + a := &tfe.Apply{ + ID: id, + LogReadURL: url, + Status: tfe.ApplyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if w.AutoApply { + a.Status = tfe.ApplyRunning + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "apply.log", + ) + m.applies[a.ID] = a + + return a, nil +} + +func (m *MockConfigurationVersions) CreateForRegistryModule(ctx context.Context, moduleID tfe.RegistryModuleID) (*tfe.ConfigurationVersion, error) { + return &tfe.ConfigurationVersion{}, nil +} + +func (m *MockApplies) Read(ctx context.Context, applyID string) (*tfe.Apply, error) { + a, ok := m.applies[applyID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. 
+ if a.Status == tfe.ApplyRunning { + a.Status = tfe.ApplyFinished + } + return a, nil +} + +func (m *MockApplies) Logs(ctx context.Context, applyID string) (io.Reader, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[a.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := os.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return false, err + } + if a.Status != tfe.ApplyFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +type MockConfigurationVersions struct { + client *MockClient + configVersions map[string]*tfe.ConfigurationVersion + uploadPaths map[string]string + uploadURLs map[string]*tfe.ConfigurationVersion +} + +func newMockConfigurationVersions(client *MockClient) *MockConfigurationVersions { + return &MockConfigurationVersions{ + client: client, + configVersions: make(map[string]*tfe.ConfigurationVersion), + uploadPaths: make(map[string]string), + uploadURLs: make(map[string]*tfe.ConfigurationVersion), + } +} + +func (m *MockConfigurationVersions) List(ctx context.Context, workspaceID string, options *tfe.ConfigurationVersionListOptions) (*tfe.ConfigurationVersionList, error) { + cvl := &tfe.ConfigurationVersionList{} + for _, cv := range m.configVersions { + cvl.Items = append(cvl.Items, cv) + } + + cvl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(cvl.Items), + } + + return cvl, nil +} + +func (m *MockConfigurationVersions) Create(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionCreateOptions) (*tfe.ConfigurationVersion, error) { + id := GenerateID("cv-") + url := 
fmt.Sprintf("https://%s/_archivist/%s", tfeHost, id) + + cv := &tfe.ConfigurationVersion{ + ID: id, + Status: tfe.ConfigurationPending, + UploadURL: url, + } + + if options.Provisional != nil && *options.Provisional { + cv.Provisional = true + } + + if options.Speculative != nil && *options.Speculative { + cv.Speculative = true + } + + m.configVersions[cv.ID] = cv + m.uploadURLs[url] = cv + + return cv, nil +} + +func (m *MockConfigurationVersions) Read(ctx context.Context, cvID string) (*tfe.ConfigurationVersion, error) { + cv, ok := m.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return cv, nil +} + +func (m *MockConfigurationVersions) ReadWithOptions(ctx context.Context, cvID string, options *tfe.ConfigurationVersionReadOptions) (*tfe.ConfigurationVersion, error) { + cv, ok := m.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return cv, nil +} + +func (m *MockConfigurationVersions) Upload(ctx context.Context, url, path string) error { + cv, ok := m.uploadURLs[url] + if !ok { + return errors.New("404 not found") + } + m.uploadPaths[cv.ID] = path + cv.Status = tfe.ConfigurationUploaded + + return m.UploadTarGzip(ctx, url, nil) +} + +func (m *MockConfigurationVersions) UploadTarGzip(ctx context.Context, url string, archive io.Reader) error { + return nil +} + +func (m *MockConfigurationVersions) Archive(ctx context.Context, cvID string) error { + panic("not implemented") +} + +func (m *MockConfigurationVersions) Download(ctx context.Context, cvID string) ([]byte, error) { + panic("not implemented") +} + +type MockCostEstimates struct { + client *MockClient + Estimations map[string]*tfe.CostEstimate + logs map[string]string +} + +func newMockCostEstimates(client *MockClient) *MockCostEstimates { + return &MockCostEstimates{ + client: client, + Estimations: make(map[string]*tfe.CostEstimate), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock cost estimation that uses 
the +// configured working directory to find the logfile. +func (m *MockCostEstimates) create(cvID, workspaceID string) (*tfe.CostEstimate, error) { + id := GenerateID("ce-") + + ce := &tfe.CostEstimate{ + ID: id, + MatchedResourcesCount: 1, + ResourcesCount: 1, + DeltaMonthlyCost: "0.00", + ProposedMonthlyCost: "0.00", + Status: tfe.CostEstimateFinished, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "cost-estimate.log", + ) + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, nil + } + + m.logs[ce.ID] = logfile + m.Estimations[ce.ID] = ce + + return ce, nil +} + +func (m *MockCostEstimates) Read(ctx context.Context, costEstimateID string) (*tfe.CostEstimate, error) { + ce, ok := m.Estimations[costEstimateID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return ce, nil +} + +func (m *MockCostEstimates) Logs(ctx context.Context, costEstimateID string) (io.Reader, error) { + ce, ok := m.Estimations[costEstimateID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[ce.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := os.ReadFile(logfile) + if err != nil { + return nil, err + } + + ce.Status = tfe.CostEstimateFinished + + return bytes.NewBuffer(logs), nil +} + +type MockOrganizations struct { + client *MockClient + organizations map[string]*tfe.Organization +} + +func newMockOrganizations(client *MockClient) *MockOrganizations { + return &MockOrganizations{ + client: client, + organizations: make(map[string]*tfe.Organization), + } +} + +func (m *MockOrganizations) List(ctx context.Context, options *tfe.OrganizationListOptions) (*tfe.OrganizationList, error) { + orgl := &tfe.OrganizationList{} + for 
_, org := range m.organizations { + orgl.Items = append(orgl.Items, org) + } + + orgl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(orgl.Items), + } + + return orgl, nil +} + +// mockLogReader is a mock logreader that enables testing queued runs. +type mockLogReader struct { + done func() (bool, error) + logs *bytes.Buffer +} + +func (m *mockLogReader) Read(l []byte) (int, error) { + for { + if written, err := m.read(l); err != io.ErrNoProgress { + return written, err + } + time.Sleep(1 * time.Millisecond) + } +} + +func (m *mockLogReader) read(l []byte) (int, error) { + done, err := m.done() + if err != nil { + return 0, err + } + if !done { + return 0, io.ErrNoProgress + } + return m.logs.Read(l) +} + +func (m *MockOrganizations) Create(ctx context.Context, options tfe.OrganizationCreateOptions) (*tfe.Organization, error) { + org := &tfe.Organization{Name: *options.Name} + m.organizations[org.Name] = org + return org, nil +} + +func (m *MockOrganizations) Read(ctx context.Context, name string) (*tfe.Organization, error) { + return m.ReadWithOptions(ctx, name, tfe.OrganizationReadOptions{}) +} + +func (m *MockOrganizations) ReadWithOptions(ctx context.Context, name string, options tfe.OrganizationReadOptions) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return org, nil +} + +func (m *MockOrganizations) Update(ctx context.Context, name string, options tfe.OrganizationUpdateOptions) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + org.Name = *options.Name + return org, nil + +} + +func (m *MockOrganizations) Delete(ctx context.Context, name string) error { + delete(m.organizations, name) + return nil +} + +func (m *MockOrganizations) ReadCapacity(ctx context.Context, name string) (*tfe.Capacity, error) { + var pending, running int + for _, r := range 
m.client.Runs.Runs { + if r.Status == tfe.RunPending { + pending++ + continue + } + running++ + } + return &tfe.Capacity{Pending: pending, Running: running}, nil +} + +func (m *MockOrganizations) ReadEntitlements(ctx context.Context, name string) (*tfe.Entitlements, error) { + return &tfe.Entitlements{ + Operations: true, + PrivateModuleRegistry: true, + Sentinel: true, + StateStorage: true, + Teams: true, + VCSIntegrations: true, + }, nil +} + +func (m *MockOrganizations) ReadRunQueue(ctx context.Context, name string, options tfe.ReadRunQueueOptions) (*tfe.RunQueue, error) { + rq := &tfe.RunQueue{} + + for _, r := range m.client.Runs.Runs { + rq.Items = append(rq.Items, r) + } + + rq.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rq.Items), + } + + return rq, nil +} + +type MockRedactedPlans struct { + client *MockClient + redactedPlans map[string][]byte +} + +func newMockRedactedPlans(client *MockClient) *MockRedactedPlans { + return &MockRedactedPlans{ + client: client, + redactedPlans: make(map[string][]byte), + } +} + +func (m *MockRedactedPlans) create(cvID, workspaceID, planID string) error { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return tfe.ErrResourceNotFound + } + + planPath := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "plan-redacted.json", + ) + + redactedPlanFile, err := os.Open(planPath) + if err != nil { + return err + } + + raw, err := io.ReadAll(redactedPlanFile) + if err != nil { + return err + } + + m.redactedPlans[planID] = raw + + return nil +} + +func (m *MockRedactedPlans) Read(ctx context.Context, hostname, token, planID string) ([]byte, error) { + if p, ok := m.redactedPlans[planID]; ok { + return p, nil + } + return nil, tfe.ErrResourceNotFound +} + +type MockPlans struct { + client *MockClient + logs map[string]string + planOutputs map[string][]byte + plans map[string]*tfe.Plan +} + +func 
newMockPlans(client *MockClient) *MockPlans { + return &MockPlans{ + client: client, + logs: make(map[string]string), + planOutputs: make(map[string][]byte), + plans: make(map[string]*tfe.Plan), + } +} + +// create is a helper function to create a mock plan that uses the configured +// working directory to find the logfile. +func (m *MockPlans) create(cvID, workspaceID string) (*tfe.Plan, error) { + id := GenerateID("plan-") + url := fmt.Sprintf("https://%s/_archivist/%s", tfeHost, id) + + p := &tfe.Plan{ + ID: id, + LogReadURL: url, + Status: tfe.PlanPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "plan.log", + ) + + // Try to load unredacted json output, if it exists + outputPath := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "plan-unredacted.json", + ) + if outBytes, err := os.ReadFile(outputPath); err == nil { + m.planOutputs[p.ID] = outBytes + } + + m.plans[p.ID] = p + + return p, nil +} + +func (m *MockPlans) Read(ctx context.Context, planID string) (*tfe.Plan, error) { + p, ok := m.plans[planID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. 
+ if p.Status == tfe.PlanRunning { + p.Status = tfe.PlanFinished + } + return p, nil +} + +func (m *MockPlans) Logs(ctx context.Context, planID string) (io.Reader, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[p.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := os.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return false, err + } + if p.Status != tfe.PlanFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +func (m *MockPlans) ReadJSONOutput(ctx context.Context, planID string) ([]byte, error) { + planOutput, ok := m.planOutputs[planID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + return planOutput, nil +} + +type MockTaskStages struct { + client *MockClient +} + +func newMockTaskStages(client *MockClient) *MockTaskStages { + return &MockTaskStages{ + client: client, + } +} + +func (m *MockTaskStages) Override(ctx context.Context, taskStageID string, options tfe.TaskStageOverrideOptions) (*tfe.TaskStage, error) { + switch taskStageID { + case "ts-err": + return nil, errors.New("test error") + + default: + return nil, nil + } +} + +func (m *MockTaskStages) Read(ctx context.Context, taskStageID string, options *tfe.TaskStageReadOptions) (*tfe.TaskStage, error) { + //TODO implement me + panic("implement me") +} + +func (m *MockTaskStages) List(ctx context.Context, runID string, options *tfe.TaskStageListOptions) (*tfe.TaskStageList, error) { + //TODO implement me + panic("implement me") +} + +type MockPolicySetOutcomes struct { + client *MockClient +} + +func newMockPolicySetOutcomes(client *MockClient) *MockPolicySetOutcomes { + return 
&MockPolicySetOutcomes{ + client: client, + } +} + +func (m *MockPolicySetOutcomes) List(ctx context.Context, policyEvaluationID string, options *tfe.PolicySetOutcomeListOptions) (*tfe.PolicySetOutcomeList, error) { + switch policyEvaluationID { + case "pol-pass": + return &tfe.PolicySetOutcomeList{ + Items: []*tfe.PolicySetOutcome{ + { + ID: policyEvaluationID, + Outcomes: []tfe.Outcome{ + { + EnforcementLevel: "mandatory", + Query: "data.example.rule", + Status: "passed", + PolicyName: "policy-pass", + Description: "This policy will pass", + }, + }, + Overridable: tfe.Bool(true), + Error: "", + PolicySetName: "policy-set-that-passes", + PolicySetDescription: "This policy set will always pass", + ResultCount: tfe.PolicyResultCount{ + AdvisoryFailed: 0, + MandatoryFailed: 0, + Passed: 1, + Errored: 0, + }, + }, + }, + }, nil + case "pol-fail": + return &tfe.PolicySetOutcomeList{ + Items: []*tfe.PolicySetOutcome{ + { + ID: policyEvaluationID, + Outcomes: []tfe.Outcome{ + { + EnforcementLevel: "mandatory", + Query: "data.example.rule", + Status: "failed", + PolicyName: "policy-fail", + Description: "This policy will fail", + }, + }, + Overridable: tfe.Bool(true), + Error: "", + PolicySetName: "policy-set-that-fails", + PolicySetDescription: "This policy set will always fail", + ResultCount: tfe.PolicyResultCount{ + AdvisoryFailed: 0, + MandatoryFailed: 1, + Passed: 0, + Errored: 0, + }, + }, + }, + }, nil + + case "adv-fail": + return &tfe.PolicySetOutcomeList{ + Items: []*tfe.PolicySetOutcome{ + { + ID: policyEvaluationID, + Outcomes: []tfe.Outcome{ + { + EnforcementLevel: "advisory", + Query: "data.example.rule", + Status: "failed", + PolicyName: "policy-fail", + Description: "This policy will fail", + }, + }, + Overridable: tfe.Bool(true), + Error: "", + PolicySetName: "policy-set-that-fails", + PolicySetDescription: "This policy set will always fail", + ResultCount: tfe.PolicyResultCount{ + AdvisoryFailed: 1, + MandatoryFailed: 0, + Passed: 0, + Errored: 0, + }, 
+ }, + }, + }, nil + default: + return &tfe.PolicySetOutcomeList{ + Items: []*tfe.PolicySetOutcome{ + { + ID: policyEvaluationID, + Outcomes: []tfe.Outcome{ + { + EnforcementLevel: "mandatory", + Query: "data.example.rule", + Status: "passed", + PolicyName: "policy-pass", + Description: "This policy will pass", + }, + }, + Overridable: tfe.Bool(true), + Error: "", + PolicySetName: "policy-set-that-passes", + PolicySetDescription: "This policy set will always pass", + ResultCount: tfe.PolicyResultCount{ + AdvisoryFailed: 0, + MandatoryFailed: 0, + Passed: 1, + Errored: 0, + }, + }, + }, + }, nil + } +} + +func (m *MockPolicySetOutcomes) Read(ctx context.Context, policySetOutcomeID string) (*tfe.PolicySetOutcome, error) { + return nil, nil +} + +type MockPolicyChecks struct { + client *MockClient + checks map[string]*tfe.PolicyCheck + logs map[string]string +} + +func newMockPolicyChecks(client *MockClient) *MockPolicyChecks { + return &MockPolicyChecks{ + client: client, + checks: make(map[string]*tfe.PolicyCheck), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock policy check that uses the +// configured working directory to find the logfile. 
+func (m *MockPolicyChecks) create(cvID, workspaceID string) (*tfe.PolicyCheck, error) { + id := GenerateID("pc-") + + pc := &tfe.PolicyCheck{ + ID: id, + Actions: &tfe.PolicyActions{}, + Permissions: &tfe.PolicyPermissions{}, + Scope: tfe.PolicyScopeOrganization, + Status: tfe.PolicyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "policy.log", + ) + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, nil + } + + m.logs[pc.ID] = logfile + m.checks[pc.ID] = pc + + return pc, nil +} + +func (m *MockPolicyChecks) List(ctx context.Context, runID string, options *tfe.PolicyCheckListOptions) (*tfe.PolicyCheckList, error) { + _, ok := m.client.Runs.Runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pcl := &tfe.PolicyCheckList{} + for _, pc := range m.checks { + pcl.Items = append(pcl.Items, pc) + } + + pcl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(pcl.Items), + } + + return pcl, nil +} + +func (m *MockPolicyChecks) Read(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, fmt.Errorf("logfile does not exist") + } + + logs, err := os.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + 
pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. + pc.Status = tfe.PolicyErrored + } + + return pc, nil +} + +func (m *MockPolicyChecks) Override(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + pc.Status = tfe.PolicyOverridden + return pc, nil +} + +func (m *MockPolicyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := os.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. 
+ pc.Status = tfe.PolicyErrored + } + + return bytes.NewBuffer(logs), nil +} + +type MockProjects struct { + client *MockClient + projects map[string]*tfe.Project +} + +func newMockProjects(client *MockClient) *MockProjects { + return &MockProjects{ + client: client, + projects: make(map[string]*tfe.Project), + } +} + +func (m *MockProjects) Create(ctx context.Context, organization string, options tfe.ProjectCreateOptions) (*tfe.Project, error) { + id := GenerateID("prj-") + + p := &tfe.Project{ + ID: id, + Name: options.Name, + } + + m.projects[p.ID] = p + + return p, nil +} + +func (m *MockProjects) List(ctx context.Context, organization string, options *tfe.ProjectListOptions) (*tfe.ProjectList, error) { + pl := &tfe.ProjectList{} + + for _, project := range m.projects { + pc, err := copystructure.Copy(project) + if err != nil { + panic(err) + } + pl.Items = append(pl.Items, pc.(*tfe.Project)) + } + + pl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(pl.Items), + } + + return pl, nil +} + +func (m *MockProjects) Read(ctx context.Context, projectID string) (*tfe.Project, error) { + p, ok := m.projects[projectID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + // we must return a copy for the client + pc, err := copystructure.Copy(p) + if err != nil { + panic(err) + } + + return pc.(*tfe.Project), nil +} + +func (m *MockProjects) Update(ctx context.Context, projectID string, options tfe.ProjectUpdateOptions) (*tfe.Project, error) { + p, ok := m.projects[projectID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + p.Name = *options.Name + + // we must return a copy for the client + pc, err := copystructure.Copy(p) + if err != nil { + panic(err) + } + + return pc.(*tfe.Project), nil +} + +func (m *MockProjects) Delete(ctx context.Context, projectID string) error { + var projectToDelete *tfe.Project + for _, p := range m.projects { + if p.ID == projectID { + projectToDelete = p + 
break + } + } + if projectToDelete == nil { + return tfe.ErrResourceNotFound + } + + delete(m.projects, projectToDelete.ID) + + return nil +} + +type MockRuns struct { + sync.Mutex + client *MockClient + Runs map[string]*tfe.Run + workspaces map[string][]*tfe.Run + + // If ModifyNewRun is non-nil, the create method will call it just before + // saving a new run in the runs map, so that a calling test can mimic + // side-effects that a real server might apply in certain situations. + ModifyNewRun func(client *MockClient, options tfe.RunCreateOptions, run *tfe.Run) +} + +func newMockRuns(client *MockClient) *MockRuns { + return &MockRuns{ + client: client, + Runs: make(map[string]*tfe.Run), + workspaces: make(map[string][]*tfe.Run), + } +} + +func (m *MockRuns) List(ctx context.Context, workspaceID string, options *tfe.RunListOptions) (*tfe.RunList, error) { + m.Lock() + defer m.Unlock() + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + rl := &tfe.RunList{} + for _, run := range m.workspaces[w.ID] { + rc, err := copystructure.Copy(run) + if err != nil { + panic(err) + } + rl.Items = append(rl.Items, rc.(*tfe.Run)) + } + + rl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rl.Items), + } + + return rl, nil +} + +func (m *MockRuns) Create(ctx context.Context, options tfe.RunCreateOptions) (*tfe.Run, error) { + m.Lock() + defer m.Unlock() + + a, err := m.client.Applies.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + ce, err := m.client.CostEstimates.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + p, err := m.client.Plans.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + pc, err := m.client.PolicyChecks.create(options.ConfigurationVersion.ID, options.Workspace.ID) + 
if err != nil { + return nil, err + } + + r := &tfe.Run{ + ID: GenerateID("run-"), + Actions: &tfe.RunActions{IsCancelable: true}, + Apply: a, + CostEstimate: ce, + HasChanges: false, + Permissions: &tfe.RunPermissions{}, + Plan: p, + ReplaceAddrs: options.ReplaceAddrs, + Status: tfe.RunPending, + TargetAddrs: options.TargetAddrs, + AllowConfigGeneration: options.AllowConfigGeneration, + } + + if options.Message != nil { + r.Message = *options.Message + } + + if pc != nil { + r.PolicyChecks = []*tfe.PolicyCheck{pc} + } + + if options.IsDestroy != nil { + r.IsDestroy = *options.IsDestroy + } + + if options.Refresh != nil { + r.Refresh = *options.Refresh + } + + if options.RefreshOnly != nil { + r.RefreshOnly = *options.RefreshOnly + } + + if options.AllowConfigGeneration != nil && *options.AllowConfigGeneration { + r.Plan.GeneratedConfiguration = true + } + + w, ok := m.client.Workspaces.workspaceIDs[options.Workspace.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if w.CurrentRun == nil { + w.CurrentRun = r + } + + r.Workspace = &tfe.Workspace{ + ID: w.ID, + StructuredRunOutputEnabled: w.StructuredRunOutputEnabled, + TerraformVersion: w.TerraformVersion, + } + + if w.StructuredRunOutputEnabled { + err := m.client.RedactedPlans.create(options.ConfigurationVersion.ID, options.Workspace.ID, p.ID) + if err != nil { + return nil, err + } + } + + if m.ModifyNewRun != nil { + // caller-provided callback may modify the run in-place to mimic + // side-effects that a real server might take in some situations. 
+ m.ModifyNewRun(m.client, options, r) + } + + m.Runs[r.ID] = r + m.workspaces[options.Workspace.ID] = append(m.workspaces[options.Workspace.ID], r) + + return r, nil +} + +func (m *MockRuns) Read(ctx context.Context, runID string) (*tfe.Run, error) { + return m.ReadWithOptions(ctx, runID, nil) +} + +func (m *MockRuns) ReadWithOptions(ctx context.Context, runID string, options *tfe.RunReadOptions) (*tfe.Run, error) { + m.Lock() + defer m.Unlock() + + r, ok := m.Runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pending := false + for _, r := range m.Runs { + if r.ID != runID && r.Status == tfe.RunPending { + pending = true + break + } + } + + if !pending && r.Status == tfe.RunPending { + // Only update the status if there are no other pending runs. + r.Status = tfe.RunPlanning + r.Plan.Status = tfe.PlanRunning + } + + logs, _ := os.ReadFile(m.client.Plans.logs[r.Plan.LogReadURL]) + if (r.Status == tfe.RunPlanning || r.Status == tfe.RunPlannedAndSaved) && r.Plan.Status == tfe.PlanFinished { + hasChanges := r.IsDestroy || + bytes.Contains(logs, []byte("1 to add")) || + bytes.Contains(logs, []byte("1 to change")) || + bytes.Contains(logs, []byte("1 to import")) + if hasChanges { + r.Actions.IsCancelable = false + r.Actions.IsConfirmable = true + r.HasChanges = true + r.Plan.HasChanges = true + r.Permissions.CanApply = true + } + + hasError := bytes.Contains(logs, []byte("null_resource.foo: 1 error")) || + bytes.Contains(logs, []byte("Error: Unsupported block type")) || + bytes.Contains(logs, []byte("Error: Conflicting configuration arguments")) + if hasError { + r.Actions.IsCancelable = false + r.HasChanges = false + r.Status = tfe.RunErrored + } + } + + // we must return a copy for the client + rc, err := copystructure.Copy(r) + if err != nil { + panic(err) + } + r = rc.(*tfe.Run) + + // After copying, handle includes... or at least, any includes we're known to rely on. 
+ if options != nil { + for _, n := range options.Include { + switch n { + case tfe.RunWorkspace: + ws, ok := m.client.Workspaces.workspaceIDs[r.Workspace.ID] + if ok { + r.Workspace = ws + } + } + } + } + + return r, nil +} + +func (m *MockRuns) Apply(ctx context.Context, runID string, options tfe.RunApplyOptions) error { + m.Lock() + defer m.Unlock() + + r, ok := m.Runs[runID] + if !ok { + return tfe.ErrResourceNotFound + } + if r.Status != tfe.RunPending { + // Only update the status if the run is not pending anymore. + r.Status = tfe.RunApplying + r.Actions.IsConfirmable = false + r.Apply.Status = tfe.ApplyRunning + } + return nil +} + +func (m *MockRuns) Cancel(ctx context.Context, runID string, options tfe.RunCancelOptions) error { + panic("not implemented") +} + +func (m *MockRuns) ForceCancel(ctx context.Context, runID string, options tfe.RunForceCancelOptions) error { + panic("not implemented") +} + +func (m *MockRuns) ForceExecute(ctx context.Context, runID string) error { + panic("implement me") +} + +func (m *MockRuns) Discard(ctx context.Context, runID string, options tfe.RunDiscardOptions) error { + m.Lock() + defer m.Unlock() + + r, ok := m.Runs[runID] + if !ok { + return tfe.ErrResourceNotFound + } + r.Status = tfe.RunDiscarded + r.Actions.IsConfirmable = false + return nil +} + +type MockRunEvents struct{} + +func newMockRunEvents(_ *MockClient) *MockRunEvents { + return &MockRunEvents{} +} + +// List all the runs events of the given run. 
+func (m *MockRunEvents) List(ctx context.Context, runID string, options *tfe.RunEventListOptions) (*tfe.RunEventList, error) { + return &tfe.RunEventList{ + Items: []*tfe.RunEvent{}, + }, nil +} + +func (m *MockRunEvents) Read(ctx context.Context, runEventID string) (*tfe.RunEvent, error) { + return m.ReadWithOptions(ctx, runEventID, nil) +} + +func (m *MockRunEvents) ReadWithOptions(ctx context.Context, runEventID string, options *tfe.RunEventReadOptions) (*tfe.RunEvent, error) { + return &tfe.RunEvent{ + ID: GenerateID("re-"), + Action: "created", + CreatedAt: time.Now(), + }, nil +} + +type MockStateVersions struct { + client *MockClient + states map[string][]byte + stateVersions map[string]*tfe.StateVersion + workspaces map[string][]string + outputStates map[string][]byte +} + +func newMockStateVersions(client *MockClient) *MockStateVersions { + return &MockStateVersions{ + client: client, + states: make(map[string][]byte), + stateVersions: make(map[string]*tfe.StateVersion), + workspaces: make(map[string][]string), + outputStates: make(map[string][]byte), + } +} + +func (m *MockStateVersions) List(ctx context.Context, options *tfe.StateVersionListOptions) (*tfe.StateVersionList, error) { + svl := &tfe.StateVersionList{} + for _, sv := range m.stateVersions { + svl.Items = append(svl.Items, sv) + } + + svl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(svl.Items), + } + + return svl, nil +} + +func (m *MockStateVersions) Create(ctx context.Context, workspaceID string, options tfe.StateVersionCreateOptions) (*tfe.StateVersion, error) { + id := GenerateID("sv-") + runID := os.Getenv("TFE_RUN_ID") + url := fmt.Sprintf("https://%s/_archivist/%s", tfeHost, id) + + if runID != "" && (options.Run == nil || runID != options.Run.ID) { + return nil, fmt.Errorf("option.Run.ID does not contain the ID exported by TFE_RUN_ID") + } + + sv := &tfe.StateVersion{ + ID: id, + DownloadURL: url, + UploadURL: 
fmt.Sprintf("/_archivist/upload/%s", id), + Serial: *options.Serial, + } + + state, err := base64.StdEncoding.DecodeString(*options.State) + if err != nil { + return nil, err + } + m.states[sv.DownloadURL] = state + m.outputStates[sv.ID] = []byte(*options.JSONStateOutputs) + m.stateVersions[sv.ID] = sv + m.workspaces[workspaceID] = append(m.workspaces[workspaceID], sv.ID) + + return sv, nil +} + +func (m *MockStateVersions) Upload(ctx context.Context, workspaceID string, options tfe.StateVersionUploadOptions) (*tfe.StateVersion, error) { + createOptions := options.StateVersionCreateOptions + createOptions.State = tfe.String(base64.StdEncoding.EncodeToString(options.RawState)) + + return m.Create(ctx, workspaceID, createOptions) +} + +func (m *MockStateVersions) Read(ctx context.Context, svID string) (*tfe.StateVersion, error) { + return m.ReadWithOptions(ctx, svID, nil) +} + +func (m *MockStateVersions) ReadWithOptions(ctx context.Context, svID string, options *tfe.StateVersionReadOptions) (*tfe.StateVersion, error) { + sv, ok := m.stateVersions[svID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return sv, nil +} + +func (m *MockStateVersions) ReadCurrent(ctx context.Context, workspaceID string) (*tfe.StateVersion, error) { + return m.ReadCurrentWithOptions(ctx, workspaceID, nil) +} + +func (m *MockStateVersions) ReadCurrentWithOptions(ctx context.Context, workspaceID string, options *tfe.StateVersionCurrentOptions) (*tfe.StateVersion, error) { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + svs, ok := m.workspaces[w.ID] + if !ok || len(svs) == 0 { + return nil, tfe.ErrResourceNotFound + } + + sv, ok := m.stateVersions[svs[len(svs)-1]] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + return sv, nil +} + +func (m *MockStateVersions) Download(ctx context.Context, url string) ([]byte, error) { + state, ok := m.states[url] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return 
state, nil +} + +func (m *MockStateVersions) ListOutputs(ctx context.Context, svID string, options *tfe.StateVersionOutputsListOptions) (*tfe.StateVersionOutputsList, error) { + panic("not implemented") +} + +type MockStateVersionOutputs struct { + client *MockClient + outputs map[string]*tfe.StateVersionOutput +} + +func newMockStateVersionOutputs(client *MockClient) *MockStateVersionOutputs { + return &MockStateVersionOutputs{ + client: client, + outputs: make(map[string]*tfe.StateVersionOutput), + } +} + +// This is a helper function in order to create mocks to be read later +func (m *MockStateVersionOutputs) create(id string, svo *tfe.StateVersionOutput) { + m.outputs[id] = svo +} + +func (m *MockStateVersionOutputs) Read(ctx context.Context, outputID string) (*tfe.StateVersionOutput, error) { + result, ok := m.outputs[outputID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + return result, nil +} + +func (m *MockStateVersionOutputs) ReadCurrent(ctx context.Context, workspaceID string) (*tfe.StateVersionOutputsList, error) { + svl := &tfe.StateVersionOutputsList{} + for _, sv := range m.outputs { + svl.Items = append(svl.Items, sv) + } + + svl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(svl.Items), + } + + return svl, nil +} + +type MockVariables struct { + client *MockClient + workspaces map[string]*tfe.VariableList +} + +var _ tfe.Variables = (*MockVariables)(nil) + +func newMockVariables(client *MockClient) *MockVariables { + return &MockVariables{ + client: client, + workspaces: make(map[string]*tfe.VariableList), + } +} + +func (m *MockVariables) List(ctx context.Context, workspaceID string, options *tfe.VariableListOptions) (*tfe.VariableList, error) { + vl := m.workspaces[workspaceID] + return vl, nil +} + +func (m *MockVariables) Create(ctx context.Context, workspaceID string, options tfe.VariableCreateOptions) (*tfe.Variable, error) { + v := &tfe.Variable{ + ID: 
GenerateID("var-"), + Key: *options.Key, + Category: *options.Category, + } + if options.Value != nil { + v.Value = *options.Value + } + if options.HCL != nil { + v.HCL = *options.HCL + } + if options.Sensitive != nil { + v.Sensitive = *options.Sensitive + } + + workspace := workspaceID + + if m.workspaces[workspace] == nil { + m.workspaces[workspace] = &tfe.VariableList{} + } + + vl := m.workspaces[workspace] + vl.Items = append(vl.Items, v) + + return v, nil +} + +func (m *MockVariables) Read(ctx context.Context, workspaceID string, variableID string) (*tfe.Variable, error) { + panic("not implemented") +} + +func (m *MockVariables) Update(ctx context.Context, workspaceID string, variableID string, options tfe.VariableUpdateOptions) (*tfe.Variable, error) { + panic("not implemented") +} + +func (m *MockVariables) Delete(ctx context.Context, workspaceID string, variableID string) error { + panic("not implemented") +} + +type MockWorkspaces struct { + client *MockClient + workspaceIDs map[string]*tfe.Workspace + workspaceNames map[string]*tfe.Workspace +} + +func newMockWorkspaces(client *MockClient) *MockWorkspaces { + return &MockWorkspaces{ + client: client, + workspaceIDs: make(map[string]*tfe.Workspace), + workspaceNames: make(map[string]*tfe.Workspace), + } +} + +func (m *MockWorkspaces) List(ctx context.Context, organization string, options *tfe.WorkspaceListOptions) (*tfe.WorkspaceList, error) { + wl := &tfe.WorkspaceList{} + // Get all the workspaces that match the Search value + searchValue := "" + var ws []*tfe.Workspace + var tags []string + + if options != nil { + if len(options.Search) > 0 { + searchValue = options.Search + } + if len(options.Tags) > 0 { + tags = strings.Split(options.Tags, ",") + } + } + + for _, w := range m.workspaceIDs { + wTags := make(map[string]struct{}) + for _, wTag := range w.Tags { + wTags[wTag.Name] = struct{}{} + } + + if strings.Contains(w.Name, searchValue) { + tagsSatisfied := true + for _, tag := range tags { + if _, 
ok := wTags[tag]; !ok { + tagsSatisfied = false + } + } + if tagsSatisfied { + ws = append(ws, w) + } + } + } + + // Return an empty result if we have no matches. + if len(ws) == 0 { + wl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + } + return wl, nil + } + + numPages := (len(ws) / 20) + 1 + currentPage := 1 + if options != nil { + if options.PageNumber != 0 { + currentPage = options.PageNumber + } + } + previousPage := currentPage - 1 + nextPage := currentPage + 1 + + for i := ((currentPage - 1) * 20); i < ((currentPage-1)*20)+20; i++ { + if i > (len(ws) - 1) { + break + } + wl.Items = append(wl.Items, ws[i]) + } + + wl.Pagination = &tfe.Pagination{ + CurrentPage: currentPage, + NextPage: nextPage, + PreviousPage: previousPage, + TotalPages: numPages, + TotalCount: len(wl.Items), + } + + return wl, nil +} + +func (m *MockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) { + // for TestCloud_setUnavailableTerraformVersion + if *options.Name == "unavailable-terraform-version" && options.TerraformVersion != nil { + return nil, fmt.Errorf("requested Terraform version not available in this TFC instance") + } + if strings.HasSuffix(*options.Name, "no-operations") { + options.Operations = tfe.Bool(false) + options.ExecutionMode = tfe.String("local") + } else if options.Operations == nil { + options.Operations = tfe.Bool(true) + options.ExecutionMode = tfe.String("remote") + } + w := &tfe.Workspace{ + ID: GenerateID("ws-"), + Name: *options.Name, + ExecutionMode: *options.ExecutionMode, + Operations: *options.Operations, + StructuredRunOutputEnabled: false, + Permissions: &tfe.WorkspacePermissions{ + CanQueueApply: true, + CanQueueRun: true, + CanForceDelete: tfe.Bool(true), + }, + Organization: &tfe.Organization{ + Name: organization, + }, + } + if options.Project != nil { + w.Project = options.Project + } + if options.AutoApply != nil { + w.AutoApply = *options.AutoApply + } + if options.VCSRepo 
!= nil { + w.VCSRepo = &tfe.VCSRepo{} + } + + if options.TerraformVersion != nil { + w.TerraformVersion = *options.TerraformVersion + } else { + w.TerraformVersion = tfversion.String() + } + + var tags []*tfe.Tag + for _, tag := range options.Tags { + tags = append(tags, tag) + w.TagNames = append(w.TagNames, tag.Name) + } + w.Tags = tags + m.workspaceIDs[w.ID] = w + m.workspaceNames[w.Name] = w + return w, nil +} + +func (m *MockWorkspaces) Read(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) { + // custom error for TestCloud_plan500 in backend_plan_test.go + if workspace == "network-error" { + return nil, errors.New("I'm a little teacup") + } + + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return w, nil +} + +func (m *MockWorkspaces) ReadByID(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return w, nil +} + +func (m *MockWorkspaces) ReadWithOptions(ctx context.Context, organization string, workspace string, options *tfe.WorkspaceReadOptions) (*tfe.Workspace, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) ReadByIDWithOptions(ctx context.Context, workspaceID string, options *tfe.WorkspaceReadOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return w, nil +} + +func (m *MockWorkspaces) Update(ctx context.Context, organization, workspace string, options tfe.WorkspaceUpdateOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + err := updateMockWorkspaceAttributes(w, options) + if err != nil { + return nil, err + } + + delete(m.workspaceNames, workspace) + m.workspaceNames[w.Name] = w + + return w, nil +} + +func (m *MockWorkspaces) UpdateByID(ctx context.Context, workspaceID string, options 
tfe.WorkspaceUpdateOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + originalName := w.Name + err := updateMockWorkspaceAttributes(w, options) + if err != nil { + return nil, err + } + + delete(m.workspaceNames, originalName) + m.workspaceNames[w.Name] = w + + return w, nil +} + +func updateMockWorkspaceAttributes(w *tfe.Workspace, options tfe.WorkspaceUpdateOptions) error { + // for TestCloud_setUnavailableTerraformVersion + if w.Name == "unavailable-terraform-version" && options.TerraformVersion != nil { + return fmt.Errorf("requested Terraform version not available in this TFC instance") + } + + if options.Operations != nil { + w.Operations = *options.Operations + } + if options.ExecutionMode != nil { + w.ExecutionMode = *options.ExecutionMode + } + if options.Name != nil { + w.Name = *options.Name + } + if options.TerraformVersion != nil { + w.TerraformVersion = *options.TerraformVersion + } + if options.WorkingDirectory != nil { + w.WorkingDirectory = *options.WorkingDirectory + } + + if options.StructuredRunOutputEnabled != nil { + w.StructuredRunOutputEnabled = *options.StructuredRunOutputEnabled + } + + return nil +} + +func (m *MockWorkspaces) Delete(ctx context.Context, organization, workspace string) error { + if w, ok := m.workspaceNames[workspace]; ok { + delete(m.workspaceIDs, w.ID) + } + delete(m.workspaceNames, workspace) + return nil +} + +func (m *MockWorkspaces) DeleteByID(ctx context.Context, workspaceID string) error { + if w, ok := m.workspaceIDs[workspaceID]; ok { + delete(m.workspaceNames, w.Name) + } + delete(m.workspaceIDs, workspaceID) + return nil +} + +func (m *MockWorkspaces) SafeDelete(ctx context.Context, organization, workspace string) error { + w, ok := m.client.Workspaces.workspaceNames[workspace] + + if !ok { + return tfe.ErrResourceNotFound + } + + if w.Locked { + return errors.New("cannot safe delete locked workspace") + } + + if w.ResourceCount > 0 
{ + return fmt.Errorf("cannot safe delete workspace with %d resources", w.ResourceCount) + } + + return m.Delete(ctx, organization, workspace) +} + +func (m *MockWorkspaces) SafeDeleteByID(ctx context.Context, workspaceID string) error { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return tfe.ErrResourceNotFound + } + + if w.Locked { + return errors.New("cannot safe delete locked workspace") + } + + if w.ResourceCount > 0 { + return fmt.Errorf("cannot safe delete workspace with %d resources", w.ResourceCount) + } + + return m.DeleteByID(ctx, workspaceID) +} + +func (m *MockWorkspaces) RemoveVCSConnection(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + w.VCSRepo = nil + return w, nil +} + +func (m *MockWorkspaces) RemoveVCSConnectionByID(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + w.VCSRepo = nil + return w, nil +} + +func (m *MockWorkspaces) Lock(ctx context.Context, workspaceID string, options tfe.WorkspaceLockOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if w.Locked { + return nil, tfe.ErrWorkspaceLocked + } + w.Locked = true + return w, nil +} + +func (m *MockWorkspaces) Unlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if !w.Locked { + return nil, tfe.ErrWorkspaceNotLocked + } + w.Locked = false + return w, nil +} + +func (m *MockWorkspaces) ForceUnlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if !w.Locked { + return nil, tfe.ErrWorkspaceNotLocked + } + w.Locked = false + return w, 
nil +} + +func (m *MockWorkspaces) AssignSSHKey(ctx context.Context, workspaceID string, options tfe.WorkspaceAssignSSHKeyOptions) (*tfe.Workspace, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) ListRemoteStateConsumers(ctx context.Context, workspaceID string, options *tfe.RemoteStateConsumersListOptions) (*tfe.WorkspaceList, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) AddRemoteStateConsumers(ctx context.Context, workspaceID string, options tfe.WorkspaceAddRemoteStateConsumersOptions) error { + panic("not implemented") +} + +func (m *MockWorkspaces) RemoveRemoteStateConsumers(ctx context.Context, workspaceID string, options tfe.WorkspaceRemoveRemoteStateConsumersOptions) error { + panic("not implemented") +} + +func (m *MockWorkspaces) UpdateRemoteStateConsumers(ctx context.Context, workspaceID string, options tfe.WorkspaceUpdateRemoteStateConsumersOptions) error { + panic("not implemented") +} + +func (m *MockWorkspaces) Readme(ctx context.Context, workspaceID string) (io.Reader, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) ListTags(ctx context.Context, workspaceID string, options *tfe.WorkspaceTagListOptions) (*tfe.TagList, error) { + panic("not implemented") +} + +func (m *MockWorkspaces) AddTags(ctx context.Context, workspaceID string, options tfe.WorkspaceAddTagsOptions) error { + return nil +} + +func (m *MockWorkspaces) RemoveTags(ctx context.Context, workspaceID string, options tfe.WorkspaceRemoveTagsOptions) error { + panic("not implemented") +} + +const alphanumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +func GenerateID(s string) string { + b := make([]byte, 16) + for i := range b { + b[i] = alphanumeric[rand.Intn(len(alphanumeric))] + } + return s + string(b) +} diff --git a/pkg/cloudplugin/cloudplugin1/grpc_client.go 
b/pkg/cloudplugin/cloudplugin1/grpc_client.go new file mode 100644 index 00000000000..62ce164a8db --- /dev/null +++ b/pkg/cloudplugin/cloudplugin1/grpc_client.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudplugin1 + +import ( + "context" + "fmt" + "io" + "log" + + "github.com/kubegems/opentofu/pkg/cloudplugin" + "github.com/kubegems/opentofu/pkg/cloudplugin/cloudproto1" +) + +// GRPCCloudClient is the client interface for interacting with terraform-cloudplugin +type GRPCCloudClient struct { + client cloudproto1.CommandServiceClient + context context.Context +} + +// Proof that GRPCCloudClient fulfills the go-plugin interface +var _ cloudplugin.Cloud1 = GRPCCloudClient{} + +// Execute sends the client Execute request and waits for the plugin to return +// an exit code response before returning +func (c GRPCCloudClient) Execute(args []string, stdout, stderr io.Writer) int { + client, err := c.client.Execute(c.context, &cloudproto1.CommandRequest{ + Args: args, + }) + + if err != nil { + fmt.Fprint(stderr, err.Error()) + return 1 + } + + for { + // cloudplugin streams output as multiple CommandResponse value. Each + // value will either contain stdout bytes, stderr bytes, or an exit code. 
+ response, err := client.Recv() + if err == io.EOF { + log.Print("[DEBUG] received EOF from cloudplugin") + break + } else if err != nil { + fmt.Fprintf(stderr, "Failed to receive command response from cloudplugin: %s", err) + return 1 + } + + if bytes := response.GetStdout(); len(bytes) > 0 { + _, err := fmt.Fprint(stdout, string(bytes)) + if err != nil { + log.Printf("[ERROR] Failed to write cloudplugin output to stdout: %s", err) + return 1 + } + } else if bytes := response.GetStderr(); len(bytes) > 0 { + _, err := fmt.Fprint(stderr, string(bytes)) + if err != nil { + log.Printf("[ERROR] Failed to write cloudplugin output to stderr: %s", err) + return 1 + } + } else { + exitCode := response.GetExitCode() + log.Printf("[TRACE] received exit code: %d", exitCode) + if exitCode < 0 || exitCode > 255 { + log.Printf("[ERROR] cloudplugin returned an invalid error code %d", exitCode) + return 255 + } + return int(exitCode) + } + } + + // This should indicate a bug in the plugin + fmt.Fprint(stderr, "cloudplugin exited without responding with an error code") + return 1 +} diff --git a/pkg/cloudplugin/cloudplugin1/grpc_client_test.go b/pkg/cloudplugin/cloudplugin1/grpc_client_test.go new file mode 100644 index 00000000000..06bd28019e3 --- /dev/null +++ b/pkg/cloudplugin/cloudplugin1/grpc_client_test.go @@ -0,0 +1,142 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cloudplugin1 + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "testing" + + "github.com/golang/mock/gomock" + "github.com/kubegems/opentofu/pkg/cloudplugin/cloudproto1" + "github.com/kubegems/opentofu/pkg/cloudplugin/mock_cloudproto1" +) + +var mockError = "this is a mock error" + +func testGRPCloudClient(t *testing.T, ctrl *gomock.Controller, client *mock_cloudproto1.MockCommandService_ExecuteClient, executeError error) *GRPCCloudClient { + t.Helper() + + if client != nil && executeError != nil { + t.Fatal("one of client or executeError must be nil") + } + + result := mock_cloudproto1.NewMockCommandServiceClient(ctrl) + + result.EXPECT().Execute( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(client, executeError) + + return &GRPCCloudClient{ + client: result, + context: context.Background(), + } +} + +func Test_GRPCCloudClient_ExecuteError(t *testing.T) { + ctrl := gomock.NewController(t) + gRPCClient := testGRPCloudClient(t, ctrl, nil, errors.New(mockError)) + + buffer := bytes.Buffer{} + exitCode := gRPCClient.Execute([]string{"example"}, io.Discard, &buffer) + + if exitCode != 1 { + t.Fatalf("expected exit %d, got %d", 1, exitCode) + } + + if buffer.String() != mockError { + t.Errorf("expected error %q, got %q", mockError, buffer.String()) + } +} + +func Test_GRPCCloudClient_Execute_RecvError(t *testing.T) { + ctrl := gomock.NewController(t) + executeClient := mock_cloudproto1.NewMockCommandService_ExecuteClient(ctrl) + executeClient.EXPECT().Recv().Return(nil, errors.New(mockError)) + + gRPCClient := testGRPCloudClient(t, ctrl, executeClient, nil) + + buffer := bytes.Buffer{} + exitCode := gRPCClient.Execute([]string{"example"}, io.Discard, &buffer) + + if exitCode != 1 { + t.Fatalf("expected exit %d, got %d", 1, exitCode) + } + + mockRecvError := fmt.Sprintf("Failed to receive command response from cloudplugin: %s", mockError) + + if buffer.String() != mockRecvError { + 
t.Errorf("expected error %q, got %q", mockRecvError, buffer.String()) + } +} + +func Test_GRPCCloudClient_Execute_Invalid_Exit(t *testing.T) { + ctrl := gomock.NewController(t) + executeClient := mock_cloudproto1.NewMockCommandService_ExecuteClient(ctrl) + + executeClient.EXPECT().Recv().Return( + &cloudproto1.CommandResponse{ + Data: &cloudproto1.CommandResponse_ExitCode{ + ExitCode: 3_000, + }, + }, nil, + ) + + gRPCClient := testGRPCloudClient(t, ctrl, executeClient, nil) + + exitCode := gRPCClient.Execute([]string{"example"}, io.Discard, io.Discard) + + if exitCode != 255 { + t.Fatalf("expected exit %q, got %q", 255, exitCode) + } +} + +func Test_GRPCCloudClient_Execute(t *testing.T) { + ctrl := gomock.NewController(t) + executeClient := mock_cloudproto1.NewMockCommandService_ExecuteClient(ctrl) + + gomock.InOrder( + executeClient.EXPECT().Recv().Return( + &cloudproto1.CommandResponse{ + Data: &cloudproto1.CommandResponse_Stdout{ + Stdout: []byte("firstcall\n"), + }, + }, nil, + ), + executeClient.EXPECT().Recv().Return( + &cloudproto1.CommandResponse{ + Data: &cloudproto1.CommandResponse_Stdout{ + Stdout: []byte("secondcall\n"), + }, + }, nil, + ), + executeClient.EXPECT().Recv().Return( + &cloudproto1.CommandResponse{ + Data: &cloudproto1.CommandResponse_ExitCode{ + ExitCode: 99, + }, + }, nil, + ), + ) + + gRPCClient := testGRPCloudClient(t, ctrl, executeClient, nil) + + stdoutBuffer := bytes.Buffer{} + exitCode := gRPCClient.Execute([]string{"example"}, &stdoutBuffer, io.Discard) + + if exitCode != 99 { + t.Fatalf("expected exit %q, got %q", 99, exitCode) + } + + if stdoutBuffer.String() != "firstcall\nsecondcall\n" { + t.Errorf("expected output %q, got %q", "firstcall\nsecondcall\n", stdoutBuffer.String()) + } +} diff --git a/pkg/cloudplugin/cloudplugin1/grpc_plugin.go b/pkg/cloudplugin/cloudplugin1/grpc_plugin.go new file mode 100644 index 00000000000..16c32fe9760 --- /dev/null +++ b/pkg/cloudplugin/cloudplugin1/grpc_plugin.go @@ -0,0 +1,50 @@ +// 
// GRPCCloudPlugin is the go-plugin implementation, but only the client
// implementation exists in this package.
type GRPCCloudPlugin struct {
	plugin.GRPCPlugin
	// Impl is the concrete Cloud1 implementation; unused on the client
	// side but required by the go-plugin plumbing.
	Impl cloudplugin.Cloud1
}

// Server always returns an error; we're only implementing the GRPCPlugin
// interface, not the Plugin interface. This satisfies plugin.Plugin so the
// type can be registered, while making accidental net/rpc use an explicit
// failure rather than a silent one.
func (p *GRPCCloudPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return nil, errors.New("cloudplugin only implements gRPC clients")
}

// Client always returns an error; we're only implementing the GRPCPlugin
// interface, not the Plugin interface (net/rpc transport is unsupported).
func (p *GRPCCloudPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
	return nil, errors.New("cloudplugin only implements gRPC clients")
}

// GRPCServer always returns an error; we're only implementing the client
// interface, not the server. The plugin server lives in the external
// cloud plugin binary, not in this process.
func (p *GRPCCloudPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	return errors.New("cloudplugin only implements gRPC clients")
}
// GRPCClient returns a new GRPC client for interacting with the cloud plugin
// server over the given connection.
//
// NOTE(review): the context is stored on the returned struct, contrary to
// the usual "don't store Contexts in structs" guidance — presumably because
// the go-plugin GRPCClient hook provides the plugin-lifetime context here
// rather than per-call. Confirm that cancellation semantics are intended.
func (p *GRPCCloudPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	return &GRPCCloudClient{
		client: cloudproto1.NewCommandServiceClient(c),
		context: ctx,
	}, nil
}
+type CommandRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *CommandRequest) Reset() { + *x = CommandRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudproto1_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommandRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommandRequest) ProtoMessage() {} + +func (x *CommandRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudproto1_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommandRequest.ProtoReflect.Descriptor instead. +func (*CommandRequest) Descriptor() ([]byte, []int) { + return file_cloudproto1_proto_rawDescGZIP(), []int{0} +} + +func (x *CommandRequest) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +// CommandResponse contains the result of the command execution, including any +// output or errors. 
+type CommandResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Data: + // + // *CommandResponse_ExitCode + // *CommandResponse_Stdout + // *CommandResponse_Stderr + Data isCommandResponse_Data `protobuf_oneof:"data"` +} + +func (x *CommandResponse) Reset() { + *x = CommandResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudproto1_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommandResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommandResponse) ProtoMessage() {} + +func (x *CommandResponse) ProtoReflect() protoreflect.Message { + mi := &file_cloudproto1_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommandResponse.ProtoReflect.Descriptor instead. 
+func (*CommandResponse) Descriptor() ([]byte, []int) { + return file_cloudproto1_proto_rawDescGZIP(), []int{1} +} + +func (m *CommandResponse) GetData() isCommandResponse_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *CommandResponse) GetExitCode() int32 { + if x, ok := x.GetData().(*CommandResponse_ExitCode); ok { + return x.ExitCode + } + return 0 +} + +func (x *CommandResponse) GetStdout() []byte { + if x, ok := x.GetData().(*CommandResponse_Stdout); ok { + return x.Stdout + } + return nil +} + +func (x *CommandResponse) GetStderr() []byte { + if x, ok := x.GetData().(*CommandResponse_Stderr); ok { + return x.Stderr + } + return nil +} + +type isCommandResponse_Data interface { + isCommandResponse_Data() +} + +type CommandResponse_ExitCode struct { + ExitCode int32 `protobuf:"varint,1,opt,name=exitCode,proto3,oneof"` +} + +type CommandResponse_Stdout struct { + Stdout []byte `protobuf:"bytes,2,opt,name=stdout,proto3,oneof"` +} + +type CommandResponse_Stderr struct { + Stderr []byte `protobuf:"bytes,3,opt,name=stderr,proto3,oneof"` +} + +func (*CommandResponse_ExitCode) isCommandResponse_Data() {} + +func (*CommandResponse_Stdout) isCommandResponse_Data() {} + +func (*CommandResponse_Stderr) isCommandResponse_Data() {} + +var File_cloudproto1_proto protoreflect.FileDescriptor + +var file_cloudproto1_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x31, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x31, + 0x22, 0x24, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x22, 0x6b, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x08, 0x65, 0x78, 0x69, + 0x74, 0x43, 0x6f, 0x64, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x65, + 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, + 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x42, 0x06, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x32, 0x5a, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x12, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x31, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, + 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cloudproto1_proto_rawDescOnce sync.Once + file_cloudproto1_proto_rawDescData = file_cloudproto1_proto_rawDesc +) + +func file_cloudproto1_proto_rawDescGZIP() []byte { + file_cloudproto1_proto_rawDescOnce.Do(func() { + file_cloudproto1_proto_rawDescData = protoimpl.X.CompressGZIP(file_cloudproto1_proto_rawDescData) + }) + return file_cloudproto1_proto_rawDescData +} + +var file_cloudproto1_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cloudproto1_proto_goTypes = []interface{}{ + 
(*CommandRequest)(nil), // 0: cloudproto1.CommandRequest + (*CommandResponse)(nil), // 1: cloudproto1.CommandResponse +} +var file_cloudproto1_proto_depIdxs = []int32{ + 0, // 0: cloudproto1.CommandService.Execute:input_type -> cloudproto1.CommandRequest + 1, // 1: cloudproto1.CommandService.Execute:output_type -> cloudproto1.CommandResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_cloudproto1_proto_init() } +func file_cloudproto1_proto_init() { + if File_cloudproto1_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cloudproto1_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommandRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudproto1_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommandResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cloudproto1_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CommandResponse_ExitCode)(nil), + (*CommandResponse_Stdout)(nil), + (*CommandResponse_Stderr)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cloudproto1_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cloudproto1_proto_goTypes, + DependencyIndexes: file_cloudproto1_proto_depIdxs, + MessageInfos: file_cloudproto1_proto_msgTypes, + }.Build() + File_cloudproto1_proto = out.File + file_cloudproto1_proto_rawDesc = nil + file_cloudproto1_proto_goTypes = nil + 
file_cloudproto1_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// CommandServiceClient is the client API for CommandService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CommandServiceClient interface { + // Execute runs a specific command with the provided flags and returns the result. + Execute(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (CommandService_ExecuteClient, error) +} + +type commandServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCommandServiceClient(cc grpc.ClientConnInterface) CommandServiceClient { + return &commandServiceClient{cc} +} + +func (c *commandServiceClient) Execute(ctx context.Context, in *CommandRequest, opts ...grpc.CallOption) (CommandService_ExecuteClient, error) { + stream, err := c.cc.NewStream(ctx, &_CommandService_serviceDesc.Streams[0], "/cloudproto1.CommandService/Execute", opts...) + if err != nil { + return nil, err + } + x := &commandServiceExecuteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CommandService_ExecuteClient interface { + Recv() (*CommandResponse, error) + grpc.ClientStream +} + +type commandServiceExecuteClient struct { + grpc.ClientStream +} + +func (x *commandServiceExecuteClient) Recv() (*CommandResponse, error) { + m := new(CommandResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CommandServiceServer is the server API for CommandService service. 
+type CommandServiceServer interface { + // Execute runs a specific command with the provided flags and returns the result. + Execute(*CommandRequest, CommandService_ExecuteServer) error +} + +// UnimplementedCommandServiceServer can be embedded to have forward compatible implementations. +type UnimplementedCommandServiceServer struct { +} + +func (*UnimplementedCommandServiceServer) Execute(*CommandRequest, CommandService_ExecuteServer) error { + return status.Errorf(codes.Unimplemented, "method Execute not implemented") +} + +func RegisterCommandServiceServer(s *grpc.Server, srv CommandServiceServer) { + s.RegisterService(&_CommandService_serviceDesc, srv) +} + +func _CommandService_Execute_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CommandRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CommandServiceServer).Execute(m, &commandServiceExecuteServer{stream}) +} + +type CommandService_ExecuteServer interface { + Send(*CommandResponse) error + grpc.ServerStream +} + +type commandServiceExecuteServer struct { + grpc.ServerStream +} + +func (x *commandServiceExecuteServer) Send(m *CommandResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _CommandService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cloudproto1.CommandService", + HandlerType: (*CommandServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Execute", + Handler: _CommandService_Execute_Handler, + ServerStreams: true, + }, + }, + Metadata: "cloudproto1.proto", +} diff --git a/pkg/cloudplugin/cloudproto1/cloudproto1.proto b/pkg/cloudplugin/cloudproto1/cloudproto1.proto new file mode 100644 index 00000000000..eddf7b59a1f --- /dev/null +++ b/pkg/cloudplugin/cloudproto1/cloudproto1.proto @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// CommandService defines the gRPC service to handle available commands and
// their execution.
service CommandService {
  // Execute runs a specific command with the provided flags and returns the result.
  rpc Execute(CommandRequest) returns (stream CommandResponse) {}
}
+// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/kubegems/opentofu/pkg/cloudplugin/cloudproto1 CommandServiceClient,CommandService_ExecuteClient + +package mock_cloudproto1 diff --git a/pkg/cloudplugin/mock_cloudproto1/mock.go b/pkg/cloudplugin/mock_cloudproto1/mock.go new file mode 100644 index 00000000000..5434f3dc659 --- /dev/null +++ b/pkg/cloudplugin/mock_cloudproto1/mock.go @@ -0,0 +1,181 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/kubegems/opentofu/pkg/cloudplugin/cloudproto1 (interfaces: CommandServiceClient,CommandService_ExecuteClient) + +// Package mock_cloudproto1 is a generated GoMock package. +package mock_cloudproto1 + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + cloudproto1 "github.com/kubegems/opentofu/pkg/cloudplugin/cloudproto1" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" +) + +// MockCommandServiceClient is a mock of CommandServiceClient interface. +type MockCommandServiceClient struct { + ctrl *gomock.Controller + recorder *MockCommandServiceClientMockRecorder +} + +// MockCommandServiceClientMockRecorder is the mock recorder for MockCommandServiceClient. +type MockCommandServiceClientMockRecorder struct { + mock *MockCommandServiceClient +} + +// NewMockCommandServiceClient creates a new mock instance. +func NewMockCommandServiceClient(ctrl *gomock.Controller) *MockCommandServiceClient { + mock := &MockCommandServiceClient{ctrl: ctrl} + mock.recorder = &MockCommandServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCommandServiceClient) EXPECT() *MockCommandServiceClientMockRecorder { + return m.recorder +} + +// Execute mocks base method. 
+func (m *MockCommandServiceClient) Execute(arg0 context.Context, arg1 *cloudproto1.CommandRequest, arg2 ...grpc.CallOption) (cloudproto1.CommandService_ExecuteClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Execute", varargs...) + ret0, _ := ret[0].(cloudproto1.CommandService_ExecuteClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Execute indicates an expected call of Execute. +func (mr *MockCommandServiceClientMockRecorder) Execute(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockCommandServiceClient)(nil).Execute), varargs...) +} + +// MockCommandService_ExecuteClient is a mock of CommandService_ExecuteClient interface. +type MockCommandService_ExecuteClient struct { + ctrl *gomock.Controller + recorder *MockCommandService_ExecuteClientMockRecorder +} + +// MockCommandService_ExecuteClientMockRecorder is the mock recorder for MockCommandService_ExecuteClient. +type MockCommandService_ExecuteClientMockRecorder struct { + mock *MockCommandService_ExecuteClient +} + +// NewMockCommandService_ExecuteClient creates a new mock instance. +func NewMockCommandService_ExecuteClient(ctrl *gomock.Controller) *MockCommandService_ExecuteClient { + mock := &MockCommandService_ExecuteClient{ctrl: ctrl} + mock.recorder = &MockCommandService_ExecuteClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCommandService_ExecuteClient) EXPECT() *MockCommandService_ExecuteClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. 
+func (m *MockCommandService_ExecuteClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockCommandService_ExecuteClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockCommandService_ExecuteClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockCommandService_ExecuteClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockCommandService_ExecuteClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockCommandService_ExecuteClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockCommandService_ExecuteClient) Recv() (*cloudproto1.CommandResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*cloudproto1.CommandResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. 
+func (mr *MockCommandService_ExecuteClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m *MockCommandService_ExecuteClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockCommandService_ExecuteClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method. +func (m *MockCommandService_ExecuteClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockCommandService_ExecuteClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method. +func (m *MockCommandService_ExecuteClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. 
// Set is a container that can hold each item only once and has a fast lookup time.
//
// You can define a new set like this:
//
//	var validKeyLengths = collections.Set[int]{
//		16: {},
//		24: {},
//		32: {},
//	}
//
// You can also use the constructor to create a new set
//
//	var validKeyLengths = collections.NewSet[int](16,24,32)
type Set[T comparable] map[T]struct{}

// NewSet constructs a new set given the members of type T.
func NewSet[T comparable](members ...T) Set[T] {
	// Pre-size the map since the final member count is known.
	set := make(Set[T], len(members))
	for _, member := range members {
		set[member] = struct{}{}
	}
	return set
}

// Has returns true if the item exists in the Set
func (s Set[T]) Has(value T) bool {
	_, ok := s[value]
	return ok
}

// String creates a sorted, comma-separated list of all values in the set.
// Map iteration order is random, so the parts are sorted to make the output
// deterministic.
func (s Set[T]) String() string {
	parts := make([]string, 0, len(s))
	for v := range s {
		parts = append(parts, fmt.Sprintf("%v", v))
	}

	// Fixed: the previous hand-written comparator tested "b > a" after
	// "a < b" had already failed, which is unreachable; it therefore
	// returned 0 for a > b, an inconsistent (asymmetric) ordering that
	// made SortStableFunc's result — and this method's output —
	// nondeterministic. slices.Sort gives the correct total order.
	slices.Sort(parts)
	return strings.Join(parts, ", ")
}
// AliasCommand is a Command implementation that wraps another Command for the purpose of aliasing.
type AliasCommand struct {
	cli.Command
}

// Run delegates to the wrapped command.
//
// NOTE(review): these three wrapper methods are redundant — embedding
// cli.Command already promotes Run/Help/Synopsis — but they make the
// delegation explicit and give a place to hook alias-specific behavior.
func (c *AliasCommand) Run(args []string) int {
	return c.Command.Run(args)
}

// Help delegates to the wrapped command's help text.
func (c *AliasCommand) Help() string {
	return c.Command.Help()
}

// Synopsis delegates to the wrapped command's synopsis.
func (c *AliasCommand) Synopsis() string {
	return c.Command.Synopsis()
}
// Run parses the apply (or destroy) command line, prepares the backend and
// operation request, executes the operation, and renders the results.
// It returns a process exit code: 0 on success, 1 on error, or the
// operation's own exit status on operation failure.
func (c *ApplyCommand) Run(rawArgs []string) int {
	var diags tfdiags.Diagnostics

	// Parse and apply global view arguments
	common, rawArgs := arguments.ParseView(rawArgs)
	c.View.Configure(common)

	// Propagate -no-color for legacy use of Ui. The remote backend and
	// cloud package use this; it should be removed when/if they are
	// migrated to views.
	c.Meta.color = !common.NoColor
	c.Meta.Color = c.Meta.color

	// Parse and validate flags
	var args *arguments.Apply
	switch {
	case c.Destroy:
		args, diags = arguments.ParseApplyDestroy(rawArgs)
	default:
		args, diags = arguments.ParseApply(rawArgs)
	}

	c.View.SetShowSensitive(args.ShowSensitive)

	// Instantiate the view, even if there are flag errors, so that we render
	// diagnostics according to the desired view
	view := views.NewApply(args.ViewType, c.Destroy, c.View)

	if diags.HasErrors() {
		view.Diagnostics(diags)
		view.HelpPrompt()
		return 1
	}

	// Check for user-supplied plugin path
	var err error
	if c.pluginPath, err = c.loadPluginPath(); err != nil {
		diags = diags.Append(err)
		view.Diagnostics(diags)
		return 1
	}

	// Inject variables from args into meta for static evaluation
	c.GatherVariables(args.Vars)

	// Load the encryption configuration
	enc, encDiags := c.Encryption()
	diags = diags.Append(encDiags)
	if encDiags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Attempt to load the plan file, if specified
	// NOTE(review): the := here rebinds diags, discarding any non-error
	// (warning) diagnostics accumulated above — presumably intentional
	// since errors already returned, but warnings are silently dropped;
	// confirm against the upstream behavior.
	planFile, diags := c.LoadPlanFile(args.PlanPath, enc)
	if diags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Check for invalid combination of plan file and variable overrides:
	// a saved plan already embeds the variable values it was created with.
	if planFile != nil && !args.Vars.Empty() {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Can't set variables when applying a saved plan",
			"The -var and -var-file options cannot be used when applying a saved plan file, because a saved plan includes the variable values that were set when it was created.",
		))
		view.Diagnostics(diags)
		return 1
	}

	// FIXME: the -input flag value is needed to initialize the backend and the
	// operation, but there is no clear path to pass this value down, so we
	// continue to mutate the Meta object state for now.
	c.Meta.input = args.InputEnabled

	// FIXME: the -parallelism flag is used to control the concurrency of
	// OpenTofu operations. At the moment, this value is used both to
	// initialize the backend via the ContextOpts field inside CLIOpts, and to
	// set a largely unused field on the Operation request. Again, there is no
	// clear path to pass this value down, so we continue to mutate the Meta
	// object state for now.
	c.Meta.parallelism = args.Operation.Parallelism

	// Prepare the backend, passing the plan file if present, and the
	// backend-specific arguments
	be, beDiags := c.PrepareBackend(planFile, args.State, args.ViewType, enc.State())
	diags = diags.Append(beDiags)
	if diags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Build the operation request
	opReq, opDiags := c.OperationRequest(be, view, args.ViewType, planFile, args.Operation, args.AutoApprove, enc)
	diags = diags.Append(opDiags)

	// Before we delegate to the backend, we'll print any warning diagnostics
	// we've accumulated here, since the backend will start fresh with its own
	// diagnostics.
	view.Diagnostics(diags)
	if diags.HasErrors() {
		return 1
	}
	// Reset so warnings already rendered above aren't shown a second time.
	diags = nil

	// Run the operation
	op, diags := c.RunOperation(be, opReq)
	view.Diagnostics(diags)
	if diags.HasErrors() {
		return 1
	}

	if op.Result != backend.OperationSuccess {
		return op.Result.ExitStatus()
	}

	// Render the resource count and outputs, unless those counts are being
	// rendered already in a remote OpenTofu process.
	if rb, isRemoteBackend := be.(BackendWithRemoteTerraformVersion); !isRemoteBackend || rb.IsLocalOperations() {
		view.ResourceCount(args.State.StateOutPath)
		if !c.Destroy && op.State != nil {
			view.Outputs(op.State.RootModule().OutputValues)
		}
	}

	view.Diagnostics(diags)

	if diags.HasErrors() {
		return 1
	}

	return 0
}
+ if c.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Destroy can't be called with a plan file", + fmt.Sprintf("If this plan was created using plan -destroy, apply it using:\n tofu apply %q", path), + )) + return nil, diags + } + } + + return planFile, diags +} + +func (c *ApplyCommand) PrepareBackend(planFile *planfile.WrappedPlanFile, args *arguments.State, viewType arguments.ViewType, enc encryption.StateEncryption) (backend.Enhanced, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // FIXME: we need to apply the state arguments to the meta object here + // because they are later used when initializing the backend. Carving a + // path to pass these arguments to the functions that need them is + // difficult but would make their use easier to understand. + c.Meta.applyStateArguments(args) + + // Load the backend + var be backend.Enhanced + var beDiags tfdiags.Diagnostics + if lp, ok := planFile.Local(); ok { + plan, err := lp.ReadPlan() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + fmt.Sprintf("Cannot read the plan from the given plan file: %s.", err), + )) + return nil, diags + } + if plan.Backend.Config == nil { + // Should never happen; always indicates a bug in the creation of the plan file + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + "The given plan file does not have a valid backend configuration. This is a bug in the OpenTofu command that generated this plan file.", + )) + return nil, diags + } + be, beDiags = c.BackendForLocalPlan(plan.Backend, enc) + } else { + // Both new plans and saved cloud plans load their backend from config. 
+ backendConfig, configDiags := c.loadBackendConfig(".") + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags + } + + be, beDiags = c.Backend(&BackendOpts{ + Config: backendConfig, + ViewType: viewType, + }, enc) + } + + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + return nil, diags + } + return be, diags +} + +func (c *ApplyCommand) OperationRequest( + be backend.Enhanced, + view views.Apply, + viewType arguments.ViewType, + planFile *planfile.WrappedPlanFile, + args *arguments.Operation, + autoApprove bool, + enc encryption.Encryption, +) (*backend.Operation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Applying changes with dev overrides in effect could make it impossible + // to switch back to a release version if the schema isn't compatible, + // so we'll warn about it. + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) + + // Build the operation + opReq := c.Operation(be, viewType, enc) + opReq.AutoApprove = autoApprove + opReq.ConfigDir = "." + opReq.PlanMode = args.PlanMode + opReq.Hooks = view.Hooks() + opReq.PlanFile = planFile + opReq.PlanRefresh = args.Refresh + opReq.Targets = args.Targets + opReq.ForceReplace = args.ForceReplace + opReq.Type = backend.OperationTypeApply + opReq.View = view.Operation() + + var err error + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %w", err)) + return nil, diags + } + + return opReq, diags +} + +func (c *ApplyCommand) GatherVariables(args *arguments.Vars) { + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. 
+ // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. + + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *ApplyCommand) Help() string { + if c.Destroy { + return c.helpDestroy() + } + + return c.helpApply() +} + +func (c *ApplyCommand) Synopsis() string { + if c.Destroy { + return "Destroy previously-created infrastructure" + } + + return "Create or update infrastructure" +} + +func (c *ApplyCommand) helpApply() string { + helpText := ` +Usage: tofu [global options] apply [options] [PLAN] + + Creates or updates infrastructure according to OpenTofu configuration + files in the current directory. + + By default, OpenTofu will generate a new plan and present it for your + approval before taking any action. You can optionally provide a plan + file created by a previous call to "tofu plan", in which case + OpenTofu will take the actions described in that plan without any + confirmation prompt. + +Options: + + -auto-approve Skip interactive approval of plan before applying. + + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. + + -compact-warnings If OpenTofu produces any warnings that are not + accompanied by errors, show them in a more compact + form that includes only the summary messages. + + -destroy Destroy OpenTofu-managed infrastructure. + The command "tofu destroy" is a convenience alias + for this option. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. 
+ + -input=true Ask for input for variables if not directly set. + + -no-color If specified, output won't contain any color. + + -parallelism=n Limit the number of parallel resource operations. + Defaults to 10. + + -state=path Path to read and save state (unless state-out + is specified). Defaults to "terraform.tfstate". + + -state-out=path Path to write state to that is different than + "-state". This can be used to preserve the old + state. + + -show-sensitive If specified, sensitive values will be displayed. + + If you don't provide a saved plan file then this command will also accept + all of the plan-customization options accepted by the tofu plan command. + For more information on those options, run: + tofu plan -help +` + return strings.TrimSpace(helpText) +} + +func (c *ApplyCommand) helpDestroy() string { + helpText := ` +Usage: tofu [global options] destroy [options] + + Destroy OpenTofu-managed infrastructure. + + This command is a convenience alias for: + tofu apply -destroy + + This command also accepts many of the plan-customization options accepted by + the tofu plan command. For more information on those options, run: + tofu plan -help +` + return strings.TrimSpace(helpText) +} diff --git a/pkg/command/apply_destroy_test.go b/pkg/command/apply_destroy_test.go new file mode 100644 index 00000000000..04db5880c9d --- /dev/null +++ b/pkg/command/apply_destroy_test.go @@ -0,0 +1,678 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +func TestApply_destroy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Log(output.Stdout()) + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // 
Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + stateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + if stateFile.State == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(stateFile.State.String()) + expectedStr := strings.TrimSpace(testApplyDestroyStr) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } + + // Should have a backup file + f, err = os.Open(statePath + DefaultBackupExtension) + if err != nil { + t.Fatalf("err: %s", err) + } + + backupStateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actualStr = strings.TrimSpace(backupStateFile.State.String()) + expectedStr = strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_destroyApproveNo(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + // Create some existing state + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + + defer testInputMap(t, map[string]string{ + "approve": "no", + })() + + // Do not use the 
NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + if got, want := output.Stdout(), "Destroy cancelled"; !strings.Contains(got, want) { + t.Fatalf("expected output to include %q, but was:\n%s", want, got) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_destroyApproveYes(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + // Create some existing state + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + + defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + view, done := testView(t) + c := &ApplyCommand{ + 
Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Log(output.Stdout()) + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(testApplyDestroyStr) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_destroyLockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + } + + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + + if !strings.Contains(output.Stderr(), "lock") { + 
t.Fatal("command output does not look like a lock error:", output.Stderr()) + } +} + +func TestApply_destroyPlan(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + planPath := testPlanFileNoop(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + planPath, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + if !strings.Contains(output.Stderr(), "plan file") { + t.Fatal("expected command output to refer to plan file, but got:", output.Stderr()) + } +} + +func TestApply_destroyPath(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + testFixturePath("apply"), + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + if !strings.Contains(output.Stderr(), "-chdir") { + t.Fatal("expected command output to refer to -chdir flag, but got:", output.Stderr()) + } +} + +// Config with multiple resources with dependencies, targeting destroy of a +// root node, expecting all other resources to be destroyed due to +// dependencies. 
+func TestApply_destroyTargetedDependencies(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-destroy-targeted"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-ab123"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_load_balancer", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + "test_load_balancer": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "instances": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + 
view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-target", "test_instance.foo", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Log(output.Stdout()) + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + stateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + if stateFile == nil || stateFile.State == nil { + t.Fatal("state should not be nil") + } + + spew.Config.DisableMethods = true + if !stateFile.State.Empty() { + t.Fatalf("unexpected final state\ngot: %s\nwant: empty state", spew.Sdump(stateFile.State)) + } + + // Should have a backup file + f, err = os.Open(statePath + DefaultBackupExtension) + if err != nil { + t.Fatalf("err: %s", err) + } + + backupStateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actualStr := strings.TrimSpace(backupStateFile.State.String()) + expectedStr := strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\nb%s", actualStr, expectedStr) + } +} + +// Config with multiple resources with dependencies, targeting destroy of a +// leaf node, expecting the other resources to remain. 
+func TestApply_destroyTargeted(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-destroy-targeted"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-ab123"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_load_balancer", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + wantState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-ab123"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, 
Computed: true}, + }, + }, + }, + "test_load_balancer": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "instances": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-target", "test_load_balancer.foo", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Log(output.Stdout()) + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + stateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + if stateFile == nil || stateFile.State == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(stateFile.State.String()) + expectedStr := strings.TrimSpace(wantState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\nb%s", actualStr, expectedStr) + } + + // Should have a backup file + f, err = os.Open(statePath + DefaultBackupExtension) + if err != nil { + t.Fatalf("err: %s", err) + } + + backupStateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + backupActualStr := strings.TrimSpace(backupStateFile.State.String()) + backupExpectedStr := 
strings.TrimSpace(originalState.String()) + if backupActualStr != backupExpectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\nb%s", backupActualStr, backupExpectedStr) + } +} + +const testApplyDestroyStr = ` + +` diff --git a/pkg/command/apply_test.go b/pkg/command/apply_test.go new file mode 100644 index 00000000000..57c1ec04ff6 --- /dev/null +++ b/pkg/command/apply_test.go @@ -0,0 +1,2399 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestApply(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should 
not be nil") + } +} +func TestApply_conditionalSensitive(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-plan-conditional-sensitive"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + } + code := c.Run(args) + output := done(t).Stderr() + if code != 1 { + t.Fatalf("bad status code: %d\n\n%s", code, output) + } + + if strings.Count(output, "Output refers to sensitive values") != 9 { + t.Fatal("Not all outputs have issue with refer to sensitive value", output) + } +} + +func TestApply_path(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + testFixturePath("apply"), + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + if !strings.Contains(output.Stderr(), "-chdir") { + t.Fatal("expected command output to refer to -chdir flag, but got:", output.Stderr()) + } +} + +func TestApply_approveNo(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + defer testInputMap(t, map[string]string{ + "approve": "no", + })() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + 
testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + if got, want := output.Stdout(), "Apply cancelled"; !strings.Contains(got, want) { + t.Fatalf("expected output to include %q, but was:\n%s", want, got) + } + + if _, err := os.Stat(statePath); err == nil || !os.IsNotExist(err) { + t.Fatalf("state file should not exist") + } +} + +func TestApply_approveYes(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +// test apply with locked state +func TestApply_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := applyFixtureProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code == 0 {
		t.Fatal("expected error")
	}

	// The failure message must mention the lock so users know why it failed.
	if !strings.Contains(output.Stderr(), "lock") {
		t.Fatal("command output does not look like a lock error:", output.Stderr())
	}
}

// test apply with locked state, waiting for unlock
func TestApply_lockedStateWait(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	statePath := testTempFile(t)

	unlock, err := testLockState(t, testDataDir, statePath)
	if err != nil {
		t.Fatal(err)
	}

	// unlock during apply
	go func() {
		time.Sleep(500 * time.Millisecond)
		unlock()
	}()

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// wait 4s just in case the lock process doesn't release in under a second,
	// and we want our context to be alive for a second retry at the 3s mark.
	args := []string{
		"-state", statePath,
		"-lock-timeout", "4s",
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("lock should have succeeded in less than 3s: %s", output.Stderr())
	}
}

// Verify that the parallelism flag allows no more than the desired number of
// concurrent calls to ApplyResourceChange.
func TestApply_parallelism(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("parallelism"), td)
	defer testChdir(t, td)()

	statePath := testTempFile(t)

	par := 4

	// started is a semaphore that we use to ensure that we never have more
	// than "par" apply operations happening concurrently
	started := make(chan struct{}, par)

	// beginCtx is used as a starting gate to hold back ApplyResourceChange
	// calls until we reach the desired concurrency. The cancel func "begin" is
	// called once we reach the desired concurrency, allowing all apply calls
	// to proceed in unison.
	beginCtx, begin := context.WithCancel(context.Background())

	// Since our mock provider has its own mutex preventing concurrent calls
	// to ApplyResourceChange, we need to use a number of separate providers
	// here. They will all have the same mock implementation function assigned
	// but crucially they will each have their own mutex.
	providerFactories := map[addrs.Provider]providers.Factory{}
	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("test%d", i)
		provider := &tofu.MockProvider{}
		provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
			ResourceTypes: map[string]providers.Schema{
				name + "_instance": {Block: &configschema.Block{}},
			},
		}
		provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
			return providers.PlanResourceChangeResponse{
				PlannedState: req.ProposedNewState,
			}
		}
		provider.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {

			// If we ever have more than our intended parallelism number of
			// apply operations running concurrently, the semaphore will fail.
			select {
			case started <- struct{}{}:
				defer func() {
					<-started
				}()
			default:
				t.Fatal("too many concurrent apply operations")
			}

			// If we never reach our intended parallelism, the context will
			// never be canceled and the test will time out.
			if len(started) >= par {
				begin()
			}
			<-beginCtx.Done()

			// do some "work"
			// Not required for correctness, but makes it easier to spot a
			// failure when there is more overlap.
			time.Sleep(10 * time.Millisecond)

			return providers.ApplyResourceChangeResponse{
				NewState: cty.EmptyObjectVal,
			}
		}
		providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider)
	}
	testingOverrides := &testingOverrides{
		Providers: providerFactories,
	}

	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: testingOverrides,
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
		fmt.Sprintf("-parallelism=%d", par),
	}

	res := c.Run(args)
	output := done(t)
	if res != 0 {
		t.Fatal(output.Stdout())
	}
}

// TestApply_configInvalid applies the "apply-config-invalid" fixture and
// expects exit code 1.
func TestApply_configInvalid(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-config-invalid"), td)
	defer testChdir(t, td)()

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", testTempFile(t),
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 1 {
		t.Fatalf("bad: \n%s", output.Stdout())
	}
}

// TestApply_defaultState runs apply with no -state flag and verifies the
// default state file path is used.
func TestApply_defaultState(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	statePath := filepath.Join(td, DefaultStateFilename)

	// Change to the temporary directory
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if
err := os.Chdir(filepath.Dir(statePath)); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// create an existing state file
	if err := statemgr.WriteAndPersist(statemgr.NewFilesystem(statePath, encryption.StateEncryptionDisabled()), states.NewState(), nil); err != nil {
		t.Fatal(err)
	}

	args := []string{
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
}

// TestApply_error makes the first ApplyResourceChange call return an error
// diagnostic; the command must exit 1 but still record state for the
// resources that did apply.
func TestApply_error(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-error"), td)
	defer testChdir(t, td)()

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// lock guards "errored" since apply calls may run concurrently.
	var lock sync.Mutex
	errored := false
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		lock.Lock()
		defer lock.Unlock()

		// Only the first apply call errors; later calls succeed.
		if !errored {
			errored = true
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error"))
		}

		s := req.PlannedState.AsValueMap()
		s["id"] = cty.StringVal("foo")

		resp.NewState = cty.ObjectVal(s)
		return
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		s := req.ProposedNewState.AsValueMap()
		s["id"] = cty.UnknownVal(cty.String)
		resp.PlannedState = cty.ObjectVal(s)
		return
	}
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id":    {Type: cty.String, Optional: true, Computed: true},
						"ami":   {Type: cty.String, Optional: true},
						"error": {Type: cty.Bool, Optional: true},
					},
				},
			},
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 1 {
		t.Fatalf("wrong exit code %d; want 1\n%s", code, output.Stdout())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
	if len(state.RootModule().Resources) == 0 {
		t.Fatal("no resources in state")
	}
}

func TestApply_input(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-input"), td)
	defer testChdir(t, td)()

	// Disable test mode so input would be asked
	test = false
	defer func() { test = true }()

	// The configuration for this test includes a declaration of variable
	// "foo" with no default, and we don't set it on the command line below,
	// so the apply command will produce an interactive prompt for the
	// value of var.foo. We'll answer "foo" here, and we expect the output
	// value "result" to echo that back to us below.
defaultInputReader = bytes.NewBufferString("foo\n")
	defaultInputWriter = new(bytes.Buffer)

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	expected := strings.TrimSpace(`

Outputs:

result = foo
	`)
	testStateOutput(t, statePath, expected)
}

// When only a partial set of the variables are set, OpenTofu
// should still ask for the unset ones by default (with -input=true)
func TestApply_inputPartial(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-input-partial"), td)
	defer testChdir(t, td)()

	// Disable test mode so input would be asked
	test = false
	defer func() { test = true }()

	// Set some default reader/writers for the inputs
	defaultInputReader = bytes.NewBufferString("one\ntwo\n")
	defaultInputWriter = new(bytes.Buffer)

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
		"-var", "foo=foovalue",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	expected := strings.TrimSpace(`

Outputs:

bar = one
foo = foovalue
	`)
	testStateOutput(t, statePath, expected)
}

func TestApply_noArgs(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	statePath := testTempFile(t)

	p := applyFixtureProvider()
	view, done := testView(t)
	c :=
&ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
}

// TestApply_plan applies a saved plan file and verifies that state is
// written to the -state-out path.
func TestApply_plan(t *testing.T) {
	// Disable test mode so input would be asked
	test = false
	defer func() { test = true }()

	// Set some default reader/writers for the inputs
	defaultInputReader = new(bytes.Buffer)
	defaultInputWriter = new(bytes.Buffer)

	planPath := applyFixturePlanFile(t)
	statePath := testTempFile(t)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state-out", statePath,
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
}

// TestApply_plan_backup verifies that applying a plan file backs up the
// prior state to the -backup path.
func TestApply_plan_backup(t *testing.T) {
	statePath := testTempFile(t)
	backupPath := testTempFile(t)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// create a state file that needs to be backed up
	fs := statemgr.NewFilesystem(statePath, encryption.StateEncryptionDisabled())
	fs.StateSnapshotMeta()
	if err := statemgr.WriteAndPersist(fs, states.NewState(), nil); err != nil {
		t.Fatal(err)
	}

	// the plan file must contain the metadata from the prior state to be
	// backed up
	planPath :=
applyFixturePlanFileMatchState(t, fs.StateSnapshotMeta())

	args := []string{
		"-state", statePath,
		"-backup", backupPath,
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	// Should have a backup file
	testStateRead(t, backupPath)
}

// TestApply_plan_noBackup verifies that "-backup=-" disables state backups.
func TestApply_plan_noBackup(t *testing.T) {
	planPath := applyFixturePlanFile(t)
	statePath := testTempFile(t)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state-out", statePath,
		"-backup", "-",
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	// Ensure there is no backup
	_, err := os.Stat(statePath + DefaultBackupExtension)
	if err == nil || !os.IsNotExist(err) {
		t.Fatalf("backup should not exist")
	}

	// Ensure there is no literal "-"
	_, err = os.Stat("-")
	if err == nil || !os.IsNotExist(err) {
		t.Fatalf("backup should not exist")
	}
}

// TestApply_plan_remoteState applies a plan whose backend is a remote (http)
// state server and verifies no local state files are written.
func TestApply_plan_remoteState(t *testing.T) {
	// Disable test mode so input would be asked
	test = false
	defer func() { test = true }()
	tmp := testCwd(t)
	remoteStatePath := filepath.Join(tmp, DefaultDataDir, DefaultStateFilename)
	if err := os.MkdirAll(filepath.Dir(remoteStatePath), 0755); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Set some default reader/writers for the inputs
	defaultInputReader = new(bytes.Buffer)
	defaultInputWriter = new(bytes.Buffer)

	// Create a remote state
	state := testState()
	_, srv := testRemoteState(t, state, 200)
	defer srv.Close()

	_, snap := testModuleWithSnapshot(t, "apply")
	backendConfig := cty.ObjectVal(map[string]cty.Value{
		"address":        cty.StringVal(srv.URL),
		"update_method":  cty.NullVal(cty.String),
		"lock_address":   cty.NullVal(cty.String),
		"unlock_address":
cty.NullVal(cty.String),
		"lock_method":               cty.NullVal(cty.String),
		"unlock_method":             cty.NullVal(cty.String),
		"username":                  cty.NullVal(cty.String),
		"password":                  cty.NullVal(cty.String),
		"skip_cert_verification":    cty.NullVal(cty.Bool),
		"retry_max":                 cty.NullVal(cty.String),
		"retry_wait_min":            cty.NullVal(cty.String),
		"retry_wait_max":            cty.NullVal(cty.String),
		"client_ca_certificate_pem": cty.NullVal(cty.String),
		"client_certificate_pem":    cty.NullVal(cty.String),
		"client_private_key_pem":    cty.NullVal(cty.String),
		"headers":                   cty.NullVal(cty.String),
	})
	backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type())
	if err != nil {
		t.Fatal(err)
	}
	planPath := testPlanFile(t, snap, state, &plans.Plan{
		Backend: plans.Backend{
			Type:   "http",
			Config: backendConfigRaw,
		},
		Changes: plans.NewChanges(),
	})

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	// State file should not be installed
	if _, err := os.Stat(filepath.Join(tmp, DefaultStateFilename)); err == nil {
		data, _ := os.ReadFile(DefaultStateFilename)
		t.Fatalf("State path should not exist: %s", string(data))
	}

	// Check that there is no remote state config
	if src, err := os.ReadFile(remoteStatePath); err == nil {
		t.Fatalf("has %s file; should not\n%s", remoteStatePath, src)
	}
}

// TestApply_planWithVarFile verifies that a terraform.tfvars in the current
// directory does not interfere with applying a saved plan file.
func TestApply_planWithVarFile(t *testing.T) {
	varFileDir := testTempDir(t)
	varFilePath := filepath.Join(varFileDir, "terraform.tfvars")
	if err := os.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}

	planPath := applyFixturePlanFile(t)
	statePath := testTempFile(t)

	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if
err := os.Chdir(varFileDir); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state-out", statePath,
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
}

// TestApply_planVars verifies that passing -var together with a saved plan
// file is rejected (exit code must be non-zero).
func TestApply_planVars(t *testing.T) {
	planPath := applyFixturePlanFile(t)
	statePath := testTempFile(t)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-var", "foo=bar",
		planPath,
	}
	code := c.Run(args)
	output := done(t)
	if code == 0 {
		t.Fatal("should've failed: ", output.Stdout())
	}
}

// we should be able to apply a plan file with no other file dependencies
func TestApply_planNoModuleFiles(t *testing.T) {
	// temporary data directory which we can remove between commands
	td := testTempDir(t)
	defer os.RemoveAll(td)

	defer testChdir(t, td)()

	p := applyFixtureProvider()
	planPath := applyFixturePlanFile(t)
	view, done := testView(t)
	apply := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			Ui:               new(cli.MockUi),
			View:             view,
		},
	}
	args := []string{
		planPath,
	}
	apply.Run(args)
	done(t)
}

// TestApply_refresh verifies that apply refreshes existing resources
// (ReadResource is called) and writes a backup of the prior state.
func TestApply_refresh(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	originalState := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode:
addrs.ManagedResourceMode,
				Type: "test_instance",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				AttrsJSON: []byte(`{"ami":"bar"}`),
				Status:    states.ObjectReady,
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})
	statePath := testStateFile(t, originalState)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if !p.ReadResourceCalled {
		t.Fatal("should call ReadResource")
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}

	// Should have a backup file
	backupState := testStateRead(t, statePath+DefaultBackupExtension)

	// The backup must match the original (pre-apply) state exactly.
	actualStr := strings.TrimSpace(backupState.String())
	expectedStr := strings.TrimSpace(originalState.String())
	if actualStr != expectedStr {
		t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr)
	}
}

// TestApply_refreshFalse verifies that -refresh=false suppresses
// ReadResource calls during apply.
func TestApply_refreshFalse(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	originalState := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_instance",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				AttrsJSON: []byte(`{"ami":"bar"}`),
				Status:    states.ObjectReady,
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})
statePath := testStateFile(t, originalState)

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
		"-refresh=false",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if p.ReadResourceCalled {
		t.Fatal("should not call ReadResource when refresh=false")
	}
}

// TestApply_shutdown verifies that signaling ShutdownCh during an apply
// causes the provider's Stop to be called and the command to exit 1.
func TestApply_shutdown(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-shutdown"), td)
	defer testChdir(t, td)()

	cancelled := make(chan struct{})
	shutdownCh := make(chan struct{})

	statePath := testTempFile(t)
	p := testProvider()

	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
			ShutdownCh:       shutdownCh,
		},
	}

	p.StopFn = func() error {
		close(cancelled)
		return nil
	}

	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		return
	}

	var once sync.Once
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		// only cancel once
		once.Do(func() {
			shutdownCh <- struct{}{}
		})

		// Because of the internal lock in the MockProvider, we can't
		// coordinate directly with the calling of Stop, and making the
		// MockProvider concurrent is disruptive to a lot of existing tests.
		// Wait here a moment to help make sure the main goroutine gets to the
		// Stop call before we exit, or the plan may finish before it can be
		// canceled.
time.Sleep(200 * time.Millisecond)

		resp.NewState = req.PlannedState
		return
	}

	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"ami": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 1 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	// non-blocking check: cancelled must already be closed by StopFn.
	select {
	case <-cancelled:
	default:
		t.Fatal("command not cancelled")
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}
}

// TestApply_state verifies that an existing state file is passed to the
// provider as the prior state and is backed up after apply.
func TestApply_state(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	originalState := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_instance",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				AttrsJSON: []byte(`{"ami":"foo"}`),
				Status:    states.ObjectReady,
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})
	statePath := testStateFile(t, originalState)

	p := applyFixtureProvider()
	p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{
		PlannedState: cty.ObjectVal(map[string]cty.Value{
			"ami": cty.StringVal("bar"),
		}),
	}
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{
		NewState: cty.ObjectVal(map[string]cty.Value{
			"ami": cty.StringVal("bar"),
		}),
	}

	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides:
metaOverridesForProvider(p),
			View:             view,
		},
	}

	// Run the apply command pointing to our existing state
	args := []string{
		"-state", statePath,
		"-auto-approve",
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	// Verify that the provider was called with the existing state
	actual := p.PlanResourceChangeRequest.PriorState
	expected := cty.ObjectVal(map[string]cty.Value{
		"id":  cty.NullVal(cty.String),
		"ami": cty.StringVal("foo"),
	})
	if !expected.RawEquals(actual) {
		t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected)
	}

	actual = p.ApplyResourceChangeRequest.PriorState
	expected = cty.ObjectVal(map[string]cty.Value{
		"id":  cty.NullVal(cty.String),
		"ami": cty.StringVal("foo"),
	})
	if !expected.RawEquals(actual) {
		t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected)
	}

	// Verify a new state exists
	if _, err := os.Stat(statePath); err != nil {
		t.Fatalf("err: %s", err)
	}

	state := testStateRead(t, statePath)
	if state == nil {
		t.Fatal("state should not be nil")
	}

	// The backup must match the original (pre-apply) state.
	backupState := testStateRead(t, statePath+DefaultBackupExtension)

	actualStr := strings.TrimSpace(backupState.String())
	expectedStr := strings.TrimSpace(originalState.String())
	if actualStr != expectedStr {
		t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr)
	}
}

// TestApply_stateNoExist verifies that naming a nonexistent plan/state file
// argument makes the command exit 1.
func TestApply_stateNoExist(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply"), td)
	defer testChdir(t, td)()

	p := applyFixtureProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	args := []string{
		"idontexist.tfstate",
	}
	code := c.Run(args)
	output := done(t)
	if code != 1 {
		t.Fatalf("bad: \n%s", output.Stdout())
	}
}

// TestApply_sensitiveOutput verifies that sensitive outputs are rendered
// with their value suppressed while non-sensitive outputs show the value.
func TestApply_sensitiveOutput(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-sensitive-output"), td)
	defer testChdir(t, td)()

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	statePath := testTempFile(t)

	args := []string{
		"-state", statePath,
		"-auto-approve",
	}

	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: \n%s", output.Stdout())
	}

	stdout := output.Stdout()
	if !strings.Contains(stdout, "notsensitive = \"Hello world\"") {
		t.Fatalf("bad: output should contain 'notsensitive' output\n%s", stdout)
	}
	if !strings.Contains(stdout, "sensitive = ") {
		t.Fatalf("bad: output should contain 'sensitive' output\n%s", stdout)
	}
}

// TestApply_vars verifies that a -var value on the command line reaches the
// provider's planned configuration.
func TestApply_vars(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-vars"), td)
	defer testChdir(t, td)()

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// actual captures the "value" attribute the provider is asked to plan.
	actual := ""
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"value": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		return providers.ApplyResourceChangeResponse{
			NewState: req.PlannedState,
		}
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		actual = req.ProposedNewState.GetAttr("value").AsString()
		return providers.PlanResourceChangeResponse{
			PlannedState:
req.ProposedNewState,
		}
	}

	args := []string{
		"-auto-approve",
		"-var", "foo=bar",
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if actual != "bar" {
		t.Fatal("didn't work")
	}
}

// TestApply_varFile verifies that a -var-file value reaches the provider's
// planned configuration.
func TestApply_varFile(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-vars"), td)
	defer testChdir(t, td)()

	varFilePath := testTempFile(t)
	if err := os.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// actual captures the "value" attribute the provider is asked to plan.
	actual := ""
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"value": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		return providers.ApplyResourceChangeResponse{
			NewState: req.PlannedState,
		}
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		actual = req.ProposedNewState.GetAttr("value").AsString()
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}

	args := []string{
		"-auto-approve",
		"-var-file", varFilePath,
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if actual != "bar" {
		t.Fatal("didn't work")
	}
}

// TestApply_varFileDefault verifies that terraform.tfvars in the working
// directory is loaded automatically.
func TestApply_varFileDefault(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
testCopyDir(t, testFixturePath("apply-vars"), td)
	defer testChdir(t, td)()

	varFilePath := filepath.Join(td, "terraform.tfvars")
	if err := os.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides: metaOverridesForProvider(p),
			View:             view,
		},
	}

	// actual captures the "value" attribute the provider is asked to plan.
	actual := ""
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"value": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		return providers.ApplyResourceChangeResponse{
			NewState: req.PlannedState,
		}
	}
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		actual = req.ProposedNewState.GetAttr("value").AsString()
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}

	// Note: no -var/-var-file flags; the default tfvars file must be used.
	args := []string{
		"-auto-approve",
		"-state", statePath,
	}
	code := c.Run(args)
	output := done(t)
	if code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, output.Stderr())
	}

	if actual != "bar" {
		t.Fatal("didn't work")
	}
}

// TestApply_varFileDefaultJSON verifies that terraform.tfvars.json in the
// working directory is loaded automatically.
func TestApply_varFileDefaultJSON(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("apply-vars"), td)
	defer testChdir(t, td)()

	varFilePath := filepath.Join(td, "terraform.tfvars.json")
	if err := os.WriteFile(varFilePath, []byte(applyVarFileJSON), 0644); err != nil {
		t.Fatalf("err: %s", err)
	}

	statePath := testTempFile(t)

	p := testProvider()
	view, done := testView(t)
	c := &ApplyCommand{
		Meta: Meta{
			testingOverrides:
metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + actual = req.ProposedNewState.GetAttr("value").AsString() + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestApply_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + backupPath := testTempFile(t) + + p := applyFixtureProvider() + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + 
}), + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + "-backup", backupPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + backupState := testStateRead(t, backupPath) + + actual := backupState.RootModule().Resources["test_instance.foo"] + expected := originalState.RootModule().Resources["test_instance.foo"] + if !cmp.Equal(actual, expected, cmpopts.EquateEmpty()) { + t.Fatalf( + "wrong aws_instance.foo state\n%s", + cmp.Diff(expected, actual, cmp.Transformer("bytesAsString", func(b []byte) string { + return string(b) + })), + ) + } +} + +func TestApply_disableBackup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + originalState := testState() + statePath := testStateFile(t, originalState) + + p := applyFixtureProvider() + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "ami": cty.StringVal("bar"), + }), + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-state", statePath, + "-backup", "-", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := 
p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during plan\ngot: %#v\nwant: %#v", actual, expected) + } + + actual = p.ApplyResourceChangeRequest.PriorState + expected = cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state during apply\ngot: %#v\nwant: %#v", actual, expected) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + // Ensure there is no backup + _, err := os.Stat(statePath + DefaultBackupExtension) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } + + // Ensure there is no literal "-" + _, err = os.Stat("-") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } +} + +func TestApply_tfWorkspace(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-tf-workspace"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + expected := strings.TrimSpace(` + +Outputs: + +output = default + `) + testStateOutput(t, statePath, expected) +} + +// Test that the OpenTofu env is passed through +func TestApply_tofuWorkspace(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, 
testFixturePath("apply-tofu-workspace"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + expected := strings.TrimSpace(` + +Outputs: + +output = default + `) + testStateOutput(t, statePath, expected) +} + +func TestApply_tfWorkspaceNonDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-tf-workspace"), td) + defer testChdir(t, td)() + + // Create new env + { + ui := new(cli.MockUi) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := newCmd.Run([]string{"test"}); code != 0 { + t.Fatal("error creating workspace") + } + } + + // Switch to it + { + args := []string{"test"} + ui := new(cli.MockUi) + selCmd := &WorkspaceSelectCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := selCmd.Run(args); code != 0 { + t.Fatal("error switching workspace") + } + } + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + statePath := filepath.Join("terraform.tfstate.d", "test", "terraform.tfstate") + expected := strings.TrimSpace(` + +Outputs: + +output = test + `) + testStateOutput(t, statePath, expected) +} + +// Test that the OpenTofu env is passed through +func TestApply_tofuWorkspaceNonDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-tofu-workspace"), td) + 
defer testChdir(t, td)() + + // Create new env + { + ui := new(cli.MockUi) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := newCmd.Run([]string{"test"}); code != 0 { + t.Fatal("error creating workspace") + } + } + + // Switch to it + { + args := []string{"test"} + ui := new(cli.MockUi) + selCmd := &WorkspaceSelectCommand{ + Meta: Meta{ + Ui: ui, + }, + } + if code := selCmd.Run(args); code != 0 { + t.Fatal("error switching workspace") + } + } + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + statePath := filepath.Join("terraform.tfstate.d", "test", "terraform.tfstate") + expected := strings.TrimSpace(` + +Outputs: + +output = test + `) + testStateOutput(t, statePath, expected) +} + +// Config with multiple resources, targeting apply of a subset +func TestApply_targeted(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + code := c.Run(args) + output 
:= done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "3 added, 0 changed, 0 destroyed"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } +} + +// Diagnostics for invalid -target flags +func TestApply_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + +func TestApply_replace(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-replace"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"hello"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: 
map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + createCount := 0 + deleteCount := 0 + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + if req.PriorState.IsNull() { + createCount++ + } + if req.PlannedState.IsNull() { + deleteCount++ + } + return providers.ApplyResourceChangeResponse{ + NewState: req.PlannedState, + } + } + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-state", statePath, + "-replace", "test_instance.a", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "1 added, 0 changed, 1 destroyed"; !strings.Contains(got, want) { + t.Errorf("wrong change summary\ngot output:\n%s\n\nwant substring: %s", got, want) + } + + if got, want := createCount, 1; got != want { + t.Errorf("wrong create count %d; want %d", got, want) + } + if got, want := deleteCount, 1; got != want { + t.Errorf("wrong create count %d; want %d", got, want) + } +} + +func TestApply_pluginPath(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + pluginPath := []string{"a", "b", "c"} + + if err := 
c.Meta.storePluginPath(pluginPath); err != nil { + t.Fatal(err) + } + c.Meta.pluginPath = nil + + args := []string{ + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !reflect.DeepEqual(pluginPath, c.Meta.pluginPath) { + t.Fatalf("expected plugin path %#v, got %#v", pluginPath, c.Meta.pluginPath) + } +} + +func TestApply_jsonGoldenReference(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-json", + "-state", statePath, + "-auto-approve", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + checkGoldenReference(t, output, "apply") +} + +func TestApply_warnings(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = applyFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + }, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return 
providers.ApplyResourceChangeResponse{ + NewState: cty.UnknownAsNull(req.PlannedState), + } + } + + t.Run("full warnings", func(t *testing.T) { + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-auto-approve"} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + wantWarnings := []string{ + "warning 1", + "warning 2", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-auto-approve", "-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 2 warnings and a message about -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "To see the full warning notes, run OpenTofu without -compact-warnings.", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +func TestApply_showSensitiveArg(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-sensitive-output"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + statePath := testTempFile(t) + + args := []string{ + "-state", statePath, + "-auto-approve", + "-show-sensitive", + } + + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + stdout := output.Stdout() + if !strings.Contains(stdout, "notsensitive = \"Hello world\"") { + t.Fatalf("bad: 
output should contain 'notsensitive' output\n%s", stdout) + } + if !strings.Contains(stdout, "sensitive = \"Hello world\"") { + t.Fatalf("bad: output should contain 'sensitive' output\n%s", stdout) + } +} + +// applyFixtureSchema returns a schema suitable for processing the +// configuration in testdata/apply . This schema should be +// assigned to a mock provider named "test". +func applyFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// applyFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/apply. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// OpenTofu Core. +func applyFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = applyFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.UnknownAsNull(req.PlannedState), + } + } + return p +} + +// applyFixturePlanFile creates a plan file at a temporary location containing +// a single change to create the test_instance.foo that is included in the +// "apply" test fixture, returning the location of that plan file. 
+func applyFixturePlanFile(t *testing.T) string { + return applyFixturePlanFileMatchState(t, statemgr.SnapshotMeta{}) +} + +// applyFixturePlanFileMatchState creates a planfile like applyFixturePlanFile, +// but inserts the state meta information if that plan must match a preexisting +// state. +func applyFixturePlanFileMatchState(t *testing.T, stateMeta statemgr.SnapshotMeta) string { + _, snap := testModuleWithSnapshot(t, "apply") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + return testPlanFileMatchState( + t, + snap, + states.NewState(), + plan, + stateMeta, + ) +} + +const applyVarFile = ` +foo = "bar" +` + +const applyVarFileJSON = ` +{ "foo": "bar" } +` diff --git a/pkg/command/arguments/apply.go b/pkg/command/arguments/apply.go new file mode 100644 index 00000000000..5ed99c979b8 --- /dev/null +++ b/pkg/command/arguments/apply.go @@ -0,0 +1,156 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Apply represents the command-line arguments for the apply command. +type Apply struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // AutoApprove skips the manual verification step for the apply operation. + AutoApprove bool + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. + InputEnabled bool + + // PlanPath contains an optional path to a stored plan file + PlanPath string + + // ViewType specifies which output format to use + ViewType ViewType + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// ParseApply processes CLI arguments, returning an Apply value and errors. +// If errors are encountered, an Apply value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseApply(args []string) (*Apply, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + apply := &Apply{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("apply", apply.State, apply.Operation, apply.Vars) + cmdFlags.BoolVar(&apply.AutoApprove, "auto-approve", false, "auto-approve") + cmdFlags.BoolVar(&apply.InputEnabled, "input", true, "input") + cmdFlags.BoolVar(&apply.ShowSensitive, "show-sensitive", false, "displays sensitive values") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 0 { + apply.PlanPath = args[0] + args = args[1:] + } + + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + // JSON view currently does not support input, so we disable it here. + if json { + apply.InputEnabled = false + } + + // JSON view cannot confirm apply, so we require either a plan file or + // auto-approve to be specified. We intentionally fail here rather than + // override auto-approve, which would be dangerous. + if json && apply.PlanPath == "" && !apply.AutoApprove { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plan file or auto-approve required", + "OpenTofu cannot ask for interactive approval when -json is set. You can either apply a saved plan file, or enable the -auto-approve option.", + )) + } + + diags = diags.Append(apply.Operation.Parse()) + + switch { + case json: + apply.ViewType = ViewJSON + default: + apply.ViewType = ViewHuman + } + + return apply, diags +} + +// ParseApplyDestroy is a special case of ParseApply that deals with the +// "tofu destroy" command, which is effectively an alias for +// "tofu apply -destroy". 
+func ParseApplyDestroy(args []string) (*Apply, tfdiags.Diagnostics) { + apply, diags := ParseApply(args) + + // So far ParseApply was using the command line options like -destroy + // and -refresh-only to determine the plan mode. For "tofu destroy" + // we expect neither of those arguments to be set, and so the plan mode + // should currently be set to NormalMode, which we'll replace with + // DestroyMode here. If it's already set to something else then that + // suggests incorrect usage. + switch apply.Operation.PlanMode { + case plans.NormalMode: + // This indicates that the user didn't specify any mode options at + // all, which is correct, although we know from the command that + // they actually intended to use DestroyMode here. + apply.Operation.PlanMode = plans.DestroyMode + case plans.DestroyMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + "The -destroy option is not valid for \"tofu destroy\", because this command always runs in destroy mode.", + )) + case plans.RefreshOnlyMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + "The -refresh-only option is not valid for \"tofu destroy\".", + )) + default: + // This is a non-ideal error message for if we forget to handle a + // newly-handled plan mode in Operation.Parse. Ideally they should all + // have cases above so we can produce better error messages. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid mode option", + fmt.Sprintf("The \"tofu destroy\" command doesn't support %s.", apply.Operation.PlanMode), + )) + } + + // NOTE: It's also invalid to have apply.PlanPath set in this codepath, + // but we don't check that in here because we'll return a different error + // message depending on whether the given path seems to refer to a saved + // plan file or to a configuration directory. The apply command + // implementation itself therefore handles this situation. 
+ + return apply, diags +} diff --git a/pkg/command/arguments/apply_test.go b/pkg/command/arguments/apply_test.go new file mode 100644 index 00000000000..8c01f7ee232 --- /dev/null +++ b/pkg/command/arguments/apply_test.go @@ -0,0 +1,394 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +func TestParseApply_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Apply + }{ + "defaults": { + nil, + &Apply{ + AutoApprove: false, + InputEnabled: true, + PlanPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "auto-approve, disabled input, and plan path": { + []string{"-auto-approve", "-input=false", "saved.tfplan"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + PlanPath: "saved.tfplan", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "destroy mode": { + []string{"-destroy"}, + &Apply{ + AutoApprove: false, + InputEnabled: true, + PlanPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "JSON view disables input": { + []string{"-json", "-auto-approve"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + PlanPath: "", + ViewType: ViewJSON, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := 
cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParseApply_json(t *testing.T) { + testCases := map[string]struct { + args []string + wantSuccess bool + }{ + "-json": { + []string{"-json"}, + false, + }, + "-json -auto-approve": { + []string{"-json", "-auto-approve"}, + true, + }, + "-json saved.tfplan": { + []string{"-json", "saved.tfplan"}, + true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + + if tc.wantSuccess { + if len(diags) > 0 { + t.Errorf("unexpected diags: %v", diags) + } + } else { + if got, want := diags.Err().Error(), "Plan file or auto-approve required"; !strings.Contains(got, want) { + t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) + } + } + + if got.ViewType != ViewJSON { + t.Errorf("unexpected view type. 
got: %#v, want: %#v", got.ViewType, ViewJSON) + } + }) + } +} + +func TestParseApply_invalid(t *testing.T) { + got, diags := ParseApply([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseApply_tooManyArguments(t *testing.T) { + got, diags := ParseApply([]string{"saved.tfplan", "please"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseApply_targets(t *testing.T) { + foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + 
} else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParseApply_replace(t *testing.T) { + foobarbaz, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.baz") + foobarbeep, _ := addrs.ParseAbsResourceInstanceStr("foo_bar.beep") + testCases := map[string]struct { + args []string + want []addrs.AbsResourceInstance + wantErr string + }{ + "no addresses by default": { + args: nil, + want: nil, + }, + "one address": { + args: []string{"-replace=foo_bar.baz"}, + want: []addrs.AbsResourceInstance{foobarbaz}, + }, + "two addresses": { + args: []string{"-replace=foo_bar.baz", "-replace", "foo_bar.beep"}, + want: []addrs.AbsResourceInstance{foobarbaz, foobarbeep}, + }, + "non-resource-instance address": { + args: []string{"-replace=module.boop"}, + want: nil, + wantErr: "A resource instance address is required here.", + }, + "data resource address": { + args: []string{"-replace=data.foo.bar"}, + want: nil, + wantErr: "Only managed resources can be used", + }, + "invalid traversal": { + args: []string{"-replace=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid address": { + args: []string{"-replace=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.ForceReplace, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParseApply_vars(t 
*testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseApply(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} + +func TestParseApplyDestroy_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Apply + }{ + "defaults": { + nil, + &Apply{ + AutoApprove: false, + InputEnabled: true, + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "auto-approve and disabled input": { + []string{"-auto-approve", "-input=false"}, + &Apply{ + AutoApprove: true, + InputEnabled: false, + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + 
got, diags := ParseApplyDestroy(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParseApplyDestroy_invalid(t *testing.T) { + t.Run("explicit destroy mode", func(t *testing.T) { + got, diags := ParseApplyDestroy([]string{"-destroy"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Invalid mode option:"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } + }) +} diff --git a/pkg/command/arguments/default.go b/pkg/command/arguments/default.go new file mode 100644 index 00000000000..888b7035272 --- /dev/null +++ b/pkg/command/arguments/default.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "flag" + "io" +) + +// defaultFlagSet creates a FlagSet with the common settings to override +// the flag package's noisy defaults. +func defaultFlagSet(name string) *flag.FlagSet { + f := flag.NewFlagSet(name, flag.ContinueOnError) + f.SetOutput(io.Discard) + f.Usage = func() {} + + return f +} diff --git a/pkg/command/arguments/extended.go b/pkg/command/arguments/extended.go new file mode 100644 index 00000000000..4f0cbdb55ae --- /dev/null +++ b/pkg/command/arguments/extended.go @@ -0,0 +1,248 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "flag" + "fmt" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// DefaultParallelism is the limit OpenTofu places on total parallel +// operations as it walks the dependency graph. +const DefaultParallelism = 10 + +// State describes arguments which are used to define how OpenTofu interacts +// with state. +type State struct { + // Lock controls whether or not the state manager is used to lock state + // during operations. + Lock bool + + // LockTimeout allows setting a time limit on acquiring the state lock. + // The default is 0, meaning no limit. + LockTimeout time.Duration + + // StatePath specifies a non-default location for the state file. The + // default value is blank, which is interpreted as "terraform.tfstate". + StatePath string + + // StateOutPath specifies a different path to write the final state file. + // The default value is blank, which results in state being written back to + // StatePath. + StateOutPath string + + // BackupPath specifies the path where a backup copy of the state file will + // be stored before the new state is written. The default value is blank, + // which is interpreted as StateOutPath + + // ".backup". + BackupPath string +} + +// Operation describes arguments which are used to configure how an OpenTofu +// operation such as a plan or apply executes. +type Operation struct { + // PlanMode selects one of the mutually-exclusive planning modes that + // decides the overall goal of a plan operation. This field is relevant + // only for an operation that produces a plan. + PlanMode plans.Mode + + // Parallelism is the limit OpenTofu places on total parallel operations + // as it walks the dependency graph. 
+ Parallelism int + + // Refresh controls whether or not the operation should refresh existing + // state before proceeding. Default is true. + Refresh bool + + // Targets allow limiting an operation to a set of resource addresses and + // their dependencies. + Targets []addrs.Targetable + + // ForceReplace addresses cause OpenTofu to force a particular set of + // resource instances to generate "replace" actions in any plan where they + // would normally have generated "no-op" or "update" actions. + // + // This is currently limited to specific instances because typical uses + // of replace are associated with only specific remote objects that the + // user has somehow learned to be malfunctioning, in which case it + // would be unusual and potentially dangerous to replace everything under + // a module all at once. We could potentially loosen this later if we + // learn a use-case for broader matching. + ForceReplace []addrs.AbsResourceInstance + + // These private fields are used only temporarily during decoding. Use + // method Parse to populate the exported fields from these, validating + // the raw values in the process. + targetsRaw []string + forceReplaceRaw []string + destroyRaw bool + refreshOnlyRaw bool +} + +// Parse must be called on Operation after initial flag parse. This processes +// the raw target flags into addrs.Targetable values, returning diagnostics if +// invalid. 
+func (o *Operation) Parse() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + o.Targets = nil + + for _, tr := range o.targetsRaw { + traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(tr), "", hcl.Pos{Line: 1, Column: 1}) + if syntaxDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid target %q", tr), + syntaxDiags[0].Detail, + )) + continue + } + + target, targetDiags := addrs.ParseTarget(traversal) + if targetDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid target %q", tr), + targetDiags[0].Description().Detail, + )) + continue + } + + o.Targets = append(o.Targets, target.Subject) + } + + for _, raw := range o.forceReplaceRaw { + traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(raw), "", hcl.Pos{Line: 1, Column: 1}) + if syntaxDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid force-replace address %q", raw), + syntaxDiags[0].Detail, + )) + continue + } + + addr, addrDiags := addrs.ParseAbsResourceInstance(traversal) + if addrDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid force-replace address %q", raw), + addrDiags[0].Description().Detail, + )) + continue + } + + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid force-replace address %q", raw), + "Only managed resources can be used with the -replace=... option.", + )) + continue + } + + o.ForceReplace = append(o.ForceReplace, addr) + } + + // If you add a new possible value for o.PlanMode here, consider also + // adding a specialized error message for it in ParseApplyDestroy. 
+ switch { + case o.destroyRaw && o.refreshOnlyRaw: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible plan mode options", + "The -destroy and -refresh-only options are mutually-exclusive.", + )) + case o.destroyRaw: + o.PlanMode = plans.DestroyMode + case o.refreshOnlyRaw: + o.PlanMode = plans.RefreshOnlyMode + if !o.Refresh { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Incompatible refresh options", + "It doesn't make sense to use -refresh-only at the same time as -refresh=false, because OpenTofu would have nothing to do.", + )) + } + default: + o.PlanMode = plans.NormalMode + } + + return diags +} + +// Vars describes arguments which specify non-default variable values. This +// interface is unfortunately obscure, because the order of the CLI arguments +// determines the final value of the gathered variables. In future it might be +// desirable for the arguments package to handle the gathering of variables +// directly, returning a map of variable values. +type Vars struct { + vars *flagNameValueSlice + varFiles *flagNameValueSlice +} + +func (v *Vars) All() []FlagNameValue { + if v.vars == nil { + return nil + } + return v.vars.AllItems() +} + +func (v *Vars) Empty() bool { + if v.vars == nil { + return true + } + return v.vars.Empty() +} + +// extendedFlagSet creates a FlagSet with common backend, operation, and vars +// flags used in many commands. Target structs for each subset of flags must be +// provided in order to support those flags. 
+func extendedFlagSet(name string, state *State, operation *Operation, vars *Vars) *flag.FlagSet { + f := defaultFlagSet(name) + + if state == nil && operation == nil && vars == nil { + panic("use defaultFlagSet") + } + + if state != nil { + f.BoolVar(&state.Lock, "lock", true, "lock") + f.DurationVar(&state.LockTimeout, "lock-timeout", 0, "lock-timeout") + f.StringVar(&state.StatePath, "state", "", "state-path") + f.StringVar(&state.StateOutPath, "state-out", "", "state-path") + f.StringVar(&state.BackupPath, "backup", "", "backup-path") + } + + if operation != nil { + f.IntVar(&operation.Parallelism, "parallelism", DefaultParallelism, "parallelism") + f.BoolVar(&operation.Refresh, "refresh", true, "refresh") + f.BoolVar(&operation.destroyRaw, "destroy", false, "destroy") + f.BoolVar(&operation.refreshOnlyRaw, "refresh-only", false, "refresh-only") + f.Var((*flagStringSlice)(&operation.targetsRaw), "target", "target") + f.Var((*flagStringSlice)(&operation.forceReplaceRaw), "replace", "replace") + } + + // Gather all -var and -var-file arguments into one heterogenous structure + // to preserve the overall order. + if vars != nil { + varsFlags := newFlagNameValueSlice("-var") + varFilesFlags := varsFlags.Alias("-var-file") + vars.vars = &varsFlags + vars.varFiles = &varFilesFlags + f.Var(vars.vars, "var", "var") + f.Var(vars.varFiles, "var-file", "var-file") + } + + return f +} diff --git a/pkg/command/arguments/flags.go b/pkg/command/arguments/flags.go new file mode 100644 index 00000000000..88395f8f566 --- /dev/null +++ b/pkg/command/arguments/flags.go @@ -0,0 +1,102 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "flag" + "fmt" +) + +// flagStringSlice is a flag.Value implementation which allows collecting +// multiple instances of a single flag into a slice. 
This is used for flags +// such as -target=aws_instance.foo and -var x=y. +type flagStringSlice []string + +var _ flag.Value = (*flagStringSlice)(nil) + +func (v *flagStringSlice) String() string { + return "" +} +func (v *flagStringSlice) Set(raw string) error { + *v = append(*v, raw) + + return nil +} + +// flagNameValueSlice is a flag.Value implementation that appends raw flag +// names and values to a slice. This is used to collect a sequence of flags +// with possibly different names, preserving the overall order. +// +// FIXME: this is a copy of rawFlags from command/meta_config.go, with the +// eventual aim of replacing it altogether by gathering variables in the +// arguments package. +type flagNameValueSlice struct { + flagName string + items *[]FlagNameValue +} + +var _ flag.Value = flagNameValueSlice{} + +func newFlagNameValueSlice(flagName string) flagNameValueSlice { + var items []FlagNameValue + return flagNameValueSlice{ + flagName: flagName, + items: &items, + } +} + +func (f flagNameValueSlice) Empty() bool { + if f.items == nil { + return true + } + return len(*f.items) == 0 +} + +func (f flagNameValueSlice) AllItems() []FlagNameValue { + if f.items == nil { + return nil + } + return *f.items +} + +func (f flagNameValueSlice) Alias(flagName string) flagNameValueSlice { + return flagNameValueSlice{ + flagName: flagName, + items: f.items, + } +} + +func (f flagNameValueSlice) String() string { + return "" +} + +func (f flagNameValueSlice) Set(str string) error { + *f.items = append(*f.items, FlagNameValue{ + Name: f.flagName, + Value: str, + }) + return nil +} + +type FlagNameValue struct { + Name string + Value string +} + +func (f FlagNameValue) String() string { + return fmt.Sprintf("%s=%q", f.Name, f.Value) +} + +// FlagIsSet returns whether a flag is explicitly set in a set of flags +func FlagIsSet(flags *flag.FlagSet, name string) bool { + isSet := false + flags.Visit(func(f *flag.Flag) { + if f.Name == name { + isSet = true + } + }) + return 
isSet +} diff --git a/pkg/command/arguments/output.go b/pkg/command/arguments/output.go new file mode 100644 index 00000000000..062b31bc233 --- /dev/null +++ b/pkg/command/arguments/output.go @@ -0,0 +1,101 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Output represents the command-line arguments for the output command. +type Output struct { + // Name identifies which root module output to show. If empty, show all + // outputs. + Name string + + // StatePath is an optional path to a state file, from which outputs will + // be loaded. + StatePath string + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType + + Vars *Vars + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// ParseOutput processes CLI arguments, returning an Output value and errors. +// If errors are encountered, an Output value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseOutput(args []string) (*Output, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + output := &Output{ + Vars: &Vars{}, + } + + var jsonOutput, rawOutput bool + var statePath string + cmdFlags := extendedFlagSet("output", nil, nil, output.Vars) + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.BoolVar(&rawOutput, "raw", false, "raw") + cmdFlags.StringVar(&statePath, "state", "", "path") + cmdFlags.BoolVar(&output.ShowSensitive, "show-sensitive", false, "displays sensitive values") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + )) + } + + if jsonOutput && rawOutput { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + )) + + // Since the desired output format is unknowable, fall back to default + jsonOutput = false + rawOutput = false + } + + output.StatePath = statePath + + if len(args) > 0 { + output.Name = args[0] + } + + if rawOutput && output.Name == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + )) + } + + switch { + case jsonOutput: + output.ViewType = ViewJSON + case rawOutput: + output.ViewType = ViewRaw + default: + output.ViewType = ViewHuman + } + + return output, diags +} diff --git a/pkg/command/arguments/output_test.go b/pkg/command/arguments/output_test.go new file mode 100644 index 00000000000..9b4178e97f1 --- /dev/null +++ b/pkg/command/arguments/output_test.go @@ -0,0 +1,149 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseOutput_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Output + }{ + "defaults": { + nil, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + }, + "json": { + []string{"-json"}, + &Output{ + Name: "", + ViewType: ViewJSON, + StatePath: "", + }, + }, + "raw": { + []string{"-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "", + }, + }, + "state": { + []string{"-state=foobar.tfstate", "-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "foobar.tfstate", + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseOutput(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseOutput_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Output + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "json and raw specified": { + []string{"-json", "-raw"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + ), + }, + }, + "raw with no name": { + []string{"-raw"}, + &Output{ + Name: "", + ViewType: ViewRaw, + StatePath: "", + }, + tfdiags.Diagnostics{ + 
tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + ), + }, + }, + "too many arguments": { + []string{"-raw", "-state=foo.tfstate", "bar", "baz"}, + &Output{ + Name: "bar", + ViewType: ViewRaw, + StatePath: "foo.tfstate", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseOutput(tc.args) + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/pkg/command/arguments/plan.go b/pkg/command/arguments/plan.go new file mode 100644 index 00000000000..d1b516ee05e --- /dev/null +++ b/pkg/command/arguments/plan.go @@ -0,0 +1,96 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Plan represents the command-line arguments for the plan command. +type Plan struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // DetailedExitCode enables different exit codes for error, success with + // changes, and success with no changes. + DetailedExitCode bool + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. 
+ InputEnabled bool + + // OutPath contains an optional path to store the plan file + OutPath string + + // GenerateConfigPath tells OpenTofu that config should be generated for + // unmatched import target paths and which path the generated file should + // be written to. + GenerateConfigPath string + + // ViewType specifies which output format to use + ViewType ViewType + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// ParsePlan processes CLI arguments, returning a Plan value and errors. +// If errors are encountered, a Plan value is still returned representing +// the best effort interpretation of the arguments. +func ParsePlan(args []string) (*Plan, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + plan := &Plan{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("plan", plan.State, plan.Operation, plan.Vars) + cmdFlags.BoolVar(&plan.DetailedExitCode, "detailed-exitcode", false, "detailed-exitcode") + cmdFlags.BoolVar(&plan.InputEnabled, "input", true, "input") + cmdFlags.StringVar(&plan.OutPath, "out", "", "out") + cmdFlags.StringVar(&plan.GenerateConfigPath, "generate-config-out", "", "generate-config-out") + cmdFlags.BoolVar(&plan.ShowSensitive, "show-sensitive", false, "displays sensitive values") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "To specify a working directory for the plan, use the global -chdir flag.", + )) + } + + diags = diags.Append(plan.Operation.Parse()) + + // JSON view currently does not support input, so we disable it here + if json { + plan.InputEnabled = false + } + + switch { + 
case json: + plan.ViewType = ViewJSON + default: + plan.ViewType = ViewHuman + } + + return plan, diags +} diff --git a/pkg/command/arguments/plan_test.go b/pkg/command/arguments/plan_test.go new file mode 100644 index 00000000000..c0a11a404aa --- /dev/null +++ b/pkg/command/arguments/plan_test.go @@ -0,0 +1,212 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +func TestParsePlan_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Plan + }{ + "defaults": { + nil, + &Plan{ + DetailedExitCode: false, + InputEnabled: true, + OutPath: "", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "setting all options": { + []string{"-destroy", "-detailed-exitcode", "-input=false", "-out=saved.tfplan"}, + &Plan{ + DetailedExitCode: true, + InputEnabled: false, + OutPath: "saved.tfplan", + ViewType: ViewHuman, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.DestroyMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + "JSON view disables input": { + []string{"-json"}, + &Plan{ + DetailedExitCode: false, + InputEnabled: false, + OutPath: "", + ViewType: ViewJSON, + State: &State{Lock: true}, + Vars: &Vars{}, + Operation: &Operation{ + PlanMode: plans.NormalMode, + Parallelism: 10, + Refresh: true, + }, + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + 
} + if diff := cmp.Diff(tc.want, got, cmpOpts); diff != "" { + t.Errorf("unexpected result\n%s", diff) + } + }) + } +} + +func TestParsePlan_invalid(t *testing.T) { + got, diags := ParsePlan([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParsePlan_tooManyArguments(t *testing.T) { + got, diags := ParsePlan([]string{"saved.tfplan"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParsePlan_targets(t *testing.T) { + foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + 
t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParsePlan_vars(t *testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParsePlan(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} diff --git a/pkg/command/arguments/refresh.go b/pkg/command/arguments/refresh.go new file mode 100644 index 00000000000..d44ae89af39 --- /dev/null +++ b/pkg/command/arguments/refresh.go @@ -0,0 +1,76 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Refresh represents the command-line arguments for the refresh command. +type Refresh struct { + // State, Operation, and Vars are the common extended flags + State *State + Operation *Operation + Vars *Vars + + // InputEnabled is used to disable interactive input for unspecified + // variable and backend config values. Default is true. + InputEnabled bool + + // ViewType specifies which output format to use + ViewType ViewType +} + +// ParseRefresh processes CLI arguments, returning a Refresh value and errors. +// If errors are encountered, a Refresh value is still returned representing +// the best effort interpretation of the arguments. +func ParseRefresh(args []string) (*Refresh, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + refresh := &Refresh{ + State: &State{}, + Operation: &Operation{}, + Vars: &Vars{}, + } + + cmdFlags := extendedFlagSet("refresh", refresh.State, refresh.Operation, refresh.Vars) + cmdFlags.BoolVar(&refresh.InputEnabled, "input", true, "input") + + var json bool + cmdFlags.BoolVar(&json, "json", false, "json") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + diags = diags.Append(refresh.Operation.Parse()) + + // JSON view currently does not support input, so we disable it here + if json { + refresh.InputEnabled = false + } + + switch { + case json: + refresh.ViewType = ViewJSON + default: + refresh.ViewType = ViewHuman + } + + return refresh, diags +} diff --git a/pkg/command/arguments/refresh_test.go b/pkg/command/arguments/refresh_test.go new file mode 100644 index 
00000000000..9bf29a9ec58 --- /dev/null +++ b/pkg/command/arguments/refresh_test.go @@ -0,0 +1,185 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestParseRefresh_basicValid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Refresh + }{ + "defaults": { + nil, + &Refresh{ + InputEnabled: true, + ViewType: ViewHuman, + }, + }, + "input=false": { + []string{"-input=false"}, + &Refresh{ + InputEnabled: false, + ViewType: ViewHuman, + }, + }, + "JSON view disables input": { + []string{"-json"}, + &Refresh{ + InputEnabled: false, + ViewType: ViewJSON, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + // Ignore the extended arguments for simplicity + got.State = nil + got.Operation = nil + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseRefresh_invalid(t *testing.T) { + got, diags := ParseRefresh([]string{"-frob"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "flag provided but not defined"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseRefresh_tooManyArguments(t *testing.T) { + got, diags := ParseRefresh([]string{"saved.tfplan"}) + if len(diags) == 0 { + t.Fatal("expected diags but got none") + } + if got, want := diags.Err().Error(), "Too many command line arguments"; !strings.Contains(got, want) { + t.Fatalf("wrong diags\n got: 
%s\nwant: %s", got, want) + } + if got.ViewType != ViewHuman { + t.Fatalf("wrong view type, got %#v, want %#v", got.ViewType, ViewHuman) + } +} + +func TestParseRefresh_targets(t *testing.T) { + foobarbaz, _ := addrs.ParseTargetStr("foo_bar.baz") + boop, _ := addrs.ParseTargetStr("module.boop") + testCases := map[string]struct { + args []string + want []addrs.Targetable + wantErr string + }{ + "no targets by default": { + args: nil, + want: nil, + }, + "one target": { + args: []string{"-target=foo_bar.baz"}, + want: []addrs.Targetable{foobarbaz.Subject}, + }, + "two targets": { + args: []string{"-target=foo_bar.baz", "-target", "module.boop"}, + want: []addrs.Targetable{foobarbaz.Subject, boop.Subject}, + }, + "invalid traversal": { + args: []string{"-target=foo."}, + want: nil, + wantErr: "Dot must be followed by attribute name", + }, + "invalid target": { + args: []string{"-target=data[0].foo"}, + want: nil, + wantErr: "A data source name is required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + if len(diags) > 0 { + if tc.wantErr == "" { + t.Fatalf("unexpected diags: %v", diags) + } else if got := diags.Err().Error(); !strings.Contains(got, tc.wantErr) { + t.Fatalf("wrong diags\n got: %s\nwant: %s", got, tc.wantErr) + } + } + if !cmp.Equal(got.Operation.Targets, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(got.Operation.Targets, tc.want)) + } + }) + } +} + +func TestParseRefresh_vars(t *testing.T) { + testCases := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + 
"-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseRefresh(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} diff --git a/pkg/command/arguments/show.go b/pkg/command/arguments/show.go new file mode 100644 index 00000000000..5d6b27ef05a --- /dev/null +++ b/pkg/command/arguments/show.go @@ -0,0 +1,71 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Show represents the command-line arguments for the show command. +type Show struct { + // Path is the path to the state file or plan file to be displayed. If + // unspecified, show will display the latest state snapshot. + Path string + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType + + Vars *Vars + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// ParseShow processes CLI arguments, returning a Show value and errors. +// If errors are encountered, a Show value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseShow(args []string) (*Show, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + show := &Show{ + Path: "", + Vars: &Vars{}, + } + + var jsonOutput bool + cmdFlags := extendedFlagSet("show", nil, nil, show.Vars) + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.BoolVar(&show.ShowSensitive, "show-sensitive", false, "displays sensitive values") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + if len(args) > 0 { + show.Path = args[0] + } + + switch { + case jsonOutput: + show.ViewType = ViewJSON + default: + show.ViewType = ViewHuman + } + + return show, diags +} diff --git a/pkg/command/arguments/show_test.go b/pkg/command/arguments/show_test.go new file mode 100644 index 00000000000..d9d4ca82bcd --- /dev/null +++ b/pkg/command/arguments/show_test.go @@ -0,0 +1,106 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseShow_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Show + }{ + "defaults": { + nil, + &Show{ + Path: "", + ViewType: ViewHuman, + }, + }, + "json": { + []string{"-json"}, + &Show{ + Path: "", + ViewType: ViewJSON, + }, + }, + "path": { + []string{"-json", "foo"}, + &Show{ + Path: "foo", + ViewType: ViewJSON, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseShow(tc.args) + got.Vars = nil + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseShow_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Show + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Show{ + Path: "", + ViewType: ViewHuman, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "too many arguments": { + []string{"-json", "bar", "baz"}, + &Show{ + Path: "bar", + ViewType: ViewJSON, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseShow(tc.args) + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/pkg/command/arguments/test.go b/pkg/command/arguments/test.go new file 
mode 100644 index 00000000000..10c74bd59bf --- /dev/null +++ b/pkg/command/arguments/test.go @@ -0,0 +1,66 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Test represents the command-line arguments for the test command. +type Test struct { + // Filter contains a list of test files to execute. If empty, all test files + // will be executed. + Filter []string + + // TestDirectory allows the user to override the directory that the test + // command will use to discover test files, defaults to "tests". Regardless + // of the value here, test files within the configuration directory will + // always be discovered. + TestDirectory string + + // ViewType specifies which output format to use: human or JSON. + ViewType ViewType + + // You can specify common variables for all tests from the command line. + Vars *Vars + + // Verbose tells the test command to print out the plan either in + // human-readable format or JSON for each run step depending on the + // ViewType. 
+ Verbose bool +} + +func ParseTest(args []string) (*Test, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + test := Test{ + Vars: new(Vars), + } + + var jsonOutput bool + cmdFlags := extendedFlagSet("test", nil, nil, test.Vars) + cmdFlags.Var((*flagStringSlice)(&test.Filter), "filter", "filter") + cmdFlags.StringVar(&test.TestDirectory, "test-directory", configs.DefaultTestDirectory, "test-directory") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.BoolVar(&test.Verbose, "verbose", false, "verbose") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error())) + } + + switch { + case jsonOutput: + test.ViewType = ViewJSON + default: + test.ViewType = ViewHuman + } + + return &test, diags +} diff --git a/pkg/command/arguments/test_test.go b/pkg/command/arguments/test_test.go new file mode 100644 index 00000000000..6b7c68d8f7d --- /dev/null +++ b/pkg/command/arguments/test_test.go @@ -0,0 +1,159 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseTest_Vars(t *testing.T) { + tcs := map[string]struct { + args []string + want []FlagNameValue + }{ + "no var flags by default": { + args: nil, + want: nil, + }, + "one var": { + args: []string{"-var", "foo=bar"}, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + }, + }, + "one var-file": { + args: []string{"-var-file", "cool.tfvars"}, + want: []FlagNameValue{ + {Name: "-var-file", Value: "cool.tfvars"}, + }, + }, + "ordering preserved": { + args: []string{ + "-var", "foo=bar", + "-var-file", "cool.tfvars", + "-var", "boop=beep", + }, + want: []FlagNameValue{ + {Name: "-var", Value: "foo=bar"}, + {Name: "-var-file", Value: "cool.tfvars"}, + {Name: "-var", Value: "boop=beep"}, + }, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + got, diags := ParseTest(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if vars := got.Vars.All(); !cmp.Equal(vars, tc.want) { + t.Fatalf("unexpected result\n%s", cmp.Diff(vars, tc.want)) + } + if got, want := got.Vars.Empty(), len(tc.want) == 0; got != want { + t.Fatalf("expected Empty() to return %t, but was %t", want, got) + } + }) + } +} + +func TestParseTest(t *testing.T) { + tcs := map[string]struct { + args []string + want *Test + wantDiags tfdiags.Diagnostics + }{ + "defaults": { + args: nil, + want: &Test{ + Filter: nil, + TestDirectory: "tests", + ViewType: ViewHuman, + Vars: &Vars{}, + }, + wantDiags: nil, + }, + "with-filters": { + args: []string{"-filter=one.tftest.hcl", "-filter=two.tftest.hcl"}, + want: &Test{ + Filter: []string{"one.tftest.hcl", "two.tftest.hcl"}, + TestDirectory: "tests", + ViewType: ViewHuman, + Vars: &Vars{}, + }, + wantDiags: nil, + }, + "json": { + args: 
[]string{"-json"}, + want: &Test{ + Filter: nil, + TestDirectory: "tests", + ViewType: ViewJSON, + Vars: &Vars{}, + }, + wantDiags: nil, + }, + "test-directory": { + args: []string{"-test-directory=other"}, + want: &Test{ + Filter: nil, + TestDirectory: "other", + ViewType: ViewHuman, + Vars: &Vars{}, + }, + wantDiags: nil, + }, + "verbose": { + args: []string{"-verbose"}, + want: &Test{ + Filter: nil, + TestDirectory: "tests", + ViewType: ViewHuman, + Verbose: true, + Vars: &Vars{}, + }, + }, + "unknown flag": { + args: []string{"-boop"}, + want: &Test{ + Filter: nil, + TestDirectory: "tests", + ViewType: ViewHuman, + Vars: &Vars{}, + }, + wantDiags: tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + } + + cmpOpts := cmpopts.IgnoreUnexported(Operation{}, Vars{}, State{}) + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + got, diags := ParseTest(tc.args) + + if diff := cmp.Diff(tc.want, got, cmpOpts); len(diff) > 0 { + t.Errorf("diff:\n%s", diff) + } + + if !reflect.DeepEqual(diags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(diags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/pkg/command/arguments/types.go b/pkg/command/arguments/types.go new file mode 100644 index 00000000000..ddaca6b1e61 --- /dev/null +++ b/pkg/command/arguments/types.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +// ViewType represents which view layer to use for a given command. Not all +// commands will support all view types, and validation that the type is +// supported should happen in the view constructor. 
+type ViewType rune + +const ( + ViewNone ViewType = 0 + ViewHuman ViewType = 'H' + ViewJSON ViewType = 'J' + ViewRaw ViewType = 'R' +) + +func (vt ViewType) String() string { + switch vt { + case ViewNone: + return "none" + case ViewHuman: + return "human" + case ViewJSON: + return "json" + case ViewRaw: + return "raw" + default: + return "unknown" + } +} diff --git a/pkg/command/arguments/validate.go b/pkg/command/arguments/validate.go new file mode 100644 index 00000000000..7693047663b --- /dev/null +++ b/pkg/command/arguments/validate.go @@ -0,0 +1,78 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Validate represents the command-line arguments for the validate command. +type Validate struct { + // Path is the directory containing the configuration to be validated. If + // unspecified, validate will use the current directory. + Path string + + // TestDirectory is the directory containing any test files that should be + // validated alongside the main configuration. Should be relative to the + // Path. + TestDirectory string + + // NoTests indicates that OpenTofu should not validate any test files + // included with the module. + NoTests bool + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType + + Vars *Vars +} + +// ParseValidate processes CLI arguments, returning a Validate value and errors. +// If errors are encountered, a Validate value is still returned representing +// the best effort interpretation of the arguments. 
+func ParseValidate(args []string) (*Validate, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + validate := &Validate{ + Path: ".", + Vars: &Vars{}, + } + + var jsonOutput bool + cmdFlags := extendedFlagSet("validate", nil, nil, validate.Vars) + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.StringVar(&validate.TestDirectory, "test-directory", "tests", "test-directory") + cmdFlags.BoolVar(&validate.NoTests, "no-tests", false, "no-tests") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + )) + } + + if len(args) > 0 { + validate.Path = args[0] + } + + switch { + case jsonOutput: + validate.ViewType = ViewJSON + default: + validate.ViewType = ViewHuman + } + + return validate, diags +} diff --git a/pkg/command/arguments/validate_test.go b/pkg/command/arguments/validate_test.go new file mode 100644 index 00000000000..1f251ce71f6 --- /dev/null +++ b/pkg/command/arguments/validate_test.go @@ -0,0 +1,129 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestParseValidate_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Validate + }{ + "defaults": { + nil, + &Validate{ + Path: ".", + TestDirectory: "tests", + ViewType: ViewHuman, + }, + }, + "json": { + []string{"-json"}, + &Validate{ + Path: ".", + TestDirectory: "tests", + ViewType: ViewJSON, + }, + }, + "path": { + []string{"-json", "foo"}, + &Validate{ + Path: "foo", + TestDirectory: "tests", + ViewType: ViewJSON, + }, + }, + "test-directory": { + []string{"-test-directory", "other"}, + &Validate{ + Path: ".", + TestDirectory: "other", + ViewType: ViewHuman, + }, + }, + "no-tests": { + []string{"-no-tests"}, + &Validate{ + Path: ".", + TestDirectory: "tests", + ViewType: ViewHuman, + NoTests: true, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseValidate(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseValidate_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Validate + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Validate{ + Path: ".", + TestDirectory: "tests", + ViewType: ViewHuman, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "too many arguments": { + []string{"-json", "bar", "baz"}, + &Validate{ + Path: "bar", + TestDirectory: "tests", + ViewType: ViewJSON, + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Too many command line arguments", + "Expected at most one positional argument.", + ), + }, + }, + } + + 
for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseValidate(tc.args) + got.Vars = nil + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/pkg/command/arguments/view.go b/pkg/command/arguments/view.go new file mode 100644 index 00000000000..3b6e11be261 --- /dev/null +++ b/pkg/command/arguments/view.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +// View represents the global command-line arguments which configure the view. +type View struct { + // NoColor is used to disable the use of terminal color codes in all + // output. + NoColor bool + + // CompactWarnings is used to coalesce duplicate warnings, to reduce the + // level of noise when multiple instances of the same warning are raised + // for a configuration. + CompactWarnings bool + + // Concise is used to reduce the level of noise in the output and display + // only the important details. + Concise bool + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// ParseView processes CLI arguments, returning a View value and a +// possibly-modified slice of arguments. If any of the supported flags are +// found, they will be removed from the slice. +func ParseView(args []string) (*View, []string) { + common := &View{} + + // Keep track of the length of the returned slice. When we find an + // argument we support, i will not be incremented. 
+ i := 0 + for _, v := range args { + switch v { + case "-no-color": + common.NoColor = true + case "-compact-warnings": + common.CompactWarnings = true + case "-concise": + common.Concise = true + default: + // Unsupported argument: move left to the current position, and + // increment the index. + args[i] = v + i++ + } + } + + // Reduce the slice to the number of unsupported arguments. Any remaining + // to the right of i have already been moved left. + args = args[:i] + + return common, args +} diff --git a/pkg/command/arguments/view_test.go b/pkg/command/arguments/view_test.go new file mode 100644 index 00000000000..de55c556a66 --- /dev/null +++ b/pkg/command/arguments/view_test.go @@ -0,0 +1,87 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package arguments + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestParseView(t *testing.T) { + testCases := map[string]struct { + args []string + want *View + wantArgs []string + }{ + "nil": { + nil, + &View{NoColor: false, CompactWarnings: false, Concise: false}, + nil, + }, + "empty": { + []string{}, + &View{NoColor: false, CompactWarnings: false, Concise: false}, + []string{}, + }, + "none matching": { + []string{"-foo", "bar", "-baz"}, + &View{NoColor: false, CompactWarnings: false, Concise: false}, + []string{"-foo", "bar", "-baz"}, + }, + "no-color": { + []string{"-foo", "-no-color", "-baz"}, + &View{NoColor: true, CompactWarnings: false, Concise: false}, + []string{"-foo", "-baz"}, + }, + "compact-warnings": { + []string{"-foo", "-compact-warnings", "-baz"}, + &View{NoColor: false, CompactWarnings: true, Concise: false}, + []string{"-foo", "-baz"}, + }, + "concise": { + []string{"-foo", "-concise", "-baz"}, + &View{NoColor: false, CompactWarnings: false, Concise: true}, + []string{"-foo", "-baz"}, + }, + "no-color and compact-warnings": { + []string{"-foo", "-no-color", 
"-compact-warnings", "-baz"}, + &View{NoColor: true, CompactWarnings: true, Concise: false}, + []string{"-foo", "-baz"}, + }, + "no-color and concise": { + []string{"-foo", "-no-color", "-concise", "-baz"}, + &View{NoColor: true, CompactWarnings: false, Concise: true}, + []string{"-foo", "-baz"}, + }, + "concise and compact-warnings": { + []string{"-foo", "-concise", "-compact-warnings", "-baz"}, + &View{NoColor: false, CompactWarnings: true, Concise: true}, + []string{"-foo", "-baz"}, + }, + "all three": { + []string{"-foo", "-no-color", "-compact-warnings", "-concise", "-baz"}, + &View{NoColor: true, CompactWarnings: true, Concise: true}, + []string{"-foo", "-baz"}, + }, + "all three, resulting in empty args": { + []string{"-no-color", "-compact-warnings", "-concise"}, + &View{NoColor: true, CompactWarnings: true, Concise: true}, + []string{}, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotArgs := ParseView(tc.args) + if *got != *tc.want { + t.Errorf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !cmp.Equal(gotArgs, tc.wantArgs) { + t.Errorf("unexpected args\n got: %#v\nwant: %#v", gotArgs, tc.wantArgs) + } + }) + } +} diff --git a/pkg/command/autocomplete.go b/pkg/command/autocomplete.go new file mode 100644 index 00000000000..917236d266e --- /dev/null +++ b/pkg/command/autocomplete.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "github.com/posener/complete" +) + +// This file contains some re-usable predictors for auto-complete. The +// command-specific autocomplete configurations live within each command's +// own source file, as AutocompleteArgs and AutocompleteFlags methods on each +// Command implementation. 
+ +// For completing the value of boolean flags like -foo false +var completePredictBoolean = complete.PredictSet("true", "false") + +// We don't currently have a real predictor for module sources, but +// we'll probably add one later. +var completePredictModuleSource = complete.PredictAnything + +type completePredictSequence []complete.Predictor + +func (s completePredictSequence) Predict(a complete.Args) []string { + // Nested subcommands do not require any placeholder entry for their subcommand name. + idx := len(a.Completed) + if idx >= len(s) { + return nil + } + + return s[idx].Predict(a) +} + +func (m *Meta) completePredictWorkspaceName() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + // There are lot of things that can fail in here, so if we encounter + // any error then we'll just return nothing and not support autocomplete + // until whatever error is fixed. (The user can't actually see the error + // here, but other commands should produce a user-visible error before + // too long.) + + // We assume here that we want to autocomplete for the current working + // directory, since we don't have enough context to know where to + // find any config path argument, and it might be _after_ the argument + // we're trying to complete here anyway. 
+ configPath, err := modulePath(nil) + if err != nil { + return nil + } + + backendConfig, diags := m.loadBackendConfig(configPath) + if diags.HasErrors() { + return nil + } + + // Load the encryption configuration + enc, encDiags := m.Encryption() + if encDiags.HasErrors() { + return nil + } + + b, diags := m.Backend(&BackendOpts{ + Config: backendConfig, + }, enc.State()) + if diags.HasErrors() { + return nil + } + + names, _ := b.Workspaces() + return names + }) +} diff --git a/pkg/command/autocomplete_test.go b/pkg/command/autocomplete_test.go new file mode 100644 index 00000000000..2bb1f945dcf --- /dev/null +++ b/pkg/command/autocomplete_test.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "reflect" + "testing" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +func TestMetaCompletePredictWorkspaceName(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // make sure a vars file doesn't interfere + err := os.WriteFile(DefaultVarsFilename, nil, 0644) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + meta := &Meta{Ui: ui} + + predictor := meta.completePredictWorkspaceName() + + got := predictor.Predict(complete.Args{ + Last: "", + }) + want := []string{"default"} + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} diff --git a/pkg/command/cli_ui.go b/pkg/command/cli_ui.go new file mode 100644 index 00000000000..e3ebcb5939e --- /dev/null +++ b/pkg/command/cli_ui.go @@ -0,0 +1,56 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" +) + +// ColorizeUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColorizeUi struct { + Colorize *colorstring.Colorize + OutputColor string + InfoColor string + ErrorColor string + WarnColor string + Ui cli.Ui +} + +func (u *ColorizeUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColorizeUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColorizeUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColorizeUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColorizeUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColorizeUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColorizeUi) colorize(message string, color string) string { + if color == "" { + return message + } + + return u.Colorize.Color(fmt.Sprintf("%s%s[reset]", color, message)) +} diff --git a/pkg/command/cli_ui_test.go b/pkg/command/cli_ui_test.go new file mode 100644 index 00000000000..240ee220884 --- /dev/null +++ b/pkg/command/cli_ui_test.go @@ -0,0 +1,16 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "testing" + + "github.com/mitchellh/cli" +) + +func TestColorizeUi_impl(t *testing.T) { + var _ cli.Ui = new(ColorizeUi) +} diff --git a/pkg/command/cliconfig/cliconfig.go b/pkg/command/cliconfig/cliconfig.go new file mode 100644 index 00000000000..6e40e4dd80e --- /dev/null +++ b/pkg/command/cliconfig/cliconfig.go @@ -0,0 +1,461 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package cliconfig has the types representing and the logic to load CLI-level +// configuration settings. +// +// The CLI config is a small collection of settings that a user can override via +// some files in their home directory or, in some cases, via environment +// variables. The CLI config is not the same thing as a OpenTofu configuration +// written in the Terraform language; the logic for those lives in the top-level +// directory "configs". +package cliconfig + +import ( + "errors" + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl" + + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const pluginCacheDirEnvVar = "TF_PLUGIN_CACHE_DIR" +const pluginCacheMayBreakLockFileEnvVar = "TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE" + +// Config is the structure of the configuration for the OpenTofu CLI. +// +// This is not the configuration for OpenTofu itself. That is in the +// "config" package. +type Config struct { + Providers map[string]string + Provisioners map[string]string + + // If set, enables local caching of plugins in this directory to + // avoid repeatedly re-downloading over the Internet. 
+ PluginCacheDir string `hcl:"plugin_cache_dir"` + + // PluginCacheMayBreakDependencyLockFile is an interim accommodation for + // those who wish to use the Plugin Cache Dir even in cases where doing so + // will cause the dependency lock file to be incomplete. + // + // This is likely to become a silent no-op in future OpenTofu versions but + // is here in recognition of the fact that the dependency lock file is not + // yet a good fit for all OpenTofu workflows and folks in that category + // would prefer to have the plugin cache dir's behavior to take priority + // over the requirements of the dependency lock file. + PluginCacheMayBreakDependencyLockFile bool `hcl:"plugin_cache_may_break_dependency_lock_file"` + + Hosts map[string]*ConfigHost `hcl:"host"` + + Credentials map[string]map[string]interface{} `hcl:"credentials"` + CredentialsHelpers map[string]*ConfigCredentialsHelper `hcl:"credentials_helper"` + + // ProviderInstallation represents any provider_installation blocks + // in the configuration. Only one of these is allowed across the whole + // configuration, but we decode into a slice here so that we can handle + // that validation at validation time rather than initial decode time. + ProviderInstallation []*ProviderInstallation +} + +// ConfigHost is the structure of the "host" nested block within the CLI +// configuration, which can be used to override the default service host +// discovery behavior for a particular hostname. +type ConfigHost struct { + Services map[string]interface{} `hcl:"services"` +} + +// ConfigCredentialsHelper is the structure of the "credentials_helper" +// nested block within the CLI configuration. +type ConfigCredentialsHelper struct { + Args []string `hcl:"args"` +} + +// BuiltinConfig is the built-in defaults for the configuration. These +// can be overridden by user configurations. +var BuiltinConfig Config + +// ConfigFile returns the default path to the configuration file. 
+// +// On Unix-like systems this is the ".tofurc" file in the home directory. +// On Windows, this is the "tofu.rc" file in the application data +// directory. +func ConfigFile() (string, error) { + return configFile() +} + +// ConfigDir returns the configuration directory for OpenTofu. +func ConfigDir() (string, error) { + return configDir() +} + +// DataDirs returns the data directories for OpenTofu. +func DataDirs() ([]string, error) { + return dataDirs() +} + +// LoadConfig reads the CLI configuration from the various filesystem locations +// and from the environment, returning a merged configuration along with any +// diagnostics (errors and warnings) encountered along the way. +func LoadConfig() (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + configVal := BuiltinConfig // copy + config := &configVal + + if mainFilename, mainFileDiags := cliConfigFile(); len(mainFileDiags) == 0 { + if _, err := os.Stat(mainFilename); err == nil { + mainConfig, mainDiags := loadConfigFile(mainFilename) + diags = diags.Append(mainDiags) + config = config.Merge(mainConfig) + } + } else { + diags = diags.Append(mainFileDiags) + } + + // Unless the user has specifically overridden the configuration file + // location using an environment variable, we'll also load what we find + // in the config directory. We skip the config directory when source + // file override is set because we interpret the environment variable + // being set as an intention to ignore the default set of CLI config + // files because we're doing something special, like running OpenTofu + // in automation with a locally-customized configuration. 
+ if cliConfigFileOverride() == "" { + if configDir, err := ConfigDir(); err == nil { + if info, err := os.Stat(configDir); err == nil && info.IsDir() { + dirConfig, dirDiags := loadConfigDir(configDir) + diags = diags.Append(dirDiags) + config = config.Merge(dirConfig) + } + } + } else { + log.Printf("[DEBUG] Not reading CLI config directory because config location is overridden by environment variable") + } + + if envConfig := EnvConfig(); envConfig != nil { + // envConfig takes precedence + config = envConfig.Merge(config) + } + + diags = diags.Append(config.Validate()) + + return config, diags +} + +// loadConfigFile loads the CLI configuration from ".tofurc" files. +func loadConfigFile(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + log.Printf("Loading CLI configuration from %s", path) + + // Read the HCL file and prepare for parsing + d, err := os.ReadFile(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error reading %s: %w", path, err)) + return result, diags + } + + // Parse it + obj, err := hcl.Parse(string(d)) + if err != nil { + diags = diags.Append(fmt.Errorf("Error parsing %s: %w", path, err)) + return result, diags + } + + // Build up the result + if err := hcl.DecodeObject(&result, obj); err != nil { + diags = diags.Append(fmt.Errorf("Error parsing %s: %w", path, err)) + return result, diags + } + + // Deal with the provider_installation block, which is not handled using + // DecodeObject because its structure is not compatible with the + // limitations of that function. 
+ providerInstBlocks, moreDiags := decodeProviderInstallationFromConfig(obj) + diags = diags.Append(moreDiags) + result.ProviderInstallation = providerInstBlocks + + // Replace all env vars + for k, v := range result.Providers { + result.Providers[k] = os.ExpandEnv(v) + } + for k, v := range result.Provisioners { + result.Provisioners[k] = os.ExpandEnv(v) + } + + if result.PluginCacheDir != "" { + result.PluginCacheDir = os.ExpandEnv(result.PluginCacheDir) + } + + return result, diags +} + +func loadConfigDir(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + entries, err := os.ReadDir(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error reading %s: %w", path, err)) + return result, diags + } + + for _, entry := range entries { + name := entry.Name() + // Ignoring errors here because it is used only to indicate pattern + // syntax errors, and our patterns are hard-coded here. + hclMatched, _ := filepath.Match("*.tfrc", name) + jsonMatched, _ := filepath.Match("*.tfrc.json", name) + if !(hclMatched || jsonMatched) { + continue + } + + filePath := filepath.Join(path, name) + fileConfig, fileDiags := loadConfigFile(filePath) + diags = diags.Append(fileDiags) + result = result.Merge(fileConfig) + } + + return result, diags +} + +// EnvConfig returns a Config populated from environment variables. +// +// Any values specified in this config should override those set in the +// configuration file. +func EnvConfig() *Config { + env := makeEnvMap(os.Environ()) + return envConfig(env) +} + +func envConfig(env map[string]string) *Config { + config := &Config{} + + if envPluginCacheDir := env[pluginCacheDirEnvVar]; envPluginCacheDir != "" { + // No Expandenv here, because expanding environment variables inside + // an environment variable would be strange and seems unnecessary. + // (User can expand variables into the value while setting it using + // standard shell features.) 
+ config.PluginCacheDir = envPluginCacheDir + } + + if envMayBreak := env[pluginCacheMayBreakLockFileEnvVar]; envMayBreak != "" && envMayBreak != "0" { + // This is an environment variable analog to the + // plugin_cache_may_break_dependency_lock_file setting. If either this + // or the config file setting are enabled then it's enabled; there is + // no way to override back to false if either location sets this to + // true. + config.PluginCacheMayBreakDependencyLockFile = true + } + + return config +} + +func makeEnvMap(environ []string) map[string]string { + if len(environ) == 0 { + return nil + } + + ret := make(map[string]string, len(environ)) + for _, entry := range environ { + eq := strings.IndexByte(entry, '=') + if eq == -1 { + continue + } + ret[entry[:eq]] = entry[eq+1:] + } + return ret +} + +// Validate checks for errors in the configuration that cannot be detected +// just by HCL decoding, returning any problems as diagnostics. +// +// On success, the returned diagnostics will return false from the HasErrors +// method. A non-nil diagnostics is not necessarily an error, since it may +// contain just warnings. +func (c *Config) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if c == nil { + return diags + } + + // FIXME: Right now our config parsing doesn't retain enough information + // to give proper source references to any errors. We should improve + // on this when we change the CLI config parser to use HCL2. + + // Check that all "host" blocks have valid hostnames. + for givenHost := range c.Hosts { + _, err := svchost.ForComparison(givenHost) + if err != nil { + diags = diags.Append( + fmt.Errorf("The host %q block has an invalid hostname: %w", givenHost, err), + ) + } + } + + // Check that all "credentials" blocks have valid hostnames. 
+ for givenHost := range c.Credentials { + _, err := svchost.ForComparison(givenHost) + if err != nil { + diags = diags.Append( + fmt.Errorf("The credentials %q block has an invalid hostname: %w", givenHost, err), + ) + } + } + + // Should have zero or one "credentials_helper" blocks + if len(c.CredentialsHelpers) > 1 { + diags = diags.Append( + fmt.Errorf("No more than one credentials_helper block may be specified"), + ) + } + + // Should have zero or one "provider_installation" blocks + if len(c.ProviderInstallation) > 1 { + diags = diags.Append( + fmt.Errorf("No more than one provider_installation block may be specified"), + ) + } + + if c.PluginCacheDir != "" { + _, err := os.Stat(c.PluginCacheDir) + if err != nil { + diags = diags.Append( + fmt.Errorf("The specified plugin cache dir %s cannot be opened: %w", c.PluginCacheDir, err), + ) + } + } + + return diags +} + +// Merge merges two configurations and returns a third entirely +// new configuration with the two merged. +func (c *Config) Merge(c2 *Config) *Config { + var result Config + result.Providers = make(map[string]string) + result.Provisioners = make(map[string]string) + for k, v := range c.Providers { + result.Providers[k] = v + } + for k, v := range c2.Providers { + if v1, ok := c.Providers[k]; ok { + log.Printf("[INFO] Local %s provider configuration '%s' overrides '%s'", k, v, v1) + } + result.Providers[k] = v + } + for k, v := range c.Provisioners { + result.Provisioners[k] = v + } + for k, v := range c2.Provisioners { + if v1, ok := c.Provisioners[k]; ok { + log.Printf("[INFO] Local %s provisioner configuration '%s' overrides '%s'", k, v, v1) + } + result.Provisioners[k] = v + } + + result.PluginCacheDir = c.PluginCacheDir + if result.PluginCacheDir == "" { + result.PluginCacheDir = c2.PluginCacheDir + } + + if c.PluginCacheMayBreakDependencyLockFile || c2.PluginCacheMayBreakDependencyLockFile { + // This setting saturates to "on"; once either configuration sets it, + // there is no way to 
override it back to off again. + result.PluginCacheMayBreakDependencyLockFile = true + } + + if (len(c.Hosts) + len(c2.Hosts)) > 0 { + result.Hosts = make(map[string]*ConfigHost) + for name, host := range c.Hosts { + result.Hosts[name] = host + } + for name, host := range c2.Hosts { + result.Hosts[name] = host + } + } + + if (len(c.Credentials) + len(c2.Credentials)) > 0 { + result.Credentials = make(map[string]map[string]interface{}) + for host, creds := range c.Credentials { + result.Credentials[host] = creds + } + for host, creds := range c2.Credentials { + // We just clobber an entry from the other file right now. Will + // improve on this later using the more-robust merging behavior + // built in to HCL2. + result.Credentials[host] = creds + } + } + + if (len(c.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 { + result.CredentialsHelpers = make(map[string]*ConfigCredentialsHelper) + for name, helper := range c.CredentialsHelpers { + result.CredentialsHelpers[name] = helper + } + for name, helper := range c2.CredentialsHelpers { + result.CredentialsHelpers[name] = helper + } + } + + if (len(c.ProviderInstallation) + len(c2.ProviderInstallation)) > 0 { + result.ProviderInstallation = append(result.ProviderInstallation, c.ProviderInstallation...) + result.ProviderInstallation = append(result.ProviderInstallation, c2.ProviderInstallation...) 
+ } + + return &result +} + +func cliConfigFile() (string, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + mustExist := true + + configFilePath := cliConfigFileOverride() + if configFilePath == "" { + var err error + configFilePath, err = ConfigFile() + mustExist = false + + if err != nil { + log.Printf( + "[ERROR] Error detecting default CLI config file path: %s", + err) + } + } + + log.Printf("[DEBUG] Attempting to open CLI config file: %s", configFilePath) + f, err := os.Open(configFilePath) + if err == nil { + f.Close() + return configFilePath, diags + } + + if mustExist || !errors.Is(err, fs.ErrNotExist) { + diags = append(diags, tfdiags.Sourceless( + tfdiags.Warning, + "Unable to open CLI configuration file", + fmt.Sprintf("The CLI configuration file at %q does not exist.", configFilePath), + )) + } + + log.Println("[DEBUG] File doesn't exist, but doesn't need to. Ignoring.") + return "", diags +} + +func cliConfigFileOverride() string { + configFilePath := os.Getenv("TF_CLI_CONFIG_FILE") + if configFilePath == "" { + configFilePath = os.Getenv("TERRAFORM_CONFIG") + } + return configFilePath +} diff --git a/pkg/command/cliconfig/cliconfig_test.go b/pkg/command/cliconfig/cliconfig_test.go new file mode 100644 index 00000000000..2c07663f457 --- /dev/null +++ b/pkg/command/cliconfig/cliconfig_test.go @@ -0,0 +1,513 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import ( + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// This is the directory where our test fixtures are. 
+const fixtureDir = "./testdata" + +func TestLoadConfig(t *testing.T) { + c, err := loadConfigFile(filepath.Join(fixtureDir, "config")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Providers: map[string]string{ + "aws": "foo", + "do": "bar", + }, + } + + if !reflect.DeepEqual(c, expected) { + t.Fatalf("bad: %#v", c) + } +} + +func TestLoadConfig_envSubst(t *testing.T) { + t.Setenv("TFTEST", "hello") + + c, err := loadConfigFile(filepath.Join(fixtureDir, "config-env")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Providers: map[string]string{ + "aws": "hello", + "google": "bar", + }, + Provisioners: map[string]string{ + "local": "hello", + }, + } + + if !reflect.DeepEqual(c, expected) { + t.Fatalf("bad: %#v", c) + } +} + +func TestLoadConfig_non_existing_file(t *testing.T) { + tmpDir := os.TempDir() + cliTmpFile := filepath.Join(tmpDir, "dev.tfrc") + + t.Setenv("TF_CLI_CONFIG_FILE", cliTmpFile) + + c, errs := LoadConfig() + if errs.HasErrors() || c.Validate().HasErrors() { + t.Fatalf("err: %s", errs) + } + + hasOpenFileWarn := false + for _, err := range errs { + if err.Severity() == tfdiags.Warning && err.Description().Summary == "Unable to open CLI configuration file" { + hasOpenFileWarn = true + break + } + } + + if !hasOpenFileWarn { + t.Fatal("expecting a warning message because of nonexisting CLI configuration file") + } +} + +func TestEnvConfig(t *testing.T) { + tests := map[string]struct { + env map[string]string + want *Config + }{ + "no environment variables": { + nil, + &Config{}, + }, + "TF_PLUGIN_CACHE_DIR=boop": { + map[string]string{ + "TF_PLUGIN_CACHE_DIR": "boop", + }, + &Config{ + PluginCacheDir: "boop", + }, + }, + "TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE=anything_except_zero": { + map[string]string{ + "TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE": "anything_except_zero", + }, + &Config{ + PluginCacheMayBreakDependencyLockFile: true, + }, + }, + 
"TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE=0": { + map[string]string{ + "TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE": "0", + }, + &Config{}, + }, + "TF_PLUGIN_CACHE_DIR and TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE": { + map[string]string{ + "TF_PLUGIN_CACHE_DIR": "beep", + "TF_PLUGIN_CACHE_MAY_BREAK_DEPENDENCY_LOCK_FILE": "1", + }, + &Config{ + PluginCacheDir: "beep", + PluginCacheMayBreakDependencyLockFile: true, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := envConfig(test.env) + want := test.want + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestMakeEnvMap(t *testing.T) { + tests := map[string]struct { + environ []string + want map[string]string + }{ + "nil": { + nil, + nil, + }, + "one": { + []string{ + "FOO=bar", + }, + map[string]string{ + "FOO": "bar", + }, + }, + "many": { + []string{ + "FOO=1", + "BAR=2", + "BAZ=3", + }, + map[string]string{ + "FOO": "1", + "BAR": "2", + "BAZ": "3", + }, + }, + "conflict": { + []string{ + "FOO=1", + "BAR=1", + "FOO=2", + }, + map[string]string{ + "BAR": "1", + "FOO": "2", // Last entry of each name wins + }, + }, + "empty_val": { + []string{ + "FOO=", + }, + map[string]string{ + "FOO": "", + }, + }, + "no_equals": { + []string{ + "FOO=bar", + "INVALID", + }, + map[string]string{ + "FOO": "bar", + }, + }, + "multi_equals": { + []string{ + "FOO=bar=baz=boop", + }, + map[string]string{ + "FOO": "bar=baz=boop", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := makeEnvMap(test.environ) + want := test.want + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } + +} + +func TestLoadConfig_hosts(t *testing.T) { + got, diags := loadConfigFile(filepath.Join(fixtureDir, "hosts")) + if len(diags) != 0 { + t.Fatalf("%s", diags.Err()) + } + + want := &Config{ + Hosts: map[string]*ConfigHost{ + "example.com": { + Services: 
map[string]interface{}{ + "modules.v1": "https://example.com/", + }, + }, + }, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} + +func TestLoadConfig_credentials(t *testing.T) { + got, err := loadConfigFile(filepath.Join(fixtureDir, "credentials")) + if err != nil { + t.Fatal(err) + } + + want := &Config{ + Credentials: map[string]map[string]interface{}{ + "example.com": map[string]interface{}{ + "token": "foo the bar baz", + }, + "example.net": map[string]interface{}{ + "username": "foo", + "password": "baz", + }, + }, + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "foo": &ConfigCredentialsHelper{ + Args: []string{"bar", "baz"}, + }, + }, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} + +func TestConfigValidate(t *testing.T) { + tests := map[string]struct { + Config *Config + DiagCount int + }{ + "nil": { + nil, + 0, + }, + "empty": { + &Config{}, + 0, + }, + "host good": { + &Config{ + Hosts: map[string]*ConfigHost{ + "example.com": {}, + }, + }, + 0, + }, + "host with bad hostname": { + &Config{ + Hosts: map[string]*ConfigHost{ + "example..com": {}, + }, + }, + 1, // host block has invalid hostname + }, + "credentials good": { + &Config{ + Credentials: map[string]map[string]interface{}{ + "example.com": map[string]interface{}{ + "token": "foo", + }, + }, + }, + 0, + }, + "credentials with bad hostname": { + &Config{ + Credentials: map[string]map[string]interface{}{ + "example..com": map[string]interface{}{ + "token": "foo", + }, + }, + }, + 1, // credentials block has invalid hostname + }, + "credentials helper good": { + &Config{ + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "foo": {}, + }, + }, + 0, + }, + "credentials helper too many": { + &Config{ + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "foo": {}, + "bar": {}, + }, + }, + 1, // no more than one 
credentials_helper block allowed + }, + "provider_installation good none": { + &Config{ + ProviderInstallation: nil, + }, + 0, + }, + "provider_installation good one": { + &Config{ + ProviderInstallation: []*ProviderInstallation{ + {}, + }, + }, + 0, + }, + "provider_installation too many": { + &Config{ + ProviderInstallation: []*ProviderInstallation{ + {}, + {}, + }, + }, + 1, // no more than one provider_installation block allowed + }, + "plugin_cache_dir does not exist": { + &Config{ + PluginCacheDir: "fake", + }, + 1, // The specified plugin cache dir %s cannot be opened + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + diags := test.Config.Validate() + if len(diags) != test.DiagCount { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount) + for _, diag := range diags { + t.Logf("- %#v", diag.Description()) + } + } + }) + } +} + +func TestConfig_Merge(t *testing.T) { + c1 := &Config{ + Providers: map[string]string{ + "foo": "bar", + "bar": "blah", + }, + Provisioners: map[string]string{ + "local": "local", + "remote": "bad", + }, + Hosts: map[string]*ConfigHost{ + "example.com": { + Services: map[string]interface{}{ + "modules.v1": "http://example.com/", + }, + }, + }, + Credentials: map[string]map[string]interface{}{ + "foo": { + "bar": "baz", + }, + }, + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "buz": {}, + }, + ProviderInstallation: []*ProviderInstallation{ + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("a")}, + {Location: ProviderInstallationFilesystemMirror("b")}, + }, + }, + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("c")}, + }, + }, + }, + } + + c2 := &Config{ + Providers: map[string]string{ + "bar": "baz", + "baz": "what", + }, + Provisioners: map[string]string{ + "remote": "remote", + }, + Hosts: map[string]*ConfigHost{ + "example.net": { + Services: map[string]interface{}{ + 
"modules.v1": "https://example.net/", + }, + }, + }, + Credentials: map[string]map[string]interface{}{ + "fee": { + "bur": "bez", + }, + }, + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "biz": {}, + }, + ProviderInstallation: []*ProviderInstallation{ + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("d")}, + }, + }, + }, + PluginCacheMayBreakDependencyLockFile: true, + } + + expected := &Config{ + Providers: map[string]string{ + "foo": "bar", + "bar": "baz", + "baz": "what", + }, + Provisioners: map[string]string{ + "local": "local", + "remote": "remote", + }, + Hosts: map[string]*ConfigHost{ + "example.com": { + Services: map[string]interface{}{ + "modules.v1": "http://example.com/", + }, + }, + "example.net": { + Services: map[string]interface{}{ + "modules.v1": "https://example.net/", + }, + }, + }, + Credentials: map[string]map[string]interface{}{ + "foo": { + "bar": "baz", + }, + "fee": { + "bur": "bez", + }, + }, + CredentialsHelpers: map[string]*ConfigCredentialsHelper{ + "buz": {}, + "biz": {}, + }, + ProviderInstallation: []*ProviderInstallation{ + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("a")}, + {Location: ProviderInstallationFilesystemMirror("b")}, + }, + }, + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("c")}, + }, + }, + { + Methods: []*ProviderInstallationMethod{ + {Location: ProviderInstallationFilesystemMirror("d")}, + }, + }, + }, + PluginCacheMayBreakDependencyLockFile: true, + } + + actual := c1.Merge(c2) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} diff --git a/pkg/command/cliconfig/config_unix.go b/pkg/command/cliconfig/config_unix.go new file mode 100644 index 00000000000..d68d288209f --- /dev/null +++ b/pkg/command/cliconfig/config_unix.go @@ -0,0 +1,90 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: 
MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cliconfig + +import ( + "errors" + "os" + "os/user" + "path/filepath" +) + +func configFile() (string, error) { + dir, err := homeDir() + if err != nil { + return "", err + } + + newConfigFile := filepath.Join(dir, ".tofurc") + legacyConfigFile := filepath.Join(dir, ".terraformrc") + + if xdgDir := os.Getenv("XDG_CONFIG_HOME"); xdgDir != "" && !pathExists(legacyConfigFile) && !pathExists(newConfigFile) { + // a fresh install should not use terraform naming + return filepath.Join(xdgDir, "opentofu", "tofurc"), nil + } + + return getNewOrLegacyPath(newConfigFile, legacyConfigFile) +} + +func configDir() (string, error) { + dir, err := homeDir() + if err != nil { + return "", err + } + + configDir := filepath.Join(dir, ".terraform.d") + if xdgDir := os.Getenv("XDG_CONFIG_HOME"); !pathExists(configDir) && xdgDir != "" { + configDir = filepath.Join(xdgDir, "opentofu") + } + + return configDir, nil +} + +func dataDirs() ([]string, error) { + dir, err := homeDir() + if err != nil { + return nil, err + } + + dirs := []string{filepath.Join(dir, ".terraform.d")} + if xdgDir := os.Getenv("XDG_DATA_HOME"); xdgDir != "" { + dirs = append(dirs, filepath.Join(xdgDir, "opentofu")) + } + + return dirs, nil +} + +func homeDir() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + // FIXME: homeDir gets called from globalPluginDirs during init, before + // the logging is set up. We should move meta initializtion outside of + // init, but in the meantime we just need to silence this output. 
+ // log.Printf("[DEBUG] Detected home directory from env var: %s", home) + + return home, nil + } + + // If that fails, try build-in module + user, err := user.Current() + if err != nil { + return "", err + } + + if user.HomeDir == "" { + return "", errors.New("blank output") + } + + return user.HomeDir, nil +} + +func pathExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} diff --git a/pkg/command/cliconfig/config_unix_test.go b/pkg/command/cliconfig/config_unix_test.go new file mode 100644 index 00000000000..4f76c133cb6 --- /dev/null +++ b/pkg/command/cliconfig/config_unix_test.go @@ -0,0 +1,153 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package cliconfig + +import ( + "os" + "path/filepath" + "slices" + "testing" +) + +func TestConfigFileConfigDir(t *testing.T) { + homeDir := filepath.Join(t.TempDir(), "home") + + tests := []struct { + name string + xdgConfigHome string + files []string + testFunc func() (string, error) + expect string + }{ + { + name: "configFile: use home tofurc", + testFunc: configFile, + files: []string{filepath.Join(homeDir, ".tofurc")}, + expect: filepath.Join(homeDir, ".tofurc"), + }, + { + name: "configFile: use home terraformrc", + testFunc: configFile, + files: []string{filepath.Join(homeDir, ".terraformrc")}, + expect: filepath.Join(homeDir, ".terraformrc"), + }, + { + name: "configFile: use default fallback", + testFunc: configFile, + expect: filepath.Join(homeDir, ".tofurc"), + }, + { + name: "configFile: use XDG tofurc", + testFunc: configFile, + xdgConfigHome: filepath.Join(homeDir, "xdg"), + expect: filepath.Join(homeDir, "xdg", "opentofu", "tofurc"), + }, + { + name: "configFile: prefer home tofurc", + testFunc: configFile, + xdgConfigHome: filepath.Join(homeDir, "xdg"), + files: []string{filepath.Join(homeDir, ".tofurc")}, + expect: 
filepath.Join(homeDir, ".tofurc"), + }, + { + name: "configFile: prefer home terraformrc", + testFunc: configFile, + xdgConfigHome: filepath.Join(homeDir, "xdg"), + files: []string{filepath.Join(homeDir, ".terraformrc")}, + expect: filepath.Join(homeDir, ".terraformrc"), + }, + { + name: "configDir: use .terraform.d default", + testFunc: configDir, + expect: filepath.Join(homeDir, ".terraform.d"), + }, + { + name: "configDir: prefer .terraform.d", + testFunc: configDir, + xdgConfigHome: filepath.Join(homeDir, "xdg"), + files: []string{filepath.Join(homeDir, ".terraform.d", "placeholder")}, + expect: filepath.Join(homeDir, ".terraform.d"), + }, + { + name: "configDir: use XDG value", + testFunc: configDir, + xdgConfigHome: filepath.Join(homeDir, "xdg"), + expect: filepath.Join(homeDir, "xdg", "opentofu"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Setenv("HOME", homeDir) + t.Setenv("XDG_CONFIG_HOME", test.xdgConfigHome) + for _, f := range test.files { + createFile(t, f) + } + + file, err := test.testFunc() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if test.expect != file { + t.Fatalf("expected %q, but got %q", test.expect, file) + } + }) + } +} + +func TestDataDirs(t *testing.T) { + homeDir := filepath.Join(t.TempDir(), "home") + + tests := []struct { + name string + xdgDataHome string + expect []string + }{ + { + name: "use XDG data dir", + xdgDataHome: filepath.Join(homeDir, "xdg"), + expect: []string{ + filepath.Join(homeDir, ".terraform.d"), + filepath.Join(homeDir, "xdg", "opentofu"), + }, + }, + { + name: "use default", + expect: []string{ + filepath.Join(homeDir, ".terraform.d"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Setenv("HOME", homeDir) + t.Setenv("XDG_DATA_HOME", test.xdgDataHome) + + dirs, err := dataDirs() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !slices.Equal(test.expect, dirs) { + t.Fatalf("expected %+v, but 
got %+v", test.expect, dirs) + } + }) + } +} + +func createFile(t *testing.T, path string) { + t.Helper() + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(path, nil, 0o600); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { _ = os.RemoveAll(filepath.Dir(path)) }) +} diff --git a/pkg/command/cliconfig/config_windows.go b/pkg/command/cliconfig/config_windows.go new file mode 100644 index 00000000000..2a62f48bd97 --- /dev/null +++ b/pkg/command/cliconfig/config_windows.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package cliconfig + +import ( + "path/filepath" + "syscall" + "unsafe" +) + +var ( + shell = syscall.MustLoadDLL("Shell32.dll") + getFolderPath = shell.MustFindProc("SHGetFolderPathW") +) + +const CSIDL_APPDATA = 26 + +func configFile() (string, error) { + dir, err := homeDir() + if err != nil { + return "", err + } + + newConfigFile := filepath.Join(dir, "terraform.rc") + oldConfigFile := filepath.Join(dir, "tofu.rc") + + return getNewOrLegacyPath(newConfigFile, oldConfigFile) +} + +func configDir() (string, error) { + dir, err := homeDir() + if err != nil { + return "", err + } + + return filepath.Join(dir, "terraform.d"), nil +} + +func dataDirs() ([]string, error) { + dir, err := configDir() + if err != nil { + return nil, err + } + return []string{dir}, nil +} + +func homeDir() (string, error) { + b := make([]uint16, syscall.MAX_PATH) + + // See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb762181(v=vs.85).aspx + r, _, err := getFolderPath.Call(0, CSIDL_APPDATA, 0, 0, uintptr(unsafe.Pointer(&b[0]))) + if uint32(r) != 0 { + return "", err + } + + return syscall.UTF16ToString(b), nil +} diff --git a/pkg/command/cliconfig/credentials.go b/pkg/command/cliconfig/credentials.go new file mode 100644 index 
00000000000..7e280ff5bbc --- /dev/null +++ b/pkg/command/cliconfig/credentials.go @@ -0,0 +1,530 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + svchost "github.com/hashicorp/terraform-svchost" + svcauth "github.com/hashicorp/terraform-svchost/auth" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + pluginDiscovery "github.com/kubegems/opentofu/pkg/plugin/discovery" + "github.com/kubegems/opentofu/pkg/replacefile" +) + +// credentialsConfigFile returns the path for the special configuration file +// that the credentials source will use when asked to save or forget credentials +// and when a "credentials helper" program is not active. +func credentialsConfigFile() (string, error) { + configDir, err := ConfigDir() + if err != nil { + return "", err + } + return filepath.Join(configDir, "credentials.tfrc.json"), nil +} + +// CredentialsSource creates and returns a service credentials source whose +// behavior depends on which "credentials" and "credentials_helper" blocks, +// if any, are present in the receiving config. +func (c *Config) CredentialsSource(helperPlugins pluginDiscovery.PluginMetaSet) (*CredentialsSource, error) { + credentialsFilePath, err := credentialsConfigFile() + if err != nil { + // If we managed to load a Config object at all then we would already + // have located this file, so this error is very unlikely. 
+ return nil, fmt.Errorf("can't locate credentials file: %w", err) + } + + var helper svcauth.CredentialsSource + var helperType string + for givenType, givenConfig := range c.CredentialsHelpers { + available := helperPlugins.WithName(givenType) + if available.Count() == 0 { + log.Printf("[ERROR] Unable to find credentials helper %q; ignoring", givenType) + break + } + + selected := available.Newest() + + helperSource := svcauth.HelperProgramCredentialsSource(selected.Path, givenConfig.Args...) + helper = svcauth.CachingCredentialsSource(helperSource) // cached because external operation may be slow/expensive + helperType = givenType + + // There should only be zero or one "credentials_helper" blocks. We + // assume that the config was validated earlier and so we don't check + // for extras here. + break + } + + return c.credentialsSource(helperType, helper, credentialsFilePath), nil +} + +// EmptyCredentialsSourceForTests constructs a CredentialsSource with +// no credentials pre-loaded and which writes new credentials to a file +// at the given path. +// +// As the name suggests, this function is here only for testing and should not +// be used in normal application code. +func EmptyCredentialsSourceForTests(credentialsFilePath string) *CredentialsSource { + cfg := &Config{} + return cfg.credentialsSource("", nil, credentialsFilePath) +} + +// credentialsSource is an internal factory for the credentials source which +// allows overriding the credentials file path, which allows setting it to +// a temporary file location when testing. +func (c *Config) credentialsSource(helperType string, helper svcauth.CredentialsSource, credentialsFilePath string) *CredentialsSource { + configured := map[svchost.Hostname]cty.Value{} + for userHost, creds := range c.Credentials { + host, err := svchost.ForComparison(userHost) + if err != nil { + // We expect the config was already validated by the time we get + // here, so we'll just ignore invalid hostnames. 
+ continue + } + + // For now our CLI config continues to use HCL 1.0, so we'll shim it + // over to HCL 2.0 types. In future we will hopefully migrate it to + // HCL 2.0 instead, and so it'll be a cty.Value already. + credsV := hcl2shim.HCL2ValueFromConfigValue(creds) + configured[host] = credsV + } + + writableLocal := readHostsInCredentialsFile(credentialsFilePath) + unwritableLocal := map[svchost.Hostname]cty.Value{} + for host, v := range configured { + if _, exists := writableLocal[host]; !exists { + unwritableLocal[host] = v + } + } + + return &CredentialsSource{ + configured: configured, + unwritable: unwritableLocal, + credentialsFilePath: credentialsFilePath, + helper: helper, + helperType: helperType, + } +} + +func collectCredentialsFromEnv() map[svchost.Hostname]string { + const prefix = "TF_TOKEN_" + + ret := make(map[svchost.Hostname]string) + for _, ev := range os.Environ() { + eqIdx := strings.Index(ev, "=") + if eqIdx < 0 { + continue + } + name := ev[:eqIdx] + value := ev[eqIdx+1:] + if !strings.HasPrefix(name, prefix) { + continue + } + rawHost := name[len(prefix):] + + // We accept double underscores in place of hyphens because hyphens are not valid + // identifiers in most shells and are therefore hard to set. + // This is unambiguous with replacing single underscores below because + // hyphens are not allowed at the beginning or end of a label and therefore + // odd numbers of underscores will not appear together in a valid variable name. + rawHost = strings.ReplaceAll(rawHost, "__", "-") + + // We accept underscores in place of dots because dots are not valid + // identifiers in most shells and are therefore hard to set. + // Underscores are not valid in hostnames, so this is unambiguous for + // valid hostnames. 
+ rawHost = strings.ReplaceAll(rawHost, "_", ".") + + // Because environment variables are often set indirectly by OS + // libraries that might interfere with how they are encoded, we'll + // be tolerant of them being given either directly as UTF-8 IDNs + // or in Punycode form, normalizing to Punycode form here because + // that is what the OpenTofu credentials helper protocol will + // use in its requests. + // + // Using ForDisplay first here makes this more liberal than OpenTofu + // itself would usually be in that it will tolerate pre-punycoded + // hostnames that OpenTofu normally rejects in other contexts in order + // to ensure stored hostnames are human-readable. + dispHost := svchost.ForDisplay(rawHost) + hostname, err := svchost.ForComparison(dispHost) + if err != nil { + // Ignore invalid hostnames + continue + } + + ret[hostname] = value + } + + return ret +} + +// hostCredentialsFromEnv returns a token credential by searching for a hostname-specific +// environment variable. The host parameter is expected to be in the "comparison" form, +// for example, hostnames containing non-ASCII characters like "café.fr" +// should be expressed as "xn--caf-dma.fr". If the variable based on the hostname is not +// defined, nil is returned. +// +// Hyphen and period characters are allowed in environment variable names, but are not valid POSIX +// variable names. However, it's still possible to set variable names with these characters using +// utilities like env or docker. Variable names may have periods translated to underscores and +// hyphens translated to double underscores in the variable name. 
+// For the example "café.fr", you may use the variable names "TF_TOKEN_xn____caf__dma_fr", +// "TF_TOKEN_xn--caf-dma_fr", or "TF_TOKEN_xn--caf-dma.fr" +func hostCredentialsFromEnv(host svchost.Hostname) svcauth.HostCredentials { + token, ok := collectCredentialsFromEnv()[host] + if !ok { + return nil + } + return svcauth.HostCredentialsToken(token) +} + +// CredentialsSource is an implementation of svcauth.CredentialsSource +// that can read and write the CLI configuration, and possibly also delegate +// to a credentials helper when configured. +type CredentialsSource struct { + // configured describes the credentials explicitly configured in the CLI + // config via "credentials" blocks. This map will also change to reflect + // any writes to the special credentials.tfrc.json file. + configured map[svchost.Hostname]cty.Value + + // unwritable describes any credentials explicitly configured in the + // CLI config in any file other than credentials.tfrc.json. We cannot update + // these automatically because only credentials.tfrc.json is subject to + // editing by this credentials source. + unwritable map[svchost.Hostname]cty.Value + + // credentialsFilePath is the full path to the credentials.tfrc.json file + // that we'll update if any changes to credentials are requested and if + // a credentials helper isn't available to use instead. + // + // (This is a field here rather than just calling credentialsConfigFile + // directly just so that we can use temporary file location instead during + // testing.) + credentialsFilePath string + + // helper is the credentials source representing the configured credentials + // helper, if any. When this is non-nil, it will be consulted for any + // hostnames not explicitly represented in "configured". Any writes to + // the credentials store will also be sent to a configured helper instead + // of the credentials.tfrc.json file. 
+ helper svcauth.CredentialsSource + + // helperType is the name of the type of credentials helper that is + // referenced in "helper", or the empty string if "helper" is nil. + helperType string +} + +// Assertion that credentialsSource implements CredentialsSource +var _ svcauth.CredentialsSource = (*CredentialsSource)(nil) + +func (s *CredentialsSource) ForHost(host svchost.Hostname) (svcauth.HostCredentials, error) { + // The first order of precedence for credentials is a host-specific environment variable + if envCreds := hostCredentialsFromEnv(host); envCreds != nil { + return envCreds, nil + } + + // Then, any credentials block present in the CLI config + v, ok := s.configured[host] + if ok { + return svcauth.HostCredentialsFromObject(v), nil + } + + // And finally, the credentials helper + if s.helper != nil { + return s.helper.ForHost(host) + } + + return nil, nil +} + +func (s *CredentialsSource) StoreForHost(host svchost.Hostname, credentials svcauth.HostCredentialsWritable) error { + return s.updateHostCredentials(host, credentials) +} + +func (s *CredentialsSource) ForgetForHost(host svchost.Hostname) error { + return s.updateHostCredentials(host, nil) +} + +// HostCredentialsLocation returns a value indicating what type of storage is +// currently used for the credentials for the given hostname. +// +// The current location of credentials determines whether updates are possible +// at all and, if they are, where any updates will be written. 
+func (s *CredentialsSource) HostCredentialsLocation(host svchost.Hostname) CredentialsLocation { + if _, unwritable := s.unwritable[host]; unwritable { + return CredentialsInOtherFile + } + if _, exists := s.configured[host]; exists { + return CredentialsInPrimaryFile + } + if s.helper != nil { + return CredentialsViaHelper + } + return CredentialsNotAvailable +} + +// CredentialsFilePath returns the full path to the local credentials +// configuration file, so that a caller can mention this path in order to +// be transparent about where credentials will be stored. +// +// This file will be used for writes only if HostCredentialsLocation for the +// relevant host returns CredentialsInPrimaryFile or CredentialsNotAvailable. +// +// The credentials file path is found relative to the current user's home +// directory, so this function will return an error in the unlikely event that +// we cannot determine a suitable home directory to resolve relative to. +func (s *CredentialsSource) CredentialsFilePath() (string, error) { + return s.credentialsFilePath, nil +} + +// CredentialsHelperType returns the name of the configured credentials helper +// type, or an empty string if no credentials helper is configured. +func (s *CredentialsSource) CredentialsHelperType() string { + return s.helperType +} + +func (s *CredentialsSource) updateHostCredentials(host svchost.Hostname, new svcauth.HostCredentialsWritable) error { + switch loc := s.HostCredentialsLocation(host); loc { + case CredentialsInOtherFile: + return ErrUnwritableHostCredentials(host) + case CredentialsInPrimaryFile, CredentialsNotAvailable: + // If the host already has credentials stored locally then we'll update + // them locally too, even if there's a credentials helper configured, + // because the user might be intentionally retaining this particular + // host locally for some reason, e.g. if the credentials helper is + // talking to some shared remote service like HashiCorp Vault. 
+ return s.updateLocalHostCredentials(host, new) + case CredentialsViaHelper: + // Delegate entirely to the helper, then. + if new == nil { + return s.helper.ForgetForHost(host) + } + return s.helper.StoreForHost(host, new) + default: + // Should never happen because the above cases are exhaustive + return fmt.Errorf("invalid credentials location %#v", loc) + } +} + +func (s *CredentialsSource) updateLocalHostCredentials(host svchost.Hostname, new svcauth.HostCredentialsWritable) error { + // This function updates the local credentials file in particular, + // regardless of whether a credentials helper is active. It should be + // called only indirectly via updateHostCredentials. + + filename, err := s.CredentialsFilePath() + if err != nil { + return fmt.Errorf("unable to determine credentials file path: %w", err) + } + + oldSrc, err := os.ReadFile(filename) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("cannot read %s: %w", filename, err) + } + + var raw map[string]interface{} + + if len(oldSrc) > 0 { + // When decoding we use a custom decoder so we can decode any numbers as + // json.Number and thus avoid losing any accuracy in our round-trip. 
+ dec := json.NewDecoder(bytes.NewReader(oldSrc)) + dec.UseNumber() + err = dec.Decode(&raw) + if err != nil { + return fmt.Errorf("cannot read %s: %w", filename, err) + } + } else { + raw = make(map[string]interface{}) + } + + rawCredsI, ok := raw["credentials"] + if !ok { + rawCredsI = make(map[string]interface{}) + raw["credentials"] = rawCredsI + } + rawCredsMap, ok := rawCredsI.(map[string]interface{}) + if !ok { + return fmt.Errorf("credentials file %s has invalid value for \"credentials\" property: must be a JSON object", filename) + } + + // We use display-oriented hostnames in our file to mimick how a human user + // would write it, so we need to search for and remove any key that + // normalizes to our target hostname so we won't generate something invalid + // when the existing entry is slightly different. + for givenHost := range rawCredsMap { + canonHost, err := svchost.ForComparison(givenHost) + if err == nil && canonHost == host { + delete(rawCredsMap, givenHost) + } + } + + // If we have a new object to store we'll write it in now. If the previous + // object had the hostname written in a different way then this will + // appear to change it into our canonical display form, with all the + // letters in lowercase and other transforms from the Internationalized + // Domain Names specification. + if new != nil { + toStore := new.ToStore() + rawCredsMap[host.ForDisplay()] = ctyjson.SimpleJSONValue{ + Value: toStore, + } + } + + newSrc, err := json.MarshalIndent(raw, "", " ") + if err != nil { + return fmt.Errorf("cannot serialize updated credentials file: %w", err) + } + + // Now we'll write our new content over the top of the existing file. + // Because we updated the data structure surgically here we should not + // have disturbed the meaning of any other content in the file, but it + // might have a different JSON layout than before. 
+ // We'll create a new file with a different name first and then rename + // it over the old file in order to make the change as atomically as + // the underlying OS/filesystem will allow. + { + dir, file := filepath.Split(filename) + f, err := os.CreateTemp(dir, file) + if err != nil { + return fmt.Errorf("cannot create temporary file to update credentials: %w", err) + } + tmpName := f.Name() + moved := false + defer func(f *os.File, name string) { + // Remove the temporary file if it hasn't been moved yet. We're + // ignoring errors here because there's nothing we can do about + // them anyway. + if !moved { + os.Remove(name) + } + }(f, tmpName) + + // Write the credentials to the temporary file, then immediately close + // it, whether or not the write succeeds. + _, err = f.Write(newSrc) + f.Close() + if err != nil { + return fmt.Errorf("cannot write to temporary file %s: %w", tmpName, err) + } + + // Temporary file now replaces the original file, as atomically as + // possible. (At the very least, we should not end up with a file + // containing only a partial JSON object.) + err = replacefile.AtomicRename(tmpName, filename) + if err != nil { + return fmt.Errorf("failed to replace %s with temporary file %s: %w", filename, tmpName, err) + } + + // Credentials file should be readable only by its owner. (This may + // not be effective on all platforms, but should at least work on + // Unix-like targets and should be harmless elsewhere.) + if err := os.Chmod(filename, 0600); err != nil { + return fmt.Errorf("cannot set mode for credentials file %s: %w", filename, err) + } + + moved = true + } + + if new != nil { + s.configured[host] = new.ToStore() + } else { + delete(s.configured, host) + } + + return nil +} + +// readHostsInCredentialsFile discovers which hosts have credentials configured +// in the credentials file specifically, as opposed to in any other CLI +// config file. 
+// +// If the credentials file isn't present or is unreadable for any reason then +// this returns an empty set, reflecting that effectively no credentials are +// stored there. +func readHostsInCredentialsFile(filename string) map[svchost.Hostname]struct{} { + src, err := os.ReadFile(filename) + if err != nil { + return nil + } + + var raw map[string]interface{} + err = json.Unmarshal(src, &raw) + if err != nil { + return nil + } + + rawCredsI, ok := raw["credentials"] + if !ok { + return nil + } + rawCredsMap, ok := rawCredsI.(map[string]interface{}) + if !ok { + return nil + } + + ret := make(map[svchost.Hostname]struct{}) + for givenHost := range rawCredsMap { + host, err := svchost.ForComparison(givenHost) + if err != nil { + // We expect the config was already validated by the time we get + // here, so we'll just ignore invalid hostnames. + continue + } + ret[host] = struct{}{} + } + return ret +} + +// ErrUnwritableHostCredentials is an error type that is returned when a caller +// tries to write credentials for a host that has existing credentials configured +// in a file that we cannot automatically update. +type ErrUnwritableHostCredentials svchost.Hostname + +func (err ErrUnwritableHostCredentials) Error() string { + return fmt.Sprintf("cannot change credentials for %s: existing manually-configured credentials in a CLI config file", svchost.Hostname(err).ForDisplay()) +} + +// Hostname returns the host that could not be written. +func (err ErrUnwritableHostCredentials) Hostname() svchost.Hostname { + return svchost.Hostname(err) +} + +// CredentialsLocation describes a type of storage used for the credentials +// for a particular hostname. +type CredentialsLocation rune + +const ( + // CredentialsNotAvailable means that we know that there are no credential + // available for the host. 
+ // + // Note that CredentialsViaHelper might also lead to no credentials being + // available, depending on how the helper answers when we request credentials + // from it. + CredentialsNotAvailable CredentialsLocation = 0 + + // CredentialsInPrimaryFile means that there is already a credentials object + // for the host in the credentials.tfrc.json file. + CredentialsInPrimaryFile CredentialsLocation = 'P' + + // CredentialsInOtherFile means that there is already a credentials object + // for the host in a CLI config file other than credentials.tfrc.json. + CredentialsInOtherFile CredentialsLocation = 'O' + + // CredentialsViaHelper indicates that no statically-configured credentials + // are available for the host but a helper program is available that may + // or may not have credentials for the host. + CredentialsViaHelper CredentialsLocation = 'H' +) diff --git a/pkg/command/cliconfig/credentials_test.go b/pkg/command/cliconfig/credentials_test.go new file mode 100644 index 00000000000..00323b60380 --- /dev/null +++ b/pkg/command/cliconfig/credentials_test.go @@ -0,0 +1,461 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package cliconfig

import (
	"net/http"
	"path/filepath"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/zclconf/go-cty/cty"

	svchost "github.com/hashicorp/terraform-svchost"
	svcauth "github.com/hashicorp/terraform-svchost/auth"
)

// TestCredentialsForHost exercises CredentialsSource.ForHost precedence:
// host-specific TF_TOKEN_* environment variables first, then statically
// configured "credentials" blocks, then a credentials helper.
func TestCredentialsForHost(t *testing.T) {
	credSrc := &CredentialsSource{
		configured: map[svchost.Hostname]cty.Value{
			"configured.example.com": cty.ObjectVal(map[string]cty.Value{
				"token": cty.StringVal("configured"),
			}),
			"unused.example.com": cty.ObjectVal(map[string]cty.Value{
				"token": cty.StringVal("incorrectly-configured"),
			}),
		},

		// We'll use a static source to stand in for what would normally be
		// a credentials helper program, since we're only testing the logic
		// for choosing when to delegate to the helper here. The logic for
		// interacting with a helper program is tested in the svcauth package.
		helper: svcauth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
			"from-helper.example.com": {
				"token": "from-helper",
			},

			// This should be shadowed by the "configured" entry with the same
			// hostname above.
			"configured.example.com": {
				"token": "incorrectly-from-helper",
			},
		}),
		helperType: "fake",
	}

	// testReqAuthHeader applies the given credentials to a throwaway HTTP
	// request and returns the resulting Authorization header value, or ""
	// when creds is nil.
	testReqAuthHeader := func(t *testing.T, creds svcauth.HostCredentials) string {
		t.Helper()

		if creds == nil {
			return ""
		}

		req, err := http.NewRequest("GET", "http://example.com/", nil)
		if err != nil {
			t.Fatalf("cannot construct HTTP request: %s", err)
		}
		creds.PrepareRequest(req)
		return req.Header.Get("Authorization")
	}

	t.Run("configured", func(t *testing.T) {
		creds, err := credSrc.ForHost(svchost.Hostname("configured.example.com"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if got, want := testReqAuthHeader(t, creds), "Bearer configured"; got != want {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, want)
		}
	})
	t.Run("from helper", func(t *testing.T) {
		creds, err := credSrc.ForHost(svchost.Hostname("from-helper.example.com"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if got, want := testReqAuthHeader(t, creds), "Bearer from-helper"; got != want {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, want)
		}
	})
	t.Run("not available", func(t *testing.T) {
		creds, err := credSrc.ForHost(svchost.Hostname("unavailable.example.com"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if got, want := testReqAuthHeader(t, creds), ""; got != want {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, want)
		}
	})
	t.Run("set in environment", func(t *testing.T) {
		envName := "TF_TOKEN_configured_example_com"

		expectedToken := "configured-by-env"
		t.Setenv(envName, expectedToken)

		// The environment variable must win over the "configured" entry
		// for the same hostname.
		creds, err := credSrc.ForHost(svchost.Hostname("configured.example.com"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if creds == nil {
			t.Fatal("no credentials found")
		}

		if got := creds.Token(); got != expectedToken {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, expectedToken)
		}
	})

	t.Run("punycode name set in environment", func(t *testing.T) {
		envName := "TF_TOKEN_env_xn--eckwd4c7cu47r2wf_com"

		expectedToken := "configured-by-env"
		t.Setenv(envName, expectedToken)

		// The IDN hostname normalizes to the punycode form used in the
		// variable name above.
		hostname, _ := svchost.ForComparison("env.ドメイン名例.com")
		creds, err := credSrc.ForHost(hostname)

		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if creds == nil {
			t.Fatal("no credentials found")
		}

		if got := creds.Token(); got != expectedToken {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, expectedToken)
		}
	})

	t.Run("hyphens can be encoded as double underscores", func(t *testing.T) {
		envName := "TF_TOKEN_env_xn____caf__dma_fr"
		expectedToken := "configured-by-fallback"

		t.Setenv(envName, expectedToken)

		hostname, _ := svchost.ForComparison("env.café.fr")
		creds, err := credSrc.ForHost(hostname)

		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if creds == nil {
			t.Fatal("no credentials found")
		}

		if got := creds.Token(); got != expectedToken {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, expectedToken)
		}
	})

	t.Run("periods are ok", func(t *testing.T) {
		// Periods are not valid POSIX variable names but can still be set
		// via tools like env(1) or docker, so they are accepted literally.
		envName := "TF_TOKEN_configured.example.com"
		expectedToken := "configured-by-env"

		t.Setenv(envName, expectedToken)

		hostname, _ := svchost.ForComparison("configured.example.com")
		creds, err := credSrc.ForHost(hostname)

		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if creds == nil {
			t.Fatal("no credentials found")
		}

		if got := creds.Token(); got != expectedToken {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, expectedToken)
		}
	})

	t.Run("casing is insensitive", func(t *testing.T) {
		envName := "TF_TOKEN_CONFIGUREDUPPERCASE_EXAMPLE_COM"
		expectedToken := "configured-by-env"

		t.Setenv(envName, expectedToken)

		hostname, _ := svchost.ForComparison("configureduppercase.example.com")
		creds, err := credSrc.ForHost(hostname)

		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if creds == nil {
			t.Fatal("no credentials found")
		}

		if got := creds.Token(); got != expectedToken {
			t.Errorf("wrong result\ngot:  %s\nwant: %s", got, expectedToken)
		}
	})
}

// TestCredentialsStoreForget exercises StoreForHost/ForgetForHost routing:
// unwritable manually-configured hosts, the local credentials file, and a
// (mock) credentials helper, verifying side effects after each step.
func TestCredentialsStoreForget(t *testing.T) {
	d := t.TempDir()

	mockCredsFilename := filepath.Join(d, "credentials.tfrc.json")

	cfg := &Config{
		// This simulates there being a credentials block manually configured
		// in some file _other than_ credentials.tfrc.json.
		Credentials: map[string]map[string]interface{}{
			"manually-configured.example.com": {
				"token": "manually-configured",
			},
		},
	}

	// We'll initially use a credentials source with no credentials helper at
	// all, and thus with credentials stored in the credentials file.
	credSrc := cfg.credentialsSource(
		"", nil,
		mockCredsFilename,
	)

	// testReqAuthHeader applies the given credentials to a throwaway HTTP
	// request and returns the resulting Authorization header value, or ""
	// when creds is nil.
	testReqAuthHeader := func(t *testing.T, creds svcauth.HostCredentials) string {
		t.Helper()

		if creds == nil {
			return ""
		}

		req, err := http.NewRequest("GET", "http://example.com/", nil)
		if err != nil {
			t.Fatalf("cannot construct HTTP request: %s", err)
		}
		creds.PrepareRequest(req)
		return req.Header.Get("Authorization")
	}

	// Because these store/forget calls have side-effects, we'll bail out with
	// t.Fatal (or equivalent) as soon as anything unexpected happens.
	// Otherwise downstream tests might fail in confusing ways.
	{
		err := credSrc.StoreForHost(
			svchost.Hostname("manually-configured.example.com"),
			svcauth.HostCredentialsToken("not-manually-configured"),
		)
		if err == nil {
			t.Fatalf("successfully stored for manually-configured; want error")
		}
		if _, ok := err.(ErrUnwritableHostCredentials); !ok {
			t.Fatalf("wrong error type %T; want ErrUnwritableHostCredentials", err)
		}
	}
	{
		err := credSrc.ForgetForHost(
			svchost.Hostname("manually-configured.example.com"),
		)
		if err == nil {
			t.Fatalf("successfully forgot for manually-configured; want error")
		}
		if _, ok := err.(ErrUnwritableHostCredentials); !ok {
			t.Fatalf("wrong error type %T; want ErrUnwritableHostCredentials", err)
		}
	}
	{
		// We don't have a credentials file at all yet, so this first call
		// must create it.
		err := credSrc.StoreForHost(
			svchost.Hostname("stored-locally.example.com"),
			svcauth.HostCredentialsToken("stored-locally"),
		)
		if err != nil {
			t.Fatalf("unexpected error storing locally: %s", err)
		}

		creds, err := credSrc.ForHost(svchost.Hostname("stored-locally.example.com"))
		if err != nil {
			t.Fatalf("failed to read back stored-locally credentials: %s", err)
		}

		if got, want := testReqAuthHeader(t, creds), "Bearer stored-locally"; got != want {
			t.Fatalf("wrong header value for stored-locally\ngot:  %s\nwant: %s", got, want)
		}

		got := readHostsInCredentialsFile(mockCredsFilename)
		want := map[svchost.Hostname]struct{}{
			svchost.Hostname("stored-locally.example.com"): struct{}{},
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong credentials file content\n%s", diff)
		}
	}

	// Now we'll switch to having a credential helper active.
	// If we were loading the real CLI config from disk here then this
	// entry would already be in cfg.Credentials, but we need to fake that
	// in the test because we're constructing this *Config value directly.
	cfg.Credentials["stored-locally.example.com"] = map[string]interface{}{
		"token": "stored-locally",
	}
	mockHelper := &mockCredentialsHelper{current: make(map[svchost.Hostname]cty.Value)}
	credSrc = cfg.credentialsSource(
		"mock", mockHelper,
		mockCredsFilename,
	)
	{
		// Manually-configured hosts stay unwritable even with a helper.
		err := credSrc.StoreForHost(
			svchost.Hostname("manually-configured.example.com"),
			svcauth.HostCredentialsToken("not-manually-configured"),
		)
		if err == nil {
			t.Fatalf("successfully stored for manually-configured with helper active; want error")
		}
	}
	{
		err := credSrc.StoreForHost(
			svchost.Hostname("stored-in-helper.example.com"),
			svcauth.HostCredentialsToken("stored-in-helper"),
		)
		if err != nil {
			t.Fatalf("unexpected error storing in helper: %s", err)
		}

		creds, err := credSrc.ForHost(svchost.Hostname("stored-in-helper.example.com"))
		if err != nil {
			t.Fatalf("failed to read back stored-in-helper credentials: %s", err)
		}

		if got, want := testReqAuthHeader(t, creds), "Bearer stored-in-helper"; got != want {
			t.Fatalf("wrong header value for stored-in-helper\ngot:  %s\nwant: %s", got, want)
		}

		// Nothing should have changed in the saved credentials file
		got := readHostsInCredentialsFile(mockCredsFilename)
		want := map[svchost.Hostname]struct{}{
			svchost.Hostname("stored-locally.example.com"): struct{}{},
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong credentials file content\n%s", diff)
		}
	}
	{
		// Because stored-locally is already in the credentials file, a new
		// store should be sent there rather than to the credentials helper.
		err := credSrc.StoreForHost(
			svchost.Hostname("stored-locally.example.com"),
			svcauth.HostCredentialsToken("stored-locally-again"),
		)
		if err != nil {
			t.Fatalf("unexpected error storing locally again: %s", err)
		}

		creds, err := credSrc.ForHost(svchost.Hostname("stored-locally.example.com"))
		if err != nil {
			t.Fatalf("failed to read back stored-locally credentials: %s", err)
		}

		if got, want := testReqAuthHeader(t, creds), "Bearer stored-locally-again"; got != want {
			t.Fatalf("wrong header value for stored-locally\ngot:  %s\nwant: %s", got, want)
		}
	}
	{
		// Forgetting a host already in the credentials file should remove it
		// from the credentials file, not from the helper.
		err := credSrc.ForgetForHost(
			svchost.Hostname("stored-locally.example.com"),
		)
		if err != nil {
			t.Fatalf("unexpected error forgetting locally: %s", err)
		}

		creds, err := credSrc.ForHost(svchost.Hostname("stored-locally.example.com"))
		if err != nil {
			t.Fatalf("failed to read back stored-locally credentials: %s", err)
		}

		if got, want := testReqAuthHeader(t, creds), ""; got != want {
			t.Fatalf("wrong header value for stored-locally\ngot:  %s\nwant: %s", got, want)
		}

		// Should not be present in the credentials file anymore
		got := readHostsInCredentialsFile(mockCredsFilename)
		want := map[svchost.Hostname]struct{}{}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong credentials file content\n%s", diff)
		}
	}
	{
		err := credSrc.ForgetForHost(
			svchost.Hostname("stored-in-helper.example.com"),
		)
		if err != nil {
			t.Fatalf("unexpected error forgetting in helper: %s", err)
		}

		creds, err := credSrc.ForHost(svchost.Hostname("stored-in-helper.example.com"))
		if err != nil {
			t.Fatalf("failed to read back stored-in-helper credentials: %s", err)
		}

		if got, want := testReqAuthHeader(t, creds), ""; got != want {
			t.Fatalf("wrong header value for stored-in-helper\ngot:  %s\nwant: %s", got, want)
		}
	}

	{
		// Finally, the log in our mock helper should show that it was only
		// asked to deal with stored-in-helper, not stored-locally.
		got := mockHelper.log
		want := []mockCredentialsHelperChange{
			{
				Host:   svchost.Hostname("stored-in-helper.example.com"),
				Action: "store",
			},
			{
				Host:   svchost.Hostname("stored-in-helper.example.com"),
				Action: "forget",
			},
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("unexpected credentials helper operation log\n%s", diff)
		}
	}
}

// mockCredentialsHelperChange records a single store/forget operation
// observed by mockCredentialsHelper.
type mockCredentialsHelperChange struct {
	Host   svchost.Hostname
	Action string
}

// mockCredentialsHelper is an in-memory stand-in for a credentials helper
// program that also records every store/forget operation it receives.
type mockCredentialsHelper struct {
	current map[svchost.Hostname]cty.Value
	log     []mockCredentialsHelperChange
}

// Assertion that mockCredentialsHelper implements svcauth.CredentialsSource
var _ svcauth.CredentialsSource = (*mockCredentialsHelper)(nil)

func (s *mockCredentialsHelper) ForHost(hostname svchost.Hostname) (svcauth.HostCredentials, error) {
	v, ok := s.current[hostname]
	if !ok {
		return nil, nil
	}
	return svcauth.HostCredentialsFromObject(v), nil
}

func (s *mockCredentialsHelper) StoreForHost(hostname svchost.Hostname, new svcauth.HostCredentialsWritable) error {
	s.log = append(s.log, mockCredentialsHelperChange{
		Host:   hostname,
		Action: "store",
	})
	s.current[hostname] = new.ToStore()
	return nil
}

func (s *mockCredentialsHelper) ForgetForHost(hostname svchost.Hostname) error {
	s.log = append(s.log, mockCredentialsHelperChange{
		Host:   hostname,
		Action: "forget",
	})
	delete(s.current, hostname)
	return nil
}
diff --git a/pkg/command/cliconfig/provider_installation.go b/pkg/command/cliconfig/provider_installation.go
new file mode 100644
index 00000000000..fb8137963ca
--- /dev/null
+++ b/pkg/command/cliconfig/provider_installation.go
@@ -0,0 +1,343 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/hcl" + hclast "github.com/hashicorp/hcl/hcl/ast" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ProviderInstallation is the structure of the "provider_installation" +// nested block within the CLI configuration. +type ProviderInstallation struct { + Methods []*ProviderInstallationMethod + + // DevOverrides allows overriding the normal selection process for + // a particular subset of providers to force using a particular + // local directory and disregard version numbering altogether. + // This is here to allow provider developers to conveniently test + // local builds of their plugins in a development environment, without + // having to fuss with version constraints, dependency lock files, and + // so forth. + // + // This is _not_ intended for "production" use because it bypasses the + // usual version selection and checksum verification mechanisms for + // the providers in question. To make that intent/effect clearer, some + // OpenTofu commands emit warnings when overrides are present. Local + // mirror directories are a better way to distribute "released" + // providers, because they are still subject to version constraints and + // checksum verification. + DevOverrides map[addrs.Provider]getproviders.PackageLocalDir +} + +// decodeProviderInstallationFromConfig uses the HCL AST API directly to +// decode "provider_installation" blocks from the given file. +// +// This uses the HCL AST directly, rather than HCL's decoder, because the +// intended configuration structure can't be represented using the HCL +// decoder's struct tags. 
This structure is intended as something that would +// be relatively easier to deal with in HCL 2 once we eventually migrate +// CLI config over to that, and so this function is stricter than HCL 1's +// decoder would be in terms of exactly what configuration shape it is +// expecting. +// +// Note that this function wants the top-level file object which might or +// might not contain provider_installation blocks, not a provider_installation +// block directly itself. +func decodeProviderInstallationFromConfig(hclFile *hclast.File) ([]*ProviderInstallation, tfdiags.Diagnostics) { + var ret []*ProviderInstallation + var diags tfdiags.Diagnostics + + root := hclFile.Node.(*hclast.ObjectList) + + // This is a rather odd hybrid: it's a HCL 2-like decode implemented using + // the HCL 1 AST API. That makes it a bit awkward in places, but it allows + // us to mimick the strictness of HCL 2 (making a later migration easier) + // and to support a block structure that the HCL 1 decoder can't represent. + for _, block := range root.Items { + if block.Keys[0].Token.Value() != "provider_installation" { + continue + } + // HCL only tracks whether the input was JSON or native syntax inside + // individual tokens, so we'll use our block type token to decide + // and assume that the rest of the block must be written in the same + // syntax, because syntax is a whole-file idea. 
+ isJSON := block.Keys[0].Token.JSON + if block.Assign.Line != 0 && !isJSON { + // Seems to be an attribute rather than a block + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation block", + fmt.Sprintf("The provider_installation block at %s must not be introduced with an equals sign.", block.Pos()), + )) + continue + } + if len(block.Keys) > 1 && !isJSON { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation block", + fmt.Sprintf("The provider_installation block at %s must not have any labels.", block.Pos()), + )) + } + + pi := &ProviderInstallation{} + devOverrides := make(map[addrs.Provider]getproviders.PackageLocalDir) + + body, ok := block.Val.(*hclast.ObjectType) + if !ok { + // We can't get in here with native HCL syntax because we + // already checked above that we're using block syntax, but + // if we're reading JSON then our value could potentially be + // anything. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation block", + fmt.Sprintf("The provider_installation block at %s must not be introduced with an equals sign.", block.Pos()), + )) + continue + } + + for _, methodBlock := range body.List.Items { + if methodBlock.Assign.Line != 0 && !isJSON { + // Seems to be an attribute rather than a block + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("The items inside the provider_installation block at %s must all be blocks.", block.Pos()), + )) + continue + } + if len(methodBlock.Keys) > 1 && !isJSON { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("The blocks inside the provider_installation block at %s may not have any labels.", block.Pos()), + )) + } + + methodBody, ok := methodBlock.Val.(*hclast.ObjectType) + if !ok { + // We can't get in here with native HCL syntax because we + // already 
checked above that we're using block syntax, but + // if we're reading JSON then our value could potentially be + // anything. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("The items inside the provider_installation block at %s must all be blocks.", block.Pos()), + )) + continue + } + + methodTypeStr := methodBlock.Keys[0].Token.Value().(string) + var location ProviderInstallationLocation + var include, exclude []string + switch methodTypeStr { + case "direct": + type BodyContent struct { + Include []string `hcl:"include"` + Exclude []string `hcl:"exclude"` + } + var bodyContent BodyContent + err := hcl.DecodeObject(&bodyContent, methodBody) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: %s.", methodTypeStr, block.Pos(), err), + )) + continue + } + location = ProviderInstallationDirect + include = bodyContent.Include + exclude = bodyContent.Exclude + case "filesystem_mirror": + type BodyContent struct { + Path string `hcl:"path"` + Include []string `hcl:"include"` + Exclude []string `hcl:"exclude"` + } + var bodyContent BodyContent + err := hcl.DecodeObject(&bodyContent, methodBody) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: %s.", methodTypeStr, block.Pos(), err), + )) + continue + } + if bodyContent.Path == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: \"path\" argument is required.", methodTypeStr, block.Pos()), + )) + continue + } + location = ProviderInstallationFilesystemMirror(bodyContent.Path) + include = bodyContent.Include + exclude = bodyContent.Exclude + case "network_mirror": + type BodyContent struct { + URL string `hcl:"url"` + Include 
[]string `hcl:"include"` + Exclude []string `hcl:"exclude"` + } + var bodyContent BodyContent + err := hcl.DecodeObject(&bodyContent, methodBody) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: %s.", methodTypeStr, block.Pos(), err), + )) + continue + } + if bodyContent.URL == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: \"url\" argument is required.", methodTypeStr, block.Pos()), + )) + continue + } + location = ProviderInstallationNetworkMirror(bodyContent.URL) + include = bodyContent.Include + exclude = bodyContent.Exclude + case "dev_overrides": + if len(pi.Methods) > 0 { + // We require dev_overrides to appear first if it's present, + // because dev_overrides effectively bypass the normal + // selection process for a particular provider altogether, + // and so they don't participate in the usual + // include/exclude arguments and priority ordering. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("The dev_overrides block at at %s must appear before all other installation methods, because development overrides always have the highest priority.", methodBlock.Pos()), + )) + continue + } + + // The content of a dev_overrides block is a mapping from + // provider source addresses to local filesystem paths. To get + // our decoding started, we'll use the normal HCL decoder to + // populate a map of strings and then decode further from + // that. 
+ var rawItems map[string]string + err := hcl.DecodeObject(&rawItems, methodBody) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Invalid %s block at %s: %s.", methodTypeStr, block.Pos(), err), + )) + continue + } + + for rawAddr, rawPath := range rawItems { + addr, moreDiags := addrs.ParseProviderSourceString(rawAddr) + if moreDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider installation dev overrides", + fmt.Sprintf("The entry %q in %s is not a valid provider source string.\n\n%s", rawAddr, block.Pos(), moreDiags.Err().Error()), + )) + continue + } + dirPath := filepath.Clean(rawPath) + devOverrides[addr] = getproviders.PackageLocalDir(dirPath) + } + + continue // We won't add anything to pi.MethodConfigs for this one + + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider_installation method block", + fmt.Sprintf("Unknown provider installation method %q at %s.", methodTypeStr, methodBlock.Pos()), + )) + continue + } + + pi.Methods = append(pi.Methods, &ProviderInstallationMethod{ + Location: location, + Include: include, + Exclude: exclude, + }) + } + + if len(devOverrides) > 0 { + pi.DevOverrides = devOverrides + } + + ret = append(ret, pi) + } + + return ret, diags +} + +// ProviderInstallationMethod represents an installation method block inside +// a provider_installation block. +type ProviderInstallationMethod struct { + Location ProviderInstallationLocation + Include []string `hcl:"include"` + Exclude []string `hcl:"exclude"` +} + +// ProviderInstallationLocation is an interface type representing the +// different installation location types. 
The concrete implementations of +// this interface are: +// +// - [ProviderInstallationDirect]: install from the provider's origin registry +// - [ProviderInstallationFilesystemMirror] (dir): install from a local filesystem mirror +// - [ProviderInstallationNetworkMirror] (host): install from a network mirror +type ProviderInstallationLocation interface { + providerInstallationLocation() +} + +type providerInstallationDirect [0]byte + +func (i providerInstallationDirect) providerInstallationLocation() {} + +// ProviderInstallationDirect is a ProviderInstallationSourceLocation +// representing installation from a provider's origin registry. +var ProviderInstallationDirect ProviderInstallationLocation = providerInstallationDirect{} + +func (i providerInstallationDirect) GoString() string { + return "cliconfig.ProviderInstallationDirect" +} + +// ProviderInstallationFilesystemMirror is a ProviderInstallationSourceLocation +// representing installation from a particular local filesystem mirror. The +// string value is the filesystem path to the mirror directory. +type ProviderInstallationFilesystemMirror string + +func (i ProviderInstallationFilesystemMirror) providerInstallationLocation() {} + +func (i ProviderInstallationFilesystemMirror) GoString() string { + return fmt.Sprintf("cliconfig.ProviderInstallationFilesystemMirror(%q)", i) +} + +// ProviderInstallationNetworkMirror is a ProviderInstallationSourceLocation +// representing installation from a particular local network mirror. The +// string value is the HTTP base URL exactly as written in the configuration, +// without any normalization. 
+type ProviderInstallationNetworkMirror string + +func (i ProviderInstallationNetworkMirror) providerInstallationLocation() {} + +func (i ProviderInstallationNetworkMirror) GoString() string { + return fmt.Sprintf("cliconfig.ProviderInstallationNetworkMirror(%q)", i) +} diff --git a/pkg/command/cliconfig/provider_installation_test.go b/pkg/command/cliconfig/provider_installation_test.go new file mode 100644 index 00000000000..2f7823b7f2f --- /dev/null +++ b/pkg/command/cliconfig/provider_installation_test.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestLoadConfig_providerInstallation(t *testing.T) { + for _, configFile := range []string{"provider-installation", "provider-installation.json"} { + t.Run(configFile, func(t *testing.T) { + got, diags := loadConfigFile(filepath.Join(fixtureDir, configFile)) + if diags.HasErrors() { + t.Errorf("unexpected diagnostics: %s", diags.Err().Error()) + } + + want := &Config{ + ProviderInstallation: []*ProviderInstallation{ + { + Methods: []*ProviderInstallationMethod{ + { + Location: ProviderInstallationFilesystemMirror("/tmp/example1"), + Include: []string{"example.com/*/*"}, + }, + { + Location: ProviderInstallationNetworkMirror("https://tf-Mirror.example.com/"), + Include: []string{"registry.opentofu.org/*/*"}, + Exclude: []string{"registry.OpenTofu.org/foobar/*"}, + }, + { + Location: ProviderInstallationFilesystemMirror("/tmp/example2"), + }, + { + Location: ProviderInstallationDirect, + Exclude: []string{"example.com/*/*"}, + }, + }, + + DevOverrides: map[addrs.Provider]getproviders.PackageLocalDir{ + addrs.MustParseProviderSourceString("hashicorp/boop"): 
getproviders.PackageLocalDir(filepath.FromSlash("/tmp/boop")), + addrs.MustParseProviderSourceString("hashicorp/blorp"): getproviders.PackageLocalDir(filepath.FromSlash("/tmp/blorp")), + }, + }, + }, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestLoadConfig_providerInstallationErrors(t *testing.T) { + _, diags := loadConfigFile(filepath.Join(fixtureDir, "provider-installation-errors")) + want := `7 problems: + +- Invalid provider_installation method block: Unknown provider installation method "not_a_thing" at 2:3. +- Invalid provider_installation method block: Invalid filesystem_mirror block at 1:1: "path" argument is required. +- Invalid provider_installation method block: Invalid network_mirror block at 1:1: "url" argument is required. +- Invalid provider_installation method block: The items inside the provider_installation block at 1:1 must all be blocks. +- Invalid provider_installation method block: The blocks inside the provider_installation block at 1:1 may not have any labels. +- Invalid provider_installation block: The provider_installation block at 9:1 must not have any labels. +- Invalid provider_installation block: The provider_installation block at 11:1 must not be introduced with an equals sign.` + + // The above error messages include only line/column location information + // and not file location information because HCL 1 does not store + // information about the filename a location belongs to. (There is a field + // for it in token.Pos but it's always an empty string in practice.) 
+ + if got := diags.Err().Error(); got != want { + t.Errorf("wrong diagnostics\ngot:\n%s\nwant:\n%s", got, want) + } +} diff --git a/pkg/command/cliconfig/testdata/config b/pkg/command/cliconfig/testdata/config new file mode 100644 index 00000000000..4cc989932b2 --- /dev/null +++ b/pkg/command/cliconfig/testdata/config @@ -0,0 +1,4 @@ +providers { + aws = "foo" + do = "bar" +} diff --git a/pkg/command/cliconfig/testdata/config-env b/pkg/command/cliconfig/testdata/config-env new file mode 100644 index 00000000000..e127b138d4b --- /dev/null +++ b/pkg/command/cliconfig/testdata/config-env @@ -0,0 +1,8 @@ +providers { + aws = "$TFTEST" + google = "bar" +} + +provisioners { + local = "$TFTEST" +} diff --git a/pkg/command/cliconfig/testdata/credentials b/pkg/command/cliconfig/testdata/credentials new file mode 100644 index 00000000000..cca9f7801d9 --- /dev/null +++ b/pkg/command/cliconfig/testdata/credentials @@ -0,0 +1,17 @@ + +credentials "example.com" { + token = "foo the bar baz" +} + +credentials "example.net" { + # Username and password are not currently supported, but we want to tolerate + # unknown keys in case future versions add new keys when both old and new + # versions of OpenTofu are installed on a system, sharing the same + # CLI config. 
+ username = "foo" + password = "baz" +} + +credentials_helper "foo" { + args = ["bar", "baz"] +} diff --git a/pkg/command/cliconfig/testdata/hosts b/pkg/command/cliconfig/testdata/hosts new file mode 100644 index 00000000000..1726404c1a8 --- /dev/null +++ b/pkg/command/cliconfig/testdata/hosts @@ -0,0 +1,6 @@ + +host "example.com" { + services = { + "modules.v1" = "https://example.com/", + } +} diff --git a/pkg/command/cliconfig/testdata/provider-installation b/pkg/command/cliconfig/testdata/provider-installation new file mode 100644 index 00000000000..6f14db2984d --- /dev/null +++ b/pkg/command/cliconfig/testdata/provider-installation @@ -0,0 +1,21 @@ +provider_installation { + dev_overrides { + "hashicorp/boop" = "/tmp/bloop/../boop" + "hashicorp/blorp" = "/tmp/blorp" + } + filesystem_mirror { + path = "/tmp/example1" + include = ["example.com/*/*"] + } + network_mirror { + url = "https://tf-Mirror.example.com/" + include = ["registry.opentofu.org/*/*"] + exclude = ["registry.OpenTofu.org/foobar/*"] + } + filesystem_mirror { + path = "/tmp/example2" + } + direct { + exclude = ["example.com/*/*"] + } +} diff --git a/pkg/command/cliconfig/testdata/provider-installation-errors b/pkg/command/cliconfig/testdata/provider-installation-errors new file mode 100644 index 00000000000..8cf634e50a9 --- /dev/null +++ b/pkg/command/cliconfig/testdata/provider-installation-errors @@ -0,0 +1,11 @@ +provider_installation { + not_a_thing {} # unknown source type + filesystem_mirror {} # missing "path" argument + network_mirror {} # missing "host" argument + direct = {} # should be a block, not an argument + direct "what" {} # should not have a label +} + +provider_installation "what" {} # should not have a label + +provider_installation = {} # should be a block, not an argument diff --git a/pkg/command/cliconfig/testdata/provider-installation.json b/pkg/command/cliconfig/testdata/provider-installation.json new file mode 100644 index 00000000000..f73023523e0 --- /dev/null +++ 
b/pkg/command/cliconfig/testdata/provider-installation.json @@ -0,0 +1,23 @@ +{ + "provider_installation": { + "dev_overrides": { + "hashicorp/boop": "/tmp/bloop/../boop", + "hashicorp/blorp": "/tmp/blorp" + }, + "filesystem_mirror": [{ + "path": "/tmp/example1", + "include": ["example.com/*/*"] + }], + "network_mirror": [{ + "url": "https://tf-Mirror.example.com/", + "include": ["registry.opentofu.org/*/*"], + "exclude": ["registry.OpenTofu.org/foobar/*"] + }], + "filesystem_mirror": [{ + "path": "/tmp/example2" + }], + "direct": [{ + "exclude": ["example.com/*/*"] + }] + } +} diff --git a/pkg/command/cliconfig/util.go b/pkg/command/cliconfig/util.go new file mode 100644 index 00000000000..2bc08e66024 --- /dev/null +++ b/pkg/command/cliconfig/util.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cliconfig + +import "os" + +func getNewOrLegacyPath(newPath string, legacyPath string) (string, error) { + // If the legacy directory exists, but the new directory does not, then use the legacy directory, for backwards compatibility reasons. + // Otherwise, use the new directory. + if _, err := os.Stat(legacyPath); err == nil { + if _, err := os.Stat(newPath); os.IsNotExist(err) { + return legacyPath, nil + } + } + + return newPath, nil +} diff --git a/pkg/command/clistate/local_state.go b/pkg/command/clistate/local_state.go new file mode 100644 index 00000000000..e08bf61106b --- /dev/null +++ b/pkg/command/clistate/local_state.go @@ -0,0 +1,317 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package clistate + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// LocalState manages a state storage that is local to the filesystem. +type LocalState struct { + // Path is the path to read the state from. PathOut is the path to + // write the state to. If PathOut is not specified, Path will be used. + // If PathOut already exists, it will be overwritten. + Path string + PathOut string + + // the file handle corresponding to PathOut + stateFileOut *os.File + + // While the stateFileOut will correspond to the lock directly, + // store and check the lock ID to maintain a strict state.Locker + // implementation. + lockID string + + // created is set to true if stateFileOut didn't exist before we created it. + // This is mostly so we can clean up empty files during tests, but doesn't + // hurt to remove file we never wrote to. + created bool + + mu sync.Mutex + state *tofu.State + readState *tofu.State + written bool +} + +// SetState will force a specific state in-memory for this local state. +func (s *LocalState) SetState(state *tofu.State) { + s.mu.Lock() + defer s.mu.Unlock() + + s.state = state.DeepCopy() + s.readState = state.DeepCopy() +} + +// StateReader impl. +func (s *LocalState) State() *tofu.State { + return s.state.DeepCopy() +} + +// WriteState for LocalState always persists the state as well. +// TODO: this should use a more robust method of writing state, by first +// writing to a temp file on the same filesystem, and renaming the file over +// the original. +// +// StateWriter impl. 
+func (s *LocalState) WriteState(state *tofu.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stateFileOut == nil { + if err := s.createStateFiles(); err != nil { + return nil + } + } + defer s.stateFileOut.Sync() + + s.state = state.DeepCopy() // don't want mutations before we actually get this written to disk + + if s.readState != nil && s.state != nil { + // We don't trust callers to properly manage serials. Instead, we assume + // that a WriteState is always for the next version after what was + // most recently read. + s.state.Serial = s.readState.Serial + } + + if _, err := s.stateFileOut.Seek(0, io.SeekStart); err != nil { + return err + } + if err := s.stateFileOut.Truncate(0); err != nil { + return err + } + + if state == nil { + // if we have no state, don't write anything else. + return nil + } + + if !s.state.MarshalEqual(s.readState) { + s.state.Serial++ + } + + if err := tofu.WriteState(s.state, s.stateFileOut); err != nil { + return err + } + + s.written = true + return nil +} + +// PersistState for LocalState is a no-op since WriteState always persists. +// +// StatePersister impl. +func (s *LocalState) PersistState() error { + return nil +} + +// StateRefresher impl. +func (s *LocalState) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.PathOut == "" { + s.PathOut = s.Path + } + + var reader io.Reader + + // The s.Path file is only OK to read if we have not written any state out + // (in which case the same state needs to be read in), and no state output file + // has been opened (possibly via a lock) or the input path is different + // than the output path. + // This is important for Windows, as if the input file is the same as the + // output file, and the output file has been locked already, we can't open + // the file again. 
+ if !s.written && (s.stateFileOut == nil || s.Path != s.PathOut) { + // we haven't written a state file yet, so load from Path + f, err := os.Open(s.Path) + if err != nil { + // It is okay if the file doesn't exist, we treat that as a nil state + if !os.IsNotExist(err) { + return err + } + + // we need a non-nil reader for ReadState and an empty buffer works + // to return EOF immediately + reader = bytes.NewBuffer(nil) + + } else { + defer f.Close() + reader = f + } + } else { + // no state to refresh + if s.stateFileOut == nil { + return nil + } + + // we have a state file, make sure we're at the start + s.stateFileOut.Seek(0, io.SeekStart) + reader = s.stateFileOut + } + + state, err := tofu.ReadState(reader) + // if there's no state we just assign the nil return value + if err != nil && err != tofu.ErrNoState { + return err + } + + s.state = state + s.readState = s.state.DeepCopy() + return nil +} + +// Lock implements a local filesystem state.Locker. +func (s *LocalState) Lock(info *statemgr.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stateFileOut == nil { + if err := s.createStateFiles(); err != nil { + return "", err + } + } + + if s.lockID != "" { + return "", fmt.Errorf("state %q already locked", s.stateFileOut.Name()) + } + + if err := s.lock(); err != nil { + info, infoErr := s.lockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + lockErr := &statemgr.LockError{ + Info: info, + Err: err, + } + + return "", lockErr + } + + s.lockID = info.ID + return s.lockID, s.writeLockInfo(info) +} + +func (s *LocalState) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.lockID == "" { + return fmt.Errorf("LocalState not locked") + } + + if id != s.lockID { + idErr := fmt.Errorf("invalid lock id: %q. 
current id: %q", id, s.lockID) + info, err := s.lockInfo() + if err != nil { + idErr = multierror.Append(idErr, err) + } + + return &statemgr.LockError{ + Err: idErr, + Info: info, + } + } + + os.Remove(s.lockInfoPath()) + + fileName := s.stateFileOut.Name() + + unlockErr := s.unlock() + + s.stateFileOut.Close() + s.stateFileOut = nil + s.lockID = "" + + // clean up the state file if we created it an never wrote to it + stat, err := os.Stat(fileName) + if err == nil && stat.Size() == 0 && s.created { + os.Remove(fileName) + } + + return unlockErr +} + +// Open the state file, creating the directories and file as needed. +func (s *LocalState) createStateFiles() error { + if s.PathOut == "" { + s.PathOut = s.Path + } + + // yes this could race, but we only use it to clean up empty files + if _, err := os.Stat(s.PathOut); os.IsNotExist(err) { + s.created = true + } + + // Create all the directories + if err := os.MkdirAll(filepath.Dir(s.PathOut), 0755); err != nil { + return err + } + + f, err := os.OpenFile(s.PathOut, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return err + } + + s.stateFileOut = f + return nil +} + +// return the path for the lockInfo metadata. +func (s *LocalState) lockInfoPath() string { + stateDir, stateName := filepath.Split(s.Path) + if stateName == "" { + panic("empty state file path") + } + + if stateName[0] == '.' 
{ + stateName = stateName[1:] + } + + return filepath.Join(stateDir, fmt.Sprintf(".%s.lock.info", stateName)) +} + +// lockInfo returns the data in a lock info file +func (s *LocalState) lockInfo() (*statemgr.LockInfo, error) { + path := s.lockInfoPath() + infoData, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + info := statemgr.LockInfo{} + err = json.Unmarshal(infoData, &info) + if err != nil { + return nil, fmt.Errorf("state file %q locked, but could not unmarshal lock info: %w", s.Path, err) + } + return &info, nil +} + +// write a new lock info file +func (s *LocalState) writeLockInfo(info *statemgr.LockInfo) error { + path := s.lockInfoPath() + info.Path = s.Path + info.Created = time.Now().UTC() + + err := os.WriteFile(path, info.Marshal(), 0600) + if err != nil { + return fmt.Errorf("could not write lock info for %q: %w", s.Path, err) + } + return nil +} diff --git a/pkg/command/clistate/local_state_lock_unix.go b/pkg/command/clistate/local_state_lock_unix.go new file mode 100644 index 00000000000..97f27ecc679 --- /dev/null +++ b/pkg/command/clistate/local_state_lock_unix.go @@ -0,0 +1,40 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package clistate + +import ( + "io" + "syscall" +) + +// use fcntl POSIX locks for the most consistent behavior across platforms, and +// hopefully some campatibility over NFS and CIFS. 
+func (s *LocalState) lock() error { + flock := &syscall.Flock_t{ + Type: syscall.F_RDLCK | syscall.F_WRLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + fd := s.stateFileOut.Fd() + return syscall.FcntlFlock(fd, syscall.F_SETLK, flock) +} + +func (s *LocalState) unlock() error { + flock := &syscall.Flock_t{ + Type: syscall.F_UNLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + fd := s.stateFileOut.Fd() + return syscall.FcntlFlock(fd, syscall.F_SETLK, flock) +} diff --git a/pkg/command/clistate/local_state_lock_windows.go b/pkg/command/clistate/local_state_lock_windows.go new file mode 100644 index 00000000000..0de35e1fe78 --- /dev/null +++ b/pkg/command/clistate/local_state_lock_windows.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package clistate + +import ( + "math" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procCreateEventW = modkernel32.NewProc("CreateEventW") +) + +const ( + // dwFlags defined for LockFileEx + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + _LOCKFILE_FAIL_IMMEDIATELY = 1 + _LOCKFILE_EXCLUSIVE_LOCK = 2 +) + +func (s *LocalState) lock() error { + // even though we're failing immediately, an overlapped event structure is + // required + ol, err := newOverlapped() + if err != nil { + return err + } + defer syscall.CloseHandle(ol.HEvent) + + return lockFileEx( + syscall.Handle(s.stateFileOut.Fd()), + _LOCKFILE_EXCLUSIVE_LOCK|_LOCKFILE_FAIL_IMMEDIATELY, + 0, // reserved + 0, // bytes low + math.MaxUint32, // bytes high + ol, + ) +} + +func (s *LocalState) unlock() error { + // the file is closed in Unlock + return nil +} + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol 
*syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6( + procLockFileEx.Addr(), + 6, + uintptr(h), + uintptr(flags), + uintptr(reserved), + uintptr(locklow), + uintptr(lockhigh), + uintptr(unsafe.Pointer(ol)), + ) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +// newOverlapped creates a structure used to track asynchronous +// I/O requests that have been issued. +func newOverlapped() (*syscall.Overlapped, error) { + event, err := createEvent(nil, true, false, nil) + if err != nil { + return nil, err + } + return &syscall.Overlapped{HEvent: event}, nil +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } + + r0, _, e1 := syscall.Syscall6( + procCreateEventW.Addr(), + 4, + uintptr(unsafe.Pointer(sa)), + uintptr(_p0), + uintptr(_p1), + uintptr(unsafe.Pointer(name)), + 0, + 0, + ) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/pkg/command/clistate/state.go b/pkg/command/clistate/state.go new file mode 100644 index 00000000000..5aa66b193a1 --- /dev/null +++ b/pkg/command/clistate/state.go @@ -0,0 +1,195 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package state exposes common helpers for working with state from the CLI. +// +// This is a separate package so that backends can use this for consistent +// messaging without creating a circular reference to the command package. 
package clistate

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/kubegems/opentofu/pkg/command/views"
	"github.com/kubegems/opentofu/pkg/helper/slowmessage"
	"github.com/kubegems/opentofu/pkg/states/statemgr"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

const (
	// LockThreshold is how long a lock or unlock attempt may run before a
	// progress message is shown to the user via the configured view.
	LockThreshold = 400 * time.Millisecond

	// LockErrorMessage is the format string (one %s verb, receiving the
	// underlying error) shown when acquiring the state lock fails.
	LockErrorMessage = `Error message: %s

OpenTofu acquires a state lock to protect the state from being written
by multiple users at the same time. Please resolve the issue above and try
again. For most commands, you can disable locking with the "-lock=false"
flag, but this is not recommended.`

	// UnlockErrorMessage is the format string (one %s verb, receiving the
	// underlying error) shown when releasing the state lock fails.
	UnlockErrorMessage = `Error message: %s

OpenTofu acquires a lock when accessing your state to prevent others
running OpenTofu to potentially modify the state at the same time. An
error occurred while releasing this lock. This could mean that the lock
did or did not release properly. If the lock didn't release properly,
OpenTofu may not be able to run future commands since it'll appear as if
the lock is held.

In this scenario, please call the "force-unlock" command to unlock the
state manually. This is a very dangerous operation since if it is done
erroneously it could result in two people modifying state at the same time.
Only call this command if you're certain that the unlock above failed and
that no one else is holding a lock.`
)

// Locker allows for more convenient usage of the lower-level statemgr.Locker
// implementations.
// The statemgr.Locker API requires passing in a statemgr.LockInfo struct. Locker
// implementations are expected to create the required LockInfo struct when
// Lock is called, populate the Operation field with the "reason" string
// provided, and pass that on to the underlying statemgr.Locker.
// Locker implementations are also expected to store any state required to call
// Unlock, which is at a minimum the LockID string returned by the
// statemgr.Locker.
type Locker interface {
	// WithContext returns a shallow copy of the locker with its context
	// changed to ctx.
	WithContext(ctx context.Context) Locker

	// Lock the provided state manager, storing the reason string in the LockInfo.
	Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics

	// Unlock the previously locked state.
	Unlock() tfdiags.Diagnostics

	// Timeout returns the configured timeout duration.
	Timeout() time.Duration
}

// locker is the concrete Locker implementation.
type locker struct {
	ctx     context.Context // bounds the lock retry loop (together with timeout)
	timeout time.Duration   // how long Lock will keep retrying before giving up
	mu      sync.Mutex      // guards state and lockID across Lock/Unlock calls
	state   statemgr.Locker // the state manager most recently passed to Lock
	view    views.StateLocker
	lockID  string // ID returned by the state manager's Lock; "" when unlocked
}

var _ Locker = (*locker)(nil)

// NewLocker creates a new Locker.
// This Locker uses state.LockWithContext to retry the lock until the provided
// timeout is reached, or the context is canceled. Lock progress will be
// reported to the user through the provided UI.
func NewLocker(timeout time.Duration, view views.StateLocker) Locker {
	return &locker{
		ctx:     context.Background(),
		timeout: timeout,
		view:    view,
	}
}

// WithContext returns a new Locker with the specified context, copying the
// timeout and view parameters from the original Locker.
func (l *locker) WithContext(ctx context.Context) Locker {
	if ctx == nil {
		panic("nil context")
	}
	return &locker{
		ctx:     ctx,
		timeout: l.timeout,
		view:    l.view,
	}
}

// Lock locks the given state and outputs to the user if locking is taking
// longer than the threshold. The lock is retried until the context is
// cancelled.
+func (l *locker) Lock(s statemgr.Locker, reason string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + l.mu.Lock() + defer l.mu.Unlock() + + l.state = s + + ctx, cancel := context.WithTimeout(l.ctx, l.timeout) + defer cancel() + + lockInfo := statemgr.NewLockInfo() + lockInfo.Operation = reason + + err := slowmessage.Do(LockThreshold, func() error { + id, err := statemgr.LockWithContext(ctx, s, lockInfo) + l.lockID = id + return err + }, l.view.Locking) + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error acquiring the state lock", + fmt.Sprintf(LockErrorMessage, err), + )) + } + + return diags +} + +func (l *locker) Unlock() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + l.mu.Lock() + defer l.mu.Unlock() + + if l.lockID == "" { + return diags + } + + err := slowmessage.Do(LockThreshold, func() error { + return l.state.Unlock(l.lockID) + }, l.view.Unlocking) + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error releasing the state lock", + fmt.Sprintf(UnlockErrorMessage, err), + )) + } + + return diags + +} + +func (l *locker) Timeout() time.Duration { + return l.timeout +} + +type noopLocker struct{} + +// NewNoopLocker returns a valid Locker that does nothing. +func NewNoopLocker() Locker { + return noopLocker{} +} + +var _ Locker = noopLocker{} + +func (l noopLocker) WithContext(ctx context.Context) Locker { + return l +} + +func (l noopLocker) Lock(statemgr.Locker, string) tfdiags.Diagnostics { + return nil +} + +func (l noopLocker) Unlock() tfdiags.Diagnostics { + return nil +} + +func (l noopLocker) Timeout() time.Duration { + return 0 +} diff --git a/pkg/command/clistate/state_test.go b/pkg/command/clistate/state_test.go new file mode 100644 index 00000000000..e262b0ecb24 --- /dev/null +++ b/pkg/command/clistate/state_test.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package clistate + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" +) + +func TestUnlock(t *testing.T) { + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + + l := NewLocker(0, views.NewStateLocker(arguments.ViewHuman, view)) + l.Lock(statemgr.NewUnlockErrorFull(nil, nil), "test-lock") + + diags := l.Unlock() + if diags.HasErrors() { + t.Log(diags.Err().Error()) + } else { + t.Error("expected error") + } +} diff --git a/pkg/command/cloud.go b/pkg/command/cloud.go new file mode 100644 index 00000000000..cc9c77dee05 --- /dev/null +++ b/pkg/command/cloud.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + + "github.com/hashicorp/go-plugin" + + "github.com/kubegems/opentofu/pkg/cloudplugin" + "github.com/kubegems/opentofu/pkg/cloudplugin/cloudplugin1" + "github.com/kubegems/opentofu/pkg/logging" +) + +// CloudCommand is a Command implementation that interacts with Terraform +// Cloud for operations that are inherently planless. It delegates +// all execution to an internal plugin. +type CloudCommand struct { + Meta +} + +const ( + // DefaultCloudPluginVersion is the implied protocol version, though all + // historical versions are defined explicitly. + DefaultCloudPluginVersion = 1 + + // ExitRPCError is the exit code that is returned if an plugin + // communication error occurred. + ExitRPCError = 99 +) + +var ( + // Handshake is used to verify that the plugin is the appropriate plugin for + // the client. This is not a security verification. 
+ Handshake = plugin.HandshakeConfig{ + MagicCookieKey: "TF_CLOUDPLUGIN_MAGIC_COOKIE", + MagicCookieValue: "721fca41431b780ff3ad2623838faaa178d74c65e1cfdfe19537c31656496bf9f82d6c6707f71d81c8eed0db9043f79e56ab4582d013bc08ead14f57961461dc", + ProtocolVersion: DefaultCloudPluginVersion, + } +) + +func (c *CloudCommand) proxy(args []string, stdout, stderr io.Writer) int { + client := plugin.NewClient(&plugin.ClientConfig{ + HandshakeConfig: Handshake, + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + Cmd: exec.Command("./terraform-cloudplugin"), + Logger: logging.NewCloudLogger(), + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "cloud": &cloudplugin1.GRPCCloudPlugin{}, + }, + }, + }) + defer client.Kill() + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + fmt.Fprintf(stderr, "Failed to create cloud plugin client: %s", err) + return ExitRPCError + } + + // Request the plugin + raw, err := rpcClient.Dispense("cloud") + if err != nil { + fmt.Fprintf(stderr, "Failed to request cloud plugin interface: %s", err) + return ExitRPCError + } + + // Proxy the request + // Note: future changes will need to determine the type of raw when + // multiple versions are possible. + cloud1, ok := raw.(cloudplugin.Cloud1) + if !ok { + c.Ui.Error("If more than one cloudplugin versions are available, they need to be added to the cloud command. This is a bug in OpenTofu.") + return ExitRPCError + } + return cloud1.Execute(args, stdout, stderr) +} + +// Run runs the cloud command with the given arguments. +func (c *CloudCommand) Run(args []string) int { + args = c.Meta.process(args) + + // TODO: Download and verify the signing of the terraform-cloudplugin + // release that is appropriate for this OS/Arch + if _, err := os.Stat("./terraform-cloudplugin"); err != nil { + c.Ui.Warn("terraform-cloudplugin not found. 
This plugin does not have an official release yet.") + return 1 + } + + // TODO: Need to use some type of c.Meta handle here + return c.proxy(args, os.Stdout, os.Stderr) +} + +// Help returns help text for the cloud command. +func (c *CloudCommand) Help() string { + helpText := new(bytes.Buffer) + c.proxy([]string{}, helpText, io.Discard) + + return helpText.String() +} + +// Synopsis returns a short summary of the cloud command. +func (c *CloudCommand) Synopsis() string { + return "Manage cloud backend settings and metadata" +} diff --git a/pkg/command/command.go b/pkg/command/command.go new file mode 100644 index 00000000000..42dda99e39a --- /dev/null +++ b/pkg/command/command.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + "runtime" +) + +// Set to true when we're testing +var test bool = false + +// DefaultDataDir is the default directory for storing local data. +const DefaultDataDir = ".terraform" + +// PluginPathFile is the name of the file in the data dir which stores the list +// of directories supplied by the user with the `-plugin-dir` flag during init. +const PluginPathFile = "plugin_path" + +// pluginMachineName is the directory name used in new plugin paths. +const pluginMachineName = runtime.GOOS + "_" + runtime.GOARCH + +// DefaultPluginVendorDir is the location in the config directory to look for +// user-added plugin binaries. OpenTofu only reads from this path if it +// exists, it is never created by tofu. +const DefaultPluginVendorDir = "terraform.d/plugins/" + pluginMachineName + +// DefaultStateFilename is the default filename used for the state file. 
+const DefaultStateFilename = "terraform.tfstate" + +// DefaultVarsExtension is the default file extension used for vars +const DefaultVarsExtension = ".tfvars" + +// DefaultVarsFilename is the default filename used for vars +const DefaultVarsFilename = "terraform" + DefaultVarsExtension + +// DefaultBackupExtension is added to the state file to form the path +const DefaultBackupExtension = ".backup" + +// DefaultParallelism is the limit Terraform places on total parallel +// operations as it walks the dependency graph. +const DefaultParallelism = 10 + +// ErrUnsupportedLocalOp is the common error message shown for operations +// that require a backend.Local. +const ErrUnsupportedLocalOp = `The configured backend doesn't support this operation. + +The "backend" in OpenTofu defines how OpenTofu operates. The default +backend performs all operations locally on your machine. Your configuration +is configured to use a non-local backend. This backend doesn't support this +operation. +` + +// modulePath returns the path to the root module and validates CLI arguments. +// +// This centralizes the logic for any commands that previously accepted +// a module path via CLI arguments. This will error if any extraneous arguments +// are given and suggest using the -chdir flag instead. +// +// If your command accepts more than one arg, then change the slice bounds +// to pass validation. +func modulePath(args []string) (string, error) { + // TODO: test + + if len(args) > 0 { + return "", fmt.Errorf("Too many command line arguments. 
Did you mean to use -chdir?") + } + + path, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Error getting pwd: %w", err) + } + + return path, nil +} diff --git a/pkg/command/command_test.go b/pkg/command/command_test.go new file mode 100644 index 00000000000..c0a2eff52f2 --- /dev/null +++ b/pkg/command/command_test.go @@ -0,0 +1,1172 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/fs" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/google/go-cmp/cmp" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + backendInit "github.com/kubegems/opentofu/pkg/backend/init" + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/command/workdir" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/initwd" + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" + _ "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + 
"github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" +) + +// These are the directories for our test data and fixtures. +var ( + fixtureDir = "./testdata" + testDataDir = "./testdata" +) + +func init() { + test = true + + // Initialize the backends + backendInit.Init(nil) + + // Expand the data and fixture dirs on init because + // we change the working directory in some tests. + var err error + fixtureDir, err = filepath.Abs(fixtureDir) + if err != nil { + panic(err) + } + + testDataDir, err = filepath.Abs(testDataDir) + if err != nil { + panic(err) + } +} + +func TestMain(m *testing.M) { + // Make sure backend init is initialized, since our tests tend to assume it. + backendInit.Init(nil) + + os.Exit(m.Run()) +} + +// tempWorkingDir constructs a workdir.Dir object referring to a newly-created +// temporary directory. The temporary directory is automatically removed when +// the test and all its subtests complete. +// +// Although workdir.Dir is built to support arbitrary base directories, the +// not-yet-migrated behaviors in command.Meta tend to expect the root module +// directory to be the real process working directory, and so if you intend +// to use the result inside a command.Meta object you must use a pattern +// similar to the following when initializing your test: +// +// wd := tempWorkingDir(t) +// defer testChdir(t, wd.RootModuleDir())() +// +// Note that testChdir modifies global state for the test process, and so a +// test using this pattern must never call t.Parallel(). +func tempWorkingDir(t *testing.T) *workdir.Dir { + t.Helper() + + dirPath := t.TempDir() + t.Logf("temporary directory %s", dirPath) + + return workdir.NewDir(dirPath) +} + +// tempWorkingDirFixture is like tempWorkingDir but it also copies the content +// from a fixture directory into the temporary directory before returning it. 
+// +// The same caveats about working directory apply as for testWorkingDir. See +// the testWorkingDir commentary for an example of how to use this function +// along with testChdir to meet the expectations of command.Meta legacy +// functionality. +func tempWorkingDirFixture(t *testing.T, fixtureName string) *workdir.Dir { + t.Helper() + + dirPath := testTempDir(t) + t.Logf("temporary directory %s with fixture %q", dirPath, fixtureName) + + fixturePath := testFixturePath(fixtureName) + testCopyDir(t, fixturePath, dirPath) + // NOTE: Unfortunately because testCopyDir immediately aborts the test + // on failure, a failure to copy will prevent us from cleaning up the + // temporary directory. Oh well. :( + + return workdir.NewDir(dirPath) +} + +func testFixturePath(name string) string { + return filepath.Join(fixtureDir, name) +} + +func metaOverridesForProvider(p providers.Interface) *testingOverrides { + return &testingOverrides{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): providers.FactoryFixed(p), + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "hashicorp2", "test"): providers.FactoryFixed(p), + }, + } +} + +func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, *configload.Snapshot) { + t.Helper() + + dir := filepath.Join(fixtureDir, name) + // FIXME: We're not dealing with the cleanup function here because + // this testModule function is used all over and so we don't want to + // change its interface at this late stage. + loader, _ := configload.NewLoaderForTests(t) + + // Test modules usually do not refer to remote sources, and for local + // sources only this ultimately just records all of the module paths + // in a JSON file so that we can load them below. 
+ inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + _, instDiags := inst.InstallModules(context.Background(), dir, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + + config, snap, diags := loader.LoadConfigWithSnapshot(dir, configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + return config, snap +} + +// testPlan returns a non-nil noop plan. +func testPlan(t *testing.T) *plans.Plan { + t.Helper() + + // This is what an empty configuration block would look like after being + // decoded with the schema of the "local" backend. + backendConfig := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NullVal(cty.String), + "workspace_dir": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfig, backendConfig.Type()) + if err != nil { + t.Fatal(err) + } + + return &plans.Plan{ + Backend: plans.Backend{ + // This is just a placeholder so that the plan file can be written + // out. Caller may wish to override it to something more "real" + // where the plan will actually be subsequently applied. 
+ Type: "local", + Config: backendConfigRaw, + }, + Changes: plans.NewChanges(), + } +} + +func testPlanFile(t *testing.T, configSnap *configload.Snapshot, state *states.State, plan *plans.Plan) string { + return testPlanFileMatchState(t, configSnap, state, plan, statemgr.SnapshotMeta{}) +} + +func testPlanFileMatchState(t *testing.T, configSnap *configload.Snapshot, state *states.State, plan *plans.Plan, stateMeta statemgr.SnapshotMeta) string { + t.Helper() + + stateFile := &statefile.File{ + Lineage: stateMeta.Lineage, + Serial: stateMeta.Serial, + State: state, + TerraformVersion: version.SemVer, + } + prevStateFile := &statefile.File{ + Lineage: stateMeta.Lineage, + Serial: stateMeta.Serial, + State: state, // we just assume no changes detected during refresh + TerraformVersion: version.SemVer, + } + + path := testTempFile(t) + err := planfile.Create(path, planfile.CreateArgs{ + ConfigSnapshot: configSnap, + PreviousRunStateFile: prevStateFile, + StateFile: stateFile, + Plan: plan, + DependencyLocks: depsfile.NewLocks(), + }, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to create temporary plan file: %s", err) + } + + return path +} + +// testPlanFileNoop is a shortcut function that creates a plan file that +// represents no changes and returns its path. This is useful when a test +// just needs any plan file, and it doesn't matter what is inside it. 
+func testPlanFileNoop(t *testing.T) string { + snap := &configload.Snapshot{ + Modules: map[string]*configload.SnapshotModule{ + "": { + Dir: ".", + Files: map[string][]byte{ + "main.tf": nil, + }, + }, + }, + } + state := states.NewState() + plan := testPlan(t) + return testPlanFile(t, snap, state, plan) +} + +func testFileEquals(t *testing.T, got, want string) { + t.Helper() + + actual, err := os.ReadFile(got) + if err != nil { + t.Fatalf("error reading %s", got) + } + + expected, err := os.ReadFile(want) + if err != nil { + t.Fatalf("error reading %s", want) + } + + if diff := cmp.Diff(string(actual), string(expected)); len(diff) > 0 { + t.Fatalf("got:\n%s\nwant:\n%s\ndiff:\n%s", actual, expected, diff) + } +} + +func testReadPlan(t *testing.T, path string) *plans.Plan { + t.Helper() + + f, err := planfile.Open(path, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("error opening plan file %q: %s", path, err) + } + + p, err := f.ReadPlan() + if err != nil { + t.Fatalf("error reading plan from plan file %q: %s", path, err) + } + + return p +} + +// testState returns a test State structure that we use for a lot of tests. +func testState() *states.State { + return states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + // The weird whitespace here is reflective of how this would + // get written out in a real state file, due to the indentation + // of all of the containing wrapping objects and arrays. 
+ AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // DeepCopy is used here to ensure our synthetic state matches exactly + // with a state that will have been copied during the command + // operation, and all fields have been copied correctly. + }).DeepCopy() +} + +// writeStateForTesting is a helper that writes the given naked state to the +// given writer, generating a stub *statefile.File wrapper which is then +// immediately discarded. +func writeStateForTesting(state *states.State, w io.Writer) error { + sf := &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: state, + } + return statefile.Write(sf, w, encryption.StateEncryptionDisabled()) +} + +// testStateMgrCurrentLineage returns the current lineage for the given state +// manager, or the empty string if it does not use lineage. This is primarily +// for testing against the local backend, which always supports lineage. +func testStateMgrCurrentLineage(mgr statemgr.Persistent) string { + if pm, ok := mgr.(statemgr.PersistentMeta); ok { + m := pm.StateSnapshotMeta() + return m.Lineage + } + return "" +} + +// markStateForMatching is a helper that writes a specific marker value to +// a state so that it can be recognized later with getStateMatchingMarker. +// +// Internally this just sets a root module output value called "testing_mark" +// to the given string value. If the state is being checked in other ways, +// the test code may need to compensate for the addition or overwriting of this +// special output value name. 
+// +// The given mark string is returned verbatim, to allow the following pattern +// in tests: +// +// mark := markStateForMatching(state, "foo") +// // (do stuff to the state) +// assertStateHasMarker(state, mark) +func markStateForMatching(state *states.State, mark string) string { + state.RootModule().SetOutputValue("testing_mark", cty.StringVal(mark), false) + return mark +} + +// getStateMatchingMarker is used with markStateForMatching to retrieve the +// mark string previously added to the given state. If no such mark is present, +// the result is an empty string. +func getStateMatchingMarker(state *states.State) string { + os := state.RootModule().OutputValues["testing_mark"] + if os == nil { + return "" + } + v := os.Value + if v.Type() == cty.String && v.IsKnown() && !v.IsNull() { + return v.AsString() + } + return "" +} + +// stateHasMarker is a helper around getStateMatchingMarker that also includes +// the equality test, for more convenient use in test assertion branches. +func stateHasMarker(state *states.State, want string) bool { + return getStateMatchingMarker(state) == want +} + +// assertStateHasMarker wraps stateHasMarker to automatically generate a +// fatal test result (i.e. t.Fatal) if the marker doesn't match. +func assertStateHasMarker(t *testing.T, state *states.State, want string) { + if !stateHasMarker(state, want) { + t.Fatalf("wrong state marker\ngot: %q\nwant: %q", getStateMatchingMarker(state), want) + } +} + +func testStateFile(t *testing.T, s *states.State) string { + t.Helper() + + path := testTempFile(t) + + f, err := os.Create(path) + if err != nil { + t.Fatalf("failed to create temporary state file %s: %s", path, err) + } + defer f.Close() + + err = writeStateForTesting(s, f) + if err != nil { + t.Fatalf("failed to write state to temporary file %s: %s", path, err) + } + + return path +} + +// testStateFileDefault writes the state out to the default statefile +// in the cwd. Use `testCwd` to change into a temp cwd. 
func testStateFileDefault(t *testing.T, s *states.State) {
	t.Helper()

	f, err := os.Create(DefaultStateFilename)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer f.Close()

	if err := writeStateForTesting(s, f); err != nil {
		t.Fatalf("err: %s", err)
	}
}

// testStateFileWorkspaceDefault writes the state out to the default statefile
// for the given workspace in the cwd. Use `testCwd` to change into a temp cwd.
// It returns the path of the file that was written.
func testStateFileWorkspaceDefault(t *testing.T, workspace string, s *states.State) string {
	t.Helper()

	// Workspace states live under the local backend's workspace directory,
	// one subdirectory per workspace.
	workspaceDir := filepath.Join(backendLocal.DefaultWorkspaceDir, workspace)
	err := os.MkdirAll(workspaceDir, os.ModePerm)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	path := filepath.Join(workspaceDir, DefaultStateFilename)
	f, err := os.Create(path)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer f.Close()

	if err := writeStateForTesting(s, f); err != nil {
		t.Fatalf("err: %s", err)
	}

	return path
}

// testStateFileRemote writes the state out to the remote statefile
// in the cwd. Use `testCwd` to change into a temp cwd.
// It returns the path of the file that was written.
func testStateFileRemote(t *testing.T, s *legacy.State) string {
	t.Helper()

	// The backend cache state lives inside the data dir (".terraform").
	path := filepath.Join(DefaultDataDir, DefaultStateFilename)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		t.Fatalf("err: %s", err)
	}

	f, err := os.Create(path)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer f.Close()

	if err := legacy.WriteState(s, f); err != nil {
		t.Fatalf("err: %s", err)
	}

	return path
}

// testStateRead reads the state from a file, with state encryption disabled,
// failing the test on any error.
func testStateRead(t *testing.T, path string) *states.State {
	t.Helper()

	f, err := os.Open(path)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer f.Close()

	sf, err := statefile.Read(f, encryption.StateEncryptionDisabled())
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return sf.State
}

// testDataStateRead reads a "data state", which is a file format resembling
// our state format v3 that is used only to track current backend settings.
//
// This old format still uses *legacy.State, but should be replaced with
// a more specialized type in a later release.
func testDataStateRead(t *testing.T, path string) *legacy.State {
	t.Helper()

	f, err := os.Open(path)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer f.Close()

	s, err := legacy.ReadState(f)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return s
}

// testStateOutput tests that the state at the given path contains
// the expected state string.
+func testStateOutput(t *testing.T, path string, expected string) { + t.Helper() + + newState := testStateRead(t, path) + actual := strings.TrimSpace(newState.String()) + expected = strings.TrimSpace(expected) + if actual != expected { + t.Fatalf("expected:\n%s\nactual:\n%s", expected, actual) + } +} + +func testProvider() *tofu.MockProvider { + p := new(tofu.MockProvider) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + return resp + } + + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + return p +} + +func testTempFile(t *testing.T) string { + t.Helper() + + return filepath.Join(testTempDir(t), "state.tfstate") +} + +func testTempDir(t *testing.T) string { + t.Helper() + d, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + return d +} + +// testChdir changes the directory and returns a function to defer to +// revert the old cwd. +func testChdir(t *testing.T, new string) func() { + t.Helper() + + old, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := os.Chdir(new); err != nil { + t.Fatalf("err: %v", err) + } + + return func() { + // Re-run the function ignoring the defer result + testChdir(t, old) + } +} + +// testCwd is used to change the current working directory into a temporary +// directory. The cleanup is performed automatically after the test and all its +// subtests complete. 
+func testCwd(t *testing.T) string { + t.Helper() + + tmp := t.TempDir() + + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %v", err) + } + + if err := os.Chdir(tmp); err != nil { + t.Fatalf("err: %v", err) + } + + t.Cleanup(func() { + if err := os.Chdir(cwd); err != nil { + t.Fatalf("err: %v", err) + } + }) + + return tmp +} + +// testStdinPipe changes os.Stdin to be a pipe that sends the data from +// the reader before closing the pipe. +// +// The returned function should be deferred to properly clean up and restore +// the original stdin. +func testStdinPipe(t *testing.T, src io.Reader) func() { + t.Helper() + + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Modify stdin to point to our new pipe + old := os.Stdin + os.Stdin = r + + // Copy the data from the reader to the pipe + go func() { + defer w.Close() + io.Copy(w, src) + }() + + return func() { + // Close our read end + r.Close() + + // Reset stdin + os.Stdin = old + } +} + +// Modify os.Stdout to write to the given buffer. Note that this is generally +// not useful since the commands are configured to write to a cli.Ui, not +// Stdout directly. Commands like `console` though use the raw stdout. +func testStdoutCapture(t *testing.T, dst io.Writer) func() { + t.Helper() + + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Modify stdout + old := os.Stdout + os.Stdout = w + + // Copy + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer r.Close() + io.Copy(dst, r) + }() + + return func() { + // Close the writer end of the pipe + w.Sync() + w.Close() + + // Reset stdout + os.Stdout = old + + // Wait for the data copy to complete to avoid a race reading data + <-doneCh + } +} + +// testInteractiveInput configures tests so that the answers given are sent +// in order to interactive prompts. The returned function must be called +// in a defer to clean up. 
+func testInteractiveInput(t *testing.T, answers []string) func() { + t.Helper() + + // Disable test mode so input is called + test = false + + // Set up reader/writers + testInputResponse = answers + defaultInputReader = bytes.NewBufferString("") + defaultInputWriter = new(bytes.Buffer) + + // Return the cleanup + return func() { + test = true + testInputResponse = nil + } +} + +// testInputMap configures tests so that the given answers are returned +// for calls to Input when the right question is asked. The key is the +// question "Id" that is used. +func testInputMap(t *testing.T, answers map[string]string) func() { + t.Helper() + + // Disable test mode so input is called + test = false + + // Set up reader/writers + defaultInputReader = bytes.NewBufferString("") + defaultInputWriter = new(bytes.Buffer) + + // Setup answers + testInputResponse = nil + testInputResponseMap = answers + + // Return the cleanup + return func() { + var unusedAnswers = testInputResponseMap + + // First, clean up! + test = true + testInputResponseMap = nil + + if len(unusedAnswers) > 0 { + t.Fatalf("expected no unused answers provided to command.testInputMap, got: %v", unusedAnswers) + } + } +} + +// testBackendState is used to make a test HTTP server to test a configured +// backend. This returns the complete state that can be saved. Use +// `testStateFileRemote` to write the returned state. +// +// When using this function, the configuration fixture for the test must +// include an empty configuration block for the HTTP backend, like this: +// +// terraform { +// backend "http" { +// } +// } +// +// If such a block isn't present, or if it isn't empty, then an error will +// be returned about the backend configuration having changed and that +// "tofu init" must be run, since the test backend config cache created +// by this function contains the hash for an empty configuration. 
func testBackendState(t *testing.T, s *states.State, c int) (*legacy.State, *httptest.Server) {
	t.Helper()

	// b64md5 is captured by the handler closure below and only populated
	// later (once the state is serialized), so the declaration order here
	// matters: handler first, digest second.
	var b64md5 string
	buf := bytes.NewBuffer(nil)

	// cb serves the serialized state on GET and answers PUT with the
	// caller-chosen status code c, so tests can simulate write failures.
	cb := func(resp http.ResponseWriter, req *http.Request) {
		if req.Method == "PUT" {
			resp.WriteHeader(c)
			return
		}
		if s == nil {
			resp.WriteHeader(404)
			return
		}

		resp.Header().Set("Content-MD5", b64md5)
		resp.Write(buf.Bytes())
	}

	// If a state was given, make sure we calculate the proper b64md5
	if s != nil {
		err := statefile.Write(&statefile.File{State: s}, buf, encryption.StateEncryptionDisabled())
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		md5 := md5.Sum(buf.Bytes())
		b64md5 = base64.StdEncoding.EncodeToString(md5[:16])
	}

	srv := httptest.NewServer(http.HandlerFunc(cb))

	// Compute the config hash for an *empty* http backend block; this is
	// why the test fixture must contain an empty `backend "http" {}` —
	// any attributes in the fixture would produce a different hash and
	// trigger a "backend configuration changed" error.
	backendConfig := &configs.Backend{
		Type:   "http",
		Config: configs.SynthBody("", map[string]cty.Value{}),
		Eval:   configs.NewStaticEvaluator(nil, configs.RootModuleCallForTesting()),
	}
	b := backendInit.Backend("http")(encryption.StateEncryptionDisabled())
	configSchema := b.ConfigSchema()
	hash, _ := backendConfig.Hash(configSchema)

	// The returned backend state points the http backend at our server.
	state := legacy.NewState()
	state.Backend = &legacy.BackendState{
		Type:      "http",
		ConfigRaw: json.RawMessage(fmt.Sprintf(`{"address":%q}`, srv.URL)),
		Hash:      uint64(hash),
	}

	return state, srv
}

// testRemoteState is used to make a test HTTP server to return a given
// state file that can be used for testing legacy remote state.
//
// The return values are a *legacy.State instance that should be written
// as the "data state" (really: backend state) and the server that the
// returned data state refers to.
+func testRemoteState(t *testing.T, s *states.State, c int) (*legacy.State, *httptest.Server) { + t.Helper() + + var b64md5 string + buf := bytes.NewBuffer(nil) + + cb := func(resp http.ResponseWriter, req *http.Request) { + if req.Method == "PUT" { + resp.WriteHeader(c) + return + } + if s == nil { + resp.WriteHeader(404) + return + } + + resp.Header().Set("Content-MD5", b64md5) + resp.Write(buf.Bytes()) + } + + retState := legacy.NewState() + + srv := httptest.NewServer(http.HandlerFunc(cb)) + b := &legacy.BackendState{ + Type: "http", + } + b.SetConfig(cty.ObjectVal(map[string]cty.Value{ + "address": cty.StringVal(srv.URL), + }), &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "address": { + Type: cty.String, + Required: true, + }, + }, + }) + retState.Backend = b + + if s != nil { + err := statefile.Write(&statefile.File{State: s}, buf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to write initial state: %v", err) + } + } + + return retState, srv +} + +// testlockState calls a separate process to the lock the state file at path. +// deferFunc should be called in the caller to properly unlock the file. +// Since many tests change the working directory, the sourcedir argument must be +// supplied to locate the statelocker.go source. 
func testLockState(t *testing.T, sourceDir, path string) (func(), error) {
	// build and run the binary ourselves so we can quickly terminate it for cleanup
	buildDir := t.TempDir()

	source := filepath.Join(sourceDir, "statelocker.go")
	lockBin := filepath.Join(buildDir, "statelocker")

	// Build from the parent of sourceDir so the helper compiles against
	// the same module context as the tests.
	cmd := exec.Command("go", "build", "-o", lockBin, source)
	cmd.Dir = filepath.Dir(sourceDir)

	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("%w %s", err, out)
	}

	// Run the locker with both stdout and stderr routed through one pipe
	// so we can observe its progress below.
	locker := exec.Command(lockBin, path)
	pr, pw, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	defer pr.Close()
	defer pw.Close()
	locker.Stderr = pw
	locker.Stdout = pw

	if err := locker.Start(); err != nil {
		return nil, err
	}
	// deferFunc terminates the child and reaps it; callers must invoke it
	// to release the lock.
	deferFunc := func() {
		locker.Process.Signal(syscall.SIGTERM)
		locker.Wait()
	}

	// wait for the process to lock: the helper prints "LOCKID..." once the
	// lock is held, so a single read is enough to synchronize on.
	buf := make([]byte, 1024)
	n, err := pr.Read(buf)
	if err != nil {
		return deferFunc, fmt.Errorf("read from statelocker returned: %w", err)
	}

	output := string(buf[:n])
	if !strings.HasPrefix(output, "LOCKID") {
		return deferFunc, fmt.Errorf("statelocker wrote: %s", string(buf[:n]))
	}
	return deferFunc, nil
}

// testCopyDir recursively copies a directory tree, attempting to preserve
// permissions. Source directory must exist, destination directory may exist
// but will be created if not; it should typically be a temporary directory,
// and thus already created using os.MkdirTemp or similar.
// Symlinked entries are followed: the file or directory they point to is
// copied in their place.
+func testCopyDir(t *testing.T, src, dst string) { + t.Helper() + + src = filepath.Clean(src) + dst = filepath.Clean(dst) + + si, err := os.Stat(src) + if err != nil { + t.Fatal(err) + } + if !si.IsDir() { + t.Fatal("source is not a directory") + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } + + err = os.MkdirAll(dst, si.Mode()) + if err != nil { + t.Fatal(err) + } + + entries, err := os.ReadDir(src) + if err != nil { + return + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + // If the entry is a symlink, we copy the contents + for entry.Type()&os.ModeSymlink != 0 { + target, err := os.Readlink(srcPath) + if err != nil { + t.Fatal(err) + } + + fi, err := os.Stat(target) + if err != nil { + t.Fatal(err) + } + entry = fs.FileInfoToDirEntry(fi) + } + + if entry.IsDir() { + testCopyDir(t, srcPath, dstPath) + } else { + err = copy.CopyFile(srcPath, dstPath) + if err != nil { + t.Fatal(err) + } + } + } +} + +// normalizeJSON removes all insignificant whitespace from the given JSON buffer +// and returns it as a string for easier comparison. +func normalizeJSON(t *testing.T, src []byte) string { + t.Helper() + var buf bytes.Buffer + err := json.Compact(&buf, src) + if err != nil { + t.Fatalf("error normalizing JSON: %s", err) + } + return buf.String() +} + +func mustResourceAddr(s string) addrs.ConfigResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr.Config() +} + +// This map from provider type name to namespace is used by the fake registry +// when called via LookupLegacyProvider. Providers not in this map will return +// a 404 Not Found error. +var legacyProviderNamespaces = map[string]string{ + "foo": "hashicorp", + "bar": "hashicorp", + "baz": "terraform-providers", + "qux": "hashicorp", +} + +// This map is used to mock the provider redirect feature. 
+var movedProviderNamespaces = map[string]string{ + "qux": "acme", +} + +// testServices starts up a local HTTP server running a fake provider registry +// service which responds only to discovery requests and legacy provider lookup +// API calls. +// +// The final return value is a function to call at the end of a test function +// to shut down the test server. After you call that function, the discovery +// object becomes useless. +func testServices(t *testing.T) (services *disco.Disco, cleanup func()) { + server := httptest.NewServer(http.HandlerFunc(fakeRegistryHandler)) + + services = disco.New() + services.ForceHostServices(svchost.Hostname("registry.opentofu.org"), map[string]interface{}{ + "providers.v1": server.URL + "/providers/v1/", + }) + + return services, func() { + server.Close() + } +} + +// testRegistrySource is a wrapper around testServices that uses the created +// discovery object to produce a Source instance that is ready to use with the +// fake registry services. +// +// As with testServices, the final return value is a function to call at the end +// of your test in order to shut down the test server. 
+func testRegistrySource(t *testing.T) (source *getproviders.RegistrySource, cleanup func()) { + services, close := testServices(t) + source = getproviders.NewRegistrySource(services) + return source, close +} + +func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { + path := req.URL.EscapedPath() + + if !strings.HasPrefix(path, "/providers/v1/") { + resp.WriteHeader(404) + resp.Write([]byte(`not a provider registry endpoint`)) + return + } + + pathParts := strings.Split(path, "/")[3:] + + if len(pathParts) != 3 { + resp.WriteHeader(404) + resp.Write([]byte(`unrecognized path scheme`)) + return + } + + if pathParts[2] != "versions" { + resp.WriteHeader(404) + resp.Write([]byte(`this registry only supports legacy namespace lookup requests`)) + return + } + + name := pathParts[1] + + // Legacy lookup + if pathParts[0] == "-" { + if namespace, ok := legacyProviderNamespaces[name]; ok { + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + if movedNamespace, ok := movedProviderNamespaces[name]; ok { + resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","moved_to":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name, movedNamespace, name))) + } else { + resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name))) + } + } else { + resp.WriteHeader(404) + resp.Write([]byte(`provider not found`)) + } + return + } + + // Also return versions for redirect target + if namespace, ok := movedProviderNamespaces[name]; ok && pathParts[0] == namespace { + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(fmt.Sprintf(`{"id":"%s/%s","versions":[{"version":"1.0.0","protocols":["4"]}]}`, namespace, name))) + } else { + resp.WriteHeader(404) + resp.Write([]byte(`provider not found`)) + } +} + +func testView(t *testing.T) (*views.View, func(*testing.T) *terminal.TestOutput) { + streams, done := 
terminal.StreamsForTesting(t) + return views.NewView(streams), done +} + +// checkGoldenReference compares the given test output with a known "golden" output log +// located under the specified fixture path. +// +// If any of these tests fail, please communicate with Terraform Cloud folks before resolving, +// as changes to UI output may also affect the behavior of Terraform Cloud's structured run output. +func checkGoldenReference(t *testing.T, output *terminal.TestOutput, fixturePathName string) { + t.Helper() + + // Load the golden reference fixture + wantFile, err := os.Open(path.Join(testFixturePath(fixturePathName), "output.jsonlog")) + if err != nil { + t.Fatalf("failed to open output file: %s", err) + } + defer wantFile.Close() + wantBytes, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + want := string(wantBytes) + + got := output.Stdout() + + // Split the output and the reference into lines so that we can compare + // messages + got = strings.TrimSuffix(got, "\n") + gotLines := strings.Split(got, "\n") + + want = strings.TrimSuffix(want, "\n") + wantLines := strings.Split(want, "\n") + + if len(gotLines) != len(wantLines) { + t.Errorf("unexpected number of log lines: got %d, want %d\n"+ + "NOTE: This failure may indicate a UI change affecting the behavior of structured run output on TFC.\n"+ + "Please communicate with Terraform Cloud team before resolving", len(gotLines), len(wantLines)) + } + + // Verify that the log starts with a version message + type versionMessage struct { + Level string `json:"@level"` + Message string `json:"@message"` + Type string `json:"type"` + OpenTofu string `json:"tofu"` + UI string `json:"ui"` + } + var gotVersion versionMessage + if err := json.Unmarshal([]byte(gotLines[0]), &gotVersion); err != nil { + t.Errorf("failed to unmarshal version line: %s\n%s", err, gotLines[0]) + } + wantVersion := versionMessage{ + "info", + fmt.Sprintf("OpenTofu %s", version.String()), + 
"version", + version.String(), + views.JSON_UI_VERSION, + } + if !cmp.Equal(wantVersion, gotVersion) { + t.Errorf("unexpected first message:\n%s", cmp.Diff(wantVersion, gotVersion)) + } + + // Compare the rest of the lines against the golden reference + var gotLineMaps []map[string]interface{} + for i, line := range gotLines[1:] { + index := i + 1 + var gotMap map[string]interface{} + if err := json.Unmarshal([]byte(line), &gotMap); err != nil { + t.Errorf("failed to unmarshal got line %d: %s\n%s", index, err, gotLines[index]) + } + if _, ok := gotMap["@timestamp"]; !ok { + t.Errorf("missing @timestamp field in log: %s", gotLines[index]) + } + delete(gotMap, "@timestamp") + gotLineMaps = append(gotLineMaps, gotMap) + } + var wantLineMaps []map[string]interface{} + for i, line := range wantLines[1:] { + index := i + 1 + var wantMap map[string]interface{} + if err := json.Unmarshal([]byte(line), &wantMap); err != nil { + t.Errorf("failed to unmarshal want line %d: %s\n%s", index, err, gotLines[index]) + } + wantLineMaps = append(wantLineMaps, wantMap) + } + if diff := cmp.Diff(wantLineMaps, gotLineMaps); diff != "" { + t.Errorf("wrong output lines\n%s\n"+ + "NOTE: This failure may indicate a UI change affecting the behavior of structured run output on TFC.\n"+ + "Please communicate with Terraform Cloud team before resolving", diff) + } +} diff --git a/pkg/command/console.go b/pkg/command/console.go new file mode 100644 index 00000000000..041fd4562c7 --- /dev/null +++ b/pkg/command/console.go @@ -0,0 +1,241 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/backend"
	"github.com/kubegems/opentofu/pkg/command/arguments"
	"github.com/kubegems/opentofu/pkg/repl"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/kubegems/opentofu/pkg/tofu"

	"github.com/mitchellh/cli"
)

// ConsoleCommand is a Command implementation that starts an interactive
// console that can be used to try expressions with the current config.
type ConsoleCommand struct {
	Meta
}

// Run parses flags, loads configuration/backend/encryption, builds an
// evaluation scope against the current state, and then drives either the
// piped (stdin is a pipe) or interactive console loop. Returns a process
// exit code (0 on success, 1 on any error).
func (c *ConsoleCommand) Run(args []string) int {
	args = c.Meta.process(args)
	cmdFlags := c.Meta.extendedFlagSet("console")
	cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path")
	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
	if err := cmdFlags.Parse(args); err != nil {
		c.Ui.Error(fmt.Sprintf("Error parsing command line flags: %s\n", err.Error()))
		return 1
	}

	configPath, err := modulePath(cmdFlags.Args())
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}
	configPath = c.Meta.normalizePath(configPath)

	// Check for user-supplied plugin path
	if c.pluginPath, err = c.loadPluginPath(); err != nil {
		c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err))
		return 1
	}

	// diags accumulates warnings across all the setup steps below so they
	// can be shown together; the accumulation order matters for output.
	var diags tfdiags.Diagnostics

	// Load the encryption configuration
	enc, encDiags := c.EncryptionFromPath(configPath)
	diags = diags.Append(encDiags)
	if encDiags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}

	backendConfig, backendDiags := c.loadBackendConfig(configPath)
	diags = diags.Append(backendDiags)
	if diags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}

	// Load the backend
	b, backendDiags := c.Backend(&BackendOpts{
		Config: backendConfig,
	}, enc.State())
	diags = diags.Append(backendDiags)
	if backendDiags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}

	// We require a local backend
	local, ok := b.(backend.Local)
	if !ok {
		c.showDiagnostics(diags) // in case of any warnings in here
		c.Ui.Error(ErrUnsupportedLocalOp)
		return 1
	}

	// This is a read-only command
	c.ignoreRemoteVersionConflict(b)

	// Build the operation
	opReq := c.Operation(b, arguments.ViewHuman, enc)
	opReq.ConfigDir = configPath
	opReq.ConfigLoader, err = c.initConfigLoader()
	opReq.AllowUnsetVariables = true // we'll just evaluate them as unknown
	if err != nil {
		diags = diags.Append(err)
		c.showDiagnostics(diags)
		return 1
	}

	{
		// Setup required variables/call for operation (usually done in Meta.RunOperation)
		var moreDiags, callDiags tfdiags.Diagnostics
		opReq.Variables, moreDiags = c.collectVariableValues()
		opReq.RootCall, callDiags = c.rootModuleCall(opReq.ConfigDir)
		diags = diags.Append(moreDiags).Append(callDiags)
		if moreDiags.HasErrors() {
			c.showDiagnostics(diags)
			return 1
		}
	}

	// Get the context
	lr, _, ctxDiags := local.LocalRun(opReq)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}

	// Successfully creating the context can result in a lock, so ensure we release it
	defer func() {
		diags := opReq.StateLocker.Unlock()
		if diags.HasErrors() {
			c.showDiagnostics(diags)
		}
	}()

	// Set up the UI so we can output directly to stdout
	ui := &cli.BasicUi{
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	}

	evalOpts := &tofu.EvalOpts{}
	if lr.PlanOpts != nil {
		// the LocalRun type is built primarily to support the main operations,
		// so the variable values end up in the "PlanOpts" even though we're
		// not actually making a plan.
		evalOpts.SetVariables = lr.PlanOpts.SetVariables
	}

	// Before we can evaluate expressions, we must compute and populate any
	// derived values (input variables, local values, output values)
	// that are not stored in the persistent state.
	scope, scopeDiags := lr.Core.Eval(lr.Config, lr.InputState, addrs.RootModuleInstance, evalOpts)
	diags = diags.Append(scopeDiags)
	if scope == nil {
		// scope is nil if there are errors so bad that we can't even build a scope.
		// Otherwise, we'll try to eval anyway.
		c.showDiagnostics(diags)
		return 1
	}

	// set the ConsoleMode to true so any available console-only functions included.
	scope.ConsoleMode = true

	if diags.HasErrors() {
		diags = diags.Append(tfdiags.SimpleWarning("Due to the problems above, some expressions may produce unexpected results."))
	}

	// Before we become interactive we'll show any diagnostics we encountered
	// during initialization, and then afterwards the driver will manage any
	// further diagnostics itself.
	c.showDiagnostics(diags)

	// IO Loop
	session := &repl.Session{
		Scope: scope,
	}

	// Determine if stdin is a pipe. If so, we evaluate directly.
	if c.StdinPiped() {
		return c.modePiped(session, ui)
	}

	return c.modeInteractive(session, ui)
}

// modePiped reads expressions line by line from stdin (non-interactive),
// buffering multi-line constructs via consoleBracketState, and evaluates
// each complete command. The first error aborts with exit code 1.
func (c *ConsoleCommand) modePiped(session *repl.Session, ui cli.Ui) int {
	scanner := bufio.NewScanner(os.Stdin)

	var consoleState consoleBracketState

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		// we check if there is no escaped new line at the end, or any open brackets
		// if we have neither, then we can execute
		fullCommand, bracketState := consoleState.UpdateState(line)
		if bracketState <= 0 {
			result, exit, diags := session.Handle(fullCommand)
			if diags.HasErrors() {
				// We're in piped mode, so we'll exit immediately on error.
				c.showDiagnostics(diags)
				return 1
			}
			if exit {
				return 0
			}
			// Output the result
			ui.Output(result)
		}
	}

	return 0
}

// Help returns the long-form usage text for "tofu console".
func (c *ConsoleCommand) Help() string {
	helpText := `
Usage: tofu [global options] console [options]

  Starts an interactive console for experimenting with OpenTofu
  interpolations.

  This will open an interactive console that you can use to type
  interpolations into and inspect their values. This command loads the
  current state. This lets you explore and test interpolations before
  using them in future configurations.

  This command will never modify your state.

Options:

  -state=path       Legacy option for the local backend only. See the local
                    backend's documentation for more information.

  -var 'foo=bar'    Set a variable in the OpenTofu configuration. This
                    flag can be set multiple times.

  -var-file=foo     Set variables in the OpenTofu configuration from
                    a file. If "terraform.tfvars" or any ".auto.tfvars"
                    files are present, they will be automatically loaded.
`
	return strings.TrimSpace(helpText)
}

// Synopsis returns the one-line description shown in command listings.
func (c *ConsoleCommand) Synopsis() string {
	return "Try OpenTofu expressions at an interactive command prompt"
}
diff --git a/pkg/command/console_interactive.go b/pkg/command/console_interactive.go
new file mode 100644
index 00000000000..3018441a9ab
--- /dev/null
+++ b/pkg/command/console_interactive.go
@@ -0,0 +1,83 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/kubegems/opentofu/pkg/repl" + + "github.com/chzyer/readline" + "github.com/mitchellh/cli" +) + +func (c *ConsoleCommand) modeInteractive(session *repl.Session, ui cli.Ui) int { + // Configure input + l, err := readline.NewEx(&readline.Config{ + Prompt: "> ", + InterruptPrompt: "^C", + EOFPrompt: "exit", + HistorySearchFold: true, + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + }) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "Error initializing console: %s", + err)) + return 1 + } + defer l.Close() + + var consoleState consoleBracketState + + for { + // Read a line + line, err := l.Readline() + if errors.Is(err, readline.ErrInterrupt) { + if len(line) == 0 { + break + } else { + continue + } + } else if errors.Is(err, io.EOF) { + break + } + line = strings.TrimSpace(line) + + // we update the state with the new line, so if we have open + // brackets we know not to execute the command just yet + fullCommand, openState := consoleState.UpdateState(line) + + switch { + case openState > 0: + // here there are open brackets somewhere, so we don't execute it + // as we are in a bracket we update the prompt. we use one . 
per layer pf brackets + l.SetPrompt(fmt.Sprintf("%s ", strings.Repeat(".", openState))) + default: + out, exit, diags := session.Handle(fullCommand) + if diags.HasErrors() { + c.showDiagnostics(diags) + } + if exit { + break + } + + // clear the state and buffer as we have executed a command + // we also reset the prompt + l.SetPrompt("> ") + + ui.Output(out) + } + } + + return 0 +} diff --git a/pkg/command/console_interactive_test.go b/pkg/command/console_interactive_test.go new file mode 100644 index 00000000000..5d5122c05b6 --- /dev/null +++ b/pkg/command/console_interactive_test.go @@ -0,0 +1,121 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestConsole_multiline_interactive(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("console-multiline-vars"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + type testCase struct { + input string + expected string + } + + tests := map[string]testCase{ + "single_line": { + input: `var.counts.lalala`, + expected: "1\n", + }, + "basic_multi_line": { + input: ` + var.counts.lalala + var.counts.lololo`, + expected: "\n1\n2\n", + }, + "backets_multi_line": { + input: ` + var.counts.lalala + split( + 
"_", + "lalala_lolol_lelelele" + )`, + expected: "\n1\ntolist([\n \"lalala\",\n \"lolol\",\n \"lelelele\",\n])\n", + }, + "baces_multi_line": { + input: ` + { + for key, value in var.counts : key => value + if value == 1 + }`, + expected: "\n{\n \"lalala\" = 1\n}\n", + }, + "escaped_new_line": { + input: ` + 5 + 4 \ + + `, + expected: "\n9\n\n", + }, + "heredoc": { + input: ` + { + default = <<-EOT + lulululu + EOT + }`, + expected: "\n{\n \"default\" = <<-EOT\n lulululu\n \n EOT\n}\n", + }, + "quoted_braces": { + input: "{\ndefault = format(\"%s%s%s\",\"{\",var.counts.lalala,\"}\")\n}", + expected: "{\n \"default\" = \"{1}\"\n}\n", + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader(tc.input))() + outCloser := testStdoutCapture(t, &output) + + args := []string{} + code := c.Run(args) + outCloser() + + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := output.String() + if got != tc.expected { + t.Fatalf("unexpected output. For input: %s\ngot: %q\nexpected: %q", tc.input, got, tc.expected) + } + }) + } +} diff --git a/pkg/command/console_state.go b/pkg/command/console_state.go new file mode 100644 index 00000000000..8da8a2e0962 --- /dev/null +++ b/pkg/command/console_state.go @@ -0,0 +1,101 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +type consoleBracketState struct { + openNewLine int + brace int + bracket int + parentheses int + buffer []string +} + +// commandInOpenState return an int to inform if brackets are open +// or if any escaped new lines +// in the console and we should hold off on processing the commands +// it returns 3 states: +// -1 is returned the is an incorrect amount of brackets. +// for example "())" has too many close brackets +// 0 is returned if the brackets are closed. +// for examples "()" or "" would be in a close bracket state +// >=1 is returned for the amount of open brackets. +// for example "({" would return 2. "({}" would return 1 +func (c *consoleBracketState) commandInOpenState() int { + switch { + case c.brace < 0: + fallthrough + case c.bracket < 0: + fallthrough + case c.parentheses < 0: + return -1 + } + + // we calculate open brackets, braces and parentheses by the diff between each count + var total int + total += c.openNewLine + total += c.brace + total += c.bracket + total += c.parentheses + return total +} + +// UpdateState updates the state of the console with the latest line data +func (c *consoleBracketState) UpdateState(line string) (string, int) { + defer c.checkStateAndClearBuffer() + // as new lines are a kind of "one off" we reset each update + c.openNewLine = 0 + + // escaped new lines are treated as a "one off" bracket + // the four \\\\ means we have a false positive for a new line, as it's just an escaped \.. 
+ if strings.HasSuffix(line, "\\") && !strings.HasSuffix(line, "\\\\") { + c.openNewLine++ + } + + line = strings.TrimSuffix(line, "\\") + if len(line) == 0 { + // we can skip empty lines + return c.getCommand(), c.commandInOpenState() + } + c.buffer = append(c.buffer, line) + + tokens, _ := hclsyntax.LexConfig([]byte(line), "", hcl.Pos{Line: 1, Column: 1}) + for _, token := range tokens { + switch token.Type { //nolint:exhaustive // we only care about these specific types + case hclsyntax.TokenOBrace: + c.brace++ + case hclsyntax.TokenCBrace: + c.brace-- + case hclsyntax.TokenOBrack: + c.bracket++ + case hclsyntax.TokenCBrack: + c.bracket-- + case hclsyntax.TokenOParen: + c.parentheses++ + case hclsyntax.TokenCParen: + c.parentheses-- + } + } + return c.getCommand(), c.commandInOpenState() +} + +// getCommand joins the buffer and returns it +func (c *consoleBracketState) getCommand() string { + output := strings.Join(c.buffer, "\n") + return output +} + +func (c *consoleBracketState) checkStateAndClearBuffer() { + if c.commandInOpenState() <= 0 { + c.buffer = []string{} + } +} diff --git a/pkg/command/console_state_test.go b/pkg/command/console_state_test.go new file mode 100644 index 00000000000..2b911619acb --- /dev/null +++ b/pkg/command/console_state_test.go @@ -0,0 +1,226 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package command

import "testing"

// Test_commandInOpenState verifies the open-state value reported after a
// single line of input: -1 for malformed input, 0 for balanced input, and
// the count of still-open delimiters otherwise.
func Test_commandInOpenState(t *testing.T) {
	type testCase struct {
		input    string
		expected int
	}

	tests := map[string]testCase{
		"plain braces": {
			input:    "{}",
			expected: 0,
		},
		"plain brackets": {
			input:    "[]",
			expected: 0,
		},
		"plain parentheses": {
			input:    "()",
			expected: 0,
		},
		"open braces": {
			input:    "{",
			expected: 1,
		},
		"open brackets": {
			input:    "[",
			expected: 1,
		},
		"open parentheses": {
			input:    "(",
			expected: 1,
		},
		"two open braces": {
			input:    "{{",
			expected: 2,
		},
		"two open brackets": {
			input:    "[[",
			expected: 2,
		},
		"two open parentheses": {
			input:    "((",
			expected: 2,
		},
		"open and closed braces": {
			input:    "{{}",
			expected: 1,
		},
		"open and closed brackets": {
			input:    "[[]",
			expected: 1,
		},
		"open and closed parentheses": {
			input:    "(()",
			expected: 1,
		},
		"mix braces and brackets": {
			input:    "{[]",
			expected: 1,
		},
		"mix brackets and parentheses": {
			input:    "[()",
			expected: 1,
		},
		"mix parentheses and braces": {
			input:    "({}",
			expected: 1,
		},
		"invalid braces": {
			input:    "{}}",
			expected: -1,
		},
		"invalid brackets": {
			input:    "[]]",
			expected: -1,
		},
		"invalid parentheses": {
			input:    "())",
			expected: -1,
		},
		"escaped new line": {
			input:    "\\",
			expected: 1,
		},
		"false positive new line": {
			input:    "\\\\",
			expected: 0,
		},
		"mix parentheses and new line": {
			input:    "(\\",
			expected: 2,
		},
	}

	for testName, tc := range tests {
		t.Run(testName, func(t *testing.T) {
			state := consoleBracketState{}
			_, actual := state.UpdateState(tc.input)
			if actual != tc.expected {
				t.Fatalf("Actual: %d, expected %d", actual, tc.expected)
			}
		})
	}
}

// Test_UpdateState verifies the open-state value after feeding a sequence
// of lines, i.e. that the counters accumulate correctly across calls.
func Test_UpdateState(t *testing.T) {
	type testCase struct {
		inputs   []string
		expected int
	}

	tests := map[string]testCase{
		"plain braces": {
			inputs:   []string{"{", "}"},
			expected: 0,
		},
		"open brackets": {
			inputs:   []string{"[", "[", "]"},
			expected: 1,
		},
		"invalid parenthesis": {
			inputs:   []string{"(", ")", ")"},
			expected: -1,
		},
		"a fake brace": {
			inputs:   []string{"{", "\"}\"", "}"},
			expected: 0,
		},
		"a mixed bag": {
			inputs:   []string{"{", "}", "[", "...", "()", "]"},
			expected: 0,
		},
		"multiple open": {
			inputs:   []string{"{", "[", "("},
			expected: 3,
		},
		"escaped new line": {
			inputs:   []string{"\\"},
			expected: 1,
		},
		"false positive new line": {
			inputs:   []string{"\\\\"},
			expected: 0,
		},
	}

	for testName, tc := range tests {
		t.Run(testName, func(t *testing.T) {
			actual := 0
			state := consoleBracketState{}
			// Only the state after the final line matters here.
			for _, input := range tc.inputs {
				_, actual = state.UpdateState(input)
			}

			if actual != tc.expected {
				t.Fatalf("Actual: %d, expected %d", actual, tc.expected)
			}
		})
	}
}

// Test_GetFullCommand verifies the buffered command returned after each
// line, including that the buffer is cleared once a command completes.
func Test_GetFullCommand(t *testing.T) {
	type testCase struct {
		inputs   []string
		expected []string
	}

	tests := map[string]testCase{
		"plain braces": {
			inputs:   []string{"{", "}"},
			expected: []string{"{", "{\n}"},
		},
		"open brackets": {
			inputs:   []string{"[", "[", "]"},
			expected: []string{"[", "[\n[", "[\n[\n]"},
		},
		"invalid parenthesis": {
			inputs:   []string{"(", ")", ")"},
			expected: []string{"(", "(\n)", ")"},
		},
		"a fake brace": {
			inputs:   []string{"{", "\"}\"", "}"},
			expected: []string{"{", "{\n\"}\"", "{\n\"}\"\n}"},
		},
		"a mixed bag": {
			inputs:   []string{"{", "}", "[", "...", "", "()", "]"},
			expected: []string{"{", "{\n}", "[", "[\n...", "[\n...", "[\n...\n()", "[\n...\n()\n]"},
		},
		"multiple open": {
			inputs:   []string{"{", "[", "("},
			expected: []string{"{", "{\n[", "{\n[\n("},
		},
		"escaped new line": {
			inputs:   []string{"\\"},
			expected: []string{""},
		},
		"false positive new line": {
			inputs:   []string{"\\\\"},
			expected: []string{"\\"},
		},
	}

	for testName, tc := range tests {
		t.Run(testName, func(t *testing.T) {
			state := consoleBracketState{}
			if len(tc.inputs) != len(tc.expected) {
				t.Fatalf("\nthe length of inputs: %d\n and expected: %d don't match", len(tc.inputs), len(tc.expected))
			}

			for i, input := range tc.inputs {
				actual, _ := state.UpdateState(input)
				if actual != tc.expected[i] {
					t.Fatalf("\nActual: %q\nexpected: %q", actual, tc.expected[i])
				}
			}
		})
	}
}
diff --git a/pkg/command/console_test.go b/pkg/command/console_test.go
new file mode 100644
index 00000000000..822fffad5ee
--- /dev/null
+++ b/pkg/command/console_test.go
@@ -0,0 +1,352 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"bytes"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/terminal"
	"github.com/mitchellh/cli"
	"github.com/zclconf/go-cty/cty"
)

// ConsoleCommand is tested primarily with tests in the "repl" package.
// It is not tested here because the Console uses a readline-like library
// that takes over stdin/stdout. It is difficult to test directly. The
// core logic is tested in "repl"
//
// This file still contains some tests using the stdin-based input.
+ +func TestConsole_basic(t *testing.T) { + testCwd(t) + + p := testProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader("1+5\n"))() + outCloser := testStdoutCapture(t, &output) + + args := []string{} + code := c.Run(args) + outCloser() + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := output.String() + if actual != "6\n" { + t.Fatalf("bad: %q", actual) + } +} + +func TestConsole_tfvars(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + // Write a terraform.tvars + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := os.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader("var.foo\n"))() + outCloser := testStdoutCapture(t, &output) + + args := []string{} + code := c.Run(args) + outCloser() + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := output.String() + if actual != "\"bar\"\n" { + t.Fatalf("bad: %q", actual) + } +} + +func TestConsole_unsetRequiredVars(t *testing.T) { + // This test is verifying that it's possible to run "tofu console" + // without providing values for all required variables, without + // "tofu 
console" producing an interactive prompt for those variables + // or producing errors. Instead, it should allow evaluation in that + // partial context but see the unset variables values as being unknown. + // + // This test fixture includes variable "foo" {}, which we are + // intentionally not setting here. + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-vars"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader("var.foo\n"))() + outCloser := testStdoutCapture(t, &output) + + args := []string{} + code := c.Run(args) + outCloser() + + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if got, want := output.String(), "(known after apply)\n"; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +func TestConsole_variables(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("variables"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + commands := map[string]string{ + "var.foo\n": "\"bar\"\n", + "var.snack\n": "\"popcorn\"\n", + "var.secret_snack\n": "(sensitive value)\n", + "local.snack_bar\n": "[\n \"popcorn\",\n (sensitive value),\n]\n", + } + + args := []string{} + + for cmd, val := range commands { + var output bytes.Buffer + defer testStdinPipe(t, 
strings.NewReader(cmd))() + outCloser := testStdoutCapture(t, &output) + code := c.Run(args) + outCloser() + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := output.String() + if output.String() != val { + t.Fatalf("bad: %q, expected %q", actual, val) + } + } +} + +func TestConsole_modules(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("modules"), td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + commands := map[string]string{ + "module.child.myoutput\n": "\"bar\"\n", + "module.count_child[0].myoutput\n": "\"bar\"\n", + "local.foo\n": "3\n", + } + + args := []string{} + + for cmd, val := range commands { + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader(cmd))() + outCloser := testStdoutCapture(t, &output) + code := c.Run(args) + outCloser() + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := output.String() + if output.String() != val { + t.Fatalf("bad: %q, expected %q", actual, val) + } + } +} + +func TestConsole_multiline_pipe(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("console-multiline-vars"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + type testCase struct { + input string + expected string + } + + tests := map[string]testCase{ + "single_line": { + input: `var.counts.lalala`, + expected: "1\n", + }, + "basic_multi_line": { + input: ` + var.counts.lalala + var.counts.lololo`, + expected: "\n1\n2\n", + }, + "backets_multi_line": { 
+ input: ` + var.counts.lalala + split( + "_", + "lalala_lolol_lelelele" + )`, + expected: "\n1\ntolist([\n \"lalala\",\n \"lolol\",\n \"lelelele\",\n])\n", + }, + "baces_multi_line": { + input: ` + { + for key, value in var.counts : key => value + if value == 1 + }`, + expected: "\n{\n \"lalala\" = 1\n}\n", + }, + "escaped_new_line": { + input: ` + 5 + 4 \ + + `, + expected: "\n9\n\n", + }, + "heredoc": { + input: ` + { + default = <<-EOT + lulululu + EOT + }`, + expected: "\n{\n \"default\" = <<-EOT\n lulululu\n \n EOT\n}\n", + }, + "quoted_braces": { + input: "{\ndefault = format(\"%s%s%s\",\"{\",var.counts.lalala,\"}\")\n}", + expected: "{\n \"default\" = \"{1}\"\n}\n", + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + streams, _ := terminal.StreamsForTesting(t) + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &ConsoleCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + Streams: streams, + }, + } + + var output bytes.Buffer + defer testStdinPipe(t, strings.NewReader(tc.input))() + outCloser := testStdoutCapture(t, &output) + + args := []string{} + code := c.Run(args) + outCloser() + + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := output.String() + if got != tc.expected { + t.Fatalf("unexpected output\ngot: %q\nexpected: %q", got, tc.expected) + } + }) + } +} diff --git a/pkg/command/e2etest/.gitignore b/pkg/command/e2etest/.gitignore new file mode 100644 index 00000000000..a007feab071 --- /dev/null +++ b/pkg/command/e2etest/.gitignore @@ -0,0 +1 @@ +build/* diff --git a/pkg/command/e2etest/automation_test.go b/pkg/command/e2etest/automation_test.go new file mode 100644 index 00000000000..074d2299b35 --- /dev/null +++ b/pkg/command/e2etest/automation_test.go @@ -0,0 +1,245 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/pkg/plans" +) + +// TestPlanApplyInAutomation runs through the "main case" of init, plan, apply +// using the specific command line options suggested in the guide. +func TestPlanApplyInAutomation(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "full-workflow-null") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // We advertise that _any_ non-empty value works, so we'll test something + // unconventional here. + tf.AddEnv("TF_IN_AUTOMATION=yes-please") + + //// INIT + stdout, stderr, err := tf.Run("init", "-input=false") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we actually downloaded the plugins, rather than picking up + // copies that might be already installed globally on the system. 
+ if !strings.Contains(stdout, "Installing hashicorp/template v") { + t.Errorf("template provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + if !strings.Contains(stdout, "Installing hashicorp/null v") { + t.Errorf("null provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + //// PLAN + stdout, stderr, err = tf.Run("plan", "-out=tfplan", "-input=false") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "1 to add, 0 to change, 0 to destroy") { + t.Errorf("incorrect plan tally; want 1 to add:\n%s", stdout) + } + + // Because we're running with TF_IN_AUTOMATION set, we should not see + // any mention of the plan file in the output. + if strings.Contains(stdout, "tfplan") { + t.Errorf("unwanted mention of \"tfplan\" file in plan output\n%s", stdout) + } + + plan, err := tf.Plan("tfplan") + if err != nil { + t.Fatalf("failed to read plan file: %s", err) + } + + // stateResources := plan.Changes.Resources + diffResources := plan.Changes.Resources + if len(diffResources) != 1 { + t.Errorf("incorrect number of resources in plan") + } + + expected := map[string]plans.Action{ + "null_resource.test": plans.Create, + } + + for _, r := range diffResources { + expectedAction, ok := expected[r.Addr.String()] + if !ok { + t.Fatalf("unexpected change for %q", r.Addr) + } + if r.Action != expectedAction { + t.Fatalf("unexpected action %q for %q", r.Action, r.Addr) + } + } + + //// APPLY + stdout, stderr, err = tf.Run("apply", "-input=false", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 1 
added:\n%s", stdout) + } + + state, err := tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + stateResources := state.RootModule().Resources + var gotResources []string + for n := range stateResources { + gotResources = append(gotResources, n) + } + sort.Strings(gotResources) + + wantResources := []string{ + "data.template_file.test", + "null_resource.test", + } + + if !reflect.DeepEqual(gotResources, wantResources) { + t.Errorf("wrong resources in state\ngot: %#v\nwant: %#v", gotResources, wantResources) + } +} + +// TestAutoApplyInAutomation tests the scenario where the caller skips creating +// an explicit plan and instead forces automatic application of changes. +func TestAutoApplyInAutomation(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "full-workflow-null") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // We advertise that _any_ non-empty value works, so we'll test something + // unconventional here. + tf.AddEnv("TF_IN_AUTOMATION=very-much-so") + + //// INIT + stdout, stderr, err := tf.Run("init", "-input=false") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we actually downloaded the plugins, rather than picking up + // copies that might be already installed globally on the system. 
+ if !strings.Contains(stdout, "Installing hashicorp/template v") { + t.Errorf("template provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + if !strings.Contains(stdout, "Installing hashicorp/null v") { + t.Errorf("null provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + //// APPLY + stdout, stderr, err = tf.Run("apply", "-input=false", "-auto-approve") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 1 added:\n%s", stdout) + } + + state, err := tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + stateResources := state.RootModule().Resources + var gotResources []string + for n := range stateResources { + gotResources = append(gotResources, n) + } + sort.Strings(gotResources) + + wantResources := []string{ + "data.template_file.test", + "null_resource.test", + } + + if !reflect.DeepEqual(gotResources, wantResources) { + t.Errorf("wrong resources in state\ngot: %#v\nwant: %#v", gotResources, wantResources) + } +} + +// TestPlanOnlyInAutomation tests the scenario of creating a "throwaway" plan, +// which we recommend as a way to verify a pull request. +func TestPlanOnlyInAutomation(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "full-workflow-null") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // We advertise that _any_ non-empty value works, so we'll test something + // unconventional here. 
+ tf.AddEnv("TF_IN_AUTOMATION=verily") + + //// INIT + stdout, stderr, err := tf.Run("init", "-input=false") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we actually downloaded the plugins, rather than picking up + // copies that might be already installed globally on the system. + if !strings.Contains(stdout, "Installing hashicorp/template v") { + t.Errorf("template provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + if !strings.Contains(stdout, "Installing hashicorp/null v") { + t.Errorf("null provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + //// PLAN + stdout, stderr, err = tf.Run("plan", "-input=false") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "1 to add, 0 to change, 0 to destroy") { + t.Errorf("incorrect plan tally; want 1 to add:\n%s", stdout) + } + + // Because we're running with TF_IN_AUTOMATION set, we should not see + // any mention of the "tofu apply" command in the output. + if strings.Contains(stdout, "tofu apply") { + t.Errorf("unwanted mention of \"tofu apply\" in plan output\n%s", stdout) + } + + if tf.FileExists("tfplan") { + t.Error("plan file was created, but was not expected") + } +} diff --git a/pkg/command/e2etest/doc.go b/pkg/command/e2etest/doc.go new file mode 100644 index 00000000000..d179c56946e --- /dev/null +++ b/pkg/command/e2etest/doc.go @@ -0,0 +1,34 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package e2etest contains a set of tests that run against a real OpenTofu +// binary, compiled on the fly at the start of the test run. 
+// +// These tests help ensure that key end-to-end OpenTofu use-cases are working +// for a real binary, whereas other tests always have at least _some_ amount +// of test stubbing. +// +// The goal of this package is not to duplicate the functional testing done +// in other packages but rather to fully exercise a few important workflows +// in a realistic way. +// +// These tests can be used in two ways. The simplest way is to just run them +// with "go test" as normal: +// +// go test -v github.com/kubegems/opentofu/pkg/command/e2etest +// +// This will compile on the fly a OpenTofu binary and run the tests against +// it. +// +// Alternatively, the make-archive.sh script can be used to produce a +// self-contained zip file that can be shipped to another machine to run +// the tests there without needing a locally-installed Go compiler. This +// is primarily useful for testing cross-compiled builds during our release +// process. For more information, see the commentary in make-archive.sh. +// +// The TF_ACC environment variable must be set for the tests to reach out +// to external network services. Since these are end-to-end tests, only a +// few very basic tests can execute without this environment variable set. +package e2etest diff --git a/pkg/command/e2etest/encryption_test.go b/pkg/command/e2etest/encryption_test.go new file mode 100644 index 00000000000..7d3ec61a4af --- /dev/null +++ b/pkg/command/e2etest/encryption_test.go @@ -0,0 +1,239 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "fmt" + "os" + "path/filepath" + "runtime/debug" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +type tofuResult struct { + t *testing.T + + stdout string + stderr string + err error +} + +func (r tofuResult) Success() tofuResult { + if r.stderr != "" { + debug.PrintStack() + r.t.Fatalf("unexpected stderr output:\n%s", r.stderr) + } + if r.err != nil { + debug.PrintStack() + r.t.Fatalf("unexpected error: %s", r.err) + } + + return r +} + +func (r tofuResult) Failure() tofuResult { + if r.err == nil { + debug.PrintStack() + r.t.Fatal("expected error") + } + return r +} + +func (r tofuResult) StderrContains(msg string) tofuResult { + if !strings.Contains(r.stderr, msg) { + debug.PrintStack() + r.t.Fatalf("expected stderr output %q:\n%s", msg, r.stderr) + } + return r +} + +func (r tofuResult) Contains(msg string) tofuResult { + if !strings.Contains(r.stdout, msg) { + debug.PrintStack() + r.t.Fatalf("expected output %q:\n%s", msg, r.stdout) + } + return r +} + +// This test covers the scenario where a user migrates an existing project +// to having encryption enabled, uses it, then migrates back to encryption +// disabled +func TestEncryptionFlow(t *testing.T) { + + // This test reaches out to registry.opentofu.org to download the + // mock provider, so it can only run if network access is allowed + skipIfCannotAccessNetwork(t) + + // There is a lot of setup / helpers defined. Actual test logic is below. + + fixturePath := filepath.Join("testdata", "encryption-flow") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // tofu init + _, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + iter := 0 + + run := func(args ...string) tofuResult { + stdout, stderr, err := tf.Run(args...) 
+ return tofuResult{t, stdout, stderr, err} + } + apply := func() tofuResult { + iter += 1 + return run("apply", fmt.Sprintf("-var=iter=%v", iter), "-auto-approve") + } + + createPlan := func(planfile string) tofuResult { + iter += 1 + return run("plan", fmt.Sprintf("-var=iter=%v", iter), "-out="+planfile) + } + applyPlan := func(planfile string) tofuResult { + return run("apply", "-auto-approve", planfile) + } + + requireUnencryptedState := func() { + _, err = tf.LocalState() + if err != nil { + t.Fatalf("expected unencrypted state file: %q", err) + } + } + requireEncryptedState := func() { + _, err = tf.LocalState() + if err == nil || err.Error() != "Error reading statefile: Unsupported state file format: This state file is encrypted and can not be read without an encryption configuration" { + t.Fatalf("expected encrypted state file: %q", err) + } + } + + with := func(path string, fn func()) { + src := tf.Path(path + ".disabled") + dst := tf.Path(path) + + err := os.Rename(src, dst) + if err != nil { + t.Fatalf(err.Error()) + } + + fn() + + err = os.Rename(dst, src) + if err != nil { + t.Fatalf(err.Error()) + } + } + + // Actual test begins HERE + // NOTE: state plans are still readable and tests the encryption state + + unencryptedPlan := "unencrypted.tfplan" + encryptedPlan := "encrypted.tfplan" + + { + // Everything works before adding encryption configuration + apply().Success() + requireUnencryptedState() + // Check read/write of state file + apply().Success() + requireUnencryptedState() + + // Save an unencrypted plan + createPlan(unencryptedPlan).Success() + // Validate unencrypted plan + applyPlan(unencryptedPlan).Success() + requireUnencryptedState() + } + + with("required.tf", func() { + // Can't switch directly to encryption, need to migrate + apply().Failure().StderrContains("encountered unencrypted payload without unencrypted method") + requireUnencryptedState() + }) + + with("migrateto.tf", func() { + // Migrate to using encryption + 
apply().Success() + requireEncryptedState() + // Make changes and confirm it's still encrypted (even with migration enabled) + apply().Success() + requireEncryptedState() + + // Save an encrypted plan + createPlan(encryptedPlan).Success() + + // Apply encrypted plan (with migration active) + applyPlan(encryptedPlan).Success() + requireEncryptedState() + // Apply unencrypted plan (with migration active) + applyPlan(unencryptedPlan).StderrContains("Saved plan is stale") + requireEncryptedState() + }) + + { + // Unconfigured encryption clearly fails on encrypted state + apply().Failure().StderrContains("can not be read without an encryption configuration") + } + + with("required.tf", func() { + // Encryption works with fallback removed + apply().Success() + requireEncryptedState() + + // Can't apply unencrypted plan + applyPlan(unencryptedPlan).Failure().StderrContains("encountered unencrypted payload without unencrypted method") + requireEncryptedState() + + // Apply encrypted plan + applyPlan(encryptedPlan).StderrContains("Saved plan is stale") + requireEncryptedState() + }) + + with("broken.tf", func() { + // Make sure changes to encryption keys notify the user correctly + apply().Failure().StderrContains("decryption failed for state") + requireEncryptedState() + + applyPlan(encryptedPlan).Failure().StderrContains("decryption failed: cipher: message authentication failed") + requireEncryptedState() + }) + + with("migratefrom.tf", func() { + // Apply migration from encrypted state + apply().Success() + requireUnencryptedState() + // Make changes and confirm it's still encrypted (even with migration enabled) + apply().Success() + requireUnencryptedState() + + // Apply unencrypted plan (with migration active) + applyPlan(unencryptedPlan).StderrContains("Saved plan is stale") + requireUnencryptedState() + + // Apply encrypted plan (with migration active) + applyPlan(encryptedPlan).StderrContains("Saved plan is stale") + requireUnencryptedState() + }) + + { + // Back to 
no encryption configuration with unencrypted state + apply().Success() + requireUnencryptedState() + + // Apply unencrypted plan + applyPlan(unencryptedPlan).StderrContains("Saved plan is stale") + requireUnencryptedState() + // Can't apply encrypted plan + applyPlan(encryptedPlan).Failure().StderrContains("the given plan file is encrypted and requires a valid encryption") + requireUnencryptedState() + } +} diff --git a/pkg/command/e2etest/init_test.go b/pkg/command/e2etest/init_test.go new file mode 100644 index 00000000000..4bf2b507ad2 --- /dev/null +++ b/pkg/command/e2etest/init_test.go @@ -0,0 +1,549 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestInitProviders(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template provider, so it can only run if network access is allowed. + // We intentionally don't try to stub this here, because there's already + // a stubbed version of this in the "command" package and so the goal here + // is to test the interaction with the real repository. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "template-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing hashicorp/template v") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + if !strings.Contains(stdout, "OpenTofu has created a lock file") { + t.Errorf("lock file notification is missing from output:\n%s", stdout) + } + +} + +func TestInitProvidersInternal(t *testing.T) { + t.Parallel() + + // This test should _not_ reach out anywhere because the "terraform" + // provider is internal to the core tofu binary. + + t.Run("output in human readable format", func(t *testing.T) { + fixturePath := filepath.Join("testdata", "tf-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing hashicorp/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. + t.Errorf("provider download message appeared in output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing terraform.io/builtin/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. 
+ t.Errorf("provider download message appeared in output:\n%s", stdout) + } + }) + + t.Run("output in machine readable format", func(t *testing.T) { + fixturePath := filepath.Join("testdata", "tf-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("init", "-json") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + // we can not check timestamp, so the sub string is not a valid json object + if !strings.Contains(stdout, `{"@level":"info","@message":"OpenTofu has been successfully initialized!","@module":"tofu.ui"`) { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing hashicorp/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. + t.Errorf("provider download message appeared in output:\n%s", stdout) + } + + if strings.Contains(stdout, "Installing terraform.io/builtin/terraform") { + // Shouldn't have downloaded anything with this config, because the + // provider is built in. + t.Errorf("provider download message appeared in output:\n%s", stdout) + } + }) + +} + +func TestInitProvidersVendored(t *testing.T) { + t.Parallel() + + // This test will try to reach out to registry.opentofu.org as one of the + // possible installation locations for + // hashicorp/null, where it will find that + // versions do exist but will ultimately select the version that is + // vendored due to the version constraint. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "vendored-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. 
+ fixtMachineDir := tf.Path("terraform.d/plugins/registry.opentofu.org/hashicorp/null/1.0.0+local/os_arch") + wantMachineDir := tf.Path("terraform.d/plugins/registry.opentofu.org/hashicorp/null/1.0.0+local/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing hashicorp/null v1.0.0+local") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + +} + +func TestInitProvidersLocalOnly(t *testing.T) { + t.Parallel() + + // This test should not reach out to the network if it is behaving as + // intended. If it _does_ try to access an upstream registry and encounter + // an error doing so then that's a legitimate test failure that should be + // fixed. (If it incorrectly reaches out anywhere then it's likely to be + // to the host "example.com", which is the placeholder domain we use in + // the test fixture.) + + t.Run("output in human readable format", func(t *testing.T) { + fixturePath := filepath.Join("testdata", "local-only-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + // If you run this test on a workstation with a plugin-cache directory + // configured, it will leave a bad directory behind and tofu init will + // not work until you remove it. + // + // To avoid this, we will "zero out" any existing cli config file. 
+ tf.AddEnv("TF_CLI_CONFIG_FILE=") + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch") + wantMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a conflicting copy of the plugin in one of the global plugin search dirs)") + } + }) + + t.Run("output in machine readable format", func(t *testing.T) { + fixturePath := filepath.Join("testdata", "local-only-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + // If you run this test on a workstation with a plugin-cache directory + // configured, it will leave a bad directory behind and tofu init will + // not work until you remove it. + // + // To avoid this, we will "zero out" any existing cli config file. + tf.AddEnv("TF_CLI_CONFIG_FILE=") + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. 
+ fixtMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch") + wantMachineDir := tf.Path("terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + stdout, stderr, err := tf.Run("init", "-json") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + // we can not check timestamp, so the sub string is not a valid json object + if !strings.Contains(stdout, `{"@level":"info","@message":"OpenTofu has been successfully initialized!","@module":"tofu.ui"`) { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, `{"@level":"info","@message":"- Installing example.com/awesomecorp/happycloud v1.2.0...","@module":"tofu.ui"`) { + t.Errorf("provider download message is missing from output:\n%s", stdout) + t.Logf("(this can happen if you have a conflicting copy of the plugin in one of the global plugin search dirs)") + } + }) + +} + +func TestInitProvidersCustomMethod(t *testing.T) { + t.Parallel() + + // This test should not reach out to the network if it is behaving as + // intended. If it _does_ try to access an upstream registry and encounter + // an error doing so then that's a legitimate test failure that should be + // fixed. (If it incorrectly reaches out anywhere then it's likely to be + // to the host "example.com", which is the placeholder domain we use in + // the test fixture.) 
+ + for _, configFile := range []string{"cliconfig.tfrc", "cliconfig.tfrc.json"} { + t.Run(configFile, func(t *testing.T) { + fixturePath := filepath.Join("testdata", "custom-provider-install-method") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch") + wantMachineDir := tf.Path("fs-mirror/example.com/awesomecorp/happycloud/1.2.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // We'll use a local CLI configuration file taken from our fixture + // directory so we can force a custom installation method config. + tf.AddEnv("TF_CLI_CONFIG_FILE=" + tf.Path(configFile)) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } + + if !strings.Contains(stdout, "- Installing example.com/awesomecorp/happycloud v1.2.0") { + t.Errorf("provider download message is missing from output:\n%s", stdout) + } + }) + } +} + +func TestInitProviders_pluginCache(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to access plugin + // metadata, and download the null plugin, though the template plugin + // should come from local cache. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "plugin-cache") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // Our fixture dir has a generic os_arch dir, which we need to customize + // to the actual OS/arch where this test is running in order to get the + // desired result. + fixtMachineDir := tf.Path("cache/registry.opentofu.org/hashicorp/template/2.1.0/os_arch") + wantMachineDir := tf.Path("cache/registry.opentofu.org/hashicorp/template/2.1.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) + err := os.Rename(fixtMachineDir, wantMachineDir) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + cmd := tf.Cmd("init") + + // convert the slashes if building for windows. + p := filepath.FromSlash("./cache") + cmd.Env = append(cmd.Env, "TF_PLUGIN_CACHE_DIR="+p) + err = cmd.Run() + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + path := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/template/2.1.0/%s_%s/terraform-provider-template_v2.1.0_x4", runtime.GOOS, runtime.GOARCH)) + content, err := tf.ReadFile(path) + if err != nil { + t.Fatalf("failed to read installed plugin from %s: %s", path, err) + } + if strings.TrimSpace(string(content)) != "this is not a real plugin" { + t.Errorf("template plugin was not installed from local cache") + } + + nullLinkPath := filepath.FromSlash(fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/null/2.1.0/%s_%s/terraform-provider-null", runtime.GOOS, runtime.GOARCH)) + if runtime.GOOS == "windows" { + nullLinkPath = nullLinkPath + ".exe" + } + if !tf.FileExists(nullLinkPath) { + t.Errorf("null plugin was not installed into %s", nullLinkPath) + } + + nullCachePath := filepath.FromSlash(fmt.Sprintf("cache/registry.opentofu.org/hashicorp/null/2.1.0/%s_%s/terraform-provider-null", runtime.GOOS, runtime.GOARCH)) + if runtime.GOOS == "windows" { + nullCachePath = nullCachePath + ".exe" + } + if !tf.FileExists(nullCachePath) 
{ + t.Errorf("null plugin is not in cache after install. expected in: %s", nullCachePath) + } +} + +func TestInit_fromModule(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org and github.com to lookup + // and fetch a module. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "empty") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + cmd := tf.Cmd("init", "-from-module=hashicorp/vault/aws") + cmd.Stdin = nil + cmd.Stderr = &bytes.Buffer{} + + err := cmd.Run() + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + stderr := cmd.Stderr.(*bytes.Buffer).String() + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + content, err := tf.ReadFile("main.tf") + if err != nil { + t.Fatalf("failed to read main.tf: %s", err) + } + if !bytes.Contains(content, []byte("vault")) { + t.Fatalf("main.tf doesn't appear to be a vault configuration: \n%s", content) + } +} + +func TestInitProviderNotFound(t *testing.T) { + t.Parallel() + + // This test will reach out to registry.opentofu.org as one of the possible + // installation locations for hashicorp/nonexist, which should not exist. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "provider-not-found") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + t.Run("registry provider not found", func(t *testing.T) { + _, stderr, err := tf.Run("init", "-no-color") + if err == nil { + t.Fatal("expected error, got success") + } + + oneLineStderr := strings.ReplaceAll(stderr, "\n", " ") + if !strings.Contains(oneLineStderr, "provider registry registry.opentofu.org does not have a provider named registry.opentofu.org/hashicorp/nonexist") { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + + if !strings.Contains(oneLineStderr, "All modules should specify their required_providers") { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + }) + + t.Run("registry provider not found output in json format", func(t *testing.T) { + stdout, _, err := tf.Run("init", "-no-color", "-json") + if err == nil { + t.Fatal("expected error, got success") + } + + oneLineStdout := strings.ReplaceAll(stdout, "\n", " ") + if !strings.Contains(oneLineStdout, `"diagnostic":{"severity":"error","summary":"Failed to query available provider packages","detail":"Could not retrieve the list of available versions for provider hashicorp/nonexist: provider registry registry.opentofu.org does not have a provider named registry.opentofu.org/hashicorp/nonexist\n\nAll modules should specify their required_providers so that external consumers will get the correct providers when using a module. 
To see which modules are currently depending on hashicorp/nonexist, run the following command:\n tofu providers\n\nIf you believe this provider is missing from the registry, please submit a issue on the OpenTofu Registry https://github.com/opentofu/registry/issues/new/choose"},"type":"diagnostic"}`) { + t.Errorf("expected error message is missing from output:\n%s", stdout) + } + }) + + t.Run("local provider not found", func(t *testing.T) { + // The -plugin-dir directory must exist for the provider installer to search it. + pluginDir := tf.Path("empty-for-json") + if err := os.Mkdir(pluginDir, os.ModePerm); err != nil { + t.Fatal(err) + } + + _, stderr, err := tf.Run("init", "-no-color", "-plugin-dir="+pluginDir) + if err == nil { + t.Fatal("expected error, got success") + } + + if !strings.Contains(stderr, "provider registry.opentofu.org/hashicorp/nonexist was not\nfound in any of the search locations\n\n - "+pluginDir) { + t.Errorf("expected error message is missing from output:\n%s", stderr) + } + }) + + t.Run("local provider not found output in json format", func(t *testing.T) { + // The -plugin-dir directory must exist for the provider installer to search it. 
+ pluginDir := tf.Path("empty") + if err := os.Mkdir(pluginDir, os.ModePerm); err != nil { + t.Fatal(err) + } + + stdout, _, err := tf.Run("init", "-no-color", "-plugin-dir="+pluginDir, "-json") + if err == nil { + t.Fatal("expected error, got success") + } + + escapedPluginDir := escapeStringJSON(pluginDir) + + if !strings.Contains(stdout, `"diagnostic":{"severity":"error","summary":"Failed to query available provider packages","detail":"Could not retrieve the list of available versions for provider hashicorp/nonexist: provider registry.opentofu.org/hashicorp/nonexist was not found in any of the search locations\n\n - `+escapedPluginDir+`"},"type":"diagnostic"}`) { + t.Errorf("expected error message is missing from output (pluginDir = '%s'):\n%s", escapedPluginDir, stdout) + } + }) + + t.Run("special characters enabled", func(t *testing.T) { + _, stderr, err := tf.Run("init") + if err == nil { + t.Fatal("expected error, got success") + } + + expectedErr := `╷ +│ Error: Failed to query available provider packages +│` + ` ` + ` +│ Could not retrieve the list of available versions for provider +│ hashicorp/nonexist: provider registry registry.opentofu.org does not have a +│ provider named registry.opentofu.org/hashicorp/nonexist +│ +│ All modules should specify their required_providers so that external +│ consumers will get the correct providers when using a module. 
To see which +│ modules are currently depending on hashicorp/nonexist, run the following +│ command: +│ tofu providers +│ +│ If you believe this provider is missing from the registry, please submit a +│ issue on the OpenTofu Registry +│ https://github.com/opentofu/registry/issues/new/choose +╵ + +` + if stripAnsi(stderr) != expectedErr { + t.Errorf("wrong output:\n%s", cmp.Diff(stripAnsi(stderr), expectedErr)) + } + }) +} + +// The following test is temporarily removed until the OpenTofu registry returns a deprecation warning +// https://github.com/opentofu/registry/issues/108 +//func TestInitProviderWarnings(t *testing.T) { +// t.Parallel() +// +// // This test will reach out to registry.terraform.io as one of the possible +// // installation locations for hashicorp/terraform, which is an archived package that is no longer needed. +// skipIfCannotAccessNetwork(t) +// +// fixturePath := filepath.Join("testdata", "provider-warnings") +// tf := e2e.NewBinary(t, tofuBin, fixturePath) +// +// stdout, _, err := tf.Run("init") +// if err == nil { +// t.Fatal("expected error, got success") +// } +// +// if !strings.Contains(stdout, "This provider is archived and no longer needed.") { +// t.Errorf("expected warning message is missing from output:\n%s", stdout) +// } +// +//} + +func escapeStringJSON(v string) string { + b := &strings.Builder{} + + enc := json.NewEncoder(b) + + enc.SetEscapeHTML(false) + + if err := enc.Encode(v); err != nil { + panic("failed to escapeStringJSON: " + v) + } + + marshaledV := b.String() + + // shouldn't happen + if len(marshaledV) < 2 { + return string(marshaledV) + } + + return string(marshaledV[1 : len(marshaledV)-2]) +} diff --git a/pkg/command/e2etest/main_test.go b/pkg/command/e2etest/main_test.go new file mode 100644 index 00000000000..66604573670 --- /dev/null +++ b/pkg/command/e2etest/main_test.go @@ -0,0 +1,81 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +var tofuBin string + +// canRunGoBuild is a short-term compromise to account for the fact that we +// have a small number of tests that work by building helper programs using +// "go build" at runtime, but we can't do that in our isolated test mode +// driven by the make-archive.sh script. +// +// FIXME: Rework this a bit so that we build the necessary helper programs +// (test plugins, etc) as part of the initial suite setup, and in the +// make-archive.sh script, so that we can run all of the tests in both +// situations with the tests just using the executable already built for +// them, as we do for tofuBin. +var canRunGoBuild bool + +func TestMain(m *testing.M) { + teardown := setup() + code := m.Run() + teardown() + os.Exit(code) +} + +func setup() func() { + if tofuBin != "" { + // this is pre-set when we're running in a binary produced from + // the make-archive.sh script, since that is for testing an + // executable obtained from a real release package. However, we do + // need to turn it into an absolute path so that we can find it + // when we change the working directory during tests. + var err error + tofuBin, err = filepath.Abs(tofuBin) + if err != nil { + panic(fmt.Sprintf("failed to find absolute path of tofu executable: %s", err)) + } + return func() {} + } + + tmpFilename := e2e.GoBuild("github.com/kubegems/opentofu/cmd/tofu", "tofu") + + // Make the executable available for use in tests + tofuBin = tmpFilename + + // Tests running in the ad-hoc testing mode are allowed to use "go build" + // and similar to produce other test executables. + // (See the comment on this variable's declaration for more information.) 
+ canRunGoBuild = true + + return func() { + os.Remove(tmpFilename) + } +} + +func canAccessNetwork() bool { + // We re-use the flag normally used for acceptance tests since that's + // established as a way to opt-in to reaching out to real systems that + // may suffer transient errors. + return os.Getenv("TF_ACC") != "" +} + +func skipIfCannotAccessNetwork(t *testing.T) { + t.Helper() + + if !canAccessNetwork() { + t.Skip("network access not allowed; use TF_ACC=1 to enable") + } +} diff --git a/pkg/command/e2etest/make-archive.sh b/pkg/command/e2etest/make-archive.sh new file mode 100755 index 00000000000..4c34e517f86 --- /dev/null +++ b/pkg/command/e2etest/make-archive.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# Copyright (c) The OpenTofu Authors +# SPDX-License-Identifier: MPL-2.0 +# Copyright (c) 2023 HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +# For normal use this package can just be tested with "go test" as standard, +# but this script is an alternative to allow the tests to be run somewhere +# other than where they are built. + +# The primary use for this is cross-compilation, where e.g. we can produce an +# archive that can be extracted on a Windows system to run the e2e tests there: +# $ GOOS=windows GOARCH=amd64 ./make-archive.sh +# +# This will produce a zip file build/terraform-e2etest_windows_amd64.zip which +# can be shipped off to a Windows amd64 system, extracted to some directory, +# and then executed as follows: +# set TF_ACC=1 +# ./e2etest.exe +# +# Because separated e2etest harnesses are intended for testing against "real" +# release executables, the generated archives don't include a copy of +# the Terraform executable. Instead, the caller of the tests must retrieve +# and extract a release package into the working directory before running +# the e2etest executable, so that "e2etest" can find and execute it. 
+ +set +euo pipefail + +# Always run from the directory where this script lives +cd "$( dirname "${BASH_SOURCE[0]}" )" + +GOOS="$(go env GOOS)" +GOARCH="$(go env GOARCH)" +GOEXE="$(go env GOEXE)" +OUTDIR="build/${GOOS}_${GOARCH}" +OUTFILE="tofu-e2etest_${GOOS}_${GOARCH}.zip" + +LDFLAGS="-X github.com/kubegems/opentofu/pkg/command/e2etest.tofuBin=./tofu$GOEXE" +# Caller may pass in the environment variable GO_LDFLAGS with additional +# flags we'll use when building. +if [ -n "${GO_LDFLAGS+set}" ]; then + LDFLAGS="${GO_LDFLAGS} ${LDFLAGS}" +fi + +mkdir -p "$OUTDIR" + +# We need the test fixtures available when we run the tests. +cp -r testdata "$OUTDIR/testdata" + +# Build the test program +go test -o "$OUTDIR/e2etest$GOEXE" -c -ldflags "$LDFLAGS" github.com/kubegems/opentofu/pkg/command/e2etest + +# Now bundle it all together for easy shipping! +cd "$OUTDIR" +zip -r "../$OUTFILE" * + +echo "e2etest archive created at build/$OUTFILE" diff --git a/pkg/command/e2etest/module_archive_test.go b/pkg/command/e2etest/module_archive_test.go new file mode 100644 index 00000000000..7ea01752231 --- /dev/null +++ b/pkg/command/e2etest/module_archive_test.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestInitModuleArchive(t *testing.T) { + t.Parallel() + + // this fetches a module archive from github + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "module-archive") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + if !strings.Contains(stdout, "OpenTofu has been successfully initialized!") { + t.Errorf("success message is missing from output:\n%s", stdout) + } +} diff --git a/pkg/command/e2etest/primary_test.go b/pkg/command/e2etest/primary_test.go new file mode 100644 index 00000000000..514b5af2432 --- /dev/null +++ b/pkg/command/e2etest/primary_test.go @@ -0,0 +1,234 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/zclconf/go-cty/cty" +) + +// The tests in this file are for the "primary workflow", which includes +// variants of the following sequence, with different details: +// tofu init +// tofu plan +// tofu apply +// tofu destroy + +func TestPrimarySeparatePlan(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "full-workflow-null") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + //// INIT + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we actually downloaded the plugins, rather than picking up + // copies that might be already installed globally on the system. + if !strings.Contains(stdout, "Installing hashicorp/template v") { + t.Errorf("template provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + if !strings.Contains(stdout, "Installing hashicorp/null v") { + t.Errorf("null provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + //// PLAN + stdout, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "1 to add, 0 to change, 0 to destroy") { + t.Errorf("incorrect plan tally; want 1 to add:\n%s", stdout) + } + + if !strings.Contains(stdout, "Saved the plan to: tfplan") { + t.Errorf("missing \"Saved the plan to...\" message in plan output\n%s", stdout) + } + if !strings.Contains(stdout, "tofu apply \"tfplan\"") { + t.Errorf("missing next-step instruction in plan output\n%s", stdout) + } + + plan, err := tf.Plan("tfplan") + if err != nil { + t.Fatalf("failed to read plan file: %s", err) + } + + diffResources := plan.Changes.Resources + if len(diffResources) != 1 { + t.Errorf("incorrect number of resources in plan") + } + + expected := map[string]plans.Action{ + "null_resource.test": plans.Create, + } + + for _, r := range diffResources { + expectedAction, ok := expected[r.Addr.String()] + if !ok { + t.Fatalf("unexpected change for %q", r.Addr) + 
} + if r.Action != expectedAction { + t.Fatalf("unexpected action %q for %q", r.Action, r.Addr) + } + } + + //// APPLY + stdout, stderr, err = tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 1 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 1 added:\n%s", stdout) + } + + state, err := tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + stateResources := state.RootModule().Resources + var gotResources []string + for n := range stateResources { + gotResources = append(gotResources, n) + } + sort.Strings(gotResources) + + wantResources := []string{ + "data.template_file.test", + "null_resource.test", + } + + if !reflect.DeepEqual(gotResources, wantResources) { + t.Errorf("wrong resources in state\ngot: %#v\nwant: %#v", gotResources, wantResources) + } + + //// DESTROY + stdout, stderr, err = tf.Run("destroy", "-auto-approve") + if err != nil { + t.Fatalf("unexpected destroy error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 1 destroyed") { + t.Errorf("incorrect destroy tally; want 1 destroyed:\n%s", stdout) + } + + state, err = tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file after destroy: %s", err) + } + + stateResources = state.RootModule().Resources + if len(stateResources) != 0 { + t.Errorf("wrong resources in state after destroy; want none, but still have:%s", spew.Sdump(stateResources)) + } + +} + +func TestPrimaryChdirOption(t *testing.T) { + t.Parallel() + + // This test case does not include any provider dependencies, so it's + // safe to run it even when network access is disallowed. 
+ + fixturePath := filepath.Join("testdata", "chdir-option") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + //// INIT + _, stderr, err := tf.Run("-chdir=subdir", "init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + stdout, stderr, err := tf.Run("-chdir=subdir", "plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if want := "You can apply this plan to save these new output values"; !strings.Contains(stdout, want) { + t.Errorf("missing expected message for an outputs-only plan\ngot:\n%s\n\nwant substring: %s", stdout, want) + } + + if !strings.Contains(stdout, "Saved the plan to: tfplan") { + t.Errorf("missing \"Saved the plan to...\" message in plan output\n%s", stdout) + } + if !strings.Contains(stdout, "tofu apply \"tfplan\"") { + t.Errorf("missing next-step instruction in plan output\n%s", stdout) + } + + // The saved plan is in the subdirectory because -chdir switched there + plan, err := tf.Plan("subdir/tfplan") + if err != nil { + t.Fatalf("failed to read plan file: %s", err) + } + + diffResources := plan.Changes.Resources + if len(diffResources) != 0 { + t.Errorf("incorrect diff in plan; want no resource changes, but have:\n%s", spew.Sdump(diffResources)) + } + + //// APPLY + stdout, stderr, err = tf.Run("-chdir=subdir", "apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 0 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 0 added:\n%s", stdout) + } + + // The state file is in subdir because -chdir changed the current working directory. 
+ state, err := tf.StateFromFile("subdir/terraform.tfstate") + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + gotOutput := state.RootModule().OutputValues["cwd"] + wantOutputValue := cty.StringVal(filepath.ToSlash(tf.Path())) // path.cwd returns the original path, because path.root is how we get the overridden path + if gotOutput == nil || !wantOutputValue.RawEquals(gotOutput.Value) { + t.Errorf("incorrect value for cwd output\ngot: %#v\nwant Value: %#v", gotOutput, wantOutputValue) + } + + gotOutput = state.RootModule().OutputValues["root"] + wantOutputValue = cty.StringVal(filepath.ToSlash(tf.Path("subdir"))) // path.root is a relative path, but the text fixture uses abspath on it. + if gotOutput == nil || !wantOutputValue.RawEquals(gotOutput.Value) { + t.Errorf("incorrect value for root output\ngot: %#v\nwant Value: %#v", gotOutput, wantOutputValue) + } + + if len(state.RootModule().Resources) != 0 { + t.Errorf("unexpected resources in state") + } + + //// DESTROY + stdout, stderr, err = tf.Run("-chdir=subdir", "destroy", "-auto-approve") + if err != nil { + t.Fatalf("unexpected destroy error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 0 destroyed") { + t.Errorf("incorrect destroy tally; want 0 destroyed:\n%s", stdout) + } +} diff --git a/pkg/command/e2etest/provider_dev_test.go b/pkg/command/e2etest/provider_dev_test.go new file mode 100644 index 00000000000..821e5b6ecc2 --- /dev/null +++ b/pkg/command/e2etest/provider_dev_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +// TestProviderDevOverrides is a test for the special dev_overrides setting +// in the provider_installation section of the CLI configuration file, which +// is our current answer to smoothing provider development by allowing +// developers to opt out of the version number and checksum verification +// we normally do, so they can just overwrite the same local executable +// in-place to iterate faster. +func TestProviderDevOverrides(t *testing.T) { + if !canRunGoBuild { + // We're running in a separate-build-then-run context, so we can't + // currently execute this test which depends on being able to build + // new executable at runtime. + // + // (See the comment on canRunGoBuild's declaration for more information.) + t.Skip("can't run without building a new provider executable") + } + t.Parallel() + + tf := e2e.NewBinary(t, tofuBin, "testdata/provider-dev-override") + + // In order to do a decent end-to-end test for this case we will need a + // real enough provider plugin to try to run and make sure we are able + // to actually run it. For now we'll use the "test" provider for that, + // because it happens to be in this repository and therefore allows + // us to avoid drawing in anything external, but we might revisit this + // strategy in future if other needs cause us to evolve the test + // provider in a way that makes it less suitable for this particular test, + // such as if it stops being buildable into an independent executable. 
+ providerExeDir := filepath.Join(tf.WorkDir(), "pkgdir") + providerExePrefix := filepath.Join(providerExeDir, "terraform-provider-test_") + providerExe := e2e.GoBuild("github.com/kubegems/opentofu/pkg/provider-simple/main", providerExePrefix) + t.Logf("temporary provider executable is %s", providerExe) + + err := os.WriteFile(filepath.Join(tf.WorkDir(), "dev.tfrc"), []byte(fmt.Sprintf(` + provider_installation { + dev_overrides { + "example.com/test/test" = %q + } + } + `, providerExeDir)), os.ModePerm) + if err != nil { + t.Fatal(err) + } + + tf.AddEnv("TF_CLI_CONFIG_FILE=dev.tfrc") + + stdout, stderr, err := tf.Run("providers") + if err != nil { + t.Fatalf("unexpected error: %s\n%s", err, stderr) + } + if got, want := stdout, `provider[example.com/test/test]`; !strings.Contains(got, want) { + t.Errorf("configuration should depend on %s, but doesn't\n%s", want, got) + } + + // NOTE: We're intentionally not running "tofu init" here, because + // dev overrides are always ready to use and don't need any special action + // to "install" them. This test is mimicking the a happy path of going + // directly from "go build" to validate/plan/apply without interacting + // with any registries, mirrors, lock files, etc. To verify "tofu + // init" does actually show a warning, that behavior is tested at the end. 
+ stdout, stderr, err = tf.Run("validate") + if err != nil { + t.Fatalf("unexpected error: %s\n%s", err, stderr) + } + + if got, want := stdout, `The configuration is valid, but`; !strings.Contains(got, want) { + t.Errorf("stdout doesn't include the success message\nwant: %s\n%s", want, got) + } + if got, want := stdout, `Provider development overrides are in effect`; !strings.Contains(got, want) { + t.Errorf("stdout doesn't include the warning about development overrides\nwant: %s\n%s", want, got) + } + + stdout, stderr, err = tf.Run("init") + if err == nil { + t.Fatal("expected error: Failed to query available provider packages") + } + if got, want := stdout, `Provider development overrides are in effect`; !strings.Contains(got, want) { + t.Errorf("stdout doesn't include the warning about development overrides\nwant: %s\n%s", want, got) + } + if got, want := stderr, `Failed to resolve provider packages`; !strings.Contains(got, want) { + t.Errorf("stderr doesn't include the error about listing unavailable development provider\nwant: %s\n%s", want, got) + } +} diff --git a/pkg/command/e2etest/provider_functions_test.go b/pkg/command/e2etest/provider_functions_test.go new file mode 100644 index 00000000000..04e1d15aea4 --- /dev/null +++ b/pkg/command/e2etest/provider_functions_test.go @@ -0,0 +1,80 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestFunction_Simple(t *testing.T) { + // This test reaches out to registry.opentofu.org to download the + // test functions provider, so it can only run if network access is allowed + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "functions") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // tofu init + _, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + _, stderr, err = tf.Run("plan", "-out=fnplan") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + plan, err := tf.Plan("fnplan") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if len(plan.Changes.Outputs) != 1 { + t.Fatalf("expected 1 outputs, got %d", len(plan.Changes.Outputs)) + } + for _, out := range plan.Changes.Outputs { + if !strings.Contains(string(out.After), "Hello Functions") { + t.Fatalf("unexpected plan output: %s", string(out.After)) + } + } +} + +func TestFunction_Error(t *testing.T) { + // This test reaches out to registry.opentofu.org to download the + // test functions provider, so it can only run if network access is allowed + skipIfCannotAccessNetwork(t) + fixturePath := filepath.Join("testdata", "functions-error") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // tofu init + _, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + // tofu plan -out=fnplan + _, stderr, err = tf.Run("plan", "-out=fnplan") + if err == nil { + t.Errorf("expected error: %s", err) + } + if !strings.Contains(stderr, "Call to function \"provider::example::error\" failed") { + 
t.Errorf("unexpected stderr output:\n%s", stderr) + } +} diff --git a/pkg/command/e2etest/provider_plugin_test.go b/pkg/command/e2etest/provider_plugin_test.go new file mode 100644 index 00000000000..d0c7b29800b --- /dev/null +++ b/pkg/command/e2etest/provider_plugin_test.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// TestProviderProtocols verifies that OpenTofu can execute provider plugins +// with both supported protocol versions. +func TestProviderProtocols(t *testing.T) { + if !canRunGoBuild { + // We're running in a separate-build-then-run context, so we can't + // currently execute this test which depends on being able to build + // new executable at runtime. + // + // (See the comment on canRunGoBuild's declaration for more information.) + t.Skip("can't run without building a new provider executable") + } + t.Parallel() + + tf := e2e.NewBinary(t, tofuBin, "testdata/provider-plugin") + + // In order to do a decent end-to-end test for this case we will need a real + // enough provider plugin to try to run and make sure we are able to + // actually run it. Here will build the simple and simple6 (built with + // protocol v6) providers. + simple6Provider := filepath.Join(tf.WorkDir(), "terraform-provider-simple6") + simple6ProviderExe := e2e.GoBuild("github.com/kubegems/opentofu/pkg/provider-simple-v6/main", simple6Provider) + + simpleProvider := filepath.Join(tf.WorkDir(), "terraform-provider-simple") + simpleProviderExe := e2e.GoBuild("github.com/kubegems/opentofu/pkg/provider-simple/main", simpleProvider) + + // Move the provider binaries into a directory that we will point tofu + // to using the -plugin-dir cli flag. 
+ platform := getproviders.CurrentPlatform.String() + hashiDir := "cache/registry.opentofu.org/hashicorp/" + if err := os.MkdirAll(tf.Path(hashiDir, "simple6/0.0.1/", platform), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.Rename(simple6ProviderExe, tf.Path(hashiDir, "simple6/0.0.1/", platform, "terraform-provider-simple6")); err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(tf.Path(hashiDir, "simple/0.0.1/", platform), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.Rename(simpleProviderExe, tf.Path(hashiDir, "simple/0.0.1/", platform, "terraform-provider-simple")); err != nil { + t.Fatal(err) + } + + //// INIT + _, stderr, err := tf.Run("init", "-plugin-dir=cache") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Apply complete! Resources: 2 added, 0 changed, 0 destroyed.") { + t.Fatalf("wrong output:\nstdout:%s\nstderr%s", stdout, stderr) + } + + /// DESTROY + stdout, stderr, err = tf.Run("destroy", "-auto-approve") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 2 destroyed") { + t.Fatalf("wrong destroy output\nstdout:%s\nstderr:%s", stdout, stderr) + } +} diff --git a/pkg/command/e2etest/providers_mirror_test.go b/pkg/command/e2etest/providers_mirror_test.go new file mode 100644 index 00000000000..ca8982cde2c --- /dev/null +++ b/pkg/command/e2etest/providers_mirror_test.go @@ -0,0 +1,86 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "os" + "path/filepath" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/e2e" +) + +// The tests in this file are for the "tofu providers mirror" command, +// which is tested in an e2etest mode rather than a unit test mode because it +// interacts directly with OpenTofu Registry and the full details of that are +// tricky to mock. Such a mock is _possible_, but we're using e2etest as a +// compromise for now to keep these tests relatively simple. + +func TestOpenTofuProvidersMirror(t *testing.T) { + testOpenTofuProvidersMirror(t, "tofu-providers-mirror") +} + +func TestOpenTofuProvidersMirrorWithLockFile(t *testing.T) { + testOpenTofuProvidersMirror(t, "tofu-providers-mirror-with-lock-file") +} + +func testOpenTofuProvidersMirror(t *testing.T, fixture string) { + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + outputDir := t.TempDir() + t.Logf("creating mirror directory in %s", outputDir) + + fixturePath := filepath.Join("testdata", fixture) + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("providers", "mirror", "-platform=linux_amd64", "-platform=windows_386", outputDir) + if err != nil { + t.Fatalf("unexpected error: %s\nstdout:\n%s\nstderr:\n%s", err, stdout, stderr) + } + + // The test fixture includes exact version constraints for the two + // providers it depends on so that the following should remain stable. + // In the (unlikely) event that these particular versions of these + // providers are removed from the registry, this test will start to fail. 
+ want := []string{ + "registry.opentofu.org/hashicorp/null/2.1.0.json", + "registry.opentofu.org/hashicorp/null/index.json", + "registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip", + "registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_windows_386.zip", + "registry.opentofu.org/hashicorp/template/2.1.1.json", + "registry.opentofu.org/hashicorp/template/index.json", + "registry.opentofu.org/hashicorp/template/terraform-provider-template_2.1.1_linux_amd64.zip", + "registry.opentofu.org/hashicorp/template/terraform-provider-template_2.1.1_windows_386.zip", + } + var got []string + walkErr := filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil // we only care about leaf files for this test + } + relPath, err := filepath.Rel(outputDir, path) + if err != nil { + return err + } + got = append(got, filepath.ToSlash(relPath)) + return nil + }) + if walkErr != nil { + t.Fatal(walkErr) + } + sort.Strings(got) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected files in result\n%s", diff) + } +} diff --git a/pkg/command/e2etest/providers_tamper_test.go b/pkg/command/e2etest/providers_tamper_test.go new file mode 100644 index 00000000000..4408bfc9a3f --- /dev/null +++ b/pkg/command/e2etest/providers_tamper_test.go @@ -0,0 +1,274 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// TestProviderTampering tests various ways that the provider plugins in the +// local cache directory might be modified after an initial "tofu init", +// which other OpenTofu commands which use those plugins should catch and +// report early. 
+func TestProviderTampering(t *testing.T) { + // General setup: we'll do a one-off init of a test directory as our + // starting point, and then we'll clone that result for each test so + // that we can save the cost of a repeated re-init with the same + // provider. + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // null provider, so it can only run if network access is allowed. + skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "provider-tampering-base") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + if !strings.Contains(stdout, "Installing hashicorp/null v") { + t.Errorf("null provider download message is missing from init output:\n%s", stdout) + t.Logf("(this can happen if you have a copy of the plugin in one of the global plugin search dirs)") + } + + seedDir := tf.WorkDir() + const providerVersion = "3.1.0" // must match the version in the fixture config + pluginDir := filepath.Join(".terraform", "providers", "registry.opentofu.org", "hashicorp", "null", providerVersion, getproviders.CurrentPlatform.String()) + pluginExe := filepath.Join(pluginDir, "terraform-provider-null_v"+providerVersion+"_x5") + if getproviders.CurrentPlatform.OS == "windows" { + pluginExe += ".exe" // ugh + } + + // filepath.Join here to make sure we get the right path separator + // for whatever OS we're running these tests on. 
+ providerCacheDir := filepath.Join(".terraform", "providers") + + t.Run("cache dir totally gone", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + err := os.RemoveAll(filepath.Join(workDir, ".terraform")) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("plan") + if err == nil { + t.Fatalf("unexpected plan success\nstdout:\n%s", stdout) + } + if want := `registry.opentofu.org/hashicorp/null: there is no package for registry.opentofu.org/hashicorp/null 3.1.0 cached in ` + providerCacheDir; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `tofu init`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + + // Running init as suggested resolves the problem + _, stderr, err = tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + _, stderr, err = tf.Run("plan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + }) + t.Run("cache dir totally gone, explicit backend", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + err := os.WriteFile(filepath.Join(workDir, "backend.tf"), []byte(localBackendConfig), 0600) + if err != nil { + t.Fatal(err) + } + + err = os.RemoveAll(filepath.Join(workDir, ".terraform")) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("plan") + if err == nil { + t.Fatalf("unexpected plan success\nstdout:\n%s", stdout) + } + if want := `Initial configuration of the requested backend "local"`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `tofu init`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + + // 
Running init as suggested resolves the problem + _, stderr, err = tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + _, stderr, err = tf.Run("plan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + }) + t.Run("null plugin package modified before plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + err := os.WriteFile(filepath.Join(workDir, pluginExe), []byte("tamper"), 0600) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("plan") + if err == nil { + t.Fatalf("unexpected plan success\nstdout:\n%s", stdout) + } + if want := `registry.opentofu.org/hashicorp/null: the cached package for registry.opentofu.org/hashicorp/null 3.1.0 (in ` + providerCacheDir + `) does not match any of the checksums recorded in the dependency lock file`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `tofu init`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) + t.Run("version constraint changed in config before plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + err := os.WriteFile(filepath.Join(workDir, "provider-tampering-base.tf"), []byte(` + terraform { + required_providers { + null = { + source = "hashicorp/null" + version = "1.0.0" + } + } + } + `), 0600) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("plan") + if err == nil { + t.Fatalf("unexpected plan success\nstdout:\n%s", stdout) + } + if want := `provider registry.opentofu.org/hashicorp/null: locked version selection 3.1.0 doesn't match the updated version constraints "1.0.0"`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `tofu 
init -upgrade`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) + t.Run("lock file modified before plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + // NOTE: We're just emptying out the lock file here because that's + // good enough for what we're trying to assert. The leaf codepath + // that generates this family of errors has some different variations + // of this error message for otehr sorts of inconsistency, but those + // are tested more thoroughly over in the "configs" package, which is + // ultimately responsible for that logic. + err := os.WriteFile(filepath.Join(workDir, ".terraform.lock.hcl"), []byte(``), 0600) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("plan") + if err == nil { + t.Fatalf("unexpected plan success\nstdout:\n%s", stdout) + } + if want := `provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `tofu init`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) + t.Run("lock file modified after plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + _, stderr, err := tf.Run("plan", "-out", "tfplan") + if err != nil { + t.Fatalf("unexpected plan failure\nstderr:\n%s", stderr) + } + + err = os.Remove(filepath.Join(workDir, ".terraform.lock.hcl")) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("apply", "tfplan") + if err == nil { + t.Fatalf("unexpected apply success\nstdout:\n%s", stdout) + } + if want := `provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`; !strings.Contains(stderr, want) { + 
t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + if want := `Create a new plan from the updated configuration.`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) + t.Run("plugin cache dir entirely removed after plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + _, stderr, err := tf.Run("plan", "-out", "tfplan") + if err != nil { + t.Fatalf("unexpected plan failure\nstderr:\n%s", stderr) + } + + err = os.RemoveAll(filepath.Join(workDir, ".terraform")) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("apply", "tfplan") + if err == nil { + t.Fatalf("unexpected apply success\nstdout:\n%s", stdout) + } + if want := `registry.opentofu.org/hashicorp/null: there is no package for registry.opentofu.org/hashicorp/null 3.1.0 cached in ` + providerCacheDir; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) + t.Run("null plugin package modified after plan", func(t *testing.T) { + tf := e2e.NewBinary(t, tofuBin, seedDir) + workDir := tf.WorkDir() + + _, stderr, err := tf.Run("plan", "-out", "tfplan") + if err != nil { + t.Fatalf("unexpected plan failure\nstderr:\n%s", stderr) + } + + err = os.WriteFile(filepath.Join(workDir, pluginExe), []byte("tamper"), 0600) + if err != nil { + t.Fatal(err) + } + + stdout, stderr, err := tf.Run("apply", "tfplan") + if err == nil { + t.Fatalf("unexpected apply success\nstdout:\n%s", stdout) + } + if want := `registry.opentofu.org/hashicorp/null: the cached package for registry.opentofu.org/hashicorp/null 3.1.0 (in ` + providerCacheDir + `) does not match any of the checksums recorded in the dependency lock file`; !strings.Contains(stderr, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, stderr) + } + }) +} + +const 
localBackendConfig = ` +terraform { + backend "local" { + path = "terraform.tfstate" + } +} +` diff --git a/pkg/command/e2etest/provisioner_plugin_test.go b/pkg/command/e2etest/provisioner_plugin_test.go new file mode 100644 index 00000000000..5b31e2c8ecb --- /dev/null +++ b/pkg/command/e2etest/provisioner_plugin_test.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +// TestProvisionerPlugin is a test that tofu can execute a 3rd party +// provisioner plugin. +func TestProvisionerPlugin(t *testing.T) { + if !canRunGoBuild { + // We're running in a separate-build-then-run context, so we can't + // currently execute this test which depends on being able to build + // new executable at runtime. + // + // (See the comment on canRunGoBuild's declaration for more information.) + t.Skip("can't run without building a new provisioner executable") + } + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(t, tofuBin, "testdata/provisioner-plugin") + + // In order to do a decent end-to-end test for this case we will need a + // real enough provisioner plugin to try to run and make sure we are able + // to actually run it. 
Here will build the local-exec provisioner into a + // binary called test-provisioner + provisionerExePrefix := filepath.Join(tf.WorkDir(), "terraform-provisioner-test_") + provisionerExe := e2e.GoBuild("github.com/kubegems/opentofu/pkg/provisioner-local-exec/main", provisionerExePrefix) + + // provisioners must use the old binary name format, so rename this binary + newExe := filepath.Join(tf.WorkDir(), "terraform-provisioner-test") + if _, err := os.Stat(newExe); !os.IsNotExist(err) { + t.Fatalf("%q already exists", newExe) + } + if err := os.Rename(provisionerExe, newExe); err != nil { + t.Fatalf("error renaming provisioner binary: %v", err) + } + provisionerExe = newExe + + t.Logf("temporary provisioner executable is %s", provisionerExe) + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "HelloProvisioner") { + t.Fatalf("missing provisioner output:\n%s", stdout) + } +} diff --git a/pkg/command/e2etest/provisioner_test.go b/pkg/command/e2etest/provisioner_test.go new file mode 100644 index 00000000000..914e9ae2dc4 --- /dev/null +++ b/pkg/command/e2etest/provisioner_test.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +// TestProviderDevOverrides is a test that tofu can execute a 3rd party +// provisioner plugin. 
+func TestProvisioner(t *testing.T) { + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(t, tofuBin, "testdata/provisioner") + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "HelloProvisioner") { + t.Fatalf("missing provisioner output:\n%s", stdout) + } +} diff --git a/pkg/command/e2etest/remote_state_test.go b/pkg/command/e2etest/remote_state_test.go new file mode 100644 index 00000000000..497a7a0fa96 --- /dev/null +++ b/pkg/command/e2etest/remote_state_test.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestOpenTofuProviderRead(t *testing.T) { + // Ensure the tofu provider can correctly read a remote state + + t.Parallel() + fixturePath := filepath.Join("testdata", "tf-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } +} diff --git a/pkg/command/e2etest/static_plan_test.go b/pkg/command/e2etest/static_plan_test.go new file mode 100644 index 00000000000..9378652bf52 --- /dev/null +++ b/pkg/command/e2etest/static_plan_test.go @@ -0,0 +1,144 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +// This is an e2e test as it relies on very specific configuration +// within the meta object that is currently very hard to mock out. +func TestStaticPlanVariables(t *testing.T) { + fixtures := []string{ + "static_plan_variables", + "static_plan_typed_variables", + } + for _, fixture := range fixtures { + t.Run(fmt.Sprintf("TestStaticPlanVariables/%s", fixture), func(t *testing.T) { + fixturePath := filepath.Join("testdata", fixture) + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + run := func(args ...string) tofuResult { + stdout, stderr, err := tf.Run(args...) 
+ return tofuResult{t, stdout, stderr, err} + } + + statePath := "custom.tfstate" + stateVar := "-var=state_path=" + statePath + modVar := "-var=src=./mod" + planfile := "static.plan" + + modErr := "module.mod.source depends on var.src which is not available" + backendErr := "backend.local depends on var.state_path which is not available" + + // Init + run("init").Failure().StderrContains(modErr) + run("init", stateVar, modVar).Success() + + // Get + run("get").Failure().StderrContains(modErr) + run("get", stateVar, modVar).Success() + + // Validate + run("validate").Failure().StderrContains(modErr) + run("validate", stateVar, modVar).Success() + + // Providers + run("providers").Failure().StderrContains(modErr) + run("providers", stateVar, modVar).Success() + run("providers", "lock").Failure().StderrContains(modErr) + run("providers", "lock", stateVar, modVar).Success() + run("providers", "mirror", "./tempproviders").Failure().StderrContains(modErr) + run("providers", "mirror", stateVar, modVar, "./tempproviders").Failure().StderrContains("Could not scan the output directory to get package metadata for the JSON") + run("providers", "schema", "-json").Failure().StderrContains(backendErr) + run("providers", "schema", "-json", stateVar, modVar).Success() + + // Check console init (early exits due to stdin setup) + run("console").Failure().StderrContains(backendErr) + run("console", stateVar, modVar).Success() + + // Check graph (without plan) + run("graph").Failure().StderrContains(backendErr) + run("graph", stateVar, modVar).Success() + + // Plan with static variable + run("plan", stateVar, modVar, "-out="+planfile).Success() + + // Show plan without static variable (embedded) + run("show", planfile).Success() + + // Check graph (without plan) + run("graph", "-plan="+planfile).Success() + + // Apply plan without static variable (embedded) + run("apply", planfile).Success() + + // Show State + run("show", statePath).Failure().StderrContains(modErr) + run("show", 
stateVar, modVar, statePath).Success().Contains(`out = "placeholder"`) + + // Force Unlock + run("force-unlock", "ident").Failure().StderrContains(backendErr) + run("force-unlock", stateVar, modVar, "ident").Failure().StderrContains("Local state cannot be unlocked by another process") + + // Output values + run("output").Failure().StderrContains(backendErr) + run("output", stateVar, modVar).Success().Contains(`out = "placeholder"`) + + // Refresh + run("refresh").Failure().StderrContains(backendErr) + run("refresh", stateVar, modVar).Success().Contains("There are currently no remote objects tracked in the state") + + // Import + run("import", "resource.addr", "id").Failure().StderrContains(modErr) + run("import", stateVar, modVar, "resource.addr", "id").Failure().StderrContains("Before importing this resource, please create its configuration in the root module.") + + // Taint + run("taint", "resource.addr").Failure().StderrContains(modErr) + run("taint", stateVar, modVar, "resource.addr").Failure().StderrContains("There is no resource instance in the state with the address resource.addr.") + run("untaint", "resource.addr").Failure().StderrContains(backendErr) + run("untaint", stateVar, modVar, "resource.addr").Failure().StderrContains("There is no resource instance in the state with the address resource.addr.") + + // State + run("state", "list").Failure().StderrContains(backendErr) + run("state", "list", stateVar, modVar).Success() + run("state", "mv", "foo.bar", "foo.baz").Failure().StderrContains(modErr) + run("state", "mv", stateVar, modVar, "foo.bar", "foo.baz").Failure().StderrContains("Cannot move foo.bar: does not match anything in the current state.") + run("state", "pull").Failure().StderrContains(modErr) + run("state", "pull", stateVar, modVar).Success().Contains(`"outputs":{"out":{"value":"placeholder","type":"string"}}`) + run("state", "push", statePath).Failure().StderrContains(modErr) + run("state", "push", stateVar, modVar, statePath).Success() + 
run("state", "replace-provider", "foo", "bar").Failure().StderrContains(modErr) + run("state", "replace-provider", stateVar, modVar, "foo", "bar").Success().Contains("No matching resources found.") + run("state", "rm", "foo.bar").Failure().StderrContains(modErr) + run("state", "rm", stateVar, modVar, "foo.bar").Failure().StderrContains("No matching objects found.") + run("state", "show", "out").Failure().StderrContains(backendErr) + run("state", "show", stateVar, modVar, "invalid.resource").Failure().StderrContains("No instance found for the given address!") + + // Workspace + run("workspace", "list").Failure().StderrContains(backendErr) + run("workspace", "list", stateVar, modVar).Success().Contains(`default`) + run("workspace", "new", "foo").Failure().StderrContains(backendErr) + run("workspace", "new", stateVar, modVar, "foo").Success().Contains(`foo`) + run("workspace", "select", "default").Failure().StderrContains(backendErr) + run("workspace", "select", stateVar, modVar, "default").Success().Contains(`default`) + run("workspace", "delete", "foo").Failure().StderrContains(backendErr) + run("workspace", "delete", stateVar, modVar, "foo").Success().Contains(`foo`) + + // Test + run("test").Failure().StderrContains(modErr) + run("test", stateVar, modVar).Success().Contains(`Success!`) + + // Destroy + run("destroy", "-auto-approve").Failure().StderrContains(backendErr) + run("destroy", stateVar, modVar, "-auto-approve").Success().Contains("You can apply this plan to save these new output values") + }) + } +} diff --git a/pkg/command/e2etest/strip_ansi.go b/pkg/command/e2etest/strip_ansi.go new file mode 100644 index 00000000000..0d273478eb5 --- /dev/null +++ b/pkg/command/e2etest/strip_ansi.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var ansiRe = regexp.MustCompile(ansi) + +func stripAnsi(str string) string { + return ansiRe.ReplaceAllString(str, "") +} diff --git a/pkg/command/e2etest/test_test.go b/pkg/command/e2etest/test_test.go new file mode 100644 index 00000000000..ea64b553fea --- /dev/null +++ b/pkg/command/e2etest/test_test.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "strings" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestMultipleRunBlocks(t *testing.T) { + timeout := time.After(5 * time.Second) + type testResult struct { + stdout string + stderr string + err error + } + done := make(chan *testResult) + + go func() { + fixturePath := filepath.Join("testdata", "multiple-run-blocks") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + stdout, stderr, err := tf.Run("test") + done <- &testResult{ + stdout: stdout, + stderr: stderr, + err: err, + } + }() + + select { + case <-timeout: + t.Fatal("timed out") + case result := <-done: + if result.err != nil { + t.Errorf("unexpected error: %s", result.err) + } + + if result.stderr != "" { + t.Errorf("unexpected stderr output:\n%s", result.stderr) + } + + if !strings.Contains(result.stdout, "30 passed") { + t.Errorf("success message is missing from output:\n%s", result.stdout) + } + } +} + +func TestMocksAndOverrides(t *testing.T) { + // This test fetches providers from registry. 
+ skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(t, tofuBin, filepath.Join("testdata", "overrides-in-tests")) + + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error on 'init': %v", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output on 'init':\n%s", stderr) + } + if stdout == "" { + t.Errorf("expected some output on 'init', got nothing") + } + + stdout, stderr, err = tf.Run("test") + if err != nil { + t.Errorf("unexpected error on 'test': %v", err) + } + if stderr != "" { + t.Errorf("unexpected stderr output on 'test':\n%s", stderr) + } + if !strings.Contains(stdout, "13 passed, 0 failed") { + t.Errorf("output doesn't have expected success string:\n%s", stdout) + } +} diff --git a/pkg/command/e2etest/testdata/chdir-option/subdir/main.tf b/pkg/command/e2etest/testdata/chdir-option/subdir/main.tf new file mode 100644 index 00000000000..eddb20049c8 --- /dev/null +++ b/pkg/command/e2etest/testdata/chdir-option/subdir/main.tf @@ -0,0 +1,7 @@ +output "cwd" { + value = path.cwd +} + +output "root" { + value = abspath(path.root) +} diff --git a/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc b/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc new file mode 100644 index 00000000000..4b4dbefa7f9 --- /dev/null +++ b/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc @@ -0,0 +1,8 @@ +provider_installation { + filesystem_mirror { + path = "./fs-mirror" + } + direct { + exclude = ["example.com/*/*"] + } +} diff --git a/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json b/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json new file mode 100644 index 00000000000..6e5e946b8eb --- /dev/null +++ b/pkg/command/e2etest/testdata/custom-provider-install-method/cliconfig.tfrc.json @@ -0,0 +1,9 @@ +{ + "provider_installation": { + "filesystem_mirror": [ + { + "path": "./fs-mirror" + } + ] + } +} diff --git 
a/pkg/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 b/pkg/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 new file mode 100644 index 00000000000..3299bec8ad8 --- /dev/null +++ b/pkg/command/e2etest/testdata/custom-provider-install-method/fs-mirror/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 @@ -0,0 +1,2 @@ +This is not a real plugin executable. It's just here to be discovered by the +provider installation process. diff --git a/pkg/command/e2etest/testdata/custom-provider-install-method/main.tf b/pkg/command/e2etest/testdata/custom-provider-install-method/main.tf new file mode 100644 index 00000000000..a521cf07bc2 --- /dev/null +++ b/pkg/command/e2etest/testdata/custom-provider-install-method/main.tf @@ -0,0 +1,21 @@ +# The purpose of this test is to refer to a provider whose address contains +# a hostname that is only used for namespacing purposes and doesn't actually +# have a provider registry deployed at it. +# +# A user can install such a provider in one of the implied local filesystem +# directories and Terraform should accept that as the selection for that +# provider without producing any errors about the fact that example.com +# does not have a provider registry. +# +# For this test in particular we're using the "vendor" directory that is +# the documented way to include provider plugins directly inside a +# configuration uploaded to Terraform Cloud, but this functionality applies +# to all of the implicit local filesystem search directories. 
+ +terraform { + required_providers { + happycloud = { + source = "example.com/awesomecorp/happycloud" + } + } +} diff --git a/pkg/command/e2etest/testdata/empty/.exists b/pkg/command/e2etest/testdata/empty/.exists new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/e2etest/testdata/encryption-flow/broken.tf.disabled b/pkg/command/e2etest/testdata/encryption-flow/broken.tf.disabled new file mode 100644 index 00000000000..46176db92dd --- /dev/null +++ b/pkg/command/e2etest/testdata/encryption-flow/broken.tf.disabled @@ -0,0 +1,29 @@ +variable "passphrase" { + type = string + default = "aaaaaaaa-83f1-47ec-9b2d-2aebf6417167" +} + +locals { + key_length = 32 +} + +terraform { + encryption { + key_provider "pbkdf2" "basic" { + passphrase = var.passphrase + key_length = local.key_length + iterations = 200000 + hash_function = "sha512" + salt_length = 12 + } + method "aes_gcm" "example" { + keys = key_provider.pbkdf2.basic + } + state { + method = method.aes_gcm.example + } + plan { + method = method.aes_gcm.example + } + } +} diff --git a/pkg/command/e2etest/testdata/encryption-flow/main.tf b/pkg/command/e2etest/testdata/encryption-flow/main.tf new file mode 100644 index 00000000000..af9341cc1b2 --- /dev/null +++ b/pkg/command/e2etest/testdata/encryption-flow/main.tf @@ -0,0 +1,7 @@ +variable "iter" { + type = string +} + +resource "tfcoremock_simple_resource" "simple" { + string = "helloworld ${var.iter}" +} diff --git a/pkg/command/e2etest/testdata/encryption-flow/migratefrom.tf.disabled b/pkg/command/e2etest/testdata/encryption-flow/migratefrom.tf.disabled new file mode 100644 index 00000000000..d16f50bef81 --- /dev/null +++ b/pkg/command/e2etest/testdata/encryption-flow/migratefrom.tf.disabled @@ -0,0 +1,27 @@ +terraform { + encryption { + key_provider "pbkdf2" "basic" { + passphrase = "26281afb-83f1-47ec-9b2d-2aebf6417167" + key_length = 32 + iterations = 200000 + hash_function = "sha512" + salt_length = 12 + } + method "aes_gcm" "example" { 
+ keys = key_provider.pbkdf2.basic + } + method "unencrypted" "fallback" {} + state { + method = method.unencrypted.fallback + fallback { + method = method.aes_gcm.example + } + } + plan { + method = method.unencrypted.fallback + fallback { + method = method.aes_gcm.example + } + } + } +} diff --git a/pkg/command/e2etest/testdata/encryption-flow/migrateto.tf.disabled b/pkg/command/e2etest/testdata/encryption-flow/migrateto.tf.disabled new file mode 100644 index 00000000000..a5d0e841df8 --- /dev/null +++ b/pkg/command/e2etest/testdata/encryption-flow/migrateto.tf.disabled @@ -0,0 +1,27 @@ +terraform { + encryption { + key_provider "pbkdf2" "basic" { + passphrase = "26281afb-83f1-47ec-9b2d-2aebf6417167" + key_length = 32 + iterations = 200000 + hash_function = "sha512" + salt_length = 12 + } + method "aes_gcm" "example" { + keys = key_provider.pbkdf2.basic + } + method "unencrypted" "fallback" {} + state { + method = method.aes_gcm.example + fallback { + method = method.unencrypted.fallback + } + } + plan { + method = method.aes_gcm.example + fallback { + method = method.unencrypted.fallback + } + } + } +} diff --git a/pkg/command/e2etest/testdata/encryption-flow/required.tf.disabled b/pkg/command/e2etest/testdata/encryption-flow/required.tf.disabled new file mode 100644 index 00000000000..d036c553f28 --- /dev/null +++ b/pkg/command/e2etest/testdata/encryption-flow/required.tf.disabled @@ -0,0 +1,20 @@ +terraform { + encryption { + key_provider "pbkdf2" "basic" { + passphrase = "26281afb-83f1-47ec-9b2d-2aebf6417167" + key_length = 32 + iterations = 200000 + hash_function = "sha512" + salt_length = 12 + } + method "aes_gcm" "example" { + keys = key_provider.pbkdf2.basic + } + state { + method = method.aes_gcm.example + } + plan { + method = method.aes_gcm.example + } + } +} diff --git a/pkg/command/e2etest/testdata/full-workflow-null/main.tf b/pkg/command/e2etest/testdata/full-workflow-null/main.tf new file mode 100644 index 00000000000..1c3fc36e10b --- /dev/null +++ 
b/pkg/command/e2etest/testdata/full-workflow-null/main.tf @@ -0,0 +1,22 @@ + +variable "name" { + default = "world" +} + +data "template_file" "test" { + template = "Hello, $${name}" + + vars = { + name = "${var.name}" + } +} + +resource "null_resource" "test" { + triggers = { + greeting = "${data.template_file.test.rendered}" + } +} + +output "greeting" { + value = "${null_resource.test.triggers["greeting"]}" +} diff --git a/pkg/command/e2etest/testdata/functions-error/main.tf b/pkg/command/e2etest/testdata/functions-error/main.tf new file mode 100644 index 00000000000..edad8ecafd2 --- /dev/null +++ b/pkg/command/e2etest/testdata/functions-error/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + example = { + source = "opentofu/testfunctions" + version = "1.0.0" + } + } +} + +output "dummy" { + value = provider::example::error() +} diff --git a/pkg/command/e2etest/testdata/functions/main.tf b/pkg/command/e2etest/testdata/functions/main.tf new file mode 100644 index 00000000000..21da4e66f83 --- /dev/null +++ b/pkg/command/e2etest/testdata/functions/main.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + example = { + source = "opentofu/testfunctions" + version = "1.0.0" + } + } +} + +variable "number" { + type = number + default = 1 + validation { + condition = provider::example::echo(var.number) > 0 + error_message = "number must be > ${provider::example::echo(0)}" + } +} + +output "dummy" { + value = provider::example::echo("Hello Functions") +} diff --git a/pkg/command/e2etest/testdata/local-only-provider/main.tf b/pkg/command/e2etest/testdata/local-only-provider/main.tf new file mode 100644 index 00000000000..a521cf07bc2 --- /dev/null +++ b/pkg/command/e2etest/testdata/local-only-provider/main.tf @@ -0,0 +1,21 @@ +# The purpose of this test is to refer to a provider whose address contains +# a hostname that is only used for namespacing purposes and doesn't actually +# have a provider registry deployed at it. 
+# +# A user can install such a provider in one of the implied local filesystem +# directories and Terraform should accept that as the selection for that +# provider without producing any errors about the fact that example.com +# does not have a provider registry. +# +# For this test in particular we're using the "vendor" directory that is +# the documented way to include provider plugins directly inside a +# configuration uploaded to Terraform Cloud, but this functionality applies +# to all of the implicit local filesystem search directories. + +terraform { + required_providers { + happycloud = { + source = "example.com/awesomecorp/happycloud" + } + } +} diff --git a/pkg/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 b/pkg/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 new file mode 100644 index 00000000000..3299bec8ad8 --- /dev/null +++ b/pkg/command/e2etest/testdata/local-only-provider/terraform.d/plugins/example.com/awesomecorp/happycloud/1.2.0/os_arch/terraform-provider-happycloud_v1.2.0 @@ -0,0 +1,2 @@ +This is not a real plugin executable. It's just here to be discovered by the +provider installation process. diff --git a/pkg/command/e2etest/testdata/module-archive/main.tf b/pkg/command/e2etest/testdata/module-archive/main.tf new file mode 100644 index 00000000000..8101c8094ee --- /dev/null +++ b/pkg/command/e2etest/testdata/module-archive/main.tf @@ -0,0 +1,5 @@ +// this should be able to unpack the tarball and change the module directory to +// the archive directory regardless of its name. 
+module "bucket" { + source = "https://github.com/terraform-aws-modules/terraform-aws-s3-bucket/archive/v3.3.0.tar.gz//*?archive=tar.gz" +} diff --git a/pkg/command/e2etest/testdata/multiple-run-blocks/main.tf b/pkg/command/e2etest/testdata/multiple-run-blocks/main.tf new file mode 100644 index 00000000000..585f9075d13 --- /dev/null +++ b/pkg/command/e2etest/testdata/multiple-run-blocks/main.tf @@ -0,0 +1,8 @@ +resource "terraform_data" "provision" { + connection { + host = "localhost" + } + provisioner "remote-exec" { + inline = ["echo test"] + } +} diff --git a/pkg/command/e2etest/testdata/multiple-run-blocks/main.tftest.hcl b/pkg/command/e2etest/testdata/multiple-run-blocks/main.tftest.hcl new file mode 100644 index 00000000000..de9605bc0b3 --- /dev/null +++ b/pkg/command/e2etest/testdata/multiple-run-blocks/main.tftest.hcl @@ -0,0 +1,30 @@ +run "test_01" { command = plan } +run "test_02" { command = plan } +run "test_03" { command = plan } +run "test_04" { command = plan } +run "test_05" { command = plan } +run "test_06" { command = plan } +run "test_07" { command = plan } +run "test_08" { command = plan } +run "test_09" { command = plan } +run "test_10" { command = plan } +run "test_11" { command = plan } +run "test_12" { command = plan } +run "test_13" { command = plan } +run "test_14" { command = plan } +run "test_15" { command = plan } +run "test_16" { command = plan } +run "test_17" { command = plan } +run "test_18" { command = plan } +run "test_19" { command = plan } +run "test_20" { command = plan } +run "test_21" { command = plan } +run "test_22" { command = plan } +run "test_23" { command = plan } +run "test_24" { command = plan } +run "test_25" { command = plan } +run "test_26" { command = plan } +run "test_27" { command = plan } +run "test_28" { command = plan } +run "test_29" { command = plan } +run "test_30" { command = plan } diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/first/main.tf 
b/pkg/command/e2etest/testdata/overrides-in-tests/first/main.tf new file mode 100644 index 00000000000..69e57bd2404 --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/first/main.tf @@ -0,0 +1,13 @@ +resource "local_file" "dont_create_me" { + filename = "${path.module}/dont_create_me.txt" + content = "101" +} + +resource "local_file" "create_me" { + filename = "${path.module}/create_me.txt" + content = "101" +} + +output "create_me_filename" { + value = "main.tf" +} diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/main.tf b/pkg/command/e2etest/testdata/overrides-in-tests/main.tf new file mode 100644 index 00000000000..84b6d9ff404 --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/main.tf @@ -0,0 +1,100 @@ +module "first" { + source = "./first" +} + +module "second" { + source = "./second" +} + +resource "local_file" "dont_create_me" { + filename = "${path.module}/dont_create_me.txt" + content = "101" +} + +resource "local_file" "create_me" { + filename = "${path.module}/create_me.txt" + content = "101" +} + +data "local_file" "second_mod_file" { + filename = module.first.create_me_filename +} + +resource "random_integer" "count" { + count = 2 + + min = 1 + max = 10 +} + +resource "random_integer" "for_each" { + for_each = { + "a": { + "min": 1 + "max": 10 + } + "b": { + "min": 20 + "max": 30 + } + } + + min = each.value.min + max = each.value.max +} + +module "rand_for_each" { + for_each = { + "a": 1 + "b": 2 + } + + source = "./rand" +} + +module "rand_count" { + count = 2 + + source = "./rand" +} + +resource "aws_s3_bucket" "test" { + bucket = "must not be used anyway" +} + +data "aws_s3_bucket" "test" { + bucket = "must not be used anyway" +} + +provider "local" { + alias = "aliased" +} + +resource "local_file" "mocked" { + provider = local.aliased + filename = "mocked.txt" + content = "I am mocked file, do not create me please" +} + +data "local_file" "maintf" { + provider = local.aliased + filename = "main.tf" +} + 
+resource "random_pet" "cat" {} + +provider random { + alias = "aliased" +} + +resource "random_integer" "aliased" { + provider = random.aliased + + # helps create a new value when test with mocked pet runs + keepers = { + pet = random_pet.cat.id + } + + min = 1 + max = 10 +} diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/main.tftest.hcl b/pkg/command/e2etest/testdata/overrides-in-tests/main.tftest.hcl new file mode 100644 index 00000000000..13ff33d5308 --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/main.tftest.hcl @@ -0,0 +1,311 @@ +override_module { + target = module.second +} + +override_resource { + target = local_file.dont_create_me +} + +override_resource { + target = module.first.local_file.dont_create_me +} + +run "check_root_overridden_res" { + assert { + condition = !fileexists("${path.module}/dont_create_me.txt") + error_message = "File 'dont_create_me.txt' must not be created in the root module" + } +} + +run "check_root_overridden_res_twice" { + override_resource { + target = local_file.dont_create_me + values = { + file_permission = "0333" + } + } + + assert { + condition = !fileexists("${path.module}/dont_create_me.txt") && local_file.dont_create_me.file_permission == "0333" + error_message = "File 'dont_create_me.txt' must not be created in the root module and its file_permission must be overridden" + } +} + +run "check_root_data" { + assert { + condition = data.local_file.second_mod_file.content == file("main.tf") + error_message = "Content from the local_file data in the root module must be from real file" + } +} + +run "check_root_overridden_data" { + override_data { + target = data.local_file.second_mod_file + values = { + content = "101" + } + } + + assert { + condition = data.local_file.second_mod_file.content == "101" + error_message = "Content from the local_file data in the root module must be overridden" + } +} + +run "check_overridden_module_output" { + override_module { + target = module.first + outputs 
= { + create_me_filename = "main.tftest.hcl" + } + } + + assert { + condition = data.local_file.second_mod_file.content == file("main.tftest.hcl") + error_message = "Overridden module output is not used in the depending data resource" + } +} + +run "check_first_module" { + assert { + condition = fileexists("${path.module}/first/create_me.txt") + error_message = "File 'create_me.txt' must be created in the first module" + } +} + +run "check_first_module_overridden_res" { + assert { + condition = !fileexists("${path.module}/first/dont_create_me.txt") + error_message = "File 'dont_create_me.txt' must not be created in the first module" + } +} + +run "check_second_module" { + assert { + condition = !fileexists("${path.module}/second/dont_create_me.txt") + error_message = "File 'dont_create_me.txt' must not be created in the second module" + } +} + +run "check_third_module" { + assert { + condition = !fileexists("${path.module}/second/third/dont_create_me.txt") + error_message = "File 'dont_create_me.txt' must not be created in the third module" + } +} + +override_resource { + target = random_integer.count +} + +override_resource { + target = random_integer.for_each +} + +override_module { + target = module.rand_count +} + +override_module { + target = module.rand_for_each +} + +run "check_for_each_n_count_mocked" { + assert { + condition = random_integer.count[0].result == 0 + error_message = "Mocked random integer should be 0" + } + + assert { + condition = random_integer.count[1].result == 0 + error_message = "Mocked random integer should be 0" + } + + assert { + condition = random_integer.for_each["a"].result == 0 + error_message = "Mocked random integer should be 0" + } + + assert { + condition = random_integer.for_each["b"].result == 0 + error_message = "Mocked random integer should be 0" + } + + assert { + condition = module.rand_count[0].random_integer == null + error_message = "Mocked random integer should be null" + } + + assert { + condition = 
module.rand_count[1].random_integer == null + error_message = "Mocked random integer should be null" + } + + assert { + condition = module.rand_for_each["a"].random_integer == null + error_message = "Mocked random integer should be null" + } + + assert { + condition = module.rand_for_each["b"].random_integer == null + error_message = "Mocked random integer should be null" + } +} + +run "check_for_each_n_count_overridden" { + override_resource { + target = random_integer.count + values = { + result = 101 + } + } + + assert { + condition = random_integer.count[0].result == 101 + error_message = "Overridden random integer should be 101" + } + + assert { + condition = random_integer.count[1].result == 101 + error_message = "Overridden random integer should be 101" + } + + override_resource { + target = random_integer.for_each + values = { + result = 101 + } + } + + assert { + condition = random_integer.for_each["a"].result == 101 + error_message = "Overridden random integer should be 101" + } + + assert { + condition = random_integer.for_each["b"].result == 101 + error_message = "Overridden random integer should be 101" + } + + override_module { + target = module.rand_count + outputs = { + random_integer = 101 + } + } + + assert { + condition = module.rand_count[0].random_integer == 101 + error_message = "Mocked random integer should be 101" + } + + assert { + condition = module.rand_count[1].random_integer == 101 + error_message = "Mocked random integer should be 101" + } + + override_module { + target = module.rand_for_each + outputs = { + random_integer = 101 + } + } + + assert { + condition = module.rand_for_each["a"].random_integer == 101 + error_message = "Mocked random integer should be 101" + } + + assert { + condition = module.rand_for_each["b"].random_integer == 101 + error_message = "Mocked random integer should be 101" + } +} + +# ensures non-aliased provider is mocked by default +mock_provider "aws" { + mock_resource "aws_s3_bucket" { + defaults = { + arn 
= "arn:aws:s3:::mocked" + } + } + + mock_data "aws_s3_bucket" { + defaults = { + bucket_domain_name = "mocked.com" + } + } +} + +# ensures non-aliased provider works as intended +# and aliased one is mocked +mock_provider "local" { + alias = "aliased" +} + +# ensures we can use this provider in run's providers block +# to use mocked one only for a specific test +mock_provider "random" { + alias = "for_pets" + + mock_resource "random_pet" { + defaults = { + id = "my lovely cat" + } + } +} + +mock_provider "random" { + alias = "aliased" + + mock_resource "random_integer" { + defaults = { + id = "11" + } + } +} + +run "check_mock_providers" { + assert { + condition = resource.aws_s3_bucket.test.arn == "arn:aws:s3:::mocked" + error_message = "aws s3 bucket resource doesn't have mocked values" + } + + assert { + condition = data.aws_s3_bucket.test.bucket_domain_name == "mocked.com" + error_message = "aws s3 bucket data doesn't have mocked values" + } + + assert { + condition = !fileexists(local_file.mocked.filename) + error_message = "file should not be created due to provider being mocked" + } + + assert { + condition = data.local_file.maintf.content != file("main.tf") + error_message = "file should not be read due to provider being mocked" + } + + assert { + condition = resource.random_integer.aliased.id == "11" + error_message = "random integer should be 11 due to provider being mocked" + } +} + +run "check_providers_block" { + providers = { + aws = aws + local.aliased = local.aliased + random = random.for_pets + } + + assert { + condition = resource.random_pet.cat.id == "my lovely cat" + error_message = "providers block in run should allow replacing real providers by mocked" + } + + assert { + condition = resource.random_integer.aliased.id != "11" + error_message = "random integer should not be mocked if providers block present" + } +} diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/rand/main.tf 
b/pkg/command/e2etest/testdata/overrides-in-tests/rand/main.tf new file mode 100644 index 00000000000..eb813649185 --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/rand/main.tf @@ -0,0 +1,8 @@ +resource "random_integer" "main" { + min = 1 + max = 20 +} + +output "random_integer" { + value = random_integer.main.id +} diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/second/main.tf b/pkg/command/e2etest/testdata/overrides-in-tests/second/main.tf new file mode 100644 index 00000000000..82c3f2c1184 --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/second/main.tf @@ -0,0 +1,8 @@ +module "third" { + source = "./third" +} + +resource "local_file" "dont_create_me" { + filename = "${path.module}/dont_create_me.txt" + content = "101" +} diff --git a/pkg/command/e2etest/testdata/overrides-in-tests/second/third/main.tf b/pkg/command/e2etest/testdata/overrides-in-tests/second/third/main.tf new file mode 100644 index 00000000000..2d0eee840da --- /dev/null +++ b/pkg/command/e2etest/testdata/overrides-in-tests/second/third/main.tf @@ -0,0 +1,4 @@ +resource "local_file" "dont_create_me" { + filename = "${path.module}/dont_create_me.txt" + content = "101" +} diff --git a/pkg/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl b/pkg/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl new file mode 100644 index 00000000000..caf2b06be69 --- /dev/null +++ b/pkg/command/e2etest/testdata/plugin-cache/.terraform.lock.hcl @@ -0,0 +1,14 @@ +# The global cache is only an eligible installation source if there's already +# a lock entry for the given provider and it contains at least one checksum +# that matches the cache entry. +# +# This lock file therefore matches the "not a real provider" fake executable +# under the "cache" directory, rather than the real provider from upstream, +# so that Terraform CLI will consider the cache entry as valid. 
+ +provider "registry.opentofu.org/hashicorp/template" { + version = "2.1.0" + hashes = [ + "h1:e7YvVlRZlaZJ8ED5KnH0dAg0kPL0nAU7eEoCAZ/sOos=", + ] +} diff --git a/pkg/command/e2etest/testdata/plugin-cache/cache/registry.opentofu.org/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 b/pkg/command/e2etest/testdata/plugin-cache/cache/registry.opentofu.org/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 new file mode 100644 index 00000000000..c92a59ab2fb --- /dev/null +++ b/pkg/command/e2etest/testdata/plugin-cache/cache/registry.opentofu.org/hashicorp/template/2.1.0/os_arch/terraform-provider-template_v2.1.0_x4 @@ -0,0 +1 @@ +this is not a real plugin diff --git a/pkg/command/e2etest/testdata/plugin-cache/main.tf b/pkg/command/e2etest/testdata/plugin-cache/main.tf new file mode 100644 index 00000000000..f52d482944a --- /dev/null +++ b/pkg/command/e2etest/testdata/plugin-cache/main.tf @@ -0,0 +1,7 @@ +provider "template" { + version = "2.1.0" +} + +provider "null" { + version = "2.1.0" +} diff --git a/pkg/command/e2etest/testdata/provider-dev-override/pkgdir/.exists b/pkg/command/e2etest/testdata/provider-dev-override/pkgdir/.exists new file mode 100644 index 00000000000..052e1ad06c5 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-dev-override/pkgdir/.exists @@ -0,0 +1 @@ +This is where the test will place the temporary build of the test provider. 
diff --git a/pkg/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf b/pkg/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf new file mode 100644 index 00000000000..9c629f72275 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + simple = { + source = "example.com/test/test" + version = "2.0.0" + } + } +} + +data "simple_resource" "test" { +} diff --git a/pkg/command/e2etest/testdata/provider-not-found-non-default/main.tf b/pkg/command/e2etest/testdata/provider-not-found-non-default/main.tf new file mode 100644 index 00000000000..fe5112730c2 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-not-found-non-default/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + nonexist = { + source = "teamterraform/nonexist" + } + } +} diff --git a/pkg/command/e2etest/testdata/provider-not-found/main.tf b/pkg/command/e2etest/testdata/provider-not-found/main.tf new file mode 100644 index 00000000000..781f0067736 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-not-found/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + nonexist = { + source = "registry.opentofu.org/hashicorp/nonexist" + } + } +} diff --git a/pkg/command/e2etest/testdata/provider-plugin/main.tf b/pkg/command/e2etest/testdata/provider-plugin/main.tf new file mode 100644 index 00000000000..1a8f5238112 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-plugin/main.tf @@ -0,0 +1,20 @@ +// the provider-plugin tests uses the -plugin-cache flag so terraform pulls the +// test binaries instead of reaching out to the registry. 
+terraform { + required_providers { + simple5 = { + source = "registry.opentofu.org/hashicorp/simple" + } + simple6 = { + source = "registry.opentofu.org/hashicorp/simple6" + } + } +} + +resource "simple_resource" "test-proto5" { + provider = simple5 +} + +resource "simple_resource" "test-proto6" { + provider = simple6 +} diff --git a/pkg/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf b/pkg/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf new file mode 100644 index 00000000000..87bd9ac2005 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-tampering-base/provider-tampering-base.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + null = { + # Our version is intentionally fixed so that we have a fixed + # test case here, though we might have to update this in future + # if e.g. Terraform stops supporting plugin protocol 5, or if + # the null provider is yanked from the registry for some reason. + source = "hashicorp/null" + version = "3.1.0" + } + } +} diff --git a/pkg/command/e2etest/testdata/provider-warnings/main.tf b/pkg/command/e2etest/testdata/provider-warnings/main.tf new file mode 100644 index 00000000000..4300f04f861 --- /dev/null +++ b/pkg/command/e2etest/testdata/provider-warnings/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + terraform = { + // hashicorp/terraform is published in the registry, but it is + // archived (since it is internal) and returns a warning: + // + // "This provider is archived and no longer needed. The terraform_remote_state + // data source is built into the latest Terraform release." 
+ source = "hashicorp/terraform" + } + } +} diff --git a/pkg/command/e2etest/testdata/provisioner-plugin/main.tf b/pkg/command/e2etest/testdata/provisioner-plugin/main.tf new file mode 100644 index 00000000000..8e6268b9617 --- /dev/null +++ b/pkg/command/e2etest/testdata/provisioner-plugin/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "a" { + provisioner "test" { + command = "echo HelloProvisioner" + } +} diff --git a/pkg/command/e2etest/testdata/provisioner/main.tf b/pkg/command/e2etest/testdata/provisioner/main.tf new file mode 100644 index 00000000000..c37ad380be4 --- /dev/null +++ b/pkg/command/e2etest/testdata/provisioner/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "a" { + provisioner "local-exec" { + command = "echo HelloProvisioner" + } +} diff --git a/pkg/command/e2etest/testdata/static_plan_typed_variables/main.tf b/pkg/command/e2etest/testdata/static_plan_typed_variables/main.tf new file mode 100644 index 00000000000..8e81a1a767c --- /dev/null +++ b/pkg/command/e2etest/testdata/static_plan_typed_variables/main.tf @@ -0,0 +1,19 @@ +variable "state_path" {} + +variable "src" { + type = string +} + +terraform { + backend "local" { + path = var.state_path + } +} + +module "mod" { + source = var.src +} + +output "out" { + value = module.mod.out +} diff --git a/pkg/command/e2etest/testdata/static_plan_typed_variables/mod/mod.tf b/pkg/command/e2etest/testdata/static_plan_typed_variables/mod/mod.tf new file mode 100644 index 00000000000..95473f39dab --- /dev/null +++ b/pkg/command/e2etest/testdata/static_plan_typed_variables/mod/mod.tf @@ -0,0 +1,3 @@ +output "out" { + value = "placeholder" +} diff --git a/pkg/command/e2etest/testdata/static_plan_variables/main.tf b/pkg/command/e2etest/testdata/static_plan_variables/main.tf new file mode 100644 index 00000000000..5f66a8de65a --- /dev/null +++ b/pkg/command/e2etest/testdata/static_plan_variables/main.tf @@ -0,0 +1,17 @@ +variable "state_path" {} + +variable "src" {} + +terraform { + backend "local" { + 
path = var.state_path + } +} + +module "mod" { + source = var.src +} + +output "out" { + value = module.mod.out +} diff --git a/pkg/command/e2etest/testdata/static_plan_variables/mod/mod.tf b/pkg/command/e2etest/testdata/static_plan_variables/mod/mod.tf new file mode 100644 index 00000000000..95473f39dab --- /dev/null +++ b/pkg/command/e2etest/testdata/static_plan_variables/mod/mod.tf @@ -0,0 +1,3 @@ +output "out" { + value = "placeholder" +} diff --git a/pkg/command/e2etest/testdata/template-provider/main.tf b/pkg/command/e2etest/testdata/template-provider/main.tf new file mode 100644 index 00000000000..31af45150a7 --- /dev/null +++ b/pkg/command/e2etest/testdata/template-provider/main.tf @@ -0,0 +1,7 @@ +provider "template" { + +} + +data "template_file" "test" { + template = "Hello World" +} diff --git a/pkg/command/e2etest/testdata/test-provider/main.tf b/pkg/command/e2etest/testdata/test-provider/main.tf new file mode 100644 index 00000000000..a4de134c848 --- /dev/null +++ b/pkg/command/e2etest/testdata/test-provider/main.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + simple = { + source = "hashicorp/test" + } + } +} + +resource "simple_resource" "test" { +} diff --git a/pkg/command/e2etest/testdata/tf-provider/main.tf b/pkg/command/e2etest/testdata/tf-provider/main.tf new file mode 100644 index 00000000000..bd9887e99ad --- /dev/null +++ b/pkg/command/e2etest/testdata/tf-provider/main.tf @@ -0,0 +1,10 @@ +provider "terraform" { + +} + +data "terraform_remote_state" "test" { + backend = "local" + config = { + path = "test.tfstate" + } +} diff --git a/pkg/command/e2etest/testdata/tf-provider/test.tfstate b/pkg/command/e2etest/testdata/tf-provider/test.tfstate new file mode 100644 index 00000000000..fb1012d15d3 --- /dev/null +++ b/pkg/command/e2etest/testdata/tf-provider/test.tfstate @@ -0,0 +1,13 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 1, + "lineage": "8fab7b5a-511c-d586-988e-250f99c8feb4", + "outputs": { + "out": { + 
"value": "test", + "type": "string" + } + }, + "resources": [] +} diff --git a/pkg/command/e2etest/testdata/tofu-managed-data/main.tf b/pkg/command/e2etest/testdata/tofu-managed-data/main.tf new file mode 100644 index 00000000000..271888e6a10 --- /dev/null +++ b/pkg/command/e2etest/testdata/tofu-managed-data/main.tf @@ -0,0 +1,18 @@ +resource "terraform_data" "a" { +} + +resource "terraform_data" "b" { + input = terraform_data.a.id +} + +resource "terraform_data" "c" { + triggers_replace = terraform_data.b +} + +resource "terraform_data" "d" { + input = [ terraform_data.b, terraform_data.c ] +} + +output "d" { + value = terraform_data.d +} diff --git a/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/.terraform.lock.hcl b/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/.terraform.lock.hcl new file mode 100644 index 00000000000..2d2ee86f971 --- /dev/null +++ b/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/.terraform.lock.hcl @@ -0,0 +1,44 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/null" { + version = "2.1.0" + constraints = "2.1.0" + hashes = [ + "h1:J/XPKw4nOAsE0iHHqkR0oIBfchtt3pokNj4gFlHqVvk=", + "h1:uugNjv4FEabvXfifTzRCqSerdraltZR0UwXzH8QYPUQ=", + "zh:022eb9cefb72d25cb39aebf17787ae5a1a239544abae7ac11fdc2b5a464c06f8", + "zh:089aec7ba6b9843741fec84e0bc046d97d2e41a9fedbe5d77124e66227395c63", + "zh:09e9a6fe88e8d33e4656a4f3768275c0f959f4624886a3a96d250e1067afec8c", + "zh:0fa2d6a05874405eb8b2a7ececb6b7522be25642e31838d23620bf7b4f371c9d", + "zh:2a7ab2f42d86e8bd4db3cdf94287a6d91c61456b59a0ce2d0f5d6992a08b668b", + "zh:6526bfa4f547223d4a14d7bf9098a4f7177a5c886a7edc65056df1cb98f6aad9", + "zh:8e58a5a130d377e8fc0da8ad526f33738c320b19463679f7d68212c5c939bad4", + "zh:9dc5be5713fca7dbfa99e9673450aaa7216915bffbc043b30798e037a8f2c870", + "zh:ab7671e33198b718a1ae3272dcea0380f357926324f96c3be0c6ef9423ebece1", + "zh:b27db66404ea0704fb076ef26bb5b5c556a31b81a8b2302ec705a7e46d93d3e0", + "zh:bcc4a07ce1fb3bdee4ea360dd9549e099ecc2e9d80aab7f8daf54387a87a5f8e", + "zh:bf44f8693075f46ae833303fee17e0b0649c72e9347027670fa30e9fbce37fc4", + ] +} + +provider "registry.opentofu.org/hashicorp/template" { + version = "2.1.1" + constraints = "2.1.1" + hashes = [ + "h1:fBNBluCX4pWlYEw5ZyCTHB00E+3BDSe7GjRzF1ojcvU=", + "h1:x2/zuJFN/oOUpE1C1nSk4n86AA2zASOyy2BUdFYcpXw=", + "zh:05fddf3cacb607f623c2b221c3e9ab724079deca0b703b2738e9d55c10e31717", + "zh:1a250b29274f3e340ea775bf9bd57476e982bca1fb4b59343fb3126e75dfd85c", + "zh:284735b9bd0e416ec02c0844e7f4ebbd4b5744140a21606e33f16eb14640cbf1", + "zh:2e9d246094ac8a68951015d40f42145e795b31d7c84fee20fa9f997b3d428906", + "zh:65e8e73860662a0c0698c8a8d35c857302f1fe3f41947e7c048c49a541a9c7f1", + "zh:70dacd22d0c93b2000948c06ded67fa147d992a0353737438f24a61e3f956c41", + "zh:aa1a0321e79e08ffb52789ab0af3896c493d436de7396d154d09a0be7d5d50e1", + "zh:bea4c276c4df9d117f19c4266d060db9b48c865ac7a71d2e77a27866c19bfaf5", + "zh:de04cb0cb046dad184f5bb783659cf98d88c6798db038cbf5a2c3c08e853d444", + 
"zh:de3c45a4fa1f756aa4db3350c021d1c0f9b23640cff77e0ba4df4eeb8eae957f", + "zh:e3cf2db204f64ad4e288af00fabc6a8af13a6687aba60a7e1ce0ea215a9580b1", + "zh:f795833225207d2eee022b91d26bee18d5e518e70912dd7a1d2a0eff2cbe4f1d", + ] +} diff --git a/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/main.tf b/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/main.tf new file mode 100644 index 00000000000..1598a278354 --- /dev/null +++ b/pkg/command/e2etest/testdata/tofu-providers-mirror-with-lock-file/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + template = { source = "hashicorp/template" } + null = { source = "hashicorp/null" } + terraform = { source = "terraform.io/builtin/terraform" } + } +} diff --git a/pkg/command/e2etest/testdata/tofu-providers-mirror/main.tf b/pkg/command/e2etest/testdata/tofu-providers-mirror/main.tf new file mode 100644 index 00000000000..4b31e030120 --- /dev/null +++ b/pkg/command/e2etest/testdata/tofu-providers-mirror/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + template = { version = "2.1.1" } + null = { source = "hashicorp/null", version = "2.1.0" } + terraform = { source = "terraform.io/builtin/terraform" } + } +} diff --git a/pkg/command/e2etest/testdata/vendored-provider/main.tf b/pkg/command/e2etest/testdata/vendored-provider/main.tf new file mode 100644 index 00000000000..3cb62153783 --- /dev/null +++ b/pkg/command/e2etest/testdata/vendored-provider/main.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + null = { + source = "hashicorp/null" + version = "1.0.0+local" + } + } +} diff --git a/pkg/command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.opentofu.org/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 b/pkg/command/e2etest/testdata/vendored-provider/terraform.d/plugins/registry.opentofu.org/hashicorp/null/1.0.0+local/os_arch/terraform-provider-null_v1.0.0 new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/pkg/command/e2etest/tf_provider_data_test.go b/pkg/command/e2etest/tf_provider_data_test.go new file mode 100644 index 00000000000..9bf26b55e3f --- /dev/null +++ b/pkg/command/e2etest/tf_provider_data_test.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/e2e" +) + +func TestOpenTofuProviderData(t *testing.T) { + + fixturePath := filepath.Join("testdata", "tofu-managed-data") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + _, stderr, err := tf.Run("init", "-input=false") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + stdout, stderr, err := tf.Run("plan", "-out=tfplan", "-input=false") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "4 to add, 0 to change, 0 to destroy") { + t.Errorf("incorrect plan tally; want 4 to add:\n%s", stdout) + } + + stdout, stderr, err = tf.Run("apply", "-input=false", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "Resources: 4 added, 0 changed, 0 destroyed") { + t.Errorf("incorrect apply tally; want 4 added:\n%s", stdout) + } + + state, err := tf.LocalState() + if err != nil { + t.Fatalf("failed to read state file: %s", err) + } + + // we'll check the final output to validate the resources + d := state.Module(addrs.RootModuleInstance).OutputValues["d"].Value + input := d.GetAttr("input") + output := d.GetAttr("output") + if input.IsNull() { + t.Fatal("missing input from resource d") + } + if !input.RawEquals(output) { + t.Fatalf("input %#v does not equal output %#v\n", input, output) + } +} diff --git a/pkg/command/e2etest/unmanaged_test.go 
b/pkg/command/e2etest/unmanaged_test.go new file mode 100644 index 00000000000..46e9e839873 --- /dev/null +++ b/pkg/command/e2etest/unmanaged_test.go @@ -0,0 +1,358 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "context" + "encoding/json" + "io" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/pkg/grpcwrap" + tfplugin5 "github.com/kubegems/opentofu/pkg/plugin" + tfplugin "github.com/kubegems/opentofu/pkg/plugin6" + simple5 "github.com/kubegems/opentofu/pkg/provider-simple" + simple "github.com/kubegems/opentofu/pkg/provider-simple-v6" + proto5 "github.com/kubegems/opentofu/pkg/tfplugin5" + proto "github.com/kubegems/opentofu/pkg/tfplugin6" +) + +// The tests in this file are for the "unmanaged provider workflow", which +// includes variants of the following sequence, with different details: +// tofu init +// tofu plan +// tofu apply +// +// These tests are run against an in-process server, and checked to make sure +// they're not trying to control the lifecycle of the binary. They are not +// checked for correctness of the operations themselves. 
+ +type reattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr reattachConfigAddr +} + +type reattachConfigAddr struct { + Network string + String string +} + +type providerServer struct { + sync.Mutex + proto.ProviderServer + planResourceChangeCalled bool + applyResourceChangeCalled bool +} + +func (p *providerServer) PlanResourceChange(ctx context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + p.Lock() + defer p.Unlock() + + p.planResourceChangeCalled = true + return p.ProviderServer.PlanResourceChange(ctx, req) +} + +func (p *providerServer) ApplyResourceChange(ctx context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { + p.Lock() + defer p.Unlock() + + p.applyResourceChangeCalled = true + return p.ProviderServer.ApplyResourceChange(ctx, req) +} + +func (p *providerServer) PlanResourceChangeCalled() bool { + p.Lock() + defer p.Unlock() + + return p.planResourceChangeCalled +} +func (p *providerServer) ResetPlanResourceChangeCalled() { + p.Lock() + defer p.Unlock() + + p.planResourceChangeCalled = false +} + +func (p *providerServer) ApplyResourceChangeCalled() bool { + p.Lock() + defer p.Unlock() + + return p.applyResourceChangeCalled +} +func (p *providerServer) ResetApplyResourceChangeCalled() { + p.Lock() + defer p.Unlock() + + p.applyResourceChangeCalled = false +} + +type providerServer5 struct { + sync.Mutex + proto5.ProviderServer + planResourceChangeCalled bool + applyResourceChangeCalled bool +} + +func (p *providerServer5) PlanResourceChange(ctx context.Context, req *proto5.PlanResourceChange_Request) (*proto5.PlanResourceChange_Response, error) { + p.Lock() + defer p.Unlock() + + p.planResourceChangeCalled = true + return p.ProviderServer.PlanResourceChange(ctx, req) +} + +func (p *providerServer5) ApplyResourceChange(ctx context.Context, req *proto5.ApplyResourceChange_Request) (*proto5.ApplyResourceChange_Response, 
error) { + p.Lock() + defer p.Unlock() + + p.applyResourceChangeCalled = true + return p.ProviderServer.ApplyResourceChange(ctx, req) +} + +func (p *providerServer5) PlanResourceChangeCalled() bool { + p.Lock() + defer p.Unlock() + + return p.planResourceChangeCalled +} +func (p *providerServer5) ResetPlanResourceChangeCalled() { + p.Lock() + defer p.Unlock() + + p.planResourceChangeCalled = false +} + +func (p *providerServer5) ApplyResourceChangeCalled() bool { + p.Lock() + defer p.Unlock() + + return p.applyResourceChangeCalled +} +func (p *providerServer5) ResetApplyResourceChangeCalled() { + p.Lock() + defer p.Unlock() + + p.applyResourceChangeCalled = false +} + +func TestUnmanagedSeparatePlan(t *testing.T) { + t.Parallel() + + fixturePath := filepath.Join("testdata", "test-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + provider := &providerServer{ + ProviderServer: grpcwrap.Provider6(simple.Provider()), + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go plugin.Serve(&plugin.ServeConfig{ + Logger: hclog.New(&hclog.LoggerOptions{ + Name: "plugintest", + Level: hclog.Trace, + Output: io.Discard, + }), + Test: &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + }, + GRPCServer: plugin.DefaultGRPCServer, + VersionedPlugins: map[int]plugin.PluginSet{ + 6: { + "provider": &tfplugin.GRPCProviderPlugin{ + GRPCProvider: func() proto.ProviderServer { + return provider + }, + }, + }, + }, + }) + config := <-reattachCh + if config == nil { + t.Fatalf("no reattach config received") + } + reattachStr, err := json.Marshal(map[string]reattachConfig{ + "hashicorp/test": { + Protocol: string(config.Protocol), + ProtocolVersion: 6, + Pid: config.Pid, + Test: true, + Addr: reattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, + }) + if err != nil { + t.Fatal(err) + } 
+ + tf.AddEnv("TF_REATTACH_PROVIDERS=" + string(reattachStr)) + + //// INIT + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we didn't download the binary + if strings.Contains(stdout, "Installing hashicorp/test v") { + t.Errorf("test provider download message is present in init output:\n%s", stdout) + } + if tf.FileExists(filepath.Join(".terraform", "plugins", "registry.opentofu.org", "hashicorp", "test")) { + t.Errorf("test provider binary found in .terraform dir") + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.PlanResourceChangeCalled() { + t.Error("PlanResourceChange not called on un-managed provider") + } + + //// APPLY + _, stderr, err = tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.ApplyResourceChangeCalled() { + t.Error("ApplyResourceChange not called on un-managed provider") + } + provider.ResetApplyResourceChangeCalled() + + //// DESTROY + _, stderr, err = tf.Run("destroy", "-auto-approve") + if err != nil { + t.Fatalf("unexpected destroy error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.ApplyResourceChangeCalled() { + t.Error("ApplyResourceChange (destroy) not called on in-process provider") + } + cancel() + <-closeCh +} + +func TestUnmanagedSeparatePlan_proto5(t *testing.T) { + t.Parallel() + + fixturePath := filepath.Join("testdata", "test-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + provider := &providerServer5{ + ProviderServer: grpcwrap.Provider(simple5.Provider()), + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go plugin.Serve(&plugin.ServeConfig{ + Logger: hclog.New(&hclog.LoggerOptions{ + Name: 
"plugintest", + Level: hclog.Trace, + Output: io.Discard, + }), + Test: &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + }, + GRPCServer: plugin.DefaultGRPCServer, + VersionedPlugins: map[int]plugin.PluginSet{ + 5: { + "provider": &tfplugin5.GRPCProviderPlugin{ + GRPCProvider: func() proto5.ProviderServer { + return provider + }, + }, + }, + }, + }) + config := <-reattachCh + if config == nil { + t.Fatalf("no reattach config received") + } + reattachStr, err := json.Marshal(map[string]reattachConfig{ + "hashicorp/test": { + Protocol: string(config.Protocol), + ProtocolVersion: 5, + Pid: config.Pid, + Test: true, + Addr: reattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, + }) + if err != nil { + t.Fatal(err) + } + + tf.AddEnv("TF_REATTACH_PROVIDERS=" + string(reattachStr)) + + //// INIT + stdout, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + // Make sure we didn't download the binary + if strings.Contains(stdout, "Installing hashicorp/test v") { + t.Errorf("test provider download message is present in init output:\n%s", stdout) + } + if tf.FileExists(filepath.Join(".terraform", "plugins", "registry.opentofu.org", "hashicorp", "test")) { + t.Errorf("test provider binary found in .terraform dir") + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.PlanResourceChangeCalled() { + t.Error("PlanResourceChange not called on un-managed provider") + } + + //// APPLY + _, stderr, err = tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.ApplyResourceChangeCalled() { + t.Error("ApplyResourceChange not called on un-managed provider") + } + provider.ResetApplyResourceChangeCalled() + + //// DESTROY + _, stderr, err = 
tf.Run("destroy", "-auto-approve") + if err != nil { + t.Fatalf("unexpected destroy error: %s\nstderr:\n%s", err, stderr) + } + + if !provider.ApplyResourceChangeCalled() { + t.Error("ApplyResourceChange (destroy) not called on in-process provider") + } + cancel() + <-closeCh +} diff --git a/pkg/command/e2etest/version_test.go b/pkg/command/e2etest/version_test.go new file mode 100644 index 00000000000..8dbd389c508 --- /dev/null +++ b/pkg/command/e2etest/version_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2etest + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/e2e" + "github.com/kubegems/opentofu/version" +) + +func TestVersion(t *testing.T) { + // Along with testing the "version" command in particular, this serves + // as a good smoke test for whether the OpenTofu binary can even be + // compiled and run, since it doesn't require any external network access + // to do its job. + + t.Parallel() + + fixturePath := filepath.Join("testdata", "empty") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantVersion := fmt.Sprintf("OpenTofu v%s", version.String()) + if !strings.Contains(stdout, wantVersion) { + t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) + } +} + +func TestVersionWithProvider(t *testing.T) { + // This is a more elaborate use of "version" that shows the selected + // versions of plugins too. + t.Parallel() + + // This test reaches out to registry.opentofu.org to download the + // template and null providers, so it can only run if network access is + // allowed. 
+ skipIfCannotAccessNetwork(t) + + fixturePath := filepath.Join("testdata", "template-provider") + tf := e2e.NewBinary(t, tofuBin, fixturePath) + + // Initial run (before "init") should work without error but will not + // include the provider version, since we've not "locked" one yet. + { + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantVersion := fmt.Sprintf("OpenTofu v%s", version.String()) + if !strings.Contains(stdout, wantVersion) { + t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) + } + } + + { + _, _, err := tf.Run("init") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + } + + // After running init, we additionally include information about the + // selected version of the "template" provider. + { + stdout, stderr, err := tf.Run("version") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if stderr != "" { + t.Errorf("unexpected stderr output:\n%s", stderr) + } + + wantMsg := "+ provider registry.opentofu.org/hashicorp/template v" // we don't know which version we'll get here + if !strings.Contains(stdout, wantMsg) { + t.Errorf("output does not contain provider information %q:\n%s", wantMsg, stdout) + } + } +} diff --git a/pkg/command/flag_kv.go b/pkg/command/flag_kv.go new file mode 100644 index 00000000000..f743cbc9e1c --- /dev/null +++ b/pkg/command/flag_kv.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" +) + +// FlagStringKV is a flag.Value implementation for parsing user variables +// from the command-line in the format of '-var key=value', where value is +// only ever a primitive. 
+type FlagStringKV map[string]string + +func (v *FlagStringKV) String() string { + return "" +} + +func (v *FlagStringKV) Set(raw string) error { + idx := strings.Index(raw, "=") + if idx == -1 { + return fmt.Errorf("No '=' value in arg: %s", raw) + } + + if *v == nil { + *v = make(map[string]string) + } + + key, value := raw[0:idx], raw[idx+1:] + (*v)[key] = value + return nil +} + +// FlagStringSlice is a flag.Value implementation for parsing targets from the +// command line, e.g. -target=aws_instance.foo -target=aws_vpc.bar +type FlagStringSlice []string + +func (v *FlagStringSlice) String() string { + return "" +} +func (v *FlagStringSlice) Set(raw string) error { + *v = append(*v, raw) + + return nil +} diff --git a/pkg/command/flag_kv_test.go b/pkg/command/flag_kv_test.go new file mode 100644 index 00000000000..fd038470d59 --- /dev/null +++ b/pkg/command/flag_kv_test.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "reflect" + "testing" +) + +func TestFlagStringKV_impl(t *testing.T) { + var _ flag.Value = new(FlagStringKV) +} + +func TestFlagStringKV(t *testing.T) { + cases := []struct { + Input string + Output map[string]string + Error bool + }{ + { + "key=value", + map[string]string{"key": "value"}, + false, + }, + + { + "key=", + map[string]string{"key": ""}, + false, + }, + + { + "key=foo=bar", + map[string]string{"key": "foo=bar"}, + false, + }, + + { + "map.key=foo", + map[string]string{"map.key": "foo"}, + false, + }, + + { + "key", + nil, + true, + }, + + { + "key=/path", + map[string]string{"key": "/path"}, + false, + }, + } + + for _, tc := range cases { + f := new(FlagStringKV) + err := f.Set(tc.Input) + if err != nil != tc.Error { + t.Fatalf("bad error. 
Input: %#v\n\nError: %s", tc.Input, err) + } + + actual := map[string]string(*f) + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("bad: %#v", actual) + } + } +} diff --git a/pkg/command/fmt.go b/pkg/command/fmt.go new file mode 100644 index 00000000000..6dc7b426f3c --- /dev/null +++ b/pkg/command/fmt.go @@ -0,0 +1,619 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const ( + stdinArg = "-" +) + +var ( + fmtSupportedExts = []string{ + ".tf", + ".tofu", + ".tfvars", + ".tftest.hcl", + ".tofutest.hcl", + } +) + +// FmtCommand is a Command implementation that rewrites OpenTofu config +// files to a canonical format and style. 
+type FmtCommand struct { + Meta + list bool + write bool + diff bool + check bool + recursive bool + input io.Reader // STDIN if nil +} + +func (c *FmtCommand) Run(args []string) int { + if c.input == nil { + c.input = os.Stdin + } + + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("fmt") + cmdFlags.BoolVar(&c.list, "list", true, "list") + cmdFlags.BoolVar(&c.write, "write", true, "write") + cmdFlags.BoolVar(&c.diff, "diff", false, "diff") + cmdFlags.BoolVar(&c.check, "check", false, "check") + cmdFlags.BoolVar(&c.recursive, "recursive", false, "recursive") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + args = cmdFlags.Args() + + var paths []string + if len(args) == 0 { + paths = []string{"."} + } else if args[0] == stdinArg { + c.list = false + c.write = false + } else { + paths = args + } + + var output io.Writer + list := c.list // preserve the original value of -list + if c.check { + // set to true so we can use the list output to check + // if the input needs formatting + c.list = true + c.write = false + output = &bytes.Buffer{} + } else { + output = &cli.UiWriter{Ui: c.Ui} + } + + diags := c.fmt(paths, c.input, output) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 2 + } + + if c.check { + buf := output.(*bytes.Buffer) + ok := buf.Len() == 0 + if list { + io.Copy(&cli.UiWriter{Ui: c.Ui}, buf) + } + if ok { + return 0 + } else { + return 3 + } + } + + return 0 +} + +func (c *FmtCommand) fmt(paths []string, stdin io.Reader, stdout io.Writer) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(paths) == 0 { // Assuming stdin, then. 
+ if c.write { + diags = diags.Append(fmt.Errorf("Option -write cannot be used when reading from stdin")) + return diags + } + fileDiags := c.processFile("", stdin, stdout, true) + diags = diags.Append(fileDiags) + return diags + } + + for _, path := range paths { + path = c.normalizePath(path) + info, err := os.Stat(path) + if err != nil { + diags = diags.Append(fmt.Errorf("No file or directory at %s", path)) + return diags + } + if info.IsDir() { + dirDiags := c.processDir(path, stdout) + diags = diags.Append(dirDiags) + } else { + fmtd := false + for _, ext := range fmtSupportedExts { + if strings.HasSuffix(path, ext) { + f, err := os.Open(path) + if err != nil { + // Open does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Failed to read file %s", path)) + continue + } + + fileDiags := c.processFile(c.normalizePath(path), f, stdout, false) + diags = diags.Append(fileDiags) + f.Close() + + // Take note that we processed the file. + fmtd = true + + // Don't check the remaining extensions. + break + } + } + + if !fmtd { + diags = diags.Append(fmt.Errorf("Only .tf, .tfvars, and .tftest.hcl files can be processed with tofu fmt")) + continue + } + } + } + + return diags +} + +func (c *FmtCommand) processFile(path string, r io.Reader, w io.Writer, isStdout bool) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] tofu fmt: Formatting %s", path) + + src, err := io.ReadAll(r) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to read %s", path)) + return diags + } + + // Register this path as a synthetic configuration source, so that any + // diagnostic errors can include the source code snippet + c.registerSynthConfigSource(path, src) + + // File must be parseable as HCL native syntax before we'll try to format + // it. If not, the formatter is likely to make drastic changes that would + // be hard for the user to undo. 
+ _, syntaxDiags := hclsyntax.ParseConfig(src, path, hcl.Pos{Line: 1, Column: 1}) + if syntaxDiags.HasErrors() { + diags = diags.Append(syntaxDiags) + return diags + } + + result := c.formatSourceCode(src, path) + + if !bytes.Equal(src, result) { + // Something was changed + if c.list { + fmt.Fprintln(w, path) + } + if c.write { + err := os.WriteFile(path, result, 0644) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to write %s", path)) + return diags + } + } + if c.diff { + diff, err := bytesDiff(src, result, path) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to generate diff for %s: %w", path, err)) + return diags + } + w.Write(diff) + } + } + + if !c.list && !c.write && !c.diff { + _, err = w.Write(result) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to write result")) + } + } + + return diags +} + +func (c *FmtCommand) processDir(path string, stdout io.Writer) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] tofu fmt: looking for files in %s", path) + + entries, err := os.ReadDir(path) + if err != nil { + switch { + case os.IsNotExist(err): + diags = diags.Append(fmt.Errorf("There is no configuration directory at %s", path)) + default: + // ReadDir does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Cannot read directory %s", path)) + } + return diags + } + + for _, info := range entries { + name := info.Name() + if configs.IsIgnoredFile(name) { + continue + } + subPath := filepath.Join(path, name) + if info.IsDir() { + if c.recursive { + subDiags := c.processDir(subPath, stdout) + diags = diags.Append(subDiags) + } + + // We do not recurse into child directories by default because we + // want to mimic the file-reading behavior of "tofu plan", etc, + // operating on one module at a time. 
+ continue + } + + for _, ext := range fmtSupportedExts { + if strings.HasSuffix(name, ext) { + f, err := os.Open(subPath) + if err != nil { + // Open does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Failed to read file %s", subPath)) + continue + } + + fileDiags := c.processFile(c.normalizePath(subPath), f, stdout, false) + diags = diags.Append(fileDiags) + f.Close() + + // Don't need to check the remaining extensions. + break + } + } + } + + return diags +} + +// formatSourceCode is the formatting logic itself, applied to each file that +// is selected (directly or indirectly) on the command line. +func (c *FmtCommand) formatSourceCode(src []byte, filename string) []byte { + f, diags := hclwrite.ParseConfig(src, filename, hcl.InitialPos) + if diags.HasErrors() { + // It would be weird to get here because the caller should already have + // checked for syntax errors and returned them. We'll just do nothing + // in this case, returning the input exactly as given. + return src + } + + c.formatBody(f.Body(), nil) + + return f.Bytes() +} + +func (c *FmtCommand) formatBody(body *hclwrite.Body, inBlocks []string) { + attrs := body.Attributes() + for name, attr := range attrs { + if len(inBlocks) == 1 && inBlocks[0] == "variable" && name == "type" { + cleanedExprTokens := c.formatTypeExpr(attr.Expr().BuildTokens(nil)) + body.SetAttributeRaw(name, cleanedExprTokens) + continue + } + cleanedExprTokens := c.formatValueExpr(attr.Expr().BuildTokens(nil)) + body.SetAttributeRaw(name, cleanedExprTokens) + } + + blocks := body.Blocks() + for _, block := range blocks { + // Normalize the label formatting, removing any weird stuff like + // interleaved inline comments and using the idiomatic quoted + // label syntax. 
+ block.SetLabels(block.Labels()) + + inBlocks := append(inBlocks, block.Type()) + c.formatBody(block.Body(), inBlocks) + } +} + +func (c *FmtCommand) formatValueExpr(tokens hclwrite.Tokens) hclwrite.Tokens { + if len(tokens) < 5 { + // Can't possibly be a "${ ... }" sequence without at least enough + // tokens for the delimiters and one token inside them. + return tokens + } + oQuote := tokens[0] + oBrace := tokens[1] + cBrace := tokens[len(tokens)-2] + cQuote := tokens[len(tokens)-1] + if oQuote.Type != hclsyntax.TokenOQuote || oBrace.Type != hclsyntax.TokenTemplateInterp || cBrace.Type != hclsyntax.TokenTemplateSeqEnd || cQuote.Type != hclsyntax.TokenCQuote { + // Not an interpolation sequence at all, then. + return tokens + } + + inside := tokens[2 : len(tokens)-2] + + // We're only interested in sequences that are provable to be single + // interpolation sequences, which we'll determine by hunting inside + // the interior tokens for any other interpolation sequences. This is + // likely to produce false negatives sometimes, but that's better than + // false positives and we're mainly interested in catching the easy cases + // here. + quotes := 0 + for _, token := range inside { + if token.Type == hclsyntax.TokenOQuote { + quotes++ + continue + } + if token.Type == hclsyntax.TokenCQuote { + quotes-- + continue + } + if quotes > 0 { + // Interpolation sequences inside nested quotes are okay, because + // they are part of a nested expression. + // "${foo("${bar}")}" + continue + } + if token.Type == hclsyntax.TokenTemplateInterp || token.Type == hclsyntax.TokenTemplateSeqEnd { + // We've found another template delimiter within our interior + // tokens, which suggests that we've found something like this: + // "${foo}${bar}" + // That isn't unwrappable, so we'll leave the whole expression alone. + return tokens + } + if token.Type == hclsyntax.TokenQuotedLit { + // If there's any literal characters in the outermost + // quoted sequence then it is not unwrappable. 
+ return tokens + } + } + + // If we got down here without an early return then this looks like + // an unwrappable sequence, but we'll trim any leading and trailing + // newlines that might result in an invalid result if we were to + // naively trim something like this: + // "${ + // foo + // }" + trimmed := c.trimNewlines(inside) + + // Finally, we check if the unwrapped expression is on multiple lines. If + // so, we ensure that it is surrounded by parenthesis to make sure that it + // parses correctly after unwrapping. This may be redundant in some cases, + // but is required for at least multi-line ternary expressions. + isMultiLine := false + hasLeadingParen := false + hasTrailingParen := false + for i, token := range trimmed { + switch { + case i == 0 && token.Type == hclsyntax.TokenOParen: + hasLeadingParen = true + case token.Type == hclsyntax.TokenNewline: + isMultiLine = true + case i == len(trimmed)-1 && token.Type == hclsyntax.TokenCParen: + hasTrailingParen = true + } + } + if isMultiLine && !(hasLeadingParen && hasTrailingParen) { + wrapped := make(hclwrite.Tokens, 0, len(trimmed)+2) + wrapped = append(wrapped, &hclwrite.Token{ + Type: hclsyntax.TokenOParen, + Bytes: []byte("("), + }) + wrapped = append(wrapped, trimmed...) + wrapped = append(wrapped, &hclwrite.Token{ + Type: hclsyntax.TokenCParen, + Bytes: []byte(")"), + }) + + return wrapped + } + + return trimmed +} + +func (c *FmtCommand) formatTypeExpr(tokens hclwrite.Tokens) hclwrite.Tokens { + switch len(tokens) { + case 1: + kwTok := tokens[0] + if kwTok.Type != hclsyntax.TokenIdent { + // Not a single type keyword, then. + return tokens + } + + // Collection types without an explicit element type mean + // the element type is "any", so we'll normalize that. 
+	switch string(kwTok.Bytes) {
+	case "list", "map", "set":
+		// Normalize the bare collection keyword to an explicit
+		// "any" element type, e.g. "list" becomes "list(any)".
+		return hclwrite.Tokens{
+			kwTok,
+			{
+				Type:  hclsyntax.TokenOParen,
+				Bytes: []byte("("),
+			},
+			{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte("any"),
+			},
+			{
+				Type:  hclsyntax.TokenCParen,
+				Bytes: []byte(")"),
+			},
+		}
+	default:
+		return tokens
+	}
+
+	case 3:
+		// A pre-0.12 legacy quoted string type, like "string".
+		oQuote := tokens[0]
+		strTok := tokens[1]
+		cQuote := tokens[2]
+		if oQuote.Type != hclsyntax.TokenOQuote || strTok.Type != hclsyntax.TokenQuotedLit || cQuote.Type != hclsyntax.TokenCQuote {
+			// Not a quoted string sequence, then.
+			return tokens
+		}
+
+		// Because this quoted syntax is from Terraform 0.11 and
+		// earlier, which didn't have the idea of "any" as an
+		// element type, we use string as the default element
+		// type. That will avoid oddities if somehow the configuration
+		// was relying on numeric values being auto-converted to
+		// string, as 0.11 would do. This mimics what terraform
+		// 0.12upgrade used to do, because we'd found real-world
+		// modules that were depending on the auto-stringing.
+		switch string(strTok.Bytes) {
+		case "string":
+			return hclwrite.Tokens{
+				{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte("string"),
+				},
+			}
+		case "list":
+			return hclwrite.Tokens{
+				{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte("list"),
+				},
+				{
+					Type:  hclsyntax.TokenOParen,
+					Bytes: []byte("("),
+				},
+				{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte("string"),
+				},
+				{
+					Type:  hclsyntax.TokenCParen,
+					Bytes: []byte(")"),
+				},
+			}
+		case "map":
+			return hclwrite.Tokens{
+				{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte("map"),
+				},
+				{
+					Type:  hclsyntax.TokenOParen,
+					Bytes: []byte("("),
+				},
+				{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte("string"),
+				},
+				{
+					Type:  hclsyntax.TokenCParen,
+					Bytes: []byte(")"),
+				},
+			}
+		default:
+			// Something else we're not expecting, then.
+ return tokens + } + default: + return tokens + } +} + +func (c *FmtCommand) trimNewlines(tokens hclwrite.Tokens) hclwrite.Tokens { + if len(tokens) == 0 { + return nil + } + var start, end int + for start = 0; start < len(tokens); start++ { + if tokens[start].Type != hclsyntax.TokenNewline { + break + } + } + for end = len(tokens); end > 0; end-- { + if tokens[end-1].Type != hclsyntax.TokenNewline { + break + } + } + return tokens[start:end] +} + +func (c *FmtCommand) Help() string { + helpText := ` +Usage: tofu [global options] fmt [options] [target...] + + Rewrites all OpenTofu configuration files to a canonical format. All + configuration files (.tf), variables files (.tfvars), and testing files + (.tftest.hcl) are updated. JSON files (.tf.json, .tfvars.json, or + .tftest.json) are not modified. + + By default, fmt scans the current directory for configuration files. If you + provide a directory for the target argument, then fmt will scan that + directory instead. If you provide a file, then fmt will process just that + file. If you provide a single dash ("-"), then fmt will read from standard + input (STDIN). + + The content must be in the OpenTofu language native syntax; JSON is not + supported. + +Options: + + -list=false Don't list files whose formatting differs + (always disabled if using STDIN) + + -write=false Don't write to source files + (always disabled if using STDIN or -check) + + -diff Display diffs of formatting changes + + -check Check if the input is formatted. Exit status will be 0 if all + input is properly formatted and non-zero otherwise. + + -no-color If specified, output won't contain any color. + + -recursive Also process files in subdirectories. By default, only the + given directory (or current directory) is processed. 
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *FmtCommand) Synopsis() string {
+	return "Reformat your configuration in the standard style"
+}
+
+// bytesDiff shells out to the external "diff" tool to produce a unified diff
+// between b1 and b2. The given path is used only for the old/new labels in
+// the diff header. An empty result with a nil error means the inputs match.
+func bytesDiff(b1, b2 []byte, path string) (data []byte, err error) {
+	f1, err := os.CreateTemp("", "")
+	if err != nil {
+		return
+	}
+	defer os.Remove(f1.Name())
+	defer f1.Close()
+
+	f2, err := os.CreateTemp("", "")
+	if err != nil {
+		return
+	}
+	defer os.Remove(f2.Name())
+	defer f2.Close()
+
+	// Check the write errors explicitly: a failed or short write would
+	// otherwise silently produce a diff of truncated temp files.
+	if _, err = f1.Write(b1); err != nil {
+		return
+	}
+	if _, err = f2.Write(b2); err != nil {
+		return
+	}
+
+	data, err = exec.Command("diff", "--label=old/"+path, "--label=new/"+path, "-u", f1.Name(), f2.Name()).CombinedOutput()
+	if len(data) > 0 {
+		// diff exits with a non-zero status when the files don't match.
+		// Ignore that failure as long as we get output.
+		err = nil
+	}
+	return
+}
diff --git a/pkg/command/fmt_test.go b/pkg/command/fmt_test.go
new file mode 100644
index 00000000000..23828b1a6d1
--- /dev/null
+++ b/pkg/command/fmt_test.go
@@ -0,0 +1,507 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" +) + +func TestFmt_TestFiles(t *testing.T) { + const inSuffix = "_in.tftest.hcl" + const outSuffix = "_out.tftest.hcl" + const gotSuffix = "_got.tftest.hcl" + entries, err := os.ReadDir("testdata/tftest-fmt") + if err != nil { + t.Fatal(err) + } + + tmpDir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + for _, info := range entries { + if info.IsDir() { + continue + } + filename := info.Name() + if !strings.HasSuffix(filename, inSuffix) { + continue + } + testName := filename[:len(filename)-len(inSuffix)] + t.Run(testName, func(t *testing.T) { + inFile := filepath.Join("testdata", "tftest-fmt", testName+inSuffix) + wantFile := filepath.Join("testdata", "tftest-fmt", testName+outSuffix) + gotFile := filepath.Join(tmpDir, testName+gotSuffix) + input, err := os.ReadFile(inFile) + if err != nil { + t.Fatal(err) + } + want, err := os.ReadFile(wantFile) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(gotFile, input, 0700) + if err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + args := []string{gotFile} + if code := c.Run(args); code != 0 { + t.Fatalf("fmt command was unsuccessful:\n%s", ui.ErrorWriter.String()) + } + + got, err := os.ReadFile(gotFile) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(string(want), string(got)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestFmt(t *testing.T) { + const inSuffix = "_in.tf" + const outSuffix = "_out.tf" + const gotSuffix = "_got.tf" + entries, err := os.ReadDir("testdata/fmt") + if err != nil { + t.Fatal(err) + } + + tmpDir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + for 
_, info := range entries { + if info.IsDir() { + continue + } + filename := info.Name() + if !strings.HasSuffix(filename, inSuffix) { + continue + } + testName := filename[:len(filename)-len(inSuffix)] + t.Run(testName, func(t *testing.T) { + inFile := filepath.Join("testdata", "fmt", testName+inSuffix) + wantFile := filepath.Join("testdata", "fmt", testName+outSuffix) + gotFile := filepath.Join(tmpDir, testName+gotSuffix) + input, err := os.ReadFile(inFile) + if err != nil { + t.Fatal(err) + } + want, err := os.ReadFile(wantFile) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(gotFile, input, 0700) + if err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + args := []string{gotFile} + if code := c.Run(args); code != 0 { + t.Fatalf("fmt command was unsuccessful:\n%s", ui.ErrorWriter.String()) + } + + got, err := os.ReadFile(gotFile) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(string(want), string(got)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestFmt_nonexist(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + missingDir := filepath.Join(tempDir, "doesnotexist") + args := []string{missingDir} + if code := c.Run(args); code != 2 { + t.Fatalf("wrong exit code. 
errors: \n%s", ui.ErrorWriter.String()) + } + + expected := "No file or directory at" + if actual := ui.ErrorWriter.String(); !strings.Contains(actual, expected) { + t.Fatalf("expected:\n%s\n\nto include: %q", actual, expected) + } +} + +func TestFmt_syntaxError(t *testing.T) { + tempDir := testTempDir(t) + + invalidSrc := ` +a = 1 + +` + + err := os.WriteFile(filepath.Join(tempDir, "invalid.tf"), []byte(invalidSrc), 0644) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{tempDir} + if code := c.Run(args); code != 2 { + t.Fatalf("wrong exit code. errors: \n%s", ui.ErrorWriter.String()) + } + + expected := "Invalid expression" + if actual := ui.ErrorWriter.String(); !strings.Contains(actual, expected) { + t.Fatalf("expected:\n%s\n\nto include: %q", actual, expected) + } +} + +func TestFmt_snippetInError(t *testing.T) { + tempDir := testTempDir(t) + + backendSrc := `terraform {backend "s3" {}}` + + err := os.WriteFile(filepath.Join(tempDir, "backend.tf"), []byte(backendSrc), 0644) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{tempDir} + if code := c.Run(args); code != 2 { + t.Fatalf("wrong exit code. 
errors: \n%s", ui.ErrorWriter.String()) + } + + substrings := []string{ + "Argument definition required", + "line 1, in terraform", + `1: terraform {backend "s3" {}}`, + } + for _, substring := range substrings { + if actual := ui.ErrorWriter.String(); !strings.Contains(actual, substring) { + t.Errorf("expected:\n%s\n\nto include: %q", actual, substring) + } + } +} + +func TestFmt_manyArgs(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + // Add a second file + secondSrc := `locals { x = 1 }` + + err := os.WriteFile(filepath.Join(tempDir, "second.tf"), []byte(secondSrc), 0644) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{ + filepath.Join(tempDir, "main.tf"), + filepath.Join(tempDir, "second.tf"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. errors: \n%s", ui.ErrorWriter.String()) + } + + got, err := filepath.Abs(strings.TrimSpace(ui.OutputWriter.String())) + if err != nil { + t.Fatal(err) + } + want := filepath.Join(tempDir, fmtFixture.filename) + + if got != want { + t.Fatalf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func TestFmt_workingDirectory(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + err = os.Chdir(tempDir) + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. 
errors: \n%s", ui.ErrorWriter.String()) + } + + output := strings.Split(strings.TrimSpace(ui.OutputWriter.String()), "\n") + + // Consistent order + sort.Strings(output) + + for i, expected := range []string{fmtFixture.filename, fmtFixture.altFilename} { + actual := output[i] + if actual != expected { + t.Fatalf("got: %q\nexpected: %q", actual, expected) + } + } +} + +func TestFmt_directoryArg(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{tempDir} + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. errors: \n%s", ui.ErrorWriter.String()) + } + + output := strings.Split(strings.TrimSpace(ui.OutputWriter.String()), "\n") + + // Consistent order + sort.Strings(output) + + for i, check := range []string{fmtFixture.filename, fmtFixture.altFilename} { + got, err := filepath.Abs(output[i]) + if err != nil { + t.Fatal(err) + } + want := filepath.Join(tempDir, check) + + if got != want { + t.Fatalf("wrong output\ngot: %s\nwant: %s", got, want) + } + } +} + +func TestFmt_fileArg(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{filepath.Join(tempDir, fmtFixture.filename)} + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. 
errors: \n%s", ui.ErrorWriter.String()) + } + + got, err := filepath.Abs(strings.TrimSpace(ui.OutputWriter.String())) + if err != nil { + t.Fatal(err) + } + want := filepath.Join(tempDir, fmtFixture.filename) + + if got != want { + t.Fatalf("wrong output\ngot: %s\nwant: %s", got, want) + } +} + +func TestFmt_stdinArg(t *testing.T) { + input := new(bytes.Buffer) + input.Write(fmtFixture.input) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + input: input, + } + + args := []string{"-"} + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. errors: \n%s", ui.ErrorWriter.String()) + } + + expected := fmtFixture.golden + if actual := ui.OutputWriter.Bytes(); !bytes.Equal(actual, expected) { + t.Fatalf("got: %q\nexpected: %q", actual, expected) + } +} + +func TestFmt_nonDefaultOptions(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{ + "-list=false", + "-write=false", + "-diff", + tempDir, + } + if code := c.Run(args); code != 0 { + t.Fatalf("wrong exit code. errors: \n%s", ui.ErrorWriter.String()) + } + + expected := fmt.Sprintf("-%s+%s", fmtFixture.input, fmtFixture.golden) + if actual := ui.OutputWriter.String(); !strings.Contains(actual, expected) { + t.Fatalf("expected:\n%s\n\nto include: %q", actual, expected) + } +} + +func TestFmt_check(t *testing.T) { + tempDir := fmtFixtureWriteDir(t) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + args := []string{ + "-check", + tempDir, + } + if code := c.Run(args); code != 3 { + t.Fatalf("wrong exit code. 
expected 3") + } + + // Given that we give relative paths back to the user, normalize this temp + // dir so that we're comparing against a relative-ized (normalized) path + tempDir = c.normalizePath(tempDir) + + if actual := ui.OutputWriter.String(); !strings.Contains(actual, tempDir) { + t.Fatalf("expected:\n%s\n\nto include: %q", actual, tempDir) + } +} + +func TestFmt_checkStdin(t *testing.T) { + input := new(bytes.Buffer) + input.Write(fmtFixture.input) + + ui := new(cli.MockUi) + c := &FmtCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + input: input, + } + + args := []string{ + "-check", + "-", + } + if code := c.Run(args); code != 3 { + t.Fatalf("wrong exit code. expected 3, got %d", code) + } + + if ui.OutputWriter != nil { + t.Fatalf("expected no output, got: %q", ui.OutputWriter.String()) + } +} + +var fmtFixture = struct { + filename string + altFilename string + input, golden []byte +}{ + "main.tf", + "main.tofu", + []byte(` foo = "bar" +`), + []byte(`foo = "bar" +`), +} + +func fmtFixtureWriteDir(t *testing.T) string { + dir := testTempDir(t) + + err := os.WriteFile(filepath.Join(dir, fmtFixture.filename), fmtFixture.input, 0600) + if err != nil { + t.Fatal(err) + } + + err = os.WriteFile(filepath.Join(dir, fmtFixture.altFilename), fmtFixture.input, 0600) + if err != nil { + t.Fatal(err) + } + + return dir +} diff --git a/pkg/command/format/diagnostic.go b/pkg/command/format/diagnostic.go new file mode 100644 index 00000000000..239d25db150 --- /dev/null +++ b/pkg/command/format/diagnostic.go @@ -0,0 +1,325 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/tfdiags" + + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +var disabledColorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, +} + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string]*hcl.File, color *colorstring.Colorize, width int) string { + return DiagnosticFromJSON(viewsjson.NewDiagnostic(diag, sources), color, width) +} + +func DiagnosticFromJSON(diag *viewsjson.Diagnostic, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + // these leftRule* variables are markers for the beginning of the lines + // containing the diagnostic that are intended to help sighted users + // better understand the information hierarchy when diagnostics appear + // alongside other information or alongside other diagnostics. + // + // Without this, it seems (based on folks sharing incomplete messages when + // asking questions, or including extra content that's not part of the + // diagnostic) that some readers have trouble easily identifying which + // text belongs to the diagnostic and which does not. 
+ var leftRuleLine, leftRuleStart, leftRuleEnd string + var leftRuleWidth int // in visual character cells + + switch diag.Severity { + case viewsjson.DiagnosticSeverityError: + buf.WriteString(color.Color("[bold][red]Error: [reset]")) + leftRuleLine = color.Color("[red]│[reset] ") + leftRuleStart = color.Color("[red]╷[reset]") + leftRuleEnd = color.Color("[red]╵[reset]") + leftRuleWidth = 2 + case viewsjson.DiagnosticSeverityWarning: + buf.WriteString(color.Color("[bold][yellow]Warning: [reset]")) + leftRuleLine = color.Color("[yellow]│[reset] ") + leftRuleStart = color.Color("[yellow]╷[reset]") + leftRuleEnd = color.Color("[yellow]╵[reset]") + leftRuleWidth = 2 + default: + // Clear out any coloring that might be applied by OpenTofu's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), diag.Summary) + + appendSourceSnippets(&buf, diag, color) + + if diag.Detail != "" { + paraWidth := width - leftRuleWidth - 1 // leave room for the left rule + if paraWidth > 0 { + lines := strings.Split(diag.Detail, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(paraWidth)) + } + fmt.Fprintf(&buf, "%s\n", line) + } + } else { + fmt.Fprintf(&buf, "%s\n", diag.Detail) + } + } + + // Before we return, we'll finally add the left rule prefixes to each + // line so that the overall message is visually delimited from what's + // around it. We'll do that by scanning over what we already generated + // and adding the prefix for each line. 
+ var ruleBuf strings.Builder + sc := bufio.NewScanner(&buf) + ruleBuf.WriteString(leftRuleStart) + ruleBuf.WriteByte('\n') + for sc.Scan() { + line := sc.Text() + prefix := leftRuleLine + if line == "" { + // Don't print the space after the line if there would be nothing + // after it anyway. + prefix = strings.TrimSpace(prefix) + } + ruleBuf.WriteString(prefix) + ruleBuf.WriteString(line) + ruleBuf.WriteByte('\n') + } + ruleBuf.WriteString(leftRuleEnd) + ruleBuf.WriteByte('\n') + + return ruleBuf.String() +} + +// DiagnosticPlain is an alternative to Diagnostic which minimises the use of +// virtual terminal formatting sequences. +// +// It is intended for use in automation and other contexts in which diagnostic +// messages are parsed from the OpenTofu output. +func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string]*hcl.File, width int) string { + return DiagnosticPlainFromJSON(viewsjson.NewDiagnostic(diag, sources), width) +} + +func DiagnosticPlainFromJSON(diag *viewsjson.Diagnostic, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity { + case viewsjson.DiagnosticSeverityError: + buf.WriteString("\nError: ") + case viewsjson.DiagnosticSeverityWarning: + buf.WriteString("\nWarning: ") + default: + buf.WriteString("\n") + } + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. 
+ fmt.Fprintf(&buf, "%s\n\n", diag.Summary) + + appendSourceSnippets(&buf, diag, disabledColorize) + + if diag.Detail != "" { + if width > 1 { + lines := strings.Split(diag.Detail, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(width-1)) + } + fmt.Fprintf(&buf, "%s\n", line) + } + } else { + fmt.Fprintf(&buf, "%s\n", diag.Detail) + } + } + + return buf.String() +} + +// DiagnosticWarningsCompact is an alternative to Diagnostic for when all of +// the given diagnostics are warnings and we want to show them compactly, +// with only two lines per warning and excluding all of the detail information. +// +// The caller may optionally pre-process the given diagnostics with +// ConsolidateWarnings, in which case this function will recognize consolidated +// messages and include an indication that they are consolidated. +// +// Do not pass non-warning diagnostics to this function, or the result will +// be nonsense. +func DiagnosticWarningsCompact(diags tfdiags.Diagnostics, color *colorstring.Colorize) string { + var b strings.Builder + b.WriteString(color.Color("[bold][yellow]Warnings:[reset]\n\n")) + for _, diag := range diags { + sources := tfdiags.WarningGroupSourceRanges(diag) + b.WriteString(fmt.Sprintf("- %s\n", diag.Description().Summary)) + if len(sources) > 0 { + mainSource := sources[0] + if mainSource.Subject != nil { + if len(sources) > 1 { + b.WriteString(fmt.Sprintf( + " on %s line %d (and %d more)\n", + mainSource.Subject.Filename, + mainSource.Subject.Start.Line, + len(sources)-1, + )) + } else { + b.WriteString(fmt.Sprintf( + " on %s line %d\n", + mainSource.Subject.Filename, + mainSource.Subject.Start.Line, + )) + } + } else if len(sources) > 1 { + b.WriteString(fmt.Sprintf( + " (%d occurences of this warning)\n", + len(sources), + )) + } + } + } + + return b.String() +} + +func appendSourceSnippets(buf *bytes.Buffer, diag *viewsjson.Diagnostic, color *colorstring.Colorize) { + if 
diag.Address != "" { + fmt.Fprintf(buf, " with %s,\n", diag.Address) + } + + if diag.Range == nil { + return + } + + if diag.Snippet == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. + fmt.Fprintf(buf, " on %s line %d:\n (source code not available)\n", diag.Range.Filename, diag.Range.Start.Line) + } else { + snippet := diag.Snippet + code := snippet.Code + + var contextStr string + if snippet.Context != nil { + contextStr = fmt.Sprintf(", in %s", *snippet.Context) + } + fmt.Fprintf(buf, " on %s line %d%s:\n", diag.Range.Filename, diag.Range.Start.Line, contextStr) + + // Split the snippet and render the highlighted section with underlines + start := snippet.HighlightStartOffset + end := snippet.HighlightEndOffset + + // Only buggy diagnostics can have an end range before the start, but + // we need to ensure we don't crash here if that happens. + if end < start { + end = start + 1 + if end > len(code) { + end = len(code) + } + } + + // If either start or end is out of range for the code buffer then + // we'll cap them at the bounds just to avoid a panic, although + // this would happen only if there's a bug in the code generating + // the snippet objects. 
+ if start < 0 { + start = 0 + } else if start > len(code) { + start = len(code) + } + if end < 0 { + end = 0 + } else if end > len(code) { + end = len(code) + } + + before, highlight, after := code[0:start], code[start:end], code[end:] + code = fmt.Sprintf(color.Color("%s[underline]%s[reset]%s"), before, highlight, after) + + // Split the snippet into lines and render one at a time + lines := strings.Split(code, "\n") + for i, line := range lines { + fmt.Fprintf( + buf, "%4d: %s\n", + snippet.StartLine+i, + line, + ) + } + + if len(snippet.Values) > 0 || (snippet.FunctionCall != nil && snippet.FunctionCall.Signature != nil) { + // The diagnostic may also have information about the dynamic + // values of relevant variables at the point of evaluation. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + values := make([]viewsjson.DiagnosticExpressionValue, len(snippet.Values)) + copy(values, snippet.Values) + sort.Slice(values, func(i, j int) bool { + return values[i].Traversal < values[j].Traversal + }) + + fmt.Fprint(buf, color.Color(" [dark_gray]├────────────────[reset]\n")) + if callInfo := snippet.FunctionCall; callInfo != nil && callInfo.Signature != nil { + + fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] while calling [bold]%s[reset]("), callInfo.CalledAs) + for i, param := range callInfo.Signature.Params { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(param.Name) + } + if param := callInfo.Signature.VariadicParam; param != nil { + if len(callInfo.Signature.Params) > 0 { + buf.WriteString(", ") + } + buf.WriteString(param.Name) + buf.WriteString("...") + } + buf.WriteString(")\n") + } + for _, value := range values { + fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] [bold]%s[reset] %s\n"), value.Traversal, value.Statement) + } + } + } + + buf.WriteByte('\n') +} diff --git 
a/pkg/command/format/diagnostic_test.go b/pkg/command/format/diagnostic_test.go new file mode 100644 index 00000000000..f40c2f0e585 --- /dev/null +++ b/pkg/command/format/diagnostic_test.go @@ -0,0 +1,950 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package format + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/lang/marks" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestDiagnostic(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]A sourceless error[reset] +[red]│[reset] +[red]│[reset] It has no source references but it +[red]│[reset] does have a pretty long detail that +[red]│[reset] should wrap over multiple lines. +[red]╵[reset] +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + `[yellow]╷[reset] +[yellow]│[reset] [bold][yellow]Warning: [reset][bold]A sourceless warning[reset] +[yellow]│[reset] +[yellow]│[reset] It has no source references but it +[yellow]│[reset] does have a pretty long detail that +[yellow]│[reset] should wrap over multiple lines. 
+[yellow]╵[reset] +`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is "blah" +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] has a sensitive value +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is a string, known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] will be known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and function call annotation": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprLiteral(cty.True), + EvalContext: &hcl.EvalContext{ + Functions: map[string]function.Function{ + "beep": function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pos_param_0", + Type: cty.String, + }, + { + Name: "pos_param_1", + Type: cty.Number, + }, + }, + VarParam: &function.Parameter{ + Name: "var_param", + Type: cty.Bool, + }, + }), + }, + }, + // This is simulating what the HCL function call expression + // type would generate on evaluation, by implementing the + // same interface it uses. 
+ Extra: fakeDiagFunctionCallExtra("beep"), + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] while calling [bold]beep[reset](pos_param_0, pos_param_1, var_param...) +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + } + + sources := map[string]*hcl.File{ + "test.tf": {Bytes: []byte(`test source code`)}, + } + + // This empty Colorize just passes through all of the formatting codes + // untouched, because it doesn't define any formatting keywords. + colorize := &colorstring.Colorize{} + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(Diagnostic(diag, sources, colorize, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +func TestDiagnosticPlain(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Error: A sourceless error + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Warning: A sourceless warning + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. 
+`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? +`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is "blah" + +Whatever shall we do? 
+`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep has a sensitive value + +Whatever shall we do? +`, + }, + "error with source code subject and expression referring to sensitive value when not related to sensitivity": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark(marks.Sensitive), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? 
+`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is a string, known only after apply + +Whatever shall we do? +`, + }, + "error with source code subject and unknown string expression when problem isn't unknown-related": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is a string + +Whatever shall we do? 
+`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep will be known only after apply + +Whatever shall we do? +`, + }, + "error with source code subject and unknown expression of unknown type when problem isn't unknown-related": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? 
+`, + }, + } + + sources := map[string]*hcl.File{ + "test.tf": {Bytes: []byte(`test source code`)}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(DiagnosticPlain(diag, sources, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +func TestDiagnosticWarningsCompact(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.SimpleWarning("foo")) + diags = diags.Append(tfdiags.SimpleWarning("foo")) + diags = diags.Append(tfdiags.SimpleWarning("bar")) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source foo", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 5}, + End: hcl.Pos{Line: 2, Column: 1, Byte: 5}, + }, + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source foo", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 1, Byte: 7}, + End: hcl.Pos{Line: 3, Column: 1, Byte: 7}, + }, + }) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "source bar", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source2.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 1}, + End: hcl.Pos{Line: 1, Column: 1, Byte: 1}, + }, + }) + + // ConsolidateWarnings groups together the ones + // that have source location information and that + // have the same summary text. + diags = diags.ConsolidateWarnings(1) + + // A zero-value Colorize just passes all the formatting + // codes back to us, so we can test them literally. 
+ got := DiagnosticWarningsCompact(diags, &colorstring.Colorize{}) + want := `[bold][yellow]Warnings:[reset] + +- foo +- foo +- bar +- source foo + on source.tf line 2 (and 1 more) +- source bar + on source2.tf line 1 +` + if got != want { + t.Errorf( + "wrong result\ngot:\n%s\n\nwant:\n%s\n\ndiff:\n%s", + got, want, cmp.Diff(want, got), + ) + } +} + +// Test case via https://github.com/hashicorp/terraform/issues/21359 +func TestDiagnostic_nonOverlappingHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 5}, + End: hcl.Pos{Line: 4, Column: 2, Byte: 60}, + }, + }) + sources := map[string]*hcl.File{ + "source.tf": {Bytes: []byte(`x = somefunc("testing", { + alpha = "foo" + beta = "bar" +}) +`)}, + } + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 1: +│ 1: x = somefunc("testing", { +│ 2: alpha = "foo" +│ 3: beta = "bar" +│ 4: }) +│ +│ ... 
+╵ +` + output := Diagnostic(diags[0], sources, color, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnostic_emptyOverlapHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + }) + sources := map[string]*hcl.File{ + "source.tf": {Bytes: []byte(`variable "x" { + default = { + "foo" + } +`)}, + } + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 3, in variable "x": +│ 2: default = { +│ 3: "foo" +│ 4: } +│ +│ ... +╵ +` + output := Diagnostic(diags[0], sources, color, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_emptyOverlapHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + }) + sources := map[string]*hcl.File{ + "source.tf": {Bytes: []byte(`variable "x" { + default = { + "foo" + } +`)}, + } + + expected := ` +Error: Some error + + on source.tf line 3, in variable "x": + 2: default = { + 3: "foo" + 4: } + +... 
+` + output := DiagnosticPlain(diags[0], sources, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnostic_wrapDetailIncludingCommand(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Everything went wrong", + Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", + }) + color := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: true, + Disable: true, + } + expected := `╷ +│ Error: Everything went wrong +│ +│ This is a very long sentence about whatever went wrong which is supposed +│ to wrap onto multiple lines. Thank-you very much for listening. +│ +│ To fix this, run this very long command: +│ terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces +│ +│ Here is a coda which is also long enough to wrap and so it should +│ eventually make it onto multiple lines. THE END +╵ +` + output := Diagnostic(diags[0], nil, color, 76) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_wrapDetailIncludingCommand(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Everything went wrong", + Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. 
Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", + }) + + expected := ` +Error: Everything went wrong + +This is a very long sentence about whatever went wrong which is supposed to +wrap onto multiple lines. Thank-you very much for listening. + +To fix this, run this very long command: + terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces + +Here is a coda which is also long enough to wrap and so it should +eventually make it onto multiple lines. THE END +` + output := DiagnosticPlain(diags[0], nil, 76) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +// Test cases covering invalid JSON diagnostics which should still render +// correctly. These JSON diagnostic values cannot be generated from the +// json.NewDiagnostic code path, but we may read and display JSON diagnostics +// in future from other sources. 
+func TestDiagnosticFromJSON_invalid(t *testing.T) { + tests := map[string]struct { + Diag *viewsjson.Diagnostic + Want string + }{ + "zero-value end range and highlight end byte": { + &viewsjson.Diagnostic{ + Severity: viewsjson.DiagnosticSeverityError, + Summary: "Bad end", + Detail: "It all went wrong.", + Range: &viewsjson.DiagnosticRange{ + Filename: "ohno.tf", + Start: viewsjson.Pos{Line: 1, Column: 23, Byte: 22}, + End: viewsjson.Pos{Line: 0, Column: 0, Byte: 0}, + }, + Snippet: &viewsjson.DiagnosticSnippet{ + Code: `resource "foo_bar "baz" {`, + StartLine: 1, + HighlightStartOffset: 22, + HighlightEndOffset: 0, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad end[reset] +[red]│[reset] +[red]│[reset] on ohno.tf line 1: +[red]│[reset] 1: resource "foo_bar "baz[underline]"[reset] { +[red]│[reset] +[red]│[reset] It all went wrong. +[red]╵[reset] +`, + }, + } + + // This empty Colorize just passes through all of the formatting codes + // untouched, because it doesn't define any formatting keywords. + colorize := &colorstring.Colorize{} + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := strings.TrimSpace(DiagnosticFromJSON(test.Diag, colorize, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +// fakeDiagFunctionCallExtra is a fake implementation of the interface that +// HCL uses to provide "extra information" associated with diagnostics that +// describe errors during a function call. 
+type fakeDiagFunctionCallExtra string + +var _ hclsyntax.FunctionCallDiagExtra = fakeDiagFunctionCallExtra("") + +func (e fakeDiagFunctionCallExtra) CalledFunctionName() string { + return string(e) +} + +func (e fakeDiagFunctionCallExtra) FunctionCallError() error { + return nil +} + +// diagnosticCausedByUnknown is a testing helper for exercising our logic +// for selectively showing unknown values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by unknown values. +type diagnosticCausedByUnknown bool + +var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) + +func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { + return bool(e) +} + +// diagnosticCausedBySensitive is a testing helper for exercising our logic +// for selectively showing sensitive values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by sensitive values. +type diagnosticCausedBySensitive bool + +var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) + +func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { + return bool(e) +} diff --git a/pkg/command/format/format.go b/pkg/command/format/format.go new file mode 100644 index 00000000000..bf43e66740d --- /dev/null +++ b/pkg/command/format/format.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package format contains helpers for formatting various OpenTofu +// structures for human-readabout output. +// +// This package is used by the official OpenTofu CLI in formatting any +// output and is exported to encourage non-official frontends to mimic the +// output formatting as much as possible so that text formats of OpenTofu +// structures have a consistent look and feel. 
+package format + +import "github.com/kubegems/opentofu/pkg/plans" + +// DiffActionSymbol returns a string that, once passed through a +// colorstring.Colorize, will produce a result that can be written +// to a terminal to produce a symbol made of three printable +// characters, possibly interspersed with VT100 color codes. +func DiffActionSymbol(action plans.Action) string { + switch action { + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset]" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset]" + case plans.Create: + return " [green]+[reset]" + case plans.Delete: + return " [red]-[reset]" + case plans.Read: + return " [cyan]<=[reset]" + case plans.Update: + return " [yellow]~[reset]" + case plans.NoOp: + return " " + case plans.Forget: + return " [red].[reset]" + default: + return " ?" + } +} diff --git a/pkg/command/format/object_id.go b/pkg/command/format/object_id.go new file mode 100644 index 00000000000..b7ba29c1a99 --- /dev/null +++ b/pkg/command/format/object_id.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package format + +import ( + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +// ObjectValueID takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a unique identifier in the remote +// system that it belongs to which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the OpenTofu configuration is the best available identifier. 
+// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// This function will panic if the given value is not of an object type. +func ObjectValueID(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["id"] == cty.String: + v := obj.GetAttr("id") + if v.HasMark(marks.Sensitive) { + break + } + v, _ = v.Unmark() + + if v.IsKnown() && !v.IsNull() { + return "id", v.AsString() + } + + case atys["name"] == cty.String: + // "name" isn't always globally unique, but if there isn't also an + // "id" then it _often_ is, in practice. + v := obj.GetAttr("name") + if v.HasMark(marks.Sensitive) { + break + } + v, _ = v.Unmark() + + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + } + + return "", "" +} + +// ObjectValueName takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a human-friendly name in the remote +// system that it belongs to which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the OpenTofu configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. 
+// +// Callers that use both ObjectValueName and ObjectValueID at the same time +// should be prepared to get the same attribute key and value from both in +// some cases, since there is overlap betweek the id-extraction and +// name-extraction heuristics. +// +// This function will panic if the given value is not of an object type. +func ObjectValueName(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["name"] == cty.String: + v := obj.GetAttr("name") + if v.HasMark(marks.Sensitive) { + break + } + v, _ = v.Unmark() + + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + + case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: + tags := obj.GetAttr("tags") + if tags.IsNull() || !tags.IsWhollyKnown() || tags.HasMark(marks.Sensitive) { + break + } + tags, _ = tags.Unmark() + + switch { + case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): + v := tags.Index(cty.StringVal("name")) + if v.HasMark(marks.Sensitive) { + break + } + v, _ = v.Unmark() + + if v.IsKnown() && !v.IsNull() { + return "tags.name", v.AsString() + } + case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): + // AWS-style naming convention + v := tags.Index(cty.StringVal("Name")) + if v.HasMark(marks.Sensitive) { + break + } + v, _ = v.Unmark() + + if v.IsKnown() && !v.IsNull() { + return "tags.Name", v.AsString() + } + } + } + + return "", "" +} + +// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID +// and ObjectValueName (in that preference order) to try to extract some sort +// of human-friendly descriptive string value for an object as additional +// context about an object when it is being displayed in a compact way (where +// not all of the attributes are visible.) 
+// +// Just as with the two functions it wraps, it is a best-effort and may return +// two empty strings if no suitable attribute can be found for a given object. +func ObjectValueIDOrName(obj cty.Value) (k, v string) { + k, v = ObjectValueID(obj) + if k != "" { + return + } + k, v = ObjectValueName(obj) + return +} diff --git a/pkg/command/format/object_id_test.go b/pkg/command/format/object_id_test.go new file mode 100644 index 00000000000..d9e1291eff4 --- /dev/null +++ b/pkg/command/format/object_id_test.go @@ -0,0 +1,218 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package format + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestObjectValueIDOrName(t *testing.T) { + tests := []struct { + obj cty.Value + id [2]string + name [2]string + either [2]string + }{ + { + cty.NullVal(cty.EmptyObject), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.UnknownVal(cty.EmptyObject), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.EmptyObjectVal, + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo-123"), + }), + [...]string{"id", "foo-123"}, + [...]string{"", ""}, + [...]string{"id", "foo-123"}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo-123"), + "name": cty.StringVal("awesome-foo"), + }), + [...]string{"id", "foo-123"}, + [...]string{"name", "awesome-foo"}, + [...]string{"id", "foo-123"}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("awesome-foo"), + }), + [...]string{"name", "awesome-foo"}, + [...]string{"name", "awesome-foo"}, + [...]string{"name", "awesome-foo"}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": 
cty.StringVal("awesome-foo").Mark(marks.Sensitive), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("awesome-foo"), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("My Awesome Foo"), + }), + }), + [...]string{"name", "awesome-foo"}, + [...]string{"name", "awesome-foo"}, + [...]string{"name", "awesome-foo"}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("My Awesome Foo"), + "name": cty.StringVal("awesome-foo"), + }), + }), + [...]string{"", ""}, + [...]string{"tags.name", "awesome-foo"}, + [...]string{"tags.name", "awesome-foo"}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("My Awesome Foo"), + }), + }), + [...]string{"", ""}, + [...]string{"tags.Name", "My Awesome Foo"}, + [...]string{"tags.Name", "My Awesome Foo"}, + }, + + // The following are degenerate cases, included to make sure we don't + // crash when we encounter them. If you're here fixing a reported panic + // in this formatter, this is the place to add a new test case. 
+ { + cty.ObjectVal(map[string]cty.Value{ + "id": cty.True, + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.StringVal("foo"), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.NullVal(cty.Map(cty.String)), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.UnknownVal(cty.Map(cty.String)), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.True, + }), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.UnknownVal(cty.String), + }), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + }), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.NullVal(cty.String), + }), + }), + [...]string{"", ""}, + [...]string{"", ""}, + [...]string{"", ""}, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.obj), func(t *testing.T) { + obj := test.obj + gotIDKey, gotIDVal := ObjectValueID(obj) + gotNameKey, gotNameVal := ObjectValueName(obj) + 
gotEitherKey, gotEitherVal := ObjectValueIDOrName(obj) + + if got, want := [...]string{gotIDKey, gotIDVal}, test.id; got != want { + t.Errorf("wrong ObjectValueID result\ngot: %#v\nwant: %#v", got, want) + } + if got, want := [...]string{gotNameKey, gotNameVal}, test.name; got != want { + t.Errorf("wrong ObjectValueName result\ngot: %#v\nwant: %#v", got, want) + } + if got, want := [...]string{gotEitherKey, gotEitherVal}, test.either; got != want { + t.Errorf("wrong ObjectValueIDOrName result\ngot: %#v\nwant: %#v", got, want) + } + }) + } +} diff --git a/pkg/command/format/trivia.go b/pkg/command/format/trivia.go new file mode 100644 index 00000000000..4d3711401bc --- /dev/null +++ b/pkg/command/format/trivia.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package format + +import ( + "strings" + + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +// HorizontalRule returns a newline character followed by a number of +// horizontal line characters to fill the given width. +// +// If the given colorize has colors enabled, the rule will also be given a +// dark grey color to attempt to visually de-emphasize it for sighted users. +// +// This is intended for printing to the UI via mitchellh/cli.UI.Output, or +// similar, which will automatically append a trailing newline too. +func HorizontalRule(color *colorstring.Colorize, width int) string { + if width <= 1 { + return "\n" + } + rule := strings.Repeat("─", width-1) + if color == nil { // sometimes unit tests don't populate this properly + return "\n" + rule + } + return color.Color("[dark_gray]\n" + rule) +} + +// WordWrap takes a string containing unbroken lines of text and inserts +// newline characters to try to make the text fit within the given width. 
+//
+// The string can already contain newline characters, for example if you are
+// trying to render multiple paragraphs of text. (In that case, our usual
+// style would be to have _two_ newline characters as the paragraph separator.)
+//
+// As a special case, any line that begins with at least one space will be left
+// unbroken. This allows including literal segments in the output, such as
+// code snippets or filenames, where word wrapping would be confusing.
+func WordWrap(str string, width int) string {
+	if width <= 1 {
+		// Silly edge case. We'll just return the original string to avoid
+		// panicking or doing other weird stuff.
+		return str
+	}
+
+	var buf strings.Builder
+	lines := strings.Split(str, "\n")
+	for i, line := range lines {
+		if !strings.HasPrefix(line, " ") {
+			line = wordwrap.WrapString(line, uint(width-1))
+		}
+		if i > 0 {
+			buf.WriteByte('\n') // re-insert the newline separators consumed by strings.Split above
+		}
+		buf.WriteString(line)
+	}
+	return buf.String()
+}
diff --git a/pkg/command/get.go b/pkg/command/get.go
new file mode 100644
index 00000000000..7538986b266
--- /dev/null
+++ b/pkg/command/get.go
@@ -0,0 +1,124 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/kubegems/opentofu/pkg/command/views"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// GetCommand is a Command implementation that takes an OpenTofu
+// configuration and downloads all the modules.
+type GetCommand struct { + Meta +} + +func (c *GetCommand) Run(args []string) int { + var update bool + var testsDirectory string + + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("get") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.BoolVar(&update, "update", false, "update") + cmdFlags.StringVar(&testsDirectory, "test-directory", "tests", "test-directory") + cmdFlags.BoolVar(&c.outputInJSON, "json", false, "json") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + if c.outputInJSON { + c.Meta.color = false + c.Meta.Color = false + c.oldUi = c.Ui + c.Ui = &WrappedUi{ + cliUi: c.oldUi, + jsonView: views.NewJSONView(c.View), + outputInJSON: true, + } + } + + // Initialization can be aborted by interruption signals + ctx, done := c.InterruptibleContext(c.CommandContext()) + defer done() + + path, err := modulePath(cmdFlags.Args()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + path = c.normalizePath(path) + + abort, diags := getModules(ctx, &c.Meta, path, testsDirectory, update) + c.showDiagnostics(diags) + if abort || diags.HasErrors() { + return 1 + } + + return 0 +} + +func (c *GetCommand) Help() string { + helpText := ` +Usage: tofu [global options] get [options] + + Downloads and installs modules needed for the configuration in the + current working directory. + + This recursively downloads all modules needed, such as modules + imported by modules imported by the root and so on. If a module is + already downloaded, it will not be redownloaded or checked for updates + unless the -update flag is specified. + + Module installation also happens automatically by default as part of + the "tofu init" command, so you should rarely need to run this + command separately. + +Options: + + -update Check already-downloaded modules for available updates + and install the newest versions available. 
+ + -no-color Disable text coloring in the output. + + -test-directory=path Set the OpenTofu test directory, defaults to "tests". When set, the + test command will search for test files in the current directory and + in the one specified by the flag. + + -json Produce output in a machine-readable JSON format, + suitable for use in text editor integrations and other + automated systems. Always disables color. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *GetCommand) Synopsis() string { + return "Install or upgrade remote OpenTofu modules" +} + +func getModules(ctx context.Context, m *Meta, path string, testsDir string, upgrade bool) (abort bool, diags tfdiags.Diagnostics) { + hooks := uiModuleInstallHooks{ + Ui: m.Ui, + ShowLocalPaths: true, + } + return m.installModules(ctx, path, testsDir, upgrade, true, hooks) +} diff --git a/pkg/command/get_test.go b/pkg/command/get_test.go new file mode 100644 index 00000000000..2aca46afab8 --- /dev/null +++ b/pkg/command/get_test.go @@ -0,0 +1,117 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestGet(t *testing.T) { + wd := tempWorkingDirFixture(t, "get") + defer testChdir(t, wd.RootModuleDir())() + + ui := cli.NewMockUi() + c := &GetCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + WorkingDir: wd, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, "- foo in") { + t.Fatalf("doesn't look like get: %s", output) + } +} + +func TestGet_multipleArgs(t *testing.T) { + wd := tempWorkingDir(t) + defer testChdir(t, wd.RootModuleDir())() + + ui := cli.NewMockUi() + c := &GetCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + WorkingDir: wd, + }, + } + + args := []string{ + "bad", + "bad", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + } +} + +func TestGet_update(t *testing.T) { + wd := tempWorkingDirFixture(t, "get") + defer testChdir(t, wd.RootModuleDir())() + + ui := cli.NewMockUi() + c := &GetCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + WorkingDir: wd, + }, + } + + args := []string{ + "-update", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `- foo in`) { + t.Fatalf("doesn't look like get: %s", output) + } +} + +func TestGet_cancel(t *testing.T) { + // This test runs `tofu get` as if SIGINT (or similar on other + // platforms) were sent to it, testing that it is interruptible. 
+
+	wd := tempWorkingDirFixture(t, "init-registry-module")
+	defer testChdir(t, wd.RootModuleDir())()
+
+	// Our shutdown channel is pre-closed so init will exit as soon as it
+	// starts a cancelable portion of the process.
+	shutdownCh := make(chan struct{})
+	close(shutdownCh)
+
+	ui := cli.NewMockUi()
+	c := &GetCommand{
+		Meta: Meta{
+			testingOverrides: metaOverridesForProvider(testProvider()),
+			Ui:               ui,
+			WorkingDir:       wd,
+			ShutdownCh:       shutdownCh,
+		},
+	}
+
+	args := []string{}
+	if code := c.Run(args); code == 0 {
+		t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String())
+	}
+
+	if got, want := ui.ErrorWriter.String(), `Module installation was canceled by an interrupt signal`; !strings.Contains(got, want) {
+		t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got)
+	}
+}
diff --git a/pkg/command/graph.go b/pkg/command/graph.go
new file mode 100644
index 00000000000..aa91343de45
--- /dev/null
+++ b/pkg/command/graph.go
@@ -0,0 +1,290 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/command/arguments"
+	"github.com/kubegems/opentofu/pkg/dag"
+	"github.com/kubegems/opentofu/pkg/plans"
+	"github.com/kubegems/opentofu/pkg/plans/planfile"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+	"github.com/kubegems/opentofu/pkg/tofu"
+)
+
+// GraphCommand is a Command implementation that takes an OpenTofu
+// configuration and outputs the dependency tree in graphical form.
+type GraphCommand struct { + Meta +} + +func (c *GraphCommand) Run(args []string) int { + var diags tfdiags.Diagnostics + + var drawCycles bool + var graphTypeStr string + var moduleDepth int + var verbose bool + var planPath string + + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("graph") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") + cmdFlags.StringVar(&graphTypeStr, "type", "", "type") + cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") + cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") + cmdFlags.StringVar(&planPath, "plan", "", "plan") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + configPath, err := modulePath(cmdFlags.Args()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromPath(configPath) + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Try to load plan if path is specified + var planFile *planfile.WrappedPlanFile + if planPath != "" { + planFile, err = c.PlanFile(planPath, enc.Plan()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + } + + // Load the backend + var b backend.Enhanced + //nolint: nestif // This is inspired by apply:PrepareBackend + if lp, ok := planFile.Local(); ok { + plan, planErr := lp.ReadPlan() + if planErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + fmt.Sprintf("Cannot read the plan from the given plan file: %s.", planErr), + )) + c.showDiagnostics(diags) + return 1 + } + if 
plan.Backend.Config == nil { + // Should never happen; always indicates a bug in the creation of the plan file + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + "The given plan file does not have a valid backend configuration. This is a bug in the OpenTofu command that generated this plan file.", + )) + c.showDiagnostics(diags) + return 1 + } + var backendDiags tfdiags.Diagnostics + b, backendDiags = c.BackendForLocalPlan(plan.Backend, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } else { + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + b, backendDiags = c.Backend(&BackendOpts{ + Config: backendConfig, + }, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } + + // We require a local backend + local, ok := b.(backend.Local) + if !ok { + c.showDiagnostics(diags) // in case of any warnings in here + c.Ui.Error(ErrUnsupportedLocalOp) + return 1 + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + // Build the operation + opReq := c.Operation(b, arguments.ViewHuman, enc) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + opReq.PlanFile = planFile + opReq.AllowUnsetVariables = true + + // Inject information required for static evaluation + var callDiags tfdiags.Diagnostics + opReq.RootCall, callDiags = c.rootModuleCall(opReq.ConfigDir) + diags = diags.Append(callDiags) + if callDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + + // Get the context + lr, _, ctxDiags := local.LocalRun(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 
+ } + + if graphTypeStr == "" { + switch { + case lr.Plan != nil: + graphTypeStr = "apply" + default: + graphTypeStr = "plan" + } + } + + var g *tofu.Graph + var graphDiags tfdiags.Diagnostics + switch graphTypeStr { + case "plan": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.NormalMode) + case "plan-refresh-only": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.RefreshOnlyMode) + case "plan-destroy": + g, graphDiags = lr.Core.PlanGraphForUI(lr.Config, lr.InputState, plans.DestroyMode) + case "apply": + plan := lr.Plan + + // Historically "tofu graph" would allow the nonsensical request to + // render an apply graph without a plan, so we continue to support that + // here, though perhaps one day this should be an error. + if lr.Plan == nil { + plan = &plans.Plan{ + Changes: plans.NewChanges(), + UIMode: plans.NormalMode, + PriorState: lr.InputState, + PrevRunState: lr.InputState, + } + } + + g, graphDiags = lr.Core.ApplyGraphForUI(plan, lr.Config) + case "eval", "validate": + // Terraform v0.12 through v1.0 supported both of these, but the + // graph variants for "eval" and "validate" are purely implementation + // details and don't reveal anything (user-model-wise) that you can't + // see in the plan graph. + graphDiags = graphDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Graph type no longer available", + fmt.Sprintf("The graph type %q is no longer available. Use -type=plan instead to get a similar result.", graphTypeStr), + )) + default: + graphDiags = graphDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported graph type", + `The -type=... 
argument must be either "plan", "plan-refresh-only", "plan-destroy", or "apply".`, + )) + } + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + graphStr, err := tofu.GraphDot(g, &dag.DotOpts{ + DrawCycles: drawCycles, + MaxDepth: moduleDepth, + Verbose: verbose, + }) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error converting graph: %s", err)) + return 1 + } + + if diags.HasErrors() { + // For this command we only show diagnostics if there are errors, + // because printing out naked warnings could upset a naive program + // consuming our dot output. + c.showDiagnostics(diags) + return 1 + } + + c.Ui.Output(graphStr) + + return 0 +} + +func (c *GraphCommand) Help() string { + helpText := ` +Usage: tofu [global options] graph [options] + + Produces a representation of the dependency graph between different + objects in the current configuration and state. + + The graph is presented in the DOT language. The typical program that can + read this format is GraphViz, but many web services are also available + to read this format. + +Options: + + -plan=tfplan Render graph using the specified plan file instead of the + configuration in the current directory. + + -draw-cycles Highlight any cycles in the graph with colored edges. + This helps when diagnosing cycle errors. + + -type=plan Type of graph to output. Can be: plan, plan-refresh-only, + plan-destroy, or apply. By default OpenTofu chooses + "plan", or "apply" if you also set the -plan=... option. + + -module-depth=n (deprecated) In prior versions of OpenTofu, specified the + depth of modules to show in the output. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. 
+ Use this option more than once to include more than one + variables file. +` + return strings.TrimSpace(helpText) +} + +func (c *GraphCommand) Synopsis() string { + return "Generate a Graphviz graph of the steps in an operation" +} diff --git a/pkg/command/graph_test.go b/pkg/command/graph_test.go new file mode 100644 index 00000000000..3b1c9b522b2 --- /dev/null +++ b/pkg/command/graph_test.go @@ -0,0 +1,165 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestGraph(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("graph"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.opentofu.org/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} + +func TestGraph_multipleArgs(t *testing.T) { + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{ + "bad", + "bad", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + } +} + +func TestGraph_noArgs(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("graph"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.opentofu.org/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} + +func TestGraph_noConfig(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + // Running the graph command without a config should not panic, + // but this may be an error at some point in the future. + args := []string{"-type", "apply"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestGraph_plan(t *testing.T) { + testCwd(t) + + plan := &plans.Plan{ + Changes: plans.NewChanges(), + } + plan.Changes.Resources = append(plan.Changes.Resources, &plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: plans.DynamicValue(`{}`), + After: plans.DynamicValue(`null`), + }, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }) + beConfig := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NilVal, + "workspace_dir": cty.NilVal, + }) + emptyConfig, err := plans.NewDynamicValue(beConfig, beConfig.Type()) + if err != nil { + t.Fatal(err) + } + plan.Backend = plans.Backend{ + Type: "local", + Config: emptyConfig, + } + _, configSnap := testModuleWithSnapshot(t, "graph") + + planPath := testPlanFile(t, configSnap, states.NewState(), plan) + + ui := new(cli.MockUi) + c := &GraphCommand{ + Meta: Meta{ + 
testingOverrides: metaOverridesForProvider(applyFixtureProvider()), + Ui: ui, + }, + } + + args := []string{ + "-plan", planPath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, `provider[\"registry.opentofu.org/hashicorp/test\"]`) { + t.Fatalf("doesn't look like digraph: %s", output) + } +} diff --git a/pkg/command/helper.go b/pkg/command/helper.go new file mode 100644 index 00000000000..f4e760b71d3 --- /dev/null +++ b/pkg/command/helper.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" +) + +const failedToLoadSchemasMessage = ` +Warning: Failed to update data for external integrations + +OpenTofu was unable to generate a description of the updated +state for use with external integrations in the cloud backend. +Any integrations configured for this workspace which depend on +information from the state may not work correctly when using the +result of this action. + +This problem occurs when OpenTofu cannot read the schema for +one or more of the providers used in the state. The next successful +apply will correct the problem by re-generating the JSON description +of the state: + tofu apply +` + +func isCloudMode(b backend.Enhanced) bool { + _, ok := b.(*cloud.Cloud) + + return ok +} diff --git a/pkg/command/hook_module_install.go b/pkg/command/hook_module_install.go new file mode 100644 index 00000000000..dee8e0b27ab --- /dev/null +++ b/pkg/command/hook_module_install.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/mitchellh/cli" +) + +type uiModuleInstallHooks struct { + initwd.ModuleInstallHooksImpl + Ui cli.Ui + ShowLocalPaths bool +} + +var _ initwd.ModuleInstallHooks = uiModuleInstallHooks{} + +func (h uiModuleInstallHooks) Download(modulePath, packageAddr string, v *version.Version) { + if v != nil { + h.Ui.Info(fmt.Sprintf("Downloading %s %s for %s...", packageAddr, v, modulePath)) + } else { + h.Ui.Info(fmt.Sprintf("Downloading %s for %s...", packageAddr, modulePath)) + } +} + +func (h uiModuleInstallHooks) Install(modulePath string, v *version.Version, localDir string) { + if h.ShowLocalPaths { + h.Ui.Info(fmt.Sprintf("- %s in %s", modulePath, localDir)) + } else { + h.Ui.Info(fmt.Sprintf("- %s", modulePath)) + } +} diff --git a/pkg/command/import.go b/pkg/command/import.go new file mode 100644 index 00000000000..e0da2cc1301 --- /dev/null +++ b/pkg/command/import.go @@ -0,0 +1,372 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "errors" + "fmt" + "log" + "os" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// ImportCommand is a cli.Command implementation that imports resources +// into the OpenTofu state. 
+type ImportCommand struct {
+	Meta
+}
+
+func (c *ImportCommand) Run(args []string) int {
+	// Get the pwd since it's our default -config flag value
+	pwd, err := os.Getwd()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err))
+		return 1
+	}
+
+	var configPath string
+	args = c.Meta.process(args)
+
+	cmdFlags := c.Meta.extendedFlagSet("import")
+	cmdFlags.BoolVar(&c.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local OpenTofu versions are incompatible")
+	cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism")
+	cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path")
+	cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path")
+	cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path")
+	cmdFlags.StringVar(&configPath, "config", pwd, "path")
+	cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state")
+	cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout")
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		return 1
+	}
+
+	args = cmdFlags.Args()
+	if len(args) != 2 {
+		c.Ui.Error("The import command expects two arguments.")
+		cmdFlags.Usage()
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	// Parse the provided resource address.
+ traversalSrc := []byte(args[0]) + traversal, travDiags := hclsyntax.ParseTraversalAbs(traversalSrc, "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + c.registerSynthConfigSource("", traversalSrc) // so we can include a source snippet + c.showDiagnostics(diags) + c.Ui.Info(importCommandInvalidAddressReference) + return 1 + } + addr, addrDiags := addrs.ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + c.registerSynthConfigSource("", traversalSrc) // so we can include a source snippet + c.showDiagnostics(diags) + c.Ui.Info(importCommandInvalidAddressReference) + return 1 + } + + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + diags = diags.Append(errors.New("A managed resource address is required. Importing into a data resource is not allowed.")) + c.showDiagnostics(diags) + return 1 + } + + if !c.dirIsConfigPath(configPath) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "No OpenTofu configuration files", + Detail: fmt.Sprintf( + "The directory %s does not contain any OpenTofu configuration files (.tf or .tf.json). To specify a different configuration directory, use the -config=\"...\" command line option.", + configPath, + ), + }) + c.showDiagnostics(diags) + return 1 + } + + // Load the full config, so we can verify that the target resource is + // already configured. + config, configDiags := c.loadConfig(configPath) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromPath(configPath) + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Verify that the given address points to something that exists in config. 
+ // This is to reduce the risk that a typo in the resource address will + // import something that OpenTofu will want to immediately destroy on + // the next plan, and generally acts as a reassurance of user intent. + targetConfig := config.DescendentForInstance(addr.Module) + if targetConfig == nil { + modulePath := addr.Module.String() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import to non-existent module", + Detail: fmt.Sprintf( + "%s is not defined in the configuration. Please add configuration for this module before importing into it.", + modulePath, + ), + }) + c.showDiagnostics(diags) + return 1 + } + targetMod := targetConfig.Module + rcs := targetMod.ManagedResources + var rc *configs.Resource + resourceRelAddr := addr.Resource.Resource + for _, thisRc := range rcs { + if resourceRelAddr.Type == thisRc.Type && resourceRelAddr.Name == thisRc.Name { + rc = thisRc + break + } + } + if rc == nil { + modulePath := addr.Module.String() + if modulePath == "" { + modulePath = "the root module" + } + + c.showDiagnostics(diags) + + // This is not a diagnostic because currently our diagnostics printer + // doesn't support having a code example in the detail, and there's + // a code example in this message. + // TODO: Improve the diagnostics printer so we can use it for this + // message. + c.Ui.Error(fmt.Sprintf( + importCommandMissingResourceFmt, + addr, modulePath, resourceRelAddr.Type, resourceRelAddr.Name, + )) + return 1 + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: config.Module.Backend, + }, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // We require a backend.Local to build a context. 
+ // This isn't necessarily a "local.Local" backend, which provides local + // operations, however that is the only current implementation. A + // "local.Local" backend also doesn't necessarily provide local state, as + // that may be delegated to a "remotestate.Backend". + local, ok := b.(backend.Local) + if !ok { + c.Ui.Error(ErrUnsupportedLocalOp) + return 1 + } + + // Build the operation + opReq := c.Operation(b, arguments.ViewHuman, enc) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + opReq.Hooks = []tofu.Hook{c.uiHook()} + { + // Setup required variables/call for operation (usually done in Meta.RunOperation) + var moreDiags, callDiags tfdiags.Diagnostics + opReq.Variables, moreDiags = c.collectVariableValues() + opReq.RootCall, callDiags = c.rootModuleCall(opReq.ConfigDir) + diags = diags.Append(moreDiags).Append(callDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } + opReq.View = views.NewOperation(arguments.ViewHuman, c.RunningInAutomation, c.View) + + // Check remote OpenTofu version is compatible + remoteVersionDiags := c.remoteVersionCheck(b, opReq.Workspace) + diags = diags.Append(remoteVersionDiags) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + // Get the context + lr, state, ctxDiags := local.LocalRun(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Successfully creating the context can result in a lock, so ensure we release it + defer func() { + diags := opReq.StateLocker.Unlock() + if diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + + // Perform the import. Note that as you can see it is possible for this + // API to import more than one resource at once. For now, we only allow + // one while we stabilize this feature. 
+ newState, importDiags := lr.Core.Import(lr.Config, lr.InputState, &tofu.ImportOpts{ + Targets: []*tofu.ImportTarget{ + { + CommandLineImportTarget: &tofu.CommandLineImportTarget{ + Addr: addr, + ID: args[1], + }, + }, + }, + + // The LocalRun idea is designed around our primary operations, so + // the input variables end up represented as plan options even though + // this particular operation isn't really a plan. + SetVariables: lr.PlanOpts.SetVariables, + }) + diags = diags.Append(importDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + if isCloudMode(b) { + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags = c.MaybeGetSchemas(newState, nil) + diags = diags.Append(schemaDiags) + } + + // Persist the final state + log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath()) + if err := state.WriteState(newState); err != nil { + c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) + return 1 + } + if err := state.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) + return 1 + } + + c.Ui.Output(c.Colorize().Color("[reset][green]\n" + importCommandSuccessMsg)) + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + return 0 +} + +func (c *ImportCommand) Help() string { + helpText := ` +Usage: tofu [global options] import [options] ADDR ID + + Import existing infrastructure into your OpenTofu state. + + This will find and import the specified resource into your OpenTofu + state, allowing existing infrastructure to come under OpenTofu + management without having to be initially created by OpenTofu. + + The ADDR specified is the address to import the resource to. Please + see the documentation online for resource addresses. The ID is a + resource-specific ID to identify that resource being imported. 
Please + reference the documentation for the resource type you're importing to + determine the ID syntax to use. It typically matches directly to the ID + that the provider uses. + + This command will not modify your infrastructure, but it will make + network requests to inspect parts of your infrastructure relevant to + the resource being imported. + +Options: + + -config=path Path to a directory of OpenTofu configuration files + to use to configure the provider. Defaults to pwd. + If no config files are present, they must be provided + via the input prompts or env vars. + + -input=false Disable interactive input prompts. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -var 'foo=bar' Set a variable in the OpenTofu configuration. This + flag can be set multiple times. This is only useful + with the "-config" flag. + + -var-file=foo Set variables in the OpenTofu configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. + + -ignore-remote-version A rare option used for the remote backend only. See + the remote backend documentation for more information. + + -state, -state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *ImportCommand) Synopsis() string { + return "Associate existing infrastructure with an OpenTofu resource" +} + +const importCommandInvalidAddressReference = `For information on valid syntax, see: +https://opentofu.org/docs/cli/state/resource-addressing/` + +const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset] + +Before importing this resource, please create its configuration in %s. For example: + +resource %q %q { + # (resource arguments) +} +` + +const importCommandSuccessMsg = `Import successful! + +The resources that were imported are shown above. These resources are now in +your OpenTofu state and will henceforth be managed by OpenTofu. +` diff --git a/pkg/command/import_test.go b/pkg/command/import_test.go new file mode 100644 index 00000000000..e135dc950c1 --- /dev/null +++ b/pkg/command/import_test.go @@ -0,0 +1,984 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestImport(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider-implicit"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +func TestImport_providerConfig(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn 
= nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + configured := false + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + configured = true + + cfg := req.Config + if !cfg.Type().HasAttribute("foo") { + return providers.ConfigureProviderResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("configuration has no foo argument")), + } + } + if got, want := cfg.GetAttr("foo"), cty.StringVal("bar"); !want.RawEquals(got) { + return providers.ConfigureProviderResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("foo argument is %#v, but want %#v", got, want)), + } + } + + return providers.ConfigureProviderResponse{} + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify that we were called + if !configured { + t.Fatal("Configure should be called") + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +// "remote" state provided by the "local" backend +func TestImport_remoteState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("import-provider-remote-state"), td) + 
defer testChdir(t, td)() + + statePath := "imported.tfstate" + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": []string{"1.2.3"}, + }) + defer close() + + // init our backend + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + ic := &InitCommand{ + Meta: m, + } + + // (Using log here rather than t.Log so that these messages interleave with other trace logs) + log.Print("[TRACE] TestImport_remoteState running: tofu init") + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + p := testProvider() + ui = new(cli.MockUi) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + configured := false + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + var diags tfdiags.Diagnostics + configured = true + if got, want := req.Config.GetAttr("foo"), cty.StringVal("bar"); !want.RawEquals(got) { + diags = diags.Append(fmt.Errorf("wrong \"foo\" value %#v; want %#v", got, want)) + } + return 
providers.ConfigureProviderResponse{ + Diagnostics: diags, + } + } + + args := []string{ + "test_instance.foo", + "bar", + } + log.Printf("[TRACE] TestImport_remoteState running: tofu import %s %s", args[0], args[1]) + if code := c.Run(args); code != 0 { + fmt.Println(ui.OutputWriter) + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // verify that the local state was unlocked after import + if _, err := os.Stat(filepath.Join(td, fmt.Sprintf(".%s.lock.info", statePath))); !os.IsNotExist(err) { + t.Fatal("state left locked after import") + } + + // Verify that we were called + if !configured { + t.Fatal("Configure should be called") + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +// early failure on import should not leave stale lock +func TestImport_initializationErrorShouldUnlock(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("import-provider-remote-state"), td) + defer testChdir(t, td)() + + statePath := "imported.tfstate" + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": []string{"1.2.3"}, + }) + defer close() + + // init our backend + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + ic := &InitCommand{ + Meta: m, + } + + // (Using log here rather than t.Log so that these messages interleave with other trace logs) + log.Print("[TRACE] TestImport_initializationErrorShouldUnlock running: tofu init") + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // overwrite the config with one including a resource from an invalid provider + copy.CopyFile(filepath.Join(testFixturePath("import-provider-invalid"), "main.tf"), filepath.Join(td, "main.tf")) + + p := testProvider() + ui = new(cli.MockUi) + c := 
&ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "unknown_instance.baz", + "bar", + } + log.Printf("[TRACE] TestImport_initializationErrorShouldUnlock running: tofu import %s %s", args[0], args[1]) + + // this should fail + if code := c.Run(args); code != 1 { + fmt.Println(ui.OutputWriter) + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // specifically, it should fail due to a missing provider + msg := strings.ReplaceAll(ui.ErrorWriter.String(), "\n", " ") + if want := `provider registry.opentofu.org/hashicorp/unknown: required by this configuration but no version is selected`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } + + // verify that the local state was unlocked after initialization error + if _, err := os.Stat(filepath.Join(td, fmt.Sprintf(".%s.lock.info", statePath))); !os.IsNotExist(err) { + t.Fatal("state left locked after import") + } +} + +func TestImport_providerConfigWithVar(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider-var"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ 
+ Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + configured := false + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + var diags tfdiags.Diagnostics + configured = true + if got, want := req.Config.GetAttr("foo"), cty.StringVal("bar"); !want.RawEquals(got) { + diags = diags.Append(fmt.Errorf("wrong \"foo\" value %#v; want %#v", got, want)) + } + return providers.ConfigureProviderResponse{ + Diagnostics: diags, + } + } + + args := []string{ + "-state", statePath, + "-var", "foo=bar", + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify that we were called + if !configured { + t.Fatal("Configure should be called") + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +func TestImport_providerConfigWithDataSource(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider-datasource"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ 
+ Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad, wanted error: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestImport_providerConfigWithVarDefault(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider-var-default"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + configured := false + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + var diags tfdiags.Diagnostics + configured = true + if got, want := req.Config.GetAttr("foo"), cty.StringVal("bar"); !want.RawEquals(got) { + diags = diags.Append(fmt.Errorf("wrong \"foo\" value %#v; want 
%#v", got, want)) + } + return providers.ConfigureProviderResponse{ + Diagnostics: diags, + } + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify that we were called + if !configured { + t.Fatal("Configure should be called") + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +func TestImport_providerConfigWithVarFile(t *testing.T) { + defer testChdir(t, testFixturePath("import-provider-var-file"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + p.ImportResourceStateFn = nil + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yay"), + }), + }, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + configured := false + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + var diags tfdiags.Diagnostics + configured = true + if got, want := req.Config.GetAttr("foo"), cty.StringVal("bar"); !want.RawEquals(got) { + diags = diags.Append(fmt.Errorf("wrong \"foo\" value %#v; want %#v", got, want)) + } + 
return providers.ConfigureProviderResponse{ + Diagnostics: diags, + } + } + + args := []string{ + "-state", statePath, + "-var-file", "blah.tfvars", + "test_instance.foo", + "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify that we were called + if !configured { + t.Fatal("Configure should be called") + } + + if !p.ImportResourceStateCalled { + t.Fatal("ImportResourceState should be called") + } + + testStateOutput(t, statePath, testImportStr) +} + +func TestImport_emptyConfig(t *testing.T) { + defer testChdir(t, testFixturePath("empty"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := `No OpenTofu configuration files`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +func TestImport_missingResourceConfig(t *testing.T) { + defer testChdir(t, testFixturePath("import-missing-resource-config"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := `resource address "test_instance.foo" does not exist`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +func TestImport_missingModuleConfig(t 
*testing.T) { + defer testChdir(t, testFixturePath("import-missing-resource-config"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "module.baz.test_instance.foo", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := `module.baz is not defined in the configuration`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +func TestImportModuleVarFile(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("import-module-var-file"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": []string{"1.2.3"}, + }) + defer close() + + // init to install the module + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + ic := &InitCommand{ + Meta: m, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // import + ui = new(cli.MockUi) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + args := []string{ + "-state", statePath, + "module.child.test_instance.foo", + "bar", + } + code := c.Run(args) + if code != 0 { + t.Fatalf("import 
failed; expected success") + } +} + +// This test covers an edge case where a module with a complex input variable +// of nested objects has an invalid default which is overridden by the calling +// context, and is used in locals. If we don't evaluate module call variables +// for the import walk, this results in an error. +// +// The specific example has a variable "foo" which is a nested object: +// +// foo = { bar = { baz = true } } +// +// This is used as foo = var.foo in the call to the child module, which then +// uses the traversal foo.bar.baz in a local. A default value in the child +// module of {} causes this local evaluation to error, breaking import. +func TestImportModuleInputVariableEvaluation(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("import-module-input-variable"), td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + // init to install the module + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + ic := &InitCommand{ + Meta: m, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // import + ui = new(cli.MockUi) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + args := []string{ + "-state", statePath, + "module.child.test_instance.foo", + "bar", + } + code := c.Run(args) + if code != 0 { + t.Fatalf("import failed; expected success") + 
} +} + +func TestImport_dataResource(t *testing.T) { + defer testChdir(t, testFixturePath("import-missing-resource-config"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "data.test_data_source.foo", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := `A managed resource address is required`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +func TestImport_invalidResourceAddr(t *testing.T) { + defer testChdir(t, testFixturePath("import-missing-resource-config"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "bananas", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := `Error: Invalid address`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +func TestImport_targetIsModule(t *testing.T) { + defer testChdir(t, testFixturePath("import-missing-resource-config"))() + + statePath := testTempFile(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ImportCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "module.foo", + "bar", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("import succeeded; expected failure") + } + + msg := ui.ErrorWriter.String() + if want := 
`Error: Invalid address`; !strings.Contains(msg, want) { + t.Errorf("incorrect message\nwant substring: %s\ngot:\n%s", want, msg) + } +} + +const testImportStr = ` +test_instance.foo: + ID = yay + provider = provider["registry.opentofu.org/hashicorp/test"] +` diff --git a/pkg/command/init.go b/pkg/command/init.go new file mode 100644 index 00000000000..b9d1b5f2d18 --- /dev/null +++ b/pkg/command/init.go @@ -0,0 +1,1363 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "log" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/posener/complete" + "github.com/zclconf/go-cty/cty" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + backendInit "github.com/kubegems/opentofu/pkg/backend/init" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/providercache" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/pkg/tofumigrate" + tfversion "github.com/kubegems/opentofu/version" +) + +// InitCommand is a Command implementation that takes a Terraform +// module and clones it to the working directory. 
+type InitCommand struct { + Meta +} + +func (c *InitCommand) Run(args []string) int { + var flagFromModule, flagLockfile, testsDirectory string + var flagBackend, flagCloud, flagGet, flagUpgrade bool + var flagPluginPath FlagStringSlice + flagConfigExtra := newRawFlags("-backend-config") + + args = c.Meta.process(args) + cmdFlags := c.Meta.extendedFlagSet("init") + cmdFlags.BoolVar(&flagBackend, "backend", true, "") + cmdFlags.BoolVar(&flagCloud, "cloud", true, "") + cmdFlags.Var(flagConfigExtra, "backend-config", "") + cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") + cmdFlags.BoolVar(&flagGet, "get", true, "") + cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") + cmdFlags.BoolVar(&c.migrateState, "migrate-state", false, "migrate state") + cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") + cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") + cmdFlags.StringVar(&flagLockfile, "lockfile", "", "Set a dependency lockfile mode") + cmdFlags.BoolVar(&c.Meta.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local OpenTofu versions are incompatible") + cmdFlags.StringVar(&testsDirectory, "test-directory", "tests", "test-directory") + cmdFlags.BoolVar(&c.outputInJSON, "json", false, "json") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + if c.outputInJSON { + c.Meta.color = false + c.Meta.Color = false + c.oldUi = c.Ui + c.Ui = &WrappedUi{ + cliUi: c.oldUi, + jsonView: views.NewJSONView(c.View), + outputInJSON: true, + } + } + + backendFlagSet := arguments.FlagIsSet(cmdFlags, "backend") + cloudFlagSet := 
arguments.FlagIsSet(cmdFlags, "cloud") + + switch { + case backendFlagSet && cloudFlagSet: + c.Ui.Error("The -backend and -cloud options are aliases of one another and mutually-exclusive in their use") + return 1 + case backendFlagSet: + flagCloud = flagBackend + case cloudFlagSet: + flagBackend = flagCloud + } + + if c.migrateState && c.reconfigure { + c.Ui.Error("The -migrate-state and -reconfigure options are mutually-exclusive") + return 1 + } + + // Copying the state only happens during backend migration, so setting + // -force-copy implies -migrate-state + if c.forceInitCopy { + c.migrateState = true + } + + var diags tfdiags.Diagnostics + + if len(flagPluginPath) > 0 { + c.pluginPath = flagPluginPath + } + + // Validate the arg count and get the working directory + args = cmdFlags.Args() + path, err := modulePath(args) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + if err := c.storePluginPath(c.pluginPath); err != nil { + c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err)) + return 1 + } + + // Initialization can be aborted by interruption signals + ctx, done := c.InterruptibleContext(c.CommandContext()) + defer done() + + // This will track whether we outputted anything so that we know whether + // to output a newline before the success message + var header bool + + if flagFromModule != "" { + src := flagFromModule + + empty, err := configs.IsEmptyDir(path) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err)) + return 1 + } + if !empty { + c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty)) + return 1 + } + + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset][bold]Copying configuration[reset] from %q...", src, + ))) + header = true + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: false, // since they are in a weird location for init + } + + ctx, span := tracer.Start(ctx, "-from-module=...", trace.WithAttributes( + attribute.String("module_source", src), + )) + + 
initDirFromModuleAbort, initDirFromModuleDiags := c.initDirFromModule(ctx, path, src, hooks) + diags = diags.Append(initDirFromModuleDiags) + if initDirFromModuleAbort || initDirFromModuleDiags.HasErrors() { + c.showDiagnostics(diags) + span.SetStatus(codes.Error, "module installation failed") + span.End() + return 1 + } + span.End() + + c.Ui.Output("") + } + + // If our directory is empty, then we're done. We can't get or set up + // the backend with an empty directory. + empty, err := configs.IsEmptyDir(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error checking configuration: %w", err)) + c.showDiagnostics(diags) + return 1 + } + if empty { + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) + return 0 + } + + // Load just the root module to begin backend and module initialization + rootModEarly, earlyConfDiags := c.loadSingleModuleWithTests(path, testsDirectory) + + // There may be parsing errors in config loading but these will be shown later _after_ + // checking for core version requirement errors. Not meeting the version requirement should + // be the first error displayed if that is an issue, but other operations are required + // before being able to check core version requirements. + if rootModEarly == nil { + c.Ui.Error(c.Colorize().Color(strings.TrimSpace(errInitConfigError))) + diags = diags.Append(earlyConfDiags) + c.showDiagnostics(diags) + + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromModule(rootModEarly) + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + var back backend.Backend + + // There may be config errors or backend init errors but these will be shown later _after_ + // checking for core version requirement errors. 
+ var backDiags tfdiags.Diagnostics + var backendOutput bool + + switch { + case flagCloud && rootModEarly.CloudConfig != nil: + back, backendOutput, backDiags = c.initCloud(ctx, rootModEarly, flagConfigExtra, enc) + case flagBackend: + back, backendOutput, backDiags = c.initBackend(ctx, rootModEarly, flagConfigExtra, enc) + default: + // load the previously-stored backend config + back, backDiags = c.Meta.backendFromState(ctx, enc.State()) + } + if backendOutput { + header = true + } + + var state *states.State + + // If we have a functional backend (either just initialized or initialized + // on a previous run) we'll use the current state as a potential source + // of provider dependencies. + if back != nil { + c.ignoreRemoteVersionConflict(back) + workspace, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + sMgr, err := back.StateMgr(workspace) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error loading state: %s", err)) + return 1 + } + + if err := sMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err)) + return 1 + } + + state = sMgr.State() + } + + if flagGet { + modsOutput, modsAbort, modsDiags := c.getModules(ctx, path, testsDirectory, rootModEarly, flagUpgrade) + diags = diags.Append(modsDiags) + if modsAbort || modsDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if modsOutput { + header = true + } + } + + // With all of the modules (hopefully) installed, we can now try to load the + // whole configuration tree. + config, confDiags := c.loadConfigWithTests(path, testsDirectory) + // configDiags will be handled after the version constraint check, since an + // incorrect version of tofu may be producing errors for configuration + // constructs added in later versions. 
+ + // Before we go further, we'll check to make sure none of the modules in + // the configuration declare that they don't support this OpenTofu + // version, so we can produce a version-related error message rather than + // potentially-confusing downstream errors. + versionDiags := tofu.CheckCoreVersionRequirements(config) + if versionDiags.HasErrors() { + c.showDiagnostics(versionDiags) + return 1 + } + + // We've passed the core version check, now we can show errors from the + // configuration and backend initialization. + + // Now, we can check the diagnostics from the early configuration and the + // backend. + diags = diags.Append(earlyConfDiags) + diags = diags.Append(backDiags) + if earlyConfDiags.HasErrors() { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(diags) + return 1 + } + + // Now, we can show any errors from initializing the backend, but we won't + // show the errInitConfigError preamble as we didn't detect problems with + // the early configuration. + if backDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // If everything is ok with the core version check and backend initialization, + // show other errors from loading the full configuration tree. + diags = diags.Append(confDiags) + if confDiags.HasErrors() { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(diags) + return 1 + } + + if cb, ok := back.(*cloud.Cloud); ok { + if c.RunningInAutomation { + if err := cb.AssertImportCompatible(config); err != nil { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Compatibility error", err.Error())) + c.showDiagnostics(diags) + return 1 + } + } + } + + if state != nil { + // Since we now have the full configuration loaded, we can use it to migrate the in-memory state view + // prior to fetching providers. 
+ migratedState, migrateDiags := tofumigrate.MigrateStateProviderAddresses(config, state) + diags = diags.Append(migrateDiags) + if migrateDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + state = migratedState + } + + // Now that we have loaded all modules, check the module tree for missing providers. + providersOutput, providersAbort, providerDiags := c.getProviders(ctx, config, state, flagUpgrade, flagPluginPath, flagLockfile) + diags = diags.Append(providerDiags) + if providersAbort || providerDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if providersOutput { + header = true + } + + // If we outputted information, then we need to output a newline + // so that our success message is nicely spaced out from prior text. + if header { + c.Ui.Output("") + } + + // If we accumulated any warnings along the way that weren't accompanied + // by errors then we'll output them here so that the success message is + // still the final thing shown. + c.showDiagnostics(diags) + _, cloud := back.(*cloud.Cloud) + output := outputInitSuccess + if cloud { + output = outputInitSuccessCloud + } + + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) + + if !c.RunningInAutomation { + // If we're not running in an automation wrapper, give the user + // some more detailed next steps that are appropriate for interactive + // shell usage. + output = outputInitSuccessCLI + if cloud { + output = outputInitSuccessCLICloud + } + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(output))) + } + return 0 +} + +func (c *InitCommand) getModules(ctx context.Context, path, testsDir string, earlyRoot *configs.Module, upgrade bool) (output bool, abort bool, diags tfdiags.Diagnostics) { + testModules := false // We can also have modules buried in test files. 
+ for _, file := range earlyRoot.Tests { + for _, run := range file.Runs { + if run.Module != nil { + testModules = true + } + } + } + + if len(earlyRoot.ModuleCalls) == 0 && !testModules { + // Nothing to do + return false, false, nil + } + + ctx, span := tracer.Start(ctx, "install modules", trace.WithAttributes( + attribute.Bool("upgrade", upgrade), + )) + defer span.End() + + if upgrade { + c.Ui.Output(c.Colorize().Color("[reset][bold]Upgrading modules...")) + } else { + c.Ui.Output(c.Colorize().Color("[reset][bold]Initializing modules...")) + } + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: true, + } + + installAbort, installDiags := c.installModules(ctx, path, testsDir, upgrade, false, hooks) + diags = diags.Append(installDiags) + + // At this point, installModules may have generated error diags or been + // aborted by SIGINT. In any case we continue and update the manifest as best + // we can. + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if c.configLoader != nil { + if err := c.configLoader.RefreshModules(); err != nil { + // Should never happen + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read module manifest", + fmt.Sprintf("After installing modules, OpenTofu could not re-read the manifest of installed modules. This is a bug in OpenTofu. 
%s.", err), + )) + } + } + + return true, installAbort, diags +} + +func (c *InitCommand) initCloud(ctx context.Context, root *configs.Module, extraConfig rawFlags, enc encryption.Encryption) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { + ctx, span := tracer.Start(ctx, "initialize cloud backend") + _ = ctx // prevent staticcheck from complaining to avoid a maintenance hazard of having the wrong ctx in scope here + defer span.End() + + c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing cloud backend...")) + + if len(extraConfig.AllItems()) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid command-line option", + "The -backend-config=... command line option is only for state backends, and is not applicable to cloud backend-based configurations.\n\nTo change the set of workspaces associated with this configuration, edit the Cloud configuration block in the root module.", + )) + return nil, true, diags + } + + backendConfig := root.CloudConfig.ToBackendConfig() + + opts := &BackendOpts{ + Config: &backendConfig, + Init: true, + } + + back, backDiags := c.Backend(opts, enc.State()) + diags = diags.Append(backDiags) + return back, true, diags +} + +func (c *InitCommand) initBackend(ctx context.Context, root *configs.Module, extraConfig rawFlags, enc encryption.Encryption) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { + ctx, span := tracer.Start(ctx, "initialize backend") + _ = ctx // prevent staticcheck from complaining to avoid a maintenance hazard of having the wrong ctx in scope here + defer span.End() + + c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing the backend...")) + + var backendConfig *configs.Backend + var backendConfigOverride hcl.Body + if root.Backend != nil { + backendType := root.Backend.Type + if backendType == "cloud" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported backend type", + Detail: fmt.Sprintf("There is no explicit 
backend type named %q. To configure cloud backend, declare a 'cloud' block instead.", backendType), + Subject: &root.Backend.TypeRange, + }) + return nil, true, diags + } + + bf := backendInit.Backend(backendType) + if bf == nil { + detail := fmt.Sprintf("There is no backend type named %q.", backendType) + if msg, removed := backendInit.RemovedBackends[backendType]; removed { + detail = msg + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported backend type", + Detail: detail, + Subject: &root.Backend.TypeRange, + }) + return nil, true, diags + } + + b := bf(nil) // This is only used to get the schema, encryption should panic if attempted + backendSchema := b.ConfigSchema() + backendConfig = root.Backend + + var overrideDiags tfdiags.Diagnostics + backendConfigOverride, overrideDiags = c.backendConfigOverrideBody(extraConfig, backendSchema) + diags = diags.Append(overrideDiags) + if overrideDiags.HasErrors() { + return nil, true, diags + } + } else { + // If the user supplied a -backend-config on the CLI but no backend + // block was found in the configuration, it's likely - but not + // necessarily - a mistake. Return a warning. + if !extraConfig.Empty() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Missing backend configuration", + `-backend-config was used without a "backend" block in the configuration. + +If you intended to override the default local backend configuration, +no action is required, but you may add an explicit backend block to your +configuration to clear this warning: + +terraform { + backend "local" {} +} + +However, if you intended to override a defined backend, please verify that +the backend configuration is present and valid. 
+`, + )) + } + } + + opts := &BackendOpts{ + Config: backendConfig, + ConfigOverride: backendConfigOverride, + Init: true, + } + + back, backDiags := c.Backend(opts, enc.State()) + diags = diags.Append(backDiags) + return back, true, diags +} + +// Load the complete module tree, and fetch any missing providers. +// This method outputs its own Ui. +func (c *InitCommand) getProviders(ctx context.Context, config *configs.Config, state *states.State, upgrade bool, pluginDirs []string, flagLockfile string) (output, abort bool, diags tfdiags.Diagnostics) { + ctx, span := tracer.Start(ctx, "install providers") + defer span.End() + + // Dev overrides cause the result of "tofu init" to be irrelevant for + // any overridden providers, so we'll warn about it to avoid later + // confusion when OpenTofu ends up using a different provider than the + // lock file called for. + diags = diags.Append(c.providerDevOverrideInitWarnings()) + + // First we'll collect all the provider dependencies we can see in the + // configuration and the state. 
+ reqs, hclDiags := config.ProviderRequirements() + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return false, true, diags + } + if state != nil { + stateReqs := state.ProviderRequirements() + reqs = reqs.Merge(stateReqs) + } + + potentialProviderConflicts := make(map[string][]string) + + for providerAddr := range reqs { + if providerAddr.Namespace == "hashicorp" || providerAddr.Namespace == "opentofu" { + potentialProviderConflicts[providerAddr.Type] = append(potentialProviderConflicts[providerAddr.Type], providerAddr.ForDisplay()) + } + + if providerAddr.IsLegacy() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid legacy provider address", + fmt.Sprintf( + "This configuration or its associated state refers to the unqualified provider %q.\n\nYou must complete the Terraform 0.13 upgrade process before upgrading to later versions.", + providerAddr.Type, + ), + )) + } + } + + for name, addrs := range potentialProviderConflicts { + if len(addrs) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Potential provider misconfiguration", + fmt.Sprintf( + "OpenTofu has detected multiple providers of type %s (%s) which may be a misconfiguration.\n\nIf this is intentional you can ignore this warning", + name, + strings.Join(addrs, ", "), + ), + )) + } + } + + previousLocks, moreDiags := c.lockedDependencies() + diags = diags.Append(moreDiags) + + if diags.HasErrors() { + return false, true, diags + } + + var inst *providercache.Installer + if len(pluginDirs) == 0 { + // By default we use a source that looks for providers in all of the + // standard locations, possibly customized by the user in CLI config. + inst = c.providerInstaller() + } else { + // If the user passes at least one -plugin-dir then that circumvents + // the usual sources and forces OpenTofu to consult only the given + // directories. Anything not available in one of those directories + // is not available for installation. 
+ source := c.providerCustomLocalDirectorySource(pluginDirs) + inst = c.providerInstallerCustomSource(source) + + // The default (or configured) search paths are logged earlier, in provider_source.go + // Log that those are being overridden by the `-plugin-dir` command line options + log.Println("[DEBUG] init: overriding provider plugin search paths") + log.Printf("[DEBUG] will search for provider plugins in %s", pluginDirs) + } + + // We want to print out a nice warning if we don't manage to pull + // checksums for all our providers. This is tracked via callbacks + // and incomplete providers are stored here for later analysis. + var incompleteProviders []string + + // Because we're currently just streaming a series of events sequentially + // into the terminal, we're showing only a subset of the events to keep + // things relatively concise. Later it'd be nice to have a progress UI + // where statuses update in-place, but we can't do that as long as we + // are shimming our vt100 output to the legacy console API on Windows. 
+ evts := &providercache.InstallerEvents{ + PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { + c.Ui.Output(c.Colorize().Color( + "\n[reset][bold]Initializing provider plugins...", + )) + }, + ProviderAlreadyInstalled: func(provider addrs.Provider, selectedVersion getproviders.Version) { + c.Ui.Info(fmt.Sprintf("- Using previously-installed %s v%s", provider.ForDisplay(), selectedVersion)) + }, + BuiltInProviderAvailable: func(provider addrs.Provider) { + c.Ui.Info(fmt.Sprintf("- %s is built in to OpenTofu", provider.ForDisplay())) + }, + BuiltInProviderFailure: func(provider addrs.Provider, err error) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid dependency on built-in provider", + fmt.Sprintf("Cannot use %s: %s.", provider.ForDisplay(), err), + )) + }, + QueryPackagesBegin: func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool) { + if locked { + c.Ui.Info(fmt.Sprintf("- Reusing previous version of %s from the dependency lock file", provider.ForDisplay())) + } else { + if len(versionConstraints) > 0 { + c.Ui.Info(fmt.Sprintf("- Finding %s versions matching %q...", provider.ForDisplay(), getproviders.VersionConstraintsString(versionConstraints))) + } else { + c.Ui.Info(fmt.Sprintf("- Finding latest version of %s...", provider.ForDisplay())) + } + } + }, + LinkFromCacheBegin: func(provider addrs.Provider, version getproviders.Version, cacheRoot string) { + c.Ui.Info(fmt.Sprintf("- Using %s v%s from the shared cache directory", provider.ForDisplay(), version)) + }, + FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation) { + c.Ui.Info(fmt.Sprintf("- Installing %s v%s...", provider.ForDisplay(), version)) + }, + QueryPackagesFailure: func(provider addrs.Provider, err error) { + switch errorTy := err.(type) { + case getproviders.ErrProviderNotFound: + sources := errorTy.Sources + displaySources := 
make([]string, len(sources)) + for i, source := range sources { + displaySources[i] = fmt.Sprintf(" - %s", source) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to query available provider packages", + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s\n\n%s", + provider.ForDisplay(), err, strings.Join(displaySources, "\n"), + ), + )) + case getproviders.ErrRegistryProviderNotKnown: + // We might be able to suggest an alternative provider to use + // instead of this one. + suggestion := fmt.Sprintf("\n\nAll modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on %s, run the following command:\n tofu providers", provider.ForDisplay()) + alternative := getproviders.MissingProviderSuggestion(ctx, provider, inst.ProviderSource(), reqs) + if alternative != provider { + suggestion = fmt.Sprintf( + "\n\nDid you intend to use %s? If so, you must specify that source address in each module which requires that provider. 
To see which modules are currently depending on %s, run the following command:\n tofu providers", + alternative.ForDisplay(), provider.ForDisplay(), + ) + } + + if provider.Hostname == addrs.DefaultProviderRegistryHost { + suggestion += "\n\nIf you believe this provider is missing from the registry, please submit a issue on the OpenTofu Registry https://github.com/opentofu/registry/issues/new/choose" + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to query available provider packages", + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s%s", + provider.ForDisplay(), err, suggestion, + ), + )) + case getproviders.ErrHostNoProviders: + switch { + case errorTy.Hostname == svchost.Hostname("github.com") && !errorTy.HasOtherVersion: + // If a user copies the URL of a GitHub repository into + // the source argument and removes the schema to make it + // provider-address-shaped then that's one way we can end up + // here. We'll use a specialized error message in anticipation + // of that mistake. We only do this if github.com isn't a + // provider registry, to allow for the (admittedly currently + // rather unlikely) possibility that github.com starts being + // a real Terraform provider registry in the future. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The given source address %q specifies a GitHub repository rather than a OpenTofu provider. 
Refer to the documentation of the provider to find the correct source address to use.", + provider.String(), + ), + )) + + case errorTy.HasOtherVersion: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The host %q given in provider source address %q does not offer a OpenTofu provider registry that is compatible with this OpenTofu version, but it may be compatible with a different OpenTofu version.", + errorTy.Hostname, provider.String(), + ), + )) + + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider registry host", + fmt.Sprintf("The host %q given in provider source address %q does not offer a OpenTofu provider registry.", + errorTy.Hostname, provider.String(), + ), + )) + } + + case getproviders.ErrRequestCanceled: + // We don't attribute cancellation to any particular operation, + // but rather just emit a single general message about it at + // the end, by checking ctx.Err(). + + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to resolve provider packages", + fmt.Sprintf("Could not resolve provider %s: %s", + provider.ForDisplay(), err, + ), + )) + } + + }, + QueryPackagesWarning: func(provider addrs.Provider, warnings []string) { + displayWarnings := make([]string, len(warnings)) + for i, warning := range warnings { + displayWarnings[i] = fmt.Sprintf("- %s", warning) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Additional provider information from registry", + fmt.Sprintf("The remote registry returned warnings for %s:\n%s", + provider.String(), + strings.Join(displayWarnings, "\n"), + ), + )) + }, + LinkFromCacheFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to install provider from shared cache", + fmt.Sprintf("Error while importing %s v%s from the shared cache directory: %s.", provider.ForDisplay(), 
version, err), + )) + }, + FetchPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + const summaryIncompatible = "Incompatible provider version" + switch err := err.(type) { + case getproviders.ErrProtocolNotSupported: + closestAvailable := err.Suggestion + switch { + case closestAvailable == getproviders.UnspecifiedVersion: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(errProviderVersionIncompatible, provider.String()), + )) + case version.GreaterThan(closestAvailable): + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(providerProtocolTooNew, provider.ForDisplay(), + version, tfversion.String(), closestAvailable, closestAvailable, + getproviders.VersionConstraintsString(reqs[provider]), + ), + )) + default: // version is less than closestAvailable + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf(providerProtocolTooOld, provider.ForDisplay(), + version, tfversion.String(), closestAvailable, closestAvailable, + getproviders.VersionConstraintsString(reqs[provider]), + ), + )) + } + case getproviders.ErrPlatformNotSupported: + switch { + case err.MirrorURL != nil: + // If we're installing from a mirror then it may just be + // the mirror lacking the package, rather than it being + // unavailable from upstream. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf( + "Your chosen provider mirror at %s does not have a %s v%s package available for your current platform, %s.\n\nProvider releases are separate from OpenTofu CLI releases, so this provider might not support your current platform. 
Alternatively, the mirror itself might have only a subset of the plugin packages available in the origin registry, at %s.", + err.MirrorURL, err.Provider, err.Version, err.Platform, + err.Provider.Hostname, + ), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + summaryIncompatible, + fmt.Sprintf( + "Provider %s v%s does not have a package available for your current platform, %s.\n\nProvider releases are separate from OpenTofu CLI releases, so not all providers are available for all platforms. Other versions of this provider may have different platforms supported.", + err.Provider, err.Version, err.Platform, + ), + )) + } + + case getproviders.ErrRequestCanceled: + // We don't attribute cancellation to any particular operation, + // but rather just emit a single general message about it at + // the end, by checking ctx.Err(). + + default: + // We can potentially end up in here under cancellation too, + // in spite of our getproviders.ErrRequestCanceled case above, + // because not all of the outgoing requests we do under the + // "fetch package" banner are source metadata requests. + // In that case we will emit a redundant error here about + // the request being cancelled, but we'll still detect it + // as a cancellation after the installer returns and do the + // normal cancellation handling. 
+ + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to install provider", + fmt.Sprintf("Error while installing %s v%s: %s", provider.ForDisplay(), version, err), + )) + } + }, + FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult) { + var keyID string + if authResult != nil && authResult.Signed() { + keyID = authResult.KeyID + } + if keyID != "" { + keyID = c.Colorize().Color(fmt.Sprintf(", key ID [reset][bold]%s[reset]", keyID)) + } + + if authResult != nil && authResult.SigningSkipped() { + c.Ui.Warn(fmt.Sprintf("- Installed %s v%s. Signature validation was skipped due to the registry not containing GPG keys for this provider", provider.ForDisplay(), version)) + } else { + c.Ui.Info(fmt.Sprintf("- Installed %s v%s (%s%s)", provider.ForDisplay(), version, authResult, keyID)) + } + }, + ProvidersLockUpdated: func(provider addrs.Provider, version getproviders.Version, localHashes []getproviders.Hash, signedHashes []getproviders.Hash, priorHashes []getproviders.Hash) { + // We're going to use this opportunity to track if we have any + // "incomplete" installs of providers. An incomplete install is + // when we are only going to write the local hashes into our lock + // file which means a `tofu init` command will fail in future + // when used on machines of a different architecture. + // + // We want to print a warning about this. + + if len(signedHashes) > 0 { + // If we have any signedHashes hashes then we don't worry - as + // we know we retrieved all available hashes for this version + // anyway. + return + } + + // If local hashes and prior hashes are exactly the same then + // it means we didn't record any signed hashes previously, and + // we know we're not adding any extra in now (because we already + // checked the signedHashes), so that's a problem. 
+ // + // In the actual check here, if we have any priorHashes and those + // hashes are not the same as the local hashes then we're going to + // accept that this provider has been configured correctly. + if len(priorHashes) > 0 && !reflect.DeepEqual(localHashes, priorHashes) { + return + } + + // Now, either signedHashes is empty, or priorHashes is exactly the + // same as our localHashes which means we never retrieved the + // signedHashes previously. + // + // Either way, this is bad. Let's complain/warn. + incompleteProviders = append(incompleteProviders, provider.ForDisplay()) + }, + ProvidersFetched: func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult) { + thirdPartySigned := false + for _, authResult := range authResults { + if authResult.Signed() { + thirdPartySigned = true + break + } + } + if thirdPartySigned { + c.Ui.Info(fmt.Sprintf("\nProviders are signed by their developers.\n" + + "If you'd like to know more about provider signing, you can read about it here:\n" + + "https://opentofu.org/docs/cli/plugins/signing/")) + } + }, + } + ctx = evts.OnContext(ctx) + + mode := providercache.InstallNewProvidersOnly + if upgrade { + if flagLockfile == "readonly" { + c.Ui.Error("The -upgrade flag conflicts with -lockfile=readonly.") + return true, true, diags + } + + mode = providercache.InstallUpgrades + } + newLocks, err := inst.EnsureProviderVersions(ctx, previousLocks, reqs, mode) + if ctx.Err() == context.Canceled { + c.showDiagnostics(diags) + c.Ui.Error("Provider installation was canceled by an interrupt signal.") + return true, true, diags + } + if err != nil { + // The errors captured in "err" should be redundant with what we + // received via the InstallerEvents callbacks above, so we'll + // just return those as long as we have some. 
+ if !diags.HasErrors() {
+ diags = diags.Append(err)
+ }
+
+ return true, true, diags
+ }
+
+ // If the provider dependencies have changed since the last run then we'll
+ // say a little about that in case the reader wasn't expecting a change.
+ // (When we later integrate module dependencies into the lock file we'll
+ // probably want to refactor this so that we produce one lock-file related
+ // message for all changes together, but this is here for now just because
+ // it's the smallest change relative to what came before it, which was
+ // a hidden JSON file specifically for tracking providers.)
+ if !newLocks.Equal(previousLocks) {
+ // if readonly mode
+ if flagLockfile == "readonly" {
+ // check if required provider dependencies change
+ if !newLocks.EqualProviderAddress(previousLocks) {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ `Provider dependency changes detected`,
+ `Changes to the required provider dependencies were detected, but the lock file is read-only. To use and record these requirements, run "tofu init" without the "-lockfile=readonly" flag.`,
+ ))
+ return true, true, diags
+ }
+
+ // suppress updating the file to record any new information it learned,
+ // such as a hash using a new scheme.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Warning,
+ `Provider lock file not updated`,
+ `Changes to the provider selections were detected, but not saved in the .terraform.lock.hcl file. To record these selections, run "tofu init" without the "-lockfile=readonly" flag.`,
+ ))
+ return true, false, diags
+ }
+
+ // Jump in here and add a warning if any of the providers are incomplete.
+ if len(incompleteProviders) > 0 {
+ // We don't really care about the order here, we just want the
+ // output to be deterministic.
+ sort.Slice(incompleteProviders, func(i, j int) bool { + return incompleteProviders[i] < incompleteProviders[j] + }) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + incompleteLockFileInformationHeader, + fmt.Sprintf( + incompleteLockFileInformationBody, + strings.Join(incompleteProviders, "\n - "), + getproviders.CurrentPlatform.String()))) + } + + if previousLocks.Empty() { + // A change from empty to non-empty is special because it suggests + // we're running "tofu init" for the first time against a + // new configuration. In that case we'll take the opportunity to + // say a little about what the dependency lock file is, for new + // users or those who are upgrading from a previous Terraform + // version that didn't have dependency lock files. + c.Ui.Output(c.Colorize().Color(` +OpenTofu has created a lock file [bold].terraform.lock.hcl[reset] to record the provider +selections it made above. Include this file in your version control repository +so that OpenTofu can guarantee to make the same selections by default when +you run "tofu init" in the future.`)) + } else { + c.Ui.Output(c.Colorize().Color(` +OpenTofu has made some changes to the provider dependency selections recorded +in the .terraform.lock.hcl file. Review those changes and commit them to your +version control system if they represent changes you intended to make.`)) + } + + moreDiags = c.replaceLockedDependencies(newLocks) + diags = diags.Append(moreDiags) + } + + return true, false, diags +} + +// backendConfigOverrideBody interprets the raw values of -backend-config +// arguments into a hcl Body that should override the backend settings given +// in the configuration. +// +// If the result is nil then no override needs to be provided. +// +// If the returned diagnostics contains errors then the returned body may be +// incomplete or invalid. 
+func (c *InitCommand) backendConfigOverrideBody(flags rawFlags, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + items := flags.AllItems() + if len(items) == 0 { + return nil, nil + } + + var ret hcl.Body + var diags tfdiags.Diagnostics + synthVals := make(map[string]cty.Value) + + mergeBody := func(newBody hcl.Body) { + if ret == nil { + ret = newBody + } else { + ret = configs.MergeBodies(ret, newBody) + } + } + flushVals := func() { + if len(synthVals) == 0 { + return + } + newBody := configs.SynthBody("-backend-config=...", synthVals) + mergeBody(newBody) + synthVals = make(map[string]cty.Value) + } + + if len(items) == 1 && items[0].Value == "" { + // Explicitly remove all -backend-config options. + // We do this by setting an empty but non-nil ConfigOverrides. + return configs.SynthBody("-backend-config=''", synthVals), diags + } + + for _, item := range items { + eq := strings.Index(item.Value, "=") + + if eq == -1 { + // The value is interpreted as a filename. + newBody, fileDiags := c.loadHCLFile(item.Value) + diags = diags.Append(fileDiags) + if fileDiags.HasErrors() { + continue + } + // Generate an HCL body schema for the backend block. + var bodySchema hcl.BodySchema + for name := range schema.Attributes { + // We intentionally ignore the `Required` attribute here + // because backend config override files can be partial. The + // goal is to make sure we're not loading a file with + // extraneous attributes or blocks. + bodySchema.Attributes = append(bodySchema.Attributes, hcl.AttributeSchema{ + Name: name, + }) + } + for name, block := range schema.BlockTypes { + var labelNames []string + if block.Nesting == configschema.NestingMap { + labelNames = append(labelNames, "key") + } + bodySchema.Blocks = append(bodySchema.Blocks, hcl.BlockHeaderSchema{ + Type: name, + LabelNames: labelNames, + }) + } + // Verify that the file body matches the expected backend schema. 
+ _, schemaDiags := newBody.Content(&bodySchema) + diags = diags.Append(schemaDiags) + if schemaDiags.HasErrors() { + continue + } + flushVals() // deal with any accumulated individual values first + mergeBody(newBody) + } else { + name := item.Value[:eq] + rawValue := item.Value[eq+1:] + attrS := schema.Attributes[name] + if attrS == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend configuration argument", + fmt.Sprintf("The backend configuration argument %q given on the command line is not expected for the selected backend type.", name), + )) + continue + } + value, valueDiags := configValueFromCLI(item.String(), rawValue, attrS.Type) + diags = diags.Append(valueDiags) + if valueDiags.HasErrors() { + continue + } + synthVals[name] = value + } + } + + flushVals() + + return ret, diags +} + +func (c *InitCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *InitCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-backend": completePredictBoolean, + "-cloud": completePredictBoolean, + "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that + "-force-copy": complete.PredictNothing, + "-from-module": completePredictModuleSource, + "-get": completePredictBoolean, + "-input": completePredictBoolean, + "-lock": completePredictBoolean, + "-lock-timeout": complete.PredictAnything, + "-no-color": complete.PredictNothing, + "-plugin-dir": complete.PredictDirs(""), + "-reconfigure": complete.PredictNothing, + "-migrate-state": complete.PredictNothing, + "-upgrade": completePredictBoolean, + } +} + +func (c *InitCommand) Help() string { + helpText := ` +Usage: tofu [global options] init [options] + + Initialize a new or existing OpenTofu working directory by creating + initial files, loading any remote state, downloading modules, etc. 
+
+ This is the first command that should be run for any new or existing
+ OpenTofu configuration per machine. This sets up all the local data
+ necessary to run OpenTofu that is typically not committed to version
+ control.
+
+ This command is always safe to run multiple times. Though subsequent runs
+ may give errors, this command will never delete your configuration or
+ state. Even so, if you have important information, please back it up prior
+ to running this command, just in case.
+
+Options:
+
+ -backend=false Disable backend or cloud backend initialization
+ for this configuration and use what was previously
+ initialized instead.
+
+ aliases: -cloud=false
+
+ -backend-config=path Configuration to be merged with what is in the
+ configuration file's 'backend' block. This can be
+ either a path to an HCL file with key/value
+ assignments (same format as terraform.tfvars) or a
+ 'key=value' format, and can be specified multiple
+ times. The backend type must be in the configuration
+ itself.
+
+ -force-copy Suppress prompts about copying state data when
+ initializing a new state backend. This is
+ equivalent to providing a "yes" to all confirmation
+ prompts.
+
+ -from-module=SOURCE Copy the contents of the given module into the target
+ directory before initialization.
+
+ -get=false Disable downloading modules for this configuration.
+
+ -input=false Disable interactive prompts. Note that some actions may
+ require interactive prompts and will error if input is
+ disabled.
+
+ -lock=false Don't hold a state lock during backend migration.
+ This is dangerous if others might concurrently run
+ commands against the same workspace.
+
+ -lock-timeout=0s Duration to retry a state lock.
+
+ -no-color If specified, output won't contain any color.
+
+ -plugin-dir Directory containing plugin binaries. This overrides all
+ default search paths for plugins, and prevents the
+ automatic installation of plugins. This flag can be used
+ multiple times.
+ + -reconfigure Reconfigure a backend, ignoring any saved + configuration. + + -migrate-state Reconfigure a backend, and attempt to migrate any + existing state. + + -upgrade Install the latest module and provider versions + allowed within configured constraints, overriding the + default behavior of selecting exactly the version + recorded in the dependency lockfile. + + -lockfile=MODE Set a dependency lockfile mode. + Currently only "readonly" is valid. + + -ignore-remote-version A rare option used for cloud backend and the remote backend + only. Set this to ignore checking that the local and remote + OpenTofu versions use compatible state representations, making + an operation proceed even when there is a potential mismatch. + See the documentation on configuring OpenTofu with + cloud backend for more information. + + -test-directory=path Set the OpenTofu test directory, defaults to "tests". When set, the + test command will search for test files in the current directory and + in the one specified by the flag. + + -json Produce output in a machine-readable JSON format, + suitable for use in text editor integrations and other + automated systems. Always disables color. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *InitCommand) Synopsis() string { + return "Prepare your working directory for other commands" +} + +const errInitConfigError = ` +[reset]OpenTofu encountered problems during initialization, including problems +with the configuration, described below. 
+ +The OpenTofu configuration must be valid before initialization so that +OpenTofu can determine which modules and providers need to be installed. +` + +const errInitCopyNotEmpty = ` +The working directory already contains files. The -from-module option requires +an empty directory into which a copy of the referenced module will be placed. + +To initialize the configuration already in this working directory, omit the +-from-module option. +` + +const outputInitEmpty = ` +[reset][bold]OpenTofu initialized in an empty directory![reset] + +The directory has no OpenTofu configuration files. You may begin working +with OpenTofu immediately by creating OpenTofu configuration files. +` + +const outputInitSuccess = ` +[reset][bold][green]OpenTofu has been successfully initialized![reset][green] +` + +const outputInitSuccessCloud = ` +[reset][bold][green]Cloud backend has been successfully initialized![reset][green] +` + +const outputInitSuccessCLI = `[reset][green] +You may now begin working with OpenTofu. Try running "tofu plan" to see +any changes that are required for your infrastructure. All OpenTofu commands +should now work. + +If you ever set or change modules or backend configuration for OpenTofu, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. +` + +const outputInitSuccessCLICloud = `[reset][green] +You may now begin working with cloud backend. Try running "tofu plan" to +see any changes that are required for your infrastructure. + +If you ever set or change modules or OpenTofu Settings, run "tofu init" +again to reinitialize your working directory. +` + +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of tofu, +// but a newer version of the provider is compatible. +const providerProtocolTooOld = `Provider %q v%s is not compatible with OpenTofu %s. 
+Provider version %s is the latest compatible version. Select it with the following version constraint: + version = %q + +OpenTofu checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on compatibility between provider and OpenTofu versions. +` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of tofu, +// and the user could either upgrade tofu or choose an older version of the +// provider. +const providerProtocolTooNew = `Provider %q v%s is not compatible with OpenTofu %s. +You need to downgrade to v%s or earlier. Select it with the following constraint: + version = %q + +OpenTofu checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on compatibility between provider and OpenTofu versions. +Alternatively, upgrade to the latest version of OpenTofu for compatibility with newer provider releases. +` + +// No version of the provider is compatible. +const errProviderVersionIncompatible = `No compatible versions of provider %s were found.` + +// incompleteLockFileInformationHeader is the summary displayed to users when +// the lock file has only recorded local hashes. +const incompleteLockFileInformationHeader = `Incomplete lock file information for providers` + +// incompleteLockFileInformationBody is the body of text displayed to users when +// the lock file has only recorded local hashes. +const incompleteLockFileInformationBody = `Due to your customized provider installation methods, OpenTofu was forced to calculate lock file checksums locally for the following providers: + - %s + +The current .terraform.lock.hcl file only includes checksums for %s, so OpenTofu running on another platform will fail to install these providers. 
+ +To calculate additional checksums for another platform, run: + tofu providers lock -platform=linux_amd64 +(where linux_amd64 is the platform to generate)` diff --git a/pkg/command/init_test.go b/pkg/command/init_test.go new file mode 100644 index 00000000000..e7d54118636 --- /dev/null +++ b/pkg/command/init_test.go @@ -0,0 +1,3136 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/providercache" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func TestInit_empty(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_multipleArgs(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := 
&InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "bad", + "bad", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + } +} + +func TestInit_fromModule_cwdDest(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, os.ModePerm) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-from-module=" + testFixturePath("init"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(td, "hello.tf")); err != nil { + t.Fatalf("err: %s", err) + } +} + +// https://github.com/hashicorp/terraform/issues/518 +func TestInit_fromModule_dstInSrc(t *testing.T) { + dir := t.TempDir() + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // Change to the temporary directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(dir); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + if err := os.Mkdir("foo", os.ModePerm); err != nil { + t.Fatal(err) + } + + if _, err := os.Create("issue518.tf"); err != nil { + t.Fatalf("err: %s", err) + } + + if err := os.Chdir("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-from-module=./..", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(dir, "foo", "issue518.tf")); err != nil { + t.Fatalf("err: %s", err) + } +} + +func 
TestInit_get(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Check output + output := ui.OutputWriter.String() + if !strings.Contains(output, "foo in foo") { + t.Fatalf("doesn't look like we installed module 'foo': %s", output) + } +} + +func TestInit_getUpgradeModules(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-get=true", + "-upgrade", + } + if code := c.Run(args); code != 0 { + t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) + } + + // Check output + output := ui.OutputWriter.String() + if !strings.Contains(output, "Upgrading modules...") { + t.Fatalf("doesn't look like get upgrade: %s", output) + } +} + +func TestInit_backend(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { + 
t.Fatalf("err: %s", err) + } +} + +func TestInit_backendUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + { + log.Printf("[TRACE] TestInit_backendUnset: beginning first init") + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // Init + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + log.Printf("[TRACE] TestInit_backendUnset: first init complete") + t.Logf("First run output:\n%s", ui.OutputWriter.String()) + t.Logf("First run errors:\n%s", ui.ErrorWriter.String()) + + if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + log.Printf("[TRACE] TestInit_backendUnset: beginning second init") + + // Unset + if err := os.WriteFile("main.tf", []byte(""), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-force-copy"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + log.Printf("[TRACE] TestInit_backendUnset: second init complete") + t.Logf("Second run output:\n%s", ui.OutputWriter.String()) + t.Logf("Second run errors:\n%s", ui.ErrorWriter.String()) + + s := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if !s.Backend.Empty() { + t.Fatal("should not have backend config") + } + } +} + +func TestInit_backendConfigFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file"), td) + defer testChdir(t, td)() + + 
t.Run("good-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "input.config"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + }) + + // the backend config file must not be a full tofu block + t.Run("full-backend-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "backend.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Unsupported block type") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // the backend config file must match the schema for the backend + t.Run("invalid-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "invalid.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Unsupported argument") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // missing file is an error + t.Run("missing-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := 
&InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config", "missing.config"} + if code := c.Run(args); code != 1 { + t.Fatalf("expected error, got success\n") + } + if !strings.Contains(ui.ErrorWriter.String(), "Failed to read file") { + t.Fatalf("wrong error: %s", ui.ErrorWriter) + } + }) + + // blank filename clears the backend config + t.Run("blank-config-file", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config=", "-migrate-state"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify the backend config is empty + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":null,"workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + }) + + // simulate the local backend having a required field which is not + // specified in the override file + t.Run("required-argument", func(t *testing.T) { + c := &InitCommand{} + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "path": { + Type: cty.String, + Optional: true, + }, + "workspace_dir": { + Type: cty.String, + Required: true, + }, + }, + } + flagConfigExtra := newRawFlags("-backend-config") + flagConfigExtra.Set("input.config") + _, diags := c.backendConfigOverrideBody(flagConfigExtra, schema) + if len(diags) != 0 { + t.Errorf("expected no diags, got: %s", diags.Err()) + } + }) +} + +func TestInit_backendConfigFilePowershellConfusion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, 
testFixturePath("init-backend-config-file"), td)
+ defer testChdir(t, td)()
+
+ ui := new(cli.MockUi)
+ view, _ := testView(t)
+ c := &InitCommand{
+ Meta: Meta{
+ testingOverrides: metaOverridesForProvider(testProvider()),
+ Ui: ui,
+ View: view,
+ },
+ }
+
+ // SUBTLE: when using -flag=value with Powershell, unquoted values are
+ // broken into separate arguments. This results in the init command
+ // interpreting the flags as an empty backend-config setting (which is
+ // semantically valid!) followed by a custom configuration path.
+ //
+ // Adding the "=" here forces this codepath to be checked, and it should
+ // result in an early exit with a diagnostic that the provided
+ // configuration file is not a directory.
+ args := []string{"-backend-config=", "./input.config"}
+ if code := c.Run(args); code != 1 {
+ t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
+ }
+
+ output := ui.ErrorWriter.String()
+ if got, want := output, `Too many command line arguments`; !strings.Contains(got, want) {
+ t.Fatalf("wrong output\ngot:\n%s\n\nwant: message containing %q", got, want)
+ }
+}
+
+func TestInit_backendReconfigure(t *testing.T) {
+ // Create a temporary working directory that is empty
+ td := t.TempDir()
+ testCopyDir(t, testFixturePath("init-backend"), td)
+ defer testChdir(t, td)()
+
+ providerSource, close := newMockProviderSource(t, map[string][]string{
+ "hashicorp/test": {"1.2.3"},
+ })
+ defer close()
+
+ ui := new(cli.MockUi)
+ view, _ := testView(t)
+ c := &InitCommand{
+ Meta: Meta{
+ testingOverrides: metaOverridesForProvider(testProvider()),
+ ProviderSource: providerSource,
+ Ui: ui,
+ View: view,
+ },
+ }
+
+ // create some state, so the backend has something to migrate.
+ f, err := os.Create("foo") // this is the "path" in the backend config
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ err = writeStateForTesting(testState(), f)
+ f.Close()
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ args := []string{}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+ }
+
+ // now run init again, changing the path.
+ // The -reconfigure flag prevents init from migrating
+ // Without -reconfigure, the test fails since the backend asks for input on migrating state
+ args = []string{"-reconfigure", "-backend-config", "path=changed"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+ }
+}
+
+func TestInit_backendConfigFileChange(t *testing.T) {
+ // Create a temporary working directory that is empty
+ td := t.TempDir()
+ testCopyDir(t, testFixturePath("init-backend-config-file-change"), td)
+ defer testChdir(t, td)()
+
+ ui := new(cli.MockUi)
+ view, _ := testView(t)
+ c := &InitCommand{
+ Meta: Meta{
+ testingOverrides: metaOverridesForProvider(testProvider()),
+ Ui: ui,
+ View: view,
+ },
+ }
+
+ args := []string{"-backend-config", "input.config", "-migrate-state"}
+ if code := c.Run(args); code != 0 {
+ t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+ }
+
+ // Read our saved backend config and verify we have our settings
+ state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+ if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want {
+ t.Errorf("wrong config\ngot: %s\nwant: %s", got, want)
+ }
+}
+
+func TestInit_backendMigrateWhileLocked(t *testing.T) {
+ // Create a temporary working directory that is empty
+ td := t.TempDir()
+ testCopyDir(t, testFixturePath("init-backend-migrate-while-locked"), td)
+ defer testChdir(t, td)()
+
+ providerSource, close := newMockProviderSource(t, map[string][]string{
+ "hashicorp/test": {"1.2.3"},
+ })
+ defer close()
+
+ ui := 
new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + ProviderSource: providerSource, + Ui: ui, + View: view, + }, + } + + // Create some state, so the backend has something to migrate from + f, err := os.Create("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(testState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Lock the source state + unlock, err := testLockState(t, testDataDir, "local-state.tfstate") + if err != nil { + t.Fatal(err) + } + defer unlock() + + // Attempt to migrate + args := []string{"-backend-config", "input.config", "-migrate-state", "-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("expected nonzero exit code: %s", ui.OutputWriter.String()) + } + + // Disabling locking should work + args = []string{"-backend-config", "input.config", "-migrate-state", "-force-copy", "-lock=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("expected zero exit code, got %d: %s", code, ui.ErrorWriter.String()) + } +} + +func TestInit_backendConfigFileChangeWithExistingState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-file-change-migrate-existing"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + oldState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + + // we deliberately do not provide the answer for backend-migrate-copy-to-empty to trigger error + args := []string{"-migrate-state", "-backend-config", "input.config", "-input=true"} + if code := c.Run(args); code == 0 { + t.Fatal("expected error") + } + + // Read our backend config and verify new settings are not saved + state := testDataStateRead(t, 
filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"local-state.tfstate"}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + // without changing config, hash should not change + if oldState.Backend.Hash != state.Backend.Hash { + t.Errorf("backend hash should not have changed\ngot: %d\nwant: %d", state.Backend.Hash, oldState.Backend.Hash) + } +} + +func TestInit_backendConfigKV(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-kv"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=hello"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } +} + +func TestInit_backendConfigKVReInit(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-config-kv"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=test"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: 
ui, + View: view, + }, + } + + // a second init should require no changes, nor should it change the backend. + args = []string{"-input=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg := map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != "test" { + t.Fatalf(`expected backend path="test", got path="%v"`, cfg["path"]) + } + + // override the -backend-config options by settings + args = []string{"-input=false", "-backend-config", "", "-migrate-state"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg = map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != nil { + t.Fatalf(`expected backend path="", got path="%v"`, cfg["path"]) + } +} + +func TestInit_backendConfigKVReInitWithConfigDiff(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // a second init with identical config should require no changes, nor + // should it 
change the backend. + args = []string{"-input=false", "-backend-config", "path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // make sure the backend is configured how we expect + configState := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + cfg := map[string]interface{}{} + if err := json.Unmarshal(configState.Backend.ConfigRaw, &cfg); err != nil { + t.Fatal(err) + } + if cfg["path"] != "foo" { + t.Fatalf(`expected backend path="foo", got path="%v"`, cfg["path"]) + } +} + +func TestInit_backendCli_no_config_block(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-backend-config", "path=test"} + if code := c.Run(args); code != 0 { + t.Fatalf("got exit status %d; want 0\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "Warning: Missing backend configuration") { + t.Fatal("expected missing backend block warning, got", errMsg) + } +} + +func TestInit_backendReinitWithExtra(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-empty"), td) + defer testChdir(t, td)() + + m := testMetaBackend(t, nil) + opts := &BackendOpts{ + ConfigOverride: configs.SynthBody("synth", map[string]cty.Value{ + "path": cty.StringVal("hello"), + }), + Init: true, + } + + _, cHash, err := m.backendConfig(opts) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := 
[]string{"-backend-config", "path=hello"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + if state.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } + + // init again and make sure nothing changes + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"hello","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + if state.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } +} + +// move option from config to -backend-config args +func TestInit_backendReinitConfigToExtra(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + if code := c.Run([]string{"-input=false"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Read our saved backend config and verify we have our settings + state := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { + t.Errorf("wrong config\ngot: %s\nwant: %s", got, want) + } + + backendHash := state.Backend.Hash + + // init again but remove the path option from 
the config + cfg := "terraform {\n backend \"local\" {}\n}\n" + if err := os.WriteFile("main.tf", []byte(cfg), 0644); err != nil { + t.Fatal(err) + } + + // We need a fresh InitCommand here because the old one now has our configuration + // file cached inside it, so it won't re-read the modification we just made. + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false", "-backend-config=path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { + t.Errorf("wrong config after moving to arg\ngot: %s\nwant: %s", got, want) + } + + if state.Backend.Hash == backendHash { + t.Fatal("state.Backend.Hash was not updated") + } +} + +func TestInit_backendCloudInvalidOptions(t *testing.T) { + // There are various "tofu init" options that are only for + // traditional backends and not applicable to Terraform Cloud mode. + // For those, we want to return an explicit error rather than + // just silently ignoring them, so that users will be aware that + // Cloud mode has more of an expected "happy path" than the + // less-vertically-integrated backends do, and to avoid these + // unapplicable options becoming compatibility constraints for + // future evolution of Cloud mode. + + // We use the same starting fixture for all of these tests, but some + // of them will customize it a bit as part of their work. + setupTempDir := func(t *testing.T) func() { + t.Helper() + td := t.TempDir() + testCopyDir(t, testFixturePath("init-cloud-simple"), td) + unChdir := testChdir(t, td) + return unChdir + } + + // Some of the tests need a non-empty placeholder state file to work + // with. 
+ fakeState := states.BuildState(func(cb *states.SyncState) { + // Having a root module output value should be enough for this + // state file to be considered "non-empty" and thus a candidate + // for migration. + cb.SetOutputValue( + addrs.OutputValue{Name: "a"}.Absolute(addrs.RootModuleInstance), + cty.True, + false, + ) + }) + fakeStateFile := &statefile.File{ + Lineage: "boop", + Serial: 4, + TerraformVersion: version.Must(version.NewVersion("1.0.0")), + State: fakeState, + } + var fakeStateBuf bytes.Buffer + err := statefile.WriteForTest(fakeStateFile, &fakeStateBuf) + if err != nil { + t.Error(err) + } + fakeStateBytes := fakeStateBuf.Bytes() + + t.Run("-backend-config", func(t *testing.T) { + defer setupTempDir(t)() + + // We have -backend-config as a pragmatic way to dynamically set + // certain settings of backends that tend to vary depending on + // where OpenTofu is running, such as AWS authentication profiles + // that are naturally local only to the machine where OpenTofu is + // running. Those needs don't apply to Terraform Cloud, because + // the remote workspace encapsulates all of the details of how + // operations and state work in that case, and so the Cloud + // configuration is only about which workspaces we'll be working + // with. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-backend-config=anything"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -backend-config=... command line option is only for state backends, and +is not applicable to cloud backend-based configurations. + +To change the set of workspaces associated with this configuration, edit the +Cloud configuration block in the root module. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-reconfigure", func(t *testing.T) { + defer setupTempDir(t)() + + // The -reconfigure option was originally imagined as a way to force + // skipping state migration when migrating between backends, but it + // has a historical flaw that it doesn't work properly when the + // initial situation is the implicit local backend with a state file + // present. The Terraform Cloud migration path has some additional + // steps to take care of more details automatically, and so + // -reconfigure doesn't really make sense in that context, particularly + // with its design bug with the handling of the implicit local backend. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-reconfigure"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -reconfigure option is for in-place reconfiguration of state backends +only, and is not needed when changing cloud backend settings. + +When using cloud backend, initialization automatically activates any new +Cloud configuration settings. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-reconfigure when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. 
+ + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-reconfigure"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -reconfigure option is unsupported when migrating to cloud backend, +because activating cloud backend involves some additional steps. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-migrate-state", func(t *testing.T) { + defer setupTempDir(t)() + + // In Cloud mode, migrating in or out always proposes migrating state + // and changing configuration while staying in cloud mode never migrates + // state, so this special option isn't relevant. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -migrate-state option is for migration between state backends only, and +is not applicable when using cloud backend. + +State storage is handled automatically by cloud backend and so the state +storage location is not configurable. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-migrate-state when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. 
+ + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -migrate-state option is for migration between state backends only, and +is not applicable when using cloud backend. + +Cloud backend migration has additional steps, configured by interactive +prompts. + +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-force-copy", func(t *testing.T) { + defer setupTempDir(t)() + + // In Cloud mode, migrating in or out always proposes migrating state + // and changing configuration while staying in cloud mode never migrates + // state, so this special option isn't relevant. + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -force-copy option is for migration between state backends only, and is +not applicable when using cloud backend. + +State storage is handled automatically by cloud backend and so the state +storage location is not configurable. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + t.Run("-force-copy when migrating in", func(t *testing.T) { + defer setupTempDir(t)() + + // We have a slightly different error message for the case where we + // seem to be trying to migrate to Terraform Cloud with existing + // state or explicit backend already present. + + if err := os.WriteFile("terraform.tfstate", fakeStateBytes, 0644); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + args := []string{"-force-copy"} + if code := c.Run(args); code == 0 { + t.Fatalf("unexpected success\n%s", ui.OutputWriter.String()) + } + + gotStderr := ui.ErrorWriter.String() + wantStderr := ` +Error: Invalid command-line option + +The -force-copy option is for migration between state backends only, and is +not applicable when using cloud backend. + +Cloud backend migration has additional steps, configured by interactive +prompts. 
+ +` + if diff := cmp.Diff(wantStderr, gotStderr); diff != "" { + t.Errorf("wrong error output\n%s", diff) + } + }) + +} + +// make sure inputFalse stops execution on migrate +func TestInit_inputFalse(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"-input=false", "-backend-config=path=foo"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + // write different states for foo and bar + fooState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("foo"), + false, // not sensitive + ) + }) + if err := statemgr.WriteAndPersist(statemgr.NewFilesystem("foo", encryption.StateEncryptionDisabled()), fooState, nil); err != nil { + t.Fatal(err) + } + barState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, // not sensitive + ) + }) + if err := statemgr.WriteAndPersist(statemgr.NewFilesystem("bar", encryption.StateEncryptionDisabled()), barState, nil); err != nil { + t.Fatal(err) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args = []string{"-input=false", "-backend-config=path=bar", "-migrate-state"} + if code := c.Run(args); code == 0 { + t.Fatal("init should have failed", ui.OutputWriter) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "interactive input is disabled") { + t.Fatal("expected input disabled error, got", errMsg) + } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + // A missing input=false should abort rather than loop infinitely + args = []string{"-backend-config=path=baz"} + if code := c.Run(args); code == 0 { + t.Fatal("init should have failed", ui.OutputWriter) + } +} + +func TestInit_getProvider(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "exact": {"1.2.3"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // check that we got the providers for our config + exactPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/exact/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(exactPath); os.IsNotExist(err) { + t.Fatal("provider 'exact' not downloaded") + } + greaterThanPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/greater-than/2.3.4/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { + t.Fatal("provider 'greater-than' not downloaded") + } + betweenPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/between/2.3.4/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(betweenPath); os.IsNotExist(err) { + t.Fatal("provider 
'between' not downloaded") + } + + t.Run("future-state", func(t *testing.T) { + // getting providers should fail if a state from a newer version of + // tofu exists, since InitCommand.getProviders needs to inspect that + // state. + + f, err := os.Create(DefaultStateFilename) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + // Construct a mock state file from the far future + type FutureState struct { + Version uint `json:"version"` + Lineage string `json:"lineage"` + TerraformVersion string `json:"terraform_version"` + Outputs map[string]interface{} `json:"outputs"` + Resources []map[string]interface{} `json:"resources"` + } + fs := &FutureState{ + Version: 999, + Lineage: "123-456-789", + TerraformVersion: "999.0.0", + Outputs: make(map[string]interface{}), + Resources: make([]map[string]interface{}, 0), + } + src, err := json.MarshalIndent(fs, "", " ") + if err != nil { + t.Fatalf("failed to marshal future state: %s", err) + } + src = append(src, '\n') + _, err = f.Write(src) + if err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + m.Ui = ui + m.View = view + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatal("expected error, got:", ui.OutputWriter) + } + + errMsg := ui.ErrorWriter.String() + if !strings.Contains(errMsg, "Unsupported state file format") { + t.Fatal("unexpected error:", errMsg) + } + }) +} + +func TestInit_getProviderSource(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-source"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "acme/alpha": {"1.2.3"}, + // config doesn't specify versions for other providers + "registry.example.com/acme/beta": {"1.0.0"}, + "gamma": 
{"2.0.0"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // check that we got the providers for our config + exactPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/acme/alpha/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(exactPath); os.IsNotExist(err) { + t.Error("provider 'alpha' not downloaded") + } + greaterThanPath := fmt.Sprintf(".terraform/providers/registry.example.com/acme/beta/1.0.0/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(greaterThanPath); os.IsNotExist(err) { + t.Error("provider 'beta' not downloaded") + } + betweenPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/gamma/2.0.0/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(betweenPath); os.IsNotExist(err) { + t.Error("provider 'gamma' not downloaded") + } +} + +func TestInit_getProviderLegacyFromState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-legacy-from-state"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + providerSource, close := newMockProviderSource(t, map[string][]string{ + "acme/alpha": {"1.2.3"}, + }) + defer close() + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // Expect this diagnostic output + wants := []string{ + "Invalid legacy provider 
address", + "You must complete the Terraform 0.13 upgrade process", + } + got := ui.ErrorWriter.String() + for _, want := range wants { + if !strings.Contains(got, want) { + t.Fatalf("expected output to contain %q, got:\n\n%s", want, got) + } + } +} + +func TestInit_getProviderInvalidPackage(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-invalid-package"), td) + defer testChdir(t, td)() + + overrides := metaOverridesForProvider(testProvider()) + ui := new(cli.MockUi) + view, _ := testView(t) + + // create a provider source which allows installing an invalid package + addr := addrs.MustParseProviderSourceString("invalid/package") + version := getproviders.MustParseVersion("1.0.0") + meta, close, err := getproviders.FakeInstallablePackageMeta( + addr, + version, + getproviders.VersionList{getproviders.MustParseVersion("5.0")}, + getproviders.CurrentPlatform, + "terraform-package", // should be "terraform-provider-package" + ) + defer close() + if err != nil { + t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), version, err) + } + providerSource := getproviders.NewMockSource([]getproviders.PackageMeta{meta}, nil) + + m := Meta{ + testingOverrides: overrides, + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // invalid provider should be installed + packagePath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/invalid/package/1.0.0/%s/terraform-package", getproviders.CurrentPlatform) + if _, err := os.Stat(packagePath); os.IsNotExist(err) { + t.Fatal("provider 'invalid/package' not downloaded") + } + + wantErrors := 
[]string{ + "Failed to install provider", + "could not find executable file starting with terraform-provider-package", + } + got := ui.ErrorWriter.String() + for _, wantError := range wantErrors { + if !strings.Contains(got, wantError) { + t.Fatalf("missing error:\nwant: %q\ngot:\n%s", wantError, got) + } + } +} + +func TestInit_getProviderDetectedLegacy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-detected-legacy"), td) + defer testChdir(t, td)() + + // We need to construct a multisource with a mock source and a registry + // source: the mock source will return ErrRegistryProviderNotKnown for an + // unknown provider, and the registry source will allow us to look up the + // appropriate namespace if possible. + providerSource, psClose := newMockProviderSource(t, map[string][]string{ + "hashicorp/foo": {"1.2.3"}, + "terraform-providers/baz": {"2.3.4"}, // this will not be installed + }) + defer psClose() + registrySource, rsClose := testRegistrySource(t) + defer rsClose() + multiSource := getproviders.MultiSource{ + {Source: providerSource}, + {Source: registrySource}, + } + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + ProviderSource: multiSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + // foo should be installed + fooPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/hashicorp/foo/1.2.3/%s", getproviders.CurrentPlatform) + if _, err := os.Stat(fooPath); os.IsNotExist(err) { + t.Error("provider 'foo' not installed") + } + // baz should not be installed + bazPath := fmt.Sprintf(".terraform/providers/registry.opentofu.org/terraform-providers/baz/2.3.4/%s", getproviders.CurrentPlatform) + if 
_, err := os.Stat(bazPath); !os.IsNotExist(err) { + t.Error("provider 'baz' installed, but should not be") + } + + // error output is the main focus of this test + errOutput := ui.ErrorWriter.String() + errors := []string{ + "Failed to query available provider packages", + "Could not retrieve the list of available versions", + "registry.opentofu.org/hashicorp/baz", + "registry.opentofu.org/hashicorp/frob", + } + for _, want := range errors { + if !strings.Contains(errOutput, want) { + t.Fatalf("expected error %q: %s", want, errOutput) + } + } +} + +func TestInit_getProviderDetectedDuplicate(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-provider-detected-duplicate"), td) + defer testChdir(t, td)() + + // We need to construct a multisource with a mock source and a registry + // source: the mock source will return ErrRegistryProviderNotKnown for an + // unknown provider, and the registry source will allow us to look up the + // appropriate namespace if possible. 
+ providerSource, psClose := newMockProviderSource(t, map[string][]string{ + "hashicorp/foo": {"1.2.3"}, + "opentofu/foo": {"1.2.3"}, + "hashicorp/bar": {"1.2.3"}, + }) + defer psClose() + registrySource, rsClose := testRegistrySource(t) + defer rsClose() + multiSource := getproviders.MultiSource{ + {Source: providerSource}, + {Source: registrySource}, + } + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + ProviderSource: multiSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-backend=false", // should be possible to install plugins without backend init + } + if code := c.Run(args); code != 0 { + t.Fatalf("expected error, got output: \n%s\n%s", ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + // error output is the main focus of this test + errOutput := ui.ErrorWriter.String() + errors := []string{ + "Warning: Potential provider misconfiguration", + "OpenTofu has detected multiple providers of type foo", + "If this is intentional you can ignore this warning", + } + unexpected := []string{ + "OpenTofu has detected multiple providers of type bar", + } + for _, want := range errors { + if !strings.Contains(errOutput, want) { + t.Fatalf("expected error %q: %s", want, errOutput) + } + } + for _, unwanted := range unexpected { + if strings.Contains(errOutput, unwanted) { + t.Fatalf("unexpected error %q: %s", unwanted, errOutput) + } + } + +} + +func TestInit_providerSource(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-required-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3", "1.2.4"}, + "test-beta": {"1.2.4"}, + "source": {"1.2.2", "1.2.3", "1.2.1"}, + }) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + 
ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + if strings.Contains(ui.OutputWriter.String(), "OpenTofu has initialized, but configuration upgrades may be needed") { + t.Fatalf("unexpected \"configuration upgrade\" warning in output") + } + + cacheDir := m.providerLocalCacheDir() + gotPackages := cacheDir.AllAvailablePackages() + wantPackages := map[addrs.Provider][]providercache.CachedProvider{ + addrs.NewDefaultProvider("test"): { + { + Provider: addrs.NewDefaultProvider("test"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("test", "1.2.3", false), + }, + }, + addrs.NewDefaultProvider("test-beta"): { + { + Provider: addrs.NewDefaultProvider("test-beta"), + Version: getproviders.MustParseVersion("1.2.4"), + PackageDir: expectedPackageInstallPath("test-beta", "1.2.4", false), + }, + }, + addrs.NewDefaultProvider("source"): { + { + Provider: addrs.NewDefaultProvider("source"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("source", "1.2.3", false), + }, + }, + } + if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { + t.Errorf("wrong cache directory contents after upgrade\n%s", diff) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("test-beta"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("test-beta"), + getproviders.MustParseVersion("1.2.4"), + getproviders.MustParseVersionConstraints("= 1.2.4"), + []getproviders.Hash{ + getproviders.HashScheme1.New("vEthLkqAecdQimaW6JHZ0SBRNtHibLnOb31tX9ZXlcI="), + }, + ), + addrs.NewDefaultProvider("test"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("test"), + 
getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("8CjxaUBuegKZSFnRos39Fs+CS78ax0Dyb7aIA5XBiNI="), + }, + ), + addrs.NewDefaultProvider("source"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("source"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("ACYytVQ2Q6JfoEs7xxCqa1yGFf9HwF3SEHzJKBoJfo0="), + }, + ), + } + + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } + + if got, want := ui.OutputWriter.String(), "Installed hashicorp/test v1.2.3 (verified checksum)"; !strings.Contains(got, want) { + t.Fatalf("unexpected output: %s\nexpected to include %q", got, want) + } + if got, want := ui.ErrorWriter.String(), "\n - hashicorp/source\n - hashicorp/test\n - hashicorp/test-beta"; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_cancelModules(t *testing.T) { + // This test runs `tofu init` as if SIGINT (or similar on other + // platforms) were sent to it, testing that it is interruptible. + + td := t.TempDir() + testCopyDir(t, testFixturePath("init-registry-module"), td) + defer testChdir(t, td)() + + // Our shutdown channel is pre-closed so init will exit as soon as it + // starts a cancelable portion of the process. 
+ shutdownCh := make(chan struct{}) + close(shutdownCh) + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ShutdownCh: shutdownCh, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code == 0 { + t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) + } + + if got, want := ui.ErrorWriter.String(), `Module installation was canceled by an interrupt signal`; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_cancelProviders(t *testing.T) { + // This test runs `tofu init` as if SIGINT (or similar on other + // platforms) were sent to it, testing that it is interruptible. + + td := t.TempDir() + testCopyDir(t, testFixturePath("init-required-providers"), td) + defer testChdir(t, td)() + + // Use a provider source implementation which is designed to hang indefinitely, + // to avoid a race between the closed shutdown channel and the provider source + // operations. + providerSource := &getproviders.HangingSource{} + + // Our shutdown channel is pre-closed so init will exit as soon as it + // starts a cancelable portion of the process. + shutdownCh := make(chan struct{}) + close(shutdownCh) + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + ShutdownCh: shutdownCh, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + + if code := c.Run(args); code == 0 { + t.Fatalf("succeeded; wanted error\n%s", ui.OutputWriter.String()) + } + // Currently the first operation that is cancelable is provider + // installation, so our error message comes from there. If we + // make the earlier steps cancelable in future then it'd be + // expected for this particular message to change. 
+ if got, want := ui.ErrorWriter.String(), `Provider installation was canceled by an interrupt signal`; !strings.Contains(got, want) { + t.Fatalf("wrong error message\nshould contain: %s\ngot:\n%s", want, got) + } +} + +func TestInit_getUpgradePlugins(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for an exact version + "exact": {"1.2.3"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies > 1.0.0 , < 3.0.0 + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + installFakeProviderPackages(t, &m, map[string][]string{ + "exact": {"0.0.1"}, + "greater-than": {"2.3.3"}, + }) + + c := &InitCommand{ + Meta: m, + } + + args := []string{ + "-upgrade=true", + } + if code := c.Run(args); code != 0 { + t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) + } + + cacheDir := m.providerLocalCacheDir() + gotPackages := cacheDir.AllAvailablePackages() + wantPackages := map[addrs.Provider][]providercache.CachedProvider{ + // "between" wasn't previously installed at all, so we installed + // the newest available version that matched the version constraints. + addrs.NewDefaultProvider("between"): { + { + Provider: addrs.NewDefaultProvider("between"), + Version: getproviders.MustParseVersion("2.3.4"), + PackageDir: expectedPackageInstallPath("between", "2.3.4", false), + }, + }, + // The existing version of "exact" did not match the version constraints, + // so we installed what the configuration selected as well. 
+ addrs.NewDefaultProvider("exact"): { + { + Provider: addrs.NewDefaultProvider("exact"), + Version: getproviders.MustParseVersion("1.2.3"), + PackageDir: expectedPackageInstallPath("exact", "1.2.3", false), + }, + // Previous version is still there, but not selected + { + Provider: addrs.NewDefaultProvider("exact"), + Version: getproviders.MustParseVersion("0.0.1"), + PackageDir: expectedPackageInstallPath("exact", "0.0.1", false), + }, + }, + // The existing version of "greater-than" _did_ match the constraints, + // but a newer version was available and the user specified + // -upgrade and so we upgraded it anyway. + addrs.NewDefaultProvider("greater-than"): { + { + Provider: addrs.NewDefaultProvider("greater-than"), + Version: getproviders.MustParseVersion("2.3.4"), + PackageDir: expectedPackageInstallPath("greater-than", "2.3.4", false), + }, + // Previous version is still there, but not selected + { + Provider: addrs.NewDefaultProvider("greater-than"), + Version: getproviders.MustParseVersion("2.3.3"), + PackageDir: expectedPackageInstallPath("greater-than", "2.3.3", false), + }, + }, + } + if diff := cmp.Diff(wantPackages, gotPackages); diff != "" { + t.Errorf("wrong cache directory contents after upgrade\n%s", diff) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("between"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), + []getproviders.Hash{ + getproviders.HashScheme1.New("ntfa04OlRqIfGL/Gkd+nGMJSHGWyAgMQplFWk7WEsOk="), + }, + ), + addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("exact"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + 
[]getproviders.Hash{ + getproviders.HashScheme1.New("Xgk+LFrzi9Mop6+d01TCTaD3kgSrUASCAUU1aDsEsJU="), + }, + ), + addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("greater-than"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints(">= 2.3.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("8M5DXICmUiVjbkxNNO0zXNsV6duCVNWzq3/Kf0mNIo4="), + }, + ), + } + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } +} + +func TestInit_getProviderMissing(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + // looking for exact version 1.2.3 + "exact": {"1.2.4"}, + // config requires >= 2.3.3 + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, + // config specifies + "between": {"3.4.5", "2.3.4", "1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + if code := c.Run(args); code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), "no available releases match") { + t.Fatalf("unexpected error output: %s", ui.ErrorWriter) + } +} + +func TestInit_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +// Verify that init will error out with an invalid version constraint, even if +// there are other invalid configuration constructs. +func TestInit_checkRequiredVersionFirst(t *testing.T) { + t.Run("root_module", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version-first"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `Unsupported OpenTofu Core version`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + }) + t.Run("sub_module", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-check-required-version-first-module"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 
1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `Unsupported OpenTofu Core version`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + }) +} + +func TestInit_providerLockFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-provider-lock-file"), td) + // The temporary directory does not have write permission (dr-xr-xr-x) after the copy + defer os.Chmod(td, os.ModePerm) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + lockFile := ".terraform.lock.hcl" + buf, err := os.ReadFile(lockFile) + if err != nil { + t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) + } + buf = bytes.TrimSpace(buf) + // The hash in here is for the fake package that newMockProviderSource produces + // (so it'll change if newMockProviderSource starts producing different contents) + wantLockFile := strings.TrimSpace(` +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "h1:8CjxaUBuegKZSFnRos39Fs+CS78ax0Dyb7aIA5XBiNI=", + ] +} +`) + if diff := cmp.Diff(wantLockFile, string(buf)); diff != "" { + t.Errorf("wrong dependency lock file contents\n%s", diff) + } + + // Make the local directory read-only, and verify that rerunning init + // succeeds, to ensure that we don't try to rewrite an unchanged lock file + os.Chmod(".", 0555) + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_providerLockFileReadonly(t *testing.T) { + // The hash in here is for the fake package that newMockProviderSource produces + // (so it'll change if newMockProviderSource starts producing different contents) + inputLockFile := strings.TrimSpace(` +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "zh:6f85a1f747dd09455cd77683c0e06da647d8240461b8b36b304b9056814d91f2", + ] +} +`) + + badLockFile := strings.TrimSpace(` +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "zh:0000000000000000000000000000000000000000000000000000000000000000", + ] +} +`) + + updatedLockFile := strings.TrimSpace(` +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/test" { + version = "1.2.3" + constraints = "1.2.3" + hashes = [ + "h1:8CjxaUBuegKZSFnRos39Fs+CS78ax0Dyb7aIA5XBiNI=", + "zh:6f85a1f747dd09455cd77683c0e06da647d8240461b8b36b304b9056814d91f2", + ] +} +`) + + emptyUpdatedLockFile := strings.TrimSpace(` +# This file is maintained automatically by "tofu init". 
+# Manual edits may be lost in future updates. +`) + + cases := []struct { + desc string + fixture string + providers map[string][]string + input string + args []string + ok bool + want string + }{ + { + desc: "default", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{}, + ok: true, + want: updatedLockFile, + }, + { + desc: "unused provider", + fixture: "init-provider-now-unused", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{}, + ok: true, + want: emptyUpdatedLockFile, + }, + { + desc: "readonly", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: true, + want: inputLockFile, + }, + { + desc: "unused provider readonly", + fixture: "init-provider-now-unused", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: inputLockFile, + }, + { + desc: "conflict", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: inputLockFile, + args: []string{"-lockfile=readonly", "-upgrade"}, + ok: false, + want: inputLockFile, + }, + { + desc: "checksum mismatch", + fixture: "init-provider-lock-file", + providers: map[string][]string{"test": {"1.2.3"}}, + input: badLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: badLockFile, + }, + { + desc: "reject to change required provider dependences", + fixture: "init-provider-lock-file-readonly-add", + providers: map[string][]string{ + "test": {"1.2.3"}, + "foo": {"1.0.0"}, + }, + input: inputLockFile, + args: []string{"-lockfile=readonly"}, + ok: false, + want: inputLockFile, + }, + } + + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, 
testFixturePath(tc.fixture), td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, tc.providers) + defer close() + + ui := new(cli.MockUi) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + //write input lockfile + lockFile := ".terraform.lock.hcl" + if err := os.WriteFile(lockFile, []byte(tc.input), 0644); err != nil { + t.Fatalf("failed to write input lockfile: %s", err) + } + + code := c.Run(tc.args) + if tc.ok && code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + if !tc.ok && code == 0 { + t.Fatalf("expected error, got output: \n%s", ui.OutputWriter.String()) + } + + buf, err := os.ReadFile(lockFile) + if err != nil { + t.Fatalf("failed to read dependency lock file %s: %s", lockFile, err) + } + buf = bytes.TrimSpace(buf) + if diff := cmp.Diff(tc.want, string(buf)); diff != "" { + t.Errorf("wrong dependency lock file contents\n%s", diff) + } + }) + } +} + +func TestInit_pluginDirReset(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + }, + } + + // make our vendor paths + pluginPath := []string{"a", "b", "c"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // run once and save the -plugin-dir + args := []string{"-plugin-dir", "a"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + pluginDirs, err := c.loadPluginPath() + if err != nil { + t.Fatal(err) + } + + if len(pluginDirs) != 1 || pluginDirs[0] != "a" { + t.Fatalf(`expected plugin dir ["a"], got %q`, pluginDirs) 
+ } + + ui = new(cli.MockUi) + c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, // still empty + }, + } + + // make sure we remove the plugin-dir record + args = []string{"-plugin-dir="} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + pluginDirs, err = c.loadPluginPath() + if err != nil { + t.Fatal(err) + } + + if len(pluginDirs) != 0 { + t.Fatalf("expected no plugin dirs got %q", pluginDirs) + } +} + +// Test user-supplied -plugin-dir +func TestInit_pluginDirProviders(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + // make our vendor paths + pluginPath := []string{"a", "b", "c"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // We'll put some providers in our plugin dirs. To do this, we'll pretend + // for a moment that they are provider cache directories just because that + // allows us to lean on our existing test helper functions to do this. 
+ for i, def := range [][]string{ + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, + {"between", "2.3.4"}, + } { + name, version := def[0], def[1] + dir := providercache.NewDir(pluginPath[i]) + installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ + name: {version}, + }) + } + + args := []string{ + "-plugin-dir", "a", + "-plugin-dir", "b", + "-plugin-dir", "c", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter) + } + + locks, err := m.lockedDependencies() + if err != nil { + t.Fatalf("failed to get locked dependencies: %s", err) + } + gotProviderLocks := locks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + addrs.NewDefaultProvider("between"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("between"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints("> 1.0.0, < 3.0.0"), + []getproviders.Hash{ + getproviders.HashScheme1.New("ntfa04OlRqIfGL/Gkd+nGMJSHGWyAgMQplFWk7WEsOk="), + }, + ), + addrs.NewDefaultProvider("exact"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("exact"), + getproviders.MustParseVersion("1.2.3"), + getproviders.MustParseVersionConstraints("= 1.2.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("Xgk+LFrzi9Mop6+d01TCTaD3kgSrUASCAUU1aDsEsJU="), + }, + ), + addrs.NewDefaultProvider("greater-than"): depsfile.NewProviderLock( + addrs.NewDefaultProvider("greater-than"), + getproviders.MustParseVersion("2.3.4"), + getproviders.MustParseVersionConstraints(">= 2.3.3"), + []getproviders.Hash{ + getproviders.HashScheme1.New("8M5DXICmUiVjbkxNNO0zXNsV6duCVNWzq3/Kf0mNIo4="), + }, + ), + } + if diff := cmp.Diff(gotProviderLocks, wantProviderLocks, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong version selections after upgrade\n%s", diff) + } + + // -plugin-dir overrides the normal provider source, so it should not have + // seen any calls at all. 
+ if calls := providerSource.CallLog(); len(calls) > 0 { + t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) + } +} + +// Test user-supplied -plugin-dir doesn't allow auto-install +func TestInit_pluginDirProvidersDoesNotGet(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-get-providers"), td) + defer testChdir(t, td)() + + // Our provider source has a suitable package for "between" available, + // but we should ignore it because -plugin-dir is set and thus this + // source is temporarily overridden during install. + providerSource, close := newMockProviderSource(t, map[string][]string{ + "between": {"2.3.4"}, + }) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + // make our vendor paths + pluginPath := []string{"a", "b"} + for _, p := range pluginPath { + if err := os.MkdirAll(p, 0755); err != nil { + t.Fatal(err) + } + } + + // We'll put some providers in our plugin dirs. To do this, we'll pretend + // for a moment that they are provider cache directories just because that + // allows us to lean on our existing test helper functions to do this. 
+ for i, def := range [][]string{ + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, + } { + name, version := def[0], def[1] + dir := providercache.NewDir(pluginPath[i]) + installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ + name: {version}, + }) + } + + args := []string{ + "-plugin-dir", "a", + "-plugin-dir", "b", + } + if code := c.Run(args); code == 0 { + // should have been an error + t.Fatalf("succeeded; want error\nstdout:\n%s\nstderr\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + // The error output should mention the "between" provider but should not + // mention either the "exact" or "greater-than" provider, because the + // latter two are available via the -plugin-dir directories. + errStr := ui.ErrorWriter.String() + if subStr := "hashicorp/between"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the 'between' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "hashicorp/exact"; strings.Contains(errStr, subStr) { + t.Errorf("error output should not mention the 'exact' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "hashicorp/greater-than"; strings.Contains(errStr, subStr) { + t.Errorf("error output should not mention the 'greater-than' provider\ndo not want substr: %s\ngot:\n%s", subStr, errStr) + } + + if calls := providerSource.CallLog(); len(calls) > 0 { + t.Errorf("unexpected provider source calls (want none)\n%s", spew.Sdump(calls)) + } +} + +// Verify that plugin-dir doesn't prevent discovery of internal providers +func TestInit_pluginDirWithBuiltIn(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-internal"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + 
} + + c := &InitCommand{ + Meta: m, + } + + args := []string{"-plugin-dir", "./"} + if code := c.Run(args); code != 0 { + t.Fatalf("error: %s", ui.ErrorWriter) + } + + outputStr := ui.OutputWriter.String() + if subStr := "terraform.io/builtin/terraform is built in to OpenTofu"; !strings.Contains(outputStr, subStr) { + t.Errorf("output should mention the tofu provider\nwant substr: %s\ngot:\n%s", subStr, outputStr) + } +} + +func TestInit_invalidBuiltInProviders(t *testing.T) { + // This test fixture includes two invalid provider dependencies: + // - an implied dependency on terraform.io/builtin/terraform with an + // explicit version number, which is not allowed because it's builtin. + // - an explicit dependency on terraform.io/builtin/nonexist, which does + // not exist at all. + td := t.TempDir() + testCopyDir(t, testFixturePath("init-internal-invalid"), td) + defer testChdir(t, td)() + + // An empty provider source + providerSource, close := newMockProviderSource(t, nil) + defer close() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + ProviderSource: providerSource, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "Cannot use terraform.io/builtin/terraform: built-in"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the terraform provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Cannot use terraform.io/builtin/nonexist: this OpenTofu release"; !strings.Contains(errStr, subStr) { + t.Errorf("error output should mention the 'nonexist' provider\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxNoBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, 
testFixturePath("init-syntax-invalid-no-backend"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "OpenTofu encountered problems during initialization, including problems\nwith the configuration, described below."; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxWithBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-syntax-invalid-with-backend"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "OpenTofu encountered problems during initialization, including problems\nwith the configuration, described below."; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the syntax problem\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxInvalidBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-syntax-invalid-backend-invalid"), td) + defer testChdir(t, 
td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "OpenTofu encountered problems during initialization, including problems\nwith the configuration, described below."; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported block type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention syntax errors\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Unsupported backend type"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the invalid backend\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_invalidSyntaxBackendAttribute(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-syntax-invalid-backend-attribute-invalid"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + m := Meta{ + Ui: ui, + View: view, + } + + c := &InitCommand{ + Meta: m, + } + + if code := c.Run(nil); code == 0 { + t.Fatalf("succeeded, but was expecting error\nstdout:\n%s\nstderr:\n%s", ui.OutputWriter, ui.ErrorWriter) + } + + errStr := ui.ErrorWriter.String() + if subStr := "OpenTofu encountered problems during initialization, including problems\nwith the configuration, described below."; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should include preamble\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Invalid character"; !strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the invalid character\nwant substr: %s\ngot:\n%s", subStr, errStr) + } + if subStr := "Error: Invalid expression"; 
!strings.Contains(errStr, subStr) { + t.Errorf("Error output should mention the invalid expression\nwant substr: %s\ngot:\n%s", subStr, errStr) + } +} + +func TestInit_tests(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-with-tests"), td) + defer testChdir(t, td)() + + provider := applyFixtureProvider() // We just want the types from this provider. + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "hashicorp/test": {"1.0.0"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider), + Ui: ui, + View: view, + ProviderSource: providerSource, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } +} + +func TestInit_testsWithProvider(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-with-tests-with-provider"), td) + defer testChdir(t, td)() + + provider := applyFixtureProvider() // We just want the types from this provider. 
+ + providerSource, close := newMockProviderSource(t, map[string][]string{ + "hashicorp/test": {"1.0.0"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider), + Ui: ui, + View: view, + ProviderSource: providerSource, + }, + } + + args := []string{} + if code := c.Run(args); code == 0 { + t.Fatalf("expected failure but got: \n%s", ui.OutputWriter.String()) + } + + got := ui.ErrorWriter.String() + want := ` +Error: Failed to resolve provider packages + +Could not resolve provider hashicorp/test: no available releases match the +given constraints 1.0.1, 1.0.2 + +` + if diff := cmp.Diff(got, want); len(diff) > 0 { + t.Fatalf("wrong error message: \ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestInit_testsWithModule(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-with-tests-with-module"), td) + defer testChdir(t, td)() + + provider := applyFixtureProvider() // We just want the types from this provider. 
+ + providerSource, close := newMockProviderSource(t, map[string][]string{ + "hashicorp/test": {"1.0.0"}, + }) + defer close() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider), + Ui: ui, + View: view, + ProviderSource: providerSource, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // Check output + output := ui.OutputWriter.String() + if !strings.Contains(output, "test.main.setup in setup") { + t.Fatalf("doesn't look like we installed the test module': %s", output) + } +} + +// Test variables are handled correctly when interacting with module sources +func TestInit_moduleSource(t *testing.T) { + t.Run("missing", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-module-variable-source"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + if code := c.Run(nil); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `Variable not provided`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + }) + + t.Run("provided", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-module-variable-source"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{"-var", "src=./mod"} + if code := c.Run(args); code != 0 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + }) +} + +// Test variables are handled correctly when interacting with module versions +func 
TestInit_moduleVersion(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("network access not allowed; use TF_ACC=1 to enable") + } + + t.Run("provided", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-module-variable-version"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{"-var", "modver=0.0.1"} + if code := c.Run(args); code != 0 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + }) +} + +// newMockProviderSource is a helper to succinctly construct a mock provider +// source that contains a set of packages matching the given provider versions +// that are available for installation (from temporary local files). +// +// The caller must call the returned close callback once the source is no +// longer needed, at which point it will clean up all of the temporary files +// and the packages in the source will no longer be available for installation. +// +// Provider addresses must be valid source strings, and passing only the +// provider name will be interpreted as a "default" provider under +// registry.opentofu.org/hashicorp. If you need more control over the +// provider addresses, pass a full provider source string. +// +// This function also registers providers as belonging to the current platform, +// to ensure that they will be available to a provider installer operating in +// its default configuration. +// +// In case of any errors while constructing the source, this function will +// abort the current test using the given testing.T. Therefore a caller can +// assume that if this function returns then the result is valid and ready +// to use. 
+func newMockProviderSource(t *testing.T, availableProviderVersions map[string][]string) (source *getproviders.MockSource, close func()) { + t.Helper() + var packages []getproviders.PackageMeta + var closes []func() + close = func() { + for _, f := range closes { + f() + } + } + for source, versions := range availableProviderVersions { + addr := addrs.MustParseProviderSourceString(source) + for _, versionStr := range versions { + version, err := getproviders.ParseVersion(versionStr) + if err != nil { + close() + t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, addr.ForDisplay(), err) + } + meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") + if err != nil { + close() + t.Fatalf("failed to prepare fake package for %s %s: %s", addr.ForDisplay(), versionStr, err) + } + closes = append(closes, close) + packages = append(packages, meta) + } + } + + return getproviders.NewMockSource(packages, nil), close +} + +// installFakeProviderPackages installs a fake package for the given provider +// names (interpreted as a "default" provider address) and versions into the +// local plugin cache for the given "meta". +// +// Any test using this must be using testChdir or some similar mechanism to +// make sure that it isn't writing directly into a test fixture or source +// directory within the codebase. +// +// If a requested package cannot be installed for some reason, this function +// will abort the test using the given testing.T. Therefore if this function +// returns the caller can assume that the requested providers have been +// installed. 
+func installFakeProviderPackages(t *testing.T, meta *Meta, providerVersions map[string][]string) { + t.Helper() + + cacheDir := meta.providerLocalCacheDir() + installFakeProviderPackagesElsewhere(t, cacheDir, providerVersions) +} + +// installFakeProviderPackagesElsewhere is a variant of installFakeProviderPackages +// that will install packages into the given provider cache directory, rather +// than forcing the use of the local cache of the current "Meta". +func installFakeProviderPackagesElsewhere(t *testing.T, cacheDir *providercache.Dir, providerVersions map[string][]string) { + t.Helper() + + // It can be hard to spot the mistake of forgetting to run testChdir before + // modifying the working directory, so we'll use a simple heuristic here + // to try to detect that mistake and make a noisy error about it instead. + wd, err := os.Getwd() + if err == nil { + wd = filepath.Clean(wd) + // If the directory we're in is named "command" or if we're under a + // directory named "testdata" then we'll assume a mistake and generate + // an error. This will cause the test to fail but won't block it from + // running. + if filepath.Base(wd) == "command" || filepath.Base(wd) == "testdata" || strings.Contains(filepath.ToSlash(wd), "/testdata/") { + t.Errorf("installFakeProviderPackage may be used only by tests that switch to a temporary working directory, e.g. using testChdir") + } + } + + for name, versions := range providerVersions { + addr := addrs.NewDefaultProvider(name) + for _, versionStr := range versions { + version, err := getproviders.ParseVersion(versionStr) + if err != nil { + t.Fatalf("failed to parse %q as a version number for %q: %s", versionStr, name, err) + } + meta, close, err := getproviders.FakeInstallablePackageMeta(addr, version, getproviders.VersionList{getproviders.MustParseVersion("5.0")}, getproviders.CurrentPlatform, "") + // We're going to install all these fake packages before we return, + // so we don't need to preserve them afterwards. 
+ defer close() + if err != nil { + t.Fatalf("failed to prepare fake package for %s %s: %s", name, versionStr, err) + } + _, err = cacheDir.InstallPackage(context.Background(), meta, nil) + if err != nil { + t.Fatalf("failed to install fake package for %s %s: %s", name, versionStr, err) + } + } + } +} + +// expectedPackageInstallPath is a companion to installFakeProviderPackages +// that returns the path where the provider with the given name and version +// would be installed and, relatedly, where the installer will expect to +// find an already-installed version. +// +// Just as with installFakeProviderPackages, this function is a shortcut helper +// for "default-namespaced" providers as we commonly use in tests. If you need +// more control over the provider addresses, use functions of the underlying +// getproviders and providercache packages instead. +// +// The result always uses forward slashes, even on Windows, for consistency +// with how the getproviders and providercache packages build paths. +func expectedPackageInstallPath(name, version string, exe bool) string { + platform := getproviders.CurrentPlatform + baseDir := ".terraform/providers" + if exe { + p := fmt.Sprintf("registry.opentofu.org/hashicorp/%s/%s/%s/terraform-provider-%s_%s", name, version, platform, name, version) + if platform.OS == "windows" { + p += ".exe" + } + return filepath.ToSlash(filepath.Join(baseDir, p)) + } + return filepath.ToSlash(filepath.Join( + baseDir, fmt.Sprintf("registry.opentofu.org/hashicorp/%s/%s/%s", name, version, platform), + )) +} diff --git a/pkg/command/jsonchecks/checks.go b/pkg/command/jsonchecks/checks.go new file mode 100644 index 00000000000..486a5817555 --- /dev/null +++ b/pkg/command/jsonchecks/checks.go @@ -0,0 +1,121 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonchecks + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/kubegems/opentofu/pkg/states" +) + +// MarshalCheckStates is the main entry-point for this package, which takes +// the top-level model object for checks in state and plan, and returns a +// JSON representation of it suitable for use in public integration points. +func MarshalCheckStates(results *states.CheckResults) []byte { + jsonResults := make([]checkResultStatic, 0, results.ConfigResults.Len()) + + for _, elem := range results.ConfigResults.Elems { + staticAddr := elem.Key + aggrResult := elem.Value + + objects := make([]checkResultDynamic, 0, aggrResult.ObjectResults.Len()) + for _, elem := range aggrResult.ObjectResults.Elems { + dynamicAddr := elem.Key + result := elem.Value + + problems := make([]checkProblem, 0, len(result.FailureMessages)) + for _, msg := range result.FailureMessages { + problems = append(problems, checkProblem{ + Message: msg, + }) + } + sort.Slice(problems, func(i, j int) bool { + return problems[i].Message < problems[j].Message + }) + + objects = append(objects, checkResultDynamic{ + Address: makeDynamicObjectAddr(dynamicAddr), + Status: checkStatusForJSON(result.Status), + Problems: problems, + }) + } + + sort.Slice(objects, func(i, j int) bool { + return objects[i].Address["to_display"].(string) < objects[j].Address["to_display"].(string) + }) + + jsonResults = append(jsonResults, checkResultStatic{ + Address: makeStaticObjectAddr(staticAddr), + Status: checkStatusForJSON(aggrResult.Status), + Instances: objects, + }) + } + + sort.Slice(jsonResults, func(i, j int) bool { + return jsonResults[i].Address["to_display"].(string) < jsonResults[j].Address["to_display"].(string) + }) + + ret, err := json.Marshal(jsonResults) + if err != nil { + // We totally control the input to json.Marshal, so any error here + // is a bug in the code above. 
+ panic(fmt.Sprintf("invalid input to json.Marshal: %s", err)) + } + return ret +} + +// checkResultStatic is the container for the static, configuration-driven +// idea of "checkable object" -- a resource block with conditions, for example -- +// which ensures that we can always say _something_ about each checkable +// object in the configuration even if OpenTofu Core encountered an error +// before being able to determine the dynamic instances of the checkable object. +type checkResultStatic struct { + // Address is the address of the checkable object this result relates to. + Address staticObjectAddr `json:"address"` + + // Status is the aggregate status for all of the dynamic objects belonging + // to this static object. + Status checkStatus `json:"status"` + + // Instances contains the results for each individual dynamic object that + // belongs to this static object. + Instances []checkResultDynamic `json:"instances,omitempty"` +} + +// checkResultDynamic describes the check result for a dynamic object, which +// results from OpenTofu Core evaluating the "expansion" (e.g. count or for_each) +// of the containing object or its own containing module(s). +type checkResultDynamic struct { + // Address augments the Address of the containing checkResultStatic with + // instance-specific extra properties or overridden properties. + Address dynamicObjectAddr `json:"address"` + + // Status is the status for this specific dynamic object. + Status checkStatus `json:"status"` + + // Problems describes some optional details associated with a failure + // status, describing what fails. + // + // This does not include the errors for status "error", because OpenTofu + // Core emits those separately as normal diagnostics. However, if a + // particular object has a mixture of conditions that failed and conditions + // that were invalid then status can be "error" while simultaneously + // returning problems in this property. 
+ Problems []checkProblem `json:"problems,omitempty"` +} + +// checkProblem describes one of potentially several problems that led to +// a check being classified as status "fail". +type checkProblem struct { + // Message is the condition error message provided by the author. + Message string `json:"message"` + + // We don't currently have any other problem-related data, but this is + // intentionally an object to allow us to add other data over time, such + // as the source location where the failing condition was defined. +} diff --git a/pkg/command/jsonchecks/checks_test.go b/pkg/command/jsonchecks/checks_test.go new file mode 100644 index 00000000000..31501b9b10d --- /dev/null +++ b/pkg/command/jsonchecks/checks_test.go @@ -0,0 +1,245 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonchecks + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestMarshalCheckStates(t *testing.T) { + resourceAAddr := addrs.ConfigCheckable(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }.InModule(addrs.RootModule)) + resourceAInstAddr := addrs.Checkable(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "a", + }.Instance(addrs.StringKey("foo")).Absolute(addrs.RootModuleInstance)) + moduleChildAddr := addrs.RootModuleInstance.Child("child", addrs.IntKey(0)) + resourceBAddr := addrs.ConfigCheckable(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "b", + }.InModule(moduleChildAddr.Module())) + resourceBInstAddr := addrs.Checkable(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "b", + }.Instance(addrs.NoKey).Absolute(moduleChildAddr)) + outputAAddr := 
addrs.ConfigCheckable(addrs.OutputValue{Name: "a"}.InModule(addrs.RootModule)) + outputAInstAddr := addrs.Checkable(addrs.OutputValue{Name: "a"}.Absolute(addrs.RootModuleInstance)) + outputBAddr := addrs.ConfigCheckable(addrs.OutputValue{Name: "b"}.InModule(moduleChildAddr.Module())) + outputBInstAddr := addrs.Checkable(addrs.OutputValue{Name: "b"}.Absolute(moduleChildAddr)) + checkBlockAAddr := addrs.ConfigCheckable(addrs.Check{Name: "a"}.InModule(addrs.RootModule)) + checkBlockAInstAddr := addrs.Checkable(addrs.Check{Name: "a"}.Absolute(addrs.RootModuleInstance)) + + tests := map[string]struct { + Input *states.CheckResults + Want any + }{ + "empty": { + &states.CheckResults{}, + []any{}, + }, + "failures": { + &states.CheckResults{ + ConfigResults: addrs.MakeMap( + addrs.MakeMapElem(resourceAAddr, &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem(resourceAInstAddr, &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{ + "Not enough boops.", + "Too many beeps.", + }, + }), + ), + }), + addrs.MakeMapElem(resourceBAddr, &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem(resourceBInstAddr, &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{ + "Splines are too pointy.", + }, + }), + ), + }), + addrs.MakeMapElem(outputAAddr, &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem(outputAInstAddr, &states.CheckResultObject{ + Status: checks.StatusFail, + }), + ), + }), + addrs.MakeMapElem(outputBAddr, &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem(outputBInstAddr, &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{ + "Not object-oriented enough.", + }, + }), + ), + }), + addrs.MakeMapElem(checkBlockAAddr, &states.CheckResultAggregate{ + 
Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem(checkBlockAInstAddr, &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{ + "Couldn't reverse the polarity.", + }, + }), + ), + }), + ), + }, + []any{ + map[string]any{ + "address": map[string]any{ + "kind": "check", + "to_display": "check.a", + "name": "a", + }, + "instances": []any{ + map[string]any{ + "address": map[string]any{ + "to_display": `check.a`, + }, + "problems": []any{ + map[string]any{ + "message": "Couldn't reverse the polarity.", + }, + }, + "status": "fail", + }, + }, + "status": "fail", + }, + map[string]any{ + "address": map[string]any{ + "kind": "output_value", + "module": "module.child", + "name": "b", + "to_display": "module.child.output.b", + }, + "instances": []any{ + map[string]any{ + "address": map[string]any{ + "module": "module.child[0]", + "to_display": "module.child[0].output.b", + }, + "problems": []any{ + map[string]any{ + "message": "Not object-oriented enough.", + }, + }, + "status": "fail", + }, + }, + "status": "fail", + }, + map[string]any{ + "address": map[string]any{ + "kind": "resource", + "mode": "managed", + "module": "module.child", + "name": "b", + "to_display": "module.child.test.b", + "type": "test", + }, + "instances": []any{ + map[string]any{ + "address": map[string]any{ + "module": "module.child[0]", + "to_display": "module.child[0].test.b", + }, + "problems": []any{ + map[string]any{ + "message": "Splines are too pointy.", + }, + }, + "status": "fail", + }, + }, + "status": "fail", + }, + map[string]any{ + "address": map[string]any{ + "kind": "output_value", + "name": "a", + "to_display": "output.a", + }, + "instances": []any{ + map[string]any{ + "address": map[string]any{ + "to_display": "output.a", + }, + "status": "fail", + }, + }, + "status": "fail", + }, + map[string]any{ + "address": map[string]any{ + "kind": "resource", + "mode": "managed", + "name": "a", + "to_display": "test.a", + "type": "test", 
+ }, + "instances": []any{ + map[string]any{ + "address": map[string]any{ + "to_display": `test.a["foo"]`, + "instance_key": "foo", + }, + "problems": []any{ + map[string]any{ + "message": "Not enough boops.", + }, + map[string]any{ + "message": "Too many beeps.", + }, + }, + "status": "fail", + }, + }, + "status": "fail", + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotBytes := MarshalCheckStates(test.Input) + + var got any + err := json.Unmarshal(gotBytes, &got) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(test.Want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/pkg/command/jsonchecks/doc.go b/pkg/command/jsonchecks/doc.go new file mode 100644 index 00000000000..3f7b68b9859 --- /dev/null +++ b/pkg/command/jsonchecks/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package jsonchecks implements the common JSON representation of check +// results/statuses that we use across both the JSON plan and JSON state +// representations. +package jsonchecks diff --git a/pkg/command/jsonchecks/objects.go b/pkg/command/jsonchecks/objects.go new file mode 100644 index 00000000000..d244fd8357c --- /dev/null +++ b/pkg/command/jsonchecks/objects.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonchecks + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +type staticObjectAddr map[string]interface{} + +func makeStaticObjectAddr(addr addrs.ConfigCheckable) staticObjectAddr { + ret := map[string]interface{}{ + "to_display": addr.String(), + } + + switch addr := addr.(type) { + case addrs.ConfigResource: + if kind := addr.CheckableKind(); kind != addrs.CheckableResource { + // Something has gone very wrong + panic(fmt.Sprintf("%T has CheckableKind %s", addr, kind)) + } + + ret["kind"] = "resource" + switch addr.Resource.Mode { + case addrs.ManagedResourceMode: + ret["mode"] = "managed" + case addrs.DataResourceMode: + ret["mode"] = "data" + default: + panic(fmt.Sprintf("unsupported resource mode %#v", addr.Resource.Mode)) + } + ret["type"] = addr.Resource.Type + ret["name"] = addr.Resource.Name + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + case addrs.ConfigOutputValue: + if kind := addr.CheckableKind(); kind != addrs.CheckableOutputValue { + // Something has gone very wrong + panic(fmt.Sprintf("%T has CheckableKind %s", addr, kind)) + } + + ret["kind"] = "output_value" + ret["name"] = addr.OutputValue.Name + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + case addrs.ConfigCheck: + if kind := addr.CheckableKind(); kind != addrs.CheckableCheck { + // Something has gone very wrong + panic(fmt.Sprintf("%T has CheckableKind %s", addr, kind)) + } + + ret["kind"] = "check" + ret["name"] = addr.Check.Name + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + case addrs.ConfigInputVariable: + if kind := addr.CheckableKind(); kind != addrs.CheckableInputVariable { + // Something has gone very wrong + panic(fmt.Sprintf("%T has CheckableKind %s", addr, kind)) + } + + ret["kind"] = "var" + ret["name"] = addr.Variable.Name + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + default: + 
panic(fmt.Sprintf("unsupported ConfigCheckable implementation %T", addr)) + } + + return ret +} + +type dynamicObjectAddr map[string]interface{} + +func makeDynamicObjectAddr(addr addrs.Checkable) dynamicObjectAddr { + ret := map[string]interface{}{ + "to_display": addr.String(), + } + + switch addr := addr.(type) { + case addrs.AbsResourceInstance: + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + if addr.Resource.Key != addrs.NoKey { + ret["instance_key"] = addr.Resource.Key + } + case addrs.AbsOutputValue: + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + case addrs.AbsCheck: + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + case addrs.AbsInputVariableInstance: + if !addr.Module.IsRoot() { + ret["module"] = addr.Module.String() + } + default: + panic(fmt.Sprintf("unsupported Checkable implementation %T", addr)) + } + + return ret +} diff --git a/pkg/command/jsonchecks/status.go b/pkg/command/jsonchecks/status.go new file mode 100644 index 00000000000..364bcd8cf05 --- /dev/null +++ b/pkg/command/jsonchecks/status.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonchecks + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/checks" +) + +type checkStatus []byte + +func checkStatusForJSON(s checks.Status) checkStatus { + if ret, ok := checkStatuses[s]; ok { + return ret + } + panic(fmt.Sprintf("unsupported check status %#v", s)) +} + +func (s checkStatus) MarshalJSON() ([]byte, error) { + return []byte(s), nil +} + +var checkStatuses = map[checks.Status]checkStatus{ + checks.StatusPass: checkStatus(`"pass"`), + checks.StatusFail: checkStatus(`"fail"`), + checks.StatusError: checkStatus(`"error"`), + checks.StatusUnknown: checkStatus(`"unknown"`), +} diff --git a/pkg/command/jsonconfig/config.go b/pkg/command/jsonconfig/config.go new file mode 100644 index 00000000000..7c3f3756e8d --- /dev/null +++ b/pkg/command/jsonconfig/config.go @@ -0,0 +1,570 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonconfig + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Config represents the complete configuration source +type config struct { + ProviderConfigs map[string]providerConfig `json:"provider_config,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +// ProviderConfig describes all of the provider configurations throughout the +// configuration tree, flattened into a single map for convenience since +// provider configurations are the one concept in OpenTofu that can span across +// module boundaries. 
+type providerConfig struct { + Name string `json:"name,omitempty"` + FullName string `json:"full_name,omitempty"` + Alias string `json:"alias,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + ModuleAddress string `json:"module_address,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` + parentKey string +} + +type module struct { + Outputs map[string]output `json:"outputs,omitempty"` + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []resource `json:"resources,omitempty"` + ModuleCalls map[string]moduleCall `json:"module_calls,omitempty"` + Variables variables `json:"variables,omitempty"` +} + +type moduleCall struct { + Source string `json:"source,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + Module module `json:"module,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` +} + +// variables is the JSON representation of the variables provided to the current +// plan. +type variables map[string]*variable + +type variable struct { + Default json.RawMessage `json:"default,omitempty"` + Description string `json:"description,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +// Resource is the representation of a resource in the config +type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // ProviderConfigKey is the key into "provider_configs" (shown above) for + // the provider configuration that this resource is associated with. 
+ // + // NOTE: If a given resource is in a ModuleCall, and the provider was + // configured outside of the module (in a higher level configuration file), + // the ProviderConfigKey will not match a key in the ProviderConfigs map. + ProviderConfigKey string `json:"provider_config_key,omitempty"` + + // Provisioners is an optional field which describes any provisioners. + // Connection info will not be included here. + Provisioners []provisioner `json:"provisioners,omitempty"` + + // Expressions" describes the resource-type-specific content of the + // configuration block. + Expressions map[string]interface{} `json:"expressions,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // CountExpression and ForEachExpression describe the expressions given for + // the corresponding meta-arguments in the resource configuration block. + // These are omitted if the corresponding argument isn't set. + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + + DependsOn []string `json:"depends_on,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive,omitempty"` + Expression expression `json:"expression,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` + Description string `json:"description,omitempty"` +} + +type provisioner struct { + Type string `json:"type,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` +} + +// Marshal returns the json encoding of tofu configuration. 
+func Marshal(c *configs.Config, schemas *tofu.Schemas) ([]byte, error) { + var output config + + pcs := make(map[string]providerConfig) + marshalProviderConfigs(c, schemas, pcs) + + rootModule, err := marshalModule(c, schemas, "") + if err != nil { + return nil, err + } + output.RootModule = rootModule + + normalizeModuleProviderKeys(&rootModule, pcs) + + for name, pc := range pcs { + if pc.parentKey != "" { + delete(pcs, name) + } + } + output.ProviderConfigs = pcs + + ret, err := json.Marshal(output) + return ret, err +} + +func marshalProviderConfigs( + c *configs.Config, + schemas *tofu.Schemas, + m map[string]providerConfig, +) { + if c == nil { + return + } + + // We want to determine only the provider requirements from this module, + // ignoring any descendants. Disregard any diagnostics when determining + // requirements because we want this marshalling to succeed even if there + // are invalid constraints. + reqs, _ := c.ProviderRequirementsShallow() + + // Add an entry for each provider configuration block in the module. + for k, pc := range c.Module.ProviderConfigs { + providerFqn := c.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: pc.Name}) + schema := schemas.ProviderConfig(providerFqn) + + p := providerConfig{ + Name: pc.Name, + FullName: providerFqn.String(), + Alias: pc.Alias, + ModuleAddress: c.Path.String(), + Expressions: marshalExpressions(pc.Config, schema), + } + + // Store the fully resolved provider version constraint, rather than + // using the version argument in the configuration block. This is both + // future proof (for when we finish the deprecation of the provider config + // version argument) and more accurate (as it reflects the full set of + // constraints, in case there are multiple). 
+ if vc, ok := reqs[providerFqn]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + key := opaqueProviderKey(k, c.Path.String()) + + m[key] = p + } + + // Ensure that any required providers with no associated configuration + // block are included in the set. + for k, pr := range c.Module.ProviderRequirements.RequiredProviders { + // If a provider has aliases defined, process those first. + for _, alias := range pr.Aliases { + // If there exists a value for this provider, we have nothing to add + // to it, so skip. + key := opaqueProviderKey(alias.StringCompact(), c.Path.String()) + if _, exists := m[key]; exists { + continue + } + // Given no provider configuration block exists, the only fields we can + // fill here are the local name, FQN, module address, and version + // constraints. + p := providerConfig{ + Name: pr.Name, + FullName: pr.Type.String(), + ModuleAddress: c.Path.String(), + } + + if vc, ok := reqs[pr.Type]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + m[key] = p + } + + // If there exists a value for this provider, we have nothing to add + // to it, so skip. + key := opaqueProviderKey(k, c.Path.String()) + if _, exists := m[key]; exists { + continue + } + + // Given no provider configuration block exists, the only fields we can + // fill here are the local name, module address, and version + // constraints. + p := providerConfig{ + Name: pr.Name, + FullName: pr.Type.String(), + ModuleAddress: c.Path.String(), + } + + if vc, ok := reqs[pr.Type]; ok { + p.VersionConstraint = getproviders.VersionConstraintsString(vc) + } + + if c.Parent != nil { + parentKey := opaqueProviderKey(pr.Name, c.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + } + + m[key] = p + } + + // Providers could be implicitly created or inherited from the parent module + // when no requirements and configuration block defined. 
+ for req := range reqs { + // Only default providers could implicitly exist, + // so the provider name must be same as the provider type. + key := opaqueProviderKey(req.Type, c.Path.String()) + if _, exists := m[key]; exists { + continue + } + + p := providerConfig{ + Name: req.Type, + FullName: req.String(), + ModuleAddress: c.Path.String(), + } + + // In child modules, providers defined in the parent module can be implicitly used. + if c.Parent != nil { + parentKey := opaqueProviderKey(req.Type, c.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + } + + m[key] = p + } + + // Must also visit our child modules, recursively. + for name, mc := range c.Module.ModuleCalls { + // Keys in c.Children are guaranteed to match those in c.Module.ModuleCalls + cc := c.Children[name] + + // Add provider config map entries for passed provider configs, + // pointing at the passed configuration + for _, ppc := range mc.Providers { + // These provider names include aliases, if set + moduleProviderName := ppc.InChild.String() + parentProviderName := ppc.InParent.String() + + // Look up the provider FQN from the module context, using the non-aliased local name + providerFqn := cc.ProviderForConfigAddr(addrs.LocalProviderConfig{LocalName: ppc.InChild.Name}) + + // The presence of passed provider configs means that we cannot have + // any configuration expressions or version constraints here + p := providerConfig{ + Name: moduleProviderName, + FullName: providerFqn.String(), + ModuleAddress: cc.Path.String(), + } + + key := opaqueProviderKey(moduleProviderName, cc.Path.String()) + parentKey := opaqueProviderKey(parentProviderName, cc.Parent.Path.String()) + p.parentKey = findSourceProviderKey(parentKey, p.FullName, m) + + m[key] = p + } + + // Finally, marshal any other provider configs within the called module. + // It is safe to do this last because it is invalid to configure a + // provider which has passed provider configs in the module call. 
+ marshalProviderConfigs(cc, schemas, m) + } +} + +func marshalModule(c *configs.Config, schemas *tofu.Schemas, addr string) (module, error) { + var module module + var rs []resource + + managedResources, err := marshalResources(c.Module.ManagedResources, schemas, addr) + if err != nil { + return module, err + } + dataResources, err := marshalResources(c.Module.DataResources, schemas, addr) + if err != nil { + return module, err + } + + rs = append(managedResources, dataResources...) + module.Resources = rs + + outputs := make(map[string]output) + for _, v := range c.Module.Outputs { + o := output{ + Sensitive: v.Sensitive, + Expression: marshalExpression(v.Expr), + } + if v.Description != "" { + o.Description = v.Description + } + if len(v.DependsOn) > 0 { + dependencies := make([]string, len(v.DependsOn)) + for i, d := range v.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `tofu validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. 
+ if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + o.DependsOn = dependencies + } + + outputs[v.Name] = o + } + module.Outputs = outputs + + module.ModuleCalls = marshalModuleCalls(c, schemas) + + if len(c.Module.Variables) > 0 { + vars := make(variables, len(c.Module.Variables)) + for k, v := range c.Module.Variables { + var defaultValJSON []byte + if v.Default == cty.NilVal { + defaultValJSON = nil + } else { + defaultValJSON, err = ctyjson.Marshal(v.Default, v.Default.Type()) + if err != nil { + return module, err + } + } + vars[k] = &variable{ + Default: defaultValJSON, + Description: v.Description, + Sensitive: v.Sensitive, + } + } + module.Variables = vars + } + + return module, nil +} + +func marshalModuleCalls(c *configs.Config, schemas *tofu.Schemas) map[string]moduleCall { + ret := make(map[string]moduleCall) + + for name, mc := range c.Module.ModuleCalls { + mcConfig := c.Children[name] + ret[name] = marshalModuleCall(mcConfig, mc, schemas) + } + + return ret +} + +func marshalModuleCall(c *configs.Config, mc *configs.ModuleCall, schemas *tofu.Schemas) moduleCall { + // It is possible to have a module call with a nil config. + if c == nil { + return moduleCall{} + } + + ret := moduleCall{ + // We're intentionally echoing back exactly what the user entered + // here, rather than the normalized version in SourceAddr, because + // historically we only _had_ the raw address and thus it would be + // a (admittedly minor) breaking change to start normalizing them + // now, in case consumers of this data are expecting a particular + // non-normalized syntax. 
+ Source: mc.SourceAddrRaw, + VersionConstraint: mc.Version.Required.String(), + } + cExp := marshalExpression(mc.Count) + if !cExp.Empty() { + ret.CountExpression = &cExp + } else { + fExp := marshalExpression(mc.ForEach) + if !fExp.Empty() { + ret.ForEachExpression = &fExp + } + } + + schema := &configschema.Block{} + schema.Attributes = make(map[string]*configschema.Attribute) + for _, variable := range c.Module.Variables { + schema.Attributes[variable.Name] = &configschema.Attribute{ + Required: variable.Default == cty.NilVal, + } + } + + ret.Expressions = marshalExpressions(mc.Config, schema) + + module, _ := marshalModule(c, schemas, c.Path.String()) + + ret.Module = module + + if len(mc.DependsOn) > 0 { + dependencies := make([]string, len(mc.DependsOn)) + for i, d := range mc.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `tofu validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. + if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + ret.DependsOn = dependencies + } + + return ret +} + +func marshalResources(resources map[string]*configs.Resource, schemas *tofu.Schemas, moduleAddr string) ([]resource, error) { + var rs []resource + for _, v := range resources { + providerConfigKey := opaqueProviderKey(v.ProviderConfigAddr().StringCompact(), moduleAddr) + r := resource{ + Address: v.Addr().String(), + Type: v.Type, + Name: v.Name, + ProviderConfigKey: providerConfigKey, + } + + switch v.Mode { + case addrs.ManagedResourceMode: + r.Mode = "managed" + case addrs.DataResourceMode: + r.Mode = "data" + default: + return rs, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, v.Mode.String()) + } + + cExp := marshalExpression(v.Count) + if !cExp.Empty() { + r.CountExpression = &cExp + } else { + fExp := marshalExpression(v.ForEach) + if !fExp.Empty() { + r.ForEachExpression = &fExp + } + } + + schema, schemaVer := 
schemas.ResourceTypeConfig(
+			v.Provider,
+			v.Mode,
+			v.Type,
+		)
+		if schema == nil {
+			return nil, fmt.Errorf("no schema found for %s (in provider %s)", v.Addr().String(), v.Provider)
+		}
+		r.SchemaVersion = schemaVer
+
+		r.Expressions = marshalExpressions(v.Config, schema)
+
+		// Managed is populated only for Mode = addrs.ManagedResourceMode
+		if v.Managed != nil && len(v.Managed.Provisioners) > 0 {
+			var provisioners []provisioner
+			for _, p := range v.Managed.Provisioners {
+				schema := schemas.ProvisionerConfig(p.Type)
+				prov := provisioner{
+					Type:        p.Type,
+					Expressions: marshalExpressions(p.Config, schema),
+				}
+				provisioners = append(provisioners, prov)
+			}
+			r.Provisioners = provisioners
+		}
+
+		if len(v.DependsOn) > 0 {
+			dependencies := make([]string, len(v.DependsOn))
+			for i, d := range v.DependsOn {
+				ref, diags := addrs.ParseRef(d)
+				// we should not get an error here, because `tofu validate`
+				// would have complained well before this point, but if we do we'll
+				// silently skip it.
+				if !diags.HasErrors() {
+					dependencies[i] = ref.Subject.String()
+				}
+			}
+			r.DependsOn = dependencies
+		}
+
+		rs = append(rs, r)
+	}
+	// Sort by address for deterministic JSON output, since map iteration
+	// order above is random.
+	sort.Slice(rs, func(i, j int) bool {
+		return rs[i].Address < rs[j].Address
+	})
+	return rs, nil
+}
+
+// Flatten all resource provider keys in a module and its descendants, such
+// that any resources from providers using a configuration passed through the
+// module call have a direct reference to that provider configuration.
+func normalizeModuleProviderKeys(m *module, pcs map[string]providerConfig) { + for i, r := range m.Resources { + if pc, exists := pcs[r.ProviderConfigKey]; exists { + if _, hasParent := pcs[pc.parentKey]; hasParent { + m.Resources[i].ProviderConfigKey = pc.parentKey + } + } + } + + for _, mc := range m.ModuleCalls { + normalizeModuleProviderKeys(&mc.Module, pcs) + } +} + +// opaqueProviderKey generates a unique absProviderConfig-like string from the module +// address and provider +func opaqueProviderKey(provider string, addr string) (key string) { + key = provider + if addr != "" { + key = fmt.Sprintf("%s:%s", addr, provider) + } + return key +} + +// Traverse up the module call tree until we find the provider +// configuration which has no linked parent config. This is then +// the source of the configuration used in this module call, so +// we link to it directly +func findSourceProviderKey(startKey string, fullName string, m map[string]providerConfig) string { + var parentKey string + + key := startKey + for key != "" { + parent, exists := m[key] + if !exists || parent.FullName != fullName { + break + } + + parentKey = key + key = parent.parentKey + } + + return parentKey +} diff --git a/pkg/command/jsonconfig/config_test.go b/pkg/command/jsonconfig/config_test.go new file mode 100644 index 00000000000..0ef548a45b6 --- /dev/null +++ b/pkg/command/jsonconfig/config_test.go @@ -0,0 +1,105 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonconfig + +import ( + "testing" +) + +func TestFindSourceProviderConfig(t *testing.T) { + tests := []struct { + StartKey string + FullName string + ProviderMap map[string]providerConfig + Want string + }{ + { + StartKey: "null", + FullName: "hashicorp/null", + ProviderMap: map[string]providerConfig{}, + Want: "", + }, + { + StartKey: "null", + FullName: "hashicorp/null", + ProviderMap: map[string]providerConfig{ + "null": { + Name: "null", + FullName: "hashicorp/null", + ModuleAddress: "", + }, + }, + Want: "null", + }, + { + StartKey: "null2", + FullName: "hashicorp/null", + ProviderMap: map[string]providerConfig{ + "null": { + Name: "null", + FullName: "hashicorp/null", + ModuleAddress: "", + }, + }, + Want: "", + }, + { + StartKey: "null", + FullName: "hashicorp2/null", + ProviderMap: map[string]providerConfig{ + "null": { + Name: "null", + FullName: "hashicorp/null", + ModuleAddress: "", + }, + }, + Want: "", + }, + { + StartKey: "module.a:null", + FullName: "hashicorp/null", + ProviderMap: map[string]providerConfig{ + "null": { + Name: "null", + FullName: "hashicorp/null", + ModuleAddress: "", + }, + "module.a:null": { + Name: "module.a:null", + FullName: "hashicorp/null", + ModuleAddress: "module.a", + parentKey: "null", + }, + }, + Want: "null", + }, + { + StartKey: "module.a:null", + FullName: "hashicorp2/null", + ProviderMap: map[string]providerConfig{ + "null": { + Name: "null", + FullName: "hashicorp/null", + ModuleAddress: "", + }, + "module.a:null": { + Name: "module.a:null", + FullName: "hashicorp2/null", + ModuleAddress: "module.a", + parentKey: "null", + }, + }, + Want: "module.a:null", + }, + } + + for _, test := range tests { + got := findSourceProviderKey(test.StartKey, test.FullName, test.ProviderMap) + if got != test.Want { + t.Errorf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} diff --git a/pkg/command/jsonconfig/doc.go b/pkg/command/jsonconfig/doc.go new file mode 100644 
index 00000000000..7482b642ef0 --- /dev/null +++ b/pkg/command/jsonconfig/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package jsonconfig implements methods for outputting a configuration snapshot +// in machine-readable json format +package jsonconfig diff --git a/pkg/command/jsonconfig/expression.go b/pkg/command/jsonconfig/expression.go new file mode 100644 index 00000000000..53ae9258113 --- /dev/null +++ b/pkg/command/jsonconfig/expression.go @@ -0,0 +1,187 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonconfig + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/lang/blocktoattr" +) + +// expression represents any unparsed expression +type expression struct { + // "constant_value" is set only if the expression contains no references to + // other objects, in which case it gives the resulting constant value. This + // is mapped as for the individual values in the common value + // representation. + ConstantValue json.RawMessage `json:"constant_value,omitempty"` + + // Alternatively, "references" will be set to a list of references in the + // expression. Multi-step references will be unwrapped and duplicated for + // each significant traversal step, allowing callers to more easily + // recognize the objects they care about without attempting to parse the + // expressions. Callers should only use string equality checks here, since + // the syntax may be extended in future releases. 
+	References []string `json:"references,omitempty"`
+}
+
+// marshalExpression produces an expression value for the given HCL
+// expression: a constant value when it can be evaluated without context,
+// and/or the list of references it contains. A nil expression yields the
+// zero expression.
+func marshalExpression(ex hcl.Expression) expression {
+	var ret expression
+	if ex == nil {
+		return ret
+	}
+
+	val, _ := ex.Value(nil)
+	if val != cty.NilVal {
+		valJSON, _ := ctyjson.Marshal(val, val.Type())
+		ret.ConstantValue = valJSON
+	}
+
+	refs, _ := lang.ReferencesInExpr(addrs.ParseRef, ex)
+	if len(refs) > 0 {
+		var varString []string
+		for _, ref := range refs {
+			// We work backwards here, starting with the full reference +
+			// remaining traversal, and then unwrapping the remaining traversals
+			// into parts until we end up at the smallest referenceable address.
+			remains := ref.Remaining
+			for len(remains) > 0 {
+				varString = append(varString, fmt.Sprintf("%s%s", ref.Subject, traversalStr(remains)))
+				remains = remains[:(len(remains) - 1)]
+			}
+			varString = append(varString, ref.Subject.String())
+
+			switch ref.Subject.(type) {
+			case addrs.ModuleCallInstance:
+				if ref.Subject.(addrs.ModuleCallInstance).Key != addrs.NoKey {
+					// Include the module call, without the key
+					varString = append(varString, ref.Subject.(addrs.ModuleCallInstance).Call.String())
+				}
+			case addrs.ResourceInstance:
+				if ref.Subject.(addrs.ResourceInstance).Key != addrs.NoKey {
+					// Include the resource, without the key
+					varString = append(varString, ref.Subject.(addrs.ResourceInstance).Resource.String())
+				}
+			case addrs.ModuleCallInstanceOutput:
+				// Include the module name, without the output name
+				varString = append(varString, ref.Subject.(addrs.ModuleCallInstanceOutput).Call.String())
+			}
+		}
+		ret.References = varString
+	}
+
+	return ret
+}
+
+// Empty reports whether the expression carries neither a constant value nor
+// any references.
+func (e *expression) Empty() bool {
+	return e.ConstantValue == nil && e.References == nil
+}
+
+// expressions is used to represent the entire content of a block. Attribute
+// arguments are mapped directly with the attribute name as key and an
+// expression as value.
+type expressions map[string]interface{} + +func marshalExpressions(body hcl.Body, schema *configschema.Block) expressions { + // Since we want the raw, un-evaluated expressions we need to use the + // low-level HCL API here, rather than the hcldec decoder API. That means we + // need the low-level schema. + lowSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + // (lowSchema is an hcl.BodySchema: + // https://godoc.org/github.com/hashicorp/hcl/v2/hcl#BodySchema ) + + // fix any ConfigModeAttr blocks present from legacy providers + body = blocktoattr.FixUpBlockAttrs(body, schema) + + // Use the low-level schema with the body to decode one level We'll just + // ignore any additional content that's not covered by the schema, which + // will effectively ignore "dynamic" blocks, and may also ignore other + // unknown stuff but anything else would get flagged by OpenTofu as an + // error anyway, and so we wouldn't end up in here. + content, _, _ := body.PartialContent(lowSchema) + if content == nil { + // Should never happen for a valid body, but we'll just generate empty + // if there were any problems. + return nil + } + + ret := make(expressions) + + // Any attributes we encode directly as expression objects. + for name, attr := range content.Attributes { + ret[name] = marshalExpression(attr.Expr) // note: singular expression for this one + } + + // Any nested blocks require a recursive call to produce nested expressions + // objects. 
+ for _, block := range content.Blocks { + typeName := block.Type + blockS, exists := schema.BlockTypes[typeName] + if !exists { + // Should never happen since only block types in the schema would be + // put in blocks list + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + ret[typeName] = marshalExpressions(block.Body, &blockS.Block) + case configschema.NestingList, configschema.NestingSet: + if _, exists := ret[typeName]; !exists { + ret[typeName] = make([]map[string]interface{}, 0, 1) + } + ret[typeName] = append(ret[typeName].([]map[string]interface{}), marshalExpressions(block.Body, &blockS.Block)) + case configschema.NestingMap: + if _, exists := ret[typeName]; !exists { + ret[typeName] = make(map[string]map[string]interface{}) + } + // NestingMap blocks always have the key in the first (and only) label + key := block.Labels[0] + retMap := ret[typeName].(map[string]map[string]interface{}) + retMap[key] = marshalExpressions(block.Body, &blockS.Block) + } + } + + return ret +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +// +// This was copied (and simplified) from internal/command/views/json/diagnostic.go. 
+func traversalStr(traversal hcl.Traversal) string { + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + switch tStep.Key.Type() { + case cty.String: + buf.WriteString(fmt.Sprintf("%q", tStep.Key.AsString())) + case cty.Number: + bf := tStep.Key.AsBigFloat() + buf.WriteString(bf.Text('g', 10)) + } + buf.WriteByte(']') + } + } + return buf.String() +} diff --git a/pkg/command/jsonconfig/expression_test.go b/pkg/command/jsonconfig/expression_test.go new file mode 100644 index 00000000000..a6babd218e5 --- /dev/null +++ b/pkg/command/jsonconfig/expression_test.go @@ -0,0 +1,152 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonconfig + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +func TestMarshalExpressions(t *testing.T) { + tests := []struct { + Input hcl.Body + Want expressions + }{ + { + &hclsyntax.Body{ + Attributes: hclsyntax.Attributes{ + "foo": &hclsyntax.Attribute{ + Expr: &hclsyntax.LiteralValueExpr{ + Val: cty.StringVal("bar"), + }, + }, + }, + }, + expressions{ + "foo": expression{ + ConstantValue: json.RawMessage([]byte(`"bar"`)), + References: []string(nil), + }, + }, + }, + { + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "foo": { + Name: "foo", + Expr: hcltest.MockExprTraversalSrc(`var.list[1]`), + }, + }, + }), + expressions{ + "foo": expression{ + References: []string{"var.list[1]", "var.list"}, + }, + }, + }, + { + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ 
+ "foo": { + Name: "foo", + Expr: hcltest.MockExprTraversalSrc(`data.template_file.foo[1].vars["baz"]`), + }, + }, + }), + expressions{ + "foo": expression{ + References: []string{"data.template_file.foo[1].vars[\"baz\"]", "data.template_file.foo[1].vars", "data.template_file.foo[1]", "data.template_file.foo"}, + }, + }, + }, + { + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "foo": { + Name: "foo", + Expr: hcltest.MockExprTraversalSrc(`module.foo.bar`), + }, + }, + }), + expressions{ + "foo": expression{ + References: []string{"module.foo.bar", "module.foo"}, + }, + }, + }, + { + hcltest.MockBody(&hcl.BodyContent{ + Blocks: hcl.Blocks{ + { + Type: "block_to_attr", + Body: hcltest.MockBody(&hcl.BodyContent{ + + Attributes: hcl.Attributes{ + "foo": { + Name: "foo", + Expr: hcltest.MockExprTraversalSrc(`module.foo.bar`), + }, + }, + }), + }, + }, + }), + expressions{ + "block_to_attr": expression{ + References: []string{"module.foo.bar", "module.foo"}, + }, + }, + }, + } + + for _, test := range tests { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "block_to_attr": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }, + }, + } + + got := marshalExpressions(test.Input, schema) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalExpression(t *testing.T) { + tests := []struct { + Input hcl.Expression + Want expression + }{ + { + nil, + expression{}, + }, + } + + for _, test := range tests { + got := marshalExpression(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} diff --git a/pkg/command/jsonformat/README.md b/pkg/command/jsonformat/README.md new file mode 100644 index 00000000000..6b04300178e --- /dev/null +++ b/pkg/command/jsonformat/README.md @@ -0,0 
+1,244 @@ +# jsonformat + +This package contains functionality around formatting and displaying the JSON +structured output produced by adding the `-json` flag to various OpenTofu +commands. + +## OpenTofu Structured Plan Renderer + +As of January 2023, this package contains only a single structure: the +`Renderer`. + +The renderer accepts the JSON structured output produced by the +`tofu show -json` command and writes it in a human-readable +format. + +Implementation details and decisions for the `Renderer` are discussed in the +following sections. + +### Implementation + +There are two subpackages within the `jsonformat` renderer package. The `differ` +package compares the `before` and `after` values of the given plan and produces +`Diff` objects from the `computed` package. + +This approach is aimed at ensuring the process by which the plan difference is +calculated is separated from the rendering itself. In this way it should be +possible to modify the rendering or add new renderer formats without being +concerned with the complex diff calculations. + +#### The `differ` package + +The `differ` package operates on `Change` objects. These are produced from +`jsonplan.Change` objects (which are produced by the `tofu show` command). +Each `jsonplan.Change` object represents a single resource within the overall +OpenTofu configuration. + +The `differ` package will iterate through the `Change` objects and produce a +single `Diff` that represents a processed summary of the changes described by +the `Change`. You will see that the produced changes are nested so a change to a +list attribute will contain a slice of changes, this is discussed in the +"[The computed package](#the-computed-package)" section. + +##### The `Change` object + +The `Change` objects contain raw Golang representations of JSON objects (generic +`interface{}` fields). These are produced by parsing the `json.RawMessage` +objects within the provided changes. 
+ +The fields the differ cares about from the provided changes are: + +- `Before`: The value before the proposed change. +- `After`: The value after the proposed change. +- `Unknown`: If the value is being computed during the change. +- `BeforeSensitive`: If the value was sensitive before the change. +- `AfterSensitive`: If the value is sensitive after the change. +- `ReplacePaths`: If the change is causing the overall resource to be replaced. + +In addition, the changes define two additional meta fields that they set and +manipulate internally: + +- `BeforeExplicit`: If the value in `Before` is explicit or an implied result due to a change elsewhere. +- `AfterExplicit`: If the value in `After` is explicit or an implied result due to a change elsewhere. + +The actual concrete type of each of the generic fields is determined by the +overall schema. The changes are also recursive, this means as we iterate through +the `Change` we create relevant child values based on the schema for the given +resource. + +For example, the initial change is always a `block` type which means the +`Before` and `After` values will actually be `map[string]interface{}` types +mapping each attribute and block to their relevant values. The +`Unknown`, `BeforeSensitive`, `AfterSensitive` values will all be either a +`map[string]interface{}` which maps each attribute or nested block to their +unknown and sensitive status, or it could simply be a `boolean` which generally +means the entire block and all children are sensitive or computed. + +In total, a `Change` can represent the following types: + +- `Attribute` + - `map`: Values will typically be `map[string]interface{}`. + - `list`: Values will typically be `[]interface{}`. + - `set`: Values will typically be `[]interface{}`. + - `object`: Values will typically be `map[string]interface{}`. + - `tuple`: Values will typically be `[]interface{}`. + - `bool`: Values will typically be a `bool`. + - `number`: Values will typically be a `float64`. 
+  - `string`: Values will typically be a `string`.
+- `Block`: Values will typically be `map[string]interface{}`, but they can be
+  split between nested blocks and attributes.
+- `Output`
+  - Outputs are interesting as we don't have a schema for them, as such they
+    can be any JSON type.
+  - We also use the Output type to represent dynamic attributes, since in both
+    cases we work out the type based on the JSON representation instead of the
+    schema.
+
+The `ReplacePaths` field is unique in that its value doesn't actually change
+based on the schema - it's always a slice of index slices. An index in this
+context will either be an integer pointing to a child of a set or a list or a
+string pointing to the child of a map, object or block. As we iterate through
+the value we manipulate the outer slice to remove child slices where the index
+doesn't match and propagate paths that do match onto the children.
+
+*Quick note on explicit vs implicit:* In practice, it is only possible to get
+implicit changes when you manipulate a collection. That is to say child values
+of a modified collection will insert `nil` entries into the relevant before
+or after fields of their child changes to represent their values being deleted
+or created. It is also possible for users to explicitly put null values into
+their collections, and this behaviour is different to deleting an item in the
+collection. With the `BeforeExplicit` and `AfterExplicit` values we can tell the
+difference between whether this value was removed from a collection or this
+value was set to null in a collection.
+
+*Quick note on the go-cty Value and Type objects:* The `Before` and `After`
+fields are actually go-cty values, but we cannot convert them directly because
+of the cloud backend redacted endpoint. The redacted endpoint turns sensitive
+values into strings regardless of their types. Because of this, we cannot just
+do a direct conversion using the ctyjson package.
We would have to iterate +through the schema first, find the sensitive values and their mapped types, +update the types inside the schema to strings, and then go back and do the +overall conversion. This isn't including any of the more complicated parts +around what happens if something was sensitive before and isn't sensitive after +or vice versa. This would mean the type would need to change between the before +and after value. It is in fact just easier to iterate through the values as +generic JSON interfaces, and obfuscate the sensitive values as we never need to +print them anyway. + +##### Iterating through changes + +The `differ` package will recursively create child `Change` objects for the +complex objects. + +There are two key subtypes of a `Change`: `SliceChange` and `MapChange`. +`SliceChange` values are used by list, set, and tuple attributes. `MapChange` +values are used by map and object attributes, and blocks. For what it is worth +outputs and dynamic types can end up using both, but they're kind of special as +the processing for dynamic types works out the type from the JSON struct and +then just passes it into the relevant real types for actual processing. + +The two subtypes implement `GetChild` functions that retrieve a child change +for a relevant index (`int` for slice, `string` for map). These functions build +an entirely populated `Change` object, and the package will then recursively +compute the change for the child (and all other children). When a complex change +has all the children changes, it then passes that into the relevant complex +diff type. + +#### The `computed` package + +A computed `Diff` should contain all the relevant information it needs to render +itself. + +The `Diff` itself contains the action (eg. `Create`, `Delete`, `Update`), and +whether this change is causing the overall resource to be replaced (read from +the `ReplacePaths` field discussed in the previous section). 
The actual content +of the diffs is passed directly into the internal renderer field. The internal +renderer is then an implementation that knows the actual content of the changes +and what they represent. + +For example, to instantiate a diff resulting from updating a list of +primitives: + +```go + listDiff := computed.NewDiff(renderers.List([]computed.Diff{ + computed.NewDiff(renderers.Primitive(0.0, 0.0, cty.Number), plans.NoOp, false), + computed.NewDiff(renderers.Primitive(1.0, nil, cty.Number), plans.Delete, false), + computed.NewDiff(renderers.Primitive(nil, 4.0, cty.Number), plans.Create, false), + computed.NewDiff(renderers.Primitive(2.0, 2.0, cty.Number), plans.NoOp, false), + }, plans.Update, false)) +``` + +##### The `RenderHuman` function + +Currently, there is only one way to render a change, and it is implemented via +the `RenderHuman` function. In the future, there may be additional rendering +capabilities, but for now the `RenderHuman` function just passes the call +directly onto the internal renderer. + +Rendering the above diff with: `listDiff.RenderHuman(0, RenderOpts{})` would +produce: + +```text +[ + 0, + - 1 -> null, + + 4, + 2, +] +``` + +Note, the render function itself doesn't print out metadata about its own change +(eg. there's no `~` symbol in front of the opening bracket). The expectation is +that parent changes control how child changes are rendered, so are responsible +for deciding on their opening indentation, whether they have a key (as in maps, +objects, and blocks), or how the action symbol is displayed. + +In the above example, the primitive renderer would print out only `1 -> null` +while the surrounding list renderer is providing the indentation, the symbol and +the line ending commas. + +##### Implementing new diff types + +To implement a new diff type, you must implement the internal Renderer +functionality. 
To do this you create a new implementation of the +`computed.DiffRenderer`, make sure it accepts all the data you need, and +implement the `RenderHuman` function (and any other additional render functions +that may exist). + +Some changes publish warnings that should be displayed alongside them. +If your new change has no warnings you can use the `NoWarningsRenderer` to avoid +implementing the additional `Warnings` function. + +If/when new Renderer types are implemented, additional `Render` like functions +will be added. You should implement all of these with your new change type. + +##### Implementing new renderer types for changes + +As of January 2023, there is only a single type of renderer (the human-readable +renderer). As such, the `Diff` structure provides a single `RenderHuman` +function. + +To implement a new renderer: + +1. Add a new render function onto the internal `DiffRenderer` interface. +2. Add a new render function onto the `Diff` struct that passes the call onto + the internal renderer. +3. Implement the new function on all the existing internal interfaces. + +Since each internal renderer contains all the information it needs to provide +change information about itself, your new Render function should pass in +anything it needs. + +### New types of Renderer + +In the future, we may wish to add in different kinds of renderer, such as a +compact renderer, or an interactive renderer. To do this, you'll need to modify +the Renderer struct or create a new type of Renderer. + +The logic around creating the `Diff` structures will be shared (ie. calling +into the differ package should be consistent across renderers). But when it +comes to rendering the changes, I'd expect the `Diff` structures to implement +additional functions that allow them to internally organise the data as required +and return a relevant object. 
For the existing human-readable renderer that is +simply a string, but for a future interactive renderer it might be a model from +an MVC pattern. diff --git a/pkg/command/jsonformat/collections/action.go b/pkg/command/jsonformat/collections/action.go new file mode 100644 index 00000000000..e71d91e9eed --- /dev/null +++ b/pkg/command/jsonformat/collections/action.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package collections + +import "github.com/kubegems/opentofu/pkg/plans" + +// CompareActions will compare current and next, and return plans.Update if they +// are different, and current if they are the same. +func CompareActions(current, next plans.Action) plans.Action { + if next == plans.NoOp { + return current + } + + if current != next { + return plans.Update + } + return current +} diff --git a/pkg/command/jsonformat/collections/map.go b/pkg/command/jsonformat/collections/map.go new file mode 100644 index 00000000000..3e238a54647 --- /dev/null +++ b/pkg/command/jsonformat/collections/map.go @@ -0,0 +1,31 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package collections + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +type ProcessKey func(key string) computed.Diff + +func TransformMap[Input any](before, after map[string]Input, keys []string, process ProcessKey) (map[string]computed.Diff, plans.Action) { + current := plans.NoOp + if before != nil && after == nil { + current = plans.Delete + } + if before == nil && after != nil { + current = plans.Create + } + + elements := make(map[string]computed.Diff) + for _, key := range keys { + elements[key] = process(key) + current = CompareActions(current, elements[key].Action) + } + + return elements, current +} diff --git a/pkg/command/jsonformat/collections/slice.go b/pkg/command/jsonformat/collections/slice.go new file mode 100644 index 00000000000..3176e810ff7 --- /dev/null +++ b/pkg/command/jsonformat/collections/slice.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package collections + +import ( + "reflect" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/objchange" +) + +type TransformIndices func(before, after int) computed.Diff +type ProcessIndices func(before, after int) +type IsObjType[Input any] func(input Input) bool + +func TransformSlice[Input any](before, after []Input, process TransformIndices, isObjType IsObjType[Input]) ([]computed.Diff, plans.Action) { + current := plans.NoOp + if before != nil && after == nil { + current = plans.Delete + } + if before == nil && after != nil { + current = plans.Create + } + + var elements []computed.Diff + ProcessSlice(before, after, func(before, after int) { + element := process(before, after) + elements = append(elements, element) + current = CompareActions(current, element.Action) + }, isObjType) + return elements, current +} + +func ProcessSlice[Input any](before, after []Input, process ProcessIndices, isObjType IsObjType[Input]) { + lcs := objchange.LongestCommonSubsequence(before, after, func(before, after Input) bool { + return reflect.DeepEqual(before, after) + }) + + var beforeIx, afterIx, lcsIx int + for beforeIx < len(before) || afterIx < len(after) || lcsIx < len(lcs) { + // Step through all the before values until we hit the next item in the + // longest common subsequence. We are going to just say that all of + // these have been deleted. 
+ for beforeIx < len(before) && (lcsIx >= len(lcs) || !reflect.DeepEqual(before[beforeIx], lcs[lcsIx])) { + isObjectDiff := isObjType(before[beforeIx]) && afterIx < len(after) && isObjType(after[afterIx]) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) + if isObjectDiff { + process(beforeIx, afterIx) + beforeIx++ + afterIx++ + continue + } + + process(beforeIx, len(after)) + beforeIx++ + } + + // Now, step through all the after values until hit the next item in the + // LCS. We are going to say that all of these have been created. + for afterIx < len(after) && (lcsIx >= len(lcs) || !reflect.DeepEqual(after[afterIx], lcs[lcsIx])) { + process(len(before), afterIx) + afterIx++ + } + + // Finally, add the item in common as unchanged. + if lcsIx < len(lcs) { + process(beforeIx, afterIx) + beforeIx++ + afterIx++ + lcsIx++ + } + } +} diff --git a/pkg/command/jsonformat/computed/diff.go b/pkg/command/jsonformat/computed/diff.go new file mode 100644 index 00000000000..9e11a03a600 --- /dev/null +++ b/pkg/command/jsonformat/computed/diff.go @@ -0,0 +1,130 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package computed + +import ( + "github.com/mitchellh/colorstring" + + "github.com/kubegems/opentofu/pkg/plans" +) + +// Diff captures the computed diff for a single block, element or attribute. +// +// It essentially merges common functionality across all types of changes, +// namely the replace logic and the action / change type. Any remaining +// behaviour can be offloaded to the renderer which will be unique for the +// various change types (eg. maps, objects, lists, blocks, primitives, etc.). +type Diff struct { + // Renderer captures the uncommon functionality across the different kinds + // of changes. Each type of change (lists, blocks, sets, etc.) will have a + // unique renderer. 
+ Renderer DiffRenderer + + // Action is the action described by this change (such as create, delete, + // update, etc.). + Action plans.Action + + // Replace tells the Change that it should add the `# forces replacement` + // suffix. + // + // Every single change could potentially add this suffix, so we embed it in + // the change as common functionality instead of in the specific renderers. + Replace bool +} + +// NewDiff creates a new Diff object with the provided renderer, action and +// replace context. +func NewDiff(renderer DiffRenderer, action plans.Action, replace bool) Diff { + return Diff{ + Renderer: renderer, + Action: action, + Replace: replace, + } +} + +// RenderHuman prints the Change into a human-readable string referencing the +// specified RenderOpts. +// +// If the returned string is a single line, then indent should be ignored. +// +// If the return string is multiple lines, then indent should be used to offset +// the beginning of all lines but the first by the specified amount. +func (diff Diff) RenderHuman(indent int, opts RenderHumanOpts) string { + return diff.Renderer.RenderHuman(diff, indent, opts) +} + +// WarningsHuman returns a list of strings that should be rendered as warnings +// before a given change is rendered. +// +// As with the RenderHuman function, the indent should only be applied on +// multiline warnings and on the second and following lines. +func (diff Diff) WarningsHuman(indent int, opts RenderHumanOpts) []string { + return diff.Renderer.WarningsHuman(diff, indent, opts) +} + +type DiffRenderer interface { + RenderHuman(diff Diff, indent int, opts RenderHumanOpts) string + WarningsHuman(diff Diff, indent int, opts RenderHumanOpts) []string +} + +// RenderHumanOpts contains options that can control how the human render +// function of the DiffRenderer will function. 
+type RenderHumanOpts struct { + Colorize *colorstring.Colorize + + // OverrideNullSuffix tells the Renderer not to display the `-> null` suffix + // that is normally displayed when an element, attribute, or block is + // deleted. + OverrideNullSuffix bool + + // OverrideForcesReplacement tells the Renderer to display the + // `# forces replacement` suffix, even if a diff doesn't have the Replace + // field set. + // + // Some renderers (like the Set renderer) don't display the suffix + // themselves but force their child diffs to display it instead. + OverrideForcesReplacement bool + + // ShowUnchangedChildren instructs the Renderer to render all children of a + // given complex change, instead of hiding unchanged items and compressing + // them into a single line. + ShowUnchangedChildren bool + + // HideDiffActionSymbols tells the renderer not to show the '+'/'-' symbols + // and to skip the places where the symbols would result in an offset. + HideDiffActionSymbols bool + + // ShowSensitive is used to display the value of variables marked as sensitive. + ShowSensitive bool +} + +// NewRenderHumanOpts creates a new RenderHumanOpts struct with the required +// fields set. +func NewRenderHumanOpts(colorize *colorstring.Colorize, showSensitive bool) RenderHumanOpts { + return RenderHumanOpts{ + Colorize: colorize, + ShowSensitive: showSensitive, + } +} + +// Clone returns a new RenderOpts object, that matches the original but can be +// edited without changing the original. +func (opts RenderHumanOpts) Clone() RenderHumanOpts { + return RenderHumanOpts{ + Colorize: opts.Colorize, + + OverrideNullSuffix: opts.OverrideNullSuffix, + ShowUnchangedChildren: opts.ShowUnchangedChildren, + HideDiffActionSymbols: opts.HideDiffActionSymbols, + + // OverrideForcesReplacement is a special case in that it doesn't + // cascade. 
So each diff should decide independently whether it's direct + // children should override their internal Replace logic, instead of + // an ancestor making the switch and affecting the entire tree. + OverrideForcesReplacement: false, + ShowSensitive: opts.ShowSensitive, + } +} diff --git a/pkg/command/jsonformat/computed/doc.go b/pkg/command/jsonformat/computed/doc.go new file mode 100644 index 00000000000..aa94706bd8f --- /dev/null +++ b/pkg/command/jsonformat/computed/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package computed contains types that represent the computed diffs for +// OpenTofu blocks, attributes, and outputs. +// +// Each Diff struct is made up of a renderer, an action, and a boolean +// describing the diff. The renderer internally holds child diffs or concrete +// values that allow it to know how to render the diff appropriately. +package computed diff --git a/pkg/command/jsonformat/computed/renderers/block.go b/pkg/command/jsonformat/computed/renderers/block.go new file mode 100644 index 00000000000..6a757802bb3 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/block.go @@ -0,0 +1,186 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "bytes" + "fmt" + "sort" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + + "github.com/kubegems/opentofu/pkg/plans" +) + +var ( + _ computed.DiffRenderer = (*blockRenderer)(nil) + + importantAttributes = []string{ + "id", + "name", + "tags", + } +) + +func importantAttribute(attr string) bool { + for _, attribute := range importantAttributes { + if attribute == attr { + return true + } + } + return false +} + +func Block(attributes map[string]computed.Diff, blocks Blocks) computed.DiffRenderer { + return &blockRenderer{ + attributes: attributes, + blocks: blocks, + } +} + +type blockRenderer struct { + NoWarningsRenderer + + attributes map[string]computed.Diff + blocks Blocks +} + +func (renderer blockRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if len(renderer.attributes) == 0 && len(renderer.blocks.GetAllKeys()) == 0 { + return fmt.Sprintf("{}%s", forcesReplacement(diff.Replace, opts)) + } + + unchangedAttributes := 0 + unchangedBlocks := 0 + + maximumAttributeKeyLen := 0 + var attributeKeys []string + escapedAttributeKeys := make(map[string]string) + for key := range renderer.attributes { + attributeKeys = append(attributeKeys, key) + escapedKey := EnsureValidAttributeName(key) + escapedAttributeKeys[key] = escapedKey + if maximumAttributeKeyLen < len(escapedKey) { + maximumAttributeKeyLen = len(escapedKey) + } + } + sort.Strings(attributeKeys) + + importantAttributeOpts := opts.Clone() + importantAttributeOpts.ShowUnchangedChildren = true + + attributeOpts := opts.Clone() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts))) + for _, key := range attributeKeys { + attribute := renderer.attributes[key] + if importantAttribute(key) { + + // Always display the important attributes. 
+ for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumAttributeKeyLen, key, attribute.RenderHuman(indent+1, importantAttributeOpts))) + continue + } + if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedAttributes++ + continue + } + + for _, warning := range attribute.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumAttributeKeyLen, escapedAttributeKeys[key], attribute.RenderHuman(indent+1, attributeOpts))) + } + + if unchangedAttributes > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts))) + } + + blockKeys := renderer.blocks.GetAllKeys() + for _, key := range blockKeys { + + foundChangedBlock := false + renderBlock := func(diff computed.Diff, mapKey string, opts computed.RenderHumanOpts) { + + creatingSensitiveValue := diff.Action == plans.Create && renderer.blocks.AfterSensitiveBlocks[key] + deletingSensitiveValue := diff.Action == plans.Delete && renderer.blocks.BeforeSensitiveBlocks[key] + modifyingSensitiveValue := (diff.Action == plans.Update || diff.Action == plans.NoOp) && (renderer.blocks.AfterSensitiveBlocks[key] || renderer.blocks.BeforeSensitiveBlocks[key]) + + if creatingSensitiveValue || deletingSensitiveValue || modifyingSensitiveValue { + // Intercept the renderer here if the sensitive data was set + // across all the blocks instead of individually. 
+ action := diff.Action + if diff.Action == plans.NoOp && renderer.blocks.BeforeSensitiveBlocks[key] != renderer.blocks.AfterSensitiveBlocks[key] { + action = plans.Update + } + + diff = computed.NewDiff(SensitiveBlock(diff, renderer.blocks.BeforeSensitiveBlocks[key], renderer.blocks.AfterSensitiveBlocks[key]), action, diff.Replace) + } + + if diff.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedBlocks++ + return + } + + if !foundChangedBlock && len(renderer.attributes) > 0 { + // We always want to put an extra new line between the + // attributes and blocks, and between groups of blocks. + buf.WriteString("\n") + foundChangedBlock = true + } + + // If the force replacement metadata was set for every entry in the + // block we need to override that here. Our child blocks will only + // know about the replace function if it was set on them + // specifically, and not if it was set for all the blocks. + blockOpts := opts.Clone() + blockOpts.OverrideForcesReplacement = renderer.blocks.ReplaceBlocks[key] + + for _, warning := range diff.WarningsHuman(indent+1, blockOpts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s%s %s\n", formatIndent(indent+1), writeDiffActionSymbol(diff.Action, blockOpts), EnsureValidAttributeName(key), mapKey, diff.RenderHuman(indent+1, blockOpts))) + + } + + switch { + case renderer.blocks.IsSingleBlock(key): + renderBlock(renderer.blocks.SingleBlocks[key], "", opts) + case renderer.blocks.IsMapBlock(key): + var keys []string + for key := range renderer.blocks.MapBlocks[key] { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, innerKey := range keys { + renderBlock(renderer.blocks.MapBlocks[key][innerKey], fmt.Sprintf(" %q", innerKey), opts) + } + case renderer.blocks.IsSetBlock(key): + + setOpts := opts.Clone() + setOpts.OverrideForcesReplacement = diff.Replace + + for _, block := range renderer.blocks.SetBlocks[key] { + renderBlock(block, "", 
opts) + } + case renderer.blocks.IsListBlock(key): + for _, block := range renderer.blocks.ListBlocks[key] { + renderBlock(block, "", opts) + } + } + } + + if unchangedBlocks > 0 { + buf.WriteString(fmt.Sprintf("\n%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("block", unchangedBlocks, opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s}", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts))) + return buf.String() +} diff --git a/pkg/command/jsonformat/computed/renderers/blocks.go b/pkg/command/jsonformat/computed/renderers/blocks.go new file mode 100644 index 00000000000..f28b860acde --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/blocks.go @@ -0,0 +1,98 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "sort" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" +) + +// Blocks is a helper struct for collating the different kinds of blocks in a +// simple way for rendering. +type Blocks struct { + SingleBlocks map[string]computed.Diff + ListBlocks map[string][]computed.Diff + SetBlocks map[string][]computed.Diff + MapBlocks map[string]map[string]computed.Diff + + // ReplaceBlocks and Before/AfterSensitiveBlocks carry forward the + // information about an entire group of blocks (eg. if all the blocks for a + // given list block are sensitive that isn't captured in the individual + // blocks as they are processed independently). These maps allow the + // renderer to check the metadata on the overall groups and respond + // accordingly. 
+ + ReplaceBlocks map[string]bool + BeforeSensitiveBlocks map[string]bool + AfterSensitiveBlocks map[string]bool +} + +func (blocks *Blocks) GetAllKeys() []string { + var keys []string + for key := range blocks.SingleBlocks { + keys = append(keys, key) + } + for key := range blocks.ListBlocks { + keys = append(keys, key) + } + for key := range blocks.SetBlocks { + keys = append(keys, key) + } + for key := range blocks.MapBlocks { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func (blocks *Blocks) IsSingleBlock(key string) bool { + _, ok := blocks.SingleBlocks[key] + return ok +} + +func (blocks *Blocks) IsListBlock(key string) bool { + _, ok := blocks.ListBlocks[key] + return ok +} + +func (blocks *Blocks) IsMapBlock(key string) bool { + _, ok := blocks.MapBlocks[key] + return ok +} + +func (blocks *Blocks) IsSetBlock(key string) bool { + _, ok := blocks.SetBlocks[key] + return ok +} + +func (blocks *Blocks) AddSingleBlock(key string, diff computed.Diff, replace, beforeSensitive, afterSensitive bool) { + blocks.SingleBlocks[key] = diff + blocks.ReplaceBlocks[key] = replace + blocks.BeforeSensitiveBlocks[key] = beforeSensitive + blocks.AfterSensitiveBlocks[key] = afterSensitive +} + +func (blocks *Blocks) AddAllListBlock(key string, diffs []computed.Diff, replace, beforeSensitive, afterSensitive bool) { + blocks.ListBlocks[key] = diffs + blocks.ReplaceBlocks[key] = replace + blocks.BeforeSensitiveBlocks[key] = beforeSensitive + blocks.AfterSensitiveBlocks[key] = afterSensitive +} + +func (blocks *Blocks) AddAllSetBlock(key string, diffs []computed.Diff, replace, beforeSensitive, afterSensitive bool) { + blocks.SetBlocks[key] = diffs + blocks.ReplaceBlocks[key] = replace + blocks.BeforeSensitiveBlocks[key] = beforeSensitive + blocks.AfterSensitiveBlocks[key] = afterSensitive +} + +func (blocks *Blocks) AddAllMapBlocks(key string, diffs map[string]computed.Diff, replace, beforeSensitive, afterSensitive bool) { + blocks.MapBlocks[key] = diffs 
+ blocks.ReplaceBlocks[key] = replace + blocks.BeforeSensitiveBlocks[key] = beforeSensitive + blocks.AfterSensitiveBlocks[key] = afterSensitive +} diff --git a/pkg/command/jsonformat/computed/renderers/json.go b/pkg/command/jsonformat/computed/renderers/json.go new file mode 100644 index 00000000000..15ede980748 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/json.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/jsondiff" + "github.com/kubegems/opentofu/pkg/plans" +) + +// RendererJsonOpts creates a jsondiff.JsonOpts object that returns the correct +// embedded renderers for each JSON type. +// +// We need to define this in our renderers package in order to avoid cycles, and +// to help with reuse between the output processing in the differs package, and +// our JSON string rendering here. 
+func RendererJsonOpts() jsondiff.JsonOpts { + return jsondiff.JsonOpts{ + Primitive: func(before, after interface{}, ctype cty.Type, action plans.Action) computed.Diff { + return computed.NewDiff(Primitive(before, after, ctype), action, false) + }, + Object: func(elements map[string]computed.Diff, action plans.Action) computed.Diff { + return computed.NewDiff(Object(elements), action, false) + }, + Array: func(elements []computed.Diff, action plans.Action) computed.Diff { + return computed.NewDiff(List(elements), action, false) + }, + Unknown: func(diff computed.Diff, action plans.Action) computed.Diff { + return computed.NewDiff(Unknown(diff), action, false) + }, + Sensitive: func(diff computed.Diff, beforeSensitive bool, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(Sensitive(diff, beforeSensitive, afterSensitive), action, false) + }, + TypeChange: func(before, after computed.Diff, action plans.Action) computed.Diff { + return computed.NewDiff(TypeChange(before, after), action, false) + }, + } +} diff --git a/pkg/command/jsonformat/computed/renderers/list.go b/pkg/command/jsonformat/computed/renderers/list.go new file mode 100644 index 00000000000..13cab843903 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/list.go @@ -0,0 +1,129 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "bytes" + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +var _ computed.DiffRenderer = (*listRenderer)(nil) + +func List(elements []computed.Diff) computed.DiffRenderer { + return &listRenderer{ + displayContext: true, + elements: elements, + } +} + +func NestedList(elements []computed.Diff) computed.DiffRenderer { + return &listRenderer{ + elements: elements, + } +} + +type listRenderer struct { + NoWarningsRenderer + + displayContext bool + elements []computed.Diff +} + +func (renderer listRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if len(renderer.elements) == 0 { + return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) + } + + elementOpts := opts.Clone() + elementOpts.OverrideNullSuffix = true + + unchangedElementOpts := opts.Clone() + unchangedElementOpts.ShowUnchangedChildren = true + + var unchangedElements []computed.Diff + + // renderNext tells the renderer to print out the next element in the list + // whatever state it is in. So, even if a change is a NoOp we will still + // print it out if the last change we processed wants us to. + renderNext := false + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(diff.Replace, opts))) + for _, element := range renderer.elements { + if element.Action == plans.NoOp && !renderNext && !opts.ShowUnchangedChildren { + unchangedElements = append(unchangedElements, element) + continue + } + renderNext = false + + opts := elementOpts + + // If we want to display the context around this change, we want to + // render the change immediately before this change in the list, and the + // change immediately after in the list, even if both these changes are + // NoOps. 
This will give the user reading the diff some context as to + // where in the list these changes are being made, as order matters. + if renderer.displayContext { + // If our list of unchanged elements contains more than one entry + // we'll print out a count of the number of unchanged elements that + // we skipped. Note, this is the length of the unchanged elements + // minus 1 as the most recent unchanged element will be printed out + // in full. + if len(unchangedElements) > 1 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements)-1, opts))) + } + // If our list of unchanged elements contains at least one entry, + // we're going to print out the most recent change in full. That's + // what happens here. + if len(unchangedElements) > 0 { + lastElement := unchangedElements[len(unchangedElements)-1] + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(lastElement.Action, unchangedElementOpts), lastElement.RenderHuman(indent+1, unchangedElementOpts))) + } + // We now reset the unchanged elements list, we've printed out a + // count of all the elements we skipped so we start counting from + // scratch again. This means that if we process a run of changed + // elements, they won't all start printing out summaries of every + // change that happened previously. + unchangedElements = nil + + if element.Action == plans.NoOp { + // If this is a NoOp action then we're going to render it below + // so we need to just override the opts we're going to use to + // make sure we use the unchanged opts. + opts = unchangedElementOpts + } else { + // As we also want to render the element immediately after any + // changes, we make a note here to say we should render the next + // change whatever it is. But, we only want to render the next + // change if the current change isn't a NoOp. 
If the current change + // is a NoOp then it was told to print by the last change and we + // don't want to cascade and print all changes from now on. + renderNext = true + } + } + + for _, warning := range element.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, opts), element.RenderHuman(indent+1, opts))) + } + + // If we were not displaying any context alongside our changes then the + // unchangedElements list will contain every unchanged element, and we'll + // print that out as we do with every other collection. + // + // If we were displaying context, then this will contain any unchanged + // elements since our last change, so we should also print it out. + if len(unchangedElements) > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", len(unchangedElements), opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/pkg/command/jsonformat/computed/renderers/map.go b/pkg/command/jsonformat/computed/renderers/map.go new file mode 100644 index 00000000000..1e1047caba4 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/map.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package renderers

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/kubegems/opentofu/pkg/command/jsonformat/computed"

	"github.com/kubegems/opentofu/pkg/plans"
)

var _ computed.DiffRenderer = (*mapRenderer)(nil)

// Map constructs a renderer for a top-level map of element diffs. Keys in
// the rendered output are padded so the "=" signs line up (alignKeys), and
// the map itself renders any null suffix or forces-replacement marker.
func Map(elements map[string]computed.Diff) computed.DiffRenderer {
	return &mapRenderer{
		elements:  elements,
		alignKeys: true,
	}
}

// NestedMap constructs a renderer for a map nested inside another structure.
// The null suffix and forces-replacement marker are overridden here — they
// are pushed down to (or suppressed on) the child elements instead of being
// rendered against the map itself — and keys are not aligned.
func NestedMap(elements map[string]computed.Diff) computed.DiffRenderer {
	return &mapRenderer{
		elements:                  elements,
		overrideNullSuffix:        true,
		overrideForcesReplacement: true,
	}
}

// mapRenderer renders the human-readable diff for a map of computed diffs.
type mapRenderer struct {
	NoWarningsRenderer

	// elements maps each map key to the computed diff for its value.
	elements map[string]computed.Diff

	// overrideNullSuffix suppresses the " -> null" suffix on child elements.
	overrideNullSuffix bool
	// overrideForcesReplacement moves the "# forces replacement" marker from
	// the map onto its child elements.
	overrideForcesReplacement bool
	// alignKeys pads escaped keys to the widest key so "=" signs line up.
	alignKeys bool
}

// RenderHuman writes the map diff in plan style: one line per changed
// element, with unchanged elements summarised as a hidden count unless
// opts.ShowUnchangedChildren is set.
func (renderer mapRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string {
	// Whether "# forces replacement" appears on the map itself or on its
	// children depends on the override flag set by NestedMap.
	forcesReplacementSelf := diff.Replace && !renderer.overrideForcesReplacement
	forcesReplacementChildren := diff.Replace && renderer.overrideForcesReplacement

	if len(renderer.elements) == 0 {
		return fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(forcesReplacementSelf, opts))
	}

	// Sort the map elements by key, so we have a deterministic ordering in
	// the output.
	var keys []string

	// We need to make sure the keys are capable of rendering properly.
	escapedKeys := make(map[string]string)

	// Track the widest escaped key so aligned output can pad to it.
	maximumKeyLen := 0
	for key := range renderer.elements {
		keys = append(keys, key)

		escapedKey := hclEscapeString(key)
		escapedKeys[key] = escapedKey
		if maximumKeyLen < len(escapedKey) {
			maximumKeyLen = len(escapedKey)
		}
	}
	sort.Strings(keys)

	unchangedElements := 0

	// Children inherit null-suffix suppression when the whole map is being
	// deleted, plus any delegated forces-replacement marker.
	elementOpts := opts.Clone()
	elementOpts.OverrideNullSuffix = diff.Action == plans.Delete || renderer.overrideNullSuffix
	elementOpts.OverrideForcesReplacement = forcesReplacementChildren

	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(forcesReplacementSelf, opts)))
	for _, key := range keys {
		element := renderer.elements[key]

		if element.Action == plans.NoOp && !opts.ShowUnchangedChildren {
			// Don't render NoOp operations when we are compact display.
			unchangedElements++
			continue
		}

		for _, warning := range element.WarningsHuman(indent+1, opts) {
			buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning))
		}
		// Only show commas between elements for objects.
		comma := ""
		if _, ok := element.Renderer.(*objectRenderer); ok {
			comma = ","
		}

		if renderer.alignKeys {
			// %-*s left-pads the escaped key to maximumKeyLen so the "="
			// signs line up across elements.
			buf.WriteString(fmt.Sprintf("%s%s%-*s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), maximumKeyLen, escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma))
		} else {
			buf.WriteString(fmt.Sprintf("%s%s%s = %s%s\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), escapedKeys[key], element.RenderHuman(indent+1, elementOpts), comma))
		}

	}

	if unchangedElements > 0 {
		buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, opts)))
	}

	buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts)))
	return buf.String()
}
diff --git a/pkg/command/jsonformat/computed/renderers/object.go b/pkg/command/jsonformat/computed/renderers/object.go
new file mode 100644
index 00000000000..5a54d798018
--- /dev/null
+++ b/pkg/command/jsonformat/computed/renderers/object.go
@@ -0,0 +1,100 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package renderers

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/kubegems/opentofu/pkg/command/jsonformat/computed"
	"github.com/kubegems/opentofu/pkg/plans"
)

var _ computed.DiffRenderer = (*objectRenderer)(nil)

// Object constructs a renderer for a top-level object of attribute diffs.
// The " -> null" suffix is suppressed on child attributes (the object itself
// carries it instead).
func Object(attributes map[string]computed.Diff) computed.DiffRenderer {
	return &objectRenderer{
		attributes:         attributes,
		overrideNullSuffix: true,
	}
}

// NestedObject constructs a renderer for an object nested inside another
// structure; child attributes keep their own " -> null" suffixes.
func NestedObject(attributes map[string]computed.Diff) computed.DiffRenderer {
	return &objectRenderer{
		attributes:         attributes,
		overrideNullSuffix: false,
	}
}

// objectRenderer renders the human-readable diff for an object's attributes.
type objectRenderer struct {
	NoWarningsRenderer

	// attributes maps each attribute name to the computed diff for its value.
	attributes map[string]computed.Diff
	// overrideNullSuffix suppresses the " -> null" suffix on child attributes.
	overrideNullSuffix bool
}

// RenderHuman writes the object diff in plan style: one line per changed
// attribute (keys aligned on "="), with unchanged attributes summarised as a
// hidden count unless opts.ShowUnchangedChildren is set. Attributes matching
// importantAttribute are always rendered, with their unchanged children shown.
func (renderer objectRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string {
	if len(renderer.attributes) == 0 {
		return fmt.Sprintf("{}%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts))
	}

	attributeOpts := opts.Clone()
	attributeOpts.OverrideNullSuffix = renderer.overrideNullSuffix

	// We need to keep track of our keys in two ways. The first is the order in
	// which we will display them. The second is a mapping to their safely
	// escaped equivalent.

	maximumKeyLen := 0
	var keys []string
	escapedKeys := make(map[string]string)
	for key := range renderer.attributes {
		keys = append(keys, key)
		escapedKey := EnsureValidAttributeName(key)
		escapedKeys[key] = escapedKey
		if maximumKeyLen < len(escapedKey) {
			maximumKeyLen = len(escapedKey)
		}
	}
	// Sort for a deterministic ordering in the output.
	sort.Strings(keys)

	unchangedAttributes := 0
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("{%s\n", forcesReplacement(diff.Replace, opts)))
	for _, key := range keys {
		attribute := renderer.attributes[key]

		if importantAttribute(key) {
			// Important attributes are rendered unconditionally, and their
			// unchanged children are shown rather than summarised.
			importantAttributeOpts := attributeOpts.Clone()
			importantAttributeOpts.ShowUnchangedChildren = true

			for _, warning := range attribute.WarningsHuman(indent+1, importantAttributeOpts) {
				buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning))
			}
			buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, importantAttributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, importantAttributeOpts)))
			continue
		}

		if attribute.Action == plans.NoOp && !opts.ShowUnchangedChildren {
			// Don't render NoOp operations when we are compact display.
			unchangedAttributes++
			continue
		}

		for _, warning := range attribute.WarningsHuman(indent+1, opts) {
			buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning))
		}
		// %-*s left-pads the escaped key to maximumKeyLen so "=" signs align.
		buf.WriteString(fmt.Sprintf("%s%s%-*s = %s\n", formatIndent(indent+1), writeDiffActionSymbol(attribute.Action, attributeOpts), maximumKeyLen, escapedKeys[key], attribute.RenderHuman(indent+1, attributeOpts)))
	}

	if unchangedAttributes > 0 {
		buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("attribute", unchangedAttributes, opts)))
	}

	buf.WriteString(fmt.Sprintf("%s%s}%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts)))
	return buf.String()
}
diff --git a/pkg/command/jsonformat/computed/renderers/primitive.go b/pkg/command/jsonformat/computed/renderers/primitive.go
new file mode 100644
index 00000000000..3e201bcd5a0
--- /dev/null
+++ b/pkg/command/jsonformat/computed/renderers/primitive.go
@@ -0,0 +1,244 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package renderers

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/command/jsonformat/collections"
	"github.com/kubegems/opentofu/pkg/command/jsonformat/computed"
	"github.com/kubegems/opentofu/pkg/command/jsonformat/structured"
	"github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path"
	"github.com/kubegems/opentofu/pkg/plans"
)

var _ computed.DiffRenderer = (*primitiveRenderer)(nil)

// Primitive constructs a renderer for a primitive (bool, number, or string)
// value transition from before to after. Strings get special handling for
// multiline and JSON content; see renderStringDiff.
func Primitive(before, after interface{}, ctype cty.Type) computed.DiffRenderer {
	return &primitiveRenderer{
		before: before,
		after:  after,
		ctype:  ctype,
	}
}

// primitiveRenderer renders the human-readable diff for a primitive value.
type primitiveRenderer struct {
	NoWarningsRenderer

	// before and after hold the raw values; numbers arrive as json.Number
	// (see renderPrimitiveValue) and nil means null.
	before interface{}
	after  interface{}
	ctype  cty.Type
}

// RenderHuman writes the primitive diff. Strings are delegated to
// renderStringDiff; other primitives render as "after", "before -> null",
// "before", or "before -> after" depending on the action.
func (renderer primitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string {
	if renderer.ctype == cty.String {
		return renderer.renderStringDiff(diff, indent, opts)
	}

	beforeValue := renderPrimitiveValue(renderer.before, renderer.ctype, opts)
	afterValue := renderPrimitiveValue(renderer.after, renderer.ctype, opts)

	switch diff.Action {
	case plans.Create:
		return fmt.Sprintf("%s%s", afterValue, forcesReplacement(diff.Replace, opts))
	case plans.Delete:
		return fmt.Sprintf("%s%s%s", beforeValue, nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts))
	case plans.NoOp:
		return fmt.Sprintf("%s%s", beforeValue, forcesReplacement(diff.Replace, opts))
	default:
		return fmt.Sprintf("%s %s %s%s", beforeValue, opts.Colorize.Color("[yellow]->[reset]"), afterValue, forcesReplacement(diff.Replace, opts))
	}
}

// renderPrimitiveValue renders a single non-string primitive value: a
// colourised "null" for nil, "true"/"false" for bools, and the exact decimal
// text for numbers (json.Number preserves precision that float64 would lose).
// Panics on any other type — callers only pass bool or number here.
func renderPrimitiveValue(value interface{}, t cty.Type, opts computed.RenderHumanOpts) string {
	if value == nil {
		return opts.Colorize.Color("[dark_gray]null[reset]")
	}

	switch {
	case t == cty.Bool:
		if value.(bool) {
			return "true"
		}
		return "false"
	case t == cty.Number:
		num := value.(json.Number)
		return num.String()
	default:
		panic("unrecognized primitive type: " + t.FriendlyName())
	}
}

// renderStringDiff renders a string transition, with three special cases:
// JSON-valued strings are re-rendered as a structural jsonencode(...) diff,
// multiline strings become an indented <<-EOT heredoc with per-line +/-/~
// markers, and everything else renders as simple quoted strings.
func (renderer primitiveRenderer) renderStringDiff(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string {

	// We process multiline strings at the end of the switch statement.
	var lines []string

	switch diff.Action {
	case plans.Create, plans.NoOp:
		str := evaluatePrimitiveString(renderer.after, opts)

		if str.Json != nil {
			if diff.Action == plans.NoOp {
				return renderer.renderStringDiffAsJson(diff, indent, opts, str, str)
			} else {
				return renderer.renderStringDiffAsJson(diff, indent, opts, evaluatedString{}, str)
			}
		}

		if !str.IsMultiline {
			return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts))
		}

		// We are rendering a single multiline string, so let's split by the
		// new line character. While we are doing this, we are going to insert
		// our indents and make sure each line is formatted correctly.
		lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n")

		// We now just need to do the same for the first entry in lines, because
		// we split on the new line characters which won't have been at the
		// beginning of the first line.
		lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0])
	case plans.Delete:
		str := evaluatePrimitiveString(renderer.before, opts)
		if str.IsNull {
			// We don't put the null suffix (-> null) here because the final
			// render of null -> null would look silly.
			return fmt.Sprintf("%s%s", str.RenderSimple(), forcesReplacement(diff.Replace, opts))
		}

		if str.Json != nil {
			return renderer.renderStringDiffAsJson(diff, indent, opts, str, evaluatedString{})
		}

		if !str.IsMultiline {
			return fmt.Sprintf("%s%s%s", str.RenderSimple(), nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts))
		}

		// We are deleting a single multiline string, so let's split by the
		// new line character. While we are doing this, we are going to insert
		// our indents and make sure each line is formatted correctly.
		lines = strings.Split(strings.ReplaceAll(str.String, "\n", fmt.Sprintf("\n%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts))), "\n")

		// We now just need to do the same for the first entry in lines, because
		// we split on the new line characters which won't have been at the
		// beginning of the first line.
		lines[0] = fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), lines[0])
	default:
		// Update: both sides exist, so evaluate each and pick a pairing.
		beforeString := evaluatePrimitiveString(renderer.before, opts)
		afterString := evaluatePrimitiveString(renderer.after, opts)

		if beforeString.Json != nil && afterString.Json != nil {
			return renderer.renderStringDiffAsJson(diff, indent, opts, beforeString, afterString)
		}

		if beforeString.Json != nil || afterString.Json != nil {
			// This means one of the strings is JSON and one isn't. We're going
			// to be a little inefficient here, but we can just reuse another
			// renderer for this so let's keep it simple.
			return computed.NewDiff(
				TypeChange(
					computed.NewDiff(Primitive(renderer.before, nil, cty.String), plans.Delete, false),
					computed.NewDiff(Primitive(nil, renderer.after, cty.String), plans.Create, false)),
				diff.Action,
				diff.Replace).RenderHuman(indent, opts)
		}

		if !beforeString.IsMultiline && !afterString.IsMultiline {
			return fmt.Sprintf("%s %s %s%s", beforeString.RenderSimple(), opts.Colorize.Color("[yellow]->[reset]"), afterString.RenderSimple(), forcesReplacement(diff.Replace, opts))
		}

		// At least one side is multiline: produce a per-line diff between the
		// two bodies via collections.ProcessSlice.
		beforeLines := strings.Split(beforeString.String, "\n")
		afterLines := strings.Split(afterString.String, "\n")

		// An out-of-range index on one side means that line exists only on
		// the other side, so it renders as a pure create or delete.
		processIndices := func(beforeIx, afterIx int) {
			if beforeIx < 0 || beforeIx >= len(beforeLines) {
				lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Create, opts), afterLines[afterIx]))
				return
			}

			if afterIx < 0 || afterIx >= len(afterLines) {
				lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.Delete, opts), beforeLines[beforeIx]))
				return
			}

			lines = append(lines, fmt.Sprintf("%s%s%s", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), beforeLines[beforeIx]))
		}
		// Lines of text have no "object" notion, so never treat any as one.
		isObjType := func(_ string) bool {
			return false
		}

		collections.ProcessSlice(beforeLines, afterLines, processIndices, isObjType)
	}

	// We return early if we find non-multiline strings or JSON strings, so we
	// know here that we just render the lines slice properly.
	return fmt.Sprintf("<<-EOT%s\n%s\n%s%sEOT%s",
		forcesReplacement(diff.Replace, opts),
		strings.Join(lines, "\n"),
		formatIndent(indent),
		writeDiffActionSymbol(plans.NoOp, opts),
		nullSuffix(diff.Action, opts))
}

// renderStringDiffAsJson re-diffs two JSON-valued strings structurally and
// renders the result wrapped in jsonencode(...). Whitespace-only changes get
// a "# whitespace changes" annotation instead of a structural diff.
// NOTE: this function continues past the end of this span; the remainder is
// unchanged.
func (renderer primitiveRenderer) renderStringDiffAsJson(diff computed.Diff, indent int, opts computed.RenderHumanOpts, before evaluatedString, after evaluatedString) string {
	jsonDiff := RendererJsonOpts().Transform(structured.Change{
		BeforeExplicit:     diff.Action != plans.Create,
		AfterExplicit:      diff.Action != plans.Delete,
		Before:             before.Json,
		After:              after.Json,
		Unknown:            false,
		BeforeSensitive:    false,
		AfterSensitive:     false,
		ReplacePaths:       attribute_path.Empty(false),
		RelevantAttributes: attribute_path.AlwaysMatcher(),
	})

	action := diff.Action

	jsonOpts := opts.Clone()
	jsonOpts.OverrideNullSuffix = true

	var whitespace, replace string
	if jsonDiff.Action == plans.NoOp && diff.Action == plans.Update {
		// Then this means we are rendering a whitespace only change. The JSON
		// differ will have ignored the whitespace changes so that makes the
		// diff we are about to print out very confusing without extra
		// explanation.
		if diff.Replace {
			whitespace = " # whitespace changes force replacement"
		} else {
			whitespace = " # whitespace changes"
		}

		// Because we'd be showing no changes otherwise:
		jsonOpts.ShowUnchangedChildren = true

		// Whitespace changes should not appear as if edited.
		action = plans.NoOp
	} else {
		// We only show the replace suffix if we didn't print something out
		// about whitespace changes.
		replace = forcesReplacement(diff.Replace, opts)
	}

	renderedJsonDiff := jsonDiff.RenderHuman(indent+1, jsonOpts)

	if diff.Action == plans.Create || diff.Action == plans.Delete {
		// We don't display the '+' or '-' symbols on the JSON diffs, we should
		// still display the '~' for an update action though.
+ action = plans.NoOp + } + + if strings.Contains(renderedJsonDiff, "\n") { + return fmt.Sprintf("jsonencode(%s\n%s%s%s%s\n%s%s)%s", whitespace, formatIndent(indent+1), writeDiffActionSymbol(action, opts), renderedJsonDiff, replace, formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts)) + } + return fmt.Sprintf("jsonencode(%s)%s%s", renderedJsonDiff, whitespace, replace) +} diff --git a/pkg/command/jsonformat/computed/renderers/renderer_test.go b/pkg/command/jsonformat/computed/renderers/renderer_test.go new file mode 100644 index 00000000000..38fc151f833 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/renderer_test.go @@ -0,0 +1,2230 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/plans" +) + +func TestRenderers_Human(t *testing.T) { + colorize := colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } + + tcs := map[string]struct { + diff computed.Diff + expected string + opts computed.RenderHumanOpts + }{ + // We're using the string "null" in these tests to demonstrate the + // difference between rendering an actual string and rendering a null + // value. 
+ "primitive_create_string": { + diff: computed.Diff{ + Renderer: Primitive(nil, "null", cty.String), + Action: plans.Create, + }, + expected: "\"null\"", + }, + "primitive_delete_string": { + diff: computed.Diff{ + Renderer: Primitive("null", nil, cty.String), + Action: plans.Delete, + }, + expected: "\"null\" -> null", + }, + "primitive_update_string_to_null": { + diff: computed.Diff{ + Renderer: Primitive("null", nil, cty.String), + Action: plans.Update, + }, + expected: "\"null\" -> null", + }, + "primitive_update_string_from_null": { + diff: computed.Diff{ + Renderer: Primitive(nil, "null", cty.String), + Action: plans.Update, + }, + expected: "null -> \"null\"", + }, + "primitive_update_multiline_string_to_null": { + diff: computed.Diff{ + Renderer: Primitive("nu\nll", nil, cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + - nu + - ll + + null + EOT +`, + }, + "primitive_update_multiline_string_from_null": { + diff: computed.Diff{ + Renderer: Primitive(nil, "nu\nll", cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + - null + + nu + + ll + EOT +`, + }, + "primitive_update_json_string_to_null": { + diff: computed.Diff{ + Renderer: Primitive("[null]", nil, cty.String), + Action: plans.Update, + }, + expected: ` +jsonencode( + [ + - null, + ] + ) -> null +`, + }, + "primitive_update_json_string_from_null": { + diff: computed.Diff{ + Renderer: Primitive(nil, "[null]", cty.String), + Action: plans.Update, + }, + expected: ` +null -> jsonencode( + [ + + null, + ] + ) +`, + }, + "primitive_create_null_string": { + diff: computed.Diff{ + Renderer: Primitive(nil, nil, cty.String), + Action: plans.Create, + }, + expected: "null", + }, + "primitive_delete_null_string": { + diff: computed.Diff{ + Renderer: Primitive(nil, nil, cty.String), + Action: plans.Delete, + }, + expected: "null", + }, + "primitive_create": { + diff: computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + expected: "1", 
+ }, + "primitive_delete": { + diff: computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + expected: "1 -> null", + }, + "primitive_delete_override": { + diff: computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + opts: computed.RenderHumanOpts{OverrideNullSuffix: true}, + expected: "1", + }, + "primitive_update_to_null": { + diff: computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Update, + }, + expected: "1 -> null", + }, + "primitive_update_from_null": { + diff: computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Update, + }, + expected: "null -> 1", + }, + "primitive_update": { + diff: computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + expected: "0 -> 1", + }, + "primitive_update_max_int64_to_min_int64": { + diff: computed.Diff{ + // 9223372036854775807 == math.MinInt64 + // -9223372036854775808 == math.MaxInt64 + Renderer: Primitive(json.Number("9223372036854775807"), json.Number("-9223372036854775808"), cty.Number), + Action: plans.Update, + }, + expected: "9223372036854775807 -> -9223372036854775808", + }, + "primitive_update_21_digits_number": { + diff: computed.Diff{ + // 18446744073709551615 == math.MaxUint64 (which has 20-digits number) + Renderer: Primitive(json.Number("918446744073709551615"), json.Number("0"), cty.Number), + Action: plans.Update, + }, + expected: "918446744073709551615 -> 0", + }, + "primitive_update_replace": { + diff: computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + Replace: true, + }, + expected: "0 -> 1 # forces replacement", + }, + "primitive_multiline_string_create": { + diff: computed.Diff{ + Renderer: Primitive(nil, "hello\nworld", cty.String), + Action: plans.Create, + }, + expected: ` +<<-EOT + hello + world + EOT +`, + }, + 
"primitive_multiline_string_delete": { + diff: computed.Diff{ + Renderer: Primitive("hello\nworld", nil, cty.String), + Action: plans.Delete, + }, + expected: ` +<<-EOT + hello + world + EOT -> null +`, + }, + "primitive_multiline_string_update": { + diff: computed.Diff{ + Renderer: Primitive("hello\nold\nworld", "hello\nnew\nworld", cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + hello + - old + + new + world + EOT +`, + }, + "primitive_json_string_create": { + diff: computed.Diff{ + Renderer: Primitive(nil, "{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", cty.String), + Action: plans.Create, + }, + expected: ` +jsonencode( + { + + key_one = "value_one" + + key_two = "value_two" + } + ) +`, + }, + "primitive_json_string_delete": { + diff: computed.Diff{ + Renderer: Primitive("{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", nil, cty.String), + Action: plans.Delete, + }, + expected: ` +jsonencode( + { + - key_one = "value_one" + - key_two = "value_two" + } + ) -> null +`, + }, + "primitive_json_string_update": { + diff: computed.Diff{ + Renderer: Primitive("{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", "{\"key_one\": \"value_one\",\"key_two\":\"value_two\",\"key_three\":\"value_three\"}", cty.String), + Action: plans.Update, + }, + expected: ` +jsonencode( + ~ { + + key_three = "value_three" + # (2 unchanged attributes hidden) + } + ) +`, + }, + "primitive_json_explicit_nulls": { + diff: computed.Diff{ + Renderer: Primitive("{\"key_one\":\"value_one\",\"key_two\":\"value_two\"}", "{\"key_one\":null}", cty.String), + Action: plans.Update, + }, + expected: ` +jsonencode( + ~ { + ~ key_one = "value_one" -> null + - key_two = "value_two" + } + ) +`, + }, + "primitive_fake_json_string_update": { + diff: computed.Diff{ + // This isn't valid JSON, our renderer should be okay with it. 
+ Renderer: Primitive("{\"key_one\": \"value_one\",\"key_two\":\"value_two\"", "{\"key_one\": \"value_one\",\"key_two\":\"value_two\",\"key_three\":\"value_three\"", cty.String), + Action: plans.Update, + }, + expected: "\"{\\\"key_one\\\": \\\"value_one\\\",\\\"key_two\\\":\\\"value_two\\\"\" -> \"{\\\"key_one\\\": \\\"value_one\\\",\\\"key_two\\\":\\\"value_two\\\",\\\"key_three\\\":\\\"value_three\\\"\"", + }, + "primitive_multiline_to_json_update": { + diff: computed.Diff{ + Renderer: Primitive("hello\nworld", "{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + hello + world + EOT -> jsonencode( + { + + key_one = "value_one" + + key_two = "value_two" + } + ) +`, + }, + "primitive_json_to_multiline_update": { + diff: computed.Diff{ + Renderer: Primitive("{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", "hello\nworld", cty.String), + Action: plans.Update, + }, + expected: ` +jsonencode( + { + - key_one = "value_one" + - key_two = "value_two" + } + ) -> <<-EOT + hello + world + EOT +`, + }, + "primitive_json_to_string_update": { + diff: computed.Diff{ + Renderer: Primitive("{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", "hello world", cty.String), + Action: plans.Update, + }, + expected: ` +jsonencode( + { + - key_one = "value_one" + - key_two = "value_two" + } + ) -> "hello world" +`, + }, + "primitive_string_to_json_update": { + diff: computed.Diff{ + Renderer: Primitive("hello world", "{\"key_one\": \"value_one\",\"key_two\":\"value_two\"}", cty.String), + Action: plans.Update, + }, + expected: ` +"hello world" -> jsonencode( + { + + key_one = "value_one" + + key_two = "value_two" + } + ) +`, + }, + "primitive_multi_to_single_update": { + diff: computed.Diff{ + Renderer: Primitive("hello\nworld", "hello world", cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + - hello + - world + + hello world + EOT +`, + }, + "primitive_single_to_multi_update": { + diff: 
computed.Diff{ + Renderer: Primitive("hello world", "hello\nworld", cty.String), + Action: plans.Update, + }, + expected: ` +<<-EOT + - hello world + + hello + + world + EOT +`, + }, + "sensitive_update": { + diff: computed.Diff{ + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, true, true), + Action: plans.Update, + }, + expected: "(sensitive value)", + }, + "sensitive_update_replace": { + diff: computed.Diff{ + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + Replace: true, + }, true, true), + Action: plans.Update, + Replace: true, + }, + expected: "(sensitive value) # forces replacement", + }, + "computed_create": { + diff: computed.Diff{ + Renderer: Unknown(computed.Diff{}), + Action: plans.Create, + }, + expected: "(known after apply)", + }, + "computed_update": { + diff: computed.Diff{ + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + }, + expected: "0 -> (known after apply)", + }, + "computed_create_forces_replacement": { + diff: computed.Diff{ + Renderer: Unknown(computed.Diff{}), + Action: plans.Create, + Replace: true, + }, + expected: "(known after apply) # forces replacement", + }, + "computed_update_forces_replacement": { + diff: computed.Diff{ + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + Replace: true, + }, + expected: "0 -> (known after apply) # forces replacement", + }, + "object_created": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{}), + Action: plans.Create, + }, + expected: "{}", + }, + "object_created_with_attributes": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(nil, json.Number("0"), 
cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Create, + }, + expected: ` +{ + + attribute_one = 0 + } +`, + }, + "object_deleted": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{}), + Action: plans.Delete, + }, + expected: "{} -> null", + }, + "object_deleted_with_attributes": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, + expected: ` +{ + - attribute_one = 0 + } -> null +`, + }, + "nested_object_deleted": { + diff: computed.Diff{ + Renderer: NestedObject(map[string]computed.Diff{}), + Action: plans.Delete, + }, + expected: "{} -> null", + }, + "nested_object_deleted_with_attributes": { + diff: computed.Diff{ + Renderer: NestedObject(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, + expected: ` +{ + - attribute_one = 0 -> null + } -> null +`, + }, + "object_create_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(nil, json.Number("0"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + attribute_one = 0 + } +`, + }, + "object_update_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ attribute_one = 0 -> 1 + } +`, + }, + "object_update_attribute_forces_replacement": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + Replace: true, + }, + expected: ` 
+{ # forces replacement + ~ attribute_one = 0 -> 1 + } +`, + }, + "object_delete_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + - attribute_one = 0 + } +`, + }, + "object_ignore_unchanged_attributes": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + "attribute_two": { + Renderer: Primitive(json.Number("0"), json.Number("0"), cty.Number), + Action: plans.NoOp, + }, + "attribute_three": { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ attribute_one = 0 -> 1 + + attribute_three = 1 + # (1 unchanged attribute hidden) + } +`, + }, + "object_create_sensitive_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, false, true), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + attribute_one = (sensitive value) + } +`, + }, + "object_update_sensitive_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, true, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ attribute_one = (sensitive value) + } +`, + }, + "object_delete_sensitive_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, 
cty.Number), + Action: plans.Delete, + }, true, false), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + - attribute_one = (sensitive value) + } +`, + }, + "object_create_computed_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Unknown(computed.Diff{Renderer: nil}), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + attribute_one = (known after apply) + } +`, + }, + "object_update_computed_attribute": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ attribute_one = 1 -> (known after apply) + } +`, + }, + "object_escapes_attribute_keys": { + diff: computed.Diff{ + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "attribute:two": { + Renderer: Primitive(json.Number("2"), json.Number("3"), cty.Number), + Action: plans.Update, + }, + "attribute_six": { + Renderer: Primitive(json.Number("3"), json.Number("4"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "attribute:two" = 2 -> 3 + ~ attribute_one = 1 -> 2 + ~ attribute_six = 3 -> 4 + } +`, + }, + "map_create_empty": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{}), + Action: plans.Create, + }, + expected: "{}", + }, + "map_create": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + }), + Action: plans.Create, + }, + expected: ` +{ + + "element_one" = "new" + } +`, + }, + "map_delete_empty": { + diff: computed.Diff{ + Renderer: 
Map(map[string]computed.Diff{}), + Action: plans.Delete, + }, + expected: "{} -> null", + }, + "map_delete": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive("old", nil, cty.String), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, + expected: ` +{ + - "element_one" = "old" + } -> null +`, + }, + "map_create_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + "element_one" = "new" + } +`, + }, + "map_update_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive("old", "new", cty.String), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "element_one" = "old" -> "new" + } +`, + }, + "map_delete_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive("old", nil, cty.String), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + - "element_one" = "old" -> null + } +`, + }, + "map_update_forces_replacement": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive("old", "new", cty.String), + Action: plans.Update, + }, + }), + Action: plans.Update, + Replace: true, + }, + expected: ` +{ # forces replacement + ~ "element_one" = "old" -> "new" + } +`, + }, + "map_ignore_unchanged_elements": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + "element_two": { + Renderer: Primitive("old", "old", cty.String), + Action: plans.NoOp, + }, + "element_three": { + Renderer: Primitive("old", "new", cty.String), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ 
+ + "element_one" = "new" + ~ "element_three" = "old" -> "new" + # (1 unchanged element hidden) + } +`, + }, + "map_create_sensitive_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, false, true), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + "element_one" = (sensitive value) + } +`, + }, + "map_update_sensitive_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, true, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "element_one" = (sensitive value) + } +`, + }, + "map_update_sensitive_element_status": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("0"), cty.Number), + Action: plans.NoOp, + }, true, false), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. 
+ ~ "element_one" = (sensitive value) + } +`, + }, + "map_delete_sensitive_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, true, false), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + - "element_one" = (sensitive value) -> null + } +`, + }, + "map_create_computed_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Unknown(computed.Diff{}), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + "element_one" = (known after apply) + } +`, + }, + "map_update_computed_element": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "element_one" = 1 -> (known after apply) + } +`, + }, + "map_aligns_key": { + diff: computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "element_two": { + Renderer: Primitive(json.Number("1"), json.Number("3"), cty.Number), + Action: plans.Update, + }, + "element_three": { + Renderer: Primitive(json.Number("1"), json.Number("4"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "element_one" = 1 -> 2 + ~ "element_three" = 1 -> 4 + ~ "element_two" = 1 -> 3 + } +`, + }, + "nested_map_does_not_align_keys": { + diff: computed.Diff{ + Renderer: NestedMap(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "element_two": { + Renderer: 
Primitive(json.Number("1"), json.Number("3"), cty.Number), + Action: plans.Update, + }, + "element_three": { + Renderer: Primitive(json.Number("1"), json.Number("4"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "element_one" = 1 -> 2 + ~ "element_three" = 1 -> 4 + ~ "element_two" = 1 -> 3 + } +`, + }, + "list_create_empty": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{}), + Action: plans.Create, + }, + expected: "[]", + }, + "list_create": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Create, + }, + expected: ` +[ + + 1, + ] +`, + }, + "list_delete_empty": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{}), + Action: plans.Delete, + }, + expected: "[] -> null", + }, + "list_delete": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, + expected: ` +[ + - 1, + ] -> null +`, + }, + "list_create_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + 1, + ] +`, + }, + "list_update_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 0 -> 1, + ] +`, + }, + "list_replace_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - 0, + + 1, + ] +`, + }, + 
"list_delete_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - 0, + ] +`, + }, + "list_update_forces_replacement": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + Replace: true, + }, + expected: ` +[ # forces replacement + ~ 0 -> 1, + ] +`, + }, + "list_update_ignores_unchanged": { + diff: computed.Diff{ + Renderer: NestedList([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("0"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("1"), json.Number("1"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("2"), json.Number("5"), cty.Number), + Action: plans.Update, + }, + { + Renderer: Primitive(json.Number("3"), json.Number("3"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("4"), json.Number("4"), cty.Number), + Action: plans.NoOp, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 2 -> 5, + # (4 unchanged elements hidden) + ] +`, + }, + "list_update_ignored_unchanged_with_context": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("0"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("1"), json.Number("1"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("2"), json.Number("5"), cty.Number), + Action: plans.Update, + }, + { + Renderer: Primitive(json.Number("3"), json.Number("3"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("4"), json.Number("4"), cty.Number), + Action: plans.NoOp, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + # (1 unchanged element hidden) + 1, + ~ 2 -> 
5, + 3, + # (1 unchanged element hidden) + ] +`, + }, + "list_create_sensitive_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, false, true), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + (sensitive value), + ] +`, + }, + "list_delete_sensitive_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, true, false), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - (sensitive value), + ] +`, + }, + "list_update_sensitive_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, true, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ (sensitive value), + ] +`, + }, + "list_update_sensitive_element_status": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("1"), json.Number("1"), cty.Number), + Action: plans.NoOp, + }, false, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. The value is unchanged. 
+ ~ (sensitive value), + ] +`, + }, + "list_create_computed_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Unknown(computed.Diff{}), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + (known after apply), + ] +`, + }, + "list_update_computed_element": { + diff: computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 0 -> (known after apply), + ] +`, + }, + "set_create_empty": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{}), + Action: plans.Create, + }, + expected: "[]", + }, + "set_create": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Create, + }, + expected: ` +[ + + 1, + ] +`, + }, + "set_delete_empty": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{}), + Action: plans.Delete, + }, + expected: "[] -> null", + }, + "set_delete": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, + expected: ` +[ + - 1, + ] -> null +`, + }, + "set_create_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + 1, + ] +`, + }, + "set_update_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 0 -> 1, + ] +`, + }, + "set_replace_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + 
Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - 0, + + 1, + ] +`, + }, + "set_delete_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - 0, + ] +`, + }, + "set_update_forces_replacement": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + Replace: true, + }, + expected: ` +[ # forces replacement + ~ 0 -> 1, + ] +`, + }, + "nested_set_update_forces_replacement": { + diff: computed.Diff{ + Renderer: NestedSet([]computed.Diff{ + { + Renderer: Object(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + }), + Action: plans.Update, + Replace: true, + }, + expected: ` +[ + ~ { # forces replacement + ~ attribute_one = 0 -> 1 + }, + ] +`, + }, + "set_update_ignores_unchanged": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Primitive(json.Number("0"), json.Number("0"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("1"), json.Number("1"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("2"), json.Number("5"), cty.Number), + Action: plans.Update, + }, + { + Renderer: Primitive(json.Number("3"), json.Number("3"), cty.Number), + Action: plans.NoOp, + }, + { + Renderer: Primitive(json.Number("4"), json.Number("4"), cty.Number), + Action: plans.NoOp, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 2 -> 5, + # (4 unchanged elements hidden) + ] +`, + }, + 
"set_create_sensitive_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, false, true), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + (sensitive value), + ] +`, + }, + "set_delete_sensitive_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, false, true), + Action: plans.Delete, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + - (sensitive value), + ] +`, + }, + "set_update_sensitive_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("0"), json.Number("1"), cty.Number), + Action: plans.Update, + }, true, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ (sensitive value), + ] +`, + }, + "set_update_sensitive_element_status": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Sensitive(computed.Diff{ + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, false, true), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. 
+ ~ (sensitive value), + ] +`, + }, + "set_create_computed_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Unknown(computed.Diff{}), + Action: plans.Create, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + + (known after apply), + ] +`, + }, + "set_update_computed_element": { + diff: computed.Diff{ + Renderer: Set([]computed.Diff{ + { + Renderer: Unknown(computed.Diff{ + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }), + Action: plans.Update, + }, + }), + Action: plans.Update, + }, + expected: ` +[ + ~ 0 -> (known after apply), + ] +`, + }, + "create_empty_block": { + diff: computed.Diff{ + Renderer: Block(nil, Blocks{}), + Action: plans.Create, + }, + expected: "{}", + }, + "create_populated_block": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "root", cty.String), + Action: plans.Create, + }, + "boolean": { + Renderer: Primitive(nil, true, cty.Bool), + Action: plans.Create, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "one", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Create, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "two", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Create, + }, + }, + }), + Action: plans.Create, + }, + expected: ` +{ + + boolean = true + + string = "root" + + + nested_block { + + string = "one" + } + + + nested_block_two { + + string = "two" + } + }`, + }, + "update_empty_block": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "root", cty.String), + Action: plans.Create, + }, + "boolean": { + Renderer: Primitive(nil, true, cty.Bool), + Action: plans.Create, + }, + }, Blocks{ + SingleBlocks: 
map[string]computed.Diff{ + "nested_block": { + + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "one", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Create, + }, + "nested_block_two": { + + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "two", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Create, + }, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + + boolean = true + + string = "root" + + + nested_block { + + string = "one" + } + + + nested_block_two { + + string = "two" + } + }`, + }, + "update_populated_block": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "root", cty.String), + Action: plans.Create, + }, + "boolean": { + Renderer: Primitive(false, true, cty.Bool), + Action: plans.Update, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "one", cty.String), + Action: plans.NoOp, + }, + }, Blocks{}), + Action: plans.NoOp, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive(nil, "two", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Create, + }, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ boolean = false -> true + + string = "root" + + + nested_block_two { + + string = "two" + } + + # (1 unchanged block hidden) + }`, + }, + "clear_populated_block": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("root", nil, cty.String), + Action: plans.Delete, + }, + "boolean": { + Renderer: Primitive(true, nil, cty.Bool), + Action: plans.Delete, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("one", 
nil, cty.String), + Action: plans.Delete, + }, + }, Blocks{}), + Action: plans.Delete, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("two", nil, cty.String), + Action: plans.Delete, + }, + }, Blocks{}), + Action: plans.Delete, + }, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + - boolean = true -> null + - string = "root" -> null + + - nested_block { + - string = "one" -> null + } + + - nested_block_two { + - string = "two" -> null + } + }`, + }, + "delete_populated_block": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("root", nil, cty.String), + Action: plans.Delete, + }, + "boolean": { + Renderer: Primitive(true, nil, cty.Bool), + Action: plans.Delete, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("one", nil, cty.String), + Action: plans.Delete, + }, + }, Blocks{}), + Action: plans.Delete, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("two", nil, cty.String), + Action: plans.Delete, + }, + }, Blocks{}), + Action: plans.Delete, + }, + }, + }), + Action: plans.Delete, + }, + expected: ` +{ + - boolean = true -> null + - string = "root" -> null + + - nested_block { + - string = "one" -> null + } + + - nested_block_two { + - string = "two" -> null + } + }`, + }, + "list_block_update": { + diff: computed.Diff{ + Renderer: Block( + nil, + Blocks{ + ListBlocks: map[string][]computed.Diff{ + "list_blocks": { + { + Renderer: Block(map[string]computed.Diff{ + "number": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "string": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Update, + }, + { + Renderer: Block(map[string]computed.Diff{ + "number": { + 
Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + "string": { + Renderer: Primitive("old", "new", cty.String), + Action: plans.Update, + }, + }, Blocks{}), + Action: plans.Update, + }, + }, + }, + }), + }, + expected: ` +{ + ~ list_blocks { + ~ number = 1 -> 2 + + string = "new" + } + ~ list_blocks { + - number = 1 -> null + ~ string = "old" -> "new" + } + }`, + }, + "set_block_update": { + diff: computed.Diff{ + Renderer: Block( + nil, + Blocks{ + SetBlocks: map[string][]computed.Diff{ + "set_blocks": { + { + Renderer: Block(map[string]computed.Diff{ + "number": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "string": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Update, + }, + { + Renderer: Block(map[string]computed.Diff{ + "number": { + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + "string": { + Renderer: Primitive("old", "new", cty.String), + Action: plans.Update, + }, + }, Blocks{}), + Action: plans.Update, + }, + }, + }, + }), + }, + expected: ` +{ + ~ set_blocks { + ~ number = 1 -> 2 + + string = "new" + } + ~ set_blocks { + - number = 1 -> null + ~ string = "old" -> "new" + } + }`, + }, + "map_block_update": { + diff: computed.Diff{ + Renderer: Block( + nil, + Blocks{ + MapBlocks: map[string]map[string]computed.Diff{ + "list_blocks": { + "key_one": { + Renderer: Block(map[string]computed.Diff{ + "number": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "string": { + Renderer: Primitive(nil, "new", cty.String), + Action: plans.Create, + }, + }, Blocks{}), + Action: plans.Update, + }, + "key:two": { + Renderer: Block(map[string]computed.Diff{ + "number": { + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + "string": { + Renderer: Primitive("old", "new", cty.String), + 
Action: plans.Update, + }, + }, Blocks{}), + Action: plans.Update, + }, + }, + }, + }), + }, + expected: ` +{ + ~ list_blocks "key:two" { + - number = 1 -> null + ~ string = "old" -> "new" + } + ~ list_blocks "key_one" { + ~ number = 1 -> 2 + + string = "new" + } + } +`, + }, + "sensitive_block": { + diff: computed.Diff{ + Renderer: SensitiveBlock(computed.Diff{ + Renderer: Block(nil, Blocks{}), + Action: plans.NoOp, + }, true, true), + Action: plans.Update, + }, + expected: ` +{ + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } +`, + }, + "delete_empty_block": { + diff: computed.Diff{ + Renderer: Block(nil, Blocks{}), + Action: plans.Delete, + }, + expected: "{}", + }, + "block_escapes_keys": { + diff: computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "attribute_one": { + Renderer: Primitive(json.Number("1"), json.Number("2"), cty.Number), + Action: plans.Update, + }, + "attribute:two": { + Renderer: Primitive(json.Number("2"), json.Number("3"), cty.Number), + Action: plans.Update, + }, + "attribute_six": { + Renderer: Primitive(json.Number("3"), json.Number("4"), cty.Number), + Action: plans.Update, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block:one": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("one", "four", cty.String), + Action: plans.Update, + }, + }, Blocks{}), + Action: plans.Update, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("two", "three", cty.String), + Action: plans.Update, + }, + }, Blocks{}), + Action: plans.Update, + }, + }, + }), + Action: plans.Update, + }, + expected: ` +{ + ~ "attribute:two" = 2 -> 3 + ~ attribute_one = 1 -> 2 + ~ attribute_six = 3 -> 4 + + ~ "nested_block:one" { + ~ string = "one" -> "four" + } + + ~ nested_block_two { + ~ string = "two" -> "three" + } + }`, + }, + "block_always_includes_important_attributes": { + diff: 
computed.Diff{ + Renderer: Block(map[string]computed.Diff{ + "id": { + Renderer: Primitive("root", "root", cty.String), + Action: plans.NoOp, + }, + "boolean": { + Renderer: Primitive(false, false, cty.Bool), + Action: plans.NoOp, + }, + }, Blocks{ + SingleBlocks: map[string]computed.Diff{ + "nested_block": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("one", "one", cty.String), + Action: plans.NoOp, + }, + }, Blocks{}), + Action: plans.NoOp, + }, + "nested_block_two": { + Renderer: Block(map[string]computed.Diff{ + "string": { + Renderer: Primitive("two", "two", cty.String), + Action: plans.NoOp, + }, + }, Blocks{}), + Action: plans.NoOp, + }, + }, + }), + Action: plans.NoOp, + }, + expected: ` +{ + id = "root" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + }`, + }, + "output_map_to_list": { + diff: computed.Diff{ + Renderer: TypeChange(computed.Diff{ + Renderer: Map(map[string]computed.Diff{ + "element_one": { + Renderer: Primitive(json.Number("0"), nil, cty.Number), + Action: plans.Delete, + }, + "element_two": { + Renderer: Primitive(json.Number("1"), nil, cty.Number), + Action: plans.Delete, + }, + }), + Action: plans.Delete, + }, computed.Diff{ + Renderer: List([]computed.Diff{ + { + Renderer: Primitive(nil, json.Number("0"), cty.Number), + Action: plans.Create, + }, + { + Renderer: Primitive(nil, json.Number("1"), cty.Number), + Action: plans.Create, + }, + }), + Action: plans.Create, + }), + }, + expected: ` +{ + - "element_one" = 0 + - "element_two" = 1 + } -> [ + + 0, + + 1, + ] +`, + }, + "json_string_no_symbols": { + diff: computed.Diff{ + Renderer: Primitive("{\"key\":\"value\"}", "{\"key\":\"value\"}", cty.String), + Action: plans.NoOp, + }, + opts: computed.RenderHumanOpts{ + HideDiffActionSymbols: true, + ShowUnchangedChildren: true, + }, + expected: ` +jsonencode( + { + key = "value" + } +) +`, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + + opts := 
tc.opts.Clone() + opts.Colorize = &colorize + + expected := strings.TrimSpace(tc.expected) + actual := tc.diff.RenderHuman(0, opts) + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Fatalf("\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", expected, actual, diff) + } + }) + } +} diff --git a/pkg/command/jsonformat/computed/renderers/sensitive.go b/pkg/command/jsonformat/computed/renderers/sensitive.go new file mode 100644 index 00000000000..4d60931481f --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/sensitive.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +var _ computed.DiffRenderer = (*sensitiveRenderer)(nil) + +func Sensitive(change computed.Diff, beforeSensitive, afterSensitive bool) computed.DiffRenderer { + return &sensitiveRenderer{ + inner: change, + beforeSensitive: beforeSensitive, + afterSensitive: afterSensitive, + } +} + +type sensitiveRenderer struct { + inner computed.Diff + + beforeSensitive bool + afterSensitive bool +} + +func (renderer sensitiveRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + // If the -show-sensitive argument is set, then invoke RenderHuman with the inner computed.Diff to display sensitive values. 
+ if opts.ShowSensitive { + return renderer.inner.RenderHuman(indent, opts) + } + + return fmt.Sprintf("(sensitive value)%s%s", nullSuffix(diff.Action, opts), forcesReplacement(diff.Replace, opts)) +} + +func (renderer sensitiveRenderer) WarningsHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) []string { + if (renderer.beforeSensitive == renderer.afterSensitive) || renderer.inner.Action == plans.Create || renderer.inner.Action == plans.Delete { + // Only display warnings for sensitive values if they are changing from + // being sensitive or to being sensitive and if they are not being + // destroyed or created. + return []string{} + } + + var warning string + if renderer.beforeSensitive { + warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will no longer be marked as sensitive\n%s # after applying this change.", formatIndent(indent))) + } else { + warning = opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this attribute value will be marked as sensitive and will not\n%s # display in UI output after applying this change.", formatIndent(indent))) + } + + if renderer.inner.Action == plans.NoOp { + return []string{fmt.Sprintf("%s The value is unchanged.", warning)} + } + return []string{warning} +} diff --git a/pkg/command/jsonformat/computed/renderers/sensitive_block.go b/pkg/command/jsonformat/computed/renderers/sensitive_block.go new file mode 100644 index 00000000000..260fd6e2303 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/sensitive_block.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +func SensitiveBlock(diff computed.Diff, beforeSensitive, afterSensitive bool) computed.DiffRenderer { + return &sensitiveBlockRenderer{ + inner: diff, + beforeSensitive: beforeSensitive, + afterSensitive: afterSensitive, + } +} + +type sensitiveBlockRenderer struct { + inner computed.Diff + + afterSensitive bool + beforeSensitive bool +} + +func (renderer sensitiveBlockRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + cachedLinePrefix := fmt.Sprintf("%s%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts)) + return fmt.Sprintf("{%s\n%s # At least one attribute in this block is (or was) sensitive,\n%s # so its contents will not be displayed.\n%s}", + forcesReplacement(diff.Replace, opts), cachedLinePrefix, cachedLinePrefix, cachedLinePrefix) +} + +func (renderer sensitiveBlockRenderer) WarningsHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) []string { + if (renderer.beforeSensitive == renderer.afterSensitive) || renderer.inner.Action == plans.Create || renderer.inner.Action == plans.Delete { + // Only display warnings for sensitive values if they are changing from + // being sensitive or to being sensitive and if they are not being + // destroyed or created. 
+ return []string{} + } + + if renderer.beforeSensitive { + return []string{opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this block will no longer be marked as sensitive\n%s # after applying this change.", formatIndent(indent)))} + } else { + return []string{opts.Colorize.Color(fmt.Sprintf(" # [yellow]Warning[reset]: this block will be marked as sensitive and will not\n%s # display in UI output after applying this change.", formatIndent(indent)))} + } +} diff --git a/pkg/command/jsonformat/computed/renderers/set.go b/pkg/command/jsonformat/computed/renderers/set.go new file mode 100644 index 00000000000..fc4ad0f09b1 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/set.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "bytes" + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +var _ computed.DiffRenderer = (*setRenderer)(nil) + +func Set(elements []computed.Diff) computed.DiffRenderer { + return &setRenderer{ + elements: elements, + } +} + +func NestedSet(elements []computed.Diff) computed.DiffRenderer { + return &setRenderer{ + elements: elements, + overrideForcesReplacement: true, + } +} + +type setRenderer struct { + NoWarningsRenderer + + elements []computed.Diff + + overrideForcesReplacement bool +} + +func (renderer setRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + // Sets are a bit finicky, nested sets don't render the forces replacement + // suffix themselves, but push it onto their children. So if we are + // overriding the forces replacement setting, we set it to true for children + // and false for ourselves. 
+ displayForcesReplacementInSelf := diff.Replace && !renderer.overrideForcesReplacement + displayForcesReplacementInChildren := diff.Replace && renderer.overrideForcesReplacement + + if len(renderer.elements) == 0 { + return fmt.Sprintf("[]%s%s", nullSuffix(diff.Action, opts), forcesReplacement(displayForcesReplacementInSelf, opts)) + } + + elementOpts := opts.Clone() + elementOpts.OverrideNullSuffix = true + elementOpts.OverrideForcesReplacement = displayForcesReplacementInChildren + + unchangedElements := 0 + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("[%s\n", forcesReplacement(displayForcesReplacementInSelf, opts))) + for _, element := range renderer.elements { + if element.Action == plans.NoOp && !opts.ShowUnchangedChildren { + unchangedElements++ + continue + } + + for _, warning := range element.WarningsHuman(indent+1, opts) { + buf.WriteString(fmt.Sprintf("%s%s\n", formatIndent(indent+1), warning)) + } + buf.WriteString(fmt.Sprintf("%s%s%s,\n", formatIndent(indent+1), writeDiffActionSymbol(element.Action, elementOpts), element.RenderHuman(indent+1, elementOpts))) + } + + if unchangedElements > 0 { + buf.WriteString(fmt.Sprintf("%s%s%s\n", formatIndent(indent+1), writeDiffActionSymbol(plans.NoOp, opts), unchanged("element", unchangedElements, opts))) + } + + buf.WriteString(fmt.Sprintf("%s%s]%s", formatIndent(indent), writeDiffActionSymbol(plans.NoOp, opts), nullSuffix(diff.Action, opts))) + return buf.String() +} diff --git a/pkg/command/jsonformat/computed/renderers/string.go b/pkg/command/jsonformat/computed/renderers/string.go new file mode 100644 index 00000000000..e48154a47d8 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/string.go @@ -0,0 +1,64 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" +) + +type evaluatedString struct { + String string + Json interface{} + + IsMultiline bool + IsNull bool +} + +func evaluatePrimitiveString(value interface{}, opts computed.RenderHumanOpts) evaluatedString { + if value == nil { + return evaluatedString{ + String: opts.Colorize.Color("[dark_gray]null[reset]"), + IsNull: true, + } + } + + str := value.(string) + + if strings.HasPrefix(str, "{") || strings.HasPrefix(str, "[") { + var jv interface{} + decoder := json.NewDecoder(bytes.NewBufferString(str)) + decoder.UseNumber() + if err := decoder.Decode(&jv); err == nil { + return evaluatedString{ + String: str, + Json: jv, + } + } + } + + if strings.Contains(str, "\n") { + return evaluatedString{ + String: strings.TrimSpace(str), + IsMultiline: true, + } + } + + return evaluatedString{ + String: str, + } +} + +func (e evaluatedString) RenderSimple() string { + if e.IsNull { + return e.String + } + return fmt.Sprintf("%q", e.String) +} diff --git a/pkg/command/jsonformat/computed/renderers/testing.go b/pkg/command/jsonformat/computed/renderers/testing.go new file mode 100644 index 00000000000..c48b4a0fe2f --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/testing.go @@ -0,0 +1,323 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +type ValidateDiffFunction func(t *testing.T, diff computed.Diff) + +func validateDiff(t *testing.T, diff computed.Diff, expectedAction plans.Action, expectedReplace bool) { + if diff.Replace != expectedReplace || diff.Action != expectedAction { + t.Errorf("\nreplace:\n\texpected:%t\n\tactual:%t\naction:\n\texpected:%s\n\tactual:%s", expectedReplace, diff.Replace, expectedAction, diff.Action) + } +} + +func ValidatePrimitive(before, after interface{}, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + primitive, ok := diff.Renderer.(*primitiveRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + beforeDiff := cmp.Diff(primitive.before, before) + afterDiff := cmp.Diff(primitive.after, after) + + if len(beforeDiff) > 0 || len(afterDiff) > 0 { + t.Errorf("before diff: (%s), after diff: (%s)", beforeDiff, afterDiff) + } + } +} + +func ValidateObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + object, ok := diff.Renderer.(*objectRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if !object.overrideNullSuffix { + t.Errorf("created the wrong type of object renderer") + } + + validateMapType(t, object.attributes, attributes) + } +} + +func ValidateNestedObject(attributes map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + object, ok := diff.Renderer.(*objectRenderer) + if !ok 
{ + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if object.overrideNullSuffix { + t.Errorf("created the wrong type of object renderer") + } + + validateMapType(t, object.attributes, attributes) + } +} + +func ValidateMap(elements map[string]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + m, ok := diff.Renderer.(*mapRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateMapType(t, m.elements, elements) + } +} + +func validateMapType(t *testing.T, actual map[string]computed.Diff, expected map[string]ValidateDiffFunction) { + validateKeys(t, actual, expected) + + for key, expected := range expected { + if actual, ok := actual[key]; ok { + expected(t, actual) + } + } +} + +func validateKeys[C, V any](t *testing.T, actual map[string]C, expected map[string]V) { + if len(actual) != len(expected) { + + var actualAttributes []string + var expectedAttributes []string + + for key := range actual { + actualAttributes = append(actualAttributes, key) + } + for key := range expected { + expectedAttributes = append(expectedAttributes, key) + } + + sort.Strings(actualAttributes) + sort.Strings(expectedAttributes) + + if diff := cmp.Diff(actualAttributes, expectedAttributes); len(diff) > 0 { + t.Errorf("actual and expected attributes did not match: %s", diff) + } + } +} + +func ValidateList(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + list, ok := diff.Renderer.(*listRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if !list.displayContext { + t.Errorf("created the wrong type of list renderer") + } + + validateSliceType(t, list.elements, elements) + } +} + +func ValidateNestedList(elements 
[]ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + list, ok := diff.Renderer.(*listRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if list.displayContext { + t.Errorf("created the wrong type of list renderer") + } + + validateSliceType(t, list.elements, elements) + } +} + +func ValidateSet(elements []ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + set, ok := diff.Renderer.(*setRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateSliceType(t, set.elements, elements) + } +} + +func validateSliceType(t *testing.T, actual []computed.Diff, expected []ValidateDiffFunction) { + if len(actual) != len(expected) { + t.Errorf("expected %d elements but found %d elements", len(expected), len(actual)) + return + } + + for ix := 0; ix < len(expected); ix++ { + expected[ix](t, actual[ix]) + } +} + +func ValidateBlock( + attributes map[string]ValidateDiffFunction, + singleBlocks map[string]ValidateDiffFunction, + listBlocks map[string][]ValidateDiffFunction, + mapBlocks map[string]map[string]ValidateDiffFunction, + setBlocks map[string][]ValidateDiffFunction, + action plans.Action, + replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + block, ok := diff.Renderer.(*blockRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + validateKeys(t, block.attributes, attributes) + validateKeys(t, block.blocks.SingleBlocks, singleBlocks) + validateKeys(t, block.blocks.ListBlocks, listBlocks) + validateKeys(t, block.blocks.MapBlocks, mapBlocks) + validateKeys(t, block.blocks.SetBlocks, setBlocks) + + for key, expected := range attributes { 
+ if actual, ok := block.attributes[key]; ok { + expected(t, actual) + } + } + + for key, expected := range singleBlocks { + expected(t, block.blocks.SingleBlocks[key]) + } + + for key, expected := range listBlocks { + if actual, ok := block.blocks.ListBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range setBlocks { + if actual, ok := block.blocks.SetBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range setBlocks { + if actual, ok := block.blocks.SetBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for ix := range expected { + expected[ix](t, actual[ix]) + } + } + } + + for key, expected := range mapBlocks { + if actual, ok := block.blocks.MapBlocks[key]; ok { + if len(actual) != len(expected) { + t.Errorf("expected %d blocks within %s but found %d elements", len(expected), key, len(actual)) + } + for dKey := range expected { + expected[dKey](t, actual[dKey]) + } + } + } + } +} + +func ValidateTypeChange(before, after ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + typeChange, ok := diff.Renderer.(*typeChangeRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + before(t, typeChange.before) + after(t, typeChange.after) + } +} + +func ValidateSensitive(inner ValidateDiffFunction, beforeSensitive, afterSensitive bool, action plans.Action, replace bool) ValidateDiffFunction { + return func(t 
*testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + sensitive, ok := diff.Renderer.(*sensitiveRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if beforeSensitive != sensitive.beforeSensitive || afterSensitive != sensitive.afterSensitive { + t.Errorf("before or after sensitive values don't match:\n\texpected; before: %t after: %t\n\tactual; before: %t, after: %t", beforeSensitive, afterSensitive, sensitive.beforeSensitive, sensitive.afterSensitive) + } + + inner(t, sensitive.inner) + } +} + +func ValidateUnknown(before ValidateDiffFunction, action plans.Action, replace bool) ValidateDiffFunction { + return func(t *testing.T, diff computed.Diff) { + validateDiff(t, diff, action, replace) + + unknown, ok := diff.Renderer.(*unknownRenderer) + if !ok { + t.Errorf("invalid renderer type: %T", diff.Renderer) + return + } + + if before == nil { + if unknown.before.Renderer != nil { + t.Errorf("did not expect a before renderer, but found one") + } + return + } + + if unknown.before.Renderer == nil { + t.Errorf("expected a before renderer, but found none") + } + + before(t, unknown.before) + } +} diff --git a/pkg/command/jsonformat/computed/renderers/type_change.go b/pkg/command/jsonformat/computed/renderers/type_change.go new file mode 100644 index 00000000000..9a41f08503f --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/type_change.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" +) + +var _ computed.DiffRenderer = (*typeChangeRenderer)(nil) + +func TypeChange(before, after computed.Diff) computed.DiffRenderer { + return &typeChangeRenderer{ + before: before, + after: after, + } +} + +type typeChangeRenderer struct { + NoWarningsRenderer + + before computed.Diff + after computed.Diff +} + +func (renderer typeChangeRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + opts.OverrideNullSuffix = true // Never render null suffix for children of type changes. + return fmt.Sprintf("%s %s %s", renderer.before.RenderHuman(indent, opts), opts.Colorize.Color("[yellow]->[reset]"), renderer.after.RenderHuman(indent, opts)) +} diff --git a/pkg/command/jsonformat/computed/renderers/unknown.go b/pkg/command/jsonformat/computed/renderers/unknown.go new file mode 100644 index 00000000000..54baed51fd0 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/unknown.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + + "github.com/kubegems/opentofu/pkg/plans" +) + +var _ computed.DiffRenderer = (*unknownRenderer)(nil) + +func Unknown(before computed.Diff) computed.DiffRenderer { + return &unknownRenderer{ + before: before, + } +} + +type unknownRenderer struct { + NoWarningsRenderer + + before computed.Diff +} + +func (renderer unknownRenderer) RenderHuman(diff computed.Diff, indent int, opts computed.RenderHumanOpts) string { + if diff.Action == plans.Create { + return fmt.Sprintf("(known after apply)%s", forcesReplacement(diff.Replace, opts)) + } + + // Never render null suffix for children of unknown changes. 
+ opts.OverrideNullSuffix = true + return fmt.Sprintf("%s -> (known after apply)%s", renderer.before.RenderHuman(indent, opts), forcesReplacement(diff.Replace, opts)) +} diff --git a/pkg/command/jsonformat/computed/renderers/util.go b/pkg/command/jsonformat/computed/renderers/util.go new file mode 100644 index 00000000000..c3ab13017b5 --- /dev/null +++ b/pkg/command/jsonformat/computed/renderers/util.go @@ -0,0 +1,95 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package renderers + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/command/format" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +// NoWarningsRenderer defines a Warnings function that returns an empty list of +// warnings. This can be used by other renderers to ensure we don't see lots of +// repeats of this empty function. +type NoWarningsRenderer struct{} + +// WarningsHuman returns an empty slice, as the name NoWarningsRenderer suggests. +func (render NoWarningsRenderer) WarningsHuman(_ computed.Diff, _ int, _ computed.RenderHumanOpts) []string { + return nil +} + +// nullSuffix returns the `-> null` suffix if the change is a delete action, and +// it has not been overridden. +func nullSuffix(action plans.Action, opts computed.RenderHumanOpts) string { + if !opts.OverrideNullSuffix && action == plans.Delete { + return opts.Colorize.Color(" [dark_gray]-> null[reset]") + } + return "" +} + +// forcesReplacement returns the `# forces replacement` suffix if this change is +// driving the entire resource to be replaced. 
+func forcesReplacement(replace bool, opts computed.RenderHumanOpts) string { + if replace || opts.OverrideForcesReplacement { + return opts.Colorize.Color(" [red]# forces replacement[reset]") + } + return "" +} + +// indent returns whitespace that is the required length for the specified +// indent. +func formatIndent(indent int) string { + return strings.Repeat(" ", indent) +} + +// unchanged prints out a description saying how many of 'keyword' have been +// hidden because they are unchanged or noop actions. +func unchanged(keyword string, count int, opts computed.RenderHumanOpts) string { + if count == 1 { + return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %s hidden)[reset]", count, keyword)) + } + return opts.Colorize.Color(fmt.Sprintf("[dark_gray]# (%d unchanged %ss hidden)[reset]", count, keyword)) +} + +// EnsureValidAttributeName checks if `name` contains any HCL syntax and calls +// and returns hclEscapeString. +func EnsureValidAttributeName(name string) string { + if !hclsyntax.ValidIdentifier(name) { + return hclEscapeString(name) + } + return name +} + +// hclEscapeString formats the input string into a format that is safe for +// rendering within HCL. +// +// Note, this function doesn't actually do a very good job of this currently. We +// need to expose some internal functions from HCL in a future version and call +// them from here. For now, just use "%q" formatting. +func hclEscapeString(str string) string { + // TODO: Replace this with more complete HCL logic instead of the simple + // go workaround. + return fmt.Sprintf("%q", str) +} + +// writeDiffActionSymbol writes out the symbols for the associated action, and +// handles localized colorization of the symbol as well as indenting the symbol +// to be 4 spaces wide. +// +// If the opts has HideDiffActionSymbols set then this function returns an empty +// string. 
+func writeDiffActionSymbol(action plans.Action, opts computed.RenderHumanOpts) string { + if opts.HideDiffActionSymbols { + return "" + } + return fmt.Sprintf("%s ", opts.Colorize.Color(format.DiffActionSymbol(action))) +} diff --git a/pkg/command/jsonformat/diff.go b/pkg/command/jsonformat/diff.go new file mode 100644 index 00000000000..778546e224e --- /dev/null +++ b/pkg/command/jsonformat/diff.go @@ -0,0 +1,108 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/differ" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/plans" +) + +func precomputeDiffs(plan Plan, mode plans.Mode) diffs { + diffs := diffs{ + outputs: make(map[string]computed.Diff), + } + + for _, drift := range plan.ResourceDrift { + + var relevantAttrs attribute_path.Matcher + if mode == plans.RefreshOnlyMode { + // For a refresh only plan, we show all the drift. + relevantAttrs = attribute_path.AlwaysMatcher() + } else { + matcher := attribute_path.Empty(true) + + // Otherwise we only want to show the drift changes that are + // relevant. + for _, attr := range plan.RelevantAttributes { + if len(attr.Resource) == 0 || attr.Resource == drift.Address { + matcher = attribute_path.AppendSingle(matcher, attr.Attr) + } + } + + if len(matcher.Paths) > 0 { + relevantAttrs = matcher + } + } + + if relevantAttrs == nil { + // If we couldn't build a relevant attribute matcher, then we are + // not going to show anything for this drift. 
+ continue + } + + schema := plan.getSchema(drift) + change := structured.FromJsonChange(drift.Change, relevantAttrs) + diffs.drift = append(diffs.drift, diff{ + change: drift, + diff: differ.ComputeDiffForBlock(change, schema.Block), + }) + } + + for _, change := range plan.ResourceChanges { + schema := plan.getSchema(change) + structuredChange := structured.FromJsonChange(change.Change, attribute_path.AlwaysMatcher()) + diffs.changes = append(diffs.changes, diff{ + change: change, + diff: differ.ComputeDiffForBlock(structuredChange, schema.Block), + }) + } + + for key, output := range plan.OutputChanges { + change := structured.FromJsonChange(output, attribute_path.AlwaysMatcher()) + diffs.outputs[key] = differ.ComputeDiffForOutput(change) + } + + return diffs +} + +type diffs struct { + drift []diff + changes []diff + outputs map[string]computed.Diff +} + +func (d diffs) Empty() bool { + for _, change := range d.changes { + if change.diff.Action != plans.NoOp || change.Moved() { + return false + } + } + + for _, output := range d.outputs { + if output.Action != plans.NoOp { + return false + } + } + + return true +} + +type diff struct { + change jsonplan.ResourceChange + diff computed.Diff +} + +func (d diff) Moved() bool { + return len(d.change.PreviousAddress) > 0 && d.change.PreviousAddress != d.change.Address +} + +func (d diff) Importing() bool { + return d.change.Change.Importing != nil +} diff --git a/pkg/command/jsonformat/differ/attribute.go b/pkg/command/jsonformat/differ/attribute.go new file mode 100644 index 00000000000..25d89c626f9 --- /dev/null +++ b/pkg/command/jsonformat/differ/attribute.go @@ -0,0 +1,89 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + + "github.com/kubegems/opentofu/pkg/command/jsonprovider" +) + +func ComputeDiffForAttribute(change structured.Change, attribute *jsonprovider.Attribute) computed.Diff { + if attribute.AttributeNestedType != nil { + return computeDiffForNestedAttribute(change, attribute.AttributeNestedType) + } + return ComputeDiffForType(change, unmarshalAttribute(attribute)) +} + +func computeDiffForNestedAttribute(change structured.Change, nested *jsonprovider.NestedType) computed.Diff { + if sensitive, ok := checkForSensitiveNestedAttribute(change, nested); ok { + return sensitive + } + + if computed, ok := checkForUnknownNestedAttribute(change, nested); ok { + return computed + } + + switch NestingMode(nested.NestingMode) { + case nestingModeSingle, nestingModeGroup: + return computeAttributeDiffAsNestedObject(change, nested.Attributes) + case nestingModeMap: + return computeAttributeDiffAsNestedMap(change, nested.Attributes) + case nestingModeList: + return computeAttributeDiffAsNestedList(change, nested.Attributes) + case nestingModeSet: + return computeAttributeDiffAsNestedSet(change, nested.Attributes) + default: + panic("unrecognized nesting mode: " + nested.NestingMode) + } +} + +func ComputeDiffForType(change structured.Change, ctype cty.Type) computed.Diff { + if sensitive, ok := checkForSensitiveType(change, ctype); ok { + return sensitive + } + + if computed, ok := checkForUnknownType(change, ctype); ok { + return computed + } + + switch { + case ctype == cty.NilType, ctype == cty.DynamicPseudoType: + // Forward nil or dynamic types over to be processed as outputs. 
+ // There is nothing particularly special about the way outputs are + // processed that make this unsafe, we could just as easily call this + // function computeChangeForDynamicValues(), but external callers will + // only be in this situation when processing outputs so this function + // is named for their benefit. + return ComputeDiffForOutput(change) + case ctype.IsPrimitiveType(): + return computeAttributeDiffAsPrimitive(change, ctype) + case ctype.IsObjectType(): + return computeAttributeDiffAsObject(change, ctype.AttributeTypes()) + case ctype.IsMapType(): + return computeAttributeDiffAsMap(change, ctype.ElementType()) + case ctype.IsListType(): + return computeAttributeDiffAsList(change, ctype.ElementType()) + case ctype.IsTupleType(): + return computeAttributeDiffAsTuple(change, ctype.TupleElementTypes()) + case ctype.IsSetType(): + return computeAttributeDiffAsSet(change, ctype.ElementType()) + default: + panic("unrecognized type: " + ctype.FriendlyName()) + } +} + +func unmarshalAttribute(attribute *jsonprovider.Attribute) cty.Type { + ctyType, err := ctyjson.UnmarshalType(attribute.AttributeType) + if err != nil { + panic("could not unmarshal attribute type: " + err.Error()) + } + return ctyType +} diff --git a/pkg/command/jsonformat/differ/block.go b/pkg/command/jsonformat/differ/block.go new file mode 100644 index 00000000000..cf4dfa81e26 --- /dev/null +++ b/pkg/command/jsonformat/differ/block.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +func ComputeDiffForBlock(change structured.Change, block *jsonprovider.Block) computed.Diff { + if sensitive, ok := checkForSensitiveBlock(change, block); ok { + return sensitive + } + + if unknown, ok := checkForUnknownBlock(change, block); ok { + return unknown + } + + current := change.GetDefaultActionForIteration() + + blockValue := change.AsMap() + + attributes := make(map[string]computed.Diff) + for key, attr := range block.Attributes { + childValue := blockValue.GetChild(key) + + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + childValue = childValue.AsNoOp() + } + + // Empty strings in blocks should be considered null for legacy reasons. + // The SDK doesn't support null strings yet, so we work around this now. + if before, ok := childValue.Before.(string); ok && len(before) == 0 { + childValue.Before = nil + } + if after, ok := childValue.After.(string); ok && len(after) == 0 { + childValue.After = nil + } + + // Always treat changes to blocks as implicit. + childValue.BeforeExplicit = false + childValue.AfterExplicit = false + + childChange := ComputeDiffForAttribute(childValue, attr) + if childChange.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values at all in blocks. 
+ continue + } + + attributes[key] = childChange + current = collections.CompareActions(current, childChange.Action) + } + + blocks := renderers.Blocks{ + ReplaceBlocks: make(map[string]bool), + BeforeSensitiveBlocks: make(map[string]bool), + AfterSensitiveBlocks: make(map[string]bool), + SingleBlocks: make(map[string]computed.Diff), + ListBlocks: make(map[string][]computed.Diff), + SetBlocks: make(map[string][]computed.Diff), + MapBlocks: make(map[string]map[string]computed.Diff), + } + + for key, blockType := range block.BlockTypes { + childValue := blockValue.GetChild(key) + + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + childValue = childValue.AsNoOp() + } + + beforeSensitive := childValue.IsBeforeSensitive() + afterSensitive := childValue.IsAfterSensitive() + forcesReplacement := childValue.ReplacePaths.Matches() + + switch NestingMode(blockType.NestingMode) { + case nestingModeSet: + diffs, action := computeBlockDiffsAsSet(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. + continue + } + blocks.AddAllSetBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeList: + diffs, action := computeBlockDiffsAsList(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. + continue + } + blocks.AddAllListBlock(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeMap: + diffs, action := computeBlockDiffsAsMap(childValue, blockType.Block) + if action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. 
+ continue + } + blocks.AddAllMapBlocks(key, diffs, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, action) + case nestingModeSingle, nestingModeGroup: + diff := ComputeDiffForBlock(childValue, blockType.Block) + if diff.Action == plans.NoOp && childValue.Before == nil && childValue.After == nil { + // Don't record nil values in blocks. + continue + } + blocks.AddSingleBlock(key, diff, forcesReplacement, beforeSensitive, afterSensitive) + current = collections.CompareActions(current, diff.Action) + default: + panic("unrecognized nesting mode: " + blockType.NestingMode) + } + } + + return computed.NewDiff(renderers.Block(attributes, blocks), current, change.ReplacePaths.Matches()) +} diff --git a/pkg/command/jsonformat/differ/differ.go b/pkg/command/jsonformat/differ/differ.go new file mode 100644 index 00000000000..8d83cea6cc1 --- /dev/null +++ b/pkg/command/jsonformat/differ/differ.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" +) + +// asDiff is a helper function to abstract away some simple and common +// functionality when converting a renderer into a concrete diff. +func asDiff(change structured.Change, renderer computed.DiffRenderer) computed.Diff { + return computed.NewDiff(renderer, change.CalculateAction(), change.ReplacePaths.Matches()) +} diff --git a/pkg/command/jsonformat/differ/differ_test.go b/pkg/command/jsonformat/differ/differ_test.go new file mode 100644 index 00000000000..f14ffa32000 --- /dev/null +++ b/pkg/command/jsonformat/differ/differ_test.go @@ -0,0 +1,2948 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +type SetDiff struct { + Before SetDiffEntry + After SetDiffEntry +} + +type SetDiffEntry struct { + SingleDiff renderers.ValidateDiffFunction + ObjectDiff map[string]renderers.ValidateDiffFunction + + Replace bool + Action plans.Action +} + +func (entry SetDiffEntry) Validate(obj func(attributes map[string]renderers.ValidateDiffFunction, action plans.Action, replace bool) renderers.ValidateDiffFunction) renderers.ValidateDiffFunction { + if entry.SingleDiff != nil { + return entry.SingleDiff + } + + return obj(entry.ObjectDiff, entry.Action, entry.Replace) +} + +func TestValue_SimpleBlocks(t *testing.T) { + // Most of the other test functions wrap the test cases in various + // collections or blocks. This function just very simply lets you specify + // individual test cases within blocks for some simple tests. 
+ + tcs := map[string]struct { + input structured.Change + block *jsonprovider.Block + validate renderers.ValidateDiffFunction + }{ + "delete_with_null_sensitive_value": { + input: structured.Change{ + Before: map[string]interface{}{ + "normal_attribute": "some value", + }, + After: nil, + BeforeSensitive: map[string]interface{}{ + "sensitive_attribute": true, + }, + AfterSensitive: false, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "normal_attribute": { + AttributeType: unmarshalType(t, cty.String), + }, + "sensitive_attribute": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "normal_attribute": renderers.ValidatePrimitive("some value", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + }, + "create_with_null_sensitive_value": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "normal_attribute": "some value", + }, + BeforeSensitive: map[string]interface{}{ + "sensitive_attribute": true, + }, + AfterSensitive: false, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "normal_attribute": { + AttributeType: unmarshalType(t, cty.String), + }, + "sensitive_attribute": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "normal_attribute": renderers.ValidatePrimitive(nil, "some value", plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, + } + for name, tc := range tcs { + // Set some default values + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + + if tc.input.RelevantAttributes == nil { + tc.input.RelevantAttributes = attribute_path.AlwaysMatcher() + } + + t.Run(name, func(t *testing.T) { + tc.validate(t, ComputeDiffForBlock(tc.input, tc.block)) + }) + } +} + +func 
TestValue_ObjectAttributes(t *testing.T) { + // This function holds a range of test cases creating, deleting and editing + // objects. It is built in such a way that it can automatically test these + // operations on objects both directly and nested, as well as within all + // types of collections. + + tcs := map[string]struct { + input structured.Change + attributes map[string]cty.Type + validateSingleDiff renderers.ValidateDiffFunction + validateObject renderers.ValidateDiffFunction + validateNestedObject renderers.ValidateDiffFunction + validateDiffs map[string]renderers.ValidateDiffFunction + validateList renderers.ValidateDiffFunction + validateReplace bool + validateAction plans.Action + // Sets break changes out differently to the other collections, so they + // have their own entry. + validateSetDiffs *SetDiff + }{ + "create": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "attribute_one": "new", + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + validateAction: plans.Create, + validateReplace: false, + }, + "delete": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: nil, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + validateAction: plans.Delete, + validateReplace: false, + }, + "create_sensitive": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "attribute_one": "new", + }, + AfterSensitive: true, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateSingleDiff: renderers.ValidateSensitive(renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + 
"attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, plans.Create, false), + false, + true, + plans.Create, + false), + validateNestedObject: renderers.ValidateSensitive(renderers.ValidateNestedObject(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, plans.Create, false), + false, + true, + plans.Create, + false), + }, + "delete_sensitive": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + BeforeSensitive: true, + After: nil, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateSingleDiff: renderers.ValidateSensitive(renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, plans.Delete, false), true, false, plans.Delete, false), + validateNestedObject: renderers.ValidateSensitive(renderers.ValidateNestedObject(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, plans.Delete, false), true, false, plans.Delete, false), + }, + "create_unknown": { + input: structured.Change{ + Before: nil, + After: nil, + Unknown: true, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateSingleDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + "update_unknown": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: nil, + Unknown: true, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateObject: renderers.ValidateUnknown(renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, plans.Delete, false), plans.Update, false), + validateNestedObject: 
renderers.ValidateUnknown(renderers.ValidateNestedObject(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateUnknown(renderers.ValidatePrimitive("old", nil, plans.Delete, false), plans.Update, false), + }, plans.Update, false), plans.Update, false), + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + SingleDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + }, + }, + "create_attribute": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: map[string]interface{}{ + "attribute_one": "new", + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "create_attribute_from_explicit_null": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": nil, + }, + After: map[string]interface{}{ + "attribute_one": "new", + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Delete, + Replace: false, + }, 
+ After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "delete_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{}, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Create, + Replace: false, + }, + }, + }, + "delete_attribute_to_explicit_null": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{ + "attribute_one": nil, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Create, + Replace: false, + }, + }, + }, + "update_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{ + "attribute_one": "new", + }, + 
}, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", "new", plans.Update, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "create_sensitive_attribute": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: map[string]interface{}{ + "attribute_one": "new", + }, + AfterSensitive: map[string]interface{}{ + "attribute_one": true, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "new", plans.Create, false), false, true, plans.Create, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "new", plans.Create, false), false, true, plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "delete_sensitive_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + BeforeSensitive: map[string]interface{}{ + "attribute_one": true, + }, + After: map[string]interface{}{}, + }, + attributes: 
map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive("old", nil, plans.Delete, false), true, false, plans.Delete, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive("old", nil, plans.Delete, false), true, false, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: nil, + Action: plans.Create, + Replace: false, + }, + }, + }, + "update_sensitive_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + BeforeSensitive: map[string]interface{}{ + "attribute_one": true, + }, + After: map[string]interface{}{ + "attribute_one": "new", + }, + AfterSensitive: map[string]interface{}{ + "attribute_one": true, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive("old", "new", plans.Update, false), true, true, plans.Update, false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive("old", nil, plans.Delete, false), true, false, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "new", plans.Create, false), false, true, plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + 
}, + "create_computed_attribute": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: map[string]interface{}{}, + Unknown: map[string]interface{}{ + "attribute_one": true, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateUnknown(nil, plans.Create, false), + }, + validateAction: plans.Update, + validateReplace: false, + }, + "update_computed_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{}, + Unknown: map[string]interface{}{ + "attribute_one": true, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateUnknown( + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + plans.Update, + false), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidateUnknown(nil, plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "ignores_unset_fields": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: map[string]interface{}{}, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{}, + validateAction: plans.NoOp, + validateReplace: false, + }, + "update_replace_self": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{ + "attribute_one": "new", 
+ }, + ReplacePaths: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + {}, + }, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", "new", plans.Update, false), + }, + validateAction: plans.Update, + validateReplace: true, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: true, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + Action: plans.Create, + Replace: true, + }, + }, + }, + "update_replace_attribute": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": "old", + }, + After: map[string]interface{}{ + "attribute_one": "new", + }, + ReplacePaths: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + {"attribute_one"}, + }, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", "new", plans.Update, true), + }, + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, true), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, true), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + "update_includes_relevant_attributes": { + input: structured.Change{ + Before: map[string]interface{}{ + "attribute_one": 
"old_one", + "attribute_two": "old_two", + }, + After: map[string]interface{}{ + "attribute_one": "new_one", + "attribute_two": "new_two", + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + {"attribute_one"}, + }, + }, + }, + attributes: map[string]cty.Type{ + "attribute_one": cty.String, + "attribute_two": cty.String, + }, + validateDiffs: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old_one", "new_one", plans.Update, false), + "attribute_two": renderers.ValidatePrimitive("old_two", "old_two", plans.NoOp, false), + }, + validateList: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + // Lists are a bit special, and in this case is actually + // going to ignore the relevant attributes. This is + // deliberate. See the comments in list.go for an + // explanation. + "attribute_one": renderers.ValidatePrimitive("old_one", "new_one", plans.Update, false), + "attribute_two": renderers.ValidatePrimitive("old_two", "new_two", plans.Update, false), + }, plans.Update, false), + }, plans.Update, false), + validateAction: plans.Update, + validateReplace: false, + validateSetDiffs: &SetDiff{ + Before: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + "attribute_two": renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, + Action: plans.Delete, + Replace: false, + }, + After: SetDiffEntry{ + ObjectDiff: map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + "attribute_two": renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, + Action: plans.Create, + Replace: false, + }, + }, + }, + } + + for name, tmp := range tcs { + tc := tmp + + // Let's set some default values on the input. 
+ if tc.input.RelevantAttributes == nil { + tc.input.RelevantAttributes = attribute_path.AlwaysMatcher() + } + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + + collectionDefaultAction := plans.Update + if name == "ignores_unset_fields" { + // Special case for this test, as it is the only one that doesn't + // have the collection types return an update. + collectionDefaultAction = plans.NoOp + } + + t.Run(name, func(t *testing.T) { + t.Run("object", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Object(tc.attributes)), + } + + if tc.validateObject != nil { + tc.validateObject(t, ComputeDiffForAttribute(tc.input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + tc.validateSingleDiff(t, ComputeDiffForAttribute(tc.input, attribute)) + return + } + + validate := renderers.ValidateObject(tc.validateDiffs, tc.validateAction, tc.validateReplace) + validate(t, ComputeDiffForAttribute(tc.input, attribute)) + }) + + t.Run("map", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.Object(tc.attributes))), + } + + input := wrapChangeInMap(tc.input) + + if tc.validateObject != nil { + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": tc.validateObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": renderers.ValidateObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, 
attribute)) + }) + + t.Run("list", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.Object(tc.attributes))), + } + + input := wrapChangeInSlice(tc.input) + + if tc.validateList != nil { + tc.validateList(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateObject != nil { + validate := renderers.ValidateList([]renderers.ValidateDiffFunction{ + tc.validateObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := renderers.ValidateList([]renderers.ValidateDiffFunction{ + tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidateObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + + t.Run("set", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.Object(tc.attributes))), + } + + input := wrapChangeInSlice(tc.input) + + if tc.validateSetDiffs != nil { + validate := renderers.ValidateSet(func() []renderers.ValidateDiffFunction { + var ret []renderers.ValidateDiffFunction + ret = append(ret, tc.validateSetDiffs.Before.Validate(renderers.ValidateObject)) + ret = append(ret, tc.validateSetDiffs.After.Validate(renderers.ValidateObject)) + return ret + }(), collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateObject != nil { + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + tc.validateObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := 
renderers.ValidateSet([]renderers.ValidateDiffFunction{ + tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + renderers.ValidateObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + }) + + t.Run(fmt.Sprintf("nested_%s", name), func(t *testing.T) { + t.Run("object", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeNestedType: &jsonprovider.NestedType{ + Attributes: func() map[string]*jsonprovider.Attribute { + attributes := make(map[string]*jsonprovider.Attribute) + for key, attribute := range tc.attributes { + attributes[key] = &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, attribute), + } + } + return attributes + }(), + NestingMode: "single", + }, + } + + if tc.validateNestedObject != nil { + tc.validateNestedObject(t, ComputeDiffForAttribute(tc.input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + tc.validateSingleDiff(t, ComputeDiffForAttribute(tc.input, attribute)) + return + } + + validate := renderers.ValidateNestedObject(tc.validateDiffs, tc.validateAction, tc.validateReplace) + validate(t, ComputeDiffForAttribute(tc.input, attribute)) + }) + + t.Run("map", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeNestedType: &jsonprovider.NestedType{ + Attributes: func() map[string]*jsonprovider.Attribute { + attributes := make(map[string]*jsonprovider.Attribute) + for key, attribute := range tc.attributes { + attributes[key] = &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, attribute), + } + } + return attributes + }(), + NestingMode: "map", + }, + } + + input := wrapChangeInMap(tc.input) + + if tc.validateNestedObject != nil { + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": 
tc.validateNestedObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": renderers.ValidateNestedObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + + t.Run("list", func(t *testing.T) { + attribute := &jsonprovider.Attribute{ + AttributeNestedType: &jsonprovider.NestedType{ + Attributes: func() map[string]*jsonprovider.Attribute { + attributes := make(map[string]*jsonprovider.Attribute) + for key, attribute := range tc.attributes { + attributes[key] = &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, attribute), + } + } + return attributes + }(), + NestingMode: "list", + }, + } + + input := wrapChangeInSlice(tc.input) + + if tc.validateNestedObject != nil { + validate := renderers.ValidateNestedList([]renderers.ValidateDiffFunction{ + tc.validateNestedObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := renderers.ValidateNestedList([]renderers.ValidateDiffFunction{ + tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateNestedList([]renderers.ValidateDiffFunction{ + renderers.ValidateNestedObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + + t.Run("set", func(t *testing.T) { + attribute := 
&jsonprovider.Attribute{ + AttributeNestedType: &jsonprovider.NestedType{ + Attributes: func() map[string]*jsonprovider.Attribute { + attributes := make(map[string]*jsonprovider.Attribute) + for key, attribute := range tc.attributes { + attributes[key] = &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, attribute), + } + } + return attributes + }(), + NestingMode: "set", + }, + } + + input := wrapChangeInSlice(tc.input) + + if tc.validateSetDiffs != nil { + validate := renderers.ValidateSet(func() []renderers.ValidateDiffFunction { + var ret []renderers.ValidateDiffFunction + ret = append(ret, tc.validateSetDiffs.Before.Validate(renderers.ValidateNestedObject)) + ret = append(ret, tc.validateSetDiffs.After.Validate(renderers.ValidateNestedObject)) + return ret + }(), collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateNestedObject != nil { + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + tc.validateNestedObject, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + if tc.validateSingleDiff != nil { + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + tc.validateSingleDiff, + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + renderers.ValidateNestedObject(tc.validateDiffs, tc.validateAction, tc.validateReplace), + }, collectionDefaultAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + }) + } +} + +func TestValue_BlockAttributesAndNestedBlocks(t *testing.T) { + // This function tests manipulating simple attributes and blocks within + // blocks. It automatically tests these operations within the contexts of + // different block types. 
+ + tcs := map[string]struct { + before interface{} + after interface{} + block *jsonprovider.Block + validate renderers.ValidateDiffFunction + validateSet []renderers.ValidateDiffFunction + }{ + "create_attribute": { + before: map[string]interface{}{}, + after: map[string]interface{}{ + "attribute_one": "new", + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(nil, nil, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, + }, + "update_attribute": { + before: map[string]interface{}{ + "attribute_one": "old", + }, + after: map[string]interface{}{ + "attribute_one": "new", + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", "new", plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, + }, + 
"delete_attribute": { + before: map[string]interface{}{ + "attribute_one": "old", + }, + after: map[string]interface{}{}, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(nil, nil, nil, nil, nil, plans.Create, false), + }, + }, + "create_block": { + before: map[string]interface{}{}, + after: map[string]interface{}{ + "block_one": map[string]interface{}{ + "attribute_one": "new", + }, + }, + block: &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_one": { + Block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + NestingMode: "single", + }, + }, + }, + validate: renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(nil, nil, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, 
nil, plans.Create, false), + }, nil, nil, nil, plans.Create, false), + }, + }, + "update_block": { + before: map[string]interface{}{ + "block_one": map[string]interface{}{ + "attribute_one": "old", + }, + }, + after: map[string]interface{}{ + "block_one": map[string]interface{}{ + "attribute_one": "new", + }, + }, + block: &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_one": { + Block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + NestingMode: "single", + }, + }, + }, + validate: renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", "new", plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + }, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, nil, nil, nil, plans.Create, false), + }, + }, + "delete_block": { + before: map[string]interface{}{ + "block_one": map[string]interface{}{ + "attribute_one": "old", + }, + }, + after: map[string]interface{}{}, + block: &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_one": { + Block: &jsonprovider.Block{ + Attributes: 
map[string]*jsonprovider.Attribute{ + "attribute_one": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + NestingMode: "single", + }, + }, + }, + validate: renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + }, nil, nil, nil, plans.Update, false), + validateSet: []renderers.ValidateDiffFunction{ + renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_one": renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "attribute_one": renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, nil, nil, nil, nil, plans.Delete, false), + }, nil, nil, nil, plans.Delete, false), + renderers.ValidateBlock(nil, nil, nil, nil, nil, plans.Create, false), + }, + }, + } + for name, tmp := range tcs { + tc := tmp + + t.Run(name, func(t *testing.T) { + t.Run("single", func(t *testing.T) { + input := structured.Change{ + Before: map[string]interface{}{ + "block_type": tc.before, + }, + After: map[string]interface{}{ + "block_type": tc.after, + }, + ReplacePaths: &attribute_path.PathMatcher{}, + RelevantAttributes: attribute_path.AlwaysMatcher(), + } + + block := &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_type": { + Block: tc.block, + NestingMode: "single", + }, + }, + } + + validate := renderers.ValidateBlock(nil, map[string]renderers.ValidateDiffFunction{ + "block_type": tc.validate, + }, nil, nil, nil, plans.Update, false) + validate(t, ComputeDiffForBlock(input, block)) + }) + t.Run("map", func(t *testing.T) { + input := structured.Change{ + Before: map[string]interface{}{ + "block_type": map[string]interface{}{ + "one": tc.before, + }, + }, + After: map[string]interface{}{ + "block_type": map[string]interface{}{ + "one": tc.after, + }, + }, + 
ReplacePaths: &attribute_path.PathMatcher{}, + RelevantAttributes: attribute_path.AlwaysMatcher(), + } + + block := &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_type": { + Block: tc.block, + NestingMode: "map", + }, + }, + } + + validate := renderers.ValidateBlock(nil, nil, nil, map[string]map[string]renderers.ValidateDiffFunction{ + "block_type": { + "one": tc.validate, + }, + }, nil, plans.Update, false) + validate(t, ComputeDiffForBlock(input, block)) + }) + t.Run("list", func(t *testing.T) { + input := structured.Change{ + Before: map[string]interface{}{ + "block_type": []interface{}{ + tc.before, + }, + }, + After: map[string]interface{}{ + "block_type": []interface{}{ + tc.after, + }, + }, + ReplacePaths: &attribute_path.PathMatcher{}, + RelevantAttributes: attribute_path.AlwaysMatcher(), + } + + block := &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_type": { + Block: tc.block, + NestingMode: "list", + }, + }, + } + + validate := renderers.ValidateBlock(nil, nil, map[string][]renderers.ValidateDiffFunction{ + "block_type": { + tc.validate, + }, + }, nil, nil, plans.Update, false) + validate(t, ComputeDiffForBlock(input, block)) + }) + t.Run("set", func(t *testing.T) { + input := structured.Change{ + Before: map[string]interface{}{ + "block_type": []interface{}{ + tc.before, + }, + }, + After: map[string]interface{}{ + "block_type": []interface{}{ + tc.after, + }, + }, + ReplacePaths: &attribute_path.PathMatcher{}, + RelevantAttributes: attribute_path.AlwaysMatcher(), + } + + block := &jsonprovider.Block{ + BlockTypes: map[string]*jsonprovider.BlockType{ + "block_type": { + Block: tc.block, + NestingMode: "set", + }, + }, + } + + validate := renderers.ValidateBlock(nil, nil, nil, nil, map[string][]renderers.ValidateDiffFunction{ + "block_type": func() []renderers.ValidateDiffFunction { + if tc.validateSet != nil { + return tc.validateSet + } + return 
[]renderers.ValidateDiffFunction{tc.validate} + }(), + }, plans.Update, false) + validate(t, ComputeDiffForBlock(input, block)) + }) + }) + } +} + +func TestValue_Outputs(t *testing.T) { + tcs := map[string]struct { + input structured.Change + validateDiff renderers.ValidateDiffFunction + }{ + "primitive_create": { + input: structured.Change{ + Before: nil, + After: "new", + }, + validateDiff: renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + "object_create": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "element_one": "new_one", + "element_two": "new_two", + }, + }, + validateDiff: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + "element_two": renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), + }, + "list_create": { + input: structured.Change{ + Before: nil, + After: []interface{}{ + "new_one", + "new_two", + }, + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), + }, + "primitive_update": { + input: structured.Change{ + Before: "old", + After: "new", + }, + validateDiff: renderers.ValidatePrimitive("old", "new", plans.Update, false), + }, + "object_update": { + input: structured.Change{ + Before: map[string]interface{}{ + "element_one": "old_one", + "element_two": "old_two", + }, + After: map[string]interface{}{ + "element_one": "new_one", + "element_two": "new_two", + }, + }, + validateDiff: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive("old_one", "new_one", plans.Update, false), + "element_two": renderers.ValidatePrimitive("old_two", "new_two", plans.Update, false), + }, plans.Update, false), + }, + 
"list_update": { + input: structured.Change{ + Before: []interface{}{ + "old_one", + "old_two", + }, + After: []interface{}{ + "new_one", + "new_two", + }, + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Update, false), + }, + "primitive_delete": { + input: structured.Change{ + Before: "old", + After: nil, + }, + validateDiff: renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + "object_delete": { + input: structured.Change{ + Before: map[string]interface{}{ + "element_one": "old_one", + "element_two": "old_two", + }, + After: nil, + }, + validateDiff: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + "element_two": renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + }, + "list_delete": { + input: structured.Change{ + Before: []interface{}{ + "old_one", + "old_two", + }, + After: nil, + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + }, + "primitive_to_list": { + input: structured.Change{ + Before: "old", + After: []interface{}{ + "new_one", + "new_two", + }, + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), plans.Update, 
false), + }, + "primitive_to_object": { + input: structured.Change{ + Before: "old", + After: map[string]interface{}{ + "element_one": "new_one", + "element_two": "new_two", + }, + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + "element_two": renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), plans.Update, false), + }, + "list_to_primitive": { + input: structured.Change{ + Before: []interface{}{ + "old_one", + "old_two", + }, + After: "new", + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new", plans.Create, false), + plans.Update, false), + }, + "list_to_object": { + input: structured.Change{ + Before: []interface{}{ + "old_one", + "old_two", + }, + After: map[string]interface{}{ + "element_one": "new_one", + "element_two": "new_two", + }, + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + "element_two": renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), plans.Update, false), + }, + "object_to_primitive": { + input: structured.Change{ + Before: map[string]interface{}{ + "element_one": "old_one", + "element_two": "old_two", + }, + After: 
"new", + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + "element_two": renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new", plans.Create, false), + plans.Update, false), + }, + "object_to_list": { + input: structured.Change{ + Before: map[string]interface{}{ + "element_one": "old_one", + "element_two": "old_two", + }, + After: []interface{}{ + "new_one", + "new_two", + }, + }, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive("old_one", nil, plans.Delete, false), + "element_two": renderers.ValidatePrimitive("old_two", nil, plans.Delete, false), + }, plans.Delete, false), + renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(nil, "new_one", plans.Create, false), + renderers.ValidatePrimitive(nil, "new_two", plans.Create, false), + }, plans.Create, false), plans.Update, false), + }, + } + + for name, tc := range tcs { + + // Let's set some default values on the input. + if tc.input.RelevantAttributes == nil { + tc.input.RelevantAttributes = attribute_path.AlwaysMatcher() + } + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + + t.Run(name, func(t *testing.T) { + tc.validateDiff(t, ComputeDiffForOutput(tc.input)) + }) + } +} + +func TestValue_PrimitiveAttributes(t *testing.T) { + // This function tests manipulating primitives: creating, deleting and + // updating. It also automatically tests these operations within the + // contexts of collections. 
+ + tcs := map[string]struct { + input structured.Change + attribute cty.Type + validateDiff renderers.ValidateDiffFunction + validateSliceDiffs []renderers.ValidateDiffFunction // Lists are special in some cases. + }{ + "primitive_create": { + input: structured.Change{ + After: "new", + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + "primitive_delete": { + input: structured.Change{ + Before: "old", + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive("old", nil, plans.Delete, false), + }, + "primitive_update": { + input: structured.Change{ + Before: "old", + After: "new", + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive("old", "new", plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + }, + "primitive_set_explicit_null": { + input: structured.Change{ + Before: "old", + After: nil, + AfterExplicit: true, + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive("old", nil, plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, nil, plans.Create, false), + }, + }, + "primitive_unset_explicit_null": { + input: structured.Change{ + BeforeExplicit: true, + Before: nil, + After: "new", + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive(nil, "new", plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(nil, nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + }, + "primitive_create_sensitive": { + input: structured.Change{ + Before: nil, + After: "new", + AfterSensitive: true, + }, + attribute: cty.String, + validateDiff: 
renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "new", plans.Create, false), false, true, plans.Create, false), + }, + "primitive_delete_sensitive": { + input: structured.Change{ + Before: "old", + BeforeSensitive: true, + After: nil, + }, + attribute: cty.String, + validateDiff: renderers.ValidateSensitive(renderers.ValidatePrimitive("old", nil, plans.Delete, false), true, false, plans.Delete, false), + }, + "primitive_update_sensitive": { + input: structured.Change{ + Before: "old", + BeforeSensitive: true, + After: "new", + AfterSensitive: true, + }, + attribute: cty.String, + validateDiff: renderers.ValidateSensitive(renderers.ValidatePrimitive("old", "new", plans.Update, false), true, true, plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidateSensitive(renderers.ValidatePrimitive("old", nil, plans.Delete, false), true, false, plans.Delete, false), + renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "new", plans.Create, false), false, true, plans.Create, false), + }, + }, + "primitive_create_computed": { + input: structured.Change{ + Before: nil, + After: nil, + Unknown: true, + }, + attribute: cty.String, + validateDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + "primitive_update_computed": { + input: structured.Change{ + Before: "old", + After: nil, + Unknown: true, + }, + attribute: cty.String, + validateDiff: renderers.ValidateUnknown(renderers.ValidatePrimitive("old", nil, plans.Delete, false), plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidateUnknown(nil, plans.Create, false), + }, + }, + "primitive_update_replace": { + input: structured.Change{ + Before: "old", + After: "new", + ReplacePaths: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + {}, // An empty path suggests replace should be true. 
+ }, + }, + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive("old", "new", plans.Update, true), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, true), + renderers.ValidatePrimitive(nil, "new", plans.Create, true), + }, + }, + "noop": { + input: structured.Change{ + Before: "old", + After: "old", + }, + attribute: cty.String, + validateDiff: renderers.ValidatePrimitive("old", "old", plans.NoOp, false), + }, + "dynamic": { + input: structured.Change{ + Before: "old", + After: "new", + }, + attribute: cty.DynamicPseudoType, + validateDiff: renderers.ValidatePrimitive("old", "new", plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, "new", plans.Create, false), + }, + }, + "dynamic_type_change": { + input: structured.Change{ + Before: "old", + After: json.Number("4"), + }, + attribute: cty.DynamicPseudoType, + validateDiff: renderers.ValidateTypeChange( + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, json.Number("4"), plans.Create, false), + plans.Update, false), + validateSliceDiffs: []renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("old", nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, json.Number("4"), plans.Create, false), + }, + }, + } + for name, tmp := range tcs { + tc := tmp + + // Let's set some default values on the input. 
+ if tc.input.RelevantAttributes == nil { + tc.input.RelevantAttributes = attribute_path.AlwaysMatcher() + } + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + + defaultCollectionsAction := plans.Update + if name == "noop" { + defaultCollectionsAction = plans.NoOp + } + + t.Run(name, func(t *testing.T) { + t.Run("direct", func(t *testing.T) { + tc.validateDiff(t, ComputeDiffForAttribute(tc.input, &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, tc.attribute), + })) + }) + + t.Run("map", func(t *testing.T) { + input := wrapChangeInMap(tc.input) + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(tc.attribute)), + } + + validate := renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": tc.validateDiff, + }, defaultCollectionsAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + + t.Run("list", func(t *testing.T) { + input := wrapChangeInSlice(tc.input) + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(tc.attribute)), + } + + if tc.validateSliceDiffs != nil { + validate := renderers.ValidateList(tc.validateSliceDiffs, defaultCollectionsAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateList([]renderers.ValidateDiffFunction{ + tc.validateDiff, + }, defaultCollectionsAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + + t.Run("set", func(t *testing.T) { + input := wrapChangeInSlice(tc.input) + attribute := &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(tc.attribute)), + } + + if tc.validateSliceDiffs != nil { + validate := renderers.ValidateSet(tc.validateSliceDiffs, defaultCollectionsAction, false) + validate(t, ComputeDiffForAttribute(input, attribute)) + return + } + + validate := renderers.ValidateSet([]renderers.ValidateDiffFunction{ + tc.validateDiff, + }, defaultCollectionsAction, 
false) + validate(t, ComputeDiffForAttribute(input, attribute)) + }) + }) + } +} + +func TestValue_CollectionAttributes(t *testing.T) { + // This function tests creating and deleting collections. Note, it does not + // generally cover editing collections except in special cases as editing + // collections is handled automatically by other functions. + tcs := map[string]struct { + input structured.Change + attribute *jsonprovider.Attribute + validateDiff renderers.ValidateDiffFunction + }{ + "map_create_empty": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{}, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateMap(nil, plans.Create, false), + }, + "map_create_populated": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "element_one": "one", + "element_two": "two", + }, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive(nil, "one", plans.Create, false), + "element_two": renderers.ValidatePrimitive(nil, "two", plans.Create, false), + }, plans.Create, false), + }, + "map_delete_empty": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateMap(nil, plans.Delete, false), + }, + "map_delete_populated": { + input: structured.Change{ + Before: map[string]interface{}{ + "element_one": "one", + "element_two": "two", + }, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element_one": renderers.ValidatePrimitive("one", nil, plans.Delete, false), 
+ "element_two": renderers.ValidatePrimitive("two", nil, plans.Delete, false), + }, plans.Delete, false), + }, + "map_create_sensitive": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateMap(nil, plans.Create, false), false, true, plans.Create, false), + }, + "map_update_sensitive": { + input: structured.Change{ + Before: map[string]interface{}{ + "element": "one", + }, + BeforeSensitive: true, + After: map[string]interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "element": renderers.ValidatePrimitive("one", nil, plans.Delete, false), + }, plans.Update, false), true, true, plans.Update, false), + }, + "map_delete_sensitive": { + input: structured.Change{ + Before: map[string]interface{}{}, + BeforeSensitive: true, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateMap(nil, plans.Delete, false), true, false, plans.Delete, false), + }, + "map_create_unknown": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{}, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + "map_update_unknown": { + input: structured.Change{ + Before: map[string]interface{}{}, + After: map[string]interface{}{ + "element": "one", + }, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + validateDiff: 
renderers.ValidateUnknown(renderers.ValidateMap(nil, plans.Delete, false), plans.Update, false), + }, + "list_create_empty": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateList(nil, plans.Create, false), + }, + "list_create_populated": { + input: structured.Change{ + Before: nil, + After: []interface{}{"one", "two"}, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(nil, "one", plans.Create, false), + renderers.ValidatePrimitive(nil, "two", plans.Create, false), + }, plans.Create, false), + }, + "list_delete_empty": { + input: structured.Change{ + Before: []interface{}{}, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateList(nil, plans.Delete, false), + }, + "list_delete_populated": { + input: structured.Change{ + Before: []interface{}{"one", "two"}, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("one", nil, plans.Delete, false), + renderers.ValidatePrimitive("two", nil, plans.Delete, false), + }, plans.Delete, false), + }, + "list_create_sensitive": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateList(nil, plans.Create, false), false, true, plans.Create, false), + }, + "list_update_sensitive": { + input: structured.Change{ + Before: []interface{}{"one"}, + 
BeforeSensitive: true, + After: []interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("one", nil, plans.Delete, false), + }, plans.Update, false), true, true, plans.Update, false), + }, + "list_delete_sensitive": { + input: structured.Change{ + Before: []interface{}{}, + BeforeSensitive: true, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateList(nil, plans.Delete, false), true, false, plans.Delete, false), + }, + "list_create_unknown": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + "list_update_unknown": { + input: structured.Change{ + Before: []interface{}{}, + After: []interface{}{"one"}, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.List(cty.String)), + }, + validateDiff: renderers.ValidateUnknown(renderers.ValidateList(nil, plans.Delete, false), plans.Update, false), + }, + "set_create_empty": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSet(nil, plans.Create, false), + }, + "set_create_populated": { + input: structured.Change{ + Before: nil, + After: []interface{}{"one", "two"}, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSet([]renderers.ValidateDiffFunction{ + 
renderers.ValidatePrimitive(nil, "one", plans.Create, false), + renderers.ValidatePrimitive(nil, "two", plans.Create, false), + }, plans.Create, false), + }, + "set_delete_empty": { + input: structured.Change{ + Before: []interface{}{}, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSet(nil, plans.Delete, false), + }, + "set_delete_populated": { + input: structured.Change{ + Before: []interface{}{"one", "two"}, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSet([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("one", nil, plans.Delete, false), + renderers.ValidatePrimitive("two", nil, plans.Delete, false), + }, plans.Delete, false), + }, + "set_create_sensitive": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateSet(nil, plans.Create, false), false, true, plans.Create, false), + }, + "set_update_sensitive": { + input: structured.Change{ + Before: []interface{}{"one"}, + BeforeSensitive: true, + After: []interface{}{}, + AfterSensitive: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateSensitive(renderers.ValidateSet([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("one", nil, plans.Delete, false), + }, plans.Update, false), true, true, plans.Update, false), + }, + "set_delete_sensitive": { + input: structured.Change{ + Before: []interface{}{}, + BeforeSensitive: true, + After: nil, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: 
renderers.ValidateSensitive(renderers.ValidateSet(nil, plans.Delete, false), true, false, plans.Delete, false), + }, + "set_create_unknown": { + input: structured.Change{ + Before: nil, + After: []interface{}{}, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateUnknown(nil, plans.Create, false), + }, + "set_update_unknown": { + input: structured.Change{ + Before: []interface{}{}, + After: []interface{}{"one"}, + Unknown: true, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Set(cty.String)), + }, + validateDiff: renderers.ValidateUnknown(renderers.ValidateSet(nil, plans.Delete, false), plans.Update, false), + }, + "tuple_primitive": { + input: structured.Change{ + Before: []interface{}{ + "one", + json.Number("2"), + "three", + }, + After: []interface{}{ + "one", + json.Number("4"), + "three", + }, + }, + attribute: &jsonprovider.Attribute{ + AttributeType: unmarshalType(t, cty.Tuple([]cty.Type{cty.String, cty.Number, cty.String})), + }, + validateDiff: renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive("one", "one", plans.NoOp, false), + renderers.ValidatePrimitive(json.Number("2"), json.Number("4"), plans.Update, false), + renderers.ValidatePrimitive("three", "three", plans.NoOp, false), + }, plans.Update, false), + }, + } + + for name, tc := range tcs { + + // Let's set some default values on the input. 
+ if tc.input.RelevantAttributes == nil { + tc.input.RelevantAttributes = attribute_path.AlwaysMatcher() + } + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + + t.Run(name, func(t *testing.T) { + tc.validateDiff(t, ComputeDiffForAttribute(tc.input, tc.attribute)) + }) + } +} + +func TestRelevantAttributes(t *testing.T) { + tcs := map[string]struct { + input structured.Change + block *jsonprovider.Block + validate renderers.ValidateDiffFunction + }{ + "simple_attributes": { + input: structured.Change{ + Before: map[string]interface{}{ + "id": "old_id", + "ignore": "doesn't matter", + }, + After: map[string]interface{}{ + "id": "new_id", + "ignore": "doesn't matter but modified", + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + { + "id", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "id": { + AttributeType: unmarshalType(t, cty.String), + }, + "ignore": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "id": renderers.ValidatePrimitive("old_id", "new_id", plans.Update, false), + "ignore": renderers.ValidatePrimitive("doesn't matter", "doesn't matter", plans.NoOp, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + "nested_attributes": { + input: structured.Change{ + Before: map[string]interface{}{ + "list_block": []interface{}{ + map[string]interface{}{ + "id": "old_one", + }, + map[string]interface{}{ + "id": "ignored", + }, + }, + }, + After: map[string]interface{}{ + "list_block": []interface{}{ + map[string]interface{}{ + "id": "new_one", + }, + map[string]interface{}{ + "id": "ignored_but_changed", + }, + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + { + "list_block", + float64(0), + "id", + }, + }, + }, + }, + block: &jsonprovider.Block{ + BlockTypes: 
map[string]*jsonprovider.BlockType{ + "list_block": { + Block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "id": { + AttributeType: unmarshalType(t, cty.String), + }, + }, + }, + NestingMode: "list", + }, + }, + }, + validate: renderers.ValidateBlock(nil, nil, map[string][]renderers.ValidateDiffFunction{ + "list_block": { + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "id": renderers.ValidatePrimitive("old_one", "new_one", plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "id": renderers.ValidatePrimitive("ignored", "ignored", plans.NoOp, false), + }, nil, nil, nil, nil, plans.NoOp, false), + }, + }, nil, nil, plans.Update, false), + }, + "nested_attributes_in_object": { + input: structured.Change{ + Before: map[string]interface{}{ + "object": map[string]interface{}{ + "id": "old_id", + }, + }, + After: map[string]interface{}{ + "object": map[string]interface{}{ + "id": "new_id", + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Propagate: true, + Paths: [][]interface{}{ + { + "object", // Even though we just specify object, it should now include every below object as well. 
+ }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "object": { + AttributeType: unmarshalType(t, cty.Object(map[string]cty.Type{ + "id": cty.String, + })), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "object": renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "id": renderers.ValidatePrimitive("old_id", "new_id", plans.Update, false), + }, plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + "elements_in_list": { + input: structured.Change{ + Before: map[string]interface{}{ + "list": []interface{}{ + json.Number("0"), json.Number("1"), json.Number("2"), json.Number("3"), json.Number("4"), + }, + }, + After: map[string]interface{}{ + "list": []interface{}{ + json.Number("0"), json.Number("5"), json.Number("6"), json.Number("7"), json.Number("4"), + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ // The list is actually just going to ignore this. + { + "list", + 0.0, + }, + { + "list", + 2.0, + }, + { + "list", + 4.0, + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "list": { + AttributeType: unmarshalType(t, cty.List(cty.Number)), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + // The list validator below just ignores our relevant + // attributes. This is deliberate. 
+ "list": renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(json.Number("0"), json.Number("0"), plans.NoOp, false), + renderers.ValidatePrimitive(json.Number("1"), nil, plans.Delete, false), + renderers.ValidatePrimitive(json.Number("2"), nil, plans.Delete, false), + renderers.ValidatePrimitive(json.Number("3"), nil, plans.Delete, false), + renderers.ValidatePrimitive(nil, json.Number("5"), plans.Create, false), + renderers.ValidatePrimitive(nil, json.Number("6"), plans.Create, false), + renderers.ValidatePrimitive(nil, json.Number("7"), plans.Create, false), + renderers.ValidatePrimitive(json.Number("4"), json.Number("4"), plans.NoOp, false), + }, plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + "elements_in_map": { + input: structured.Change{ + Before: map[string]interface{}{ + "map": map[string]interface{}{ + "key_one": "value_one", + "key_two": "value_two", + "key_three": "value_three", + }, + }, + After: map[string]interface{}{ + "map": map[string]interface{}{ + "key_one": "value_three", + "key_two": "value_seven", + "key_four": "value_four", + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Paths: [][]interface{}{ + { + "map", + "key_one", + }, + { + "map", + "key_three", + }, + { + "map", + "key_four", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "map": { + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "map": renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "key_one": renderers.ValidatePrimitive("value_one", "value_three", plans.Update, false), + "key_two": renderers.ValidatePrimitive("value_two", "value_two", plans.NoOp, false), + "key_three": renderers.ValidatePrimitive("value_three", nil, plans.Delete, false), + "key_four": renderers.ValidatePrimitive(nil, "value_four", plans.Create, false), + }, 
plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + "elements_in_set": { + input: structured.Change{ + Before: map[string]interface{}{ + "set": []interface{}{ + json.Number("0"), json.Number("1"), json.Number("2"), json.Number("3"), json.Number("4"), + }, + }, + After: map[string]interface{}{ + "set": []interface{}{ + json.Number("0"), json.Number("2"), json.Number("4"), json.Number("5"), json.Number("6"), + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Propagate: true, + Paths: [][]interface{}{ + { + "set", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "set": { + AttributeType: unmarshalType(t, cty.Set(cty.Number)), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "set": renderers.ValidateSet([]renderers.ValidateDiffFunction{ + renderers.ValidatePrimitive(json.Number("0"), json.Number("0"), plans.NoOp, false), + renderers.ValidatePrimitive(json.Number("1"), nil, plans.Delete, false), + renderers.ValidatePrimitive(json.Number("2"), json.Number("2"), plans.NoOp, false), + renderers.ValidatePrimitive(json.Number("3"), nil, plans.Delete, false), + renderers.ValidatePrimitive(json.Number("4"), json.Number("4"), plans.NoOp, false), + renderers.ValidatePrimitive(nil, json.Number("5"), plans.Create, false), + renderers.ValidatePrimitive(nil, json.Number("6"), plans.Create, false), + }, plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + "dynamic_types": { + input: structured.Change{ + Before: map[string]interface{}{ + "dynamic_nested_type": map[string]interface{}{ + "nested_id": "nomatch", + "nested_object": map[string]interface{}{ + "nested_nested_id": "matched", + }, + }, + "dynamic_nested_type_match": map[string]interface{}{ + "nested_id": "allmatch", + "nested_object": map[string]interface{}{ + "nested_nested_id": "allmatch", + }, + }, + }, + After: map[string]interface{}{ + "dynamic_nested_type": 
map[string]interface{}{ + "nested_id": "nomatch_changed", + "nested_object": map[string]interface{}{ + "nested_nested_id": "matched", + }, + }, + "dynamic_nested_type_match": map[string]interface{}{ + "nested_id": "allmatch", + "nested_object": map[string]interface{}{ + "nested_nested_id": "allmatch", + }, + }, + }, + RelevantAttributes: &attribute_path.PathMatcher{ + Propagate: true, + Paths: [][]interface{}{ + { + "dynamic_nested_type", + "nested_object", + "nested_nested_id", + }, + { + "dynamic_nested_type_match", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "dynamic_nested_type": { + AttributeType: unmarshalType(t, cty.DynamicPseudoType), + }, + "dynamic_nested_type_match": { + AttributeType: unmarshalType(t, cty.DynamicPseudoType), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "dynamic_nested_type": renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "nested_id": renderers.ValidatePrimitive("nomatch", "nomatch", plans.NoOp, false), + "nested_object": renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "nested_nested_id": renderers.ValidatePrimitive("matched", "matched", plans.NoOp, false), + }, plans.NoOp, false), + }, plans.NoOp, false), + "dynamic_nested_type_match": renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "nested_id": renderers.ValidatePrimitive("allmatch", "allmatch", plans.NoOp, false), + "nested_object": renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "nested_nested_id": renderers.ValidatePrimitive("allmatch", "allmatch", plans.NoOp, false), + }, plans.NoOp, false), + }, plans.NoOp, false), + }, nil, nil, nil, nil, plans.NoOp, false), + }, + } + for name, tc := range tcs { + if tc.input.ReplacePaths == nil { + tc.input.ReplacePaths = &attribute_path.PathMatcher{} + } + t.Run(name, func(t *testing.T) { + tc.validate(t, ComputeDiffForBlock(tc.input, tc.block)) + 
}) + } +} + +func TestDynamicPseudoType(t *testing.T) { + tcs := map[string]struct { + input structured.Change + validate renderers.ValidateDiffFunction + }{ + "after_sensitive_in_dynamic_type": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "key": "value", + }, + Unknown: false, + BeforeSensitive: false, + AfterSensitive: map[string]interface{}{ + "key": true, + }, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + validate: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "key": renderers.ValidateSensitive(renderers.ValidatePrimitive(nil, "value", plans.Create, false), false, true, plans.Create, false), + }, plans.Create, false), + }, + "before_sensitive_in_dynamic_type": { + input: structured.Change{ + Before: map[string]interface{}{ + "key": "value", + }, + After: nil, + Unknown: false, + BeforeSensitive: map[string]interface{}{ + "key": true, + }, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + validate: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "key": renderers.ValidateSensitive(renderers.ValidatePrimitive("value", nil, plans.Delete, false), true, false, plans.Delete, false), + }, plans.Delete, false), + }, + "sensitive_in_dynamic_type": { + input: structured.Change{ + Before: map[string]interface{}{ + "key": "before", + }, + After: map[string]interface{}{ + "key": "after", + }, + Unknown: false, + BeforeSensitive: map[string]interface{}{ + "key": true, + }, + AfterSensitive: map[string]interface{}{ + "key": true, + }, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + validate: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "key": renderers.ValidateSensitive(renderers.ValidatePrimitive("before", "after", plans.Update, false), true, true, plans.Update, false), + }, 
plans.Update, false), + }, + "create_unknown_in_dynamic_type": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{}, + Unknown: map[string]interface{}{ + "key": true, + }, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + validate: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "key": renderers.ValidateUnknown(nil, plans.Create, false), + }, plans.Create, false), + }, + "update_unknown_in_dynamic_type": { + input: structured.Change{ + Before: map[string]interface{}{ + "key": "before", + }, + After: map[string]interface{}{}, + Unknown: map[string]interface{}{ + "key": true, + }, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + validate: renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "key": renderers.ValidateUnknown(renderers.ValidatePrimitive("before", nil, plans.Delete, false), plans.Update, false), + }, plans.Update, false), + }, + } + for key, tc := range tcs { + t.Run(key, func(t *testing.T) { + tc.validate(t, ComputeDiffForType(tc.input, cty.DynamicPseudoType)) + }) + } +} + +func TestSpecificCases(t *testing.T) { + // This is a special test that can contain any combination of individual + // cases and will execute against them. For testing/fixing specific issues + // you can generally put the test case in here. 
+ tcs := map[string]struct { + input structured.Change + block *jsonprovider.Block + validate renderers.ValidateDiffFunction + }{ + "issues/33016/unknown": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "triggers": map[string]interface{}{}, + }, + Unknown: map[string]interface{}{ + "id": true, + "triggers": map[string]interface{}{ + "rotation": true, + }, + }, + BeforeSensitive: false, + AfterSensitive: map[string]interface{}{ + "triggers": map[string]interface{}{}, + }, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "id": { + AttributeType: unmarshalType(t, cty.String), + }, + "triggers": { + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "id": renderers.ValidateUnknown(nil, plans.Create, false), + "triggers": renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "rotation": renderers.ValidateUnknown(nil, plans.Create, false), + }, plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, + "issues/33016/null": { + input: structured.Change{ + Before: nil, + After: map[string]interface{}{ + "triggers": map[string]interface{}{ + "rotation": nil, + }, + }, + Unknown: map[string]interface{}{ + "id": true, + "triggers": map[string]interface{}{}, + }, + BeforeSensitive: false, + AfterSensitive: map[string]interface{}{ + "triggers": map[string]interface{}{}, + }, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "id": { + AttributeType: unmarshalType(t, cty.String), + }, + "triggers": { + AttributeType: unmarshalType(t, cty.Map(cty.String)), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + 
"id": renderers.ValidateUnknown(nil, plans.Create, false), + "triggers": renderers.ValidateMap(map[string]renderers.ValidateDiffFunction{ + "rotation": renderers.ValidatePrimitive(nil, nil, plans.Create, false), + }, plans.Create, false), + }, nil, nil, nil, nil, plans.Create, false), + }, + + // The following tests are from issue 33472. Basically OpenTofu allows + // callers to treat numbers as strings in references and expects us + // to coerce the strings into numbers. For example the following are + // equivalent. + // - test_resource.resource.list[0].attribute + // - test_resource.resource.list["0"].attribute + // + // We need our attribute_path package (used within the ReplacePaths and + // RelevantAttributes fields) to handle coercing strings into numbers + // when it's expected. + + "issues/33472/expected": { + input: structured.Change{ + Before: map[string]interface{}{ + "list": []interface{}{ + map[string]interface{}{ + "number": json.Number("-1"), + }, + }, + }, + After: map[string]interface{}{ + "list": []interface{}{ + map[string]interface{}{ + "number": json.Number("2"), + }, + }, + }, + Unknown: false, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: &attribute_path.PathMatcher{ + Propagate: true, + Paths: [][]interface{}{ + { + "list", + 0.0, // This is normal and expected so easy case. 
+ "number", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "list": { + AttributeType: unmarshalType(t, cty.List(cty.Object(map[string]cty.Type{ + "number": cty.Number, + }))), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "list": renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "number": renderers.ValidatePrimitive(json.Number("-1"), json.Number("2"), plans.Update, false), + }, plans.Update, false), + }, plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + + "issues/33472/coerce": { + input: structured.Change{ + Before: map[string]interface{}{ + "list": []interface{}{ + map[string]interface{}{ + "number": json.Number("-1"), + }, + }, + }, + After: map[string]interface{}{ + "list": []interface{}{ + map[string]interface{}{ + "number": json.Number("2"), + }, + }, + }, + Unknown: false, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: &attribute_path.PathMatcher{ + Propagate: true, + Paths: [][]interface{}{ + { + "list", + "0", // Difficult but allowed, we need to handle this. 
+ "number", + }, + }, + }, + }, + block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "list": { + AttributeType: unmarshalType(t, cty.List(cty.Object(map[string]cty.Type{ + "number": cty.Number, + }))), + }, + }, + }, + validate: renderers.ValidateBlock(map[string]renderers.ValidateDiffFunction{ + "list": renderers.ValidateList([]renderers.ValidateDiffFunction{ + renderers.ValidateObject(map[string]renderers.ValidateDiffFunction{ + "number": renderers.ValidatePrimitive(json.Number("-1"), json.Number("2"), plans.Update, false), + }, plans.Update, false), + }, plans.Update, false), + }, nil, nil, nil, nil, plans.Update, false), + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + tc.validate(t, ComputeDiffForBlock(tc.input, tc.block)) + }) + } +} + +// unmarshalType converts a cty.Type into a json.RawMessage understood by the +// schema. It also lets the testing framework handle any errors to keep the API +// clean. +func unmarshalType(t *testing.T, ctyType cty.Type) json.RawMessage { + msg, err := ctyjson.MarshalType(ctyType) + if err != nil { + t.Fatalf("invalid type: %s", ctyType.FriendlyName()) + } + return msg +} + +// wrapChangeInSlice does the same as wrapChangeInMap, except it wraps it into a +// slice internally. +func wrapChangeInSlice(input structured.Change) structured.Change { + return wrapChange(input, float64(0), func(value interface{}, unknown interface{}, explicit bool) interface{} { + switch value.(type) { + case nil: + if set, ok := unknown.(bool); (set && ok) || explicit { + return []interface{}{nil} + + } + return []interface{}{} + default: + return []interface{}{value} + } + }) +} + +// wrapChangeInMap access a single structured.Change and returns a new +// structured.Change that represents a map with a single element. That single +// element is the input value. 
+func wrapChangeInMap(input structured.Change) structured.Change { + return wrapChange(input, "element", func(value interface{}, unknown interface{}, explicit bool) interface{} { + switch value.(type) { + case nil: + if set, ok := unknown.(bool); (set && ok) || explicit { + return map[string]interface{}{ + "element": nil, + } + } + return map[string]interface{}{} + default: + return map[string]interface{}{ + "element": value, + } + } + }) +} + +func wrapChange(input structured.Change, step interface{}, wrap func(interface{}, interface{}, bool) interface{}) structured.Change { + + replacePaths := &attribute_path.PathMatcher{} + for _, path := range input.ReplacePaths.(*attribute_path.PathMatcher).Paths { + var updated []interface{} + updated = append(updated, step) + updated = append(updated, path...) + replacePaths.Paths = append(replacePaths.Paths, updated) + } + + // relevantAttributes usually default to AlwaysMatcher, which means we can + // just ignore it. But if we have had some paths specified we need to wrap + // those as well. + relevantAttributes := input.RelevantAttributes + if concrete, ok := relevantAttributes.(*attribute_path.PathMatcher); ok { + + newRelevantAttributes := &attribute_path.PathMatcher{} + for _, path := range concrete.Paths { + var updated []interface{} + updated = append(updated, step) + updated = append(updated, path...) 
+ newRelevantAttributes.Paths = append(newRelevantAttributes.Paths, updated) + } + relevantAttributes = newRelevantAttributes + } + + return structured.Change{ + Before: wrap(input.Before, nil, input.BeforeExplicit), + After: wrap(input.After, input.Unknown, input.AfterExplicit), + Unknown: wrap(input.Unknown, nil, false), + BeforeSensitive: wrap(input.BeforeSensitive, nil, false), + AfterSensitive: wrap(input.AfterSensitive, nil, false), + ReplacePaths: replacePaths, + RelevantAttributes: relevantAttributes, + } +} diff --git a/pkg/command/jsonformat/differ/list.go b/pkg/command/jsonformat/differ/list.go new file mode 100644 index 00000000000..30fe0fbd29e --- /dev/null +++ b/pkg/command/jsonformat/differ/list.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +func computeAttributeDiffAsList(change structured.Change, elementType cty.Type) computed.Diff { + sliceValue := change.AsSlice() + + processIndices := func(beforeIx, afterIx int) computed.Diff { + value := sliceValue.GetChild(beforeIx, afterIx) + + // It's actually really difficult to render the diffs when some indices + // within a slice are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list or set as also + // relevant. 
+ // + // Interestingly the tofu plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the collection is + // relevant (decided elsewhere), then every element in the collection is + // also relevant. To be clear, in practice even if we didn't do the + // following explicitly the effect would be the same. It's just nicer + // for us to be clear about the behaviour we expect. + // + // What makes this difficult is the fact that the beforeIx and afterIx + // can be different, and it's quite difficult to work out which one is + // the relevant one. For nested lists, block lists, and tuples it's much + // easier because we always process the same indices in the before and + // after. + value.RelevantAttributes = attribute_path.AlwaysMatcher() + + return ComputeDiffForType(value, elementType) + } + + isObjType := func(_ interface{}) bool { + return elementType.IsObjectType() + } + + elements, current := collections.TransformSlice(sliceValue.Before, sliceValue.After, processIndices, isObjType) + return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedList(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processNestedList(change, func(value structured.Change) { + element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.NestedList(elements), current, change.ReplacePaths.Matches()) +} + +func computeBlockDiffsAsList(change structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { + var elements []computed.Diff + current := 
change.GetDefaultActionForIteration() + processNestedList(change, func(value structured.Change) { + element := ComputeDiffForBlock(value, block) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return elements, current +} + +func processNestedList(change structured.Change, process func(value structured.Change)) { + sliceValue := change.AsSlice() + for ix := 0; ix < len(sliceValue.Before) || ix < len(sliceValue.After); ix++ { + value := sliceValue.GetChild(ix, ix) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + process(value) + } +} diff --git a/pkg/command/jsonformat/differ/map.go b/pkg/command/jsonformat/differ/map.go new file mode 100644 index 00000000000..e371ce237a5 --- /dev/null +++ b/pkg/command/jsonformat/differ/map.go @@ -0,0 +1,58 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +func computeAttributeDiffAsMap(change structured.Change, elementType cty.Type) computed.Diff { + mapValue := change.AsMap() + elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.AllKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. 
+ value = value.AsNoOp() + } + return ComputeDiffForType(value, elementType) + }) + return computed.NewDiff(renderers.Map(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedMap(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + mapValue := change.AsMap() + elements, current := collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + return computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + }) + return computed.NewDiff(renderers.NestedMap(elements), current, change.ReplacePaths.Matches()) +} + +func computeBlockDiffsAsMap(change structured.Change, block *jsonprovider.Block) (map[string]computed.Diff, plans.Action) { + mapValue := change.AsMap() + return collections.TransformMap(mapValue.Before, mapValue.After, mapValue.ExplicitKeys(), func(key string) computed.Diff { + value := mapValue.GetChild(key) + if !value.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + value = value.AsNoOp() + } + return ComputeDiffForBlock(value, block) + }) +} diff --git a/pkg/command/jsonformat/differ/object.go b/pkg/command/jsonformat/differ/object.go new file mode 100644 index 00000000000..617abc88c13 --- /dev/null +++ b/pkg/command/jsonformat/differ/object.go @@ -0,0 +1,72 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +func computeAttributeDiffAsObject(change structured.Change, attributes map[string]cty.Type) computed.Diff { + attributeDiffs, action := processObject(change, attributes, func(value structured.Change, ctype cty.Type) computed.Diff { + return ComputeDiffForType(value, ctype) + }) + return computed.NewDiff(renderers.Object(attributeDiffs), action, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedObject(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + attributeDiffs, action := processObject(change, attributes, func(value structured.Change, attribute *jsonprovider.Attribute) computed.Diff { + return ComputeDiffForAttribute(value, attribute) + }) + return computed.NewDiff(renderers.NestedObject(attributeDiffs), action, change.ReplacePaths.Matches()) +} + +// processObject steps through the children of value as if it is an object and +// calls out to the provided computeDiff function once it has collated the +// diffs for each child attribute. +// +// We have to make this generic as attributes and nested objects process either +// cty.Type or jsonprovider.Attribute children respectively. And we want to +// reuse as much code as possible. +// +// Also, as it is generic we cannot make this function a method on Change as you +// can't create generic methods on structs. Instead, we make this a generic +// function that receives the value as an argument. 
+func processObject[T any](v structured.Change, attributes map[string]T, computeDiff func(structured.Change, T) computed.Diff) (map[string]computed.Diff, plans.Action) { + attributeDiffs := make(map[string]computed.Diff) + mapValue := v.AsMap() + + currentAction := v.GetDefaultActionForIteration() + for key, attribute := range attributes { + attributeValue := mapValue.GetChild(key) + + if !attributeValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + attributeValue = attributeValue.AsNoOp() + } + + // We always assume changes to object are implicit. + attributeValue.BeforeExplicit = false + attributeValue.AfterExplicit = false + + attributeDiff := computeDiff(attributeValue, attribute) + if attributeDiff.Action == plans.NoOp && attributeValue.Before == nil && attributeValue.After == nil { + // We skip attributes of objects that are null both before and + // after. We don't even count these as unchanged attributes. + continue + } + attributeDiffs[key] = attributeDiff + currentAction = collections.CompareActions(currentAction, attributeDiff.Action) + } + + return attributeDiffs, currentAction +} diff --git a/pkg/command/jsonformat/differ/output.go b/pkg/command/jsonformat/differ/output.go new file mode 100644 index 00000000000..27a05275fb2 --- /dev/null +++ b/pkg/command/jsonformat/differ/output.go @@ -0,0 +1,27 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" +) + +func ComputeDiffForOutput(change structured.Change) computed.Diff { + if sensitive, ok := checkForSensitiveType(change, cty.DynamicPseudoType); ok { + return sensitive + } + + if unknown, ok := checkForUnknownType(change, cty.DynamicPseudoType); ok { + return unknown + } + + jsonOpts := renderers.RendererJsonOpts() + return jsonOpts.Transform(change) +} diff --git a/pkg/command/jsonformat/differ/primitive.go b/pkg/command/jsonformat/differ/primitive.go new file mode 100644 index 00000000000..1a4f2bfce4c --- /dev/null +++ b/pkg/command/jsonformat/differ/primitive.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" +) + +func computeAttributeDiffAsPrimitive(change structured.Change, ctype cty.Type) computed.Diff { + return asDiff(change, renderers.Primitive(change.Before, change.After, ctype)) +} diff --git a/pkg/command/jsonformat/differ/sensitive.go b/pkg/command/jsonformat/differ/sensitive.go new file mode 100644 index 00000000000..4ab673f1492 --- /dev/null +++ b/pkg/command/jsonformat/differ/sensitive.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +type CreateSensitiveRenderer func(computed.Diff, bool, bool) computed.DiffRenderer + +func checkForSensitiveType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return ComputeDiffForType(value, ctype) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} + +func checkForSensitiveNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return computeDiffForNestedAttribute(value, attribute) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.Sensitive(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} + +func checkForSensitiveBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { + return change.CheckForSensitive( + func(value structured.Change) computed.Diff { + return ComputeDiffForBlock(value, block) + }, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return computed.NewDiff(renderers.SensitiveBlock(inner, beforeSensitive, afterSensitive), action, change.ReplacePaths.Matches()) + }, + ) +} diff --git 
a/pkg/command/jsonformat/differ/set.go b/pkg/command/jsonformat/differ/set.go new file mode 100644 index 00000000000..1d77f211dcb --- /dev/null +++ b/pkg/command/jsonformat/differ/set.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/plans" +) + +func computeAttributeDiffAsSet(change structured.Change, elementType cty.Type) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := ComputeDiffForType(value, elementType) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.Set(elements), current, change.ReplacePaths.Matches()) +} + +func computeAttributeDiffAsNestedSet(change structured.Change, attributes map[string]*jsonprovider.Attribute) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := computeDiffForNestedAttribute(value, &jsonprovider.NestedType{ + Attributes: attributes, + NestingMode: "single", + }) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return computed.NewDiff(renderers.NestedSet(elements), current, change.ReplacePaths.Matches()) +} + +func 
computeBlockDiffsAsSet(change structured.Change, block *jsonprovider.Block) ([]computed.Diff, plans.Action) { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + processSet(change, func(value structured.Change) { + element := ComputeDiffForBlock(value, block) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + }) + return elements, current +} + +func processSet(change structured.Change, process func(value structured.Change)) { + sliceValue := change.AsSlice() + + foundInBefore := make(map[int]int) + foundInAfter := make(map[int]int) + + // O(n^2) operation here to find matching pairs in the set, so we can make + // the display look pretty. There might be a better way to do this, so look + // here for potential optimisations. + + for ix := 0; ix < len(sliceValue.Before); ix++ { + matched := false + for jx := 0; jx < len(sliceValue.After); jx++ { + if _, ok := foundInAfter[jx]; ok { + // We've already found a match for this after value. + continue + } + + child := sliceValue.GetChild(ix, jx) + if reflect.DeepEqual(child.Before, child.After) && child.IsBeforeSensitive() == child.IsAfterSensitive() && !child.IsUnknown() { + matched = true + foundInBefore[ix] = jx + foundInAfter[jx] = ix + } + } + + if !matched { + foundInBefore[ix] = -1 + } + } + + clearRelevantStatus := func(change structured.Change) structured.Change { + // It's actually really difficult to render the diffs when some indices + // within a slice are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list or set as also + // relevant. + // + // Interestingly the tofu plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the collection is + // relevant (decided elsewhere), then every element in the collection is + // also relevant. 
To be clear, in practice even if we didn't do the + // following explicitly the effect would be the same. It's just nicer + // for us to be clear about the behaviour we expect. + // + // What makes this difficult is the fact that the beforeIx and afterIx + // can be different, and it's quite difficult to work out which one is + // the relevant one. For nested lists, block lists, and tuples it's much + // easier because we always process the same indices in the before and + // after. + change.RelevantAttributes = attribute_path.AlwaysMatcher() + return change + } + + // Now everything in before should be a key in foundInBefore and a value + // in foundInAfter. If a key is mapped to -1 in foundInBefore it means it + // does not have an equivalent in foundInAfter and so has been deleted. + // Everything in foundInAfter has a matching value in foundInBefore, but + // some values in after may not be in foundInAfter. This means these values + // are newly created. + + for ix := 0; ix < len(sliceValue.Before); ix++ { + if jx := foundInBefore[ix]; jx >= 0 { + child := clearRelevantStatus(sliceValue.GetChild(ix, jx)) + process(child) + continue + } + child := clearRelevantStatus(sliceValue.GetChild(ix, len(sliceValue.After))) + process(child) + } + + for jx := 0; jx < len(sliceValue.After); jx++ { + if _, ok := foundInAfter[jx]; ok { + // Then this value was handled in the previous for loop. + continue + } + child := clearRelevantStatus(sliceValue.GetChild(len(sliceValue.Before), jx)) + process(child) + } +} diff --git a/pkg/command/jsonformat/differ/tuple.go b/pkg/command/jsonformat/differ/tuple.go new file mode 100644 index 00000000000..5f22c51d237 --- /dev/null +++ b/pkg/command/jsonformat/differ/tuple.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" +) + +func computeAttributeDiffAsTuple(change structured.Change, elementTypes []cty.Type) computed.Diff { + var elements []computed.Diff + current := change.GetDefaultActionForIteration() + sliceValue := change.AsSlice() + for ix, elementType := range elementTypes { + childValue := sliceValue.GetChild(ix, ix) + if !childValue.RelevantAttributes.MatchesPartial() { + // Mark non-relevant attributes as unchanged. + childValue = childValue.AsNoOp() + } + element := ComputeDiffForType(childValue, elementType) + elements = append(elements, element) + current = collections.CompareActions(current, element.Action) + } + return computed.NewDiff(renderers.List(elements), current, change.ReplacePaths.Matches()) +} diff --git a/pkg/command/jsonformat/differ/types.go b/pkg/command/jsonformat/differ/types.go new file mode 100644 index 00000000000..9fa84138708 --- /dev/null +++ b/pkg/command/jsonformat/differ/types.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +// NestingMode is a wrapper around a string type to describe the various +// different kinds of nesting modes that can be applied to nested blocks and +// objects. 
+type NestingMode string + +const ( + nestingModeSet NestingMode = "set" + nestingModeList NestingMode = "list" + nestingModeMap NestingMode = "map" + nestingModeSingle NestingMode = "single" + nestingModeGroup NestingMode = "group" +) diff --git a/pkg/command/jsonformat/differ/unknown.go b/pkg/command/jsonformat/differ/unknown.go new file mode 100644 index 00000000000..0bdc87e6793 --- /dev/null +++ b/pkg/command/jsonformat/differ/unknown.go @@ -0,0 +1,68 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package differ + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" +) + +func checkForUnknownType(change structured.Change, ctype cty.Type) (computed.Diff, bool) { + return change.CheckForUnknown( + false, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return ComputeDiffForType(value, ctype) + })) +} + +func checkForUnknownNestedAttribute(change structured.Change, attribute *jsonprovider.NestedType) (computed.Diff, bool) { + + // We want our child attributes to show up as computed instead of deleted. + // Let's populate that here. + childUnknown := make(map[string]interface{}) + for key := range attribute.Attributes { + childUnknown[key] = true + } + + return change.CheckForUnknown( + childUnknown, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return computeDiffForNestedAttribute(value, attribute) + })) +} + +func checkForUnknownBlock(change structured.Change, block *jsonprovider.Block) (computed.Diff, bool) { + + // We want our child attributes to show up as computed instead of deleted. 
+ // Let's populate that here. + childUnknown := make(map[string]interface{}) + for key := range block.Attributes { + childUnknown[key] = true + } + + return change.CheckForUnknown( + childUnknown, + processUnknown, + createProcessUnknownWithBefore(func(value structured.Change) computed.Diff { + return ComputeDiffForBlock(value, block) + })) +} + +func processUnknown(current structured.Change) computed.Diff { + return asDiff(current, renderers.Unknown(computed.Diff{})) +} + +func createProcessUnknownWithBefore(computeDiff func(value structured.Change) computed.Diff) structured.ProcessUnknownWithBefore { + return func(current structured.Change, before structured.Change) computed.Diff { + return asDiff(current, renderers.Unknown(computeDiff(before))) + } +} diff --git a/pkg/command/jsonformat/jsondiff/diff.go b/pkg/command/jsonformat/jsondiff/diff.go new file mode 100644 index 00000000000..75b82dad0c4 --- /dev/null +++ b/pkg/command/jsonformat/jsondiff/diff.go @@ -0,0 +1,153 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsondiff + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/collections" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/plans" +) + +type TransformPrimitiveJson func(before, after interface{}, ctype cty.Type, action plans.Action) computed.Diff +type TransformObjectJson func(map[string]computed.Diff, plans.Action) computed.Diff +type TransformArrayJson func([]computed.Diff, plans.Action) computed.Diff +type TransformUnknownJson func(computed.Diff, plans.Action) computed.Diff +type TransformSensitiveJson func(computed.Diff, bool, bool, plans.Action) computed.Diff +type TransformTypeChangeJson func(before, after computed.Diff, action plans.Action) computed.Diff + +// JsonOpts defines the external callback functions that callers should +// implement to process the supplied diffs. +type JsonOpts struct { + Primitive TransformPrimitiveJson + Object TransformObjectJson + Array TransformArrayJson + Unknown TransformUnknownJson + Sensitive TransformSensitiveJson + TypeChange TransformTypeChangeJson +} + +// Transform accepts a generic before and after value that is assumed to be JSON +// formatted and transforms it into a computed.Diff, using the callbacks +// supplied in the JsonOpts class. 
+func (opts JsonOpts) Transform(change structured.Change) computed.Diff { + if sensitive, ok := opts.processSensitive(change); ok { + return sensitive + } + + if unknown, ok := opts.processUnknown(change); ok { + return unknown + } + + beforeType := GetType(change.Before) + afterType := GetType(change.After) + + deleted := afterType == Null && !change.AfterExplicit + created := beforeType == Null && !change.BeforeExplicit + + if beforeType == afterType || (created || deleted) { + targetType := beforeType + if targetType == Null { + targetType = afterType + } + return opts.processUpdate(change, targetType) + } + + b := opts.processUpdate(change.AsDelete(), beforeType) + a := opts.processUpdate(change.AsCreate(), afterType) + return opts.TypeChange(b, a, plans.Update) +} + +func (opts JsonOpts) processUpdate(change structured.Change, jtype Type) computed.Diff { + switch jtype { + case Null: + return opts.processPrimitive(change, cty.NilType) + case Bool: + return opts.processPrimitive(change, cty.Bool) + case String: + return opts.processPrimitive(change, cty.String) + case Number: + return opts.processPrimitive(change, cty.Number) + case Object: + return opts.processObject(change.AsMap()) + case Array: + return opts.processArray(change.AsSlice()) + default: + panic("unrecognized json type: " + jtype) + } +} + +func (opts JsonOpts) processPrimitive(change structured.Change, ctype cty.Type) computed.Diff { + beforeMissing := change.Before == nil && !change.BeforeExplicit + afterMissing := change.After == nil && !change.AfterExplicit + + var action plans.Action + switch { + case beforeMissing && !afterMissing: + action = plans.Create + case !beforeMissing && afterMissing: + action = plans.Delete + case reflect.DeepEqual(change.Before, change.After): + action = plans.NoOp + default: + action = plans.Update + } + + return opts.Primitive(change.Before, change.After, ctype, action) +} + +func (opts JsonOpts) processArray(change structured.ChangeSlice) computed.Diff { + 
processIndices := func(beforeIx, afterIx int) computed.Diff { + // It's actually really difficult to render the diffs when some indices + // within a list are relevant and others aren't. To make this simpler + // we just treat all children of a relevant list as also relevant, so we + // ignore the relevant attributes field. + // + // Interestingly the tofu plan builder also agrees with this, and + // never sets relevant attributes beneath lists or sets. We're just + // going to enforce this logic here as well. If the list is relevant + // (decided elsewhere), then every element in the list is also relevant. + return opts.Transform(change.GetChild(beforeIx, afterIx)) + } + + isObjType := func(value interface{}) bool { + return GetType(value) == Object + } + + return opts.Array(collections.TransformSlice(change.Before, change.After, processIndices, isObjType)) +} + +func (opts JsonOpts) processObject(change structured.ChangeMap) computed.Diff { + return opts.Object(collections.TransformMap(change.Before, change.After, change.AllKeys(), func(key string) computed.Diff { + child := change.GetChild(key) + if !child.RelevantAttributes.MatchesPartial() { + child = child.AsNoOp() + } + + return opts.Transform(child) + })) +} + +func (opts JsonOpts) processUnknown(change structured.Change) (computed.Diff, bool) { + return change.CheckForUnknown( + false, + func(current structured.Change) computed.Diff { + return opts.Unknown(computed.Diff{}, plans.Create) + }, func(current structured.Change, before structured.Change) computed.Diff { + return opts.Unknown(opts.Transform(before), plans.Update) + }, + ) +} + +func (opts JsonOpts) processSensitive(change structured.Change) (computed.Diff, bool) { + return change.CheckForSensitive(opts.Transform, func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff { + return opts.Sensitive(inner, beforeSensitive, afterSensitive, action) + }) +} diff --git a/pkg/command/jsonformat/jsondiff/types.go 
// Type is the JSON type of a decoded value, as produced by encoding/json
// when unmarshalling into an empty interface.
type Type string

const (
	Number Type = "number"
	Object Type = "object"
	Array  Type = "array"
	Bool   Type = "bool"
	String Type = "string"
	Null   Type = "null"
)

// GetType classifies a value decoded from JSON into its JSON type. It panics
// on any Go type that encoding/json cannot produce when decoding into an
// empty interface.
func GetType(val interface{}) Type {
	switch val.(type) {
	case nil:
		return Null
	case bool:
		return Bool
	case string:
		return String
	case float64, json.Number:
		// json.Number appears when the decoder is configured with UseNumber.
		return Number
	case []interface{}:
		return Array
	case map[string]interface{}:
		return Object
	default:
		panic(fmt.Sprintf("unrecognized json type %T", val))
	}
}
+// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed/renderers" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/plans" +) + +const ( + detectedDrift string = "drift" + proposedChange string = "change" +) + +type Plan struct { + PlanFormatVersion string `json:"plan_format_version"` + OutputChanges map[string]jsonplan.Change `json:"output_changes"` + ResourceChanges []jsonplan.ResourceChange `json:"resource_changes"` + ResourceDrift []jsonplan.ResourceChange `json:"resource_drift"` + RelevantAttributes []jsonplan.ResourceAttr `json:"relevant_attributes"` + + ProviderFormatVersion string `json:"provider_format_version"` + ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` +} + +func (plan Plan) getSchema(change jsonplan.ResourceChange) *jsonprovider.Schema { + switch change.Mode { + case jsonstate.ManagedResourceMode: + return plan.ProviderSchemas[change.ProviderName].ResourceSchemas[change.Type] + case jsonstate.DataResourceMode: + return plan.ProviderSchemas[change.ProviderName].DataSourceSchemas[change.Type] + default: + panic("found unrecognized resource mode: " + change.Mode) + } +} + +func (plan Plan) renderHuman(renderer Renderer, mode plans.Mode, opts ...plans.Quality) { + checkOpts := func(target plans.Quality) bool { + for _, opt := range opts { + if opt == target { + return true + } + } + return false + } + + diffs := precomputeDiffs(plan, mode) + haveRefreshChanges := renderHumanDiffDrift(renderer, diffs, mode) + + willPrintResourceChanges := false + counts := make(map[plans.Action]int) + importingCount := 0 + var changes 
[]diff + for _, diff := range diffs.changes { + action := jsonplan.UnmarshalActions(diff.change.Change.Actions) + if action == plans.NoOp && !diff.Moved() && !diff.Importing() { + // Don't show anything for NoOp changes. + continue + } + if action == plans.Delete && diff.change.Mode != jsonstate.ManagedResourceMode { + // Don't render anything for deleted data sources. + continue + } + + changes = append(changes, diff) + + if diff.Importing() { + importingCount++ + } + + // Don't count move-only changes + if action != plans.NoOp { + willPrintResourceChanges = true + counts[action]++ + } + } + + // Precompute the outputs early, so we can make a decision about whether we + // display the "there are no changes messages". + outputs := renderHumanDiffOutputs(renderer, diffs.outputs) + + if len(changes) == 0 && len(outputs) == 0 { + // If we didn't find any changes to report at all then this is a + // "No changes" plan. How we'll present this depends on whether + // the plan is "applyable" and, if so, whether it had refresh changes + // that we already would've presented above. + + if checkOpts(plans.Errored) { + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println() + } + renderer.Streams.Print( + renderer.Colorize.Color("\n[reset][bold][red]Planning failed.[reset][bold] OpenTofu encountered an error while generating this plan.[reset]\n\n"), + ) + } else { + switch mode { + case plans.RefreshOnlyMode: + if haveRefreshChanges { + // We already generated a sufficient prompt about what will + // happen if applying this change above, so we don't need to + // say anything more. 
+ return + } + + renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure still matches the configuration.[reset]\n\n")) + renderer.Streams.Println(format.WordWrap( + "OpenTofu has checked that the real remote objects still match the result of your most recent changes, and found no differences.", + renderer.Streams.Stdout.Columns())) + case plans.DestroyMode: + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + fmt.Fprintln(renderer.Streams.Stdout.File) + } + renderer.Streams.Print(renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] No objects need to be destroyed.[reset]\n\n")) + renderer.Streams.Println(format.WordWrap( + "Either you have not created any objects yet or the existing objects were already deleted outside of OpenTofu.", + renderer.Streams.Stdout.Columns())) + default: + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println("") + } + renderer.Streams.Print( + renderer.Colorize.Color("\n[reset][bold][green]No changes.[reset][bold] Your infrastructure matches the configuration.[reset]\n\n"), + ) + + if haveRefreshChanges { + if !checkOpts(plans.NoChanges) { + // In this case, applying this plan will not change any + // remote objects but _will_ update the state to match what + // we detected during refresh, so we'll reassure the user + // about that. + renderer.Streams.Println(format.WordWrap( + "Your configuration already matches the changes detected above, so applying this plan will only update the state to include the changes detected above and won't change any real infrastructure.", + renderer.Streams.Stdout.Columns(), + )) + } else { + // In this case we detected changes during refresh but this isn't + // a planning mode where we consider those to be applyable. 
The + // user must re-run in refresh-only mode in order to update the + // state to match the upstream changes. + suggestion := "." + if !renderer.RunningInAutomation { + // The normal message includes a specific command line to run. + suggestion = ":\n tofu apply -refresh-only" + } + renderer.Streams.Println(format.WordWrap( + "Your configuration already matches the changes detected above. If you'd like to update the OpenTofu state to match, create and apply a refresh-only plan"+suggestion, + renderer.Streams.Stdout.Columns(), + )) + } + return + } + + // If we get down here then we're just in the simple situation where + // the plan isn't applyable at all. + renderer.Streams.Println(format.WordWrap( + "OpenTofu has compared your real infrastructure against your configuration and found no differences, so no changes are needed.", + renderer.Streams.Stdout.Columns(), + )) + } + } + } + if haveRefreshChanges { + renderer.Streams.Print(format.HorizontalRule(renderer.Colorize, renderer.Streams.Stdout.Columns())) + renderer.Streams.Println() + } + + if willPrintResourceChanges { + renderer.Streams.Println(format.WordWrap( + "\nOpenTofu used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols:", + renderer.Streams.Stdout.Columns())) + if counts[plans.Create] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Create))) + } + if counts[plans.Update] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Update))) + } + if counts[plans.Delete] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Delete))) + } + if counts[plans.DeleteThenCreate] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.DeleteThenCreate))) + } + if counts[plans.CreateThenDelete] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.CreateThenDelete))) + } + if counts[plans.Read] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Read))) + } + if counts[plans.Forget] > 0 { + renderer.Streams.Println(renderer.Colorize.Color(actionDescription(plans.Forget))) + } + } + + if len(changes) > 0 { + if checkOpts(plans.Errored) { + renderer.Streams.Printf("\nOpenTofu planned the following actions, but then encountered a problem:\n") + } else { + renderer.Streams.Printf("\nOpenTofu will perform the following actions:\n") + } + + for _, change := range changes { + diff, render := renderHumanDiff(renderer, change, proposedChange) + if render { + fmt.Fprintln(renderer.Streams.Stdout.File) + renderer.Streams.Println(diff) + } + } + + if importingCount > 0 { + renderer.Streams.Printf( + renderer.Colorize.Color("\n[bold]Plan:[reset] %d to import, %d to add, %d to change, %d to destroy.\n"), + importingCount, + counts[plans.Create]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete], + counts[plans.Update], + counts[plans.Delete]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete]) + } else { + renderer.Streams.Printf( + renderer.Colorize.Color("\n[bold]Plan:[reset] %d to add, %d to change, %d to destroy.\n"), + 
counts[plans.Create]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete], + counts[plans.Update], + counts[plans.Delete]+counts[plans.DeleteThenCreate]+counts[plans.CreateThenDelete]) + } + } + + if len(outputs) > 0 { + renderer.Streams.Print("\nChanges to Outputs:\n") + renderer.Streams.Printf("%s\n", outputs) + + if len(counts) == 0 { + // If we have output changes but not resource changes then we + // won't have output any indication about the changes at all yet, + // so we need some extra context about what it would mean to + // apply a change that _only_ includes output changes. + renderer.Streams.Println(format.WordWrap( + "\nYou can apply this plan to save these new output values to the OpenTofu state, without changing any real infrastructure.", + renderer.Streams.Stdout.Columns())) + } + } +} + +func renderHumanDiffOutputs(renderer Renderer, outputs map[string]computed.Diff) string { + var rendered []string + + var keys []string + escapedKeys := make(map[string]string) + var escapedKeyMaxLen int + for key := range outputs { + escapedKey := renderers.EnsureValidAttributeName(key) + keys = append(keys, key) + escapedKeys[key] = escapedKey + if len(escapedKey) > escapedKeyMaxLen { + escapedKeyMaxLen = len(escapedKey) + } + } + sort.Strings(keys) + + for _, key := range keys { + output := outputs[key] + if output.Action != plans.NoOp { + rendered = append(rendered, fmt.Sprintf("%s %-*s = %s", renderer.Colorize.Color(format.DiffActionSymbol(output.Action)), escapedKeyMaxLen, escapedKeys[key], output.RenderHuman(0, computed.NewRenderHumanOpts(renderer.Colorize, renderer.ShowSensitive)))) + } + } + return strings.Join(rendered, "\n") +} + +func renderHumanDiffDrift(renderer Renderer, diffs diffs, mode plans.Mode) bool { + var drs []diff + + // In refresh-only mode, we show all resources marked as drifted, + // including those which have moved without other changes. 
In other plan + // modes, move-only changes will be rendered in the planned changes, so + // we skip them here. + + if mode == plans.RefreshOnlyMode { + drs = diffs.drift + } else { + for _, dr := range diffs.drift { + if dr.diff.Action != plans.NoOp { + drs = append(drs, dr) + } + } + } + + if len(drs) == 0 { + return false + } + + // If the overall plan is empty, and it's not a refresh only plan then we + // won't show any drift changes. + if diffs.Empty() && mode != plans.RefreshOnlyMode { + return false + } + + renderer.Streams.Print(renderer.Colorize.Color("\n[bold][cyan]Note:[reset][bold] Objects have changed outside of OpenTofu\n")) + renderer.Streams.Println() + renderer.Streams.Print(format.WordWrap( + "OpenTofu detected the following changes made outside of OpenTofu since the last \"tofu apply\" which may have affected this plan:\n", + renderer.Streams.Stdout.Columns())) + + for _, drift := range drs { + diff, render := renderHumanDiff(renderer, drift, detectedDrift) + if render { + renderer.Streams.Println() + renderer.Streams.Println(diff) + } + } + + switch mode { + case plans.RefreshOnlyMode: + renderer.Streams.Println(format.WordWrap( + "\n\nThis is a refresh-only plan, so OpenTofu will not take any actions to undo these. If you were expecting these changes then you can apply this plan to record the updated values in the OpenTofu state without changing any remote objects.", + renderer.Streams.Stdout.Columns(), + )) + default: + renderer.Streams.Println(format.WordWrap( + "\n\nUnless you have made equivalent changes to your configuration, or ignored the relevant attributes using ignore_changes, the following plan may include actions to undo or respond to these changes.", + renderer.Streams.Stdout.Columns(), + )) + } + + return true +} + +func renderHumanDiff(renderer Renderer, diff diff, cause string) (string, bool) { + + // Internally, our computed diffs can't tell the difference between a + // replace action (eg. 
CreateThenDestroy, DestroyThenCreate) and a simple + // update action. So, at the top most level we rely on the action provided + // by the plan itself instead of what we compute. Nested attributes and + // blocks however don't have the replace type of actions, so we can trust + // the computed actions of these. + + action := jsonplan.UnmarshalActions(diff.change.Change.Actions) + if action == plans.NoOp && !diff.Moved() && !diff.Importing() { + // Skip resource changes that have nothing interesting to say. + return "", false + } + + var buf bytes.Buffer + buf.WriteString(renderer.Colorize.Color(resourceChangeComment(diff.change, action, cause))) + + opts := computed.NewRenderHumanOpts(renderer.Colorize, renderer.ShowSensitive) + + if action == plans.Forget { + opts.HideDiffActionSymbols = true + opts.OverrideNullSuffix = true + } + opts.ShowUnchangedChildren = diff.Importing() + + buf.WriteString(fmt.Sprintf("%s %s %s", renderer.Colorize.Color(format.DiffActionSymbol(action)), resourceChangeHeader(diff.change), diff.diff.RenderHuman(0, opts))) + return buf.String(), true +} + +func resourceChangeComment(resource jsonplan.ResourceChange, action plans.Action, changeCause string) string { + var buf bytes.Buffer + + dispAddr := resource.Address + if len(resource.Deposed) != 0 { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, resource.Deposed) + } + + var printedMoved bool + var printedImported bool + + switch action { + case plans.Create: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr)) + case plans.Read: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be read during apply", dispAddr)) + switch resource.ActionReason { + case jsonplan.ResourceInstanceReadBecauseConfigUnknown: + buf.WriteString("\n # (config refers to values not yet known)") + case jsonplan.ResourceInstanceReadBecauseDependencyPending: + buf.WriteString("\n # (depends on a resource or a module with changes pending)") + case 
jsonplan.ResourceInstanceReadBecauseCheckNested: + buf.WriteString("\n # (config will be reloaded to verify a check block)") + } + case plans.Update: + switch changeCause { + case proposedChange: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr)) + case detectedDrift: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has changed", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] update (unknown reason %s)", dispAddr, changeCause)) + } + case plans.CreateThenDelete, plans.DeleteThenCreate: + switch resource.ActionReason { + case jsonplan.ResourceInstanceReplaceBecauseTainted: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] is tainted, so it must be [bold][red]replaced[reset]", dispAddr)) + case jsonplan.ResourceInstanceReplaceByRequest: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset], as requested", dispAddr)) + case jsonplan.ResourceInstanceReplaceByTriggers: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]replaced[reset] due to changes in replace_triggered_by", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced[reset]", dispAddr)) + } + case plans.Delete: + switch changeCause { + case proposedChange: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed[reset]", dispAddr)) + case detectedDrift: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has been deleted", dispAddr)) + default: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] delete (unknown reason %s)", dispAddr, changeCause)) + } + // We can sometimes give some additional detail about why we're + // proposing to delete. We show this as additional notes, rather than + // as additional wording in the main action statement, in an attempt + // to make the "will be destroyed" message prominent and consistent + // in all cases, for easier scanning of this often-risky action. 
+ switch resource.ActionReason { + case jsonplan.ResourceInstanceDeleteBecauseNoResourceConfig: + buf.WriteString(fmt.Sprintf("\n # (because %s.%s is not in configuration)", resource.Type, resource.Name)) + case jsonplan.ResourceInstanceDeleteBecauseNoMoveTarget: + buf.WriteString(fmt.Sprintf("\n # (because %s was moved to %s, which is not in configuration)", resource.PreviousAddress, resource.Address)) + case jsonplan.ResourceInstanceDeleteBecauseNoModule: + // FIXME: Ideally we'd truncate addr.Module to reflect the earliest + // step that doesn't exist, so it's clearer which call this refers + // to, but we don't have enough information out here in the UI layer + // to decide that; only the "expander" in OpenTofu Core knows + // which module instance keys are actually declared. + buf.WriteString(fmt.Sprintf("\n # (because %s is not in configuration)", resource.ModuleAddress)) + case jsonplan.ResourceInstanceDeleteBecauseWrongRepetition: + var index interface{} + if resource.Index != nil { + if err := json.Unmarshal(resource.Index, &index); err != nil { + panic(err) + } + } + + // We have some different variations of this one + switch index.(type) { + case nil: + buf.WriteString("\n # (because resource uses count or for_each)") + case float64: + buf.WriteString("\n # (because resource does not use count)") + case string: + buf.WriteString("\n # (because resource does not use for_each)") + } + case jsonplan.ResourceInstanceDeleteBecauseCountIndex: + buf.WriteString(fmt.Sprintf("\n # (because index [%s] is out of range for count)", resource.Index)) + case jsonplan.ResourceInstanceDeleteBecauseEachKey: + buf.WriteString(fmt.Sprintf("\n # (because key [%s] is not in for_each map)", resource.Index)) + } + if len(resource.Deposed) != 0 { + // In the case where we partially failed to replace a resource + // configured with 'create_before_destroy' in a previous apply and + // the deposed instance is still in the state, we give some extra + // context about this unusual 
situation. + buf.WriteString("\n # (left over from a partially-failed replacement of this instance)") + } + case plans.Forget: + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be removed from the OpenTofu state [bold][red]but will not be destroyed[reset]", dispAddr)) + + if len(resource.Deposed) != 0 { + // In the case where we partially failed to replace a resource + // configured with 'create_before_destroy' in a previous apply and + // the deposed instance is still in the state, we give some extra + // context about this unusual situation. + buf.WriteString("\n # (left over from a partially-failed replacement of this instance)") + } + case plans.NoOp: + if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address { + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] has moved to [bold]%s[reset]", resource.PreviousAddress, dispAddr)) + printedMoved = true + break + } + if resource.Change.Importing != nil { + buf.WriteString(fmt.Sprintf("[bold] # %s[reset] will be imported", dispAddr)) + if len(resource.Change.GeneratedConfig) > 0 { + buf.WriteString("\n #[reset] (config will be generated)") + } + printedImported = true + break + } + fallthrough + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString("\n") + + if len(resource.PreviousAddress) > 0 && resource.PreviousAddress != resource.Address && !printedMoved { + buf.WriteString(fmt.Sprintf(" # [reset](moved from %s)\n", resource.PreviousAddress)) + } + if resource.Change.Importing != nil && !printedImported { + // We want to make this as forward compatible as possible, and we know + // the ID may be removed from the Importing metadata in favour of + // something else. 
+ // As Importing metadata is loaded from a JSON struct, the effect of it + // being removed in the future will mean this renderer will receive it + // as an empty string + if len(resource.Change.Importing.ID) > 0 { + buf.WriteString(fmt.Sprintf(" # [reset](imported from \"%s\")\n", resource.Change.Importing.ID)) + } else { + // This means we're trying to render a plan from a future version + // and we didn't get given the ID. So we'll do our best. + buf.WriteString(" # [reset](will be imported first)\n") + } + } + if resource.Change.Importing != nil && (action == plans.CreateThenDelete || action == plans.DeleteThenCreate) { + buf.WriteString(" # [reset][yellow]Warning: this will destroy the imported resource[reset]\n") + } + + return buf.String() +} + +func resourceChangeHeader(change jsonplan.ResourceChange) string { + mode := "resource" + if change.Mode != jsonstate.ManagedResourceMode { + mode = "data" + } + return fmt.Sprintf("%s \"%s\" \"%s\"", mode, change.Type, change.Name) +} + +func actionDescription(action plans.Action) string { + switch action { + case plans.Create: + return " [green]+[reset] create" + case plans.Delete: + return " [red]-[reset] destroy" + case plans.Update: + return " [yellow]~[reset] update in-place" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset] create replacement and then destroy" + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset] destroy and then create replacement" + case plans.Read: + return " [cyan]<=[reset] read (data resources)" + case plans.Forget: + return " [red].[reset] forget" + + default: + panic(fmt.Sprintf("unrecognized change type: %s", action.String())) + } +} diff --git a/pkg/command/jsonformat/plan_test.go b/pkg/command/jsonformat/plan_test.go new file mode 100644 index 00000000000..915c63079bb --- /dev/null +++ b/pkg/command/jsonformat/plan_test.go @@ -0,0 +1,7518 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/jsonformat/differ" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestRenderHuman_EmptyPlan(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + streams, done := terminal.StreamsForTesting(t) + + plan := Plan{} + + renderer := Renderer{Colorize: color, Streams: streams} + plan.renderHuman(renderer, plans.NormalMode) + + want := ` +No changes. Your infrastructure matches the configuration. + +OpenTofu has compared your real infrastructure against your configuration and +found no differences, so no changes are needed. 
+` + + got := done(t).Stdout() + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestRenderHuman_EmptyOutputs(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + streams, done := terminal.StreamsForTesting(t) + + outputVal, _ := json.Marshal("some-text") + plan := Plan{ + OutputChanges: map[string]jsonplan.Change{ + "a_string": { + Actions: []string{"no-op"}, + Before: outputVal, + After: outputVal, + }, + }, + } + + renderer := Renderer{Colorize: color, Streams: streams} + plan.renderHuman(renderer, plans.NormalMode) + + want := ` +No changes. Your infrastructure matches the configuration. + +OpenTofu has compared your real infrastructure against your configuration and +found no differences, so no changes are needed. +` + + got := done(t).Stdout() + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestRenderHuman_Imports(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + schemas := map[string]*jsonprovider.Provider{ + "test": { + ResourceSchemas: map[string]*jsonprovider.Schema{ + "test_resource": { + Block: &jsonprovider.Block{ + Attributes: map[string]*jsonprovider.Attribute{ + "id": { + AttributeType: marshalJson(t, "string"), + }, + "value": { + AttributeType: marshalJson(t, "string"), + }, + }, + }, + }, + }, + }, + } + + tcs := map[string]struct { + plan Plan + output string + }{ + "simple_import": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.resource", + Mode: "managed", + Type: "test_resource", + Name: "resource", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"no-op"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + 
After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + }, + }, + }, + }, + output: ` +OpenTofu will perform the following actions: + + # test_resource.resource will be imported + resource "test_resource" "resource" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + value = "Hello, World!" + } + +Plan: 1 to import, 0 to add, 0 to change, 0 to destroy. +`, + }, + "simple_import_with_generated_config": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.resource", + Mode: "managed", + Type: "test_resource", + Name: "resource", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"no-op"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + GeneratedConfig: `resource "test_resource" "resource" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + value = "Hello, World!" +}`, + }, + }, + }, + }, + output: ` +OpenTofu will perform the following actions: + + # test_resource.resource will be imported + # (config will be generated) + resource "test_resource" "resource" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + value = "Hello, World!" + } + +Plan: 1 to import, 0 to add, 0 to change, 0 to destroy. 
+`, + }, + "import_and_move": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.after", + PreviousAddress: "test_resource.before", + Mode: "managed", + Type: "test_resource", + Name: "after", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"no-op"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + }, + }, + }, + }, + output: ` +OpenTofu will perform the following actions: + + # test_resource.before has moved to test_resource.after + # (imported from "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E") + resource "test_resource" "after" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + value = "Hello, World!" + } + +Plan: 1 to import, 0 to add, 0 to change, 0 to destroy. +`, + }, + "import_move_and_update": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.after", + PreviousAddress: "test_resource.before", + Mode: "managed", + Type: "test_resource", + Name: "after", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"update"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, Universe!", + }), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + }, + }, + }, + }, + output: ` +OpenTofu used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: + ~ update in-place + +OpenTofu will perform the following actions: + + # test_resource.after will be updated in-place + # (moved from test_resource.before) + # (imported from "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E") + ~ resource "test_resource" "after" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + ~ value = "Hello, World!" -> "Hello, Universe!" + } + +Plan: 1 to import, 0 to add, 1 to change, 0 to destroy. +`, + }, + "import_and_update": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.resource", + Mode: "managed", + Type: "test_resource", + Name: "resource", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"update"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, Universe!", + }), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + }, + }, + }, + }, + output: ` +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + ~ update in-place + +OpenTofu will perform the following actions: + + # test_resource.resource will be updated in-place + # (imported from "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E") + ~ resource "test_resource" "resource" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + ~ value = "Hello, World!" -> "Hello, Universe!" + } + +Plan: 1 to import, 0 to add, 1 to change, 0 to destroy. 
+`, + }, + "import_and_update_with_no_id": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.resource", + Mode: "managed", + Type: "test_resource", + Name: "resource", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"update"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, Universe!", + }), + Importing: &jsonplan.Importing{}, + }, + }, + }, + }, + output: ` +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + ~ update in-place + +OpenTofu will perform the following actions: + + # test_resource.resource will be updated in-place + # (will be imported first) + ~ resource "test_resource" "resource" { + id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" + ~ value = "Hello, World!" -> "Hello, Universe!" + } + +Plan: 1 to import, 0 to add, 1 to change, 0 to destroy. +`, + }, + "import_and_replace": { + plan: Plan{ + ResourceChanges: []jsonplan.ResourceChange{ + { + Address: "test_resource.resource", + Mode: "managed", + Type: "test_resource", + Name: "resource", + ProviderName: "test", + Change: jsonplan.Change{ + Actions: []string{"create", "delete"}, + Before: marshalJson(t, map[string]interface{}{ + "id": "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + "value": "Hello, World!", + }), + After: marshalJson(t, map[string]interface{}{ + "id": "9794FB1F-7260-442F-830C-F2D450E90CE3", + "value": "Hello, World!", + }), + ReplacePaths: marshalJson(t, [][]string{{"id"}}), + Importing: &jsonplan.Importing{ + ID: "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E", + }, + }, + ActionReason: "", + }, + }, + }, + output: ` +OpenTofu used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: ++/- create replacement and then destroy + +OpenTofu will perform the following actions: + + # test_resource.resource must be replaced + # (imported from "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E") + # Warning: this will destroy the imported resource ++/- resource "test_resource" "resource" { + ~ id = "1D5F5E9E-F2E5-401B-9ED5-692A215AC67E" -> "9794FB1F-7260-442F-830C-F2D450E90CE3" # forces replacement + value = "Hello, World!" + } + +Plan: 1 to import, 1 to add, 0 to change, 1 to destroy. +`, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + + plan := tc.plan + plan.PlanFormatVersion = jsonplan.FormatVersion + plan.ProviderFormatVersion = jsonprovider.FormatVersion + plan.ProviderSchemas = schemas + + renderer := Renderer{ + Colorize: color, + Streams: streams, + } + plan.renderHuman(renderer, plans.NormalMode) + + got := done(t).Stdout() + want := tc.output + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } + }) + } +} + +func TestResourceChange_primitiveTypes(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + }`, + }, + "creation (null string)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null"), + }), + Schema: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null" + }`, + }, + "creation (null string with extra whitespace)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("null "), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + string = "null " + }`, + }, + "creation (object with quoted keys)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "object": cty.ObjectVal(map[string]cty.Value{ + "unquoted": cty.StringVal("value"), + "quoted:key": cty.StringVal("some-value"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "object": {Type: cty.Object(map[string]cty.Type{ + "unquoted": cty.String, + "quoted:key": cty.String, + }), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + object = { + + "quoted:key" = "some-value" + + unquoted = "value" + } + }`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), 
+ ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "deletion of deposed object": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + DeposedKey: states.DeposedKey("byebye"), + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example (deposed object byebye) will be destroyed + # (left over from a partially-failed replacement of this instance) + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "deletion (empty string)": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "intentionally_long": cty.StringVal(""), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "intentionally_long": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - id = "i-02ae66f368e8518a9" -> null + }`, + }, + "forget": { + Action: plans.Forget, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be removed from the OpenTofu state but will not be destroyed + . 
resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" +}`, + }, + "forget a deposed object": { + Action: plans.Forget, + Mode: addrs.ManagedResourceMode, + DeposedKey: states.DeposedKey("byebye"), + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + }), + After: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example (deposed object byebye) will be removed from the OpenTofu state but will not be destroyed + # (left over from a partially-failed replacement of this instance) + . resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" +}`, + }, + "string in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + }`, + }, + "update with quoted key": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": cty.StringVal("https://example.com/saml"), + "zeta": cty.StringVal("alpha"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "saml:aud": 
cty.StringVal("https://saml.example.com"), + "zeta": cty.StringVal("alpha"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "saml:aud": {Type: cty.String, Optional: true}, + "zeta": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ "saml:aud" = "https://example.com/saml" -> "https://saml.example.com" + # (1 unchanged attribute hidden) + }`, + }, + "string force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "string in-place update (null values)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "unchanged": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "unchanged": cty.NullVal(cty.String), + }), + 
Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "unchanged": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original +long +multi-line +string +field`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +extremely long +multi-line +string +field`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT + original + - long + + extremely long + multi-line + string + field + EOT + }`, + }, + "addition of multi-line string field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, 
Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + more_lines = <<-EOT + original + new line + EOT + }`, + }, + "force-new update of multi-line string field": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "more_lines": cty.StringVal(`original`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "more_lines": cty.StringVal(`original +new line`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "more_lines": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "more_lines"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ more_lines = <<-EOT # forces replacement + original + + new line + EOT + }`, + }, + + // Sensitive + + "creation with sensitive field": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "password": cty.StringVal("top-secret"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": 
{Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + conn_info = { + + password = (sensitive value) + + user = "not-secret" + } + + id = (known after apply) + + password = (sensitive value) + }`, + }, + "update with equal sensitive field": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("blah"), + "str": cty.StringVal("before"), + "password": cty.StringVal("top-secret"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "str": cty.StringVal("after"), + "password": cty.StringVal("top-secret"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "str": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "blah" -> (known after apply) + ~ str = "before" -> "after" + # (1 unchanged attribute hidden) + }`, + }, + + // tainted objects + "replace tainted resource": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + 
RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "ami"}, + }), + ExpectedOutput: ` # test_instance.example is tainted, so it must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" # forces replacement + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + }`, + }, + "force replacement with empty before value": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.NullVal(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + + forced = "example" # forces replacement + name = "name" + }`, + }, + "force replacement with empty before value legacy": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal(""), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + "forced": cty.StringVal("example"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "forced": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "forced"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { 
+ + forced = "example" # forces replacement + name = "name" + }`, + }, + "read during apply because of unknown configuration": { + Action: plans.Read, + ActionReason: plans.ResourceInstanceReadBecauseConfigUnknown, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + # (config refers to values not yet known) + <= data "test_instance" "example" { + name = "name" + }`, + }, + "read during apply because of pending changes to upstream dependency": { + Action: plans.Read, + ActionReason: plans.ResourceInstanceReadBecauseDependencyPending, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + # (depends on a resource or a module with changes pending) + <= data "test_instance" "example" { + name = "name" + }`, + }, + "read during apply for unspecified reason": { + Action: plans.Read, + Mode: addrs.DataResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("name"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + }, + }, + ExpectedOutput: ` # data.test_instance.example will be read during apply + <= data "test_instance" "example" { + name = "name" + }`, + }, + "show 
all identifying attributes even if unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "bar": cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "bar": cty.StringVal("bar"), + "foo": cty.StringVal("foo"), + "name": cty.StringVal("alice"), + "tags": cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("bob"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + "foo": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, + "tags": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + name = "alice" + tags = { + "name" = "bob" + } + # (2 unchanged attributes hidden) + }`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_JSON(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "str": "value", + "list":["a","b", 234, true], + "obj": {"key": "val"} + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: 
true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode( + { + + list = [ + + "a", + + "b", + + 234, + + true, + ] + + obj = { + + key = "val" + } + + str = "value" + } + ) + }`, + }, + "in-place update of object": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value","ccc": 5}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = "new_value" + - ccc = 5 + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "in-place update of object with quoted keys": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "c:c": "old_value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "b:bb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: 
cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + "b:bb" = "new_value" + - "c:c" = "old_value" + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "in-place update (from empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": []}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + + "value", + ] + } + ) + }`, + }, + "in-place update (to empty tuple)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": ["value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": []}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = 
jsonencode( + ~ { + ~ aaa = [ + - "value", + ] + } + ) + }`, + }, + "in-place update (tuple of different types)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"baz"}, "value"]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ aaa = [ + 42, + ~ { + ~ foo = "bar" -> "baz" + }, + "value", + ] + } + ) + }`, + }, + "force-new update": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "new_value"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + bbb = 
"new_value" + # (1 unchanged attribute hidden) + } # forces replacement + ) + }`, + }, + "in-place update (whitespace change)": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( # whitespace changes + { + aaa = "value" + bbb = "another" + } + ) + }`, + }, + "force-new update (whitespace change)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": "value", "bbb": "another"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"aaa":"value", + "bbb":"another"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "json_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = 
jsonencode( # whitespace changes force replacement + { + aaa = "value" + bbb = "another" + } + ) + }`, + }, + "creation (empty)": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + id = (known after apply) + + json_field = jsonencode({}) + }`, + }, + "JSON list item removal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second","third"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["first","second"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 unchanged element hidden) + "second", + - "third", + ] + ) + }`, + }, + "JSON list item addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`["first","second"]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + 
"json_field": cty.StringVal(`["first","second","third"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + # (1 unchanged element hidden) + "second", + + "third", + ] + ) + }`, + }, + "JSON list object addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"first":"111"}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"first":"111","second":"222"}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + + second = "222" + # (1 unchanged attribute hidden) + } + ) + }`, + }, + "JSON object with nested list": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{ + "Statement": ["first"] + }`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{ + "Statement": ["first", "second"] + }`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": 
{Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ Statement = [ + "first", + + "second", + ] + } + ) + }`, + }, + "JSON list of objects - adding item": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + ) + }`, + }, + "JSON list of objects - removing item": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`[{"one": "111"}, {"two": "222"}, {"three": "333"}]`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`[{"one": "111"}, {"three": "333"}]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: 
cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ [ + { + one = "111" + }, + - { + - two = "222" + }, + { + three = "333" + }, + ] + ) + }`, + }, + "JSON object with list of objects": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"one": "111"}, {"two": "222"}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + { + one = "111" + }, + + { + + two = "222" + }, + ] + } + ) + }`, + }, + "JSON object double nested lists": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111"]}]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`{"parent":[{"another_list": ["111", "222"]}]}`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated 
in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + ~ parent = [ + ~ { + ~ another_list = [ + "111", + + "222", + ] + }, + ] + } + ) + }`, + }, + "in-place update from object to tuple": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "json_field": cty.StringVal(`{"aaa": [42, {"foo":"bar"}, "value"]}`), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "json_field": cty.StringVal(`["aaa", 42, "something"]`), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "json_field": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ json_field = jsonencode( + ~ { + - aaa = [ + - 42, + - { + - foo = "bar" + }, + - "value", + ] + } -> [ + + "aaa", + + 42, + + "something", + ] + ) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_listObject(t *testing.T) { + testCases := map[string]testCase{ + // https://github.com/hashicorp/terraform/issues/30641 + "updating non-identifying attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": cty.StringVal("staging"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + 
"name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "accounts": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1"), + "name": cty.StringVal("production"), + "status": cty.StringVal("ACTIVE"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("2"), + "name": cty.StringVal("staging"), + "status": cty.StringVal("EXPLODED"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("3"), + "name": cty.StringVal("disaster-recovery"), + "status": cty.StringVal("ACTIVE"), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "accounts": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "id": cty.String, + "name": cty.String, + "status": cty.String, + })), + }, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ accounts = [ + { + id = "1" + name = "production" + status = "ACTIVE" + }, + ~ { + id = "2" + name = "staging" + ~ status = "ACTIVE" -> "EXPLODED" + }, + { + id = "3" + name = "disaster-recovery" + status = "ACTIVE" + }, + ] + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + 
cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first addition": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + 
cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + # (1 unchanged element hidden) + "bbbb", + + "cccc", + "dddd", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "list_field"}, + }), + ExpectedOutput: 
` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ # forces replacement + "aaaa", + + "bbbb", + "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + "bbbb", + - "cccc", + "dddd", + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty list": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, 
+ "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + list_field = [] + }`, + }, + "in-place update - full to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + - "aaaa", + - "bbbb", + - "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - null to empty": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.NullVal(cty.List(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: 
cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + list_field = [] + # (1 unchanged attribute hidden) + }`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + "cccc", + ] + # (1 unchanged attribute hidden) + }`, + }, + "update - two new unknown elements": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + After: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.StringVal("cccc"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ list_field = [ + "aaaa", + - "bbbb", + + (known after apply), + + (known after apply), + "cccc", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveTuple(t *testing.T) { + testCases := map[string]testCase{ + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("dddd"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "tuple_field": cty.TupleVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + cty.StringVal("eeee"), + cty.StringVal("ffff"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "tuple_field": {Type: cty.Tuple([]cty.Type{cty.String, cty.String, cty.String, cty.String, cty.String}), Optional: true}, + }, + }, + RequiredReplace: 
cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ tuple_field = [ + # (1 unchanged element hidden) + "bbbb", + ~ "dddd" -> "cccc", + "eeee", + # (1 unchanged element hidden) + ] + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_primitiveSet(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("new-element"), + }), + }), + Schema: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "new-element", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + 
cty.StringVal("aaaa"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "set_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ # forces replacement + + "bbbb", + # (2 unchanged elements hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "cccc", + # (1 
unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + set_field = [] + }`, + }, + "in-place update - full to empty set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - null to empty set": { + Action: plans.Update, + Mode: 
addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.NullVal(cty.Set(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + set_field = [] + # (1 unchanged attribute hidden) + }`, + }, + "in-place update to unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.UnknownVal(cty.Set(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "aaaa", + - "bbbb", + ] -> (known after apply) + # (1 unchanged attribute hidden) + }`, + }, + "in-place update to 
unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.StringVal("bbbb"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "set_field": cty.SetVal([]cty.Value{ + cty.StringVal("aaaa"), + cty.UnknownVal(cty.String), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "set_field": {Type: cty.Set(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ set_field = [ + - "bbbb", + + (known after apply), + # (1 unchanged element hidden) + ] + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_map(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.NullVal(cty.Map(cty.String)), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: 
cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + + map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "new-key": cty.StringVal("new-element"), + "be:ep": cty.StringVal("boop"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "be:ep" = "boop" + + "new-key" = "new-element" + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": 
cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "b:b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + + "b" = "bbbb" + + "b:b" = "bbbb" + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "force-new update - insertion": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "map_field"}, + }), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { # forces replacement + + "b" = "bbbb" + # (2 unchanged elements 
hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bbbb"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + - "a" = "aaaa" -> null + - "c" = "cccc" -> null + # (1 unchanged element hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + "creation - empty": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapValEmpty(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-STATIC" + + id = (known after apply) + + map_field 
= {} + }`, + }, + "update to unknown element": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.StringVal("bbbb"), + "c": cty.StringVal("cccc"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("ami-STATIC"), + "map_field": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("aaaa"), + "b": cty.UnknownVal(cty.String), + "c": cty.StringVal("cccc"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_field": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ id = "i-02ae66f368e8518a9" -> (known after apply) + ~ map_field = { + ~ "b" = "bbbb" -> (known after apply) + # (2 unchanged elements hidden) + } + # (1 unchanged attribute hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedList(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + })}), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device {} + }`, + }, + 
"in-place update - first insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + }`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + 
"root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged attribute hidden) + }, + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + }), + RequiredReplace: 
cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ # forces replacement + ~ { + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + }, + ] + id = "i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + }`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + 
Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.EmptyTupleVal, + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.True, + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + + block { + + attr = true + } + }`, + }, + "in-place sequence update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("x")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("y")}), + cty.ObjectVal(map[string]cty.Value{"attr": cty.StringVal("z")}), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ list { + ~ attr = "x" -> "y" + } + ~ list { + ~ attr = "y" -> "z" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: 
addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": 
cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("75GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskc"), + "size": cty.StringVal("25GB"), + }), + }), + "root_block_device": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + ~ { + ~ size = "50GB" -> "75GB" + # (1 unchanged attribute hidden) + }, + ~ { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSet(t *testing.T) { + testCases := map[string]testCase{ + "creation from null - sensitive set": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "ami": cty.String, + "disks": cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.Set(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + })), + After: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami 
= "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + + { + + mount_point = "/var/diska" + }, + ] + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - creation - sensitive set": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + + + root_block_device { + + volume_type = "gp2" + } + }`, + }, + "in-place update - marking set sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. The value is unchanged. 
+ ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + }, + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + # (1 unchanged element hidden) + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + }`, + }, + "force-new update (whole block)": { + Action: 
plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { # forces replacement + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + + { # forces replacement + + mount_point = "/var/diskb" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { # forces replacement + - volume_type = "gp2" -> null + } + + root_block_device { # forces replacement + + volume_type = "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.SetVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + }`, + }, + "in-place update - empty nested sets": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + RequiredReplace: 
cty.NewPathSet(), + Schema: testSchema(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [] + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - null insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disks = [ + + { + + mount_point = "/var/diska" + + size = "50GB" + }, + ] + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + + root_block_device { + + new_field = "new_value" + + volume_type = "gp2" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + 
"disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = [ + - { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + ] -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMap(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_a" = { + + mount_point = "/var/diska" + }, + } + id = "i-02ae66f368e8518a9" + + + root_block_device "a" { + + volume_type = "gp2" + } + }`, + }, + "in-place update - change attr": { + 
Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { + + size = "50GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { + + new_field = "new_value" + # (1 unchanged attribute hidden) + } + }`, + }, + "in-place update - insertion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "disk_2": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/disk2"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.NullVal(cty.String), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + "disk_2" = { + + mount_point = "/var/disk2" + + size = "50GB" + }, + # (1 unchanged element hidden) + } + id = "i-02ae66f368e8518a9" + + + root_block_device "b" { + + new_field = "new_value" + + volume_type = "gp2" + } + + # (1 unchanged block hidden) + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("standard"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.IndexStep{Key: cty.StringVal("a")}, + }, + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + ~ "disk_a" = { # forces replacement + ~ size = "50GB" -> "100GB" + # (1 unchanged attribute hidden) + }, + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device "a" { # forces replacement + ~ volume_type = "gp2" -> "different" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": 
cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + "new_field": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } + id = "i-02ae66f368e8518a9" + + - root_block_device "a" { + - new_field = "new_value" -> null + - volume_type = "gp2" -> null + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + 
}), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + - "disk_a" = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + }, + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - insertion sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "disks"}, + cty.IndexStep{Key: cty.StringVal("disk_a")}, + cty.GetAttrStep{Name: "mount_point"}, + }, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = { + + 
"disk_a" = { + + mount_point = (sensitive value) + + size = "50GB" + }, + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + }`, + }, + "in-place update - multiple blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ 
+ "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), 
+ }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": 
cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + }`, + }, + "in-place update - multiple different unchanged blocks": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + 
}), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (2 unchanged blocks hidden) + }`, + }, + "in-place update - multiple different blocks first changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = 
"i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple different blocks second changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + + # (1 unchanged block hidden) + }`, + }, + "in-place update - multiple different blocks 
changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "a" { + ~ volume_type = "gp2" -> "gp3" + } + }`, + }, + "in-place update - mixed blocks unchanged": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), 
+ "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (4 unchanged blocks hidden) + }`, + }, + "in-place update - mixed blocks changed": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + "root_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + "leaf_block_device": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp3"), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaMultipleBlocks(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + ~ leaf_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + ~ root_block_device "b" { + ~ volume_type = "gp2" -> "gp3" + } + + # (2 unchanged 
blocks hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSingle(t *testing.T) { + testCases := map[string]testCase{ + "in-place update - equal": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + id = "i-02ae66f368e8518a9" + # (1 unchanged attribute hidden) + + # (1 unchanged block hidden) + }`, + }, + "in-place update - creation": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + 
"size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.NullVal(cty.String), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + + disk = { + + mount_point = "/var/diska" + + size = "50GB" + } + id = "i-02ae66f368e8518a9" + + + root_block_device {} + }`, + }, + "force-new update (inside blocks)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{ + cty.GetAttrStep{Name: "root_block_device"}, + cty.GetAttrStep{Name: "volume_type"}, + }, + cty.Path{ + cty.GetAttrStep{Name: "disk"}, + cty.GetAttrStep{Name: "mount_point"}, + }, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> "/var/diskb" # forces replacement + # (1 unchanged attribute hidden) + } + id = 
"i-02ae66f368e8518a9" + + ~ root_block_device { + ~ volume_type = "gp2" -> "different" # forces replacement + } + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diskb"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("different"), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "root_block_device"}}, + cty.Path{cty.GetAttrStep{Name: "disk"}}, + ), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { # forces replacement + ~ mount_point = "/var/diska" -> "/var/diskb" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + ~ root_block_device { # forces replacement + ~ volume_type = "gp2" -> "different" + } + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": 
cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "root_block_device": cty.NullVal(cty.Object(map[string]cty.Type{ + "volume_type": cty.String, + })), + "disk": cty.NullVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchema(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disk = { + - mount_point = "/var/diska" -> null + - size = "50GB" -> null + } -> null + id = "i-02ae66f368e8518a9" + + - root_block_device { + - volume_type = "gp2" -> null + } + }`, + }, + "with dynamically-typed attribute": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "block": cty.NullVal(cty.Object(map[string]cty.Type{ + "attr": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "block": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + block { + + attr = "foo" + } + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ mount_point = "/var/diska" -> (known after apply) + ~ size = "50GB" -> (known after apply) + } -> (known after apply) + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + "in-place update - modification": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disk": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("25GB"), + }), + "root_block_device": cty.ObjectVal(map[string]cty.Value{ + "volume_type": cty.StringVal("gp2"), + "new_field": cty.StringVal("new_value"), + }), + 
}), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaPlus(configschema.NestingSingle), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disk = { + ~ size = "50GB" -> "25GB" + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + + # (1 unchanged block hidden) + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedMapSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": 
cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.MapVal(map[string]cty.Value{ + "disk_a": cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingMap), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedListSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ 
+ "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + 
"mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingList), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_nestedSetSensitiveSchema(t *testing.T) { + testCases := map[string]testCase{ + "creation from null": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.NullVal(cty.String), + "ami": cty.NullVal(cty.String), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + + ami = "ami-AFTER" + + disks = (sensitive value) + + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": 
cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + })), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.NullVal(cty.String), + }), + }), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + "force-new update (whole block)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("100GB"), + }), + }), + }), + RequiredReplace: cty.NewPathSet( + cty.Path{cty.GetAttrStep{Name: "disks"}}, + ), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - deletion": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + - disks = (sensitive value) -> null + id = "i-02ae66f368e8518a9" + }`, + }, + "in-place update - unknown": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "disks": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "mount_point": cty.StringVal("/var/diska"), + "size": cty.StringVal("50GB"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "disks": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "mount_point": cty.String, + "size": cty.String, + }))), + }), + RequiredReplace: cty.NewPathSet(), + Schema: testSchemaSensitive(configschema.NestingSet), + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = "ami-BEFORE" -> "ami-AFTER" + ~ disks = (sensitive value) + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_actionReason(t *testing.T) { + emptySchema := &configschema.Block{} + nullVal := cty.NullVal(cty.EmptyObject) 
+ emptyVal := cty.EmptyObjectVal + + testCases := map[string]testCase{ + "delete for no particular reason": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (NoKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.NoKey, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be destroyed + # (because resource uses count or for_each) + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (IntKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because resource does not use count) + - resource "test_instance" "example" {}`, + }, + "delete because of wrong repetition mode (StringKey)": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseWrongRepetition, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("a"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["a"] will be destroyed + # (because resource does not use for_each) + - resource "test_instance" "example" {}`, + }, + "delete because no resource configuration": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, + 
ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo.test_instance.example will be destroyed + # (because test_instance.example is not in configuration) + - resource "test_instance" "example" {}`, + }, + "delete because no module": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo[1].test_instance.example will be destroyed + # (because module.foo[1] is not in configuration) + - resource "test_instance" "example" {}`, + }, + "delete because out of range for count": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseCountIndex, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.IntKey(1), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example[1] will be destroyed + # (because index [1] is out of range for count) + - resource "test_instance" "example" {}`, + }, + "delete because out of range for for_each": { + Action: plans.Delete, + ActionReason: plans.ResourceInstanceDeleteBecauseEachKey, + Mode: addrs.ManagedResourceMode, + InstanceKey: addrs.StringKey("boop"), + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example["boop"] will be destroyed + # (because key ["boop"] is not in for_each map) + - resource "test_instance" "example" {}`, + }, + "replace for no particular reason (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: 
emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace for no particular reason (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {}`, + }, + "replace by request (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested +-/+ resource "test_instance" "example" {}`, + }, + "replace by request (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceByRequest, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be replaced, as requested ++/- resource "test_instance" "example" {}`, + }, + "replace because tainted (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so it must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace because tainted (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceBecauseTainted, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: 
emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example is tainted, so it must be replaced ++/- resource "test_instance" "example" {}`, + }, + "replace because cannot update (delete first)": { + Action: plans.DeleteThenCreate, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. + // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" {}`, + }, + "replace because cannot update (create first)": { + Action: plans.CreateThenDelete, + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + // This one has no special message, because the fuller explanation + // typically appears inline as a "# forces replacement" comment. + // (not shown here) + ExpectedOutput: ` # test_instance.example must be replaced ++/- resource "test_instance" "example" {}`, + }, + "forget for no particular reason": { + Action: plans.Forget, + ActionReason: plans.ResourceInstanceChangeNoReason, + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be removed from the OpenTofu state but will not be destroyed + . 
resource "test_instance" "example" {}`, + }, + "forget because no resource configuration": { + Action: plans.Forget, + ActionReason: plans.ResourceInstanceDeleteBecauseNoResourceConfig, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.NoKey), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo.test_instance.example will be removed from the OpenTofu state but will not be destroyed + . resource "test_instance" "example" {}`, + }, + "forget because no module": { + Action: plans.Forget, + ActionReason: plans.ResourceInstanceDeleteBecauseNoModule, + ModuleInst: addrs.RootModuleInstance.Child("foo", addrs.IntKey(1)), + Mode: addrs.ManagedResourceMode, + Before: emptyVal, + After: nullVal, + Schema: emptySchema, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # module.foo[1].test_instance.example will be removed from the OpenTofu state but will not be destroyed + . 
resource "test_instance" "example" {}`, + }, + } + + runTestCases(t, testCases) +} + +func TestResourceChange_sensitiveVariable(t *testing.T) { + testCases := map[string]testCase{ + "creation": { + Action: plans.Create, + Mode: addrs.ManagedResourceMode, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-123"), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "nested_block_list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + "another": cty.StringVal("not secret"), + }), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + // Nested blocks/sets will mark the whole set/block as sensitive + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_list"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_list": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be created + + resource "test_instance" "example" { + + ami = (sensitive value) + + id = "i-02ae66f368e8518a9" + + list_field = [ + + "hello", + + (sensitive value), + + "!", + ] + + map_key = { + + "breakfast" = 800 + + "dinner" = (sensitive value) + } + + map_whole = (sensitive value) + + + nested_block_list { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + + nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - before sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "special": cty.BoolVal(false), + "some_number": cty.NumberIntVal(2), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("."), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1900), + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: 
cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this attribute value will no longer be marked as sensitive 
+ # after applying this change. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + - (sensitive value), + + ".", + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + - nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + + nested_block_set { + + an_attr = "changed" + } + }`, + }, + "in-place update - after sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(700), + "dinner": cty.NumberIntVal(2100), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_single": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("address")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_single"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_single": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - "hello", + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "breakfast" = 800 -> 700 + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ "dinner" = (sensitive value) + } + # Warning: this attribute value will be marked as sensitive and will not + # display in UI output after applying this change. + ~ map_whole = (sensitive value) + + # Warning: this block will be marked as sensitive and will not + # display in UI output after applying this change. + ~ nested_block_single { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - both sensitive": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("original"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(1800), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("cereal"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block_map": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.UnknownVal(cty.String), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + 
Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_map"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_map": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingMap, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + - (sensitive value), + + (sensitive value), + "friends", + ] + ~ map_key = { + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + ~ map_whole = (sensitive value) + + ~ nested_block_map "foo" { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "in-place update - value unchanged, sensitivity changes": { + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "special": cty.BoolVal(true), + "some_number": cty.NumberIntVal(1), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + cty.StringVal("!"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secretval"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "special"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "some_number"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(2)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "special": {Type: cty.Bool, Optional: true}, + "some_number": {Type: cty.Number, Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + }, + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be updated in-place + ~ resource "test_instance" "example" { + # Warning: this 
attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ ami = (sensitive value) + id = "i-02ae66f368e8518a9" + ~ list_field = [ + # (1 unchanged element hidden) + "friends", + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ (sensitive value), + ] + ~ map_key = { + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ "dinner" = (sensitive value) + # (1 unchanged element hidden) + } + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ map_whole = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ some_number = (sensitive value) + # Warning: this attribute value will no longer be marked as sensitive + # after applying this change. The value is unchanged. + ~ special = (sensitive value) + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + + # Warning: this block will no longer be marked as sensitive + # after applying this change. + ~ nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } + }`, + }, + "deletion": { + Action: plans.Delete, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + }), + After: cty.NullVal(cty.EmptyObject), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + 
"list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be destroyed + - resource "test_instance" "example" { + - ami = (sensitive value) -> null + - id = "i-02ae66f368e8518a9" -> null + - list_field = [ + - "hello", + - (sensitive value), + ] -> null + - map_key = { + - "breakfast" = 800 + - "dinner" = (sensitive value) + } -> null + - map_whole = (sensitive value) -> null + + - nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + }`, + }, + "forget": { + Action: plans.Forget, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "list_field": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friends"), + }), + "map_key": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.NumberIntVal(800), + "dinner": cty.NumberIntVal(2000), // sensitive key + }), + "map_whole": cty.MapVal(map[string]cty.Value{ + "breakfast": cty.StringVal("pizza"), + "dinner": cty.StringVal("pizza"), + }), + "nested_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + "nested_block_set": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + "another": cty.StringVal("not secret"), + }), + }), + }), + After: cty.NullVal(cty.EmptyObject), + 
BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "ami"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "list_field"}, cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_key"}, cty.IndexStep{Key: cty.StringVal("dinner")}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "map_whole"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.Path{cty.GetAttrStep{Name: "nested_block_set"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RequiredReplace: cty.NewPathSet(), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "list_field": {Type: cty.List(cty.String), Optional: true}, + "map_key": {Type: cty.Map(cty.Number), Optional: true}, + "map_whole": {Type: cty.Map(cty.String), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Optional: true}, + "another": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + ExpectedOutput: ` # test_instance.example will be removed from the OpenTofu state but will not be destroyed + . resource "test_instance" "example" { + ami = (sensitive value) + id = "i-02ae66f368e8518a9" + list_field = [ + "hello", + (sensitive value), + ] + map_key = { + "breakfast" = 800 + "dinner" = (sensitive value) + } + map_whole = (sensitive value) + + nested_block_set { + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. 
+ } +}`, + }, + "update with sensitive value forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("secret"), + }), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + "nested_block_set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "an_attr": cty.StringVal("changed"), + }), + }), + }), + BeforeValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("ami"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("nested_block_set"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested_block_set": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "an_attr": {Type: cty.String, Required: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + cty.GetAttrPath("nested_block_set"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + + - nested_block_set { # forces replacement + # At least one attribute in this block is (or was) sensitive, + # so its 
contents will not be displayed. + } + + nested_block_set { # forces replacement + # At least one attribute in this block is (or was) sensitive, + # so its contents will not be displayed. + } + }`, + }, + "update with sensitive attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-BEFORE"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "ami": cty.StringVal("ami-AFTER"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("ami"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ ami = (sensitive value) # forces replacement + id = "i-02ae66f368e8518a9" + }`, + }, + "update with sensitive nested type attribute forcing replacement": { + Action: plans.DeleteThenCreate, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("top-secret"), + }), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-02ae66f368e8518a9"), + "conn_info": cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("not-secret"), + "password": cty.StringVal("new-secret"), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "conn_info": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "user": {Type: 
cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("conn_info"), + cty.GetAttrPath("password"), + ), + ExpectedOutput: ` # test_instance.example must be replaced +-/+ resource "test_instance" "example" { + ~ conn_info = { # forces replacement + ~ password = (sensitive value) + # (1 unchanged attribute hidden) + } + id = "i-02ae66f368e8518a9" + }`, + }, + } + runTestCases(t, testCases) +} + +func TestResourceChange_moved(t *testing.T) { + prevRunAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "previous", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + testCases := map[string]testCase{ + "moved and updated": { + PrevRunAddr: prevRunAddr, + Action: plans.Update, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("boop"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be updated in-place + # (moved from test_instance.previous) + ~ resource "test_instance" "example" { + ~ bar = "baz" -> "boop" + id = "12345" + # (1 unchanged attribute hidden) + }`, + }, + "moved without changes": { + PrevRunAddr: prevRunAddr, + Action: plans.NoOp, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.previous has moved to test_instance.example + resource "test_instance" "example" { + id = "12345" + # (2 unchanged attributes hidden) + }`, + }, + "moved and forgotten": { + PrevRunAddr: prevRunAddr, + Action: plans.Forget, + Mode: addrs.ManagedResourceMode, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("baz"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("12345"), + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("boop"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + RequiredReplace: cty.NewPathSet(), + ExpectedOutput: ` # test_instance.example will be removed from the OpenTofu state but will not be destroyed + # (moved from test_instance.previous) + . 
resource "test_instance" "example" { + id = "12345" +}`, + }, + } + + runTestCases(t, testCases) +} + +type testCase struct { + Action plans.Action + ActionReason plans.ResourceInstanceChangeActionReason + ModuleInst addrs.ModuleInstance + Mode addrs.ResourceMode + InstanceKey addrs.InstanceKey + DeposedKey states.DeposedKey + Before cty.Value + BeforeValMarks []cty.PathValueMarks + AfterValMarks []cty.PathValueMarks + After cty.Value + Schema *configschema.Block + RequiredReplace cty.PathSet + ExpectedOutput string + PrevRunAddr addrs.AbsResourceInstance +} + +func runTestCases(t *testing.T, testCases map[string]testCase) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ty := tc.Schema.ImpliedType() + + beforeVal := tc.Before + switch { // Some fixups to make the test cases a little easier to write + case beforeVal.IsNull(): + beforeVal = cty.NullVal(ty) // allow mistyped nulls + case !beforeVal.IsKnown(): + beforeVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + afterVal := tc.After + switch { // Some fixups to make the test cases a little easier to write + case afterVal.IsNull(): + afterVal = cty.NullVal(ty) // allow mistyped nulls + case !afterVal.IsKnown(): + afterVal = cty.UnknownVal(ty) // allow mistyped unknowns + } + + addr := addrs.Resource{ + Mode: tc.Mode, + Type: "test_instance", + Name: "example", + }.Instance(tc.InstanceKey).Absolute(tc.ModuleInst) + + prevRunAddr := tc.PrevRunAddr + // If no previous run address is given, reuse the current address + // to make initialization easier + if prevRunAddr.Resource.Resource.Type == "" { + prevRunAddr = addr + } + + beforeDynamicValue, err := plans.NewDynamicValue(beforeVal, ty) + if err != nil { + t.Fatalf("failed to create dynamic before value: " + err.Error()) + } + + afterDynamicValue, err := plans.NewDynamicValue(afterVal, ty) + if err != nil { + t.Fatalf("failed to create dynamic 
after value: " + err.Error()) + } + + src := &plans.ResourceInstanceChangeSrc{ + ChangeSrc: plans.ChangeSrc{ + Action: tc.Action, + Before: beforeDynamicValue, + BeforeValMarks: tc.BeforeValMarks, + After: afterDynamicValue, + AfterValMarks: tc.AfterValMarks, + }, + + Addr: addr, + PrevRunAddr: prevRunAddr, + DeposedKey: tc.DeposedKey, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ActionReason: tc.ActionReason, + RequiredReplace: tc.RequiredReplace, + } + + tfschemas := &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + src.ProviderAddr.Provider: { + ResourceTypes: map[string]providers.Schema{ + src.Addr.Resource.Resource.Type: { + Block: tc.Schema, + }, + }, + DataSources: map[string]providers.Schema{ + src.Addr.Resource.Resource.Type: { + Block: tc.Schema, + }, + }, + }, + }, + } + jsonchanges, err := jsonplan.MarshalResourceChanges([]*plans.ResourceInstanceChangeSrc{src}, tfschemas) + if err != nil { + t.Errorf("failed to marshal resource changes: " + err.Error()) + return + } + + jsonschemas := jsonprovider.MarshalForRenderer(tfschemas) + change := structured.FromJsonChange(jsonchanges[0].Change, attribute_path.AlwaysMatcher()) + renderer := Renderer{Colorize: color} + diff := diff{ + change: jsonchanges[0], + diff: differ.ComputeDiffForBlock(change, jsonschemas[jsonchanges[0].ProviderName].ResourceSchemas[jsonchanges[0].Type].Block), + } + output, _ := renderHumanDiff(renderer, diff, proposedChange) + if diff := cmp.Diff(output, tc.ExpectedOutput); diff != "" { + t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tc.ExpectedOutput, output, diff) + } + }) + } +} + +func TestOutputChanges(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + testCases := map[string]struct { + changes []*plans.OutputChangeSrc + output string + }{ + "new output value": { + []*plans.OutputChangeSrc{ + outputChange( + 
"foo", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("bar"), + false, + ), + }, + ` + foo = "bar"`, + }, + "removed output": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.NullVal(cty.DynamicPseudoType), + false, + ), + }, + ` - foo = "bar" -> null`, + }, + "single string change": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.StringVal("bar"), + cty.StringVal("baz"), + false, + ), + }, + ` ~ foo = "bar" -> "baz"`, + }, + "element added to list": { + []*plans.OutputChangeSrc{ + outputChange( + "foo", + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("alpha"), + cty.StringVal("beta"), + cty.StringVal("gamma"), + cty.StringVal("delta"), + cty.StringVal("epsilon"), + }), + false, + ), + }, + ` ~ foo = [ + # (1 unchanged element hidden) + "beta", + + "gamma", + "delta", + # (1 unchanged element hidden) + ]`, + }, + "multiple outputs changed, one sensitive": { + []*plans.OutputChangeSrc{ + outputChange( + "a", + cty.NumberIntVal(1), + cty.NumberIntVal(2), + false, + ), + outputChange( + "b", + cty.StringVal("hunter2"), + cty.StringVal("correct-horse-battery-staple"), + true, + ), + outputChange( + "c", + cty.BoolVal(false), + cty.BoolVal(true), + false, + ), + }, + ` ~ a = 1 -> 2 + ~ b = (sensitive value) + ~ c = false -> true`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + changes := &plans.Changes{ + Outputs: tc.changes, + } + + outputs, err := jsonplan.MarshalOutputChanges(changes) + if err != nil { + t.Fatalf("failed to marshal output changes") + } + + renderer := Renderer{Colorize: color} + diffs := precomputeDiffs(Plan{ + OutputChanges: outputs, + }, plans.NormalMode) + + output := renderHumanDiffOutputs(renderer, diffs.outputs) + if output != tc.output { + t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) + } + }) + } 
+} + +func outputChange(name string, before, after cty.Value, sensitive bool) *plans.OutputChangeSrc { + addr := addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{Name: name}, + } + + change := &plans.OutputChange{ + Addr: addr, Change: plans.Change{ + Before: before, + After: after, + }, + Sensitive: sensitive, + } + + changeSrc, err := change.Encode() + if err != nil { + panic(fmt.Sprintf("failed to encode change for %s: %s", addr, err)) + } + + return changeSrc +} + +// A basic test schema using a configurable NestingMode for one (NestedType) attribute and one block +func testSchema(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// A basic test schema using a configurable NestingMode for one (NestedType) +// attribute marked sensitive. 
+func testSchemaSensitive(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + Sensitive: true, + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + } +} + +func testSchemaMultipleBlocks(nesting configschema.NestingMode) *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "disks": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + "leaf_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +// similar to testSchema with the addition of a "new_field" block +func testSchemaPlus(nesting configschema.NestingMode) *configschema.Block { + var diskKey = "disks" + if nesting == configschema.NestingSingle { + diskKey = "disk" + } + + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + diskKey: { + NestedType: 
&configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "mount_point": {Type: cty.String, Optional: true}, + "size": {Type: cty.String, Optional: true}, + }, + Nesting: nesting, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "root_block_device": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "volume_type": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "new_field": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Nesting: nesting, + }, + }, + } +} + +func marshalJson(t *testing.T, data interface{}) json.RawMessage { + result, err := json.Marshal(data) + if err != nil { + t.Fatalf("failed to marshal json: %v", err) + } + return result +} diff --git a/pkg/command/jsonformat/renderer.go b/pkg/command/jsonformat/renderer.go new file mode 100644 index 00000000000..7044e13d111 --- /dev/null +++ b/pkg/command/jsonformat/renderer.go @@ -0,0 +1,180 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "fmt" + "strconv" + + "github.com/mitchellh/colorstring" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/differ" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/terminal" +) + +type JSONLogType string + +type JSONLog struct { + Message string `json:"@message"` + Type JSONLogType `json:"type"` + Diagnostic *viewsjson.Diagnostic `json:"diagnostic"` + Outputs viewsjson.Outputs `json:"outputs"` + Hook map[string]interface{} `json:"hook"` +} + +const ( + LogApplyComplete JSONLogType = "apply_complete" + LogApplyErrored JSONLogType = "apply_errored" + LogApplyStart JSONLogType = "apply_start" + LogChangeSummary JSONLogType = "change_summary" + LogDiagnostic JSONLogType = "diagnostic" + LogPlannedChange JSONLogType = "planned_change" + LogProvisionComplete JSONLogType = "provision_complete" + LogProvisionErrored JSONLogType = "provision_errored" + LogProvisionProgress JSONLogType = "provision_progress" + LogProvisionStart JSONLogType = "provision_start" + LogOutputs JSONLogType = "outputs" + LogRefreshComplete JSONLogType = "refresh_complete" + LogRefreshStart JSONLogType = "refresh_start" + LogResourceDrift JSONLogType = "resource_drift" + LogVersion JSONLogType = "version" +) + +func incompatibleVersions(localVersion, remoteVersion string) bool { + var parsedLocal, parsedRemote float64 + var err error + + if parsedLocal, err = strconv.ParseFloat(localVersion, 64); err != nil { + return false + } + if 
parsedRemote, err = strconv.ParseFloat(remoteVersion, 64); err != nil { + return false + } + + // If the local version is less than the remote version then the remote + // version might contain things the local version doesn't know about, so + // we're going to say they are incompatible. + // + // So far, we have built the renderer and the json packages to be backwards + // compatible so if the local version is greater than the remote version + // then that is okay, we'll still render a complete and correct plan. + // + // Note, this might change in the future. For example, if we introduce a + // new major version in one of the formats the renderer may no longer be + // backward compatible. + return parsedLocal < parsedRemote +} + +type Renderer struct { + Streams *terminal.Streams + Colorize *colorstring.Colorize + + RunningInAutomation bool + ShowSensitive bool +} + +func (renderer Renderer) RenderHumanPlan(plan Plan, mode plans.Mode, opts ...plans.Quality) { + if incompatibleVersions(jsonplan.FormatVersion, plan.PlanFormatVersion) || incompatibleVersions(jsonprovider.FormatVersion, plan.ProviderFormatVersion) { + renderer.Streams.Println(format.WordWrap( + renderer.Colorize.Color("\n[bold][red]Warning:[reset][bold] This plan was generated using a different version of OpenTofu, the diff presented here may be missing representations of recent features."), + renderer.Streams.Stdout.Columns())) + } + + plan.renderHuman(renderer, mode, opts...) 
+} + +func (renderer Renderer) RenderHumanState(state State) { + if incompatibleVersions(jsonstate.FormatVersion, state.StateFormatVersion) || incompatibleVersions(jsonprovider.FormatVersion, state.ProviderFormatVersion) { + renderer.Streams.Println(format.WordWrap( + renderer.Colorize.Color("\n[bold][red]Warning:[reset][bold] This state was retrieved using a different version of OpenTofu, the state presented here maybe missing representations of recent features."), + renderer.Streams.Stdout.Columns())) + } + + if state.Empty() { + renderer.Streams.Println("The state file is empty. No resources are represented.") + return + } + + opts := computed.NewRenderHumanOpts(renderer.Colorize, renderer.ShowSensitive) + opts.ShowUnchangedChildren = true + opts.HideDiffActionSymbols = true + + state.renderHumanStateModule(renderer, state.RootModule, opts, true) + state.renderHumanStateOutputs(renderer, opts) +} + +func (renderer Renderer) RenderLog(log *JSONLog) error { + switch log.Type { + case LogRefreshComplete, + LogVersion, + LogPlannedChange, + LogProvisionComplete, + LogProvisionErrored, + LogApplyErrored: + // We won't display these types of logs + return nil + + case LogApplyStart, LogApplyComplete, LogRefreshStart, LogProvisionStart, LogResourceDrift: + msg := fmt.Sprintf(renderer.Colorize.Color("[bold]%s[reset]"), log.Message) + renderer.Streams.Println(msg) + + case LogDiagnostic: + diag := format.DiagnosticFromJSON(log.Diagnostic, renderer.Colorize, 78) + renderer.Streams.Print(diag) + + case LogOutputs: + if len(log.Outputs) > 0 { + renderer.Streams.Println(renderer.Colorize.Color("[bold][green]Outputs:[reset]")) + for name, output := range log.Outputs { + change := structured.FromJsonViewsOutput(output) + ctype, err := ctyjson.UnmarshalType(output.Type) + if err != nil { + return err + } + + opts := computed.NewRenderHumanOpts(renderer.Colorize, renderer.ShowSensitive) + opts.ShowUnchangedChildren = true + + outputDiff := differ.ComputeDiffForType(change, 
ctype) + outputStr := outputDiff.RenderHuman(0, opts) + + msg := fmt.Sprintf("%s = %s", name, outputStr) + renderer.Streams.Println(msg) + } + } + + case LogProvisionProgress: + provisioner := log.Hook["provisioner"].(string) + output := log.Hook["output"].(string) + resource := log.Hook["resource"].(map[string]interface{}) + resourceAddr := resource["addr"].(string) + + msg := fmt.Sprintf(renderer.Colorize.Color("[bold]%s: (%s):[reset] %s"), + resourceAddr, provisioner, output) + renderer.Streams.Println(msg) + + case LogChangeSummary: + // Normally, we will only render the apply change summary since the renderer + // generates a plan change summary for us + msg := fmt.Sprintf(renderer.Colorize.Color("[bold][green]%s[reset]"), log.Message) + renderer.Streams.Println("\n" + msg + "\n") + + default: + // If the log type is not a known log type, we will just print the log message + renderer.Streams.Println(log.Message) + } + + return nil +} diff --git a/pkg/command/jsonformat/renderer_test.go b/pkg/command/jsonformat/renderer_test.go new file mode 100644 index 00000000000..13228bff22c --- /dev/null +++ b/pkg/command/jsonformat/renderer_test.go @@ -0,0 +1,62 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" +) + +func TestIncompatibleVersions(t *testing.T) { + tcs := map[string]struct { + local string + remote string + expected bool + }{ + "matching": { + local: "1.1", + remote: "1.1", + expected: false, + }, + "local_latest": { + local: "1.2", + remote: "1.1", + expected: false, + }, + "local_earliest": { + local: "1.1", + remote: "1.2", + expected: true, + }, + "parses_state_version": { + local: jsonstate.FormatVersion, + remote: jsonstate.FormatVersion, + expected: false, + }, + "parses_provider_version": { + local: jsonprovider.FormatVersion, + remote: jsonprovider.FormatVersion, + expected: false, + }, + "parses_plan_version": { + local: jsonplan.FormatVersion, + remote: jsonplan.FormatVersion, + expected: false, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + actual := incompatibleVersions(tc.local, tc.remote) + if actual != tc.expected { + t.Errorf("expected %t but found %t", tc.expected, actual) + } + }) + } +} diff --git a/pkg/command/jsonformat/state.go b/pkg/command/jsonformat/state.go new file mode 100644 index 00000000000..9ec13d0a6a3 --- /dev/null +++ b/pkg/command/jsonformat/state.go @@ -0,0 +1,113 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "sort" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/command/jsonformat/differ" + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" +) + +type State struct { + StateFormatVersion string `json:"state_format_version"` + RootModule jsonstate.Module `json:"root"` + RootModuleOutputs map[string]jsonstate.Output `json:"root_module_outputs"` + + ProviderFormatVersion string `json:"provider_format_version"` + ProviderSchemas map[string]*jsonprovider.Provider `json:"provider_schemas"` +} + +func (state State) Empty() bool { + return len(state.RootModuleOutputs) == 0 && len(state.RootModule.Resources) == 0 && len(state.RootModule.ChildModules) == 0 +} + +func (state State) GetSchema(resource jsonstate.Resource) *jsonprovider.Schema { + switch resource.Mode { + case jsonstate.ManagedResourceMode: + return state.ProviderSchemas[resource.ProviderName].ResourceSchemas[resource.Type] + case jsonstate.DataResourceMode: + return state.ProviderSchemas[resource.ProviderName].DataSourceSchemas[resource.Type] + default: + panic("found unrecognized resource mode: " + resource.Mode) + } +} + +func (state State) renderHumanStateModule(renderer Renderer, module jsonstate.Module, opts computed.RenderHumanOpts, first bool) { + if len(module.Resources) > 0 && !first { + renderer.Streams.Println() + } + + for _, resource := range module.Resources { + + if !first { + renderer.Streams.Println() + } + + if first { + first = false + } + + if len(resource.DeposedKey) > 0 { + renderer.Streams.Printf("# %s: (deposed object %s)", resource.Address, resource.DeposedKey) + } else if resource.Tainted { + renderer.Streams.Printf("# %s: (tainted)", resource.Address) + } else { + renderer.Streams.Printf("# 
%s:", resource.Address) + } + + renderer.Streams.Println() + + schema := state.GetSchema(resource) + switch resource.Mode { + case jsonstate.ManagedResourceMode: + change := structured.FromJsonResource(resource) + renderer.Streams.Printf("resource %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) + case jsonstate.DataResourceMode: + change := structured.FromJsonResource(resource) + renderer.Streams.Printf("data %q %q %s", resource.Type, resource.Name, differ.ComputeDiffForBlock(change, schema.Block).RenderHuman(0, opts)) + default: + panic("found unrecognized resource mode: " + resource.Mode) + } + + renderer.Streams.Println() + } + + for _, child := range module.ChildModules { + state.renderHumanStateModule(renderer, child, opts, first) + } +} + +func (state State) renderHumanStateOutputs(renderer Renderer, opts computed.RenderHumanOpts) { + + if len(state.RootModuleOutputs) > 0 { + renderer.Streams.Printf("\n\nOutputs:\n\n") + + var keys []string + for key := range state.RootModuleOutputs { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + output := state.RootModuleOutputs[key] + change := structured.FromJsonOutput(output) + ctype, err := ctyjson.UnmarshalType(output.Type) + if err != nil { + // We can actually do this without the type, so even if we fail + // to work out the type let's just render this anyway. + renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForOutput(change).RenderHuman(0, opts)) + } else { + renderer.Streams.Printf("%s = %s\n", key, differ.ComputeDiffForType(change, ctype).RenderHuman(0, opts)) + } + } + } +} diff --git a/pkg/command/jsonformat/state_test.go b/pkg/command/jsonformat/state_test.go new file mode 100644 index 00000000000..611d8bdeb20 --- /dev/null +++ b/pkg/command/jsonformat/state_test.go @@ -0,0 +1,424 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonformat + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/colorstring" + + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/terminal" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestState(t *testing.T) { + color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Disable: true} + + tests := []struct { + State *states.State + Schemas *tofu.Schemas + Want string + }{ + { + State: &states.State{}, + Schemas: &tofu.Schemas{}, + Want: "The state file is empty. No resources are represented.\n", + }, + { + State: basicState(t), + Schemas: testSchemas(), + Want: basicStateOutput, + }, + { + State: nestedState(t), + Schemas: testSchemas(), + Want: nestedStateOutput, + }, + { + State: deposedState(t), + Schemas: testSchemas(), + Want: deposedNestedStateOutput, + }, + { + State: onlyDeposedState(t), + Schemas: testSchemas(), + Want: onlyDeposedOutput, + }, + { + State: stateWithMoreOutputs(t), + Schemas: testSchemas(), + Want: stateWithMoreOutputsOutput, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + + root, outputs, err := jsonstate.MarshalForRenderer(&statefile.File{ + State: tt.State, + }, tt.Schemas) + + if err != nil { + t.Errorf("found err: %v", err) + return + } + + streams, done := terminal.StreamsForTesting(t) + renderer := Renderer{ + Colorize: color, + Streams: streams, + } + + renderer.RenderHumanState(State{ + StateFormatVersion: jsonstate.FormatVersion, + RootModule: root, + RootModuleOutputs: outputs, + ProviderFormatVersion: 
jsonprovider.FormatVersion, + ProviderSchemas: jsonprovider.MarshalForRenderer(tt.Schemas), + }) + + result := done(t).All() + if diff := cmp.Diff(result, tt.Want); diff != "" { + t.Errorf("wrong output\nexpected:\n%s\nactual:\n%s\ndiff:\n%s\n", tt.Want, result, diff) + } + }) + } +} + +func testProvider() *tofu.MockProvider { + p := new(tofu.MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + + p.GetProviderSchemaResponse = testProviderSchema() + + return p +} + +func testProviderSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "woozles": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } +} + +func testSchemas() *tofu.Schemas { + provider := testProvider() + return &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.GetProviderSchema(), + }, + } +} + +const 
basicStateOutput = `# data.test_data_source.data: +data "test_data_source" "data" { + compute = "sure" +} + +# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bar = "bar value" +` + +const nestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const deposedNestedStateOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const onlyDeposedOutput = `# test_resource.baz[0]: (deposed object 1234) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} + +# test_resource.baz[0]: (deposed object 5678) +resource "test_resource" "baz" { + woozles = "confuzles" + + nested { + value = "42" + } +} +` + +const stateWithMoreOutputsOutput = `# test_resource.baz[0]: +resource "test_resource" "baz" { + woozles = "confuzles" +} + + +Outputs: + +bool_var = true +int_var = 42 +map_var = { + "first" = "foo" + "second" = "bar" +} +sensitive_var = (sensitive value) +string_var = "string value" +` + +func basicState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + 
Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "data", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"compute":"sure"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func stateWithMoreOutputs(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetOutputValue("string_var", cty.StringVal("string value"), false) + rootModule.SetOutputValue("int_var", cty.NumberIntVal(42), false) + rootModule.SetOutputValue("bool_var", cty.BoolVal(true), false) + rootModule.SetOutputValue("sensitive_var", cty.StringVal("secret!!!"), true) + rootModule.SetOutputValue("map_var", cty.MapVal(map[string]cty.Value{ + "first": cty.StringVal("foo"), + "second": cty.StringVal("bar"), + }), false) + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func nestedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, 
+ AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +func deposedState(t *testing.T) *states.State { + state := nestedState(t) + rootModule := state.RootModule() + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + return state +} + +// replicate a corrupt resource where only a deposed exists +func onlyDeposedState(t *testing.T) *states.State { + state := states.NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("1234"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "baz", + }.Instance(addrs.IntKey(0)), + states.DeposedKey("5678"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 0, + AttrsJSON: []byte(`{"woozles":"confuzles","nested": [{"value": "42"}]}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: 
addrs.RootModule, + }, + ) + return state +} diff --git a/pkg/command/jsonformat/structured/attribute_path/matcher.go b/pkg/command/jsonformat/structured/attribute_path/matcher.go new file mode 100644 index 00000000000..90393556f4b --- /dev/null +++ b/pkg/command/jsonformat/structured/attribute_path/matcher.go @@ -0,0 +1,234 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package attribute_path + +import ( + "encoding/json" + "fmt" + "strconv" +) + +// Matcher provides an interface for stepping through changes following an +// attribute path. +// +// GetChildWithKey and GetChildWithIndex will check if any of the internal paths +// match the provided key or index, and return a new Matcher that will match +// that child or potentially its children. +// +// The caller of the above functions is required to know whether the next value +// in the path is a list type or an object type and call the relevant function, +// otherwise these functions will crash/panic. +// +// The Matches function returns true if the paths you have traversed until now +// end. +type Matcher interface { + // Matches returns true if we have reached the end of a path and found an + // exact match. + Matches() bool + + // MatchesPartial returns true if the current attribute is part of a path + // but not necessarily at the end of the path. + MatchesPartial() bool + + GetChildWithKey(key string) Matcher + GetChildWithIndex(index int) Matcher +} + +// Parse accepts a json.RawMessage and outputs a formatted Matcher object. +// +// Parse expects the message to be a JSON array of JSON arrays containing +// strings and floats. This function happily accepts a null input representing +// none of the changes in this resource are causing a replacement. The propagate +// argument tells the matcher to propagate any matches to the matched attribute's +// children. 
+// +// In general, this function is designed to accept messages that have been +// produced by the lossy cty.Paths conversion functions within the jsonplan +// package. There is nothing particularly special about that conversion process +// though, it just produces the nested JSON arrays described above. +func Parse(message json.RawMessage, propagate bool) Matcher { + matcher := &PathMatcher{ + Propagate: propagate, + } + if message == nil { + return matcher + } + + if err := json.Unmarshal(message, &matcher.Paths); err != nil { + panic("failed to unmarshal attribute paths: " + err.Error()) + } + + return matcher +} + +// Empty returns an empty PathMatcher that will by default match nothing. +// +// We give direct access to the PathMatcher struct so a matcher can be built +// in parts with the Append and AppendSingle functions. +func Empty(propagate bool) *PathMatcher { + return &PathMatcher{ + Propagate: propagate, + } +} + +// Append accepts an existing PathMatcher and returns a new one that attaches +// all the paths from message with the existing paths. +// +// The new PathMatcher is created fresh, and the existing one is unchanged. +func Append(matcher *PathMatcher, message json.RawMessage) *PathMatcher { + var values [][]interface{} + if err := json.Unmarshal(message, &values); err != nil { + panic("failed to unmarshal attribute paths: " + err.Error()) + } + + return &PathMatcher{ + Propagate: matcher.Propagate, + Paths: append(matcher.Paths, values...), + } +} + +// AppendSingle accepts an existing PathMatcher and returns a new one that +// attaches the single path from message with the existing paths. +// +// The new PathMatcher is created fresh, and the existing one is unchanged. 
+func AppendSingle(matcher *PathMatcher, message json.RawMessage) *PathMatcher { + var values []interface{} + if err := json.Unmarshal(message, &values); err != nil { + panic("failed to unmarshal attribute paths: " + err.Error()) + } + + return &PathMatcher{ + Propagate: matcher.Propagate, + Paths: append(matcher.Paths, values), + } +} + +// PathMatcher contains a slice of paths that represent paths through the values +// to relevant/tracked attributes. +type PathMatcher struct { + // We represent our internal paths as a [][]interface{} as the cty.Paths + // conversion process is lossy. Since the type information is lost there + // is no (easy) way to reproduce the original cty.Paths object. Instead, + // we simply rely on the external callers to know the type information and + // call the correct GetChild function. + Paths [][]interface{} + + // Propagate tells the matcher that it should propagate any matches it finds + // onto the children of that match. + Propagate bool +} + +func (p *PathMatcher) Matches() bool { + for _, path := range p.Paths { + if len(path) == 0 { + return true + } + } + return false +} + +func (p *PathMatcher) MatchesPartial() bool { + return len(p.Paths) > 0 +} + +func (p *PathMatcher) GetChildWithKey(key string) Matcher { + child := &PathMatcher{ + Propagate: p.Propagate, + } + for _, path := range p.Paths { + if len(path) == 0 { + // This means that the current value matched, but not necessarily + // its child. + + if p.Propagate { + // If propagate is true, then our child also matches + child.Paths = append(child.Paths, path) + } + + // If not, we would simply drop this path from our set of paths but + // either way we just continue. 
+ continue + } + + if path[0].(string) == key { + child.Paths = append(child.Paths, path[1:]) + } + } + return child +} + +func (p *PathMatcher) GetChildWithIndex(index int) Matcher { + child := &PathMatcher{ + Propagate: p.Propagate, + } + for _, path := range p.Paths { + if len(path) == 0 { + // This means that the current value matched, but not necessarily + // it's child. + + if p.Propagate { + // If propagate is true, then our child match our matches + child.Paths = append(child.Paths, path) + } + + // If not we would simply drop this path from our set of paths but + // either way we just continue. + continue + } + + // OpenTofu actually allows user to provide strings into indexes as + // long as the string can be interpreted into a number. For example, the + // following are equivalent and we need to support them. + // - test_resource.resource.list[0].attribute + // - test_resource.resource.list["0"].attribute + // + // Note, that OpenTofu will raise a validation error if the string + // can't be coerced into a number, so we will panic here if anything + // goes wrong safe in the knowledge the validation should stop this from + // happening. + + switch val := path[0].(type) { + case float64: + if int(path[0].(float64)) == index { + child.Paths = append(child.Paths, path[1:]) + } + case string: + f, err := strconv.ParseFloat(val, 64) + if err != nil { + panic(fmt.Errorf("found invalid type within path (%v:%T), the validation shouldn't have allowed this to happen; this is a bug in OpenTofu, please report it", val, val)) + } + if int(f) == index { + child.Paths = append(child.Paths, path[1:]) + } + default: + panic(fmt.Errorf("found invalid type within path (%v:%T), the validation shouldn't have allowed this to happen; this is a bug in OpenTofu, please report it", val, val)) + } + } + return child +} + +// AlwaysMatcher returns a matcher that will always match all paths. 
+func AlwaysMatcher() Matcher { + return &alwaysMatcher{} +} + +type alwaysMatcher struct{} + +func (a *alwaysMatcher) Matches() bool { + return true +} + +func (a *alwaysMatcher) MatchesPartial() bool { + return true +} + +func (a *alwaysMatcher) GetChildWithKey(_ string) Matcher { + return a +} + +func (a *alwaysMatcher) GetChildWithIndex(_ int) Matcher { + return a +} diff --git a/pkg/command/jsonformat/structured/attribute_path/matcher_test.go b/pkg/command/jsonformat/structured/attribute_path/matcher_test.go new file mode 100644 index 00000000000..c2305efbd36 --- /dev/null +++ b/pkg/command/jsonformat/structured/attribute_path/matcher_test.go @@ -0,0 +1,258 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package attribute_path + +import "testing" + +func TestPathMatcher_FollowsPath(t *testing.T) { + var matcher Matcher + + matcher = &PathMatcher{ + Paths: [][]interface{}{ + { + float64(0), + "key", + float64(0), + }, + }, + } + + if matcher.Matches() { + t.Errorf("should not have exact matched at base level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at base level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at first level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at first level") + } + + matcher = matcher.GetChildWithKey("key") + + if matcher.Matches() { + t.Errorf("should not have exact matched at second level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at second level") + } + + matcher = matcher.GetChildWithIndex(0) + + if !matcher.Matches() { + t.Errorf("should have exact matched at leaf level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at leaf level") + } +} +func TestPathMatcher_Propagates(t *testing.T) { + var matcher Matcher + + 
matcher = &PathMatcher{ + Paths: [][]interface{}{ + { + float64(0), + "key", + }, + }, + Propagate: true, + } + + if matcher.Matches() { + t.Errorf("should not have exact matched at base level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at base level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at first level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at first level") + } + + matcher = matcher.GetChildWithKey("key") + + if !matcher.Matches() { + t.Errorf("should have exact matched at second level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at second level") + } + + matcher = matcher.GetChildWithIndex(0) + + if !matcher.Matches() { + t.Errorf("should have exact matched at leaf level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at leaf level") + } +} +func TestPathMatcher_DoesNotPropagate(t *testing.T) { + var matcher Matcher + + matcher = &PathMatcher{ + Paths: [][]interface{}{ + { + float64(0), + "key", + }, + }, + } + + if matcher.Matches() { + t.Errorf("should not have exact matched at base level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at base level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at first level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at first level") + } + + matcher = matcher.GetChildWithKey("key") + + if !matcher.Matches() { + t.Errorf("should have exact matched at second level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at second level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at leaf level") + } + if matcher.MatchesPartial() { + t.Errorf("should not have partial matched at leaf 
level") + } +} + +func TestPathMatcher_BreaksPath(t *testing.T) { + var matcher Matcher + + matcher = &PathMatcher{ + Paths: [][]interface{}{ + { + float64(0), + "key", + float64(0), + }, + }, + } + + if matcher.Matches() { + t.Errorf("should not have exact matched at base level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at base level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at first level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at first level") + } + + matcher = matcher.GetChildWithKey("invalid") + + if matcher.Matches() { + t.Errorf("should not have exact matched at second level") + } + if matcher.MatchesPartial() { + t.Errorf("should not have partial matched at second level") + + } +} + +func TestPathMatcher_MultiplePaths(t *testing.T) { + var matcher Matcher + + matcher = &PathMatcher{ + Paths: [][]interface{}{ + { + float64(0), + "key", + float64(0), + }, + { + float64(0), + "key", + float64(1), + }, + }, + } + + if matcher.Matches() { + t.Errorf("should not have exact matched at base level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at base level") + } + + matcher = matcher.GetChildWithIndex(0) + + if matcher.Matches() { + t.Errorf("should not have exact matched at first level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at first level") + } + + matcher = matcher.GetChildWithKey("key") + + if matcher.Matches() { + t.Errorf("should not have exact matched at second level") + } + if !matcher.MatchesPartial() { + t.Errorf("should have partial matched at second level") + } + + validZero := matcher.GetChildWithIndex(0) + validOne := matcher.GetChildWithIndex(1) + invalid := matcher.GetChildWithIndex(2) + + if !validZero.Matches() { + t.Errorf("should have exact matched at leaf level") + } + if !validZero.MatchesPartial() { + t.Errorf("should have 
partial matched at leaf level") + } + + if !validOne.Matches() { + t.Errorf("should have exact matched at leaf level") + } + if !validOne.MatchesPartial() { + t.Errorf("should have partial matched at leaf level") + } + + if invalid.Matches() { + t.Errorf("should not have exact matched at leaf level") + } + if invalid.MatchesPartial() { + t.Errorf("should not have partial matched at leaf level") + } +} diff --git a/pkg/command/jsonformat/structured/change.go b/pkg/command/jsonformat/structured/change.go new file mode 100644 index 00000000000..8f2665516d7 --- /dev/null +++ b/pkg/command/jsonformat/structured/change.go @@ -0,0 +1,285 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package structured + +import ( + "bytes" + "encoding/json" + "reflect" + + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/plans" +) + +// Change contains the unmarshalled generic interface{} types that are output by +// the JSON functions in the various json packages (such as jsonplan and +// jsonprovider). +// +// A Change can be converted into a computed.Diff, ready for rendering, with the +// ComputeDiffForAttribute, ComputeDiffForOutput, and ComputeDiffForBlock +// functions. +// +// The Before and After fields are actually go-cty values, but we cannot convert +// them directly because of the Terraform Cloud redacted endpoint. The redacted +// endpoint turns sensitive values into strings regardless of their types. +// Because of this, we cannot just do a direct conversion using the ctyjson +// package. 
We would have to iterate through the schema first, find the +// sensitive values and their mapped types, update the types inside the schema +// to strings, and then go back and do the overall conversion. This isn't +// including any of the more complicated parts around what happens if something +// was sensitive before and isn't sensitive after or vice versa. This would mean +// the type would need to change between the before and after value. It is in +// fact just easier to iterate through the values as generic JSON interfaces. +type Change struct { + + // BeforeExplicit matches AfterExplicit except references the Before value. + BeforeExplicit bool + + // AfterExplicit refers to whether the After value is explicit or + // implicit. It is explicit if it has been specified by the user, and + // implicit if it has been set as a consequence of other changes. + // + // For example, explicitly setting a value to null in a list should result + // in After being null and AfterExplicit being true. In comparison, + // removing an element from a list should also result in After being null + // and AfterExplicit being false. Without the explicit information our + // functions would not be able to tell the difference between these two + // cases. + AfterExplicit bool + + // Before contains the value before the proposed change. + // + // The type of the value should be informed by the schema and cast + // appropriately when needed. + Before interface{} + + // After contains the value after the proposed change. + // + // The type of the value should be informed by the schema and cast + // appropriately when needed. + After interface{} + + // Unknown describes whether the After value is known or unknown at the time + // of the plan. In practice, this means the after value should be rendered + // simply as `(known after apply)`. 
+ // + // The concrete value could be a boolean describing whether the entirety of + // the After value is unknown, or it could be a list or a map depending on + // the schema describing whether specific elements or attributes within the + // value are unknown. + Unknown interface{} + + // BeforeSensitive matches Unknown, but references whether the Before value + // is sensitive. + BeforeSensitive interface{} + + // AfterSensitive matches Unknown, but references whether the After value is + // sensitive. + AfterSensitive interface{} + + // ReplacePaths contains a set of paths that point to attributes/elements + // that are causing the overall resource to be replaced rather than simply + // updated. + ReplacePaths attribute_path.Matcher + + // RelevantAttributes contains a set of paths that point attributes/elements + // that we should display. Any element/attribute not matched by this Matcher + // should be skipped. + RelevantAttributes attribute_path.Matcher +} + +// FromJsonChange unmarshals the raw []byte values in the jsonplan.Change +// structs into generic interface{} types that can be reasoned about. +func FromJsonChange(change jsonplan.Change, relevantAttributes attribute_path.Matcher) Change { + return Change{ + Before: unmarshalGeneric(change.Before), + After: unmarshalGeneric(change.After), + Unknown: unmarshalGeneric(change.AfterUnknown), + BeforeSensitive: unmarshalGeneric(change.BeforeSensitive), + AfterSensitive: unmarshalGeneric(change.AfterSensitive), + ReplacePaths: attribute_path.Parse(change.ReplacePaths, false), + RelevantAttributes: relevantAttributes, + } +} + +// FromJsonResource unmarshals the raw values in the jsonstate.Resource structs +// into generic interface{} types that can be reasoned about. +func FromJsonResource(resource jsonstate.Resource) Change { + return Change{ + // We model resource formatting as NoOps. 
+		Before: unwrapAttributeValues(resource.AttributeValues),
+		After:  unwrapAttributeValues(resource.AttributeValues),
+
+		// We have some sensitive values, but we don't have any unknown values.
+		Unknown:         false,
+		BeforeSensitive: unmarshalGeneric(resource.SensitiveValues),
+		AfterSensitive:  unmarshalGeneric(resource.SensitiveValues),
+
+		// We don't display replacement data for resources, and all attributes
+		// are relevant.
+		ReplacePaths:       attribute_path.Empty(false),
+		RelevantAttributes: attribute_path.AlwaysMatcher(),
+	}
+}
+
+// FromJsonOutput unmarshals the raw values in the jsonstate.Output structs into
+// generic interface{} types that can be reasoned about.
+func FromJsonOutput(output jsonstate.Output) Change {
+	return Change{
+		// We model output formatting as NoOps.
+		Before: unmarshalGeneric(output.Value),
+		After:  unmarshalGeneric(output.Value),
+
+		// We have some sensitive values, but we don't have any unknown values.
+		Unknown:         false,
+		BeforeSensitive: output.Sensitive,
+		AfterSensitive:  output.Sensitive,
+
+		// We don't display replacement data for outputs, and all attributes
+		// are relevant.
+		ReplacePaths:       attribute_path.Empty(false),
+		RelevantAttributes: attribute_path.AlwaysMatcher(),
+	}
+}
+
+// FromJsonViewsOutput unmarshals the raw values in the viewsjson.Output structs into
+// generic interface{} types that can be reasoned about.
+func FromJsonViewsOutput(output viewsjson.Output) Change {
+	return Change{
+		// We model output formatting as NoOps.
+		Before: unmarshalGeneric(output.Value),
+		After:  unmarshalGeneric(output.Value),
+
+		// We have some sensitive values, but we don't have any unknown values.
+		Unknown:         false,
+		BeforeSensitive: output.Sensitive,
+		AfterSensitive:  output.Sensitive,
+
+		// We don't display replacement data for outputs, and all attributes
+		// are relevant.
+ ReplacePaths: attribute_path.Empty(false), + RelevantAttributes: attribute_path.AlwaysMatcher(), + } +} + +// CalculateAction does a very simple analysis to make the best guess at the +// action this change describes. For complex types such as objects, maps, lists, +// or sets it is likely more efficient to work out the action directly instead +// of relying on this function. +func (change Change) CalculateAction() plans.Action { + if (change.Before == nil && !change.BeforeExplicit) && (change.After != nil || change.AfterExplicit) { + return plans.Create + } + if (change.After == nil && !change.AfterExplicit) && (change.Before != nil || change.BeforeExplicit) { + return plans.Delete + } + + if reflect.DeepEqual(change.Before, change.After) && change.AfterExplicit == change.BeforeExplicit && change.IsAfterSensitive() == change.IsBeforeSensitive() { + return plans.NoOp + } + + return plans.Update +} + +// GetDefaultActionForIteration is used to guess what the change could be for +// complex attributes (collections and objects) and blocks. +// +// You can't really tell the difference between a NoOp and an Update just by +// looking at the attribute itself as you need to inspect the children. +// +// This function returns a Delete or a Create action if the before or after +// values were null, and returns a NoOp for all other cases. It should be used +// in conjunction with compareActions to calculate the actual action based on +// the actions of the children. +func (change Change) GetDefaultActionForIteration() plans.Action { + if change.Before == nil && change.After == nil { + return plans.NoOp + } + + if change.Before == nil { + return plans.Create + } + if change.After == nil { + return plans.Delete + } + return plans.NoOp +} + +// AsNoOp returns the current change as if it is a NoOp operation. +// +// Basically it replaces all the after values with the before values. 
+func (change Change) AsNoOp() Change { + return Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: change.BeforeExplicit, + Before: change.Before, + After: change.Before, + Unknown: false, + BeforeSensitive: change.BeforeSensitive, + AfterSensitive: change.BeforeSensitive, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// AsDelete returns the current change as if it is a Delete operation. +// +// Basically it replaces all the after values with nil or false. +func (change Change) AsDelete() Change { + return Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: false, + Before: change.Before, + After: nil, + Unknown: nil, + BeforeSensitive: change.BeforeSensitive, + AfterSensitive: nil, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// AsCreate returns the current change as if it is a Create operation. +// +// Basically it replaces all the before values with nil or false. 
+func (change Change) AsCreate() Change { + return Change{ + BeforeExplicit: false, + AfterExplicit: change.AfterExplicit, + Before: nil, + After: change.After, + Unknown: change.Unknown, + BeforeSensitive: nil, + AfterSensitive: change.AfterSensitive, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +func unmarshalGeneric(raw json.RawMessage) interface{} { + if raw == nil { + return nil + } + + decoder := json.NewDecoder(bytes.NewBuffer(raw)) + decoder.UseNumber() + var out interface{} + if err := decoder.Decode(&out); err != nil { + panic("unrecognized json type: " + err.Error()) + } + return out +} + +func unwrapAttributeValues(values jsonstate.AttributeValues) map[string]interface{} { + out := make(map[string]interface{}) + for key, value := range values { + out[key] = unmarshalGeneric(value) + } + return out +} diff --git a/pkg/command/jsonformat/structured/doc.go b/pkg/command/jsonformat/structured/doc.go new file mode 100644 index 00000000000..36830c0e46b --- /dev/null +++ b/pkg/command/jsonformat/structured/doc.go @@ -0,0 +1,11 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package structured contains the structured representation of the JSON changes +// returned by the jsonplan package. +// +// Placing these in a dedicated package allows for greater reuse across the +// various type of renderers. +package structured diff --git a/pkg/command/jsonformat/structured/map.go b/pkg/command/jsonformat/structured/map.go new file mode 100644 index 00000000000..43ef37885a1 --- /dev/null +++ b/pkg/command/jsonformat/structured/map.go @@ -0,0 +1,165 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package structured + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path" +) + +// ChangeMap is a Change that represents a Map or an Object type, and has +// converted the relevant interfaces into maps for easier access. +type ChangeMap struct { + // Before contains the value before the proposed change. + Before map[string]interface{} + + // After contains the value after the proposed change. + After map[string]interface{} + + // Unknown contains the unknown status of any elements/attributes of this + // map/object. + Unknown map[string]interface{} + + // BeforeSensitive contains the before sensitive status of any + // elements/attributes of this map/object. + BeforeSensitive map[string]interface{} + + // AfterSensitive contains the after sensitive status of any + // elements/attributes of this map/object. + AfterSensitive map[string]interface{} + + // ReplacePaths matches the same attributes in Change exactly. + ReplacePaths attribute_path.Matcher + + // RelevantAttributes matches the same attributes in Change exactly. + RelevantAttributes attribute_path.Matcher +} + +// AsMap converts the Change into an object or map representation by converting +// the internal Before, After, Unknown, BeforeSensitive, and AfterSensitive +// data structures into generic maps. +func (change Change) AsMap() ChangeMap { + return ChangeMap{ + Before: genericToMap(change.Before), + After: genericToMap(change.After), + Unknown: genericToMap(change.Unknown), + BeforeSensitive: genericToMap(change.BeforeSensitive), + AfterSensitive: genericToMap(change.AfterSensitive), + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// GetChild safely packages up a Change object for the given child, handling +// all the cases where the data might be null or a static boolean. 
+func (m ChangeMap) GetChild(key string) Change { + before, beforeExplicit := getFromGenericMap(m.Before, key) + after, afterExplicit := getFromGenericMap(m.After, key) + unknown, _ := getFromGenericMap(m.Unknown, key) + beforeSensitive, _ := getFromGenericMap(m.BeforeSensitive, key) + afterSensitive, _ := getFromGenericMap(m.AfterSensitive, key) + + return Change{ + BeforeExplicit: beforeExplicit, + AfterExplicit: afterExplicit, + Before: before, + After: after, + Unknown: unknown, + BeforeSensitive: beforeSensitive, + AfterSensitive: afterSensitive, + ReplacePaths: m.ReplacePaths.GetChildWithKey(key), + RelevantAttributes: m.RelevantAttributes.GetChildWithKey(key), + } +} + +// ExplicitKeys returns the keys in the Before and After, as opposed to AllKeys +// which also includes keys from the additional meta structures (like the +// sensitive and unknown values). +// +// This function is useful for processing nested attributes and repeated blocks +// where the unknown and sensitive structs contain information about the actual +// attributes, while the before and after structs hold the actual nested values. +func (m ChangeMap) ExplicitKeys() []string { + keys := make(map[string]bool) + for before := range m.Before { + if _, ok := keys[before]; ok { + continue + } + keys[before] = true + } + for after := range m.After { + if _, ok := keys[after]; ok { + continue + } + keys[after] = true + } + + var dedupedKeys []string + for key := range keys { + dedupedKeys = append(dedupedKeys, key) + } + return dedupedKeys +} + +// AllKeys returns all the possible keys for this map. The keys for the map are +// potentially hidden and spread across multiple internal data structures and +// so this function conveniently packages them up. 
+func (m ChangeMap) AllKeys() []string { + keys := make(map[string]bool) + for before := range m.Before { + if _, ok := keys[before]; ok { + continue + } + keys[before] = true + } + for after := range m.After { + if _, ok := keys[after]; ok { + continue + } + keys[after] = true + } + for unknown := range m.Unknown { + if _, ok := keys[unknown]; ok { + continue + } + keys[unknown] = true + } + for sensitive := range m.AfterSensitive { + if _, ok := keys[sensitive]; ok { + continue + } + keys[sensitive] = true + } + for sensitive := range m.BeforeSensitive { + if _, ok := keys[sensitive]; ok { + continue + } + keys[sensitive] = true + } + + var dedupedKeys []string + for key := range keys { + dedupedKeys = append(dedupedKeys, key) + } + return dedupedKeys +} + +func getFromGenericMap(generic map[string]interface{}, key string) (interface{}, bool) { + if generic == nil { + return nil, false + } + + if child, ok := generic[key]; ok { + return child, ok + } + return nil, false +} + +func genericToMap(generic interface{}) map[string]interface{} { + if concrete, ok := generic.(map[string]interface{}); ok { + return concrete + } + return nil +} diff --git a/pkg/command/jsonformat/structured/sensitive.go b/pkg/command/jsonformat/structured/sensitive.go new file mode 100644 index 00000000000..24f5a809959 --- /dev/null +++ b/pkg/command/jsonformat/structured/sensitive.go @@ -0,0 +1,94 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package structured + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" + "github.com/kubegems/opentofu/pkg/plans" +) + +type ProcessSensitiveInner func(change Change) computed.Diff +type CreateSensitiveDiff func(inner computed.Diff, beforeSensitive, afterSensitive bool, action plans.Action) computed.Diff + +func (change Change) IsBeforeSensitive() bool { + if sensitive, ok := change.BeforeSensitive.(bool); ok { + return sensitive + } + return false +} + +func (change Change) IsAfterSensitive() bool { + if sensitive, ok := change.AfterSensitive.(bool); ok { + return sensitive + } + return false +} + +// CheckForSensitive is a helper function that handles all common functionality +// for processing a sensitive value. +// +// It returns the computed sensitive diff and true if this value was sensitive +// and needs to be rendered as such, otherwise it returns the second return +// value as false and the first value can be discarded. +// +// The actual processing of sensitive values happens within the +// ProcessSensitiveInner and CreateSensitiveDiff functions. Callers should +// implement these functions as appropriate when using this function. +// +// The ProcessSensitiveInner function should simply return a computed.Diff for +// the provided Change. The provided Change will be the same as the original +// change but with the sensitive metadata removed. The new inner diff is then +// passed into the actual CreateSensitiveDiff function which should return the +// actual sensitive diff. +// +// We include the inner change into the sensitive diff as a way to let the +// sensitive renderer have as much information as possible, while still letting +// it do the actual rendering. 
+func (change Change) CheckForSensitive(processInner ProcessSensitiveInner, createDiff CreateSensitiveDiff) (computed.Diff, bool) { + beforeSensitive := change.IsBeforeSensitive() + afterSensitive := change.IsAfterSensitive() + + if !beforeSensitive && !afterSensitive { + return computed.Diff{}, false + } + + // We are still going to give the change the contents of the actual change. + // So we create a new Change with everything matching the current value, + // except for the sensitivity. + // + // The change can choose what to do with this information, in most cases + // it will just be ignored in favour of printing `(sensitive value)`. + + value := Change{ + BeforeExplicit: change.BeforeExplicit, + AfterExplicit: change.AfterExplicit, + Before: change.Before, + After: change.After, + Unknown: change.Unknown, + BeforeSensitive: false, + AfterSensitive: false, + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } + + inner := processInner(value) + + action := inner.Action + sensitiveStatusChanged := beforeSensitive != afterSensitive + + // nullNoOp is a stronger NoOp, where not only is there no change happening + // but the before and after values are not explicitly set and are both + // null. This will override even the sensitive state changing. + nullNoOp := change.Before == nil && !change.BeforeExplicit && change.After == nil && !change.AfterExplicit + + if action == plans.NoOp && sensitiveStatusChanged && !nullNoOp { + // Let's override this, since it means the sensitive status has changed + // rather than the actual content of the value. 
+		action = plans.Update
+	}
+
+	return createDiff(inner, beforeSensitive, afterSensitive, action), true
+}
diff --git a/pkg/command/jsonformat/structured/slice.go b/pkg/command/jsonformat/structured/slice.go
new file mode 100644
index 00000000000..415e1d99d40
--- /dev/null
+++ b/pkg/command/jsonformat/structured/slice.go
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package structured
+
+import (
+	"github.com/kubegems/opentofu/pkg/command/jsonformat/structured/attribute_path"
+)
+
+// ChangeSlice is a Change that represents a Tuple, Set, or List type, and has
+// converted the relevant interfaces into slices for easier access.
+type ChangeSlice struct {
+	// Before contains the value before the proposed change.
+	Before []interface{}
+
+	// After contains the value after the proposed change.
+	After []interface{}
+
+	// Unknown contains the unknown status of any elements of this list/set.
+	Unknown []interface{}
+
+	// BeforeSensitive contains the before sensitive status of any elements of
+	// this list/set.
+	BeforeSensitive []interface{}
+
+	// AfterSensitive contains the after sensitive status of any elements of
+	// this list/set.
+	AfterSensitive []interface{}
+
+	// ReplacePaths matches the same attributes in Change exactly.
+	ReplacePaths attribute_path.Matcher
+
+	// RelevantAttributes matches the same attributes in Change exactly.
+	RelevantAttributes attribute_path.Matcher
+}
+
+// AsSlice converts the Change into a slice representation by converting the
+// internal Before, After, Unknown, BeforeSensitive, and AfterSensitive data
+// structures into generic slices.
+func (change Change) AsSlice() ChangeSlice { + return ChangeSlice{ + Before: genericToSlice(change.Before), + After: genericToSlice(change.After), + Unknown: genericToSlice(change.Unknown), + BeforeSensitive: genericToSlice(change.BeforeSensitive), + AfterSensitive: genericToSlice(change.AfterSensitive), + ReplacePaths: change.ReplacePaths, + RelevantAttributes: change.RelevantAttributes, + } +} + +// GetChild safely packages up a Change object for the given child, handling +// all the cases where the data might be null or a static boolean. +func (s ChangeSlice) GetChild(beforeIx, afterIx int) Change { + before, beforeExplicit := getFromGenericSlice(s.Before, beforeIx) + after, afterExplicit := getFromGenericSlice(s.After, afterIx) + unknown, _ := getFromGenericSlice(s.Unknown, afterIx) + beforeSensitive, _ := getFromGenericSlice(s.BeforeSensitive, beforeIx) + afterSensitive, _ := getFromGenericSlice(s.AfterSensitive, afterIx) + + mostRelevantIx := beforeIx + if beforeIx < 0 || beforeIx >= len(s.Before) { + mostRelevantIx = afterIx + } + + return Change{ + BeforeExplicit: beforeExplicit, + AfterExplicit: afterExplicit, + Before: before, + After: after, + Unknown: unknown, + BeforeSensitive: beforeSensitive, + AfterSensitive: afterSensitive, + ReplacePaths: s.ReplacePaths.GetChildWithIndex(mostRelevantIx), + RelevantAttributes: s.RelevantAttributes.GetChildWithIndex(mostRelevantIx), + } +} + +func getFromGenericSlice(generic []interface{}, ix int) (interface{}, bool) { + if generic == nil { + return nil, false + } + if ix < 0 || ix >= len(generic) { + return nil, false + } + return generic[ix], true +} + +func genericToSlice(generic interface{}) []interface{} { + if concrete, ok := generic.([]interface{}); ok { + return concrete + } + return nil +} diff --git a/pkg/command/jsonformat/structured/unknown.go b/pkg/command/jsonformat/structured/unknown.go new file mode 100644 index 00000000000..7536f28e056 --- /dev/null +++ 
b/pkg/command/jsonformat/structured/unknown.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package structured + +import ( + "github.com/kubegems/opentofu/pkg/command/jsonformat/computed" +) + +type ProcessUnknown func(current Change) computed.Diff +type ProcessUnknownWithBefore func(current Change, before Change) computed.Diff + +func (change Change) IsUnknown() bool { + if unknown, ok := change.Unknown.(bool); ok { + return unknown + } + return false +} + +// CheckForUnknown is a helper function that handles all common functionality +// for processing an unknown value. +// +// It returns the computed unknown diff and true if this value was unknown and +// needs to be rendered as such, otherwise it returns the second return value as +// false and the first return value should be discarded. +// +// The actual processing of unknown values happens in the ProcessUnknown and +// ProcessUnknownWithBefore functions. If a value is unknown and is being +// created, the ProcessUnknown function is called and the caller should decide +// how to create the unknown value. If a value is being updated the +// ProcessUnknownWithBefore function is called and the function provides the +// before value as if it is being deleted for the caller to handle. Note that +// values being deleted will never be marked as unknown so this case isn't +// handled. +// +// The childUnknown argument is meant to allow callers with extra information +// about the type being processed to provide a list of known children that might +// not be present in the before or after values. These values will be propagated +// as the unknown values in the before value should it be needed. 
+func (change Change) CheckForUnknown(childUnknown interface{}, process ProcessUnknown, processBefore ProcessUnknownWithBefore) (computed.Diff, bool) { + unknown := change.IsUnknown() + + if !unknown { + return computed.Diff{}, false + } + + // No matter what we do here, we want to treat the after value as explicit. + // This is because it is going to be null in the value, and we don't want + // the functions in this package to assume this means it has been deleted. + change.AfterExplicit = true + + if change.Before == nil { + return process(change), true + } + + // If we get here, then we have a before value. We're going to model a + // delete operation and our renderer later can render the overall change + // accurately. + before := change.AsDelete() + + // We also let our callers override the unknown values in any before, this + // is the renderers can display them as being computed instead of deleted. + before.Unknown = childUnknown + return processBefore(change, before), true +} diff --git a/pkg/command/jsonfunction/function.go b/pkg/command/jsonfunction/function.go new file mode 100644 index 00000000000..9da03e4198a --- /dev/null +++ b/pkg/command/jsonfunction/function.go @@ -0,0 +1,151 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonfunction + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. 
+const FormatVersion = "1.0" + +// functions is the top-level object returned when exporting function signatures +type functions struct { + FormatVersion string `json:"format_version"` + Signatures map[string]*FunctionSignature `json:"function_signatures,omitempty"` +} + +// FunctionSignature represents a function signature. +type FunctionSignature struct { + // Description is an optional human-readable description + // of the function + Description string `json:"description,omitempty"` + + // ReturnTypes is the ctyjson representation of the function's + // return types based on supplying all parameters using + // dynamic types. Functions can have dynamic return types. + ReturnType cty.Type `json:"return_type"` + + // Parameters describes the function's fixed positional parameters. + Parameters []*parameter `json:"parameters,omitempty"` + + // VariadicParameter describes the function's variadic + // parameters, if any are supported. + VariadicParameter *parameter `json:"variadic_parameter,omitempty"` +} + +func newFunctions() *functions { + signatures := make(map[string]*FunctionSignature) + return &functions{ + FormatVersion: FormatVersion, + Signatures: signatures, + } +} + +func Marshal(f map[string]function.Function) ([]byte, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + signatures := newFunctions() + + for name, v := range f { + if name == "can" || name == lang.CoreNamespace+"can" { + signatures.Signatures[name] = marshalCan(v) + } else if name == "try" || name == lang.CoreNamespace+"try" { + signatures.Signatures[name] = marshalTry(v) + } else { + signature, err := marshalFunction(v) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Failed to serialize function %q", name), + err.Error(), + )) + } + signatures.Signatures[name] = signature + } + } + + if diags.HasErrors() { + return nil, diags + } + + ret, err := json.Marshal(signatures) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + 
tfdiags.Error, + "Failed to serialize functions", + err.Error(), + )) + return nil, diags + } + return ret, nil +} + +func marshalFunction(f function.Function) (*FunctionSignature, error) { + var err error + var vp *parameter + if f.VarParam() != nil { + vp = marshalParameter(f.VarParam()) + } + + var p []*parameter + if len(f.Params()) > 0 { + p = marshalParameters(f.Params()) + } + + r, err := getReturnType(f) + if err != nil { + return nil, err + } + + return &FunctionSignature{ + Description: f.Description(), + ReturnType: r, + Parameters: p, + VariadicParameter: vp, + }, nil +} + +// marshalTry returns a static function signature for the try function. +// We need this exception because the function implementation uses capsule +// types that we can't marshal. +func marshalTry(try function.Function) *FunctionSignature { + return &FunctionSignature{ + Description: try.Description(), + ReturnType: cty.DynamicPseudoType, + VariadicParameter: ¶meter{ + Name: try.VarParam().Name, + Description: try.VarParam().Description, + IsNullable: try.VarParam().AllowNull, + Type: cty.DynamicPseudoType, + }, + } +} + +// marshalCan returns a static function signature for the can function. +// We need this exception because the function implementation uses capsule +// types that we can't marshal. +func marshalCan(can function.Function) *FunctionSignature { + return &FunctionSignature{ + Description: can.Description(), + ReturnType: cty.Bool, + Parameters: []*parameter{ + { + Name: can.Params()[0].Name, + Description: can.Params()[0].Description, + IsNullable: can.Params()[0].AllowNull, + Type: cty.DynamicPseudoType, + }, + }, + } +} diff --git a/pkg/command/jsonfunction/function_test.go b/pkg/command/jsonfunction/function_test.go new file mode 100644 index 00000000000..7030c82d425 --- /dev/null +++ b/pkg/command/jsonfunction/function_test.go @@ -0,0 +1,139 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonfunction + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty-debug/ctydebug" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func TestMarshal(t *testing.T) { + tests := []struct { + Name string + Input map[string]function.Function + Want string + WantErr string + }{ + { + "minimal function", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + Type: function.StaticReturnType(cty.Bool), + }), + }, + `{"format_version":"1.0","function_signatures":{"fun":{"return_type":"bool"}}}`, + "", + }, + { + "function with description", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + Description: "`timestamp` returns a UTC timestamp string.", + Type: function.StaticReturnType(cty.String), + }), + }, + "{\"format_version\":\"1.0\",\"function_signatures\":{\"fun\":{\"description\":\"`timestamp` returns a UTC timestamp string.\",\"return_type\":\"string\"}}}", + "", + }, + { + "function with parameters", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "timestamp", + Description: "timestamp text", + Type: cty.String, + }, + { + Name: "duration", + Description: "duration text", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + }), + }, + `{"format_version":"1.0","function_signatures":{"fun":{"return_type":"string","parameters":[{"name":"timestamp","description":"timestamp text","type":"string"},{"name":"duration","description":"duration text","type":"string"}]}}}`, + "", + }, + { + "function with variadic parameter", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + VarParam: &function.Parameter{ + Name: "default", + Description: "default description", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + AllowMarked: true, + }, + Type: 
function.StaticReturnType(cty.DynamicPseudoType), + }), + }, + `{"format_version":"1.0","function_signatures":{"fun":{"return_type":"dynamic","variadic_parameter":{"name":"default","description":"default description","is_nullable":true,"type":"dynamic"}}}}`, + "", + }, + { + "function with list types", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + }), + }, + `{"format_version":"1.0","function_signatures":{"fun":{"return_type":["list","string"],"parameters":[{"name":"list","type":["list","string"]}]}}}`, + "", + }, + { + "returns diagnostics on failure", + map[string]function.Function{ + "fun": function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("error") + }, + }), + }, + "", + "Failed to serialize function \"fun\": error", + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%d-%s", i, test.Name), func(t *testing.T) { + got, diags := Marshal(test.Input) + if test.WantErr != "" { + if !diags.HasErrors() { + t.Fatal("expected error, got none") + } + if diags.Err().Error() != test.WantErr { + t.Fatalf("expected error %q, got %q", test.WantErr, diags.Err()) + } + } else { + if diags.HasErrors() { + t.Fatal(diags) + } + + if diff := cmp.Diff(test.Want, string(got), ctydebug.CmpOptions); diff != "" { + t.Fatalf("mismatch of function signature: %s", diff) + } + } + }) + } +} diff --git a/pkg/command/jsonfunction/parameter.go b/pkg/command/jsonfunction/parameter.go new file mode 100644 index 00000000000..fc0cd8b7e11 --- /dev/null +++ b/pkg/command/jsonfunction/parameter.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonfunction + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// parameter represents a parameter to a function. +type parameter struct { + // Name is an optional name for the argument. + Name string `json:"name,omitempty"` + + // Description is an optional human-readable description + // of the argument + Description string `json:"description,omitempty"` + + // IsNullable is true if null is acceptable value for the argument + IsNullable bool `json:"is_nullable,omitempty"` + + // A type that any argument for this parameter must conform to. + Type cty.Type `json:"type"` +} + +func marshalParameter(p *function.Parameter) *parameter { + if p == nil { + return ¶meter{} + } + + return ¶meter{ + Name: p.Name, + Description: p.Description, + IsNullable: p.AllowNull, + Type: p.Type, + } +} + +func marshalParameters(parameters []function.Parameter) []*parameter { + ret := make([]*parameter, len(parameters)) + for k, p := range parameters { + ret[k] = marshalParameter(&p) + } + return ret +} diff --git a/pkg/command/jsonfunction/parameter_test.go b/pkg/command/jsonfunction/parameter_test.go new file mode 100644 index 00000000000..ed212ecb81a --- /dev/null +++ b/pkg/command/jsonfunction/parameter_test.go @@ -0,0 +1,69 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonfunction + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty-debug/ctydebug" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func TestMarshalParameter(t *testing.T) { + tests := []struct { + Name string + Input *function.Parameter + Want *parameter + }{ + { + "call with nil", + nil, + ¶meter{}, + }, + { + "parameter with description", + &function.Parameter{ + Name: "timestamp", + Description: "`timestamp` returns a UTC timestamp string in [RFC 3339]", + Type: cty.String, + }, + ¶meter{ + Name: "timestamp", + Description: "`timestamp` returns a UTC timestamp string in [RFC 3339]", + Type: cty.String, + }, + }, + { + "parameter with additional properties", + &function.Parameter{ + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + ¶meter{ + Name: "value", + Type: cty.DynamicPseudoType, + IsNullable: true, + }, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%d-%s", i, test.Name), func(t *testing.T) { + got := marshalParameter(test.Input) + + if diff := cmp.Diff(test.Want, got, ctydebug.CmpOptions); diff != "" { + t.Fatalf("mismatch of parameter signature: %s", diff) + } + }) + } +} diff --git a/pkg/command/jsonfunction/return_type.go b/pkg/command/jsonfunction/return_type.go new file mode 100644 index 00000000000..476b2bfe8b2 --- /dev/null +++ b/pkg/command/jsonfunction/return_type.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonfunction + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func getReturnType(f function.Function) (cty.Type, error) { + args := make([]cty.Type, 0) + for _, param := range f.Params() { + args = append(args, param.Type) + } + if f.VarParam() != nil { + args = append(args, f.VarParam().Type) + } + + return f.ReturnType(args) +} diff --git a/pkg/command/jsonplan/doc.go b/pkg/command/jsonplan/doc.go new file mode 100644 index 00000000000..06934a89d47 --- /dev/null +++ b/pkg/command/jsonplan/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package jsonplan implements methods for outputting a plan in a +// machine-readable json format +package jsonplan diff --git a/pkg/command/jsonplan/module.go b/pkg/command/jsonplan/module.go new file mode 100644 index 00000000000..e73440c56e1 --- /dev/null +++ b/pkg/command/jsonplan/module.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +// Module is the representation of a module in state. This can be the root +// module or a child module. +type Module struct { + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []Resource `json:"resources,omitempty"` + + // Address is the absolute module address, omitted for the root module + Address string `json:"address,omitempty"` + + // Each module object can optionally have its own nested "child_modules", + // recursively describing the full module tree. 
+ ChildModules []Module `json:"child_modules,omitempty"` +} diff --git a/pkg/command/jsonplan/plan.go b/pkg/command/jsonplan/plan.go new file mode 100644 index 00000000000..9c218281564 --- /dev/null +++ b/pkg/command/jsonplan/plan.go @@ -0,0 +1,938 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/jsonchecks" + "github.com/kubegems/opentofu/pkg/command/jsonconfig" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. 
+const ( + FormatVersion = "1.2" + + ResourceInstanceReplaceBecauseCannotUpdate = "replace_because_cannot_update" + ResourceInstanceReplaceBecauseTainted = "replace_because_tainted" + ResourceInstanceReplaceByRequest = "replace_by_request" + ResourceInstanceReplaceByTriggers = "replace_by_triggers" + ResourceInstanceDeleteBecauseNoResourceConfig = "delete_because_no_resource_config" + ResourceInstanceDeleteBecauseWrongRepetition = "delete_because_wrong_repetition" + ResourceInstanceDeleteBecauseCountIndex = "delete_because_count_index" + ResourceInstanceDeleteBecauseEachKey = "delete_because_each_key" + ResourceInstanceDeleteBecauseNoModule = "delete_because_no_module" + ResourceInstanceDeleteBecauseNoMoveTarget = "delete_because_no_move_target" + ResourceInstanceReadBecauseConfigUnknown = "read_because_config_unknown" + ResourceInstanceReadBecauseDependencyPending = "read_because_dependency_pending" + ResourceInstanceReadBecauseCheckNested = "read_because_check_nested" +) + +// Plan is the top-level representation of the json format of a plan. It includes +// the complete config and current state. +type Plan struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Variables Variables `json:"variables,omitempty"` + PlannedValues StateValues `json:"planned_values,omitempty"` + // ResourceDrift and ResourceChanges are sorted in a user-friendly order + // that is undefined at this time, but consistent. 
+ ResourceDrift []ResourceChange `json:"resource_drift,omitempty"` + ResourceChanges []ResourceChange `json:"resource_changes,omitempty"` + OutputChanges map[string]Change `json:"output_changes,omitempty"` + PriorState json.RawMessage `json:"prior_state,omitempty"` + Config json.RawMessage `json:"configuration,omitempty"` + RelevantAttributes []ResourceAttr `json:"relevant_attributes,omitempty"` + Checks json.RawMessage `json:"checks,omitempty"` + Timestamp string `json:"timestamp,omitempty"` + Errored bool `json:"errored"` +} + +func newPlan() *Plan { + return &Plan{ + FormatVersion: FormatVersion, + } +} + +// ResourceAttr contains the address and attribute of an external for the +// RelevantAttributes in the plan. +type ResourceAttr struct { + Resource string `json:"resource"` + Attr json.RawMessage `json:"attribute"` +} + +// Change is the representation of a proposed change for an object. +type Change struct { + // Actions are the actions that will be taken on the object selected by the + // properties below. Valid actions values are: + // ["no-op"] + // ["create"] + // ["read"] + // ["update"] + // ["delete", "create"] + // ["create", "delete"] + // ["delete"] + // ["forget"] + // The two "replace" actions are represented in this way to allow callers to + // e.g. just scan the list for "delete" to recognize all three situations + // where the object will be deleted, allowing for any new deletion + // combinations that might be added in future. + Actions []string `json:"actions,omitempty"` + + // Before and After are representations of the object value both before and + // after the action. For ["delete"] and ["forget"] actions, the "after" + // value is unset. For ["create"] the "before" is unset. For ["no-op"], the + // before and after values are identical. The "after" value will be + // incomplete if there are values within it that won't be known until after + // apply. 
+ Before json.RawMessage `json:"before,omitempty"` + After json.RawMessage `json:"after,omitempty"` + + // AfterUnknown is an object value with similar structure to After, but + // with all unknown leaf values replaced with true, and all known leaf + // values omitted. This can be combined with After to reconstruct a full + // value after the action, including values which will only be known after + // apply. + AfterUnknown json.RawMessage `json:"after_unknown,omitempty"` + + // BeforeSensitive and AfterSensitive are object values with similar + // structure to Before and After, but with all sensitive leaf values + // replaced with true, and all non-sensitive leaf values omitted. These + // objects should be combined with Before and After to prevent accidental + // display of sensitive values in user interfaces. + BeforeSensitive json.RawMessage `json:"before_sensitive,omitempty"` + AfterSensitive json.RawMessage `json:"after_sensitive,omitempty"` + + // ReplacePaths is an array of arrays representing a set of paths into the + // object value which resulted in the action being "replace". This will be + // omitted if the action is not replace, or if no paths caused the + // replacement (for example, if the resource was tainted). Each path + // consists of one or more steps, each of which will be a number or a + // string. + ReplacePaths json.RawMessage `json:"replace_paths,omitempty"` + + // Importing contains the import metadata about this operation. If importing + // is present (ie. not null) then the change is an import operation in + // addition to anything mentioned in the actions field. The actual contents + // of the Importing struct is subject to change, so downstream consumers + // should treat any values in here as strictly optional. + Importing *Importing `json:"importing,omitempty"` + + // GeneratedConfig contains any HCL config generated for this resource + // during planning as a string. 
+ // + // If this is populated, then Importing should also be populated but this + // might change in the future. However, nNot all Importing changes will + // contain generated config. + GeneratedConfig string `json:"generated_config,omitempty"` +} + +// Importing is a nested object for the resource import metadata. +type Importing struct { + // The original ID of this resource used to target it as part of planned + // import operation. + ID string `json:"id,omitempty"` +} + +type Output struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// Variables is the JSON representation of the variables provided to the current +// plan. +type Variables map[string]*Variable + +type Variable struct { + Value json.RawMessage `json:"value,omitempty"` +} + +// MarshalForRenderer returns the pre-json encoding changes of the requested +// plan, in a format available to the structured renderer. +// +// This function does a small part of the Marshal function, as it only returns +// the part of the plan required by the jsonformat.Plan renderer. +func MarshalForRenderer( + p *plans.Plan, + schemas *tofu.Schemas, +) (map[string]Change, []ResourceChange, []ResourceChange, []ResourceAttr, error) { + output := newPlan() + + var err error + if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { + return nil, nil, nil, nil, err + } + + if output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas); err != nil { + return nil, nil, nil, nil, err + } + + if len(p.DriftedResources) > 0 { + // In refresh-only mode, we render all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be included in the planned changes, so + // we skip them here. 
+ var driftedResources []*plans.ResourceInstanceChangeSrc + if p.UIMode == plans.RefreshOnlyMode { + driftedResources = p.DriftedResources + } else { + for _, dr := range p.DriftedResources { + if dr.Action != plans.NoOp { + driftedResources = append(driftedResources, dr) + } + } + } + output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) + if err != nil { + return nil, nil, nil, nil, err + } + } + + if err := output.marshalRelevantAttrs(p); err != nil { + return nil, nil, nil, nil, err + } + + return output.OutputChanges, output.ResourceChanges, output.ResourceDrift, output.RelevantAttributes, nil +} + +// MarshalForLog returns the original JSON compatible plan, ready for a logging +// package to marshal further. +func MarshalForLog( + config *configs.Config, + p *plans.Plan, + sf *statefile.File, + schemas *tofu.Schemas, +) (*Plan, error) { + output := newPlan() + output.TerraformVersion = version.String() + output.Timestamp = p.Timestamp.Format(time.RFC3339) + output.Errored = p.Errored + + err := output.marshalPlanVariables(p.VariableValues, config.Module.Variables) + if err != nil { + return nil, fmt.Errorf("error in marshalPlanVariables: %w", err) + } + + // output.PlannedValues + err = output.marshalPlannedValues(p.Changes, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshalPlannedValues: %w", err) + } + + // output.ResourceDrift + if len(p.DriftedResources) > 0 { + // In refresh-only mode, we render all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be included in the planned changes, so + // we skip them here. 
+ var driftedResources []*plans.ResourceInstanceChangeSrc + if p.UIMode == plans.RefreshOnlyMode { + driftedResources = p.DriftedResources + } else { + for _, dr := range p.DriftedResources { + if dr.Action != plans.NoOp { + driftedResources = append(driftedResources, dr) + } + } + } + output.ResourceDrift, err = MarshalResourceChanges(driftedResources, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshaling resource drift: %w", err) + } + } + + if err := output.marshalRelevantAttrs(p); err != nil { + return nil, fmt.Errorf("error marshaling relevant attributes for external changes: %w", err) + } + + // output.ResourceChanges + if p.Changes != nil { + output.ResourceChanges, err = MarshalResourceChanges(p.Changes.Resources, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshaling resource changes: %w", err) + } + } + + // output.OutputChanges + if output.OutputChanges, err = MarshalOutputChanges(p.Changes); err != nil { + return nil, fmt.Errorf("error in marshaling output changes: %w", err) + } + + // output.Checks + if p.Checks != nil && p.Checks.ConfigResults.Len() > 0 { + output.Checks = jsonchecks.MarshalCheckStates(p.Checks) + } + + // output.PriorState + if sf != nil && !sf.State.Empty() { + output.PriorState, err = jsonstate.Marshal(sf, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling prior state: %w", err) + } + } + + // output.Config + output.Config, err = jsonconfig.Marshal(config, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling config: %w", err) + } + + return output, nil +} + +// Marshal returns the json encoding of a tofu plan. 
+func Marshal( + config *configs.Config, + p *plans.Plan, + sf *statefile.File, + schemas *tofu.Schemas, +) ([]byte, error) { + output, err := MarshalForLog(config, p, sf, schemas) + if err != nil { + return nil, err + } + + return json.Marshal(output) +} + +func (p *Plan) marshalPlanVariables(vars map[string]plans.DynamicValue, decls map[string]*configs.Variable) error { + p.Variables = make(Variables, len(vars)) + + for k, v := range vars { + val, err := v.Decode(cty.DynamicPseudoType) + if err != nil { + return err + } + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + return err + } + p.Variables[k] = &Variable{ + Value: valJSON, + } + } + + // In Terraform v1.1 and earlier we had some confusion about which subsystem + // of Terraform was the one responsible for substituting in default values + // for unset module variables, with root module variables being handled in + // three different places while child module variables were only handled + // during the Terraform Core graph walk. + // + // For Terraform v1.2 and later we rationalized that by having the Terraform + // Core graph walk always be responsible for selecting defaults regardless + // of root vs. child module, but unfortunately our earlier accidental + // misbehavior bled out into the public interface by making the defaults + // show up in the "vars" map to this function. Those are now correctly + // omitted (so that the plan file only records the variables _actually_ + // set by the caller) but consumers of the JSON plan format may be depending + // on our old behavior and so we'll fake it here just in time so that + // outside consumers won't see a behavior change. 
+ for name, decl := range decls { + if _, ok := p.Variables[name]; ok { + continue + } + if val := decl.Default; val != cty.NilVal { + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + return err + } + p.Variables[name] = &Variable{ + Value: valJSON, + } + } + } + + if len(p.Variables) == 0 { + p.Variables = nil // omit this property if there are no variables to describe + } + + return nil +} + +// MarshalResourceChanges converts the provided internal representation of +// ResourceInstanceChangeSrc objects into the public structured JSON changes. +// +// This function is referenced directly from the structured renderer tests, to +// ensure parity between the renderers. It probably shouldn't be used anywhere +// else. +func MarshalResourceChanges(resources []*plans.ResourceInstanceChangeSrc, schemas *tofu.Schemas) ([]ResourceChange, error) { + var ret []ResourceChange + + var sortedResources []*plans.ResourceInstanceChangeSrc + sortedResources = append(sortedResources, resources...) + sort.Slice(sortedResources, func(i, j int) bool { + if !sortedResources[i].Addr.Equal(sortedResources[j].Addr) { + return sortedResources[i].Addr.Less(sortedResources[j].Addr) + } + return sortedResources[i].DeposedKey < sortedResources[j].DeposedKey + }) + + for _, rc := range sortedResources { + var r ResourceChange + addr := rc.Addr + r.Address = addr.String() + if !addr.Equal(rc.PrevRunAddr) { + r.PreviousAddress = rc.PrevRunAddr.String() + } + + dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode + // We create "delete" actions for data resources so we can clean up + // their entries in state, but this is an implementation detail that + // users shouldn't see. 
+ if dataSource && rc.Action == plans.Delete { + continue + } + + schema, _ := schemas.ResourceTypeConfig( + rc.ProviderAddr.Provider, + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s (in provider %s)", r.Address, rc.ProviderAddr.Provider) + } + + changeV, err := rc.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + // We drop the marks from the change, as decoding is only an + // intermediate step to re-encode the values as json + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + var before, after []byte + var beforeSensitive, afterSensitive []byte + var afterUnknown cty.Value + + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return nil, err + } + marks := rc.BeforeValMarks + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(changeV.Before, nil)...) + } + bs := jsonstate.SensitiveAsBoolWithPathValueMarks(changeV.Before, marks) + beforeSensitive, err = ctyjson.Marshal(bs, bs.Type()) + if err != nil { + return nil, err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return nil, err + } + afterUnknown = cty.EmptyObjectVal + } else { + filteredAfter := omitUnknowns(changeV.After) + if filteredAfter.IsNull() { + after = nil + } else { + after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) + if err != nil { + return nil, err + } + } + afterUnknown = unknownAsBool(changeV.After) + } + marks := rc.AfterValMarks + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(changeV.After, nil)...) 
+ } + as := jsonstate.SensitiveAsBoolWithPathValueMarks(changeV.After, marks) + afterSensitive, err = ctyjson.Marshal(as, as.Type()) + if err != nil { + return nil, err + } + } + + a, err := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + if err != nil { + return nil, err + } + replacePaths, err := encodePaths(rc.RequiredReplace) + if err != nil { + return nil, err + } + + var importing *Importing + if rc.Importing != nil { + importing = &Importing{ID: rc.Importing.ID} + } + + r.Change = Change{ + Actions: actionString(rc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + BeforeSensitive: json.RawMessage(beforeSensitive), + AfterSensitive: json.RawMessage(afterSensitive), + ReplacePaths: replacePaths, + Importing: importing, + GeneratedConfig: rc.GeneratedConfig, + } + + if rc.DeposedKey != states.NotDeposed { + r.Deposed = rc.DeposedKey.String() + } + + key := addr.Resource.Key + if key != nil { + value := key.Value() + if r.Index, err = ctyjson.Marshal(value, value.Type()); err != nil { + return nil, err + } + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + r.Mode = jsonstate.ManagedResourceMode + case addrs.DataResourceMode: + r.Mode = jsonstate.DataResourceMode + default: + return nil, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, addr.Resource.Resource.Mode.String()) + } + r.ModuleAddress = addr.Module.String() + r.Name = addr.Resource.Resource.Name + r.Type = addr.Resource.Resource.Type + r.ProviderName = rc.ProviderAddr.Provider.String() + + switch rc.ActionReason { + case plans.ResourceInstanceChangeNoReason: + r.ActionReason = "" // will be omitted in output + case plans.ResourceInstanceReplaceBecauseCannotUpdate: + r.ActionReason = ResourceInstanceReplaceBecauseCannotUpdate + case plans.ResourceInstanceReplaceBecauseTainted: + r.ActionReason = ResourceInstanceReplaceBecauseTainted + case plans.ResourceInstanceReplaceByRequest: + r.ActionReason = 
ResourceInstanceReplaceByRequest + case plans.ResourceInstanceReplaceByTriggers: + r.ActionReason = ResourceInstanceReplaceByTriggers + case plans.ResourceInstanceDeleteBecauseNoResourceConfig: + r.ActionReason = ResourceInstanceDeleteBecauseNoResourceConfig + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + r.ActionReason = ResourceInstanceDeleteBecauseWrongRepetition + case plans.ResourceInstanceDeleteBecauseCountIndex: + r.ActionReason = ResourceInstanceDeleteBecauseCountIndex + case plans.ResourceInstanceDeleteBecauseEachKey: + r.ActionReason = ResourceInstanceDeleteBecauseEachKey + case plans.ResourceInstanceDeleteBecauseNoModule: + r.ActionReason = ResourceInstanceDeleteBecauseNoModule + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + r.ActionReason = ResourceInstanceDeleteBecauseNoMoveTarget + case plans.ResourceInstanceReadBecauseConfigUnknown: + r.ActionReason = ResourceInstanceReadBecauseConfigUnknown + case plans.ResourceInstanceReadBecauseDependencyPending: + r.ActionReason = ResourceInstanceReadBecauseDependencyPending + case plans.ResourceInstanceReadBecauseCheckNested: + r.ActionReason = ResourceInstanceReadBecauseCheckNested + default: + return nil, fmt.Errorf("resource %s has an unsupported action reason %s", r.Address, rc.ActionReason) + } + + ret = append(ret, r) + + } + + return ret, nil +} + +// MarshalOutputChanges converts the provided internal representation of +// Changes objects into the structured JSON representation. +// +// This function is referenced directly from the structured renderer tests, to +// ensure parity between the renderers. It probably shouldn't be used anywhere +// else. +func MarshalOutputChanges(changes *plans.Changes) (map[string]Change, error) { + if changes == nil { + // Nothing to do! + return nil, nil + } + + outputChanges := make(map[string]Change, len(changes.Outputs)) + for _, oc := range changes.Outputs { + + // Skip output changes that are not from the root module. 
+ // These are automatically stripped from plans that are written to disk + // elsewhere, we just need to duplicate the logic here in case anyone + // is converting this plan directly from memory. + if !oc.Addr.Module.IsRoot() { + continue + } + + changeV, err := oc.Decode() + if err != nil { + return nil, err + } + // We drop the marks from the change, as decoding is only an + // intermediate step to re-encode the values as json + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + var before, after []byte + var afterUnknown cty.Value + + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return nil, err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return nil, err + } + afterUnknown = cty.False + } else { + filteredAfter := omitUnknowns(changeV.After) + if filteredAfter.IsNull() { + after = nil + } else { + after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) + if err != nil { + return nil, err + } + } + afterUnknown = unknownAsBool(changeV.After) + } + } + + // The only information we have in the plan about output sensitivity is + // a boolean which is true if the output was or is marked sensitive. As + // a result, BeforeSensitive and AfterSensitive will be identical, and + // either false or true. 
+ outputSensitive := cty.False + if oc.Sensitive { + outputSensitive = cty.True + } + sensitive, err := ctyjson.Marshal(outputSensitive, outputSensitive.Type()) + if err != nil { + return nil, err + } + + a, _ := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + + c := Change{ + Actions: actionString(oc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + BeforeSensitive: json.RawMessage(sensitive), + AfterSensitive: json.RawMessage(sensitive), + + // Just to be explicit, outputs cannot be imported so this is always + // nil. + Importing: nil, + } + + outputChanges[oc.Addr.OutputValue.Name] = c + } + + return outputChanges, nil +} + +func (p *Plan) marshalPlannedValues(changes *plans.Changes, schemas *tofu.Schemas) error { + // marshal the planned changes into a module + plan, err := marshalPlannedValues(changes, schemas) + if err != nil { + return err + } + p.PlannedValues.RootModule = plan + + // marshalPlannedOutputs + outputs, err := marshalPlannedOutputs(changes) + if err != nil { + return err + } + p.PlannedValues.Outputs = outputs + + return nil +} + +func (p *Plan) marshalRelevantAttrs(plan *plans.Plan) error { + for _, ra := range plan.RelevantAttributes { + addr := ra.Resource.String() + path, err := encodePath(ra.Attr) + if err != nil { + return err + } + + p.RelevantAttributes = append(p.RelevantAttributes, ResourceAttr{addr, path}) + } + + // We sort the relevant attributes by resource address to make the output + // deterministic. Our own equivalence tests rely on it. + sort.Slice(p.RelevantAttributes, func(i, j int) bool { + return p.RelevantAttributes[i].Resource < p.RelevantAttributes[j].Resource + }) + + return nil +} + +// omitUnknowns recursively walks the src cty.Value and returns a new cty.Value, +// omitting any unknowns. 
+//
+// The result also normalizes some types: all sequence types are turned into
+// tuple types and all mapping types are converted to object types, since we
+// assume the result of this is just going to be serialized as JSON (and thus
+// lose those distinctions) anyway.
+func omitUnknowns(val cty.Value) cty.Value {
+	switch {
+	case val.IsNull():
+		return val
+	case !val.IsKnown():
+		// Wholly-unknown values are signalled to the caller as cty.NilVal.
+		return cty.NilVal
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsPrimitiveType():
+		return val
+	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
+		var elems []cty.Value
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			filtered := omitUnknowns(ev)
+			if filtered == cty.NilVal {
+				// Element order is how we correlate unknownness, so unknown
+				// elements must be replaced with nulls rather than dropped.
+				filtered = cty.NullVal(ev.Type())
+			}
+			elems = append(elems, filtered)
+		}
+		// Always produce a tuple here: the work above may have given the
+		// individual elements differing types, and JSON marshalling renders
+		// every sequence kind as an array anyway.
+		return cty.TupleVal(elems)
+	case ty.IsMapType() || ty.IsObjectType():
+		attrs := make(map[string]cty.Value)
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			if filtered := omitUnknowns(ev); filtered != cty.NilVal {
+				attrs[kv.AsString()] = filtered
+			}
+		}
+		// Always produce an object here: the work above may have given the
+		// individual values differing types, and JSON marshalling renders
+		// both mapping kinds as an object anyway.
+		return cty.ObjectVal(attrs)
+	default:
+		// Unreachable: the cases above cover every cty type kind.
+		panic(fmt.Sprintf("omitUnknowns cannot handle %#v", val))
+	}
+}
+
+// unknownAsBool recursively maps a cty.Value onto a shadow structure of
+// booleans: unknown values become cty.True, while known values (including
+// nulls) become cty.False.
+//
+// The result also normalizes some types: all sequence types are turned into
+// tuple types and all mapping types are converted to object types, since we
+// assume the result of this is just going to be serialized as JSON (and thus
+// lose those distinctions) anyway.
+//
+// For map/object values, known leaf values are omitted entirely rather than
+// recorded as false, which keeps the serialized form compact.
+func unknownAsBool(val cty.Value) cty.Value {
+	switch {
+	case val.IsNull():
+		// Null values are known values.
+		return cty.False
+	case !val.IsKnown():
+		// Any unknown value, of any type (including wholly-unknown
+		// collections, whose elements we cannot inspect), maps to true.
+		return cty.True
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsPrimitiveType():
+		return cty.False
+	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
+		n := val.LengthInt()
+		if n == 0 {
+			// No elements means no unknowns.
+			return cty.EmptyTupleVal
+		}
+		elems := make([]cty.Value, 0, n)
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			elems = append(elems, unknownAsBool(ev))
+		}
+		// Always produce a tuple here: the transform above may have given the
+		// individual elements differing types, and JSON marshalling renders
+		// every sequence kind as an array anyway.
+		return cty.TupleVal(elems)
+	case ty.IsMapType() || ty.IsObjectType():
+		var n int
+		if ty.IsMapType() {
+			n = val.LengthInt()
+		} else {
+			n = len(ty.AttributeTypes())
+		}
+		if n == 0 {
+			// No elements means no unknowns.
+			return cty.EmptyObjectVal
+		}
+		attrs := make(map[string]cty.Value)
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			// Omit all of the "false"s for known values for more compact
+			// serialization.
+			if evBool := unknownAsBool(ev); !evBool.RawEquals(cty.False) {
+				attrs[kv.AsString()] = evBool
+			}
+		}
+		// Always produce an object here: the transform above may have given
+		// the individual values differing types, and JSON marshalling renders
+		// both mapping kinds as an object anyway.
+		return cty.ObjectVal(attrs)
+	default:
+		// Unreachable: the cases above cover every cty type kind.
+		panic(fmt.Sprintf("unknownAsBool cannot handle %#v", val))
+	}
+}
+
+// actionString converts the name of a plans.Action into its JSON
+// representation: a slice of one or two lowercase action keywords.
+func actionString(action string) []string {
+	switch action {
+	case "NoOp":
+		return []string{"no-op"}
+	case "Create":
+		return []string{"create"}
+	case "Delete":
+		return []string{"delete"}
+	case "Update":
+		return []string{"update"}
+	case "CreateThenDelete":
+		return []string{"create", "delete"}
+	case "DeleteThenCreate":
+		return []string{"delete", "create"}
+	case "Read":
+		return []string{"read"}
+	case "Forget":
+		return []string{"forget"}
+	default:
+		// Pass unrecognized action names through verbatim.
+		return []string{action}
+	}
+}
+
+// UnmarshalActions reverses the actionString function.
+func UnmarshalActions(actions []string) plans.Action {
+	switch len(actions) {
+	case 2:
+		// Replacement actions are the only two-element encodings.
+		first, second := actions[0], actions[1]
+		if first == "create" && second == "delete" {
+			return plans.CreateThenDelete
+		}
+		if first == "delete" && second == "create" {
+			return plans.DeleteThenCreate
+		}
+	case 1:
+		switch actions[0] {
+		case "create":
+			return plans.Create
+		case "delete":
+			return plans.Delete
+		case "update":
+			return plans.Update
+		case "read":
+			return plans.Read
+		case "no-op":
+			return plans.NoOp
+		case "forget":
+			return plans.Forget
+		}
+	}
+
+	panic("unrecognized action slice: " + strings.Join(actions, ", "))
+}
+
+// encodePaths lossily encodes a cty.PathSet into an array of arrays of step
+// values, such as:
+//
+//	[["length"],["triggers",0,"value"]]
+//
+// The lossiness is that we cannot distinguish between an IndexStep with string
+// key and a GetAttr step. This is fine with JSON output, because JSON's type
+// system means that those two steps are equivalent anyway: both are object
+// indexes.
+//
+// JavaScript (or similar dynamic language) consumers of these values can
+// iterate over the steps starting from the root object to reach the
+// value that each path is describing.
+func encodePaths(pathSet cty.PathSet) (json.RawMessage, error) {
+	if pathSet.Empty() {
+		return nil, nil
+	}
+
+	pathList := pathSet.List()
+	jsonPaths := make([]json.RawMessage, 0, len(pathList))
+
+	for _, path := range pathList {
+		jsonPath, err := encodePath(path)
+		if err != nil {
+			return nil, err
+		}
+		jsonPaths = append(jsonPaths, jsonPath)
+	}
+
+	return json.Marshal(jsonPaths)
+}
+
+// encodePath encodes a single cty.Path as a JSON array of steps: an IndexStep
+// is rendered as its key value and a GetAttrStep as its attribute name. See
+// encodePaths for notes on the (acceptable) lossiness of this encoding.
+func encodePath(path cty.Path) (json.RawMessage, error) {
+	steps := make([]json.RawMessage, 0, len(path))
+	for _, step := range path {
+		switch s := step.(type) {
+		case cty.IndexStep:
+			key, err := ctyjson.Marshal(s.Key, s.Key.Type())
+			if err != nil {
+				return nil, fmt.Errorf("failed to marshal index step key %#v: %w", s.Key, err)
+			}
+			steps = append(steps, key)
+		case cty.GetAttrStep:
+			name, err := json.Marshal(s.Name)
+			if err != nil {
+				return nil, fmt.Errorf("failed to marshal get attr step name %#v: %w", s.Name, err)
+			}
+			steps = append(steps, name)
+		default:
+			// %T (not the boolean verb %t, which would render as %!t(...))
+			// reports the dynamic type of the unsupported step.
+			return nil, fmt.Errorf("unsupported path step %#v (%T)", step, step)
+		}
+	}
+	return json.Marshal(steps)
+}
diff --git a/pkg/command/jsonplan/plan_test.go b/pkg/command/jsonplan/plan_test.go
new file mode 100644
index 00000000000..03a1e5307e4
--- /dev/null
+++ b/pkg/command/jsonplan/plan_test.go
@@ -0,0 +1,516 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +func TestOmitUnknowns(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.StringVal("hello"), + }, + { + cty.NullVal(cty.String), + cty.NullVal(cty.String), + }, + { + cty.UnknownVal(cty.String), + cty.NilVal, + }, + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + }, + // + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.NullVal(cty.String), + }), + }, + { + cty.MapVal(map[string]cty.Value{ + "hello": cty.True, + "world": cty.UnknownVal(cty.Bool), + }), + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.True, + }), + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("alpha"), + cty.UnknownVal(cty.String), + cty.StringVal("charlie"), + }), + cty.TupleVal([]cty.Value{ + cty.StringVal("alpha"), + cty.NullVal(cty.String), + cty.StringVal("charlie"), + }), + }, + { + cty.SetVal([]cty.Value{ + cty.StringVal("dev"), + cty.StringVal("foo"), + cty.StringVal("stg"), + cty.UnknownVal(cty.String), + }), + cty.TupleVal([]cty.Value{ + cty.StringVal("dev"), + cty.StringVal("foo"), + cty.StringVal("stg"), + cty.NullVal(cty.String), + }), + }, + { + cty.SetVal([]cty.Value{ 
+ cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := omitUnknowns(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} + +func TestUnknownAsBool(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.False, + }, + { + cty.NullVal(cty.String), + cty.False, + }, + { + cty.UnknownVal(cty.String), + cty.True, + }, + + { + cty.NullVal(cty.DynamicPseudoType), + cty.False, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), + cty.False, + }, + { + cty.DynamicVal, + cty.True, + }, + + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.SetValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.EmptyTupleVal, + cty.EmptyTupleVal, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + 
cty.TupleVal([]cty.Value{cty.UnknownVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.True}), + }, + { + cty.MapValEmpty(cty.String), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), + cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), + }, + { + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.StringVal("hello")}), + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.NullVal(cty.String)}), + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{"greeting": cty.UnknownVal(cty.String)}), + cty.ObjectVal(map[string]cty.Value{"greeting": cty.True}), + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.SetVal([]cty.Value{ + cty.MapValEmpty(cty.String), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := unknownAsBool(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} + +func TestEncodePaths(t *testing.T) { + tests := map[string]struct { + Input cty.PathSet + Want json.RawMessage + }{ + "empty set": { + cty.NewPathSet(), + 
json.RawMessage(nil), + }, + "index path with string and int steps": { + cty.NewPathSet(cty.IndexStringPath("boop").IndexInt(0)), + json.RawMessage(`[["boop",0]]`), + }, + "get attr path with one step": { + cty.NewPathSet(cty.GetAttrPath("triggers")), + json.RawMessage(`[["triggers"]]`), + }, + "multiple paths of different types": { + // The order of the path sets is not guaranteed, so we sort the + // result by the number of elements in the path to make the test deterministic. + cty.NewPathSet( + cty.GetAttrPath("alpha").GetAttr("beta"), // 2 elements + cty.GetAttrPath("triggers").IndexString("name").IndexString("test"), // 3 elements + cty.IndexIntPath(0).IndexInt(1).IndexInt(2).IndexInt(3), // 4 elements + ), + json.RawMessage(`[[0,1,2,3],["alpha","beta"],["triggers","name","test"]]`), + }, + } + + // comp is a custom comparator for comparing JSON arrays. It sorts the + // arrays based on the number of elements in each path before comparing them. + // this allows our test cases to be more flexible about the order of the + // paths in the result. and deterministic on both 32 and 64 bit architectures. 
+ comp := func(a, b json.RawMessage) (bool, error) { + if a == nil && b == nil { + return true, nil // Both are nil, they are equal + } + if a == nil || b == nil { + return false, nil // One is nil and the other is not, they are not equal + } + + var pathsA, pathsB [][]interface{} + err := json.Unmarshal(a, &pathsA) + if err != nil { + return false, fmt.Errorf("error unmarshalling first argument: %w", err) + } + err = json.Unmarshal(b, &pathsB) + if err != nil { + return false, fmt.Errorf("error unmarshalling second argument: %w", err) + } + + // Sort the slices based on the number of elements in each path + sort.Slice(pathsA, func(i, j int) bool { + return len(pathsA[i]) < len(pathsA[j]) + }) + sort.Slice(pathsB, func(i, j int) bool { + return len(pathsB[i]) < len(pathsB[j]) + }) + + return cmp.Equal(pathsA, pathsB), nil + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, err := encodePaths(test.Input) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + equal, err := comp(got, test.Want) + if err != nil { + t.Fatalf("error comparing JSON slices: %s", err) + } + if !equal { + t.Errorf("paths do not match:\n%s", cmp.Diff(got, test.Want)) + } + }) + } +} + +func TestOutputs(t *testing.T) { + root := addrs.RootModuleInstance + + child, diags := addrs.ParseModuleInstanceStr("module.child") + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + tests := map[string]struct { + changes *plans.Changes + expected map[string]Change + }{ + "copies all outputs": { + changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: root.OutputValue("first"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: root.OutputValue("second"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + }, + expected: map[string]Change{ + "first": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: 
json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + "second": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + }, + }, + "skips non root modules": { + changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: root.OutputValue("first"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: child.OutputValue("second"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + }, + expected: map[string]Change{ + "first": { + Actions: []string{"create"}, + Before: json.RawMessage("null"), + After: json.RawMessage("null"), + AfterUnknown: json.RawMessage("false"), + BeforeSensitive: json.RawMessage("false"), + AfterSensitive: json.RawMessage("false"), + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + changes, err := MarshalOutputChanges(test.changes) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + if !cmp.Equal(changes, test.expected) { + t.Errorf("wrong result:\n %v\n", cmp.Diff(changes, test.expected)) + } + }) + } +} + +func deepObjectValue(depth int) cty.Value { + v := cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a"), + "b": cty.NumberIntVal(2), + "c": cty.True, + "d": cty.UnknownVal(cty.String), + }) + + result := v + + for i := 0; i < depth; i++ { + result = cty.ObjectVal(map[string]cty.Value{ + "a": result, + "b": result, + "c": result, + }) + } + + return result +} + +func BenchmarkUnknownAsBool_2(b *testing.B) { + value := deepObjectValue(2) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_3(b *testing.B) { + value := deepObjectValue(3) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_5(b *testing.B) { + 
value := deepObjectValue(5) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_7(b *testing.B) { + value := deepObjectValue(7) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} + +func BenchmarkUnknownAsBool_9(b *testing.B) { + value := deepObjectValue(9) + for n := 0; n < b.N; n++ { + unknownAsBool(value) + } +} diff --git a/pkg/command/jsonplan/resource.go b/pkg/command/jsonplan/resource.go new file mode 100644 index 00000000000..e5fecddb99d --- /dev/null +++ b/pkg/command/jsonplan/resource.go @@ -0,0 +1,97 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +import ( + "encoding/json" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Resource is the representation of a resource in the json plan +type Resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each` + Index addrs.InstanceKey `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. 
Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues AttributeValues `json:"values,omitempty"` + + // SensitiveValues is similar to AttributeValues, but with all sensitive + // values replaced with true, and all non-sensitive leaf values omitted. + SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` +} + +// ResourceChange is a description of an individual change action that OpenTofu +// plans to use to move from the prior state to a new state matching the +// configuration. +type ResourceChange struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // PreviousAddress is the absolute address that this resource instance had + // at the conclusion of a previous run. + // + // This will typically be omitted, but will be present if the previous + // resource instance was subject to a "moved" block that we handled in the + // process of creating this plan. + // + // Note that this behavior diverges from the internal plan data structure, + // where the previous address is set equal to the current address in the + // common case, rather than being omitted. + PreviousAddress string `json:"previous_address,omitempty"` + + // ModuleAddress is the module portion of the above address. Omitted if the + // instance is in the root module. + ModuleAddress string `json:"module_address,omitempty"` + + // "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Index json.RawMessage `json:"index,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + + // "deposed", if set, indicates that this action applies to a "deposed" + // object of the given instance rather than to its "current" object. Omitted + // for changes to the current object. 
+ Deposed string `json:"deposed,omitempty"` + + // Change describes the change that will be made to this object + Change Change `json:"change,omitempty"` + + // ActionReason is a keyword representing some optional extra context + // for why the actions in Change.Actions were chosen. + // + // This extra detail is only for display purposes, to help a UI layer + // present some additional explanation to a human user. The possible + // values here might grow and change over time, so any consumer of this + // information should be resilient to encountering unrecognized values + // and treat them as an unspecified reason. + ActionReason string `json:"action_reason,omitempty"` +} diff --git a/pkg/command/jsonplan/values.go b/pkg/command/jsonplan/values.go new file mode 100644 index 00000000000..4d3f54331ae --- /dev/null +++ b/pkg/command/jsonplan/values.go @@ -0,0 +1,287 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StateValues is the common representation of resolved values for both the +// prior state (which is always complete) and the planned new state. +type StateValues struct { + Outputs map[string]Output `json:"outputs,omitempty"` + RootModule Module `json:"root_module,omitempty"` +} + +// AttributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. 
+type AttributeValues map[string]interface{} + +func marshalAttributeValues(value cty.Value, schema *configschema.Block) AttributeValues { + if value == cty.NilVal || value.IsNull() { + return nil + } + ret := make(AttributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// marshalPlannedOutputs takes a list of changes and returns a map of output +// values +func marshalPlannedOutputs(changes *plans.Changes) (map[string]Output, error) { + if changes.Outputs == nil { + // No changes - we're done here! + return nil, nil + } + + ret := make(map[string]Output) + + for _, oc := range changes.Outputs { + if oc.ChangeSrc.Action == plans.Delete { + continue + } + + var after, afterType []byte + changeV, err := oc.Decode() + if err != nil { + return ret, err + } + // The values may be marked, but we must rely on the Sensitive flag + // as the decoded value is only an intermediate step in transcoding + // this to a json format. 
+ changeV.After, _ = changeV.After.UnmarkDeep() + + if changeV.After != cty.NilVal && changeV.After.IsWhollyKnown() { + ty := changeV.After.Type() + after, err = ctyjson.Marshal(changeV.After, ty) + if err != nil { + return ret, err + } + afterType, err = ctyjson.MarshalType(ty) + if err != nil { + return ret, err + } + } + + ret[oc.Addr.OutputValue.Name] = Output{ + Value: json.RawMessage(after), + Type: json.RawMessage(afterType), + Sensitive: oc.Sensitive, + } + } + + return ret, nil + +} + +func marshalPlannedValues(changes *plans.Changes, schemas *tofu.Schemas) (Module, error) { + var ret Module + + // build two maps: + // module name -> [resource addresses] + // module -> [children modules] + moduleResourceMap := make(map[string][]addrs.AbsResourceInstance) + moduleMap := make(map[string][]addrs.ModuleInstance) + seenModules := make(map[string]bool) + + for _, resource := range changes.Resources { + // If the resource is being deleted, skip over it. + // Deposed instances are always conceptually a destroy, but if they + // were gone during refresh then the change becomes a noop. + if resource.Action != plans.Delete && resource.DeposedKey == states.NotDeposed { + containingModule := resource.Addr.Module.String() + moduleResourceMap[containingModule] = append(moduleResourceMap[containingModule], resource.Addr) + + // the root module has no parents + if !resource.Addr.Module.IsRoot() { + parent := resource.Addr.Module.Parent().String() + // we expect to see multiple resources in one module, so we + // only need to report the "parent" module for each child module + // once. + if !seenModules[containingModule] { + moduleMap[parent] = append(moduleMap[parent], resource.Addr.Module) + seenModules[containingModule] = true + } + + // If any given parent module has no resources, it needs to be + // added to the moduleMap. 
This walks through the current + // resources' modules' ancestors, taking advantage of the fact + // that Ancestors() returns an ordered slice, and verifies that + // each one is in the map. + ancestors := resource.Addr.Module.Ancestors() + for i, ancestor := range ancestors[:len(ancestors)-1] { + aStr := ancestor.String() + + // childStr here is the immediate child of the current step + childStr := ancestors[i+1].String() + // we likely will see multiple resources in one module, so we + // only need to report the "parent" module for each child module + // once. + if !seenModules[childStr] { + moduleMap[aStr] = append(moduleMap[aStr], ancestors[i+1]) + seenModules[childStr] = true + } + } + } + } + } + + // start with the root module + resources, err := marshalPlanResources(changes, moduleResourceMap[""], schemas) + if err != nil { + return ret, err + } + ret.Resources = resources + + childModules, err := marshalPlanModules(changes, schemas, moduleMap[""], moduleMap, moduleResourceMap) + if err != nil { + return ret, err + } + sort.Slice(childModules, func(i, j int) bool { + return childModules[i].Address < childModules[j].Address + }) + + ret.ChildModules = childModules + + return ret, nil +} + +// marshalPlanResources +func marshalPlanResources(changes *plans.Changes, ris []addrs.AbsResourceInstance, schemas *tofu.Schemas) ([]Resource, error) { + var ret []Resource + + for _, ri := range ris { + r := changes.ResourceInstance(ri) + if r.Action == plans.Delete { + continue + } + + resource := Resource{ + Address: r.Addr.String(), + Type: r.Addr.Resource.Resource.Type, + Name: r.Addr.Resource.Resource.Name, + ProviderName: r.ProviderAddr.Provider.String(), + Index: r.Addr.Resource.Key, + } + + switch r.Addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + resource.Mode = "managed" + case addrs.DataResourceMode: + resource.Mode = "data" + default: + return nil, fmt.Errorf("resource %s has an unsupported mode %s", + r.Addr.String(), + 
r.Addr.Resource.Resource.Mode.String(), + ) + } + + schema, schemaVer := schemas.ResourceTypeConfig( + r.ProviderAddr.Provider, + r.Addr.Resource.Resource.Mode, + resource.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s", r.Addr.String()) + } + resource.SchemaVersion = schemaVer + changeV, err := r.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + // copy the marked After values so we can use these in marshalSensitiveValues + markedAfter := changeV.After + + // The values may be marked, but we must rely on the Sensitive flag + // as the decoded value is only an intermediate step in transcoding + // this to a json format. + changeV.Before, _ = changeV.Before.UnmarkDeep() + changeV.After, _ = changeV.After.UnmarkDeep() + + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + resource.AttributeValues = marshalAttributeValues(changeV.After, schema) + } else { + knowns := omitUnknowns(changeV.After) + resource.AttributeValues = marshalAttributeValues(knowns, schema) + } + } + + s := jsonstate.SensitiveAsBool(markedAfter) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + resource.SensitiveValues = v + + ret = append(ret, resource) + } + + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} + +// marshalPlanModules iterates over a list of modules to recursively describe +// the full module tree. +func marshalPlanModules( + changes *plans.Changes, + schemas *tofu.Schemas, + childModules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, + moduleResourceMap map[string][]addrs.AbsResourceInstance, +) ([]Module, error) { + + var ret []Module + + for _, child := range childModules { + moduleResources := moduleResourceMap[child.String()] + // cm for child module, naming things is hard. 
+ var cm Module + // don't populate the address for the root module + if child.String() != "" { + cm.Address = child.String() + } + rs, err := marshalPlanResources(changes, moduleResources, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + + if len(moduleMap[child.String()]) > 0 { + moreChildModules, err := marshalPlanModules(changes, schemas, moduleMap[child.String()], moduleMap, moduleResourceMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + return ret, nil +} diff --git a/pkg/command/jsonplan/values_test.go b/pkg/command/jsonplan/values_test.go new file mode 100644 index 00000000000..69b74d3664c --- /dev/null +++ b/pkg/command/jsonplan/values_test.go @@ -0,0 +1,381 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonplan + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestMarshalAttributeValues(t *testing.T) { + tests := []struct { + Attr cty.Value + Schema *configschema.Block + Want AttributeValues + }{ + { + cty.NilVal, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + nil, + }, + { + cty.NullVal(cty.String), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + nil, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + 
AttributeValues{"foo": json.RawMessage(`"bar"`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + AttributeValues{"foo": json.RawMessage(`null`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon"), + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + AttributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + } + + for _, test := range tests { + got := marshalAttributeValues(test.Attr, test.Schema) + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalPlannedOutputs(t *testing.T) { + after, _ := plans.NewDynamicValue(cty.StringVal("after"), cty.DynamicPseudoType) + + tests := []struct { + Changes *plans.Changes + Want map[string]Output + Err bool + }{ + { + &plans.Changes{}, + nil, + false, + }, + { + &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + After: after, + }, + Sensitive: false, + }, + }, + }, + map[string]Output{ + "bar": { + Sensitive: false, + Type: json.RawMessage(`"string"`), + Value: json.RawMessage(`"after"`), + }, + }, + false, + }, + { // Delete action + &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + 
Sensitive: false, + }, + }, + }, + map[string]Output{}, + false, + }, + } + + for _, test := range tests { + got, err := marshalPlannedOutputs(test.Changes) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalPlanResources(t *testing.T) { + tests := map[string]struct { + Action plans.Action + Before cty.Value + After cty.Value + Want []Resource + Err bool + }{ + "create with unknowns": { + Action: plans.Create, + Before: cty.NullVal(cty.EmptyObject), + After: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.UnknownVal(cty.String), + "foozles": cty.UnknownVal(cty.String), + }), + Want: []Resource{{ + Address: "test_thing.example", + Mode: "managed", + Type: "test_thing", + Name: "example", + Index: addrs.InstanceKey(nil), + ProviderName: "registry.opentofu.org/hashicorp/test", + SchemaVersion: 1, + AttributeValues: AttributeValues{}, + SensitiveValues: json.RawMessage("{}"), + }}, + Err: false, + }, + "delete with null and nil": { + Action: plans.Delete, + Before: cty.NullVal(cty.EmptyObject), + After: cty.NilVal, + Want: nil, + Err: false, + }, + "delete": { + Action: plans.Delete, + Before: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("foo"), + "foozles": cty.StringVal("bar"), + }), + After: cty.NullVal(cty.Object(map[string]cty.Type{ + "woozles": cty.String, + "foozles": cty.String, + })), + Want: nil, + Err: false, + }, + "update without unknowns": { + Action: plans.Update, + Before: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("foo"), + "foozles": cty.StringVal("bar"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "woozles": cty.StringVal("baz"), + "foozles": cty.StringVal("bat"), + }), + Want: []Resource{{ + Address: "test_thing.example", + Mode: "managed", + Type: 
"test_thing", + Name: "example", + Index: addrs.InstanceKey(nil), + ProviderName: "registry.opentofu.org/hashicorp/test", + SchemaVersion: 1, + AttributeValues: AttributeValues{ + "woozles": json.RawMessage(`"baz"`), + "foozles": json.RawMessage(`"bat"`), + }, + SensitiveValues: json.RawMessage("{}"), + }}, + Err: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + before, err := plans.NewDynamicValue(test.Before, test.Before.Type()) + if err != nil { + t.Fatal(err) + } + + after, err := plans.NewDynamicValue(test.After, test.After.Type()) + if err != nil { + t.Fatal(err) + } + testChange := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "example", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: test.Action, + Before: before, + After: after, + }, + }, + }, + } + + ris := testResourceAddrs() + + got, err := marshalPlanResources(testChange, ris, testSchemas()) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + }) + } +} + +func TestMarshalPlanValuesNoopDeposed(t *testing.T) { + dynamicNull, err := plans.NewDynamicValue(cty.NullVal(cty.DynamicPseudoType), cty.DynamicPseudoType) + if err != nil { + t.Fatal(err) + } + testChange := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "example", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + DeposedKey: "12345678", + ProviderAddr: addrs.AbsProviderConfig{ + 
Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: dynamicNull, + After: dynamicNull, + }, + }, + }, + } + + _, err = marshalPlannedValues(testChange, testSchemas()) + if err != nil { + t.Fatal(err) + } +} + +func testSchemas() *tofu.Schemas { + return &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_thing": { + Version: 1, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "woozles": {Type: cty.String, Optional: true, Computed: true}, + "foozles": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + } +} + +func testResourceAddrs() []addrs.AbsResourceInstance { + return []addrs.AbsResourceInstance{ + mustAddr("test_thing.example"), + } +} + +func mustAddr(str string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(str) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} diff --git a/pkg/command/jsonprovider/attribute.go b/pkg/command/jsonprovider/attribute.go new file mode 100644 index 00000000000..0b0be1a08e8 --- /dev/null +++ b/pkg/command/jsonprovider/attribute.go @@ -0,0 +1,72 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +type Attribute struct { + AttributeType json.RawMessage `json:"type,omitempty"` + AttributeNestedType *NestedType `json:"nested_type,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Required bool `json:"required,omitempty"` + Optional bool `json:"optional,omitempty"` + Computed bool `json:"computed,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type NestedType struct { + Attributes map[string]*Attribute `json:"attributes,omitempty"` + NestingMode string `json:"nesting_mode,omitempty"` +} + +func marshalStringKind(sk configschema.StringKind) string { + switch sk { + default: + return "plain" + case configschema.StringMarkdown: + return "markdown" + } +} + +func marshalAttribute(attr *configschema.Attribute) *Attribute { + ret := &Attribute{ + Description: attr.Description, + DescriptionKind: marshalStringKind(attr.DescriptionKind), + Required: attr.Required, + Optional: attr.Optional, + Computed: attr.Computed, + Sensitive: attr.Sensitive, + Deprecated: attr.Deprecated, + } + + // we're not concerned about errors because at this point the schema has + // already been checked and re-checked. 
+ if attr.Type != cty.NilType { + attrTy, _ := attr.Type.MarshalJSON() + ret.AttributeType = attrTy + } + + if attr.NestedType != nil { + nestedTy := NestedType{ + NestingMode: nestingModeString(attr.NestedType.Nesting), + } + attrs := make(map[string]*Attribute, len(attr.NestedType.Attributes)) + for k, attr := range attr.NestedType.Attributes { + attrs[k] = marshalAttribute(attr) + } + nestedTy.Attributes = attrs + ret.AttributeNestedType = &nestedTy + } + + return ret +} diff --git a/pkg/command/jsonprovider/attribute_test.go b/pkg/command/jsonprovider/attribute_test.go new file mode 100644 index 00000000000..5ca40067238 --- /dev/null +++ b/pkg/command/jsonprovider/attribute_test.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +func TestMarshalAttribute(t *testing.T) { + tests := []struct { + Input *configschema.Attribute + Want *Attribute + }{ + { + &configschema.Attribute{Type: cty.String, Optional: true, Computed: true}, + &Attribute{ + AttributeType: json.RawMessage(`"string"`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + }, + { // collection types look a little odd. 
+ &configschema.Attribute{Type: cty.Map(cty.String), Optional: true, Computed: true}, + &Attribute{ + AttributeType: json.RawMessage(`["map","string"]`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + }, + } + + for _, test := range tests { + got := marshalAttribute(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} diff --git a/pkg/command/jsonprovider/block.go b/pkg/command/jsonprovider/block.go new file mode 100644 index 00000000000..26b954fa57e --- /dev/null +++ b/pkg/command/jsonprovider/block.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +type Block struct { + Attributes map[string]*Attribute `json:"attributes,omitempty"` + BlockTypes map[string]*BlockType `json:"block_types,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` +} + +type BlockType struct { + NestingMode string `json:"nesting_mode,omitempty"` + Block *Block `json:"block,omitempty"` + MinItems uint64 `json:"min_items,omitempty"` + MaxItems uint64 `json:"max_items,omitempty"` +} + +func marshalBlockTypes(nestedBlock *configschema.NestedBlock) *BlockType { + if nestedBlock == nil { + return &BlockType{} + } + ret := &BlockType{ + Block: marshalBlock(&nestedBlock.Block), + MinItems: uint64(nestedBlock.MinItems), + MaxItems: uint64(nestedBlock.MaxItems), + NestingMode: nestingModeString(nestedBlock.Nesting), + } + return ret +} + +func marshalBlock(configBlock *configschema.Block) *Block { + if configBlock == nil { + return &Block{} + } + + ret := Block{ + Deprecated: configBlock.Deprecated, + Description: configBlock.Description, + DescriptionKind: 
marshalStringKind(configBlock.DescriptionKind), + } + + if len(configBlock.Attributes) > 0 { + attrs := make(map[string]*Attribute, len(configBlock.Attributes)) + for k, attr := range configBlock.Attributes { + attrs[k] = marshalAttribute(attr) + } + ret.Attributes = attrs + } + + if len(configBlock.BlockTypes) > 0 { + blockTypes := make(map[string]*BlockType, len(configBlock.BlockTypes)) + for k, bt := range configBlock.BlockTypes { + blockTypes[k] = marshalBlockTypes(bt) + } + ret.BlockTypes = blockTypes + } + + return &ret +} + +func nestingModeString(mode configschema.NestingMode) string { + switch mode { + case configschema.NestingSingle: + return "single" + case configschema.NestingGroup: + return "group" + case configschema.NestingList: + return "list" + case configschema.NestingSet: + return "set" + case configschema.NestingMap: + return "map" + default: + return "invalid" + } +} diff --git a/pkg/command/jsonprovider/block_test.go b/pkg/command/jsonprovider/block_test.go new file mode 100644 index 00000000000..e9021f5c128 --- /dev/null +++ b/pkg/command/jsonprovider/block_test.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +func TestMarshalBlock(t *testing.T) { + tests := []struct { + Input *configschema.Block + Want *Block + }{ + { + nil, + &Block{}, + }, + { + Input: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + Want: &Block{ + Attributes: map[string]*Attribute{ + "ami": {AttributeType: json.RawMessage(`"string"`), Optional: true, DescriptionKind: "plain"}, + "id": {AttributeType: json.RawMessage(`"string"`), Optional: true, Computed: true, DescriptionKind: "plain"}, + }, + BlockTypes: map[string]*BlockType{ + "network_interface": { + NestingMode: "list", + Block: &Block{ + Attributes: map[string]*Attribute{ + "description": {AttributeType: json.RawMessage(`"string"`), Optional: true, DescriptionKind: "plain"}, + "device_index": {AttributeType: json.RawMessage(`"string"`), Optional: true, DescriptionKind: "plain"}, + }, + DescriptionKind: "plain", + }, + }, + }, + DescriptionKind: "plain", + }, + }, + } + + for _, test := range tests { + got := marshalBlock(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} diff --git a/pkg/command/jsonprovider/doc.go b/pkg/command/jsonprovider/doc.go new file mode 100644 index 00000000000..7395147dc04 --- /dev/null +++ b/pkg/command/jsonprovider/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The 
OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package jsonprovider contains types and functions to marshal OpenTofu +// provider schemas into a json formatted output. +package jsonprovider diff --git a/pkg/command/jsonprovider/function.go b/pkg/command/jsonprovider/function.go new file mode 100644 index 00000000000..76208713918 --- /dev/null +++ b/pkg/command/jsonprovider/function.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +const ( + mapTypeName = "map" + listTypeName = "list" + setTypeName = "set" + tupleTypeName = "tuple" +) + +// Function is the top-level object returned when exporting function schemas +type Function struct { + Description string `json:"description"` + Summary string `json:"summary"` + ReturnType any `json:"return_type"` + Parameters []*FunctionParam `json:"parameters,omitempty"` + VariadicParameter *FunctionParam `json:"variadic_parameter,omitempty"` +} + +// FunctionParam is the object for wrapping the functions parameters and return types +type FunctionParam struct { + Name string `json:"name"` + Description string `json:"description"` + Type any `json:"type"` + IsNullable *bool `json:"is_nullable,omitempty"` +} + +func marshalReturnType(returnType cty.Type) any { + switch { + case returnType.IsObjectType(): + return []any{ + returnType.FriendlyName(), + returnType.AttributeTypes(), + } + case returnType.IsListType(): + return []any{ + listTypeName, + returnType.ListElementType(), + } + case returnType.IsMapType(): + return []any{ + mapTypeName, + returnType.MapElementType(), + } + case returnType.IsSetType(): + return []any{ + setTypeName, + returnType.SetElementType(), + } + case returnType.IsTupleType(): 
+ return []any{ + tupleTypeName, + returnType.TupleElementTypes(), + } + default: + return returnType.FriendlyName() + } +} + +func marshalParameter(parameter providers.FunctionParameterSpec) *FunctionParam { + var output FunctionParam + output.Description = parameter.Description + output.Name = parameter.Name + output.Type = marshalReturnType(parameter.Type) + + if parameter.AllowNullValue { + isNullable := true + output.IsNullable = &isNullable + } + + return &output +} + +func marshalParameters(parameters []providers.FunctionParameterSpec) []*FunctionParam { + output := make([]*FunctionParam, 0, len(parameters)) + for _, parameter := range parameters { + output = append(output, marshalParameter(parameter)) + } + + return output +} + +func marshalFunction(function providers.FunctionSpec) *Function { + var output Function + output.Description = function.Description + output.Summary = function.Summary + output.ReturnType = marshalReturnType(function.Return) + output.Parameters = marshalParameters(function.Parameters) + if function.VariadicParameter != nil { + output.VariadicParameter = marshalParameter(*function.VariadicParameter) + } + + return &output +} + +func marshalFunctions(functions map[string]providers.FunctionSpec) map[string]*Function { + if functions == nil { + return map[string]*Function{} + } + output := make(map[string]*Function, len(functions)) + for k, v := range functions { + output[k] = marshalFunction(v) + } + return output +} diff --git a/pkg/command/jsonprovider/function_test.go b/pkg/command/jsonprovider/function_test.go new file mode 100644 index 00000000000..7ffd279f224 --- /dev/null +++ b/pkg/command/jsonprovider/function_test.go @@ -0,0 +1,250 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +func TestMarshalReturnType(t *testing.T) { + type testcase struct { + Arg cty.Type + Expected any + } + + tests := map[string]testcase{ + "string": { + Arg: cty.String, + Expected: "string", + }, + "number": { + Arg: cty.Number, + Expected: "number", + }, + "bool": { + Arg: cty.Bool, + Expected: "bool", + }, + "object": { + Arg: cty.Object(map[string]cty.Type{"number_type": cty.Number}), + Expected: []any{ + string("object"), + map[string]cty.Type{"number_type": cty.Number}, + }, + }, + "map": { + Arg: cty.Map(cty.String), + Expected: []any{ + string("map"), + cty.String, + }, + }, + "list": { + Arg: cty.List(cty.Bool), + Expected: []any{ + string("list"), + cty.Bool, + }, + }, + "set": { + Arg: cty.Set(cty.Number), + Expected: []any{ + string("set"), + cty.Number, + }, + }, + "tuple": { + Arg: cty.Tuple([]cty.Type{cty.String}), + Expected: []any{ + string("tuple"), + []any{cty.String}, + }, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + actual := marshalReturnType(tc.Arg) + + // to avoid the nightmare of comparing cty primitive types we can marshal them to json and compare that + actualJSON, _ := json.Marshal(actual) + expectedJSON, _ := json.Marshal(tc.Expected) + if !cmp.Equal(actualJSON, expectedJSON) { + t.Fatalf("values don't match:\n %v\n", cmp.Diff(string(actualJSON), string(expectedJSON))) + } + }) + } +} + +func TestMarshalParameter(t *testing.T) { + // used so can make a pointer to it + trueBoolVal := true + + type testcase struct { + Arg providers.FunctionParameterSpec + Expected FunctionParam + } + + tests := map[string]testcase{ + "basic": { + Arg: providers.FunctionParameterSpec{ + Description: "basic string func", + Type: cty.String, + }, + Expected: FunctionParam{ + Description: "basic string func", 
+ Type: cty.String, + }, + }, + "nullable": { + Arg: providers.FunctionParameterSpec{ + Description: "nullable number func", + Type: cty.Number, + AllowNullValue: trueBoolVal, + }, + Expected: FunctionParam{ + Description: "nullable number func", + Type: cty.Number, + IsNullable: &trueBoolVal, + }, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + actual := marshalParameter(tc.Arg) + + // to avoid the nightmare of comparing cty primitive types we can marshal them to json and compare that + actualJSON, _ := json.Marshal(actual) + expectedJSON, _ := json.Marshal(tc.Expected) + if !cmp.Equal(actualJSON, expectedJSON) { + t.Fatalf("values don't match:\n %v\n", cmp.Diff(string(actualJSON), string(expectedJSON))) + } + }) + } +} + +func TestMarshalParameters(t *testing.T) { + type testcase struct { + Arg []providers.FunctionParameterSpec + Expected []FunctionParam + } + + tests := map[string]testcase{ + "basic": { + Arg: []providers.FunctionParameterSpec{{ + Description: "basic string func", + Type: cty.String, + }}, + Expected: []FunctionParam{{ + Description: "basic string func", + Type: cty.String, + }}, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + actual := marshalParameters(tc.Arg) + + // to avoid the nightmare of comparing cty primitive types we can marshal them to json and compare that + actualJSON, _ := json.Marshal(actual) + expectedJSON, _ := json.Marshal(tc.Expected) + if !cmp.Equal(actualJSON, expectedJSON) { + t.Fatalf("values don't match:\n %v\n", cmp.Diff(string(actualJSON), string(expectedJSON))) + } + }) + } +} + +func TestMarshalFunction(t *testing.T) { + type testcase struct { + Arg providers.FunctionSpec + Expected Function + } + + tests := map[string]testcase{ + "basic": { + Arg: providers.FunctionSpec{ + Description: "basic string func", + Return: cty.String, + }, + Expected: Function{ + Description: "basic string func", + ReturnType: cty.String, + }, + }, + "variadic": { + Arg: 
providers.FunctionSpec{ + Description: "basic string func", + Return: cty.String, + VariadicParameter: &providers.FunctionParameterSpec{ + Description: "basic string func", + Type: cty.String, + }, + }, + Expected: Function{ + Description: "basic string func", + ReturnType: cty.String, + VariadicParameter: &FunctionParam{ + Description: "basic string func", + Type: cty.String, + }, + }, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + actual := marshalFunction(tc.Arg) + + // to avoid the nightmare of comparing cty primitive types we can marshal them to json and compare that + actualJSON, _ := json.Marshal(actual) + expectedJSON, _ := json.Marshal(tc.Expected) + if !cmp.Equal(actualJSON, expectedJSON) { + t.Fatalf("values don't match:\n %v\n", cmp.Diff(string(actualJSON), string(expectedJSON))) + } + }) + } +} + +func TestMarshalFunctions(t *testing.T) { + type testcase struct { + Arg map[string]providers.FunctionSpec + Expected map[string]Function + } + + tests := map[string]testcase{ + "basic": { + Arg: map[string]providers.FunctionSpec{"basic_func": { + Description: "basic string func", + Return: cty.String, + }}, + Expected: map[string]Function{"basic_func": { + Description: "basic string func", + ReturnType: cty.String, + }}, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + actual := marshalFunctions(tc.Arg) + + // to avoid the nightmare of comparing cty primitive types we can marshal them to json and compare that + actualJSON, _ := json.Marshal(actual) + expectedJSON, _ := json.Marshal(tc.Expected) + if !cmp.Equal(actualJSON, expectedJSON) { + t.Fatalf("values don't match:\n %v\n", cmp.Diff(string(actualJSON), string(expectedJSON))) + } + }) + } +} diff --git a/pkg/command/jsonprovider/provider.go b/pkg/command/jsonprovider/provider.go new file mode 100644 index 00000000000..28f57ab6d98 --- /dev/null +++ b/pkg/command/jsonprovider/provider.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. +const FormatVersion = "1.0" + +// Providers is the top-level object returned when exporting provider schemas +type Providers struct { + FormatVersion string `json:"format_version"` + Schemas map[string]*Provider `json:"provider_schemas,omitempty"` +} + +type Provider struct { + Provider *Schema `json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"` + DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"` + Functions map[string]*Function `json:"functions,omitempty"` +} + +func newProviders() *Providers { + schemas := make(map[string]*Provider) + return &Providers{ + FormatVersion: FormatVersion, + Schemas: schemas, + } +} + +// MarshalForRenderer converts the provided internation representation of the +// schema into the public structured JSON versions. +// +// This is a format that can be read by the structured plan renderer. 
+func MarshalForRenderer(s *tofu.Schemas) map[string]*Provider { + schemas := make(map[string]*Provider, len(s.Providers)) + for k, v := range s.Providers { + schemas[k.String()] = marshalProvider(v) + } + return schemas +} + +func Marshal(s *tofu.Schemas) ([]byte, error) { + providers := newProviders() + providers.Schemas = MarshalForRenderer(s) + ret, err := json.Marshal(providers) + return ret, err +} + +func marshalProvider(tps providers.ProviderSchema) *Provider { + return &Provider{ + Provider: marshalSchema(tps.Provider), + ResourceSchemas: marshalSchemas(tps.ResourceTypes), + DataSourceSchemas: marshalSchemas(tps.DataSources), + Functions: marshalFunctions(tps.Functions), + } +} diff --git a/pkg/command/jsonprovider/provider_test.go b/pkg/command/jsonprovider/provider_test.go new file mode 100644 index 00000000000..48ae898fffa --- /dev/null +++ b/pkg/command/jsonprovider/provider_test.go @@ -0,0 +1,230 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" +) + +func TestMarshalProvider(t *testing.T) { + tests := []struct { + Input providers.ProviderSchema + Want *Provider + }{ + { + providers.ProviderSchema{}, + &Provider{ + Provider: &Schema{}, + ResourceSchemas: map[string]*Schema{}, + DataSourceSchemas: map[string]*Schema{}, + Functions: map[string]*Function{}, + }, + }, + { + testProvider(), + &Provider{ + Provider: &Schema{ + Block: &Block{ + Attributes: map[string]*Attribute{ + "region": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + }, + DescriptionKind: "plain", + }, + }, + ResourceSchemas: map[string]*Schema{ + "test_instance": { + Version: 42, + Block: &Block{ + Attributes: map[string]*Attribute{ + "id": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + "ami": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "volumes": { + AttributeNestedType: &NestedType{ + NestingMode: "list", + Attributes: map[string]*Attribute{ + "size": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + "mount_point": { + AttributeType: json.RawMessage(`"string"`), + Required: true, + DescriptionKind: "plain", + }, + }, + }, + Optional: true, + DescriptionKind: "plain", + }, + }, + BlockTypes: map[string]*BlockType{ + "network_interface": { + Block: &Block{ + Attributes: map[string]*Attribute{ + "device_index": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "description": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + 
DescriptionKind: "plain", + }, + NestingMode: "list", + }, + }, + DescriptionKind: "plain", + }, + }, + }, + DataSourceSchemas: map[string]*Schema{ + "test_data_source": { + Version: 3, + Block: &Block{ + Attributes: map[string]*Attribute{ + "id": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + Computed: true, + DescriptionKind: "plain", + }, + "ami": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + BlockTypes: map[string]*BlockType{ + "network_interface": { + Block: &Block{ + Attributes: map[string]*Attribute{ + "device_index": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + "description": { + AttributeType: json.RawMessage(`"string"`), + Optional: true, + DescriptionKind: "plain", + }, + }, + DescriptionKind: "plain", + }, + NestingMode: "list", + }, + }, + DescriptionKind: "plain", + }, + }, + }, + Functions: map[string]*Function{}, + }, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprint(i), func(t *testing.T) { + got := marshalProvider(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + }) + } +} + +func testProvider() providers.ProviderSchema { + return providers.ProviderSchema{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Version: 42, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "volumes": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "size": {Type: cty.String, Required: true}, + "mount_point": {Type: cty.String, Required: true}, + 
}, + }, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Version: 3, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + Functions: map[string]providers.FunctionSpec{}, + } +} diff --git a/pkg/command/jsonprovider/schema.go b/pkg/command/jsonprovider/schema.go new file mode 100644 index 00000000000..3d598753c10 --- /dev/null +++ b/pkg/command/jsonprovider/schema.go @@ -0,0 +1,40 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "github.com/kubegems/opentofu/pkg/providers" +) + +type Schema struct { + Version uint64 `json:"version"` + Block *Block `json:"block,omitempty"` +} + +// marshalSchema is a convenience wrapper around mashalBlock. Schema version +// should be set by the caller. 
+func marshalSchema(schema providers.Schema) *Schema { + if schema.Block == nil { + return &Schema{} + } + + var ret Schema + ret.Block = marshalBlock(schema.Block) + ret.Version = uint64(schema.Version) + + return &ret +} + +func marshalSchemas(schemas map[string]providers.Schema) map[string]*Schema { + if schemas == nil { + return map[string]*Schema{} + } + ret := make(map[string]*Schema, len(schemas)) + for k, v := range schemas { + ret[k] = marshalSchema(v) + } + return ret +} diff --git a/pkg/command/jsonprovider/schema_test.go b/pkg/command/jsonprovider/schema_test.go new file mode 100644 index 00000000000..79eda34bb6a --- /dev/null +++ b/pkg/command/jsonprovider/schema_test.go @@ -0,0 +1,52 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonprovider + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/providers" +) + +func TestMarshalSchemas(t *testing.T) { + tests := []struct { + Input map[string]providers.Schema + Want map[string]*Schema + }{ + { + nil, + map[string]*Schema{}, + }, + } + + for _, test := range tests { + got := marshalSchemas(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} + +func TestMarshalSchema(t *testing.T) { + tests := map[string]struct { + Input providers.Schema + Want *Schema + }{ + "nil_block": { + providers.Schema{}, + &Schema{}, + }, + } + + for _, test := range tests { + got := marshalSchema(test.Input) + if !cmp.Equal(got, test.Want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, test.Want)) + } + } +} diff --git a/pkg/command/jsonstate/doc.go b/pkg/command/jsonstate/doc.go new file mode 100644 index 00000000000..c37b441e272 --- /dev/null +++ b/pkg/command/jsonstate/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package jsonstate implements methods for outputting a state in a +// machine-readable json format +package jsonstate diff --git a/pkg/command/jsonstate/state.go b/pkg/command/jsonstate/state.go new file mode 100644 index 00000000000..ef2b11a47a8 --- /dev/null +++ b/pkg/command/jsonstate/state.go @@ -0,0 +1,625 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package jsonstate + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/jsonchecks" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tofu" +) + +const ( + // FormatVersion represents the version of the json format and will be + // incremented for any change to this format that requires changes to a + // consuming parser. + FormatVersion = "1.0" + + ManagedResourceMode = "managed" + DataResourceMode = "data" +) + +// State is the top-level representation of the json format of a tofu +// state. +type State struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Values *StateValues `json:"values,omitempty"` + Checks json.RawMessage `json:"checks,omitempty"` +} + +// StateValues is the common representation of resolved values for both the prior +// state (which is always complete) and the planned new state. 
+type StateValues struct { + Outputs map[string]Output `json:"outputs,omitempty"` + RootModule Module `json:"root_module,omitempty"` +} + +type Output struct { + Sensitive bool `json:"sensitive"` + Value json.RawMessage `json:"value,omitempty"` + Type json.RawMessage `json:"type,omitempty"` +} + +// Module is the representation of a module in state. This can be the root module +// or a child module +type Module struct { + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []Resource `json:"resources,omitempty"` + + // Address is the absolute module address, omitted for the root module + Address string `json:"address,omitempty"` + + // Each module object can optionally have its own nested "child_modules", + // recursively describing the full module tree. + ChildModules []Module `json:"child_modules,omitempty"` +} + +// Resource is the representation of a resource in the state. +type Resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each`. + Index json.RawMessage `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. 
Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues AttributeValues `json:"values,omitempty"` + + // SensitiveValues is similar to AttributeValues, but with all sensitive + // values replaced with true, and all non-sensitive leaf values omitted. + SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` + + // DependsOn contains a list of the resource's dependencies. The entries are + // addresses relative to the containing module. + DependsOn []string `json:"depends_on,omitempty"` + + // Tainted is true if the resource is tainted in tofu state. + Tainted bool `json:"tainted,omitempty"` + + // Deposed is set if the resource is deposed in tofu state. + DeposedKey string `json:"deposed_key,omitempty"` +} + +// AttributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. +type AttributeValues map[string]json.RawMessage + +func marshalAttributeValues(value cty.Value) AttributeValues { + // unmark our value to show all values + value, _ = value.UnmarkDeep() + + if value == cty.NilVal || value.IsNull() { + return nil + } + + ret := make(AttributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// newState() returns a minimally-initialized state +func newState() *State { + return &State{ + FormatVersion: FormatVersion, + } +} + +// MarshalForRenderer returns the pre-json encoding changes of the state, in a +// format available to the structured renderer. +func MarshalForRenderer(sf *statefile.File, schemas *tofu.Schemas) (Module, map[string]Output, error) { + if sf.State.Modules == nil { + // Empty state case. 
+ return Module{}, nil, nil + } + + outputs, err := MarshalOutputs(sf.State.RootModule().OutputValues) + if err != nil { + return Module{}, nil, err + } + + root, err := marshalRootModule(sf.State, schemas) + if err != nil { + return Module{}, nil, err + } + + return root, outputs, err +} + +// MarshalForLog returns the origin JSON compatible state, read for a logging +// package to marshal further. +func MarshalForLog(sf *statefile.File, schemas *tofu.Schemas) (*State, error) { + output := newState() + + if sf == nil || sf.State.Empty() { + return output, nil + } + + if sf.TerraformVersion != nil { + output.TerraformVersion = sf.TerraformVersion.String() + } + + // output.StateValues + err := output.marshalStateValues(sf.State, schemas) + if err != nil { + return nil, err + } + + // output.Checks + if sf.State.CheckResults != nil && sf.State.CheckResults.ConfigResults.Len() > 0 { + output.Checks = jsonchecks.MarshalCheckStates(sf.State.CheckResults) + } + + return output, nil +} + +// Marshal returns the json encoding of a tofu state. +func Marshal(sf *statefile.File, schemas *tofu.Schemas) ([]byte, error) { + output, err := MarshalForLog(sf, schemas) + if err != nil { + return nil, err + } + + ret, err := json.Marshal(output) + return ret, err +} + +func (jsonstate *State) marshalStateValues(s *states.State, schemas *tofu.Schemas) error { + var sv StateValues + var err error + + // only marshal the root module outputs + sv.Outputs, err = MarshalOutputs(s.RootModule().OutputValues) + if err != nil { + return err + } + + // use the state and module map to build up the module structure + sv.RootModule, err = marshalRootModule(s, schemas) + if err != nil { + return err + } + + jsonstate.Values = &sv + return nil +} + +// MarshalOutputs translates a map of states.OutputValue to a map of jsonstate.Output, +// which are defined for json encoding. 
+func MarshalOutputs(outputs map[string]*states.OutputValue) (map[string]Output, error) { + if outputs == nil { + return nil, nil + } + + ret := make(map[string]Output) + for k, v := range outputs { + ty := v.Value.Type() + ov, err := ctyjson.Marshal(v.Value, ty) + if err != nil { + return ret, err + } + ot, err := ctyjson.MarshalType(ty) + if err != nil { + return ret, err + } + ret[k] = Output{ + Value: ov, + Type: ot, + Sensitive: v.Sensitive, + } + } + + return ret, nil +} + +func marshalRootModule(s *states.State, schemas *tofu.Schemas) (Module, error) { + var ret Module + var err error + + ret.Address = "" + rs, err := marshalResources(s.RootModule().Resources, addrs.RootModuleInstance, schemas) + if err != nil { + return ret, err + } + ret.Resources = rs + + // build a map of module -> set[child module addresses] + moduleChildSet := make(map[string]map[string]struct{}) + for _, mod := range s.Modules { + if mod.Addr.IsRoot() { + continue + } else { + for childAddr := mod.Addr; !childAddr.IsRoot(); childAddr = childAddr.Parent() { + if _, ok := moduleChildSet[childAddr.Parent().String()]; !ok { + moduleChildSet[childAddr.Parent().String()] = map[string]struct{}{} + } + moduleChildSet[childAddr.Parent().String()][childAddr.String()] = struct{}{} + } + } + } + + // transform the previous map into map of module -> [child module addresses] + moduleMap := make(map[string][]addrs.ModuleInstance) + for parent, children := range moduleChildSet { + for child := range children { + childModuleInstance, diags := addrs.ParseModuleInstanceStr(child) + if diags.HasErrors() { + return ret, diags.Err() + } + moduleMap[parent] = append(moduleMap[parent], childModuleInstance) + } + } + + // use the state and module map to build up the module structure + ret.ChildModules, err = marshalModules(s, schemas, moduleMap[""], moduleMap) + return ret, err +} + +// marshalModules is an ungainly recursive function to build a module structure +// out of tofu state. 
+func marshalModules( + s *states.State, + schemas *tofu.Schemas, + modules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, +) ([]Module, error) { + var ret []Module + for _, child := range modules { + // cm for child module, naming things is hard. + cm := Module{Address: child.String()} + + // the module may be resourceless and contain only submodules, it will then be nil here + stateMod := s.Module(child) + if stateMod != nil { + rs, err := marshalResources(stateMod.Resources, stateMod.Addr, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + } + + if moduleMap[child.String()] != nil { + moreChildModules, err := marshalModules(s, schemas, moduleMap[child.String()], moduleMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + // sort the child modules by address for consistency. + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} + +func marshalResources(resources map[string]*states.Resource, module addrs.ModuleInstance, schemas *tofu.Schemas) ([]Resource, error) { + var ret []Resource + + var sortedResources []*states.Resource + for _, r := range resources { + sortedResources = append(sortedResources, r) + } + sort.Slice(sortedResources, func(i, j int) bool { + return sortedResources[i].Addr.Less(sortedResources[j].Addr) + }) + + for _, r := range sortedResources { + + var sortedKeys []addrs.InstanceKey + for k := range r.Instances { + sortedKeys = append(sortedKeys, k) + } + sort.Slice(sortedKeys, func(i, j int) bool { + return addrs.InstanceKeyLess(sortedKeys[i], sortedKeys[j]) + }) + + for _, k := range sortedKeys { + ri := r.Instances[k] + + var err error + + resAddr := r.Addr.Resource + + current := Resource{ + Address: r.Addr.Instance(k).String(), + Type: resAddr.Type, + Name: resAddr.Name, + ProviderName: r.ProviderConfig.Provider.String(), + } + + if k != nil { + index := k.Value() + if 
current.Index, err = ctyjson.Marshal(index, index.Type()); err != nil { + return nil, err + } + } + + switch resAddr.Mode { + case addrs.ManagedResourceMode: + current.Mode = ManagedResourceMode + case addrs.DataResourceMode: + current.Mode = DataResourceMode + default: + return ret, fmt.Errorf("resource %s has an unsupported mode %s", + resAddr.String(), + resAddr.Mode.String(), + ) + } + + schema, version := schemas.ResourceTypeConfig( + r.ProviderConfig.Provider, + resAddr.Mode, + resAddr.Type, + ) + + // It is possible that the only instance is deposed + if ri.Current != nil { + if version != ri.Current.SchemaVersion { + return nil, fmt.Errorf("schema version %d for %s in state does not match version %d from the provider", ri.Current.SchemaVersion, resAddr, version) + } + + current.SchemaVersion = ri.Current.SchemaVersion + + if schema == nil { + return nil, fmt.Errorf("no schema found for %s (in provider %s)", resAddr.String(), r.ProviderConfig.Provider) + } + riObj, err := ri.Current.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + current.AttributeValues = marshalAttributeValues(riObj.Value) + + value, marks := riObj.Value.UnmarkDeepWithPaths() + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(value, nil)...) 
+ } + s := SensitiveAsBoolWithPathValueMarks(value, marks) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + current.SensitiveValues = v + + if len(riObj.Dependencies) > 0 { + dependencies := make([]string, len(riObj.Dependencies)) + for i, v := range riObj.Dependencies { + dependencies[i] = v.String() + } + current.DependsOn = dependencies + } + + if riObj.Status == states.ObjectTainted { + current.Tainted = true + } + ret = append(ret, current) + } + + var sortedDeposedKeys []string + for k := range ri.Deposed { + sortedDeposedKeys = append(sortedDeposedKeys, string(k)) + } + sort.Strings(sortedDeposedKeys) + + for _, deposedKey := range sortedDeposedKeys { + rios := ri.Deposed[states.DeposedKey(deposedKey)] + + // copy the base fields from the current instance + deposed := Resource{ + Address: current.Address, + Type: current.Type, + Name: current.Name, + ProviderName: current.ProviderName, + Mode: current.Mode, + Index: current.Index, + } + + riObj, err := rios.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + deposed.AttributeValues = marshalAttributeValues(riObj.Value) + + value, marks := riObj.Value.UnmarkDeepWithPaths() + if schema.ContainsSensitive() { + marks = append(marks, schema.ValueMarks(value, nil)...) 
+ } + s := SensitiveAsBool(value.MarkWithPaths(marks)) + v, err := ctyjson.Marshal(s, s.Type()) + if err != nil { + return nil, err + } + deposed.SensitiveValues = v + + if len(riObj.Dependencies) > 0 { + dependencies := make([]string, len(riObj.Dependencies)) + for i, v := range riObj.Dependencies { + dependencies[i] = v.String() + } + deposed.DependsOn = dependencies + } + + if riObj.Status == states.ObjectTainted { + deposed.Tainted = true + } + deposed.DeposedKey = deposedKey + ret = append(ret, deposed) + } + } + } + + return ret, nil +} + +func SensitiveAsBool(val cty.Value) cty.Value { + return SensitiveAsBoolWithPathValueMarks(val, nil) +} + +func SensitiveAsBoolWithPathValueMarks(val cty.Value, pvms []cty.PathValueMarks) cty.Value { + var sensitiveMarks []cty.PathValueMarks + for _, pvm := range pvms { + if _, ok := pvm.Marks[marks.Sensitive]; ok { + sensitiveMarks = append(sensitiveMarks, pvm) + } + } + return sensitiveAsBoolWithPathValueMarks(val, cty.Path{}, sensitiveMarks) +} + +func sensitiveAsBoolWithPathValueMarks(val cty.Value, path cty.Path, pvms []cty.PathValueMarks) cty.Value { + if val.HasMark(marks.Sensitive) { + return cty.True + } + for _, pvm := range pvms { + if path.Equals(pvm.Path) { + return cty.True + } + } + ty := val.Type() + switch { + case val.IsNull(), ty.IsPrimitiveType(), ty.Equals(cty.DynamicPseudoType): + return cty.False + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + return sensitiveCollectionAsBool(val, path, pvms) + case ty.IsMapType(): + return sensitiveMapAsBool(val, path, pvms) + case ty.IsObjectType(): + return sensitiveObjectAsBool(val, path, pvms) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("sensitiveAsBoolWithPathValueMarks cannot handle %#v", val)) + } +} + +func sensitiveCollectionAsBool(val cty.Value, path []cty.PathStep, pvms []cty.PathValueMarks) cty.Value { + if !val.IsKnown() { + // If the collection is unknown we can't say anything about the 
+ // sensitivity of its contents + return cty.EmptyTupleVal + } + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have sensitive values + return cty.EmptyTupleVal + } + vals := make([]cty.Value, 0, length) + it := val.ElementIterator() + for it.Next() { + kv, ev := it.Element() + path = append(path, cty.IndexStep{ + Key: kv, + }) + vals = append(vals, sensitiveAsBoolWithPathValueMarks(ev, path, pvms)) + path = path[0 : len(path)-1] + } + // The above transform may have changed the types of some of the + // elements, so we'll always use a tuple here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these sequence types are + // indistinguishable in JSON. + return cty.TupleVal(vals) +} + +func sensitiveMapAsBool(val cty.Value, path []cty.PathStep, pvms []cty.PathValueMarks) cty.Value { + if !val.IsKnown() { + // If the map/object is unknown we can't say anything about the + // sensitivity of its attributes + return cty.EmptyObjectVal + } + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have sensitive values + return cty.EmptyObjectVal + } + + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + kv, ev := it.Element() + path = append(path, cty.IndexStep{ + Key: kv, + }) + s := sensitiveAsBoolWithPathValueMarks(ev, path, pvms) + path = path[0 : len(path)-1] + // Omit all of the "false"s for non-sensitive values for more + // compact serialization + if !s.RawEquals(cty.False) { + vals[kv.AsString()] = s + } + } + // The above transform may have changed the types of some of the + // elements, so we'll always use an object here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these mapping types are + // indistinguishable in JSON. 
+ return cty.ObjectVal(vals) +} + +func sensitiveObjectAsBool(val cty.Value, path []cty.PathStep, pvms []cty.PathValueMarks) cty.Value { + if !val.IsKnown() { + // If the map/object is unknown we can't say anything about the + // sensitivity of its attributes + return cty.EmptyObjectVal + } + ty := val.Type() + if len(ty.AttributeTypes()) == 0 { + // If there are no elements then we can't have sensitive values + return cty.EmptyObjectVal + } + vals := make(map[string]cty.Value) + for name := range ty.AttributeTypes() { + av := val.GetAttr(name) + path = append(path, cty.GetAttrStep{ + Name: name, + }) + s := sensitiveAsBoolWithPathValueMarks(av, path, pvms) + path = path[0 : len(path)-1] + if !s.RawEquals(cty.False) { + vals[name] = s + } + } + return cty.ObjectVal(vals) +} diff --git a/pkg/command/jsonstate/state_test.go b/pkg/command/jsonstate/state_test.go new file mode 100644 index 00000000000..67606f2b5dc --- /dev/null +++ b/pkg/command/jsonstate/state_test.go @@ -0,0 +1,1263 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package jsonstate + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestMarshalOutputs(t *testing.T) { + tests := []struct { + Outputs map[string]*states.OutputValue + Want map[string]Output + Err bool + }{ + { + nil, + nil, + false, + }, + { + map[string]*states.OutputValue{ + "test": { + Sensitive: true, + Value: cty.StringVal("sekret"), + }, + }, + map[string]Output{ + "test": { + Sensitive: true, + Value: json.RawMessage(`"sekret"`), + Type: json.RawMessage(`"string"`), + }, + }, + false, + }, + { + map[string]*states.OutputValue{ + "test": { + Sensitive: false, + Value: cty.StringVal("not_so_sekret"), + }, + }, + map[string]Output{ + "test": { + Sensitive: false, + Value: json.RawMessage(`"not_so_sekret"`), + Type: json.RawMessage(`"string"`), + }, + }, + false, + }, + { + map[string]*states.OutputValue{ + "mapstring": { + Sensitive: false, + Value: cty.MapVal(map[string]cty.Value{ + "beep": cty.StringVal("boop"), + }), + }, + "setnumber": { + Sensitive: false, + Value: cty.SetVal([]cty.Value{ + cty.NumberIntVal(3), + cty.NumberIntVal(5), + cty.NumberIntVal(7), + cty.NumberIntVal(11), + }), + }, + }, + map[string]Output{ + "mapstring": { + Sensitive: false, + Value: json.RawMessage(`{"beep":"boop"}`), + Type: json.RawMessage(`["map","string"]`), + }, + "setnumber": { + Sensitive: false, + Value: json.RawMessage(`[3,5,7,11]`), + Type: json.RawMessage(`["set","number"]`), + }, + }, + false, + }, + } + + for _, test := range tests { + got, err := MarshalOutputs(test.Outputs) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } 
else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !cmp.Equal(test.Want, got) { + t.Fatalf("wrong result:\n%s", cmp.Diff(test.Want, got)) + } + } +} + +func TestMarshalAttributeValues(t *testing.T) { + tests := []struct { + Attr cty.Value + Want AttributeValues + }{ + { + cty.NilVal, + nil, + }, + { + cty.NullVal(cty.String), + nil, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + AttributeValues{"foo": json.RawMessage(`"bar"`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + AttributeValues{"foo": json.RawMessage(`null`)}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon"), + }), + }), + AttributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + // Marked values + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon").Mark(marks.Sensitive), + }), + }), + AttributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), + }, + }, + } + + for _, test := range tests { + got := marshalAttributeValues(test.Attr) + eq := reflect.DeepEqual(got, test.Want) + if !eq { + t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) + } + } +} + +func TestMarshalResources(t *testing.T) { + deposedKey := states.NewDeposedKey() + tests := map[string]struct { + Resources map[string]*states.Resource + Schemas *tofu.Schemas + Want []Resource + Err bool + }{ + "nil": { + nil, + nil, + nil, + false, + }, + "single resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: 
addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "single resource_with_sensitive": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles","foozles":"sensuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`"sensuzles"`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with marks": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: 
"test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"foozles":"confuzles"}`), + AttrSensitivePaths: []cty.PathValueMarks{{ + Path: cty.Path{cty.GetAttrStep{Name: "foozles"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`"confuzles"`), + "woozles": json.RawMessage(`null`), + }, + SensitiveValues: json.RawMessage(`{"foozles":true}`), + }, + }, + false, + }, + "single resource wrong schema": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + SchemaVersion: 1, + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":["confuzles"]}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + nil, + true, + }, + "resource with count": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.IntKey(0): { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + 
}, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar[0]", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: json.RawMessage(`0`), + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with for_each": { + map[string]*states.Resource{ + "test_thing.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.StringKey("rockhopper"): { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar[\"rockhopper\"]", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: json.RawMessage(`"rockhopper"`), + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "deposed resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ + states.DeposedKey(deposedKey): { + Status: 
states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + DeposedKey: deposedKey.String(), + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "deposed and current resource": { + map[string]*states.Resource{ + "test_thing.baz": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Deposed: map[states.DeposedKey]*states.ResourceInstanceObjectSrc{ + states.DeposedKey(deposedKey): { + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + { + Address: "test_thing.bar", + Mode: "managed", + Type: "test_thing", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + DeposedKey: deposedKey.String(), + 
AttributeValues: AttributeValues{ + "foozles": json.RawMessage(`null`), + "woozles": json.RawMessage(`"confuzles"`), + }, + SensitiveValues: json.RawMessage("{\"foozles\":true}"), + }, + }, + false, + }, + "resource with marked map attr": { + map[string]*states.Resource{ + "test_map_attr.bar": { + Addr: addrs.AbsResource{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_map_attr", + Name: "bar", + }, + }, + Instances: map[addrs.InstanceKey]*states.ResourceInstance{ + addrs.NoKey: { + Current: &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"data":{"woozles":"confuzles"}}`), + AttrSensitivePaths: []cty.PathValueMarks{{ + Path: cty.Path{cty.GetAttrStep{Name: "data"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + }, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + testSchemas(), + []Resource{ + { + Address: "test_map_attr.bar", + Mode: "managed", + Type: "test_map_attr", + Name: "bar", + Index: nil, + ProviderName: "registry.opentofu.org/hashicorp/test", + AttributeValues: AttributeValues{ + "data": json.RawMessage(`{"woozles":"confuzles"}`), + }, + SensitiveValues: json.RawMessage(`{"data":true}`), + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, err := marshalResources(test.Resources, addrs.RootModuleInstance, test.Schemas) + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + diff := cmp.Diff(got, test.Want) + if diff != "" { + t.Fatalf("wrong result: %s\n", diff) + } + + }) + } +} + +func TestMarshalModules_basic(t *testing.T) { + childModule, _ := addrs.ParseModuleInstanceStr("module.child") + subModule, _ := addrs.ParseModuleInstanceStr("module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + 
s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(childModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: childModule.Module(), + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + moduleMap := make(map[string][]addrs.ModuleInstance) + moduleMap[""] = []addrs.ModuleInstance{childModule, subModule} + + got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got) != 2 { + t.Fatalf("wrong result! got %d modules, expected 2", len(got)) + } + + if got[0].Address != "module.child" || got[1].Address != "module.submodule" { + t.Fatalf("wrong result! 
got %#v\n", got) + } + +} + +func TestMarshalModules_nested(t *testing.T) { + childModule, _ := addrs.ParseModuleInstanceStr("module.child") + subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(childModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: childModule.Module(), + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + moduleMap := make(map[string][]addrs.ModuleInstance) + moduleMap[""] = []addrs.ModuleInstance{childModule} + moduleMap[childModule.String()] = []addrs.ModuleInstance{subModule} + + got, err := marshalModules(testState, testSchemas(), moduleMap[""], moduleMap) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got) != 1 { + t.Fatalf("wrong result! 
got %d modules, expected 1", len(got)) + } + + if got[0].Address != "module.child" { + t.Fatalf("wrong result! got %#v\n", got) + } + + if got[0].ChildModules[0].Address != "module.child.module.submodule" { + t.Fatalf("wrong result! got %#v\n", got) + } +} + +func TestMarshalModules_parent_no_resources(t *testing.T) { + subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + got, err := marshalRootModule(testState, testSchemas()) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got.ChildModules) != 1 { + t.Fatalf("wrong result! got %d modules, expected 1", len(got.ChildModules)) + } + + if got.ChildModules[0].Address != "module.child" { + t.Fatalf("wrong result! got %#v\n", got) + } + + if got.ChildModules[0].ChildModules[0].Address != "module.child.module.submodule" { + t.Fatalf("wrong result! 
got %#v\n", got) + } +} + +func testSchemas() *tofu.Schemas { + return &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): { + ResourceTypes: map[string]providers.Schema{ + "test_thing": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "woozles": {Type: cty.String, Optional: true, Computed: true}, + "foozles": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + "test_map_attr": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "data": {Type: cty.Map(cty.String), Optional: true, Computed: true, Sensitive: true}, + }, + }, + }, + }, + }, + }, + } +} + +func TestSensitiveAsBool(t *testing.T) { + tests := []struct { + Input cty.Value + Want cty.Value + }{ + { + cty.StringVal("hello"), + cty.False, + }, + { + cty.NullVal(cty.String), + cty.False, + }, + { + cty.StringVal("hello").Mark(marks.Sensitive), + cty.True, + }, + { + cty.NullVal(cty.String).Mark(marks.Sensitive), + cty.True, + }, + + { + cty.NullVal(cty.DynamicPseudoType).Mark(marks.Sensitive), + cty.True, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})), + cty.False, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"test": cty.String})).Mark(marks.Sensitive), + cty.True, + }, + { + cty.DynamicVal, + cty.False, + }, + { + cty.DynamicVal.Mark(marks.Sensitive), + cty.True, + }, + + { + cty.ListValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.ListValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend").Mark(marks.Sensitive), + }), + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + 
}), + }, + { + cty.SetValEmpty(cty.String), + cty.EmptyTupleVal, + }, + { + cty.SetValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.False}), + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello").Mark(marks.Sensitive)}), + cty.True, + }, + { + cty.EmptyTupleVal.Mark(marks.Sensitive), + cty.True, + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend").Mark(marks.Sensitive), + }), + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.MapValEmpty(cty.String), + cty.EmptyObjectVal, + }, + { + cty.MapValEmpty(cty.String).Mark(marks.Sensitive), + cty.True, + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + cty.EmptyObjectVal, + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }).Mark(marks.Sensitive), + cty.True, + }, + { + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + cty.EmptyObjectVal, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse").Mark(marks.Sensitive), + }).Mark(marks.Sensitive), + cty.True, + }, + { + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known").Mark(marks.Sensitive), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.ListVal([]cty.Value{ + cty.MapValEmpty(cty.String), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("known").Mark(marks.Sensitive), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }), + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + { + cty.ObjectVal(map[string]cty.Value{ + "list": cty.UnknownVal(cty.List(cty.String)), + "set": cty.UnknownVal(cty.Set(cty.Bool)), + "tuple": cty.UnknownVal(cty.Tuple([]cty.Type{cty.String, cty.Number})), + "map": cty.UnknownVal(cty.Map(cty.String)), + "object": cty.UnknownVal(cty.Object(map[string]cty.Type{"a": cty.String})), + }), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.EmptyTupleVal, + "set": cty.EmptyTupleVal, + "tuple": cty.EmptyTupleVal, + "map": cty.EmptyObjectVal, + "object": cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := SensitiveAsBool(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} + +func TestSensitiveAsBoolWithPathValueMarks(t *testing.T) { + tests := []struct { + Input cty.Value + Pvms []cty.PathValueMarks + Want cty.Value + }{ + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("hello").Mark(marks.Sensitive), + cty.StringVal("friend"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: 
cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.True, + cty.True, + }), + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("hello").Mark(marks.Sensitive), + cty.StringVal("friend"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.True, + cty.True, + }), + }, + { + cty.SetVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("friend"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.StringVal("hello")}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.False, + cty.True, + }), + }, + { + cty.MapVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.StringVal("animal")}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.GetAttrStep{Name: "animal"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + }, + { + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}, cty.GetAttrStep{Name: "a"}}, + Marks: 
cty.NewValueMarks(marks.Sensitive)}, + }, + + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}, cty.GetAttrStep{Name: "a"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + }), + }, + { + cty.ListVal([]cty.Value{ + cty.MapValEmpty(cty.String), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("known"), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + }), + []cty.PathValueMarks{{ + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(1)}, cty.IndexStep{Key: cty.StringVal("a")}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + cty.TupleVal([]cty.Value{ + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("cat"), + }), + cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("horse"), + }), + }), + []cty.PathValueMarks{{ + Path: cty.Path{ + cty.IndexStep{Key: cty.ObjectVal(map[string]cty.Value{ + "greeting": cty.StringVal("hello"), + "animal": cty.StringVal("cat"), + })}, + cty.GetAttrStep{Name: "animal"}}, + Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "animal": cty.True, + }), + cty.EmptyObjectVal, + }), + }, + } + + for _, test := range tests { + got := SensitiveAsBoolWithPathValueMarks(test.Input, test.Pvms) + if !reflect.DeepEqual(got, test.Want) { + 
t.Errorf( + "wrong result\ninput: %#v\ngot: %#v\nwant: %#v", + test.Input, got, test.Want, + ) + } + } +} diff --git a/pkg/command/login.go b/pkg/command/login.go new file mode 100644 index 00000000000..4f44bff3ac0 --- /dev/null +++ b/pkg/command/login.go @@ -0,0 +1,830 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net" + "net/http" + "net/url" + "path/filepath" + "strings" + + tfe "github.com/hashicorp/go-tfe" + svchost "github.com/hashicorp/terraform-svchost" + svcauth "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + + "github.com/kubegems/opentofu/pkg/command/cliconfig" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" + + uuid "github.com/hashicorp/go-uuid" + "golang.org/x/oauth2" +) + +// This is HashiCorp's cloud host. +// There are a few special circumstances that depend on this whitelisted hostname. +const tfeHost = "app.terraform.io" + +// LoginCommand is a Command implementation that runs an interactive login +// flow for a remote service host. It then stashes credentials in a tfrc +// file in the user's home directory. +type LoginCommand struct { + Meta +} + +// Run implements cli.Command. 
+func (c *LoginCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.extendedFlagSet("login") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error( + "The login command expects exactly one argument: the host to log in to.") + cmdFlags.Usage() + return 1 + } + + var diags tfdiags.Diagnostics + + if !c.input { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Login is an interactive command", + "The \"tofu login\" command uses interactive prompts to obtain and record credentials, so it can't be run with input disabled.\n\nTo configure credentials in a non-interactive context, write existing credentials directly to a CLI configuration file.", + )) + c.showDiagnostics(diags) + return 1 + } + + givenHostname := args[0] + + hostname, err := svchost.ForComparison(givenHostname) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid hostname", + fmt.Sprintf("The given hostname %q is not valid: %s.", givenHostname, err.Error()), + )) + c.showDiagnostics(diags) + return 1 + } + + // From now on, since we've validated the given hostname, we should use + // dispHostname in the UI to ensure we're presenting it in the canonical + // form, in case that helpers users with debugging when things aren't + // working as expected. (Perhaps the normalization is part of the cause.) + dispHostname := hostname.ForDisplay() + + host, err := c.Services.Discover(hostname) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Service discovery failed for "+dispHostname, + + // Contrary to usual Go idiom, the Discover function returns + // full sentences with initial capitalization in its error messages, + // and they are written with the end-user as the audience. 
We + // only need to add the trailing period to make them consistent + // with our usual error reporting standards. + err.Error()+".", + )) + c.showDiagnostics(diags) + return 1 + } + + creds := c.Services.CredentialsSource().(*cliconfig.CredentialsSource) + filename, _ := creds.CredentialsFilePath() + credsCtx := &loginCredentialsContext{ + Location: creds.HostCredentialsLocation(hostname), + LocalFilename: filename, // empty in the very unlikely event that we can't select a config directory for this user + HelperType: creds.CredentialsHelperType(), + } + + clientConfig, err := host.ServiceOAuthClient("login.v1") + switch err.(type) { + case nil: + // Great! No problem, then. + case *disco.ErrServiceNotProvided: + // This is also fine! We'll try the manual token creation process. + case *disco.ErrVersionNotSupported: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Host does not support OpenTofu login", + fmt.Sprintf("The given hostname %q allows creating OpenTofu authorization tokens, but requires a newer version of OpenTofu CLI to do so.", dispHostname), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Host does not support OpenTofu login", + fmt.Sprintf("The given hostname %q cannot support \"tofu login\": %s.", dispHostname, err), + )) + } + + // If login service is unavailable, check for a TFE v2 API as fallback + var tfeservice *url.URL + if clientConfig == nil { + tfeservice, err = host.ServiceURL("tfe.v2") + switch err.(type) { + case nil: + // Success! 
+ case *disco.ErrServiceNotProvided: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Host does not support OpenTofu tokens API", + fmt.Sprintf("The given hostname %q does not support creating OpenTofu authorization tokens.", dispHostname), + )) + case *disco.ErrVersionNotSupported: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Host does not support OpenTofu tokens API", + fmt.Sprintf("The given hostname %q allows creating OpenTofu authorization tokens, but requires a newer version of OpenTofu CLI to do so.", dispHostname), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Host does not support OpenTofu tokens API", + fmt.Sprintf("The given hostname %q cannot support \"tofu login\": %s.", dispHostname, err), + )) + } + } + + if credsCtx.Location == cliconfig.CredentialsInOtherFile { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Credentials for %s are manually configured", dispHostname), + "The \"tofu login\" command cannot log in because credentials for this host are already configured in a CLI configuration file.\n\nTo log in, first revoke the existing credentials and remove that block from the CLI configuration.", + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + var token svcauth.HostCredentialsToken + var tokenDiags tfdiags.Diagnostics + + // Prefer OpenTofu login if available + if clientConfig != nil { + var oauthToken *oauth2.Token + + switch { + case clientConfig.SupportedGrantTypes.Has(disco.OAuthAuthzCodeGrant): + // We prefer an OAuth code grant if the server supports it. + oauthToken, tokenDiags = c.interactiveGetTokenByCode(hostname, credsCtx, clientConfig) + case clientConfig.SupportedGrantTypes.Has(disco.OAuthOwnerPasswordGrant) && hostname == svchost.Hostname(tfeHost): + // The password grant type is allowed only for Terraform Cloud SaaS. 
+ // Note this case is purely theoretical at this point, as TFC currently uses + // its own bespoke login protocol (tfe) + oauthToken, tokenDiags = c.interactiveGetTokenByPassword(hostname, credsCtx, clientConfig) + default: + tokenDiags = tokenDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Host does not support OpenTofu login", + fmt.Sprintf("The given hostname %q does not allow any OAuth grant types that are supported by this version of OpenTofu.", dispHostname), + )) + } + if oauthToken != nil { + token = svcauth.HostCredentialsToken(oauthToken.AccessToken) + } + } else if tfeservice != nil { + token, tokenDiags = c.interactiveGetTokenByUI(hostname, credsCtx, tfeservice) + } + + diags = diags.Append(tokenDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + err = creds.StoreForHost(hostname, token) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to save API token", + fmt.Sprintf("The given host returned an API token, but OpenTofu failed to save it: %s.", err), + )) + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + c.Ui.Output("\n---------------------------------------------------------------------------------\n") + if hostname == tfeHost { // Terraform Cloud + var motd struct { + Message string `json:"msg"` + Errors []interface{} `json:"errors"` + } + + // Throughout the entire process of fetching a MOTD from TFC, use a default + // message if the platform-provided message is unavailable for any reason - + // be it the service isn't provided, the request failed, or any sort of + // platform error returned. 
+ + motdServiceURL, err := host.ServiceURL("motd.v1") + if err != nil { + c.logMOTDError(err) + c.outputDefaultTFCLoginSuccess() + return 0 + } + + req, err := http.NewRequest("GET", motdServiceURL.String(), nil) + if err != nil { + c.logMOTDError(err) + c.outputDefaultTFCLoginSuccess() + return 0 + } + + req.Header.Set("Authorization", "Bearer "+token.Token()) + + resp, err := httpclient.New().Do(req) + if err != nil { + c.logMOTDError(err) + c.outputDefaultTFCLoginSuccess() + return 0 + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + c.logMOTDError(err) + c.outputDefaultTFCLoginSuccess() + return 0 + } + + defer resp.Body.Close() + json.Unmarshal(body, &motd) + + if motd.Errors == nil && motd.Message != "" { + c.Ui.Output( + c.Colorize().Color(motd.Message), + ) + return 0 + } else { + c.logMOTDError(fmt.Errorf("platform responded with errors or an empty message")) + c.outputDefaultTFCLoginSuccess() + return 0 + } + } + + if tfeservice != nil { // Terraform Enterprise + c.outputDefaultTFELoginSuccess(dispHostname) + } else { + c.Ui.Output( + fmt.Sprintf( + c.Colorize().Color(strings.TrimSpace(` +[green][bold]Success![reset] [bold]OpenTofu has obtained and saved an API token.[reset] + +The new API token will be used for any future OpenTofu command that must make +authenticated requests to %s. 
+`)), + dispHostname, + ) + "\n", + ) + } + + return 0 +} + +func (c *LoginCommand) outputDefaultTFELoginSuccess(dispHostname string) { + c.Ui.Output( + fmt.Sprintf( + c.Colorize().Color(strings.TrimSpace(` +[green][bold]Success![reset] [bold]Logged in to the cloud backend (%s)[reset] +`)), + dispHostname, + ) + "\n", + ) +} + +func (c *LoginCommand) outputDefaultTFCLoginSuccess() { + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(` +[green][bold]Success![reset] [bold]Logged in to cloud backend[reset] +` + "\n"))) +} + +func (c *LoginCommand) logMOTDError(err error) { + log.Printf("[TRACE] login: An error occurred attempting to fetch a message of the day for cloud backend: %s", err) +} + +// Help implements cli.Command. +func (c *LoginCommand) Help() string { + defaultFile := c.defaultOutputFile() + if defaultFile == "" { + // Because this is just for the help message and it's very unlikely + // that a user wouldn't have a functioning home directory anyway, + // we'll just use a placeholder here. The real command has some + // more complex behavior for this case. This result is not correct + // on all platforms, but given how unlikely we are to hit this case + // that seems okay. + defaultFile = "~/.terraform/credentials.tfrc.json" + } + + helpText := fmt.Sprintf(` +Usage: tofu [global options] login [hostname] + + Retrieves an authentication token for the given hostname, if it supports + automatic login, and saves it in a credentials file in your home directory. + + If not overridden by credentials helper settings in the CLI configuration, + the credentials will be written to the following local file: + %s +`, defaultFile) + return strings.TrimSpace(helpText) +} + +// Synopsis implements cli.Command. 
+func (c *LoginCommand) Synopsis() string { + return "Obtain and save credentials for a remote host" +} + +func (c *LoginCommand) defaultOutputFile() string { + if c.CLIConfigDir == "" { + return "" // no default available + } + return filepath.Join(c.CLIConfigDir, "credentials.tfrc.json") +} + +func (c *LoginCommand) interactiveGetTokenByCode(hostname svchost.Hostname, credsCtx *loginCredentialsContext, clientConfig *disco.OAuthClient) (*oauth2.Token, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + confirm, confirmDiags := c.interactiveContextConsent(hostname, disco.OAuthAuthzCodeGrant, credsCtx) + diags = diags.Append(confirmDiags) + if !confirm { + diags = diags.Append(errors.New("Login cancelled")) + return nil, diags + } + + // We'll use an entirely pseudo-random UUID for our temporary request + // state. The OAuth server must echo this back to us in the callback + // request to make it difficult for some other running process to + // interfere by sending its own request to our temporary server. + reqState, err := uuid.GenerateUUID() + if err != nil { + // This should be very unlikely, but could potentially occur if e.g. + // there's not enough pseudo-random entropy available. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't generate login request state", + fmt.Sprintf("Cannot generate random request identifier for login request: %s.", err), + )) + return nil, diags + } + + proofKey, proofKeyChallenge, err := c.proofKey() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't generate login request state", + fmt.Sprintf("Cannot generate random prrof key for login request: %s.", err), + )) + return nil, diags + } + + listener, callbackURL, err := c.listenerForCallback(clientConfig.MinPort, clientConfig.MaxPort) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't start temporary login server", + fmt.Sprintf( + "The login process uses OAuth, which requires starting a temporary HTTP server on localhost. However, no TCP port numbers between %d and %d are available to create such a server.", + clientConfig.MinPort, clientConfig.MaxPort, + ), + )) + return nil, diags + } + + // codeCh will allow our temporary HTTP server to transmit the OAuth code + // to the main execution path that follows. + codeCh := make(chan string) + server := &http.Server{ + Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + log.Printf("[TRACE] login: request to callback server") + err := req.ParseForm() + if err != nil { + log.Printf("[ERROR] login: cannot ParseForm on callback request: %s", err) + resp.WriteHeader(400) + return + } + gotState := req.Form.Get("state") + if gotState != reqState { + log.Printf("[ERROR] login: incorrect \"state\" value in callback request") + resp.WriteHeader(400) + return + } + gotCode := req.Form.Get("code") + if gotCode == "" { + log.Printf("[ERROR] login: no \"code\" argument in callback request") + resp.WriteHeader(400) + return + } + + log.Printf("[TRACE] login: request contains an authorization code") + + // Send the code to our blocking wait below, so that the token + // fetching process can continue. 
+ codeCh <- gotCode + close(codeCh) + + log.Printf("[TRACE] login: returning response from callback server") + + resp.Header().Add("Content-Type", "text/html") + resp.WriteHeader(200) + resp.Write([]byte(callbackSuccessMessage)) + }), + } + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + err := server.Serve(listener) + if err != nil && err != http.ErrServerClosed { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't start temporary login server", + fmt.Sprintf( + "The login process uses OAuth, which requires starting a temporary HTTP server on localhost. However, no TCP port numbers between %d and %d are available to create such a server.", + clientConfig.MinPort, clientConfig.MaxPort, + ), + )) + close(codeCh) + } + }() + + oauthConfig := &oauth2.Config{ + ClientID: clientConfig.ID, + Endpoint: clientConfig.Endpoint(), + RedirectURL: callbackURL, + Scopes: clientConfig.Scopes, + } + + authCodeURL := oauthConfig.AuthCodeURL( + reqState, + oauth2.SetAuthURLParam("code_challenge", proofKeyChallenge), + oauth2.SetAuthURLParam("code_challenge_method", "S256"), + ) + + launchBrowserManually := false + if c.BrowserLauncher != nil { + err = c.BrowserLauncher.OpenURL(authCodeURL) + if err == nil { + c.Ui.Output(fmt.Sprintf("OpenTofu must now open a web browser to the login page for %s.\n", hostname.ForDisplay())) + c.Ui.Output(fmt.Sprintf("If a browser does not open this automatically, open the following URL to proceed:\n %s\n", authCodeURL)) + } else { + // Assume we're on a platform where opening a browser isn't possible. 
+ launchBrowserManually = true + } + } else { + launchBrowserManually = true + } + + if launchBrowserManually { + c.Ui.Output(fmt.Sprintf("Open the following URL to access the login page for %s:\n %s\n", hostname.ForDisplay(), authCodeURL)) + } + + c.Ui.Output("OpenTofu will now wait for the host to signal that login was successful.\n") + + var code string + var ok bool + select { + case <-c.ShutdownCh: + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Action aborted", + "Current command was aborted by the calling code.", + ), + ) + code, ok = "", true + close(codeCh) + case code, ok = <-codeCh: + } + + if !ok { + // If we got no code at all then the server wasn't able to start + // up, so we'll just give up. + return nil, diags + } + + if err := server.Close(); err != nil { + // The server will close soon enough when our process exits anyway, + // so we won't fuss about it for right now. + log.Printf("[WARN] login: callback server can't shut down: %s", err) + } + + if code == "" { + // empty code is not possible in happy path as it is validated in the HTTP handler of our callback server + // so it means, the current command was interrupted by the shutdown signal + return nil, diags + } + + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpclient.New()) + token, err := oauthConfig.Exchange( + ctx, code, + oauth2.SetAuthURLParam("code_verifier", proofKey), + ) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to obtain auth token", + fmt.Sprintf("The remote server did not assign an auth token: %s.", err), + )) + return nil, diags + } + + return token, diags +} + +func (c *LoginCommand) interactiveGetTokenByPassword(hostname svchost.Hostname, credsCtx *loginCredentialsContext, clientConfig *disco.OAuthClient) (*oauth2.Token, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + confirm, confirmDiags := c.interactiveContextConsent(hostname, disco.OAuthOwnerPasswordGrant, credsCtx) + diags = 
diags.Append(confirmDiags) + if !confirm { + diags = diags.Append(errors.New("Login cancelled")) + return nil, diags + } + + c.Ui.Output("\n---------------------------------------------------------------------------------\n") + c.Ui.Output("OpenTofu must temporarily use your password to request an API token.\nThis password will NOT be saved locally.\n") + + username, err := c.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "username", + Query: fmt.Sprintf("Username for %s:", hostname.ForDisplay()), + }) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to request username: %w", err)) + return nil, diags + } + password, err := c.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "password", + Query: fmt.Sprintf("Password for %s:", hostname.ForDisplay()), + Secret: true, + }) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to request password: %w", err)) + return nil, diags + } + + oauthConfig := &oauth2.Config{ + ClientID: clientConfig.ID, + Endpoint: clientConfig.Endpoint(), + Scopes: clientConfig.Scopes, + } + token, err := oauthConfig.PasswordCredentialsToken(context.Background(), username, password) + if err != nil { + // FIXME: The OAuth2 library generates errors that are not appropriate + // for a Terraform end-user audience, so once we have more experience + // with which errors are most common we should try to recognize them + // here and produce better error messages for them. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to retrieve API token", + fmt.Sprintf("The remote host did not issue an API token: %s.", err), + )) + } + + return token, diags +} + +func (c *LoginCommand) interactiveGetTokenByUI(hostname svchost.Hostname, credsCtx *loginCredentialsContext, service *url.URL) (svcauth.HostCredentialsToken, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + confirm, confirmDiags := c.interactiveContextConsent(hostname, disco.OAuthGrantType(""), credsCtx) + diags = diags.Append(confirmDiags) + if !confirm { + diags = diags.Append(errors.New("Login cancelled")) + return "", diags + } + + c.Ui.Output("\n---------------------------------------------------------------------------------\n") + + tokensURL := url.URL{ + Scheme: "https", + Host: service.Hostname(), + Path: "/app/settings/tokens", + RawQuery: "source=terraform-login", + } + + launchBrowserManually := false + if c.BrowserLauncher != nil { + err := c.BrowserLauncher.OpenURL(tokensURL.String()) + if err == nil { + c.Ui.Output(fmt.Sprintf("OpenTofu must now open a web browser to the tokens page for %s.\n", hostname.ForDisplay())) + c.Ui.Output(fmt.Sprintf("If a browser does not open this automatically, open the following URL to proceed:\n %s\n", tokensURL.String())) + } else { + log.Printf("[DEBUG] error opening web browser: %s", err) + // Assume we're on a platform where opening a browser isn't possible. 
+ launchBrowserManually = true + } + } else { + launchBrowserManually = true + } + + if launchBrowserManually { + c.Ui.Output(fmt.Sprintf("Open the following URL to access the tokens page for %s:\n %s\n", hostname.ForDisplay(), tokensURL.String())) + } + + c.Ui.Output("\n---------------------------------------------------------------------------------\n") + c.Ui.Output("Generate a token using your browser, and copy-paste it into this prompt.\n") + + // credsCtx might not be set if we're using a mock credentials source + // in a test, but it should always be set in normal use. + if credsCtx != nil { + switch credsCtx.Location { + case cliconfig.CredentialsViaHelper: + c.Ui.Output(fmt.Sprintf("OpenTofu will store the token in the configured %q credentials helper\nfor use by subsequent commands.\n", credsCtx.HelperType)) + case cliconfig.CredentialsInPrimaryFile, cliconfig.CredentialsNotAvailable: + c.Ui.Output(fmt.Sprintf("OpenTofu will store the token in plain text in the following file\nfor use by subsequent commands:\n %s\n", credsCtx.LocalFilename)) + } + } + + token, err := c.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "token", + Query: fmt.Sprintf("Token for %s:", hostname.ForDisplay()), + Secret: true, + }) + if err != nil { + diags := diags.Append(fmt.Errorf("Failed to retrieve token: %w", err)) + return "", diags + } + + token = strings.TrimSpace(token) + cfg := &tfe.Config{ + Address: service.String(), + BasePath: service.Path, + Token: token, + Headers: make(http.Header), + } + + // Update user-agent from 'go-tfe' to opentofu + cfg.Headers.Set("User-Agent", httpclient.OpenTofuUserAgent(version.String())) + + client, err := tfe.NewClient(cfg) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to create API client: %w", err)) + return "", diags + } + user, err := client.Users.ReadCurrent(context.Background()) + if err == tfe.ErrUnauthorized { + diags = diags.Append(fmt.Errorf("Token is invalid: %w", err)) + return "", diags + } else 
if err != nil { + diags = diags.Append(fmt.Errorf("Failed to retrieve user account details: %w", err)) + return "", diags + } + c.Ui.Output(fmt.Sprintf(c.Colorize().Color("\nRetrieved token for user [bold]%s[reset]\n"), user.Username)) + + return svcauth.HostCredentialsToken(token), nil +} + +func (c *LoginCommand) interactiveContextConsent(hostname svchost.Hostname, grantType disco.OAuthGrantType, credsCtx *loginCredentialsContext) (bool, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + mechanism := "OAuth" + if grantType == "" { + mechanism = "your browser" + } + + c.Ui.Output(fmt.Sprintf("OpenTofu will request an API token for %s using %s.\n", hostname.ForDisplay(), mechanism)) + + if grantType.UsesAuthorizationEndpoint() { + c.Ui.Output( + "This will work only if you are able to use a web browser on this computer to\ncomplete a login process. If not, you must obtain an API token by another\nmeans and configure it in the CLI configuration manually.\n", + ) + } + + // credsCtx might not be set if we're using a mock credentials source + // in a test, but it should always be set in normal use. + if credsCtx != nil { + switch credsCtx.Location { + case cliconfig.CredentialsViaHelper: + c.Ui.Output(fmt.Sprintf("If login is successful, OpenTofu will store the token in the configured\n%q credentials helper for use by subsequent commands.\n", credsCtx.HelperType)) + case cliconfig.CredentialsInPrimaryFile, cliconfig.CredentialsNotAvailable: + c.Ui.Output(fmt.Sprintf("If login is successful, OpenTofu will store the token in plain text in\nthe following file for use by subsequent commands:\n %s\n", credsCtx.LocalFilename)) + } + } + + v, err := c.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "approve", + Query: "Do you want to proceed?", + Description: `Only 'yes' will be accepted to confirm.`, + }) + if err != nil { + // Should not happen because this command checks that input is enabled + // before we get to this point. 
+ diags = diags.Append(err) + return false, diags + } + + return strings.ToLower(v) == "yes", diags +} + +func (c *LoginCommand) listenerForCallback(minPort, maxPort uint16) (net.Listener, string, error) { + if minPort < 1024 || maxPort < 1024 { + // This should never happen because it should've been checked by + // the svchost/disco package when reading the service description, + // but we'll prefer to fail hard rather than inadvertently trying + // to open an unprivileged port if there are bugs at that layer. + panic("listenerForCallback called with privileged port number") + } + + availCount := int(maxPort) - int(minPort) + + // We're going to try port numbers within the range at random, so we need + // to terminate eventually in case _none_ of the ports are available. + // We'll make that 150% of the number of ports just to give us some room + // for the random number generator to generate the same port more than + // once. + // Note that we don't really care about true randomness here... we're just + // trying to hop around in the available port space rather than always + // working up from the lowest, because we have no information to predict + // that any particular number will be more likely to be available than + // another. + maxTries := availCount + (availCount / 2) + + for tries := 0; tries < maxTries; tries++ { + port := rand.Intn(availCount) + int(minPort) + addr := fmt.Sprintf("127.0.0.1:%d", port) + log.Printf("[TRACE] login: trying %s as a listen address for temporary OAuth callback server", addr) + l, err := net.Listen("tcp4", addr) + if err == nil { + // We use a path that doesn't end in a slash here because some + // OAuth server implementations don't allow callback URLs to + // end with slashes. 
+ callbackURL := fmt.Sprintf("http://localhost:%d/login", port) + log.Printf("[TRACE] login: callback URL will be %s", callbackURL) + return l, callbackURL, nil + } + } + + return nil, "", fmt.Errorf("no suitable TCP ports (between %d and %d) are available for the temporary OAuth callback server", minPort, maxPort) +} + +func (c *LoginCommand) proofKey() (key, challenge string, err error) { + // Wel use a UUID-like string as the "proof key for code exchange" (PKCE) + // that will eventually authenticate our request to the token endpoint. + // Standard UUIDs are explicitly not suitable as secrets according to the + // UUID spec, but our go-uuid just generates totally random number sequences + // formatted in the conventional UUID syntax, so that concern does not + // apply here: this is just a 128-bit crypto-random number. + uu, err := uuid.GenerateUUID() + if err != nil { + return "", "", err + } + + key = fmt.Sprintf("%s.%09d", uu, rand.Intn(999999999)) + + h := sha256.New() + h.Write([]byte(key)) + challenge = base64.RawURLEncoding.EncodeToString(h.Sum(nil)) + + return key, challenge, nil +} + +type loginCredentialsContext struct { + Location cliconfig.CredentialsLocation + LocalFilename string + HelperType string +} + +const callbackSuccessMessage = ` + + +OpenTofu Login + + + + +

The login server has returned an authentication code to OpenTofu.

+

Now close this page and return to the terminal where tofu login +is running to see the result of the login process.

+ + + +` diff --git a/pkg/command/login_test.go b/pkg/command/login_test.go new file mode 100644 index 00000000000..3d009e7fd37 --- /dev/null +++ b/pkg/command/login_test.go @@ -0,0 +1,345 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + + "github.com/mitchellh/cli" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + + "github.com/kubegems/opentofu/pkg/command/cliconfig" + oauthserver "github.com/kubegems/opentofu/pkg/command/testdata/login-oauth-server" + tfeserver "github.com/kubegems/opentofu/pkg/command/testdata/login-tfe-server" + "github.com/kubegems/opentofu/pkg/command/webbrowser" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/version" +) + +func TestLogin(t *testing.T) { + // oauthserver.Handler is a stub OAuth server implementation that will, + // on success, always issue a bearer token named "good-token". + s := httptest.NewServer(oauthserver.Handler) + defer s.Close() + + // tfeserver.Handler is a stub TFE API implementation which will respond + // to ping and current account requests, when requests are authenticated + // with token "good-token" + ts := httptest.NewServer(tfeserver.Handler) + defer ts.Close() + + loginTestCase := func(test func(t *testing.T, c *LoginCommand, ui *cli.MockUi), useBrowserLauncher bool) func(t *testing.T) { + return func(t *testing.T) { + t.Helper() + workDir := t.TempDir() + + // We'll use this context to avoid asynchronous tasks outliving + // a single test run. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Do not use the NewMockUi initializer here, as we want to delay + // the call to init until after setting up the input mocks + ui := new(cli.MockUi) + + var browserLauncher webbrowser.Launcher = nil + if useBrowserLauncher { + browserLauncher = webbrowser.NewMockLauncher(ctx) + } + + creds := cliconfig.EmptyCredentialsSourceForTests(filepath.Join(workDir, "credentials.tfrc.json")) + svcs := disco.NewWithCredentialsSource(creds) + svcs.SetUserAgent(httpclient.OpenTofuUserAgent(version.String())) + + svcs.ForceHostServices(svchost.Hostname("example.com"), map[string]interface{}{ + "login.v1": map[string]interface{}{ + // For this fake hostname we'll use a conventional OAuth flow, + // with browser-based consent that we'll mock away using a + // mock browser launcher below. + "client": "anything-goes", + "authz": s.URL + "/authz", + "token": s.URL + "/token", + }, + }) + svcs.ForceHostServices(svchost.Hostname("with-scopes.example.com"), map[string]interface{}{ + "login.v1": map[string]interface{}{ + // with scopes + // mock browser launcher below. + "client": "scopes_test", + "authz": s.URL + "/authz", + "token": s.URL + "/token", + "scopes": []interface{}{"app1.full_access", "app2.read_only"}, + }, + }) + svcs.ForceHostServices(svchost.Hostname(tfeHost), map[string]interface{}{ + // This represents Terraform Cloud, which does not yet support the + // login API, but does support its own bespoke tokens API. + "tfe.v2": ts.URL + "/api/v2", + "tfe.v2.1": ts.URL + "/api/v2", + "tfe.v2.2": ts.URL + "/api/v2", + "motd.v1": ts.URL + "/api/terraform/motd", + }) + svcs.ForceHostServices(svchost.Hostname("tfe.acme.com"), map[string]interface{}{ + // This represents a Terraform Enterprise instance which does not + // yet support the login API, but does support its own bespoke tokens API. 
+ "tfe.v2": ts.URL + "/api/v2", + "tfe.v2.1": ts.URL + "/api/v2", + "tfe.v2.2": ts.URL + "/api/v2", + }) + svcs.ForceHostServices(svchost.Hostname("unsupported.example.net"), map[string]interface{}{ + // This host intentionally left blank. + }) + + c := &LoginCommand{ + Meta: Meta{ + Ui: ui, + BrowserLauncher: browserLauncher, + Services: svcs, + }, + } + + test(t, c, ui) + } + } + + t.Run("no hostname provided", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + status := c.Run([]string{}) + if status == 0 { + t.Fatalf("successful exit; want error") + } + + if got, want := ui.ErrorWriter.String(), "The login command expects exactly one argument"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }, true)) + + t.Run(tfeHost+" (no login support)", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt, then paste a token with some + // accidental whitespace. + defer testInputMap(t, map[string]string{ + "approve": "yes", + "token": " good-token ", + })() + status := c.Run([]string{tfeHost}) + if status != 0 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + credsSrc := c.Services.CredentialsSource() + creds, err := credsSrc.ForHost(svchost.Hostname(tfeHost)) + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + if got, want := creds.Token(), "good-token"; got != want { + t.Errorf("wrong token %q; want %q", got, want) + } + if got, want := ui.OutputWriter.String(), "Welcome to the cloud backend!"; !strings.Contains(got, want) { + t.Errorf("expected output to contain %q, but was:\n%s", want, got) + } + }, true)) + + t.Run("example.com with authorization code flow", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt. 
+ defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + status := c.Run([]string{"example.com"}) + if status != 0 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + credsSrc := c.Services.CredentialsSource() + creds, err := credsSrc.ForHost(svchost.Hostname("example.com")) + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + if got, want := creds.Token(), "good-token"; got != want { + t.Errorf("wrong token %q; want %q", got, want) + } + + if got, want := ui.OutputWriter.String(), "OpenTofu has obtained and saved an API token."; !strings.Contains(got, want) { + t.Errorf("expected output to contain %q, but was:\n%s", want, got) + } + }, true)) + + t.Run("example.com results in no scopes", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + + host, _ := c.Services.Discover("example.com") + client, _ := host.ServiceOAuthClient("login.v1") + if len(client.Scopes) != 0 { + t.Errorf("unexpected scopes %q; expected none", client.Scopes) + } + }, true)) + + t.Run("with-scopes.example.com with authorization code flow and scopes", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt. 
+ defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + status := c.Run([]string{"with-scopes.example.com"}) + if status != 0 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + credsSrc := c.Services.CredentialsSource() + creds, err := credsSrc.ForHost(svchost.Hostname("with-scopes.example.com")) + + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + + if got, want := creds.Token(), "good-token"; got != want { + t.Errorf("wrong token %q; want %q", got, want) + } + + if got, want := ui.OutputWriter.String(), "OpenTofu has obtained and saved an API token."; !strings.Contains(got, want) { + t.Errorf("expected output to contain %q, but was:\n%s", want, got) + } + }, true)) + + t.Run("with-scopes.example.com results in expected scopes", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + + host, _ := c.Services.Discover("with-scopes.example.com") + client, _ := host.ServiceOAuthClient("login.v1") + + expectedScopes := [2]string{"app1.full_access", "app2.read_only"} + + var foundScopes [2]string + copy(foundScopes[:], client.Scopes) + + if foundScopes != expectedScopes || len(client.Scopes) != len(expectedScopes) { + t.Errorf("unexpected scopes %q; want %q", client.Scopes, expectedScopes) + } + }, true)) + + t.Run("TFE host without login support", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt, then paste a token with some + // accidental whitespace. 
+ defer testInputMap(t, map[string]string{ + "approve": "yes", + "token": " good-token ", + })() + status := c.Run([]string{"tfe.acme.com"}) + if status != 0 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + credsSrc := c.Services.CredentialsSource() + creds, err := credsSrc.ForHost(svchost.Hostname("tfe.acme.com")) + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + if got, want := creds.Token(), "good-token"; got != want { + t.Errorf("wrong token %q; want %q", got, want) + } + + if got, want := ui.OutputWriter.String(), "Logged in to the cloud backend"; !strings.Contains(got, want) { + t.Errorf("expected output to contain %q, but was:\n%s", want, got) + } + }, true)) + + t.Run("TFE host without login support, incorrectly pasted token", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt, then paste an invalid token. + defer testInputMap(t, map[string]string{ + "approve": "yes", + "token": "good-tok", + })() + status := c.Run([]string{"tfe.acme.com"}) + if status != 1 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + credsSrc := c.Services.CredentialsSource() + creds, err := credsSrc.ForHost(svchost.Hostname("tfe.acme.com")) + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + if creds != nil { + t.Errorf("wrong token %q; should have no token", creds.Token()) + } + }, true)) + + t.Run("host without login or TFE API support", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + status := c.Run([]string{"unsupported.example.net"}) + if status == 0 { + t.Fatalf("successful exit; want error") + } + + if got, want := ui.ErrorWriter.String(), "Error: Host does not support OpenTofu tokens API"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }, true)) + + t.Run("answering no cancels", 
loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "no" at the consent prompt + defer testInputMap(t, map[string]string{ + "approve": "no", + })() + status := c.Run([]string{tfeHost}) + if status != 1 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + if got, want := ui.ErrorWriter.String(), "Login cancelled"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }, true)) + + t.Run("answering y cancels", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "y" at the consent prompt + defer testInputMap(t, map[string]string{ + "approve": "y", + })() + status := c.Run([]string{tfeHost}) + if status != 1 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + if got, want := ui.ErrorWriter.String(), "Login cancelled"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }, true)) + + // The following test does not use browser MockLauncher() and forces `tofu login` command print URL + // and wait for the callback with code. + // There is no timeout in `tofu login` OAuth2 callback server code, so the only way to interrupt it + // is to wirte to the shutdown channel (or complete the login process). + t.Run("example.com Ctrl+C interrupts login command", loginTestCase(func(t *testing.T, c *LoginCommand, ui *cli.MockUi) { + // Enter "yes" at the consent prompt. 
+ defer testInputMap(t, map[string]string{ + "approve": "yes", + })() + + // override the command's shutdown channel so we can write to it + abortCh := make(chan struct{}) + c.ShutdownCh = abortCh + + // statusCh will receive command Run result + statusCh := make(chan int) + go func() { + statusCh <- c.Run([]string{"example.com"}) + }() + + // abort background Login command and wait for its result + // removing the following line results in default test timeout, since we don't run mocked webbrowser + // and OAuth2 callback server will never get request with 'code'. + abortCh <- struct{}{} + status := <-statusCh + if status != 1 { + t.Fatalf("unexpected error code %d after interrupting the command\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + if got, want := ui.ErrorWriter.String(), "Action aborted"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }, false)) +} diff --git a/pkg/command/logout.go b/pkg/command/logout.go new file mode 100644 index 00000000000..4e8c6323a37 --- /dev/null +++ b/pkg/command/logout.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path/filepath" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/command/cliconfig" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// LogoutCommand is a Command implementation which removes stored credentials +// for a remote service host. +type LogoutCommand struct { + Meta +} + +// Run implements cli.Command. 
+func (c *LogoutCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("logout") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error( + "The logout command expects exactly one argument: the host to log out of.") + cmdFlags.Usage() + return 1 + } + + var diags tfdiags.Diagnostics + + givenHostname := args[0] + + hostname, err := svchost.ForComparison(givenHostname) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid hostname", + fmt.Sprintf("The given hostname %q is not valid: %s.", givenHostname, err.Error()), + )) + c.showDiagnostics(diags) + return 1 + } + + // From now on, since we've validated the given hostname, we should use + // dispHostname in the UI to ensure we're presenting it in the canonical + // form, in case that helps users with debugging when things aren't + // working as expected. (Perhaps the normalization is part of the cause.) 
+ dispHostname := hostname.ForDisplay() + + creds := c.Services.CredentialsSource().(*cliconfig.CredentialsSource) + filename, _ := creds.CredentialsFilePath() + credsCtx := &loginCredentialsContext{ + Location: creds.HostCredentialsLocation(hostname), + LocalFilename: filename, // empty in the very unlikely event that we can't select a config directory for this user + HelperType: creds.CredentialsHelperType(), + } + + if credsCtx.Location == cliconfig.CredentialsInOtherFile { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Credentials for %s are manually configured", dispHostname), + "The \"tofu logout\" command cannot log out because credentials for this host are manually configured in a CLI configuration file.\n\nTo log out, revoke the existing credentials and remove that block from the CLI configuration.", + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + switch credsCtx.Location { + case cliconfig.CredentialsNotAvailable: + c.Ui.Output(fmt.Sprintf("No credentials for %s are stored.\n", dispHostname)) + return 0 + case cliconfig.CredentialsViaHelper: + c.Ui.Output(fmt.Sprintf("Removing the stored credentials for %s from the configured\n%q credentials helper.\n", dispHostname, credsCtx.HelperType)) + case cliconfig.CredentialsInPrimaryFile: + c.Ui.Output(fmt.Sprintf("Removing the stored credentials for %s from the following file:\n %s\n", dispHostname, credsCtx.LocalFilename)) + } + + err = creds.ForgetForHost(hostname) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to remove API token", + fmt.Sprintf("Unable to remove stored API token: %s", err), + )) + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + c.Ui.Output( + fmt.Sprintf( + c.Colorize().Color(strings.TrimSpace(` +[green][bold]Success![reset] [bold]OpenTofu has removed the stored API token for %s.[reset] +`)), + dispHostname, + ) + "\n", + ) + + return 0 +} + +// Help implements 
cli.Command. +func (c *LogoutCommand) Help() string { + defaultFile := c.defaultOutputFile() + if defaultFile == "" { + // Because this is just for the help message and it's very unlikely + // that a user wouldn't have a functioning home directory anyway, + // we'll just use a placeholder here. The real command has some + // more complex behavior for this case. This result is not correct + // on all platforms, but given how unlikely we are to hit this case + // that seems okay. + defaultFile = "~/.terraform/credentials.tfrc.json" + } + + helpText := ` +Usage: tofu [global options] logout [hostname] + + Removes locally-stored credentials for specified hostname. + + Note: the API token is only removed from local storage, not destroyed on the + remote server, so it will remain valid until manually revoked. + %s +` + return strings.TrimSpace(fmt.Sprintf(helpText, defaultFile)) +} + +// Synopsis implements cli.Command. +func (c *LogoutCommand) Synopsis() string { + return "Remove locally-stored credentials for a remote host" +} + +func (c *LogoutCommand) defaultOutputFile() string { + if c.CLIConfigDir == "" { + return "" // no default available + } + return filepath.Join(c.CLIConfigDir, "credentials.tfrc.json") +} diff --git a/pkg/command/logout_test.go b/pkg/command/logout_test.go new file mode 100644 index 00000000000..67aeb057510 --- /dev/null +++ b/pkg/command/logout_test.go @@ -0,0 +1,88 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/mitchellh/cli" + + svchost "github.com/hashicorp/terraform-svchost" + svcauth "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/command/cliconfig" +) + +func TestLogout(t *testing.T) { + workDir := t.TempDir() + + ui := cli.NewMockUi() + credsSrc := cliconfig.EmptyCredentialsSourceForTests(filepath.Join(workDir, "credentials.tfrc.json")) + + c := &LogoutCommand{ + Meta: Meta{ + Ui: ui, + Services: disco.NewWithCredentialsSource(credsSrc), + }, + } + + t.Run("with no hostname", func(t *testing.T) { + status := c.Run([]string{}) + + if status != 1 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), "The logout command expects exactly one argument") { + t.Errorf("unexpected error message: %s", ui.ErrorWriter.String()) + } + }) + + testCases := []struct { + // Hostname to associate a pre-stored token + hostname string + // Command-line arguments + args []string + // true iff the token at hostname should be removed by the command + shouldRemove bool + }{ + // Can remove token for a hostname + {"tfe.example.com", []string{"tfe.example.com"}, true}, + + // Logout does not remove tokens for other hostnames + {"tfe.example.com", []string{"other-tfe.acme.com"}, false}, + } + + for _, tc := range testCases { + host := svchost.Hostname(tc.hostname) + token := svcauth.HostCredentialsToken("some-token") + err := credsSrc.StoreForHost(host, token) + if err != nil { + t.Fatalf("unexpected error storing credentials: %s", err) + } + + status := c.Run(tc.args) + if status != 0 { + t.Fatalf("unexpected error code %d\nstderr:\n%s", status, ui.ErrorWriter.String()) + } + + creds, err := credsSrc.ForHost(host) + if err != nil { + t.Errorf("failed to retrieve credentials: %s", err) + } + if 
tc.shouldRemove { + if creds != nil { + t.Errorf("wrong token %q; should have no token", creds.Token()) + } + } else { + if got, want := creds.Token(), "some-token"; got != want { + t.Errorf("wrong token %q; want %q", got, want) + } + } + } +} diff --git a/pkg/command/meta.go b/pkg/command/meta.go new file mode 100644 index 00000000000..b25d74c6f36 --- /dev/null +++ b/pkg/command/meta.go @@ -0,0 +1,924 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/command/webbrowser" + "github.com/kubegems/opentofu/pkg/command/workdir" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/getproviders" + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Meta are the meta-options that are available on all or most commands. +type Meta struct { + // The exported fields below should be set by anyone using a + // command with a Meta field. 
These are expected to be set externally + // (not from within the command itself). + + // WorkingDir is an object representing the "working directory" where we're + // running commands. In the normal case this literally refers to the + // working directory of the OpenTofu process, though this can take on + // a more symbolic meaning when the user has overridden default behavior + // to specify a different working directory or to override the special + // data directory where we'll persist settings that must survive between + // consecutive commands. + // + // We're currently gradually migrating the various bits of state that + // must persist between consecutive commands in a session to be encapsulated + // in here, but we're not there yet and so there are also some methods on + // Meta which directly read and modify paths inside the data directory. + WorkingDir *workdir.Dir + + // Streams tracks the raw Stdout, Stderr, and Stdin handles along with + // some basic metadata about them, such as whether each is connected to + // a terminal, how wide the possible terminal is, etc. + // + // For historical reasons this might not be set in unit test code, and + // so functions working with this field must check if it's nil and + // do some default behavior instead if so, rather than panicking. + Streams *terminal.Streams + + View *views.View + + Color bool // True if output should be colored + GlobalPluginDirs []string // Additional paths to search for plugins + Ui cli.Ui // Ui for output + + // Services provides access to remote endpoint information for + // 'tofu-native' services running at a specific user-facing hostname. + Services *disco.Disco + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. 
+ // + // This is a hint to various command routines that it may be confusing + // to print out messages that suggest running specific follow-up + // commands, since the user consuming the output will not be + // in a position to run such commands. + // + // The intended use-case of this flag is when OpenTofu is running in + // some sort of workflow orchestration tool which is abstracting away + // the specific commands being run. + RunningInAutomation bool + + // CLIConfigDir is the directory from which CLI configuration files were + // read by the caller and the directory where any changes to CLI + // configuration files by commands should be made. + // + // If this is empty then no configuration directory is available and + // commands which require one cannot proceed. + CLIConfigDir string + + // PluginCacheDir, if non-empty, enables caching of downloaded plugins + // into the given directory. + PluginCacheDir string + + // PluginCacheMayBreakDependencyLockFile is a temporary CLI configuration-based + // opt out for the behavior of only using the plugin cache dir if its + // contents match checksums recorded in the dependency lock file. + // + // This is an accommodation for those who currently essentially ignore the + // dependency lock file -- treating it only as transient working directory + // state -- and therefore don't care if the plugin cache dir causes the + // checksums inside to only be sufficient for the computer where OpenTofu + // is currently running. + // + // We intend to remove this exception again (making the CLI configuration + // setting a silent no-op) in future once we've improved the dependency + // lock file mechanism so that it's usable for everyone and there are no + // longer any compelling reasons for folks to not lock their dependencies. 
+ PluginCacheMayBreakDependencyLockFile bool + + // ProviderSource allows determining the available versions of a provider + // and determines where a distribution package for a particular + // provider version can be obtained. + ProviderSource getproviders.Source + + // BrowserLauncher is used by commands that need to open a URL in a + // web browser. + BrowserLauncher webbrowser.Launcher + + // A context.Context provided by the caller -- typically "package main" -- + // which might be carrying telemetry-related metadata and so should be + // used when creating downstream traces, etc. + // + // This isn't guaranteed to be set, so use [Meta.CommandContext] to + // safely create a context for the entire execution of a command, which + // will be connected to this parent context if it's present. + CallerContext context.Context + + // When this channel is closed, the command will be cancelled. + ShutdownCh <-chan struct{} + + // ProviderDevOverrides are providers where we ignore the lock file, the + // configured version constraints, and the local cache directory and just + // always use exactly the path specified. This is intended to allow + // provider developers to easily test local builds without worrying about + // what version number they might eventually be released as, or what + // checksums they have. + ProviderDevOverrides map[addrs.Provider]getproviders.PackageLocalDir + + // UnmanagedProviders are a set of providers that exist as processes + // predating OpenTofu, which OpenTofu should use but not worry about the + // lifecycle of. + // + // This is essentially a more extreme version of ProviderDevOverrides where + // OpenTofu doesn't even worry about how the provider server gets launched, + // just trusting that someone else did it before running OpenTofu. 
+ UnmanagedProviders map[addrs.Provider]*plugin.ReattachConfig + + // AllowExperimentalFeatures controls whether a command that embeds this + // Meta is permitted to make use of experimental OpenTofu features. + // + // Set this field only during the initial creation of Meta. If you change + // this field after calling methods of type Meta then the resulting + // behavior is undefined. + // + // In normal code this would be set by package main only in builds + // explicitly marked as being alpha releases or development snapshots, + // making experimental features unavailable otherwise. Test code may + // choose to set this if it needs to exercise experimental features. + // + // Some experiments predated the addition of this setting, and may + // therefore still be available even if this flag is false. Our intent + // is that all/most _future_ experiments will be unavailable unless this + // flag is set, to reinforce that experiments are not for production use. + AllowExperimentalFeatures bool + + //---------------------------------------------------------- + // Protected: commands can set these + //---------------------------------------------------------- + + // pluginPath is a user defined set of directories to look for plugins. + // This is set during init with the `-plugin-dir` flag, saved to a file in + // the data directory. + // This overrides all other search paths when discovering plugins. + pluginPath []string + + // Override certain behavior for tests within this package + testingOverrides *testingOverrides + + //---------------------------------------------------------- + // Private: do not set these + //---------------------------------------------------------- + + // configLoader is a shared configuration loader that is used by + // LoadConfig and other commands that access configuration files. + // It is initialized on first use. 
+ configLoader *configload.Loader + + // backendState is the currently active backend state + backendState *legacy.BackendState + + // Variables for the context (private) + variableArgs rawFlags + input bool + + // Targets for this context (private) + targets []addrs.Targetable + targetFlags []string + + // Internal fields + color bool + oldUi cli.Ui + + // The fields below are expected to be set by the command via + // command line flags. See the Apply command for an example. + // + // statePath is the path to the state file. If this is empty, then + // no state will be loaded. It is also okay for this to be a path to + // a file that doesn't exist; it is assumed that this means that there + // is simply no state. + // + // stateOutPath is used to override the output path for the state. + // If not provided, the StatePath is used causing the old state to + // be overridden. + // + // backupPath is used to backup the state file before writing a modified + // version. It defaults to stateOutPath + DefaultBackupExtension + // + // parallelism is used to control the number of concurrent operations + // allowed when walking the graph + // + // provider is to specify specific resource providers + // + // stateLock is set to false to disable state locking + // + // stateLockTimeout is the optional duration to retry a state lock + // when it is already locked by another process. + // + // forceInitCopy suppresses confirmation for copying state data during + // init. + // + // reconfigure forces init to ignore any stored configuration. + // + // migrateState confirms the user wishes to migrate from the prior backend + // configuration to a new configuration. + // + // compactWarnings (-compact-warnings) selects a more compact presentation + // of warnings in the output when they are not accompanied by errors. 
+ statePath string + stateOutPath string + backupPath string + parallelism int + stateLock bool + stateLockTimeout time.Duration + forceInitCopy bool + reconfigure bool + migrateState bool + compactWarnings bool + + // Used with commands which write state to allow users to write remote + // state even if the remote and local OpenTofu versions don't match. + ignoreRemoteVersion bool + + outputInJSON bool + + // Used to cache the root module rootModuleCallCache and known variables. + // This helps prevent duplicate errors/warnings. + rootModuleCallCache *configs.StaticModuleCall + inputVariableCache map[string]backend.UnparsedVariableValue +} + +type testingOverrides struct { + Providers map[addrs.Provider]providers.Factory + Provisioners map[string]provisioners.Factory +} + +// initStatePaths is used to initialize the default values for +// statePath, stateOutPath, and backupPath +func (m *Meta) initStatePaths() { + if m.statePath == "" { + m.statePath = DefaultStateFilename + } + if m.stateOutPath == "" { + m.stateOutPath = m.statePath + } + if m.backupPath == "" { + m.backupPath = m.stateOutPath + DefaultBackupExtension + } +} + +// StateOutPath returns the true output path for the state file +func (m *Meta) StateOutPath() string { + return m.stateOutPath +} + +// Colorize returns the colorization structure for a command. +func (m *Meta) Colorize() *colorstring.Colorize { + colors := make(map[string]string) + for k, v := range colorstring.DefaultColors { + colors[k] = v + } + colors["purple"] = "38;5;57" + + return &colorstring.Colorize{ + Colors: colors, + Disable: !m.color, + Reset: true, + } +} + +// fixupMissingWorkingDir is a compensation for various existing tests which +// directly construct incomplete "Meta" objects. Specifically, it deals with +// a test that omits a WorkingDir value by constructing one just-in-time. 
+// +// We shouldn't ever rely on this in any real codepath, because it doesn't +// take into account the various ways users can override our default +// directory selection behaviors. +func (m *Meta) fixupMissingWorkingDir() { + if m.WorkingDir == nil { + log.Printf("[WARN] This 'Meta' object is missing its WorkingDir, so we're creating a default one suitable only for tests") + m.WorkingDir = workdir.NewDir(".") + } +} + +// DataDir returns the directory where local data will be stored. +// Defaults to DefaultDataDir in the current working directory. +func (m *Meta) DataDir() string { + m.fixupMissingWorkingDir() + return m.WorkingDir.DataDir() +} + +const ( + // InputModeEnvVar is the environment variable that, if set to "false" or + // "0", causes tofu commands to behave as if the `-input=false` flag was + // specified. + InputModeEnvVar = "TF_INPUT" +) + +// InputMode returns the type of input we should ask for in the form of +// tofu.InputMode which is passed directly to Context.Input. +func (m *Meta) InputMode() tofu.InputMode { + if test || !m.input { + return 0 + } + + if envVar := os.Getenv(InputModeEnvVar); envVar != "" { + if v, err := strconv.ParseBool(envVar); err == nil { + if !v { + return 0 + } + } + } + + var mode tofu.InputMode + mode |= tofu.InputModeProvider + + return mode +} + +// UIInput returns a UIInput object to be used for asking for input. +func (m *Meta) UIInput() tofu.UIInput { + return &UIInput{ + Colorize: m.Colorize(), + } +} + +// OutputColumns returns the number of columns that normal (non-error) UI +// output should be wrapped to fill. +// +// This is the column count to use if you'll be printing your message via +// the Output or Info methods of m.Ui. +func (m *Meta) OutputColumns() int { + if m.Streams == nil { + // A default for unit tests that don't populate Meta fully. + return 78 + } + return m.Streams.Stdout.Columns() +} + +// ErrorColumns returns the number of columns that error UI output should be +// wrapped to fill. 
+// +// This is the column count to use if you'll be printing your message via +// the Error or Warn methods of m.Ui. +func (m *Meta) ErrorColumns() int { + if m.Streams == nil { + // A default for unit tests that don't populate Meta fully. + return 78 + } + return m.Streams.Stderr.Columns() +} + +// StdinPiped returns true if the input is piped. +func (m *Meta) StdinPiped() bool { + if m.Streams == nil { + // If we don't have m.Streams populated then we're presumably in a unit + // test that doesn't properly populate Meta, so we'll just say the + // output _isn't_ piped because that's the common case and so most likely + // to be useful to a unit test. + return false + } + return !m.Streams.Stdin.IsTerminal() +} + +// InterruptibleContext returns a context.Context that will be cancelled +// if the process is interrupted by a platform-specific interrupt signal. +// +// The typical way to use this is to pass the result of [Meta.CommandContext] +// as the base context, but that's appropriate only if the interruptible +// context is being created directly inside the "Run" method of a particular +// command, to create a context representing the entire remaining runtime of +// that command: +// +// As usual with cancelable contexts, the caller must always call the given +// cancel function once all operations are complete in order to make sure +// that the context resources will still be freed even if there is no +// interruption. +// +// // This example is only for when using this function very early in +// // the "Run" method of a Command implementation. If you already have +// // an active context, pass that in as base instead. +// ctx, done := c.InterruptibleContext(c.CommandContext()) +// defer done() +func (m *Meta) InterruptibleContext(base context.Context) (context.Context, context.CancelFunc) { + if m.ShutdownCh == nil { + // If we're running in a unit testing context without a shutdown + // channel populated then we'll return an uncancelable channel. 
+ return base, func() {} + } + + ctx, cancel := context.WithCancel(base) + go func() { + select { + case <-m.ShutdownCh: + cancel() + case <-ctx.Done(): + // finished without being interrupted + } + }() + return ctx, cancel +} + +// CommandContext returns the "root context" to use in the main Run function +// of a command. +// +// This method is just a substitute for passing a context directly to the +// "Run" method of a command, which we can't do because that API is owned by +// mitchellh/cli rather than by OpenTofu. Use this only in situations +// comparable to the context having been passed in as an argument to Run. +// +// If the caller (e.g. "package main") provided a context when it instantiated +// the Meta then the returned context will inherit all of its values, deadlines, +// etc. If the caller did not provide a context then the result is an inert +// background context ready to be passed to other functions. +func (m *Meta) CommandContext() context.Context { + if m.CallerContext == nil { + return context.Background() + } + // We just return the caller context directly for now, since we don't + // have anything to add to it. + return m.CallerContext +} + +// RunOperation executes the given operation on the given backend, blocking +// until that operation completes or is interrupted, and then returns +// the RunningOperation object representing the completed or +// aborted operation that is, despite the name, no longer running. +// +// An error is returned if the operation either fails to start or is cancelled. +// If the operation runs to completion then no error is returned even if the +// operation itself is unsuccessful. Use the "Result" field of the +// returned operation object to recognize operation-level failure. 
+func (m *Meta) RunOperation(b backend.Enhanced, opReq *backend.Operation) (*backend.RunningOperation, tfdiags.Diagnostics) { + if opReq.View == nil { + panic("RunOperation called with nil View") + } + if opReq.ConfigDir != "" { + opReq.ConfigDir = m.normalizePath(opReq.ConfigDir) + } + + // Inject variables and root module call + var diags, callDiags tfdiags.Diagnostics + opReq.Variables, diags = m.collectVariableValues() + opReq.RootCall, callDiags = m.rootModuleCall(opReq.ConfigDir) + diags = diags.Append(callDiags) + if diags.HasErrors() { + return nil, diags + } + + op, err := b.Operation(context.Background(), opReq) + if err != nil { + return nil, diags.Append(fmt.Errorf("error starting operation: %w", err)) + } + + // Wait for the operation to complete or an interrupt to occur + select { + case <-m.ShutdownCh: + // gracefully stop the operation + op.Stop() + + // Notify the user + opReq.View.Interrupted() + + // Still get the result, since there is still one + select { + case <-m.ShutdownCh: + opReq.View.FatalInterrupt() + + // cancel the operation completely + op.Cancel() + + // the operation should return asap + // but timeout just in case + select { + case <-op.Done(): + case <-time.After(5 * time.Second): + } + + return nil, diags.Append(errors.New("operation canceled")) + + case <-op.Done(): + // operation completed after Stop + } + case <-op.Done(): + // operation completed normally + } + + return op, diags +} + +// contextOpts returns the options to use to initialize a OpenTofu +// context with the settings from this Meta. +func (m *Meta) contextOpts() (*tofu.ContextOpts, error) { + workspace, err := m.Workspace() + if err != nil { + return nil, err + } + + var opts tofu.ContextOpts + + opts.UIInput = m.UIInput() + opts.Parallelism = m.parallelism + + // If testingOverrides are set, we'll skip the plugin discovery process + // and just work with what we've been given, thus allowing the tests + // to provide mock providers and provisioners. 
+ if m.testingOverrides != nil { + opts.Providers = m.testingOverrides.Providers + opts.Provisioners = m.testingOverrides.Provisioners + } else { + var providerFactories map[addrs.Provider]providers.Factory + providerFactories, err = m.providerFactories() + opts.Providers = providerFactories + opts.Provisioners = m.provisionerFactories() + } + + opts.Meta = &tofu.ContextMeta{ + Env: workspace, + OriginalWorkingDir: m.WorkingDir.OriginalWorkingDir(), + } + + return &opts, err +} + +// defaultFlagSet creates a default flag set for commands. +// See also command/arguments/default.go +func (m *Meta) defaultFlagSet(n string) *flag.FlagSet { + f := flag.NewFlagSet(n, flag.ContinueOnError) + f.SetOutput(io.Discard) + + // Set the default Usage to empty + f.Usage = func() {} + + return f +} + +// ignoreRemoteVersionFlagSet add the ignore-remote version flag to suppress +// the error when the configured OpenTofu version on the remote workspace +// does not match the local OpenTofu version. +func (m *Meta) ignoreRemoteVersionFlagSet(n string) *flag.FlagSet { + f := m.defaultFlagSet(n) + + m.varFlagSet(f) + + f.BoolVar(&m.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local OpenTofu versions are incompatible") + + return f +} + +func (m *Meta) varFlagSet(f *flag.FlagSet) { + if m.variableArgs.items == nil { + m.variableArgs = newRawFlags("-var") + } + varValues := m.variableArgs.Alias("-var") + varFiles := m.variableArgs.Alias("-var-file") + f.Var(varValues, "var", "variables") + f.Var(varFiles, "var-file", "variable file") +} + +// extendedFlagSet adds custom flags that are mostly used by commands +// that are used to run an operation like plan or apply. 
+func (m *Meta) extendedFlagSet(n string) *flag.FlagSet { + f := m.defaultFlagSet(n) + + f.BoolVar(&m.input, "input", true, "input") + f.Var((*FlagStringSlice)(&m.targetFlags), "target", "resource to target") + f.BoolVar(&m.compactWarnings, "compact-warnings", false, "use compact warnings") + + m.varFlagSet(f) + + // commands that bypass locking will supply their own flag on this var, + // but set the initial meta value to true as a failsafe. + m.stateLock = true + + return f +} + +// process will process any -no-color entries out of the arguments. This +// will potentially modify the args in-place. It will return the resulting +// slice, and update the Meta and Ui. +func (m *Meta) process(args []string) []string { + // We do this so that we retain the ability to technically call + // process multiple times, even if we have no plans to do so + if m.oldUi != nil { + m.Ui = m.oldUi + } + + // Set colorization + m.color = m.Color + i := 0 // output index + for _, v := range args { + if v == "-no-color" { + m.color = false + m.Color = false + } else { + // copy and increment index + args[i] = v + i++ + } + } + args = args[:i] + + // Set the UI + m.oldUi = m.Ui + m.Ui = &cli.ConcurrentUi{ + Ui: &ColorizeUi{ + Colorize: m.Colorize(), + ErrorColor: "[red]", + WarnColor: "[yellow]", + Ui: m.oldUi, + }, + } + + // Reconfigure the view. This is necessary for commands which use both + // views.View and cli.Ui during the migration phase. + if m.View != nil { + m.View.Configure(&arguments.View{ + CompactWarnings: m.compactWarnings, + NoColor: !m.Color, + }) + } + + return args +} + +// uiHook returns the UiHook to use with the context. +func (m *Meta) uiHook() *views.UiHook { + return views.NewUiHook(m.View) +} + +// confirm asks a yes/no confirmation. 
// confirm prompts the user with opts and returns true only for an explicit
// "yes" answer (case-insensitive). Any other answer is re-asked once; after
// two unrecognized answers it gives up and returns (false, nil). It errors
// immediately if input is disabled.
func (m *Meta) confirm(opts *tofu.InputOpts) (bool, error) {
	if !m.Input() {
		return false, errors.New("input is disabled")
	}

	// Ask at most twice before treating the answer as a "no".
	for i := 0; i < 2; i++ {
		v, err := m.UIInput().Input(context.Background(), opts)
		if err != nil {
			return false, fmt.Errorf(
				"Error asking for confirmation: %w", err)
		}

		switch strings.ToLower(v) {
		case "no":
			return false, nil
		case "yes":
			return true, nil
		}
	}
	return false, nil
}

// showDiagnostics displays error and warning messages in the UI.
//
// "Diagnostics" here means the Diagnostics type from the tfdiag package,
// though as a convenience this function accepts anything that could be
// passed to the "Append" method on that type, converting it to Diagnostics
// before displaying it.
//
// Internally this function uses Diagnostics.Append, and so it will panic
// if given unsupported value types, just as Append does.
func (m *Meta) showDiagnostics(vals ...interface{}) {
	var diags tfdiags.Diagnostics
	diags = diags.Append(vals...)
	diags.Sort()

	if len(diags) == 0 {
		return
	}

	// In -json mode the diagnostics are emitted as machine-readable JSON
	// instead of rendered text.
	if m.outputInJSON {
		jsonView := views.NewJSONView(m.View)
		jsonView.Diagnostics(diags)
		return
	}

	outputWidth := m.ErrorColumns()

	diags = diags.ConsolidateWarnings(1)

	// Warning messages are generally competing for the user's attention,
	// so optionally render them in a compact form.
	if m.compactWarnings {
		// If the user selected compact warnings and all of the diagnostics are
		// warnings then we'll use a more compact representation of the warnings
		// that only includes their summaries.
		// We show full warnings if there are also errors, because a warning
		// can sometimes serve as good context for a subsequent error.
		useCompact := true
		for _, diag := range diags {
			if diag.Severity() != tfdiags.Warning {
				useCompact = false
				break
			}
		}
		if useCompact {
			msg := format.DiagnosticWarningsCompact(diags, m.Colorize())
			msg = "\n" + msg + "\nTo see the full warning notes, run OpenTofu without -compact-warnings.\n"
			m.Ui.Warn(msg)
			return
		}
	}

	// Render each diagnostic individually, colorized or plain, and route it
	// to the appropriate UI channel by severity.
	for _, diag := range diags {
		var msg string
		if m.Color {
			msg = format.Diagnostic(diag, m.configSources(), m.Colorize(), outputWidth)
		} else {
			msg = format.DiagnosticPlain(diag, m.configSources(), outputWidth)
		}

		switch diag.Severity() {
		case tfdiags.Error:
			m.Ui.Error(msg)
		case tfdiags.Warning:
			m.Ui.Warn(msg)
		default:
			m.Ui.Output(msg)
		}
	}
}

// WorkspaceNameEnvVar is the name of the environment variable that can be used
// to set the name of the OpenTofu workspace, overriding the workspace chosen
// by `tofu workspace select`.
//
// Note that this environment variable is ignored by `tofu workspace new`
// and `tofu workspace delete`.
const WorkspaceNameEnvVar = "TF_WORKSPACE"

// errInvalidWorkspaceNameEnvVar is returned by Workspace when TF_WORKSPACE
// holds a name that fails validWorkspaceName.
var errInvalidWorkspaceNameEnvVar = fmt.Errorf("Invalid workspace name set using %s", WorkspaceNameEnvVar)

// Workspace returns the name of the currently configured workspace, corresponding
// to the desired named state.
func (m *Meta) Workspace() (string, error) {
	current, overridden := m.WorkspaceOverridden()
	// Only an environment-variable override is validated here; a workspace
	// selected on disk is trusted as-is.
	if overridden && !validWorkspaceName(current) {
		return "", errInvalidWorkspaceNameEnvVar
	}
	return current, nil
}

// WorkspaceOverridden returns the name of the currently configured workspace,
// corresponding to the desired named state, as well as a bool saying whether
// this was set via the TF_WORKSPACE environment variable.
func (m *Meta) WorkspaceOverridden() (string, bool) {
	// The environment variable takes precedence over the on-disk selection.
	if envVar := os.Getenv(WorkspaceNameEnvVar); envVar != "" {
		return envVar, true
	}

	// Read the workspace file from the data dir; a missing or unreadable
	// file yields empty data, which falls back to the default workspace.
	envData, err := os.ReadFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile))
	current := string(bytes.TrimSpace(envData))
	if current == "" {
		current = backend.DefaultStateName
	}

	if err != nil && !os.IsNotExist(err) {
		// always return the default if we can't get a workspace name
		// (the error is only logged, never surfaced to the caller)
		log.Printf("[ERROR] failed to read current workspace: %s", err)
	}

	return current, false
}

// SetWorkspace saves the given name as the current workspace in the local
// filesystem.
func (m *Meta) SetWorkspace(name string) error {
	// Ensure the data dir exists before writing the workspace file into it.
	err := os.MkdirAll(m.DataDir(), 0755)
	if err != nil {
		return err
	}

	err = os.WriteFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile), []byte(name), 0644)
	if err != nil {
		return err
	}
	return nil
}

// isAutoVarFile determines if the file ends with .auto.tfvars or .auto.tfvars.json
func isAutoVarFile(path string) bool {
	return strings.HasSuffix(path, ".auto.tfvars") ||
		strings.HasSuffix(path, ".auto.tfvars.json")
}

// FIXME: as an interim refactoring step, we apply the contents of the state
// arguments directly to the Meta object. Future work would ideally update the
// code paths which use these arguments to be passed them directly for clarity.
func (m *Meta) applyStateArguments(args *arguments.State) {
	m.stateLock = args.Lock
	m.stateLockTimeout = args.LockTimeout
	m.statePath = args.StatePath
	m.stateOutPath = args.StateOutPath
	m.backupPath = args.BackupPath
}

// checkRequiredVersion loads the config and checks whether the
// core version requirements are satisfied.
+func (m *Meta) checkRequiredVersion() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + loader, err := m.initConfigLoader() + if err != nil { + diags = diags.Append(err) + return diags + } + + pwd, err := os.Getwd() + if err != nil { + diags = diags.Append(fmt.Errorf("Error getting pwd: %w", err)) + return diags + } + + call, callDiags := m.rootModuleCall(pwd) + if callDiags.HasErrors() { + diags = diags.Append(callDiags) + return diags + } + + config, configDiags := loader.LoadConfig(pwd, call) + if configDiags.HasErrors() { + diags = diags.Append(configDiags) + return diags + } + + versionDiags := tofu.CheckCoreVersionRequirements(config) + if versionDiags.HasErrors() { + diags = diags.Append(versionDiags) + return diags + } + + return nil +} + +// MaybeGetSchemas attempts to load and return the schemas +// If there is not enough information to return the schemas, +// it could potentially return nil without errors. It is the +// responsibility of the caller to handle the lack of schema +// information accordingly +func (c *Meta) MaybeGetSchemas(state *states.State, config *configs.Config) (*tofu.Schemas, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + path, err := os.Getwd() + if err != nil { + diags.Append(tfdiags.SimpleWarning(failedToLoadSchemasMessage)) + return nil, diags + } + + if config == nil { + config, diags = c.loadConfig(path) + if diags.HasErrors() { + diags.Append(tfdiags.SimpleWarning(failedToLoadSchemasMessage)) + return nil, diags + } + } + + if config != nil || state != nil { + opts, err := c.contextOpts() + if err != nil { + diags = diags.Append(err) + return nil, diags + } + tfCtx, ctxDiags := tofu.NewContext(opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return nil, diags + } + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags := tfCtx.Schemas(config, state) + diags = diags.Append(schemaDiags) + if schemaDiags.HasErrors() { + return nil, diags + } + return schemas, diags + + } + return nil, 
diags +} diff --git a/pkg/command/meta_backend.go b/pkg/command/meta_backend.go new file mode 100644 index 00000000000..ee89a97a257 --- /dev/null +++ b/pkg/command/meta_backend.go @@ -0,0 +1,1694 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +// This file contains all the Backend-related function calls on Meta, +// exported and private. + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "path/filepath" + "strconv" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + + backendInit "github.com/kubegems/opentofu/pkg/backend/init" + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// BackendOpts are the options used to initialize a backend.Backend. +type BackendOpts struct { + // Config is a representation of the backend configuration block given in + // the root module, or nil if no such block is present. + Config *configs.Backend + + // ConfigOverride is an hcl.Body that, if non-nil, will be used with + // configs.MergeBodies to override the type-specific backend configuration + // arguments in Config. + ConfigOverride hcl.Body + + // Init should be set to true if initialization is allowed. 
If this is + // false, then any configuration that requires configuration will show + // an error asking the user to reinitialize. + Init bool + + // ForceLocal will force a purely local backend, including state. + // You probably don't want to set this. + ForceLocal bool + + // ViewType will set console output format for the + // initialization operation (JSON or human-readable). + ViewType arguments.ViewType +} + +// BackendWithRemoteTerraformVersion is a shared interface between the 'remote' and 'cloud' backends +// for simplified type checking when calling functions common to those particular backends. +type BackendWithRemoteTerraformVersion interface { + IgnoreVersionConflict() + VerifyWorkspaceTerraformVersion(workspace string) tfdiags.Diagnostics + IsLocalOperations() bool +} + +// Backend initializes and returns the backend for this CLI session. +// +// The backend is used to perform the actual OpenTofu operations. This +// abstraction enables easily sliding in new OpenTofu behavior such as +// remote state storage, remote operations, etc. while allowing the CLI +// to remain mostly identical. +// +// This will initialize a new backend for each call, which can carry some +// overhead with it. Please reuse the returned value for optimal behavior. +// +// Only one backend should be used per Meta. This function is stateful +// and is unsafe to create multiple backends used at once. This function +// can be called multiple times with each backend being "live" (usable) +// one at a time. +// +// A side-effect of this method is the population of m.backendState, recording +// the final resolved backend configuration after dealing with overrides from +// the "tofu init" command line, etc. 
+func (m *Meta) Backend(opts *BackendOpts, enc encryption.StateEncryption) (backend.Enhanced, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // If no opts are set, then initialize + if opts == nil { + opts = &BackendOpts{} + } + + // Initialize a backend from the config unless we're forcing a purely + // local operation. + var b backend.Backend + if !opts.ForceLocal { + var backendDiags tfdiags.Diagnostics + b, backendDiags = m.backendFromConfig(opts, enc) + diags = diags.Append(backendDiags) + + if diags.HasErrors() { + return nil, diags + } + + log.Printf("[TRACE] Meta.Backend: instantiated backend of type %T", b) + } + + // Set up the CLI opts we pass into backends that support it. + cliOpts, err := m.backendCLIOpts() + if err != nil { + if errs := providerPluginErrors(nil); errors.As(err, &errs) { + // This is a special type returned by m.providerFactories, which + // indicates one or more inconsistencies between the dependency + // lock file and the provider plugins actually available in the + // local cache directory. + // + // If initialization is allowed, we ignore this error, as it may + // be resolved by the later step where providers are fetched. + if !opts.Init { + var buf bytes.Buffer + for addr, err := range errs { + fmt.Fprintf(&buf, "\n - %s: %s", addr, err) + } + suggestion := "To download the plugins required for this configuration, run:\n tofu init" + if m.RunningInAutomation { + // Don't mention "tofu init" specifically if we're running in an automation wrapper + suggestion = "You must install the required plugins before running OpenTofu operations." + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required plugins are not installed", + fmt.Sprintf( + "The installed provider plugins are not consistent with the packages selected in the dependency lock file:%s\n\nOpenTofu uses external plugins to integrate with a variety of different infrastructure services. 
%s", + buf.String(), suggestion, + ), + )) + return nil, diags + } + } else { + // All other errors just get generic handling. + diags = diags.Append(err) + return nil, diags + } + } + cliOpts.Validation = true + + // If the backend supports CLI initialization, do it. + if cli, ok := b.(backend.CLI); ok { + if err := cli.CLIInit(cliOpts); err != nil { + diags = diags.Append(fmt.Errorf( + "Error initializing backend %T: %w\n\n"+ + "This is a bug; please report it to the backend developer", + b, err, + )) + return nil, diags + } + } + + // If the result of loading the backend is an enhanced backend, + // then return that as-is. This works even if b == nil (it will be !ok). + if enhanced, ok := b.(backend.Enhanced); ok { + log.Printf("[TRACE] Meta.Backend: backend %T supports operations", b) + return enhanced, nil + } + + // We either have a non-enhanced backend or no backend configured at + // all. In either case, we use local as our enhanced backend and the + // non-enhanced (if any) as the state backend. + + if !opts.ForceLocal { + log.Printf("[TRACE] Meta.Backend: backend %T does not support operations, so wrapping it in a local backend", b) + } + + // Build the local backend + local := backendLocal.NewWithBackend(b, enc) + if err := local.CLIInit(cliOpts); err != nil { + // Local backend isn't allowed to fail. It would be a bug. + panic(err) + } + + // If we got here from backendFromConfig returning nil then m.backendState + // won't be set, since that codepath considers that to be no backend at all, + // but our caller considers that to be the local backend with no config + // and so we'll synthesize a backend state so other code doesn't need to + // care about this special case. 
+ // + // FIXME: We should refactor this so that we more directly and explicitly + // treat the local backend as the default, including in the UI shown to + // the user, since the local backend should only be used when learning or + // in exceptional cases and so it's better to help the user learn that + // by introducing it as a concept. + if m.backendState == nil { + // NOTE: This synthetic object is intentionally _not_ retained in the + // on-disk record of the backend configuration, which was already dealt + // with inside backendFromConfig, because we still need that codepath + // to be able to recognize the lack of a config as distinct from + // explicitly setting local until we do some more refactoring here. + m.backendState = &legacy.BackendState{ + Type: "local", + ConfigRaw: json.RawMessage("{}"), + } + } + + return local, nil +} + +// selectWorkspace gets a list of existing workspaces and then checks +// if the currently selected workspace is valid. If not, it will ask +// the user to select a workspace from the list. +func (m *Meta) selectWorkspace(b backend.Backend) error { + workspaces, err := b.Workspaces() + if err == backend.ErrWorkspacesNotSupported { + return nil + } + if err != nil { + return fmt.Errorf("Failed to get existing workspaces: %w", err) + } + if len(workspaces) == 0 { + if c, ok := b.(*cloud.Cloud); ok && m.input { + // len is always 1 if using Name; 0 means we're using Tags and there + // aren't any matching workspaces. 
Which might be normal and fine, so + // let's just ask: + name, err := m.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "create-workspace", + Query: "\n[reset][bold][yellow]No workspaces found.[reset]", + Description: fmt.Sprintf(inputCloudInitCreateWorkspace, strings.Join(c.WorkspaceMapping.Tags, ", ")), + }) + if err != nil { + return fmt.Errorf("Couldn't create initial workspace: %w", err) + } + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("Couldn't create initial workspace: no name provided") + } + log.Printf("[TRACE] Meta.selectWorkspace: selecting the new TFC workspace requested by the user (%s)", name) + return m.SetWorkspace(name) + } else { + return fmt.Errorf(strings.TrimSpace(errBackendNoExistingWorkspaces)) + } + } + + // Get the currently selected workspace. + workspace, err := m.Workspace() + if err != nil { + return err + } + + // Check if any of the existing workspaces matches the selected + // workspace and create a numbered list of existing workspaces. + var list strings.Builder + for i, w := range workspaces { + if w == workspace { + log.Printf("[TRACE] Meta.selectWorkspace: the currently selected workspace is present in the configured backend (%s)", workspace) + return nil + } + fmt.Fprintf(&list, "%d. %s\n", i+1, w) + } + + // If the backend only has a single workspace, select that as the current workspace + if len(workspaces) == 1 { + log.Printf("[TRACE] Meta.selectWorkspace: automatically selecting the single workspace provided by the backend (%s)", workspaces[0]) + return m.SetWorkspace(workspaces[0]) + } + + if !m.input { + return fmt.Errorf("Currently selected workspace %q does not exist", workspace) + } + + // Otherwise, ask the user to select a workspace from the list of existing workspaces. 
+ v, err := m.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "select-workspace", + Query: fmt.Sprintf( + "\n[reset][bold][yellow]The currently selected workspace (%s) does not exist.[reset]", + workspace), + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendSelectWorkspace), list.String()), + }) + if err != nil { + return fmt.Errorf("Failed to select workspace: %w", err) + } + + idx, err := strconv.Atoi(v) + if err != nil || (idx < 1 || idx > len(workspaces)) { + return fmt.Errorf("Failed to select workspace: input not a valid number") + } + + workspace = workspaces[idx-1] + log.Printf("[TRACE] Meta.selectWorkspace: setting the current workspace according to user selection (%s)", workspace) + return m.SetWorkspace(workspace) +} + +// BackendForLocalPlan is similar to Backend, but uses backend settings that were +// stored in a plan. +// +// The current workspace name is also stored as part of the plan, and so this +// method will check that it matches the currently-selected workspace name +// and produce error diagnostics if not. 
+func (m *Meta) BackendForLocalPlan(settings plans.Backend, enc encryption.StateEncryption) (backend.Enhanced, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + f := backendInit.Backend(settings.Type) + if f == nil { + diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), settings.Type)) + return nil, diags + } + b := f(enc) + log.Printf("[TRACE] Meta.BackendForLocalPlan: instantiated backend of type %T", b) + + schema := b.ConfigSchema() + configVal, err := settings.Config.Decode(schema.ImpliedType()) + if err != nil { + diags = diags.Append(fmt.Errorf("saved backend configuration is invalid: %w", err)) + return nil, diags + } + + newVal, validateDiags := b.PrepareConfig(configVal) + diags = diags.Append(validateDiags) + if validateDiags.HasErrors() { + return nil, diags + } + + configureDiags := b.Configure(newVal) + diags = diags.Append(configureDiags) + if configureDiags.HasErrors() { + return nil, diags + } + + // If the backend supports CLI initialization, do it. + if cli, ok := b.(backend.CLI); ok { + cliOpts, err := m.backendCLIOpts() + if err != nil { + diags = diags.Append(err) + return nil, diags + } + if err := cli.CLIInit(cliOpts); err != nil { + diags = diags.Append(fmt.Errorf( + "Error initializing backend %T: %w\n\n"+ + "This is a bug; please report it to the backend developer", + b, err, + )) + return nil, diags + } + } + + // If the result of loading the backend is an enhanced backend, + // then return that as-is. This works even if b == nil (it will be !ok). + if enhanced, ok := b.(backend.Enhanced); ok { + log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b) + if err := m.setupEnhancedBackendAliases(enhanced); err != nil { + diags = diags.Append(err) + return nil, diags + } + return enhanced, nil + } + + // Otherwise, we'll wrap our state-only remote backend in the local backend + // to cause any operations to be run locally. 
+ log.Printf("[TRACE] Meta.BackendForLocalPlan: backend %T does not support operations, so wrapping it in a local backend", b) + cliOpts, err := m.backendCLIOpts() + if err != nil { + diags = diags.Append(err) + return nil, diags + } + cliOpts.Validation = false // don't validate here in case config contains file(...) calls where the file doesn't exist + local := backendLocal.NewWithBackend(b, enc) + if err := local.CLIInit(cliOpts); err != nil { + // Local backend should never fail, so this is always a bug. + panic(err) + } + + return local, diags +} + +// backendCLIOpts returns a backend.CLIOpts object that should be passed to +// a backend that supports local CLI operations. +func (m *Meta) backendCLIOpts() (*backend.CLIOpts, error) { + contextOpts, err := m.contextOpts() + if contextOpts == nil && err != nil { + return nil, err + } + return &backend.CLIOpts{ + CLI: m.Ui, + CLIColor: m.Colorize(), + Streams: m.Streams, + StatePath: m.statePath, + StateOutPath: m.stateOutPath, + StateBackupPath: m.backupPath, + ContextOpts: contextOpts, + Input: m.Input(), + RunningInAutomation: m.RunningInAutomation, + }, err +} + +// Operation initializes a new backend.Operation struct. +// +// This prepares the operation. After calling this, the caller is expected +// to modify fields of the operation such as Sequence to specify what will +// be called. +func (m *Meta) Operation(b backend.Backend, vt arguments.ViewType, enc encryption.Encryption) *backend.Operation { + schema := b.ConfigSchema() + workspace, err := m.Workspace() + if err != nil { + // An invalid workspace error would have been raised when creating the + // backend, and the caller should have already exited. Seeing the error + // here first is a bug, so panic. 
+ panic(fmt.Sprintf("invalid workspace: %s", err)) + } + planOutBackend, err := m.backendState.ForPlan(schema, workspace) + if err != nil { + // Always indicates an implementation error in practice, because + // errors here indicate invalid encoding of the backend configuration + // in memory, and we should always have validated that by the time + // we get here. + panic(fmt.Sprintf("failed to encode backend configuration for plan: %s", err)) + } + + stateLocker := clistate.NewNoopLocker() + if m.stateLock { + view := views.NewStateLocker(vt, m.View) + stateLocker = clistate.NewLocker(m.stateLockTimeout, view) + } + + depLocks, diags := m.lockedDependencies() + if diags.HasErrors() { + // We can't actually report errors from here, but m.lockedDependencies + // should always have been called earlier to prepare the "ContextOpts" + // for the backend anyway, so we should never actually get here in + // a real situation. If we do get here then the backend will inevitably + // fail downstream somwhere if it tries to use the empty depLocks. 
+ log.Printf("[WARN] Failed to load dependency locks while preparing backend operation (ignored): %s", diags.Err().Error()) + } + + return &backend.Operation{ + Encryption: enc, + PlanOutBackend: planOutBackend, + Targets: m.targets, + UIIn: m.UIInput(), + UIOut: m.Ui, + Workspace: workspace, + StateLocker: stateLocker, + DependencyLocks: depLocks, + } +} + +// backendConfig returns the local configuration for the backend +func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if opts.Config == nil { + // check if the config was missing, or just not required + conf, moreDiags := m.loadBackendConfig(".") + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, 0, diags + } + + if conf == nil { + log.Println("[TRACE] Meta.Backend: no config given or present on disk, so returning nil config") + return nil, 0, nil + } + + log.Printf("[TRACE] Meta.Backend: BackendOpts.Config not set, so using settings loaded from %s", conf.DeclRange) + opts.Config = conf + } + + c := opts.Config + + if c == nil { + log.Println("[TRACE] Meta.Backend: no explicit backend config, so returning nil config") + return nil, 0, nil + } + + bf := backendInit.Backend(c.Type) + if bf == nil { + detail := fmt.Sprintf("There is no backend type named %q.", c.Type) + if msg, removed := backendInit.RemovedBackends[c.Type]; removed { + detail = msg + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid backend type", + Detail: detail, + Subject: &c.TypeRange, + }) + return nil, 0, diags + } + b := bf(nil) // Just using this for config/schema, don't need encryption here + + configSchema := b.ConfigSchema() + configBody := c.Config + configHash, cfgDiags := c.Hash(configSchema) + diags = diags.Append(cfgDiags) + if diags.HasErrors() { + return nil, 0, diags + } + + // If we have an override configuration body then we must apply it now. 
+ if opts.ConfigOverride != nil { + log.Println("[TRACE] Meta.Backend: merging -backend-config=... CLI overrides into backend configuration") + configBody = configs.MergeBodies(configBody, opts.ConfigOverride) + } + + log.Printf("[TRACE] Meta.Backend: built configuration for %q backend with hash value %d", c.Type, configHash) + + // We'll shallow-copy configs.Backend here so that we can replace the + // body without affecting others that hold this reference. + configCopy := *c + configCopy.Config = configBody + return &configCopy, configHash, diags +} + +// backendFromConfig returns the initialized (not configured) backend +// directly from the config/state.. +// +// This function handles various edge cases around backend config loading. For +// example: new config changes, backend type changes, etc. +// +// As of the 0.12 release it can no longer migrate from legacy remote state +// to backends, and will instead instruct users to use 0.11 or earlier as +// a stepping-stone to do that migration. +// +// This function may query the user for input unless input is disabled, in +// which case this function will error. +func (m *Meta) backendFromConfig(opts *BackendOpts, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) { + // Get the local backend configuration. + c, cHash, diags := m.backendConfig(opts) + if diags.HasErrors() { + return nil, diags + } + + // ------------------------------------------------------------------------ + // For historical reasons, current backend configuration for a working + // directory is kept in a *state-like* file, using the legacy state + // structures in the OpenTofu package. It is not actually a OpenTofu + // state, and so only the "backend" portion of it is actually used. 
+ // + // The remainder of this code often confusingly refers to this as a "state", + // so it's unfortunately important to remember that this is not actually + // what we _usually_ think of as "state", and is instead a local working + // directory "backend configuration state" that is never persisted anywhere. + // + // Since the "real" state has since moved on to be represented by + // states.State, we can recognize the special meaning of state that applies + // to this function and its callees by their continued use of the + // otherwise-obsolete tofu.State. + // ------------------------------------------------------------------------ + + // Get the path to where we store a local cache of backend configuration + // if we're using a remote backend. This may not yet exist which means + // we haven't used a non-local backend before. That is okay. + statePath := filepath.Join(m.DataDir(), DefaultStateFilename) + sMgr := &clistate.LocalState{Path: statePath} + if err := sMgr.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf("Failed to load state: %w", err)) + return nil, diags + } + + // Load the state, it must be non-nil for the tests below but can be empty + s := sMgr.State() + if s == nil { + log.Printf("[TRACE] Meta.Backend: backend has not previously been initialized in this working directory") + s = legacy.NewState() + } else if s.Backend != nil { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized for %q backend", s.Backend.Type) + } else { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized but has no backend (is using legacy remote state?)") + } + + // if we want to force reconfiguration of the backend, we set the backend + // state to nil on this copy. This will direct us through the correct + // configuration path in the switch statement below. 
+ if m.reconfigure { + s.Backend = nil + } + + // Upon return, we want to set the state we're using in-memory so that + // we can access it for commands. + m.backendState = nil + defer func() { + if s := sMgr.State(); s != nil && !s.Backend.Empty() { + m.backendState = s.Backend + } + }() + + if !s.Remote.Empty() { + // Legacy remote state is no longer supported. User must first + // migrate with Terraform 0.11 or earlier. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Legacy remote state not supported", + "This working directory is configured for legacy remote state, which is no longer supported from Terraform v0.12 onwards, and thus not supported by OpenTofu, either. To migrate this environment, first run \"terraform init\" under a Terraform 0.11 release, and then upgrade to OpenTofu.", + )) + return nil, diags + } + + // This switch statement covers all the different combinations of + // configuring new backends, updating previously-configured backends, etc. + switch { + // No configuration set at all. Pure local state. 
+ case c == nil && s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: using default local state only (no backend configuration, and no existing initialized backend)") + return nil, nil + + // We're unsetting a backend (moving from backend => local) + case c == nil && !s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: previously-initialized %q backend is no longer present in config", s.Backend.Type) + + initReason := fmt.Sprintf("Unsetting the previously set backend %q", s.Backend.Type) + if !opts.Init { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Backend initialization required, please run \"tofu init\"", + fmt.Sprintf(strings.TrimSpace(errBackendInit), initReason), + )) + return nil, diags + } + + if s.Backend.Type != "cloud" && !m.migrateState { + diags = diags.Append(migrateOrReconfigDiag) + return nil, diags + } + + return m.backend_c_r_S(c, cHash, sMgr, true, opts, enc) + + // Configuring a backend for the first time or -reconfigure flag was used + case c != nil && s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: moving from default local state only to %q backend", c.Type) + if !opts.Init { + if c.Type == "cloud" { + initReason := "Initial configuration of cloud backend" + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cloud backend initialization required: please run \"tofu init\"", + fmt.Sprintf(strings.TrimSpace(errBackendInitCloud), initReason), + )) + } else { + initReason := fmt.Sprintf("Initial configuration of the requested backend %q", c.Type) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Backend initialization required, please run \"tofu init\"", + fmt.Sprintf(strings.TrimSpace(errBackendInit), initReason), + )) + } + return nil, diags + } + return m.backend_C_r_s(c, cHash, sMgr, opts, enc) + // Potentially changing a backend configuration + case c != nil && !s.Backend.Empty(): + // We are not going to migrate if... 
+ // + // We're not initializing + // AND the backend cache hash values match, indicating that the stored config is valid and completely unchanged. + // AND we're not providing any overrides. An override can mean a change overriding an unchanged backend block (indicated by the hash value). + if (uint64(cHash) == s.Backend.Hash) && (!opts.Init || opts.ConfigOverride == nil) { + log.Printf("[TRACE] Meta.Backend: using already-initialized, unchanged %q backend configuration", c.Type) + savedBackend, diags := m.savedBackend(sMgr, enc) + // Verify that selected workspace exist. Otherwise prompt user to create one + if opts.Init && savedBackend != nil { + if err := m.selectWorkspace(savedBackend); err != nil { + diags = diags.Append(err) + return nil, diags + } + } + return savedBackend, diags + } + + // If our configuration (the result of both the literal configuration and given + // -backend-config options) is the same, then we're just initializing a previously + // configured backend. The literal configuration may differ, however, so while we + // don't need to migrate, we update the backend cache hash value. + if !m.backendConfigNeedsMigration(c, s.Backend) { + log.Printf("[TRACE] Meta.Backend: using already-initialized %q backend configuration", c.Type) + savedBackend, moreDiags := m.savedBackend(sMgr, enc) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags + } + + // It's possible for a backend to be unchanged, and the config itself to + // have changed by moving a parameter from the config to `-backend-config` + // In this case, we update the Hash. + moreDiags = m.updateSavedBackendHash(cHash, sMgr) + if moreDiags.HasErrors() { + return nil, diags + } + // Verify that selected workspace exist. 
Otherwise prompt user to create one + if opts.Init && savedBackend != nil { + if err := m.selectWorkspace(savedBackend); err != nil { + diags = diags.Append(err) + return nil, diags + } + } + + return savedBackend, diags + } + log.Printf("[TRACE] Meta.Backend: backend configuration has changed (from type %q to type %q)", s.Backend.Type, c.Type) + + cloudMode := cloud.DetectConfigChangeType(s.Backend, c, false) + + if !opts.Init { + //user ran another cmd that is not init but they are required to initialize because of a potential relevant change to their backend configuration + initDiag := m.determineInitReason(s.Backend.Type, c.Type, cloudMode) + diags = diags.Append(initDiag) + return nil, diags + } + + if !cloudMode.InvolvesCloud() && !m.migrateState { + diags = diags.Append(migrateOrReconfigDiag) + return nil, diags + } + + log.Printf("[WARN] backend config has changed since last init") + return m.backend_C_r_S_changed(c, cHash, sMgr, true, opts, enc) + + default: + diags = diags.Append(fmt.Errorf( + "Unhandled backend configuration state. This is a bug. 
Please\n"+
+			"report this error with the following information.\n\n"+
+			"Config Nil: %v\n"+
+			"Saved Backend Empty: %v\n",
+			c == nil, s.Backend.Empty(),
+		))
+		return nil, diags
+	}
+}
+
+// determineInitReason builds the "reinitialization required" diagnostics
+// returned when a command other than "tofu init" detects a backend
+// configuration change. The wording depends on whether the change involves
+// the cloud backend (migration in/out, in-place cloud change) or an ordinary
+// backend type/config change.
+func (m *Meta) determineInitReason(previousBackendType string, currentBackendType string, cloudMode cloud.ConfigChangeMode) tfdiags.Diagnostics {
+	initReason := ""
+	switch cloudMode {
+	case cloud.ConfigMigrationIn:
+		initReason = fmt.Sprintf("Changed from backend %q to cloud backend", previousBackendType)
+	case cloud.ConfigMigrationOut:
+		// Fix: when migrating out of cloud the relevant type is the NEW
+		// backend (currentBackendType). Using previousBackendType here
+		// always printed `backend "cloud"`, which is wrong.
+		initReason = fmt.Sprintf("Changed from cloud backend to backend %q", currentBackendType)
+	case cloud.ConfigChangeInPlace:
+		initReason = "Cloud backend configuration block has changed"
+	default:
+		switch {
+		case previousBackendType != currentBackendType:
+			initReason = fmt.Sprintf("Backend type changed from %q to %q", previousBackendType, currentBackendType)
+		default:
+			initReason = "Backend configuration block has changed"
+		}
+	}
+
+	var diags tfdiags.Diagnostics
+	switch cloudMode {
+	case cloud.ConfigChangeInPlace, cloud.ConfigMigrationIn:
+		// In-place cloud changes and migrations into cloud both get the
+		// cloud-specific guidance text (the two branches were identical).
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Cloud backend initialization required: please run \"tofu init\"",
+			fmt.Sprintf(strings.TrimSpace(errBackendInitCloud), initReason),
+		))
+	default:
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Backend initialization required: please run \"tofu init\"",
+			fmt.Sprintf(strings.TrimSpace(errBackendInit), initReason),
+		))
+	}
+
+	return diags
+}
+
+// backendFromState returns the initialized (not configured) backend directly
+// from the backend state. This should be used only when a user runs
+// `tofu init -backend=false`. This function returns a local backend if
+// there is no backend state or no backend configured.
+func (m *Meta) backendFromState(ctx context.Context, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	// Get the path to where we store a local cache of backend configuration
+	// if we're using a remote backend. This may not yet exist which means
+	// we haven't used a non-local backend before. That is okay.
+	statePath := filepath.Join(m.DataDir(), DefaultStateFilename)
+	sMgr := &clistate.LocalState{Path: statePath}
+	if err := sMgr.RefreshState(); err != nil {
+		diags = diags.Append(fmt.Errorf("Failed to load state: %w", err))
+		return nil, diags
+	}
+	s := sMgr.State()
+	if s == nil {
+		// no state, so return a local backend
+		log.Printf("[TRACE] Meta.Backend: backend has not previously been initialized in this working directory")
+		return backendLocal.New(enc), diags
+	}
+	if s.Backend == nil {
+		// s.Backend is nil, so return a local backend
+		log.Printf("[TRACE] Meta.Backend: working directory was previously initialized but has no backend (is using legacy remote state?)")
+		return backendLocal.New(enc), diags
+	}
+	log.Printf("[TRACE] Meta.Backend: working directory was previously initialized for %q backend", s.Backend.Type)
+
+	// An empty saved backend type is also treated as local state.
+	if s.Backend.Type == "" {
+		return backendLocal.New(enc), diags
+	}
+	f := backendInit.Backend(s.Backend.Type)
+	if f == nil {
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type))
+		return nil, diags
+	}
+	b := f(enc)
+
+	// The configuration saved in the working directory state file is used
+	// in this case, since it will contain any additional values that
+	// were provided via -backend-config arguments on tofu init.
+	schema := b.ConfigSchema()
+	configVal, err := s.Backend.Config(schema)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to decode current backend config",
+			fmt.Sprintf("The backend configuration created by the most recent run of \"tofu init\" could not be decoded: %s. The configuration may have been initialized by an earlier version that used an incompatible configuration structure. Run \"tofu init -reconfigure\" to force re-initialization of the backend.", err),
+		))
+		return nil, diags
+	}
+
+	// Validate the config and then configure the backend
+	newVal, validDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validDiags)
+	if validDiags.HasErrors() {
+		return nil, diags
+	}
+
+	configDiags := b.Configure(newVal)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, diags
+	}
+
+	// If the result of loading the backend is an enhanced backend,
+	// then set up enhanced backend service aliases.
+	if enhanced, ok := b.(backend.Enhanced); ok {
+		log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b)
+
+		if err := m.setupEnhancedBackendAliases(enhanced); err != nil {
+			diags = diags.Append(err)
+			return nil, diags
+		}
+	}
+
+	return b, diags
+}
+
+//-------------------------------------------------------------------
+// Backend Config Scenarios
+//
+// The functions below cover handling all the various scenarios that
+// can exist when loading a backend. They are named in the format of
+// "backend_C_R_S" where C, R, S may be upper or lowercase. Lowercase
+// means it is false, uppercase means it is true. The full set of eight
+// possible cases is handled.
+//
+// The fields are:
+//
+// * C - Backend configuration is set and changed in TF files
+// * R - Legacy remote state is set
+// * S - Backend configuration is set in the state
+//
+//-------------------------------------------------------------------
+
+// Unconfiguring a backend (moving from backend => local). 
+func (m *Meta) backend_c_r_S(
+	c *configs.Backend, cHash int, sMgr *clistate.LocalState, output bool, opts *BackendOpts, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) {
+
+	var diags tfdiags.Diagnostics
+
+	vt := arguments.ViewJSON
+	// Set default viewtype if none was set as the StateLocker needs to know exactly
+	// what viewType we want to have.
+	if opts == nil || opts.ViewType != vt {
+		vt = arguments.ViewHuman
+	}
+
+	s := sMgr.State()
+
+	cloudMode := cloud.DetectConfigChangeType(s.Backend, c, false)
+	diags = diags.Append(m.assertSupportedCloudInitOptions(cloudMode))
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	// Get the backend type for output
+	backendType := s.Backend.Type
+
+	if cloudMode == cloud.ConfigMigrationOut {
+		m.Ui.Output("Migrating from cloud backend to local state.")
+	} else {
+		m.Ui.Output(fmt.Sprintf(strings.TrimSpace(outputBackendMigrateLocal), s.Backend.Type))
+	}
+
+	// Grab a purely local backend to get the local state if it exists
+	localB, moreDiags := m.Backend(&BackendOpts{ForceLocal: true, Init: true}, enc)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return nil, diags
+	}
+
+	// Initialize the configured backend
+	b, moreDiags := m.savedBackend(sMgr, enc)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return nil, diags
+	}
+
+	// Perform the migration from the saved backend back to local state.
+	err := m.backendMigrateState(&backendMigrateOpts{
+		SourceType:      s.Backend.Type,
+		DestinationType: "local",
+		Source:          b,
+		Destination:     localB,
+		ViewType:        vt,
+	})
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
+	}
+
+	// Remove the stored metadata so future runs use pure local state.
+	s.Backend = nil
+	if err := sMgr.WriteState(s); err != nil {
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err))
+		return nil, diags
+	}
+	if err := sMgr.PersistState(); err != nil {
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err))
+		return nil, diags
+	}
+
+	// The output flag suppresses the success message for callers that print
+	// their own summary.
+	if output {
+		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
+			"[reset][green]\n\n"+
+				strings.TrimSpace(successBackendUnset), backendType)))
+	}
+
+	// Return no backend
+	return nil, diags
+}
+
+// Configuring a backend for the first time.
+func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *clistate.LocalState, opts *BackendOpts, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	vt := arguments.ViewJSON
+	// Set default viewtype if none was set as the StateLocker needs to know exactly
+	// what viewType we want to have.
+	if opts == nil || opts.ViewType != vt {
+		vt = arguments.ViewHuman
+	}
+
+	// Grab a purely local backend to get the local state if it exists
+	localB, localBDiags := m.Backend(&BackendOpts{ForceLocal: true, Init: true}, enc)
+	if localBDiags.HasErrors() {
+		diags = diags.Append(localBDiags)
+		return nil, diags
+	}
+
+	workspaces, err := localB.Workspaces()
+	if err != nil {
+		diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+		return nil, diags
+	}
+
+	// Collect every local workspace whose state is non-empty; these are the
+	// states that must be migrated into the newly configured backend.
+	var localStates []statemgr.Full
+	for _, workspace := range workspaces {
+		localState, err := localB.StateMgr(workspace)
+		if err != nil {
+			diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+			return nil, diags
+		}
+		if err := localState.RefreshState(); err != nil {
+			diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+			return nil, diags
+		}
+
+		// We only care about non-empty states. 
+		if localS := localState.State(); !localS.Empty() {
+			log.Printf("[TRACE] Meta.Backend: will need to migrate workspace states because of existing %q workspace", workspace)
+			localStates = append(localStates, localState)
+		} else {
+			log.Printf("[TRACE] Meta.Backend: ignoring local %q workspace because its state is empty", workspace)
+		}
+	}
+
+	cloudMode := cloud.DetectConfigChangeType(nil, c, len(localStates) > 0)
+	diags = diags.Append(m.assertSupportedCloudInitOptions(cloudMode))
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	// Get the backend
+	b, configVal, moreDiags := m.backendInitFromConfig(c, enc)
+	diags = diags.Append(moreDiags)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	if len(localStates) > 0 {
+		// Perform the migration of each non-empty local workspace state into
+		// the newly configured backend.
+		err = m.backendMigrateState(&backendMigrateOpts{
+			SourceType:      "local",
+			DestinationType: c.Type,
+			Source:          localB,
+			Destination:     b,
+			ViewType:        vt,
+		})
+		if err != nil {
+			diags = diags.Append(err)
+			return nil, diags
+		}
+
+		// we usually remove the local state after migration to prevent
+		// confusion, but adding a default local backend block to the config
+		// can get us here too. Don't delete our state if the old and new paths
+		// are the same.
+		erase := true
+		if newLocalB, ok := b.(*backendLocal.Local); ok {
+			if localB, ok := localB.(*backendLocal.Local); ok {
+				if newLocalB.PathsConflictWith(localB) {
+					erase = false
+					log.Printf("[TRACE] Meta.Backend: both old and new backends share the same local state paths, so not erasing old state")
+				}
+			}
+		}
+
+		if erase {
+			log.Printf("[TRACE] Meta.Backend: removing old state snapshots from old backend")
+			for _, localState := range localStates {
+				// We always delete the local state, unless that was our new state too.
+				if err := localState.WriteState(nil); err != nil {
+					diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err))
+					return nil, diags
+				}
+				if err := localState.PersistState(nil); err != nil {
+					diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err))
+					return nil, diags
+				}
+			}
+		}
+	}
+
+	// Hold the state lock while writing the new backend metadata below.
+	if m.stateLock {
+		view := views.NewStateLocker(vt, m.View)
+		stateLocker := clistate.NewLocker(m.stateLockTimeout, view)
+		if d := stateLocker.Lock(sMgr, "backend from plan"); d != nil {
+			diags = diags.Append(fmt.Errorf("Error locking state: %s", d))
+			return nil, diags
+		}
+		defer stateLocker.Unlock()
+	}
+
+	configJSON, err := ctyjson.Marshal(configVal, b.ConfigSchema().ImpliedType())
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("Can't serialize backend configuration as JSON: %w", err))
+		return nil, diags
+	}
+
+	// Store the metadata in our saved state location
+	s := sMgr.State()
+	if s == nil {
+		s = legacy.NewState()
+	}
+	s.Backend = &legacy.BackendState{
+		Type:      c.Type,
+		ConfigRaw: json.RawMessage(configJSON),
+		Hash:      uint64(cHash),
+	}
+
+	// Verify that selected workspace exists in the backend.
+	if opts.Init && b != nil {
+		err := m.selectWorkspace(b)
+		if err != nil {
+			diags = diags.Append(err)
+
+			// FIXME: A compatibility oddity with the 'remote' backend.
+			// As an awkward legacy UX, when the remote backend is configured and there
+			// are no workspaces, the output to the user saying that there are none and
+			// the user should create one with 'workspace new' takes the form of an
+			// error message - even though it's happy path, expected behavior.
+			//
+			// Therefore, only return nil with errored diags for everything else, and
+			// allow the remote backend to continue and write its configuration to state
+			// even though no workspace is selected.
+			if c.Type != "remote" {
+				return nil, diags
+			}
+		}
+	}
+
+	if err := sMgr.WriteState(s); err != nil {
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
+	}
+	if err := sMgr.PersistState(); err != nil {
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
+	}
+
+	// By now the backend is successfully configured. If using Terraform Cloud, the success
+	// message is handled as part of the final init message
+	if _, ok := b.(*cloud.Cloud); !ok {
+		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
+			"[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type)))
+	}
+
+	return b, diags
+}
+
+// Changing a previously saved backend.
+func (m *Meta) backend_C_r_S_changed(c *configs.Backend, cHash int, sMgr *clistate.LocalState, output bool, opts *BackendOpts, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	vt := arguments.ViewJSON
+	// Set default viewtype if none was set as the StateLocker needs to know exactly
+	// what viewType we want to have. 
+	if opts == nil || opts.ViewType != vt {
+		vt = arguments.ViewHuman
+	}
+
+	// Get the old state
+	s := sMgr.State()
+
+	cloudMode := cloud.DetectConfigChangeType(s.Backend, c, false)
+	diags = diags.Append(m.assertSupportedCloudInitOptions(cloudMode))
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	if output {
+		// Notify the user
+		switch cloudMode {
+		case cloud.ConfigChangeInPlace:
+			m.Ui.Output("Cloud backend configuration has changed.")
+		case cloud.ConfigMigrationIn:
+			m.Ui.Output(fmt.Sprintf("Migrating from backend %q to cloud backend.", s.Backend.Type))
+		case cloud.ConfigMigrationOut:
+			m.Ui.Output(fmt.Sprintf("Migrating from cloud backend to backend %q.", c.Type))
+		default:
+			if s.Backend.Type != c.Type {
+				output := fmt.Sprintf(outputBackendMigrateChange, s.Backend.Type, c.Type)
+				m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
+					"[reset]%s\n",
+					strings.TrimSpace(output))))
+			} else {
+				m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
+					"[reset]%s\n",
+					strings.TrimSpace(outputBackendReconfigure))))
+			}
+		}
+	}
+
+	// Get the backend
+	b, configVal, moreDiags := m.backendInitFromConfig(c, enc)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return nil, diags
+	}
+
+	// If this is a migration into, out of, or irrelevant to Terraform Cloud
+	// mode then we will do state migration here. Otherwise, we just update
+	// the working directory initialization directly, because Terraform Cloud
+	// doesn't have configurable state storage anyway -- we're only changing
+	// which workspaces are relevant to this configuration, not where their
+	// state lives.
+	if cloudMode != cloud.ConfigChangeInPlace {
+		// Grab the existing backend
+		oldB, oldBDiags := m.savedBackend(sMgr, enc)
+		diags = diags.Append(oldBDiags)
+		if oldBDiags.HasErrors() {
+			return nil, diags
+		}
+
+		// Perform the migration from the old backend to the new one.
+		err := m.backendMigrateState(&backendMigrateOpts{
+			SourceType:      s.Backend.Type,
+			DestinationType: c.Type,
+			Source:          oldB,
+			Destination:     b,
+			ViewType:        vt,
+		})
+		if err != nil {
+			diags = diags.Append(err)
+			return nil, diags
+		}
+
+		if m.stateLock {
+			view := views.NewStateLocker(vt, m.View)
+			stateLocker := clistate.NewLocker(m.stateLockTimeout, view)
+			if d := stateLocker.Lock(sMgr, "backend from plan"); d != nil {
+				diags = diags.Append(fmt.Errorf("Error locking state: %s", d))
+				return nil, diags
+			}
+			defer stateLocker.Unlock()
+		}
+	}
+
+	configJSON, err := ctyjson.Marshal(configVal, b.ConfigSchema().ImpliedType())
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("Can't serialize backend configuration as JSON: %w", err))
+		return nil, diags
+	}
+
+	// Update the backend state
+	s = sMgr.State()
+	if s == nil {
+		s = legacy.NewState()
+	}
+	s.Backend = &legacy.BackendState{
+		Type:      c.Type,
+		ConfigRaw: json.RawMessage(configJSON),
+		Hash:      uint64(cHash),
+	}
+
+	// Verify that selected workspace exist. Otherwise prompt user to create one
+	if opts.Init && b != nil {
+		if err := m.selectWorkspace(b); err != nil {
+			diags = diags.Append(err)
+			// NOTE: the backend itself is still returned here (with errored
+			// diags), unlike the nil returns above.
+			return b, diags
+		}
+	}
+
+	if err := sMgr.WriteState(s); err != nil {
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
+	}
+	if err := sMgr.PersistState(); err != nil {
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
+	}
+
+	if output {
+		// By now the backend is successfully configured. If using Terraform Cloud, the success
+		// message is handled as part of the final init message
+		if _, ok := b.(*cloud.Cloud); !ok {
+			m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
+				"[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type)))
+		}
+	}
+
+	return b, diags
+}
+
+// Initializing a saved backend from the cache file (legacy state file)
+//
+// TODO: This is extremely similar to Meta.backendFromState() but for legacy reasons this is the
+// function used by the migration APIs within this file. The other handles 'init -backend=false',
+// specifically.
+func (m *Meta) savedBackend(sMgr *clistate.LocalState, enc encryption.StateEncryption) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	s := sMgr.State()
+
+	// Get the backend
+	f := backendInit.Backend(s.Backend.Type)
+	if f == nil {
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type))
+		return nil, diags
+	}
+	b := f(enc)
+
+	// The configuration saved in the working directory state file is used
+	// in this case, since it will contain any additional values that
+	// were provided via -backend-config arguments on tofu init.
+	schema := b.ConfigSchema()
+	configVal, err := s.Backend.Config(schema)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to decode current backend config",
+			fmt.Sprintf("The backend configuration created by the most recent run of \"tofu init\" could not be decoded: %s. The configuration may have been initialized by an earlier version that used an incompatible configuration structure. Run \"tofu init -reconfigure\" to force re-initialization of the backend.", err),
+		))
+		return nil, diags
+	}
+
+	// Validate the config and then configure the backend
+	newVal, validDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validDiags)
+	if validDiags.HasErrors() {
+		return nil, diags
+	}
+
+	configDiags := b.Configure(newVal)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, diags
+	}
+
+	// If the result of loading the backend is an enhanced backend,
+	// then set up enhanced backend service aliases.
+	if enhanced, ok := b.(backend.Enhanced); ok {
+		log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b)
+
+		if err := m.setupEnhancedBackendAliases(enhanced); err != nil {
+			diags = diags.Append(err)
+			return nil, diags
+		}
+	}
+
+	return b, diags
+}
+
+// updateSavedBackendHash overwrites the cached backend config hash with cHash
+// when they differ, so that future runs treat the current (literal) backend
+// configuration as already initialized.
+func (m *Meta) updateSavedBackendHash(cHash int, sMgr *clistate.LocalState) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	s := sMgr.State()
+
+	if s.Backend.Hash != uint64(cHash) {
+		s.Backend.Hash = uint64(cHash)
+		if err := sMgr.WriteState(s); err != nil {
+			diags = diags.Append(err)
+		}
+	}
+
+	return diags
+}
+
+//-------------------------------------------------------------------
+// Reusable helper functions for backend management
+//-------------------------------------------------------------------
+
+// backendConfigNeedsMigration returns true if migration might be required to
+// move from the configured backend to the given cached backend config.
+//
+// This must be called with the synthetic *configs.Backend that results from
+// merging in any command-line options for correct behavior.
+//
+// If either the given configuration or cached configuration are invalid then
+// this function will conservatively assume that migration is required,
+// expecting that the migration code will subsequently deal with the same
+// errors. 
+func (m *Meta) backendConfigNeedsMigration(c *configs.Backend, s *legacy.BackendState) bool {
+	if s == nil || s.Empty() {
+		log.Print("[TRACE] backendConfigNeedsMigration: no cached config, so migration is required")
+		return true
+	}
+	if c.Type != s.Type {
+		log.Printf("[TRACE] backendConfigNeedsMigration: type changed from %q to %q, so migration is required", s.Type, c.Type)
+		return true
+	}
+
+	// We need the backend's schema to do our comparison here.
+	f := backendInit.Backend(c.Type)
+	if f == nil {
+		log.Printf("[TRACE] backendConfigNeedsMigration: no backend of type %q, which migration codepath must handle", c.Type)
+		return true // let the migration codepath deal with the missing backend
+	}
+	b := f(nil) // We don't need encryption here as it's only used for config/schema
+
+	schema := b.ConfigSchema()
+	decSpec := schema.NoneRequired().DecoderSpec()
+	givenVal, diags := hcldec.Decode(c.Config, decSpec, nil)
+	if diags.HasErrors() {
+		log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode given config; migration codepath must handle problem: %s", diags.Error())
+		return true // let the migration codepath deal with these errors
+	}
+
+	cachedVal, err := s.Config(schema)
+	if err != nil {
+		log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode cached config; migration codepath must handle problem: %s", err)
+		return true // let the migration codepath deal with the error
+	}
+
+	// If we get all the way down here then it's the exact equality of the
+	// two decoded values that decides our outcome. It's safe to use RawEquals
+	// here (rather than Equals) because we know that unknown values can
+	// never appear in backend configurations.
+	if cachedVal.RawEquals(givenVal) {
+		log.Print("[TRACE] backendConfigNeedsMigration: given configuration matches cached configuration, so no migration is required")
+		return false
+	}
+	log.Print("[TRACE] backendConfigNeedsMigration: configuration values have changed, so migration is required")
+	return true
+}
+
+// backendInitFromConfig constructs and configures a backend from the given
+// configuration block, prompting for any missing input when interactive input
+// is enabled. It returns the backend together with the decoded config value
+// (as used before PrepareConfig) so callers can persist it.
+func (m *Meta) backendInitFromConfig(c *configs.Backend, enc encryption.StateEncryption) (backend.Backend, cty.Value, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	// Get the backend
+	f := backendInit.Backend(c.Type)
+	if f == nil {
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type))
+		return nil, cty.NilVal, diags
+	}
+	b := f(enc)
+
+	schema := b.ConfigSchema()
+	configVal, hclDiags := c.Decode(schema.NoneRequired())
+	diags = diags.Append(hclDiags)
+	if hclDiags.HasErrors() {
+		return nil, cty.NilVal, diags
+	}
+
+	if !configVal.IsWhollyKnown() {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Unknown values within backend definition",
+			"The `tofu` configuration block should contain only concrete and static values. Another diagnostic should contain more information about which part of the configuration is problematic."))
+		return nil, cty.NilVal, diags
+	}
+
+	// TODO: test
+	if m.Input() {
+		var err error
+		configVal, err = m.inputForSchema(configVal, schema)
+		if err != nil {
+			diags = diags.Append(fmt.Errorf("Error asking for input to configure backend %q: %w", c.Type, err))
+		}
+
+		// We get an unknown here if the user aborted input, but we can't
+		// turn that into a config value, so set it to null and let the provider
+		// handle it in PrepareConfig. 
+		if !configVal.IsKnown() {
+			configVal = cty.NullVal(configVal.Type())
+		}
+	}
+
+	// Validate, then configure the backend with the prepared value.
+	newVal, validateDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validateDiags.InConfigBody(c.Config, ""))
+	if validateDiags.HasErrors() {
+		return nil, cty.NilVal, diags
+	}
+
+	configureDiags := b.Configure(newVal)
+	diags = diags.Append(configureDiags.InConfigBody(c.Config, ""))
+
+	// If the result of loading the backend is an enhanced backend,
+	// then set up enhanced backend service aliases.
+	if enhanced, ok := b.(backend.Enhanced); ok {
+		log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b)
+		if err := m.setupEnhancedBackendAliases(enhanced); err != nil {
+			diags = diags.Append(err)
+			return nil, cty.NilVal, diags
+		}
+	}
+
+	// Note: the pre-PrepareConfig configVal is returned, not newVal.
+	return b, configVal, diags
+}
+
+// Helper method to get aliases from the enhanced backend and alias them
+// in the Meta service discovery. It's unfortunate that the Meta backend
+// is modifying the service discovery at this level, but the owner
+// of the service discovery pointer does not have easy access to the backend.
+func (m *Meta) setupEnhancedBackendAliases(b backend.Enhanced) error {
+	// Set up the service discovery aliases specified by the enhanced backend.
+	serviceAliases, err := b.ServiceDiscoveryAliases()
+	if err != nil {
+		return err
+	}
+
+	for _, alias := range serviceAliases {
+		m.Services.Alias(alias.From, alias.To)
+	}
+	return nil
+}
+
+// Helper method to ignore remote/cloud backend version conflicts. Only call this
+// for commands which cannot accidentally upgrade remote state files.
+// Backends that do not implement BackendWithRemoteTerraformVersion are a no-op.
+func (m *Meta) ignoreRemoteVersionConflict(b backend.Backend) {
+	if back, ok := b.(BackendWithRemoteTerraformVersion); ok {
+		back.IgnoreVersionConflict()
+	}
+}
+
+// Helper method to check the local OpenTofu version against the configured
+// version in the remote workspace, returning diagnostics if they conflict. 
+func (m *Meta) remoteVersionCheck(b backend.Backend, workspace string) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	// Backends that do not implement BackendWithRemoteTerraformVersion are
+	// skipped entirely and produce no diagnostics.
+	if back, ok := b.(BackendWithRemoteTerraformVersion); ok {
+		// Allow user override based on command-line flag
+		if m.ignoreRemoteVersion {
+			back.IgnoreVersionConflict()
+		}
+		// If the override is set, this check will return a warning instead of
+		// an error
+		versionDiags := back.VerifyWorkspaceTerraformVersion(workspace)
+		diags = diags.Append(versionDiags)
+		// If there are no errors resulting from this check, we do not need to
+		// check again
+		if !diags.HasErrors() {
+			back.IgnoreVersionConflict()
+		}
+	}
+
+	return diags
+}
+
+// assertSupportedCloudInitOptions returns diagnostics with errors if the
+// init-related command line options (implied inside the Meta receiver)
+// are incompatible with the given cloud configuration change mode.
+func (m *Meta) assertSupportedCloudInitOptions(mode cloud.ConfigChangeMode) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	if mode.InvolvesCloud() {
+		log.Printf("[TRACE] Meta.Backend: Cloud backend mode initialization type: %s", mode)
+		if m.reconfigure {
+			if mode.IsCloudMigration() {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid command-line option",
+					"The -reconfigure option is unsupported when migrating to cloud backend, because activating cloud backend involves some additional steps.",
+				))
+			} else {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid command-line option",
+					"The -reconfigure option is for in-place reconfiguration of state backends only, and is not needed when changing cloud backend settings.\n\nWhen using cloud backend, initialization automatically activates any new Cloud configuration settings.",
+				))
+			}
+		}
+		if m.migrateState {
+			name := "-migrate-state"
+			if m.forceInitCopy {
+				// -force copy implies -migrate-state in "tofu init",
+				// so m.migrateState is forced to true in this case even if
+				// the user didn't actually specify it. We'll use the other
+				// name here to avoid being confusing, then.
+				name = "-force-copy"
+			}
+			if mode.IsCloudMigration() {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid command-line option",
+					fmt.Sprintf("The %s option is for migration between state backends only, and is not applicable when using cloud backend.\n\nCloud backend migration has additional steps, configured by interactive prompts.", name),
+				))
+			} else {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Invalid command-line option",
+					fmt.Sprintf("The %s option is for migration between state backends only, and is not applicable when using cloud backend.\n\nState storage is handled automatically by cloud backend and so the state storage location is not configurable.", name),
+				))
+			}
+		}
+	}
+	return diags
+}
+
+//-------------------------------------------------------------------
+// Output constants and initialization code
+//
+// These are message templates; the fmt verbs (%s/%q/%w) are filled in
+// by fmt.Errorf/fmt.Sprintf at the call sites above.
+//-------------------------------------------------------------------
+
+const errBackendLocalRead = `
+Error reading local state: %w
+
+OpenTofu is trying to read your local state to determine if there is
+state to migrate to your newly configured backend. OpenTofu can't continue
+without this check because that would risk losing state. Please resolve the
+error above and try again.
+`
+
+const errBackendMigrateLocalDelete = `
+Error deleting local state after migration: %w
+
+Your local state is deleted after successfully migrating it to the newly
+configured backend. As part of the deletion process, a backup is made at
+the standard backup path unless explicitly asked not to. To cleanly operate
+with a backend, we must delete the local state file. Please resolve the
+issue above and retry the command.
+`
+
+const errBackendNewUnknown = `
+The backend %q could not be found.
+
+This is the backend specified in your OpenTofu configuration file.
+This error could be a simple typo in your configuration, but it can also +be caused by using a OpenTofu version that doesn't support the specified +backend type. Please check your configuration and your OpenTofu version. + +If you'd like to run OpenTofu and store state locally, you can fix this +error by removing the backend configuration from your configuration. +` + +const errBackendNoExistingWorkspaces = ` +No existing workspaces. + +Use the "tofu workspace" command to create and select a new workspace. +If the backend already contains existing workspaces, you may need to update +the backend configuration. +` + +const errBackendSavedUnknown = ` +The backend %q could not be found. + +This is the backend that this OpenTofu environment is configured to use +both in your configuration and saved locally as your last-used backend. +If it isn't found, it could mean an alternate version of OpenTofu was +used with this configuration. Please use the proper version of OpenTofu that +contains support for this backend. + +If you'd like to force remove this backend, you must update your configuration +to not use the backend and run "tofu init" (or any other command) again. +` + +const errBackendClearSaved = ` +Error clearing the backend configuration: %w + +OpenTofu removes the saved backend configuration when you're removing a +configured backend. This must be done so future OpenTofu runs know to not +use the backend configuration. Please look at the error above, resolve it, +and try again. +` + +const errBackendInit = ` +Reason: %s + +The "backend" is the interface that OpenTofu uses to store state, +perform operations, etc. If this message is showing up, it means that the +OpenTofu configuration you're using is using a custom configuration for +the OpenTofu backend. + +Changes to backend configurations require reinitialization. This allows +OpenTofu to set up the new configuration, copy existing state, etc. 
Please run +"tofu init" with either the "-reconfigure" or "-migrate-state" flags to +use the current configuration. + +If the change reason above is incorrect, please verify your configuration +hasn't changed and try again. At this point, no changes to your existing +configuration or state have been made. +` + +const errBackendInitCloud = ` +Reason: %s. + +Changes to the cloud backend configuration block require reinitialization, to discover any changes to the available workspaces. + +To re-initialize, run: + tofu init + +OpenTofu has not yet made changes to your existing configuration or state. +` + +const errBackendWriteSaved = ` +Error saving the backend configuration: %w + +OpenTofu saves the complete backend configuration in a local file for +configuring the backend on future operations. This cannot be disabled. Errors +are usually due to simple file permission errors. Please look at the error +above, resolve it, and try again. +` + +const outputBackendMigrateChange = ` +OpenTofu detected that the backend type changed from %q to %q. +` + +const outputBackendMigrateLocal = ` +OpenTofu has detected you're unconfiguring your previously set %q backend. +` + +const outputBackendReconfigure = ` +[reset][bold]Backend configuration changed![reset] + +OpenTofu has detected that the configuration specified for the backend +has changed. OpenTofu will now check for existing state in the backends. +` + +const inputCloudInitCreateWorkspace = ` +There are no workspaces with the configured tags (%s) +in your cloud backend organization. To finish initializing, OpenTofu needs at +least one workspace available. + +OpenTofu can create a properly tagged workspace for you now. Please enter a +name to create a new cloud backend workspace. +` + +const successBackendUnset = ` +Successfully unset the backend %q. OpenTofu will now operate locally. +` + +const successBackendSet = ` +Successfully configured the backend %q! 
OpenTofu will automatically +use this backend unless the backend configuration changes. +` + +var migrateOrReconfigDiag = tfdiags.Sourceless( + tfdiags.Error, + "Backend configuration changed", + "A change in the backend configuration has been detected, which may require migrating existing state.\n\n"+ + "If you wish to attempt automatic migration of the state, use \"tofu init -migrate-state\".\n"+ + `If you wish to store the current configuration with no changes to the state, use "tofu init -reconfigure".`) diff --git a/pkg/command/meta_backend_migrate.go b/pkg/command/meta_backend_migrate.go new file mode 100644 index 00000000000..437c95a6621 --- /dev/null +++ b/pkg/command/meta_backend_migrate.go @@ -0,0 +1,1124 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/backend/remote" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +type backendMigrateOpts struct { + SourceType, DestinationType string + Source, Destination backend.Backend + ViewType arguments.ViewType + + // Fields below are set internally when migrate is called + + sourceWorkspace string + destinationWorkspace string + force bool // if true, won't ask for confirmation +} + +// backendMigrateState handles migrating (copying) state from one backend +// to another. This function handles asking the user for confirmation +// as well as the copy itself. 
+// +// This function can handle all scenarios of state migration regardless +// of the existence of state in either backend. +// +// After migrating the state, the existing state in the first backend +// remains untouched. +// +// This will attempt to lock both states for the migration. +func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error { + log.Printf("[INFO] backendMigrateState: need to migrate from %q to %q backend config", opts.SourceType, opts.DestinationType) + // We need to check what the named state status is. If we're converting + // from multi-state to single-state for example, we need to handle that. + var sourceSingleState, destinationSingleState, sourceTFC, destinationTFC bool + + _, sourceTFC = opts.Source.(*cloud.Cloud) + _, destinationTFC = opts.Destination.(*cloud.Cloud) + + sourceWorkspaces, sourceSingleState, err := retrieveWorkspaces(opts.Source, opts.SourceType) + if err != nil { + return err + } + destinationWorkspaces, destinationSingleState, err := retrieveWorkspaces(opts.Destination, opts.SourceType) + if err != nil { + return err + } + + // Set up defaults + opts.sourceWorkspace = backend.DefaultStateName + opts.destinationWorkspace = backend.DefaultStateName + opts.force = m.forceInitCopy + + // Disregard remote OpenTofu version for the state source backend. If it's a + // Terraform Cloud remote backend, we don't care about the remote version, + // as we are migrating away and will not break a remote workspace. + m.ignoreRemoteVersionConflict(opts.Source) + + // Disregard remote OpenTofu version if instructed to do so via CLI flag. + if m.ignoreRemoteVersion { + m.ignoreRemoteVersionConflict(opts.Destination) + } else { + // Check the remote OpenTofu version for the state destination backend. If + // it's a Terraform Cloud remote backend, we want to ensure that we don't + // break the workspace by uploading an incompatible state file. 
+ for _, workspace := range destinationWorkspaces { + diags := m.remoteVersionCheck(opts.Destination, workspace) + if diags.HasErrors() { + return diags.Err() + } + } + // If there are no specified destination workspaces, perform a remote + // backend version check with the default workspace. + // Ensure that we are not dealing with Terraform Cloud migrations, as it + // does not support the default name. + if len(destinationWorkspaces) == 0 && !destinationTFC { + diags := m.remoteVersionCheck(opts.Destination, backend.DefaultStateName) + if diags.HasErrors() { + return diags.Err() + } + } + } + + // Determine migration behavior based on whether the source/destination + // supports multi-state. + switch { + case sourceTFC || destinationTFC: + return m.backendMigrateTFC(opts) + + // Single-state to single-state. This is the easiest case: we just + // copy the default state directly. + case sourceSingleState && destinationSingleState: + return m.backendMigrateState_s_s(opts) + + // Single-state to multi-state. This is easy since we just copy + // the default state and ignore the rest in the destination. + case sourceSingleState && !destinationSingleState: + return m.backendMigrateState_s_s(opts) + + // Multi-state to single-state. If the source has more than the default + // state this is complicated since we have to ask the user what to do. + case !sourceSingleState && destinationSingleState: + // If the source only has one state and it is the default, + // treat it as if it doesn't support multi-state. + if len(sourceWorkspaces) == 1 && sourceWorkspaces[0] == backend.DefaultStateName { + return m.backendMigrateState_s_s(opts) + } + + return m.backendMigrateState_S_s(opts) + + // Multi-state to multi-state. We merge the states together (migrating + // each from the source to the destination one by one). 
+ case !sourceSingleState && !destinationSingleState: + // If the source only has one state and it is the default, + // treat it as if it doesn't support multi-state. + if len(sourceWorkspaces) == 1 && sourceWorkspaces[0] == backend.DefaultStateName { + return m.backendMigrateState_s_s(opts) + } + + return m.backendMigrateState_S_S(opts) + } + + return nil +} + +//------------------------------------------------------------------- +// State Migration Scenarios +// +// The functions below cover handling all the various scenarios that +// can exist when migrating state. They are named in an immediately not +// obvious format but is simple: +// +// Format: backendMigrateState_s1_s2[_suffix] +// +// When s1 or s2 is lower case, it means that it is a single state backend. +// When either is uppercase, it means that state is a multi-state backend. +// The suffix is used to disambiguate multiple cases with the same type of +// states. +// +//------------------------------------------------------------------- + +// Multi-state to multi-state. 
+func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error { + log.Print("[INFO] backendMigrateState: migrating all named workspaces") + + migrate := opts.force + if !migrate { + var err error + // Ask the user if they want to migrate their existing remote state + migrate, err = m.confirm(&tofu.InputOpts{ + Id: "backend-migrate-multistate-to-multistate", + Query: fmt.Sprintf( + "Do you want to migrate all workspaces to %q?", + opts.DestinationType), + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendMigrateMultiToMulti), + opts.SourceType, opts.DestinationType), + }) + if err != nil { + return fmt.Errorf( + "Error asking for state migration action: %w", err) + } + } + if !migrate { + return fmt.Errorf("Migration aborted by user.") + } + + // Read all the states + sourceWorkspaces, err := opts.Source.Workspaces() + if err != nil { + return fmt.Errorf(strings.TrimSpace( + errMigrateLoadStates), opts.SourceType, err) + } + + // Sort the states so they're always copied alphabetically + sort.Strings(sourceWorkspaces) + + // Go through each and migrate + for _, name := range sourceWorkspaces { + // Copy the same names + opts.sourceWorkspace = name + opts.destinationWorkspace = name + + // Force it, we confirmed above + opts.force = true + + // Perform the migration + if err := m.backendMigrateState_s_s(opts); err != nil { + return fmt.Errorf(strings.TrimSpace( + errMigrateMulti), name, opts.SourceType, opts.DestinationType, err) + } + } + + return nil +} + +// Multi-state to single state. 
// backendMigrateState_S_s handles migration from a multi-state source to a
// single-state destination: only the currently selected workspace is copied,
// after asking the user to confirm (unless opts.force is set).
func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error {
	log.Printf("[INFO] backendMigrateState: destination backend type %q does not support named workspaces", opts.DestinationType)

	currentWorkspace, err := m.Workspace()
	if err != nil {
		return err
	}

	migrate := opts.force
	if !migrate {
		var err error
		// Ask the user if they want to migrate their existing remote state
		migrate, err = m.confirm(&tofu.InputOpts{
			Id: "backend-migrate-multistate-to-single",
			Query: fmt.Sprintf(
				"Destination state %q doesn't support workspaces.\n"+
					"Do you want to copy only your current workspace?",
				opts.DestinationType),
			Description: fmt.Sprintf(
				strings.TrimSpace(inputBackendMigrateMultiToSingle),
				opts.SourceType, opts.DestinationType, currentWorkspace),
		})
		if err != nil {
			return fmt.Errorf(
				"Error asking for state migration action: %w", err)
		}
	}

	if !migrate {
		return fmt.Errorf("Migration aborted by user.")
	}

	// Copy the default state
	opts.sourceWorkspace = currentWorkspace

	// now switch back to the default env so we can access the new backend
	m.SetWorkspace(backend.DefaultStateName)

	return m.backendMigrateState_s_s(opts)
}

// Single state to single state, assumed default state name.
//
// This is the workhorse all other scenarios delegate to: it refreshes both
// sides, skips the copy when it would be a no-op (same lineage / equal
// state), optionally takes locks on both states, asks the appropriate
// confirmation question, then copies source state over the destination.
func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error {
	log.Printf("[INFO] backendMigrateState: single-to-single migrating %q workspace to %q workspace", opts.sourceWorkspace, opts.destinationWorkspace)

	sourceState, err := opts.Source.StateMgr(opts.sourceWorkspace)
	if err != nil {
		return fmt.Errorf(strings.TrimSpace(
			errMigrateSingleLoadDefault), opts.SourceType, err)
	}
	if err := sourceState.RefreshState(); err != nil {
		return fmt.Errorf(strings.TrimSpace(
			errMigrateSingleLoadDefault), opts.SourceType, err)
	}

	// Do not migrate workspaces without state.
	if sourceState.State().Empty() {
		log.Print("[TRACE] backendMigrateState: source workspace has empty state, so nothing to migrate")
		return nil
	}

	destinationState, err := opts.Destination.StateMgr(opts.destinationWorkspace)
	if err == backend.ErrDefaultWorkspaceNotSupported {
		// If the backend doesn't support using the default state, we ask the user
		// for a new name and migrate the default state to the given named state.
		destinationState, err = func() (statemgr.Full, error) {
			log.Print("[TRACE] backendMigrateState: destination doesn't support a default workspace, so we must prompt for a new name")
			name, err := m.promptNewWorkspaceName(opts.DestinationType)
			if err != nil {
				return nil, err
			}

			// Update the name of the destination state.
			opts.destinationWorkspace = name

			destinationState, err := opts.Destination.StateMgr(opts.destinationWorkspace)
			if err != nil {
				return nil, err
			}

			// Ignore invalid workspace name as it is irrelevant in this context.
			workspace, _ := m.Workspace()

			// If the currently selected workspace is the default workspace, then set
			// the named workspace as the new selected workspace.
			if workspace == backend.DefaultStateName {
				if err := m.SetWorkspace(opts.destinationWorkspace); err != nil {
					return nil, fmt.Errorf("Failed to set new workspace: %w", err)
				}
			}

			return destinationState, nil
		}()
	}
	if err != nil {
		return fmt.Errorf(strings.TrimSpace(
			errMigrateSingleLoadDefault), opts.DestinationType, err)
	}
	if err := destinationState.RefreshState(); err != nil {
		return fmt.Errorf(strings.TrimSpace(
			errMigrateSingleLoadDefault), opts.DestinationType, err)
	}

	// Check if we need migration at all.
	// This is before taking a lock, because they may also correspond to the same lock.
	source := sourceState.State()
	destination := destinationState.State()

	// no reason to migrate if the state is already there
	if source.Equal(destination) {
		// Equal isn't identical; it doesn't check lineage.
		sm1, _ := sourceState.(statemgr.PersistentMeta)
		sm2, _ := destinationState.(statemgr.PersistentMeta)
		if source != nil && destination != nil {
			if sm1 == nil || sm2 == nil {
				log.Print("[TRACE] backendMigrateState: both source and destination workspaces have no state, so no migration is needed")
				return nil
			}
			if sm1.StateSnapshotMeta().Lineage == sm2.StateSnapshotMeta().Lineage {
				log.Printf("[TRACE] backendMigrateState: both source and destination workspaces have equal state with lineage %q, so no migration is needed", sm1.StateSnapshotMeta().Lineage)
				return nil
			}
		}
	}

	if m.stateLock {
		lockCtx := context.Background()
		vt := arguments.ViewJSON
		// Set default viewtype if none was set as the StateLocker needs to know exactly
		// what viewType we want to have.
		if opts == nil || opts.ViewType != vt {
			vt = arguments.ViewHuman
		}
		view := views.NewStateLocker(vt, m.View)
		locker := clistate.NewLocker(m.stateLockTimeout, view)

		lockerSource := locker.WithContext(lockCtx)
		if diags := lockerSource.Lock(sourceState, "migration source state"); diags.HasErrors() {
			return diags.Err()
		}
		defer lockerSource.Unlock()

		lockerDestination := locker.WithContext(lockCtx)
		if diags := lockerDestination.Lock(destinationState, "migration destination state"); diags.HasErrors() {
			return diags.Err()
		}
		defer lockerDestination.Unlock()

		// We now own a lock, so double check that we have the version
		// corresponding to the lock.
		log.Print("[TRACE] backendMigrateState: refreshing source workspace state")
		if err := sourceState.RefreshState(); err != nil {
			return fmt.Errorf(strings.TrimSpace(
				errMigrateSingleLoadDefault), opts.SourceType, err)
		}
		log.Print("[TRACE] backendMigrateState: refreshing destination workspace state")
		if err := destinationState.RefreshState(); err != nil {
			return fmt.Errorf(strings.TrimSpace(
				errMigrateSingleLoadDefault), opts.SourceType, err)
		}

		source = sourceState.State()
		destination = destinationState.State()
	}

	var confirmFunc func(statemgr.Full, statemgr.Full, *backendMigrateOpts) (bool, error)
	switch {
	// No migration necessary
	case source.Empty() && destination.Empty():
		log.Print("[TRACE] backendMigrateState: both source and destination workspaces have empty state, so no migration is required")
		return nil

	// No migration necessary if we're inheriting state.
	case source.Empty() && !destination.Empty():
		log.Print("[TRACE] backendMigrateState: source workspace has empty state, so no migration is required")
		return nil

	// We have existing state moving into no state. Ask the user if
	// they'd like to do this.
	case !source.Empty() && destination.Empty():
		if opts.SourceType == "cloud" || opts.DestinationType == "cloud" {
			// HACK: backendMigrateTFC has its own earlier prompt for
			// whether to migrate state in the cloud case, so we'll skip
			// this later prompt for Cloud, even though we do still need it
			// for state backends.
			confirmFunc = func(statemgr.Full, statemgr.Full, *backendMigrateOpts) (bool, error) {
				return true, nil // the answer is implied to be "yes" if we reached this point
			}
		} else {
			log.Print("[TRACE] backendMigrateState: destination workspace has empty state, so might copy source workspace state")
			confirmFunc = m.backendMigrateEmptyConfirm
		}

	// Both states are non-empty, meaning we need to determine which
	// state should be used and update accordingly.
	case !source.Empty() && !destination.Empty():
		log.Print("[TRACE] backendMigrateState: both source and destination workspaces have states, so might overwrite destination with source")
		confirmFunc = m.backendMigrateNonEmptyConfirm
	}

	if confirmFunc == nil {
		panic("confirmFunc must not be nil")
	}

	if !opts.force {
		// Abort if we can't ask for input.
		if !m.input {
			log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration")
			return errors.New(strings.TrimSpace(errInteractiveInputDisabled))
		}

		// Confirm with the user whether we want to copy state over
		confirm, err := confirmFunc(sourceState, destinationState, opts)
		if err != nil {
			log.Print("[TRACE] backendMigrateState: error reading input, so aborting migration")
			return err
		}
		if !confirm {
			log.Print("[TRACE] backendMigrateState: user cancelled at confirmation prompt, so aborting migration")
			return nil
		}
	}

	// Confirmed! We'll have the statemgr package handle the migration, which
	// includes preserving any lineage/serial information where possible, if
	// both managers support such metadata.
	log.Print("[TRACE] backendMigrateState: migration confirmed, so migrating")
	if err := statemgr.Migrate(destinationState, sourceState); err != nil {
		return fmt.Errorf(strings.TrimSpace(errBackendStateCopy),
			opts.SourceType, opts.DestinationType, err)
	}
	// The backend is currently handled before providers are installed during init,
	// so requiring schemas here could lead to a catch-22 where it requires some manual
	// intervention to proceed far enough for provider installation. To avoid this,
	// when migrating to TFC backend, the initial JSON variant of state won't be generated and stored.
	if err := destinationState.PersistState(nil); err != nil {
		return fmt.Errorf(strings.TrimSpace(errBackendStateCopy),
			opts.SourceType, opts.DestinationType, err)
	}

	// And we're done.
	return nil
}

// backendMigrateEmptyConfirm asks the user whether to copy existing source
// state into an empty destination, using a cloud-specific prompt when the
// destination is the cloud backend.
func (m *Meta) backendMigrateEmptyConfirm(source, destination statemgr.Full, opts *backendMigrateOpts) (bool, error) {
	var inputOpts *tofu.InputOpts
	if opts.DestinationType == "cloud" {
		inputOpts = &tofu.InputOpts{
			Id:          "backend-migrate-copy-to-empty-cloud",
			Query:       "Do you want to copy existing state to cloud backend?",
			Description: fmt.Sprintf(strings.TrimSpace(inputBackendMigrateEmptyCloud), opts.SourceType),
		}
	} else {
		inputOpts = &tofu.InputOpts{
			Id:    "backend-migrate-copy-to-empty",
			Query: "Do you want to copy existing state to the new backend?",
			Description: fmt.Sprintf(
				strings.TrimSpace(inputBackendMigrateEmpty),
				opts.SourceType, opts.DestinationType),
		}
	}

	return m.confirm(inputOpts)
}

// backendMigrateNonEmptyConfirm is used when BOTH states are non-empty: it
// writes both states to temporary files so the user can inspect them, then
// asks whether the destination should be overwritten with the source.
func (m *Meta) backendMigrateNonEmptyConfirm(
	sourceState, destinationState statemgr.Full, opts *backendMigrateOpts) (bool, error) {
	// We need to grab both states so we can write them to a file
	source := sourceState.State()
	destination := destinationState.State()

	// Save both to a temporary
	td, err := os.MkdirTemp("", "terraform")
	if err != nil {
		return false, fmt.Errorf("Error creating temporary directory: %w", err)
	}
	defer os.RemoveAll(td)

	// Helper to write the state
	saveHelper := func(n, path string, s *states.State) error {
		return statemgr.WriteAndPersist(statemgr.NewFilesystem(path, encryption.StateEncryptionDisabled()), s, nil)
	}

	// Write the states
	sourcePath := filepath.Join(td, fmt.Sprintf("1-%s.tfstate", opts.SourceType))
	destinationPath := filepath.Join(td, fmt.Sprintf("2-%s.tfstate", opts.DestinationType))
	if err := saveHelper(opts.SourceType, sourcePath, source); err != nil {
		return false, fmt.Errorf("Error saving temporary state: %w", err)
	}
	if err := saveHelper(opts.DestinationType, destinationPath, destination); err != nil {
		return false, fmt.Errorf("Error saving temporary state: %w", err)
	}

	// Ask for confirmation
	var inputOpts *tofu.InputOpts
+ if opts.DestinationType == "cloud" { + inputOpts = &tofu.InputOpts{ + Id: "backend-migrate-to-tfc", + Query: "Do you want to copy existing state to cloud backend?", + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendMigrateNonEmptyCloud), + opts.SourceType, sourcePath, destinationPath), + } + } else { + inputOpts = &tofu.InputOpts{ + Id: "backend-migrate-to-backend", + Query: "Do you want to copy existing state to the new backend?", + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendMigrateNonEmpty), + opts.SourceType, opts.DestinationType, sourcePath, destinationPath), + } + } + + // Confirm with the user that the copy should occur + return m.confirm(inputOpts) +} + +func retrieveWorkspaces(back backend.Backend, sourceType string) ([]string, bool, error) { + var singleState bool + var err error + workspaces, err := back.Workspaces() + if err == backend.ErrWorkspacesNotSupported { + singleState = true + err = nil + } + if err != nil { + return nil, singleState, fmt.Errorf(strings.TrimSpace( + errMigrateLoadStates), sourceType, err) + } + + return workspaces, singleState, err +} + +func (m *Meta) backendMigrateTFC(opts *backendMigrateOpts) error { + _, sourceTFC := opts.Source.(*cloud.Cloud) + cloudBackendDestination, destinationTFC := opts.Destination.(*cloud.Cloud) + + sourceWorkspaces, sourceSingleState, err := retrieveWorkspaces(opts.Source, opts.SourceType) + if err != nil { + return err + } + //to be used below, not yet implamented + // destinationWorkspaces, destinationSingleState + _, _, err = retrieveWorkspaces(opts.Destination, opts.SourceType) + if err != nil { + return err + } + + // from TFC to non-TFC backend + if sourceTFC && !destinationTFC { + // From Terraform Cloud to another backend. This is not yet implemented, and + // we recommend people to use the TFC API. 
+ return fmt.Errorf(strings.TrimSpace(errTFCMigrateNotYetImplemented)) + } + + // Everything below, by the above two conditionals, now assumes that the + // destination is always Terraform Cloud (TFC). + + sourceSingle := sourceSingleState || (len(sourceWorkspaces) == 1) + if sourceSingle { + if cloudBackendDestination.WorkspaceMapping.Strategy() == cloud.WorkspaceNameStrategy { + // If we know the name via WorkspaceNameStrategy, then set the + // destinationWorkspace to the new Name and skip the user prompt. Here the + // destinationWorkspace is not set to `default` thereby we will create it + // in TFC if it does not exist. + opts.destinationWorkspace = cloudBackendDestination.WorkspaceMapping.Name + } + + currentWorkspace, err := m.Workspace() + if err != nil { + return err + } + opts.sourceWorkspace = currentWorkspace + + log.Printf("[INFO] backendMigrateTFC: single-to-single migration from source %s to destination %q", opts.sourceWorkspace, opts.destinationWorkspace) + + // If the current workspace is has no state we do not need to ask + // if they want to migrate the state. + sourceState, err := opts.Source.StateMgr(currentWorkspace) + if err != nil { + return err + } + if err := sourceState.RefreshState(); err != nil { + return err + } + if sourceState.State().Empty() { + log.Printf("[INFO] backendMigrateTFC: skipping migration because source %s is empty", opts.sourceWorkspace) + return nil + } + + // Run normal single-to-single state migration. + // This will handle both situations where the new cloud backend + // configuration is using a workspace.name strategy or workspace.tags + // strategy. + // + // We do prompt first though, because state migration is mandatory + // for moving to Cloud and the user should get an opportunity to + // confirm that first. 
+ if migrate, err := m.promptSingleToCloudSingleStateMigration(opts); err != nil { + return err + } else if !migrate { + return nil //skip migrating but return successfully + } + + return m.backendMigrateState_s_s(opts) + } + + destinationTagsStrategy := cloudBackendDestination.WorkspaceMapping.Strategy() == cloud.WorkspaceTagsStrategy + destinationNameStrategy := cloudBackendDestination.WorkspaceMapping.Strategy() == cloud.WorkspaceNameStrategy + + multiSource := !sourceSingleState && len(sourceWorkspaces) > 1 + if multiSource && destinationNameStrategy { + currentWorkspace, err := m.Workspace() + if err != nil { + return err + } + + opts.sourceWorkspace = currentWorkspace + opts.destinationWorkspace = cloudBackendDestination.WorkspaceMapping.Name + if err := m.promptMultiToSingleCloudMigration(opts); err != nil { + return err + } + + log.Printf("[INFO] backendMigrateTFC: multi-to-single migration from source %s to destination %q", opts.sourceWorkspace, opts.destinationWorkspace) + + return m.backendMigrateState_s_s(opts) + } + + // Multiple sources, and using tags strategy. So migrate every source + // workspace over to new one, prompt for workspace name pattern (*), + // and start migrating, and create tags for each workspace. + if multiSource && destinationTagsStrategy { + log.Printf("[INFO] backendMigrateTFC: multi-to-multi migration from source workspaces %q", sourceWorkspaces) + return m.backendMigrateState_S_TFC(opts, sourceWorkspaces) + } + + // TODO(omar): after the check for sourceSingle is done, everything following + // it has to be multi. So rework the code to not need to check for multi, adn + // return m.backendMigrateState_S_TFC here. 
	return nil
}

// backendMigrateState_S_TFC migrates every workspace of a multi-state backend
// to Terraform Cloud, renaming workspaces according to a user-supplied (or
// remote-backend-derived) pattern, then reselects the current workspace.
func (m *Meta) backendMigrateState_S_TFC(opts *backendMigrateOpts, sourceWorkspaces []string) error {
	log.Print("[TRACE] backendMigrateState: migrating all named workspaces")

	currentWorkspace, err := m.Workspace()
	if err != nil {
		return err
	}
	newCurrentWorkspace := ""

	// This map is used later when doing the migration per source/destination.
	// If a source has 'default' and has state, then we ask what the new name should be.
	// And further down when we actually run state migration for each
	// source/destination workspace, we use this new name (where source is 'default')
	// and set as destinationWorkspace. If the default workspace does not have
	// state we will not prompt the user for a new name because empty workspaces
	// do not get migrated.
	defaultNewName := map[string]string{}
	for i := 0; i < len(sourceWorkspaces); i++ {
		if sourceWorkspaces[i] == backend.DefaultStateName {
			// For the default workspace we want to look to see if there is any state
			// before we ask for a workspace name to migrate the default workspace into.
			sourceState, err := opts.Source.StateMgr(backend.DefaultStateName)
			if err != nil {
				return fmt.Errorf(strings.TrimSpace(
					errMigrateSingleLoadDefault), opts.SourceType, err)
			}
			// RefreshState is what actually pulls the state to be evaluated.
			if err := sourceState.RefreshState(); err != nil {
				return fmt.Errorf(strings.TrimSpace(
					errMigrateSingleLoadDefault), opts.SourceType, err)
			}
			if !sourceState.State().Empty() {
				newName, err := m.promptNewWorkspaceName(opts.DestinationType)
				if err != nil {
					return err
				}
				defaultNewName[sourceWorkspaces[i]] = newName
			}
		}
	}

	// Fetch the pattern that will be used to rename the workspaces for Terraform Cloud.
	//
	// * For the general case, this will be a pattern provided by the user.
	//
	// * Specifically for a migration from the "remote" backend using 'prefix', we will
	//   instead 'migrate' the workspaces using a pattern based on the old prefix+name,
	//   not allowing a user to accidentally input the wrong pattern to line up with
	//   what the remote backend was already using before (which presumably already
	//   meets the naming considerations for Terraform Cloud).
	//   In other words, this is a fast-track migration path from the remote backend, retaining
	//   how things already are in Terraform Cloud with no user intervention needed.
	pattern := ""
	if remoteBackend, ok := opts.Source.(*remote.Remote); ok {
		if err := m.promptRemotePrefixToCloudTagsMigration(opts); err != nil {
			return err
		}
		pattern = remoteBackend.WorkspaceNamePattern()
		log.Printf("[TRACE] backendMigrateTFC: Remote backend reports workspace name pattern as: %q", pattern)
	}

	if pattern == "" {
		pattern, err = m.promptMultiStateMigrationPattern(opts.SourceType)
		if err != nil {
			return err
		}
	}

	// Go through each and migrate
	for _, name := range sourceWorkspaces {

		// Copy the same names
		opts.sourceWorkspace = name
		if newName, ok := defaultNewName[name]; ok {
			// this has to be done before setting destinationWorkspace
			name = newName
		}
		opts.destinationWorkspace = strings.Replace(pattern, "*", name, -1)

		// Force it, we confirmed above
		opts.force = true

		// Perform the migration
		log.Printf("[INFO] backendMigrateTFC: multi-to-multi migration, source workspace %q to destination workspace %q", opts.sourceWorkspace, opts.destinationWorkspace)
		if err := m.backendMigrateState_s_s(opts); err != nil {
			return fmt.Errorf(strings.TrimSpace(
				errMigrateMulti), name, opts.SourceType, opts.DestinationType, err)
		}

		if currentWorkspace == opts.sourceWorkspace {
			newCurrentWorkspace = opts.destinationWorkspace
		}
	}

	// After migrating multiple workspaces, we need to reselect the current workspace as it may
	// have been renamed. Query the backend first to be sure it now exists.
	workspaces, err := opts.Destination.Workspaces()
	if err != nil {
		return err
	}

	var workspacePresent bool
	for _, name := range workspaces {
		if name == newCurrentWorkspace {
			workspacePresent = true
		}
	}

	// If we couldn't select the workspace automatically from the backend (maybe it was empty
	// and wasn't migrated, for instance), ask the user to select one instead and be done.
	if !workspacePresent {
		if err = m.selectWorkspace(opts.Destination); err != nil {
			return err
		}
		return nil
	}

	// The newly renamed current workspace is present, so we'll automatically select it for the
	// user, as well as display the equivalent of 'workspace list' to show how the workspaces
	// were changed (as well as the newly selected current workspace).
	if err = m.SetWorkspace(newCurrentWorkspace); err != nil {
		return err
	}

	m.Ui.Output(m.Colorize().Color("[reset][bold]Migration complete! Your workspaces are as follows:[reset]"))
	var out bytes.Buffer
	for _, name := range workspaces {
		if name == newCurrentWorkspace {
			out.WriteString("* ")
		} else {
			out.WriteString("  ")
		}
		out.WriteString(name + "\n")
	}

	m.Ui.Output(out.String())

	return nil
}

// promptSingleToCloudSingleStateMigration asks the user to confirm a
// single-workspace migration into the cloud backend; opts.force skips the
// prompt. Returns whether migration should proceed.
func (m *Meta) promptSingleToCloudSingleStateMigration(opts *backendMigrateOpts) (bool, error) {
	if !m.input {
		log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration")
		return false, errors.New(strings.TrimSpace(errInteractiveInputDisabled))
	}
	migrate := opts.force
	if !migrate {
		var err error
		migrate, err = m.confirm(&tofu.InputOpts{
			Id:          "backend-migrate-state-single-to-cloud-single",
			Query:       "Do you wish to proceed?",
			Description: strings.TrimSpace(tfcInputBackendMigrateStateSingleToCloudSingle),
		})
		if err != nil {
			return false, fmt.Errorf("Error asking for state migration action: %w", err)
		}
	}

	return migrate, nil
}

func (m *Meta)
promptRemotePrefixToCloudTagsMigration(opts *backendMigrateOpts) error { + if !m.input { + log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration") + return errors.New(strings.TrimSpace(errInteractiveInputDisabled)) + } + migrate := opts.force + if !migrate { + var err error + migrate, err = m.confirm(&tofu.InputOpts{ + Id: "backend-migrate-remote-multistate-to-cloud", + Query: "Do you wish to proceed?", + Description: strings.TrimSpace(tfcInputBackendMigrateRemoteMultiToCloud), + }) + if err != nil { + return fmt.Errorf("Error asking for state migration action: %w", err) + } + } + + if !migrate { + return fmt.Errorf("Migration aborted by user.") + } + + return nil +} + +// Multi-state to single state. +func (m *Meta) promptMultiToSingleCloudMigration(opts *backendMigrateOpts) error { + if !m.input { + log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration") + return errors.New(strings.TrimSpace(errInteractiveInputDisabled)) + } + migrate := opts.force + if !migrate { + var err error + // Ask the user if they want to migrate their existing remote state + migrate, err = m.confirm(&tofu.InputOpts{ + Id: "backend-migrate-multistate-to-single", + Query: "Do you want to copy only your current workspace?", + Description: fmt.Sprintf( + strings.TrimSpace(tfcInputBackendMigrateMultiToSingle), + opts.SourceType, opts.destinationWorkspace), + }) + if err != nil { + return fmt.Errorf("Error asking for state migration action: %w", err) + } + } + + if !migrate { + return fmt.Errorf("Migration aborted by user.") + } + + return nil +} + +func (m *Meta) promptNewWorkspaceName(destinationType string) (string, error) { + message := fmt.Sprintf("[reset][bold][yellow]The %q backend configuration only allows "+ + "named workspaces![reset]", destinationType) + if destinationType == "cloud" { + if !m.input { + log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration") + return "", 
errors.New(strings.TrimSpace(errInteractiveInputDisabled)) + } + message = `[reset][bold][yellow]Cloud backend requires all workspaces to be given an explicit name.[reset]` + } + name, err := m.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "new-state-name", + Query: message, + Description: strings.TrimSpace(inputBackendNewWorkspaceName), + }) + if err != nil { + return "", fmt.Errorf("Error asking for new state name: %w", err) + } + + return name, nil +} + +func (m *Meta) promptMultiStateMigrationPattern(sourceType string) (string, error) { + // This is not the first prompt a user would be presented with in the migration to TFC, so no + // guard on m.input is needed here. + renameWorkspaces, err := m.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "backend-migrate-multistate-to-tfc", + Query: fmt.Sprintf("[reset][bold][yellow]%s[reset]", "Would you like to rename your workspaces?"), + Description: fmt.Sprintf(strings.TrimSpace(tfcInputBackendMigrateMultiToMulti), sourceType), + }) + if err != nil { + return "", fmt.Errorf("Error asking for state migration action: %w", err) + } + if renameWorkspaces != "2" && renameWorkspaces != "1" { + return "", fmt.Errorf("Please select 1 or 2 as part of this option.") + } + if renameWorkspaces == "2" { + // this means they did not want to rename their workspaces, and we are + // returning a generic '*' that means use the same workspace name during + // migration. 
+ return "*", nil + } + + pattern, err := m.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "backend-migrate-multistate-to-tfc-pattern", + Query: fmt.Sprintf("[reset][bold][yellow]%s[reset]", "How would you like to rename your workspaces?"), + Description: strings.TrimSpace(tfcInputBackendMigrateMultiToMultiPattern), + }) + if err != nil { + return "", fmt.Errorf("Error asking for state migration action: %w", err) + } + if !strings.Contains(pattern, "*") { + return "", fmt.Errorf("The pattern must have an '*'") + } + + if count := strings.Count(pattern, "*"); count > 1 { + return "", fmt.Errorf("The pattern '*' cannot be used more than once.") + } + + return pattern, nil +} + +const errMigrateLoadStates = ` +Error inspecting states in the %q backend: + %w + +Prior to changing backends, OpenTofu inspects the source and destination +states to determine what kind of migration steps need to be taken, if any. +OpenTofu failed to load the states. The data in both the source and the +destination remain unmodified. Please resolve the above error and try again. +` + +const errMigrateSingleLoadDefault = ` +Error loading default state from the %q backend: + %w + +State migration cannot occur unless the state can be loaded. Backend +modification and state migration has been aborted. The state in both the +source and the destination remain unmodified. Please resolve the +above error and try again. +` + +const errMigrateMulti = ` +Error migrating the workspace %q from the previous %q backend +to the newly configured %q backend: + %w + +OpenTofu copies workspaces in alphabetical order. Any workspaces +alphabetically earlier than this one have been copied. Any workspaces +later than this haven't been modified in the destination. No workspaces +in the source state have been modified. + +Please resolve the error above and run the initialization command again. +This will attempt to copy (with permission) all workspaces again. 
+` + +const errBackendStateCopy = ` +Error copying state from the previous %q backend to the newly configured +%q backend: + %w + +The state in the previous backend remains intact and unmodified. Please resolve +the error above and try again. +` + +const errTFCMigrateNotYetImplemented = ` +Migrating state from cloud backend to another backend is not yet implemented. +` + +const errInteractiveInputDisabled = ` +Can't ask approval for state migration when interactive input is disabled. + +Please remove the "-input=false" option and try again. +` + +const tfcInputBackendMigrateMultiToMultiPattern = ` +Enter a pattern with an asterisk (*) to rename all workspaces based on their +previous names. The asterisk represents the current workspace name. + +For example, if a workspace is currently named 'prod', the pattern 'app-*' would yield +'app-prod' for a new workspace name; 'app-*-region1' would yield 'app-prod-region1'. +` + +const tfcInputBackendMigrateMultiToMulti = ` +Unlike typical OpenTofu workspaces representing an environment associated with a particular +configuration (e.g. production, staging, development), cloud backend workspaces are named uniquely +across all configurations used within an organization. A typical strategy to start with is +-- (e.g. networking-prod-us-east, networking-staging-us-east). + +When migrating existing workspaces from the backend %[1]q to cloud backend, would you like to +rename your workspaces? Enter 1 or 2. + +1. Yes, I'd like to rename all workspaces according to a pattern I will provide. +2. No, I would not like to rename my workspaces. Migrate them as currently named. +` + +const tfcInputBackendMigrateMultiToSingle = ` +The previous backend %[1]q has multiple workspaces, but cloud backend has +been configured to use a single workspace (%[2]q). By continuing, you will +only migrate your current workspace. 
If you wish to migrate all workspaces +from the previous backend, you may cancel this operation and use the 'tags' +strategy in your workspace configuration block instead. + +Enter "yes" to proceed or "no" to cancel. +` + +const tfcInputBackendMigrateStateSingleToCloudSingle = ` +As part of migrating to cloud backend, OpenTofu can optionally copy your +current workspace state to the configured cloud backend workspace. + +Answer "yes" to copy the latest state snapshot to the configured +cloud backend workspace. + +Answer "no" to ignore the existing state and just activate the configured +cloud backend workspace with its existing state, if any. + +Should OpenTofu migrate your existing state? +` + +const tfcInputBackendMigrateRemoteMultiToCloud = ` +When migrating from the 'remote' backend to OpenTofu's native integration +with cloud backend, OpenTofu will automatically create or use existing +workspaces based on the previous backend configuration's 'prefix' value. + +When the migration is complete, workspace names in OpenTofu will match the +fully qualified cloud backend workspace name. If necessary, the workspace +tags configured in the 'cloud' option block will be added to the associated +cloud backend workspaces. + +Enter "yes" to proceed or "no" to cancel. +` + +const inputBackendMigrateEmpty = ` +Pre-existing state was found while migrating the previous %q backend to the +newly configured %q backend. No existing state was found in the newly +configured %[2]q backend. Do you want to copy this state to the new %[2]q +backend? Enter "yes" to copy and "no" to start with an empty state. +` + +const inputBackendMigrateEmptyCloud = ` +Pre-existing state was found while migrating the previous %q backend to cloud backend. +No existing state was found in cloud backend. Do you want to copy this state to cloud backend? +Enter "yes" to copy and "no" to start with an empty state. 
+` + +const inputBackendMigrateNonEmpty = ` +Pre-existing state was found while migrating the previous %q backend to the +newly configured %q backend. An existing non-empty state already exists in +the new backend. The two states have been saved to temporary files that will be +removed after responding to this query. + +Previous (type %[1]q): %[3]s +New (type %[2]q): %[4]s + +Do you want to overwrite the state in the new backend with the previous state? +Enter "yes" to copy and "no" to start with the existing state in the newly +configured %[2]q backend. +` + +const inputBackendMigrateNonEmptyCloud = ` +Pre-existing state was found while migrating the previous %q backend to +cloud backend. An existing non-empty state already exists in cloud backend. +The two states have been saved to temporary files that will be removed after +responding to this query. + +Previous (type %[1]q): %[2]s +New (cloud backend): %[3]s + +Do you want to overwrite the state in cloud backend with the previous state? +Enter "yes" to copy and "no" to start with the existing state in cloud backend. +` + +const inputBackendMigrateMultiToSingle = ` +The existing %[1]q backend supports workspaces and you currently are +using more than one. The newly configured %[2]q backend doesn't support +workspaces. If you continue, OpenTofu will copy your current workspace %[3]q +to the default workspace in the new backend. Your existing workspaces in the +source backend won't be modified. If you want to switch workspaces, back them +up, or cancel altogether, answer "no" and OpenTofu will abort. +` + +const inputBackendMigrateMultiToMulti = ` +Both the existing %[1]q backend and the newly configured %[2]q backend +support workspaces. When migrating between backends, OpenTofu will copy +all workspaces (with the same names). THIS WILL OVERWRITE any conflicting +states in the destination. + +OpenTofu initialization doesn't currently migrate only select workspaces. 
+If you want to migrate a select number of workspaces, you must manually +pull and push those states. + +If you answer "yes", OpenTofu will migrate all states. If you answer +"no", OpenTofu will abort. +` + +const inputBackendNewWorkspaceName = ` +Please provide a new workspace name (e.g. dev, test) that will be used +to migrate the existing default workspace. +` + +const inputBackendSelectWorkspace = ` +This is expected behavior when the selected workspace did not have an +existing non-empty state. Please enter a number to select a workspace: + +%s +` diff --git a/pkg/command/meta_backend_migrate_test.go b/pkg/command/meta_backend_migrate_test.go new file mode 100644 index 00000000000..99b5ccbd86c --- /dev/null +++ b/pkg/command/meta_backend_migrate_test.go @@ -0,0 +1,66 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "testing" +) + +func TestBackendMigrate_promptMultiStatePattern(t *testing.T) { + // Setup the meta + + cases := map[string]struct { + renamePrompt string + patternPrompt string + expectedErr string + }{ + "valid pattern": { + renamePrompt: "1", + patternPrompt: "hello-*", + expectedErr: "", + }, + "invalid pattern, only one asterisk allowed": { + renamePrompt: "1", + patternPrompt: "hello-*-world-*", + expectedErr: "The pattern '*' cannot be used more than once.", + }, + "invalid pattern, missing asterisk": { + renamePrompt: "1", + patternPrompt: "hello-world", + expectedErr: "The pattern must have an '*'", + }, + "invalid rename": { + renamePrompt: "3", + expectedErr: "Please select 1 or 2 as part of this option.", + }, + "no rename": { + renamePrompt: "2", + }, + } + for name, tc := range cases { + t.Log("Test: ", name) + m := testMetaBackend(t, nil) + input := map[string]string{} + cleanup := testInputMap(t, input) + if tc.renamePrompt != "" { + input["backend-migrate-multistate-to-tfc"] = tc.renamePrompt + } 
+ if tc.patternPrompt != "" { + input["backend-migrate-multistate-to-tfc-pattern"] = tc.patternPrompt + } + + sourceType := "cloud" + _, err := m.promptMultiStateMigrationPattern(sourceType) + if tc.expectedErr == "" && err != nil { + t.Fatalf("expected error to be nil, but was %s", err.Error()) + } + if tc.expectedErr != "" && tc.expectedErr != err.Error() { + t.Fatalf("expected error to eq %s but got %s", tc.expectedErr, err.Error()) + } + + cleanup() + } +} diff --git a/pkg/command/meta_backend_test.go b/pkg/command/meta_backend_test.go new file mode 100644 index 00000000000..a753d3aca6a --- /dev/null +++ b/pkg/command/meta_backend_test.go @@ -0,0 +1,1971 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + + backendInit "github.com/kubegems/opentofu/pkg/backend/init" + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" + backendInmem "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" +) + +// Test empty directory with no config/state creates a local state. 
+func TestMetaBackend_emptyDir(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // Get the backend + m := testMetaBackend(t, nil) + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Write some state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + s.WriteState(testState()) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify it exists where we expect it to + if isEmptyState(DefaultStateFilename) { + t.Fatalf("no state was written") + } + + // Verify no backup since it was empty to start + if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state should be empty") + } + + // Verify no backend state was made + if !isEmptyState(filepath.Join(m.DataDir(), DefaultStateFilename)) { + t.Fatal("backend state should be empty") + } +} + +// check for no state. Either the file doesn't exist, or is empty +func isEmptyState(path string) bool { + fi, err := os.Stat(path) + if os.IsNotExist(err) { + return true + } + + if fi.Size() == 0 { + return true + } + + return false +} + +// Test a directory with a legacy state and no config continues to +// use the legacy state. 
+func TestMetaBackend_emptyWithDefaultState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // Write the legacy state + statePath := DefaultStateFilename + { + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(testState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + } + + // Get the backend + m := testMetaBackend(t, nil) + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + if actual := s.State().String(); actual != testState().String() { + t.Fatalf("bad: %s", actual) + } + + // Verify it exists where we expect it to + if _, err := os.Stat(DefaultStateFilename); err != nil { + t.Fatalf("err: %s", err) + } + + stateName := filepath.Join(m.DataDir(), DefaultStateFilename) + if !isEmptyState(stateName) { + t.Fatal("expected no state at", stateName) + } + + // Write some state + next := testState() + next.RootModule().SetOutputValue("foo", cty.StringVal("bar"), false) + s.WriteState(next) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify a backup was made since we're modifying a pre-existing state + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state should not be empty") + } +} + +// Test an empty directory with an explicit state path (outside the dir) +func TestMetaBackend_emptyWithExplicitState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // Create another directory to store our state + stateDir := t.TempDir() 
+ os.MkdirAll(stateDir, 0755) + + // Write the legacy state + statePath := filepath.Join(stateDir, "foo") + { + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(testState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + } + + // Setup the meta + m := testMetaBackend(t, nil) + m.statePath = statePath + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + if actual := s.State().String(); actual != testState().String() { + t.Fatalf("bad: %s", actual) + } + + // Verify neither defaults exist + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + stateName := filepath.Join(m.DataDir(), DefaultStateFilename) + if !isEmptyState(stateName) { + t.Fatal("expected no state at", stateName) + } + + // Write some state + next := testState() + markStateForMatching(next, "bar") // just any change so it shows as different than before + s.WriteState(next) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify a backup was made since we're modifying a pre-existing state + if isEmptyState(statePath + DefaultBackupExtension) { + t.Fatal("backup state should not be empty") + } +} + +// Verify that interpolations are allowed +func TestMetaBackend_configureInterpolation(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-interp"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + _, err := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) 
+ if err != nil { + t.Fatal("should not error") + } +} + +// Newly configured backend +func TestMetaBackend_configureNew(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state != nil { + t.Fatal("state should be nil") + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify the default paths don't exist + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify a backup doesn't exist + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } +} + +// Newly configured backend with prior local state and no remote state +func TestMetaBackend_configureNewWithState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-migrate"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"yes"})() + + // Setup the meta + m := 
testMetaBackend(t, nil) + + // This combination should not require the extra -migrate-state flag, since + // there is no existing backend config + m.migrateState = false + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + state, err := statemgr.RefreshAndRead(s) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if state == nil { + t.Fatal("state is nil") + } + + if got, want := testStateMgrCurrentLineage(s), "backend-new-migrate"; got != want { + t.Fatalf("lineage changed during migration\nnow: %s\nwas: %s", got, want) + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + if err := statemgr.WriteAndPersist(s, state, nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup does exist + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state is empty or missing") + } +} + +// Newly configured backend with matching local and remote state doesn't prompt +// for copy. 
+func TestMetaBackend_configureNewWithoutCopy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-migrate"), td) + defer testChdir(t, td)() + + if err := copy.CopyFile(DefaultStateFilename, "local-state.tfstate"); err != nil { + t.Fatal(err) + } + + // Setup the meta + m := testMetaBackend(t, nil) + m.input = false + + // init the backend + _, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Verify the state is where we expect + f, err := os.Open("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Lineage != "backend-new-migrate" { + t.Fatalf("incorrect state lineage: %q", actual.Lineage) + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup does exist + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state is empty or missing") + } +} + +// Newly configured backend with prior local state and no remote state, +// but opting to not migrate. 
+func TestMetaBackend_configureNewWithStateNoMigrate(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-migrate"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"no"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if state := s.State(); state != nil { + t.Fatal("state is not nil") + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup does exist + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state is empty or missing") + } +} + +// Newly configured backend with prior local state and remote state +func TestMetaBackend_configureNewWithStateExisting(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-migrate-existing"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + // suppress input + m.forceInitCopy = true + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state 
== nil { + t.Fatal("state is nil") + } + if got, want := testStateMgrCurrentLineage(s), "local"; got != want { + t.Fatalf("wrong lineage %q; want %q", got, want) + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup does exist + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state is empty or missing") + } +} + +// Newly configured backend with prior local state and remote state +func TestMetaBackend_configureNewWithStateExistingNoMigrate(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-new-migrate-existing"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"no"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state is nil") + } + if testStateMgrCurrentLineage(s) 
!= "remote" { + t.Fatalf("bad: %#v", state) + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open("local-state.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup does exist + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup state is empty or missing") + } +} + +// Saved backend state matching config +func TestMetaBackend_configuredUnchanged(t *testing.T) { + defer testChdir(t, testFixturePath("backend-unchanged"))() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("nil state") + } + if testStateMgrCurrentLineage(s) != "configuredUnchanged" { + t.Fatalf("bad: %#v", state) + } + + // Verify the default paths don't exist + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify a backup doesn't exist + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not 
exist") + } +} + +// Changing a configured backend +func TestMetaBackend_configuredChange(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"no"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state != nil { + t.Fatal("state should be nil") + } + + // Verify the default paths don't exist + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify a backup doesn't exist + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open("local-state-2.tfstate") + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } +} + +// Reconfiguring with 
an already configured backend.
// This should ignore the existing backend config, and configure the new
// backend as if this is the first time.
func TestMetaBackend_reconfigureChange(t *testing.T) {
	// Create a temporary working directory that is empty
	td := t.TempDir()
	testCopyDir(t, testFixturePath("backend-change-single-to-single"), td)
	defer testChdir(t, td)()

	// Register the single-state backend
	backendInit.Set("local-single", backendLocal.TestNewLocalSingle)
	defer backendInit.Set("local-single", nil)

	// Setup the meta
	m := testMetaBackend(t, nil)

	// this should not ask for input
	m.input = false

	// cli flag -reconfigure
	m.reconfigure = true

	// Get the backend
	b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled())
	if diags.HasErrors() {
		t.Fatal(diags.Err())
	}

	// Check the state
	s, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if err := s.RefreshState(); err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	newState := s.State()
	// The freshly reconfigured backend must not carry any state over. The
	// previous condition used ||, which made the Empty clause unreachable
	// (Empty is nil-safe) and rejected a non-nil-but-empty state even
	// though the message below treats "empty" as acceptable.
	if newState != nil && !newState.Empty() {
		t.Fatal("state should be nil/empty after forced reconfiguration")
	}

	// verify that the old state is still there
	s = statemgr.NewFilesystem("local-state.tfstate", encryption.StateEncryptionDisabled())
	if err := s.RefreshState(); err != nil {
		t.Fatal(err)
	}
	oldState := s.State()
	if oldState == nil || oldState.Empty() {
		t.Fatal("original state should be untouched")
	}
}

// Initializing a backend which supports workspaces and does *not* have
// the currently selected workspace should prompt the user with a list of
// workspaces to choose from to select a valid one, if more than one workspace
// is available.
+func TestMetaBackend_initSelectedWorkspaceDoesNotExist(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-selected-workspace-doesnt-exist-multi"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + + defer testInputMap(t, map[string]string{ + "select-workspace": "2", + })() + + // Get the backend + _, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + expected := "foo" + actual, err := m.Workspace() + if err != nil { + t.Fatal(err) + } + + if actual != expected { + t.Fatalf("expected selected workspace to be %q, but was %q", expected, actual) + } +} + +// Initializing a backend which supports workspaces and does *not* have the +// currently selected workspace - and which only has a single workspace - should +// automatically select that single workspace. +func TestMetaBackend_initSelectedWorkspaceDoesNotExistAutoSelect(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-selected-workspace-doesnt-exist-single"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + + // this should not ask for input + m.input = false + + // Assert test precondition: The current selected workspace is "bar" + previousName, err := m.Workspace() + if err != nil { + t.Fatal(err) + } + + if previousName != "bar" { + t.Fatalf("expected test fixture to start with 'bar' as the current selected workspace") + } + + // Get the backend + _, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + expected := "default" + actual, err := m.Workspace() + if err != nil { + t.Fatal(err) + } + + if actual != expected { + t.Fatalf("expected selected workspace to be %q, but was %q", expected, 
actual) + } +} + +// Initializing a backend which supports workspaces and does *not* have +// the currently selected workspace with input=false should fail. +func TestMetaBackend_initSelectedWorkspaceDoesNotExistInputFalse(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-selected-workspace-doesnt-exist-multi"), td) + defer testChdir(t, td)() + + // Setup the meta + m := testMetaBackend(t, nil) + m.input = false + + // Get the backend + _, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + + // Should fail immediately + if got, want := diags.ErrWithWarnings().Error(), `Currently selected workspace "bar" does not exist`; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } +} + +// Changing a configured backend, copying state +func TestMetaBackend_configuredChangeCopy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"yes", "yes"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change" { + t.Fatalf("bad: %#v", state) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename 
+ DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } +} + +// Changing a configured backend that supports only single states to another +// backend that only supports single states. +func TestMetaBackend_configuredChangeCopy_singleState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-single-to-single"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-copy-to-empty": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change" { + t.Fatalf("bad: %#v", state) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } +} + +// Changing a configured backend that supports multi-state to a +// backend that only supports single states. The multi-state only has +// a default state. 
+func TestMetaBackend_configuredChangeCopy_multiToSingleDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-default-to-single"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-copy-to-empty": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change" { + t.Fatalf("bad: %#v", state) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } +} + +// Changing a configured backend that supports multi-state to a +// backend that only supports single states. 
+func TestMetaBackend_configuredChangeCopy_multiToSingle(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-to-single"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-single": "yes", + "backend-migrate-copy-to-empty": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change" { + t.Fatalf("bad: %#v", state) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } + + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + + // Verify we are now in the default env, or we may not be able to access the new backend + env, err := m.Workspace() + if err != nil { + t.Fatal(err) + } + if env != backend.DefaultStateName { + t.Fatal("using non-default env with single-env backend") + } +} + +// Changing a configured backend that supports multi-state 
to a +// backend that only supports single states. +func TestMetaBackend_configuredChangeCopy_multiToSingleCurrentEnv(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-to-single"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-single": "yes", + "backend-migrate-copy-to-empty": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Change env + if err := m.SetWorkspace("env2"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env2" { + t.Fatalf("bad: %#v", state) + } + + // Verify no local state + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatal("file should not exist") + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } + + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } +} + +// Changing a configured backend that supports multi-state to a +// backend that also supports multi-state. 
+func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-to-multi"), td) + defer testChdir(t, td)() + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-multistate": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check resulting states + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"default", "env2"} + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("bad: %#v", workspaces) + } + + { + // Check the default state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change" { + t.Fatalf("bad: %#v", state) + } + } + + { + // Check the other state + s, err := b.StateMgr("env2") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env2" { + t.Fatalf("bad: %#v", state) + } + } + + // Verify no local backup + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } + + { + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := 
os.Stat(envPath); err != nil { + t.Fatalf("%s should exist, but does not", envPath) + } + } + + { + // Verify new workspaces exist + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatalf("%s should exist, but does not", envPath) + } + } +} + +// Changing a configured backend that supports multi-state to a +// backend that also supports multi-state, but doesn't allow a +// default state while the default state is non-empty. +func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-to-no-default-with-default"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault) + defer backendInit.Set("local-no-default", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-multistate": "yes", + "new-state-name": "env1", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check resulting states + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"env1", "env2"} + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("bad: %#v", workspaces) + } + + { + // Check the renamed default state + s, err := b.StateMgr("env1") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env1" { + t.Fatalf("bad: %#v", state) 
+ } + } + + { + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } + + { + // Verify new workspaces exist + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } +} + +// Changing a configured backend that supports multi-state to a +// backend that also supports multi-state, but doesn't allow a +// default state while the default state is empty. +func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change-multi-to-no-default-without-default"), td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault) + defer backendInit.Set("local-no-default", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-multistate": "yes", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check resulting states + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"env2"} // default is skipped because it is absent in the source backend + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces\ngot: %#v\nwant: %#v", workspaces, expected) + } + + { + // Check the named state + s, err := b.StateMgr("env2") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: 
%s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env2" { + t.Fatalf("bad: %#v", state) + } + } + + { + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatalf("%s should exist, but does not", envPath) + } + } + + { + // Verify new workspaces exist + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatalf("%s should exist, but does not", envPath) + } + } +} + +// Unsetting a saved backend +func TestMetaBackend_configuredUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-unset"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"no"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state != nil { + t.Fatal("state should be nil") + } + + // Verify the default paths don't exist + if !isEmptyState(DefaultStateFilename) { + data, _ := os.ReadFile(DefaultStateFilename) + t.Fatal("state should not exist, but contains:\n", string(data)) + } + + // Verify a backup doesn't exist + if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + data, _ := os.ReadFile(DefaultStateFilename + DefaultBackupExtension) + t.Fatal("backup should not exist, but contains:\n", string(data)) + } + + // Write some state + 
s.WriteState(testState()) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify it exists where we expect it to + if isEmptyState(DefaultStateFilename) { + t.Fatal(DefaultStateFilename, "is empty") + } + + // Verify no backup since it was empty to start + if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + data, _ := os.ReadFile(DefaultStateFilename + DefaultBackupExtension) + t.Fatal("backup state should be empty, but contains:\n", string(data)) + } +} + +// Unsetting a saved backend and copying the remote state +func TestMetaBackend_configuredUnsetCopy(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-unset"), td) + defer testChdir(t, td)() + + // Ask input + defer testInteractiveInput(t, []string{"yes", "yes"})() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state is nil") + } + if got, want := testStateMgrCurrentLineage(s), "configuredUnset"; got != want { + t.Fatalf("wrong state lineage %q; want %q", got, want) + } + + // Verify a backup doesn't exist + if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatalf("backup state should be empty") + } + + // Write some state + s.WriteState(testState()) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify it exists where we expect it to + if _, err := os.Stat(DefaultStateFilename); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify a backup since it wasn't 
empty to start + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup is empty") + } +} + +// A plan that has uses the local backend +func TestMetaBackend_planLocal(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-plan-local"), td) + defer testChdir(t, td)() + + backendConfigBlock := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NullVal(cty.String), + "workspace_dir": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfigBlock, backendConfigBlock.Type()) + if err != nil { + t.Fatal(err) + } + backendConfig := plans.Backend{ + Type: "local", + Config: backendConfigRaw, + Workspace: "default", + } + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.BackendForLocalPlan(backendConfig, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state != nil { + t.Fatalf("state should be nil: %#v", state) + } + + // The default state file should not exist yet + if !isEmptyState(DefaultStateFilename) { + t.Fatal("expected empty state") + } + + // A backup file shouldn't exist yet either. 
+ if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("expected empty backup") + } + + // Verify we have no configured backend + path := filepath.Join(m.DataDir(), DefaultStateFilename) + if _, err := os.Stat(path); err == nil { + t.Fatalf("should not have backend configured") + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open(DefaultStateFilename) + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify no local backup + if !isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatalf("backup state should be empty") + } +} + +// A plan with a custom state save path +func TestMetaBackend_planLocalStatePath(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-plan-local"), td) + defer testChdir(t, td)() + + original := testState() + markStateForMatching(original, "hello") + + backendConfigBlock := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NullVal(cty.String), + "workspace_dir": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfigBlock, backendConfigBlock.Type()) + if err != nil { + t.Fatal(err) + } + plannedBackend := plans.Backend{ + Type: "local", + Config: backendConfigRaw, + Workspace: "default", + } + + // Create an alternate output path + statePath := "foo.tfstate" + + // put an initial state there that needs to be backed up + err = statemgr.WriteAndPersist(statemgr.NewFilesystem(statePath, encryption.StateEncryptionDisabled()), original, nil) + if err != nil { + t.Fatal(err) + } + + // Setup the meta + m := testMetaBackend(t, nil) + 
m.stateOutPath = statePath + + // Get the backend + b, diags := m.BackendForLocalPlan(plannedBackend, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state != nil { + t.Fatal("default workspace state is not nil, but should be because we've not put anything there") + } + + // Verify the default path doesn't exist + if _, err := os.Stat(DefaultStateFilename); err == nil { + t.Fatalf("err: %s", err) + } + + // Verify a backup doesn't exists + if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err == nil { + t.Fatal("file should not exist") + } + + // Verify we have no configured backend/legacy + path := filepath.Join(m.DataDir(), DefaultStateFilename) + if _, err := os.Stat(path); err == nil { + t.Fatalf("should not have backend configured") + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify we have a backup + if isEmptyState(statePath + DefaultBackupExtension) { + t.Fatal("backup is empty") + } +} + +// A plan that has no backend config, matching local state +func TestMetaBackend_planLocalMatch(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-plan-local-match"), td) + defer testChdir(t, td)() + + 
backendConfigBlock := cty.ObjectVal(map[string]cty.Value{ + "path": cty.NullVal(cty.String), + "workspace_dir": cty.NullVal(cty.String), + }) + backendConfigRaw, err := plans.NewDynamicValue(backendConfigBlock, backendConfigBlock.Type()) + if err != nil { + t.Fatal(err) + } + backendConfig := plans.Backend{ + Type: "local", + Config: backendConfigRaw, + Workspace: "default", + } + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.BackendForLocalPlan(backendConfig, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("should is nil") + } + if testStateMgrCurrentLineage(s) != "hello" { + t.Fatalf("bad: %#v", state) + } + + // Verify the default path + if isEmptyState(DefaultStateFilename) { + t.Fatal("state is empty") + } + + // Verify we have no configured backend/legacy + path := filepath.Join(m.DataDir(), DefaultStateFilename) + if _, err := os.Stat(path); err == nil { + t.Fatalf("should not have backend configured") + } + + // Write some state + state = states.NewState() + mark := markStateForMatching(state, "changing") + + s.WriteState(state) + if err := s.PersistState(nil); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Verify the state is where we expect + { + f, err := os.Open(DefaultStateFilename) + if err != nil { + t.Fatalf("err: %s", err) + } + actual, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + assertStateHasMarker(t, actual.State, mark) + } + + // Verify local backup + if isEmptyState(DefaultStateFilename + DefaultBackupExtension) { + t.Fatal("backup is empty") + } +} + +// init a backend using -backend-config 
options multiple times +func TestMetaBackend_configureWithExtra(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-empty"), td) + defer testChdir(t, td)() + + extras := map[string]cty.Value{"path": cty.StringVal("hello")} + m := testMetaBackend(t, nil) + opts := &BackendOpts{ + ConfigOverride: configs.SynthBody("synth", extras), + Init: true, + } + + _, cHash, err := m.backendConfig(opts) + if err != nil { + t.Fatal(err) + } + + // init the backend + _, diags := m.Backend(&BackendOpts{ + ConfigOverride: configs.SynthBody("synth", extras), + Init: true, + }, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check the state + s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) + if s.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } + + // init the backend again with the same options + m = testMetaBackend(t, nil) + _, err = m.Backend(&BackendOpts{ + ConfigOverride: configs.SynthBody("synth", extras), + Init: true, + }, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Check the state + s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) + if s.Backend.Hash != uint64(cHash) { + t.Fatal("mismatched state and config backend hashes") + } +} + +// when configuring a default local state, don't delete local state +func TestMetaBackend_localDoesNotDeleteLocal(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-empty"), td) + defer testChdir(t, td)() + + //// create our local state + orig := states.NewState() + orig.Module(addrs.RootModuleInstance).SetOutputValue("foo", cty.StringVal("bar"), false) + testStateFileDefault(t, orig) + + m := testMetaBackend(t, nil) + m.forceInitCopy = 
true + // init the backend + _, diags := m.Backend(&BackendOpts{Init: true}, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // check that we can read the state + s := testStateRead(t, DefaultStateFilename) + if s.Empty() { + t.Fatal("our state was deleted") + } +} + +// move options from config to -backend-config +func TestMetaBackend_configToExtra(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + // init the backend + m := testMetaBackend(t, nil) + _, err := m.Backend(&BackendOpts{ + Init: true, + }, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Check the state + s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) + backendHash := s.Backend.Hash + + // init again but remove the path option from the config + cfg := "terraform {\n backend \"local\" {}\n}\n" + if err := os.WriteFile("main.tf", []byte(cfg), 0644); err != nil { + t.Fatal(err) + } + + // init the backend again with the options + extras := map[string]cty.Value{"path": cty.StringVal("hello")} + m = testMetaBackend(t, nil) + m.forceInitCopy = true + _, diags := m.Backend(&BackendOpts{ + ConfigOverride: configs.SynthBody("synth", extras), + Init: true, + }, encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) + + if s.Backend.Hash == backendHash { + t.Fatal("state.Backend.Hash was not updated") + } +} + +// no config; return inmem backend stored in state +func TestBackendFromState(t *testing.T) { + wd := tempWorkingDirFixture(t, "backend-from-state") + defer testChdir(t, wd.RootModuleDir())() + + // Setup the meta + m := testMetaBackend(t, nil) + m.WorkingDir = wd + // tofu caches a small "state" file that 
stores the backend config. + // This test must override m.dataDir so it loads the "terraform.tfstate" file in the + // test directory as the backend config cache. This fixture is really a + // fixture for the data dir rather than the module dir, so we'll override + // them to match just for this test. + wd.OverrideDataDir(".") + + stateBackend, diags := m.backendFromState(context.Background(), encryption.StateEncryptionDisabled()) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if _, ok := stateBackend.(*backendInmem.Backend); !ok { + t.Fatal("did not get expected inmem backend") + } +} + +func testMetaBackend(t *testing.T, args []string) *Meta { + var m Meta + m.Ui = new(cli.MockUi) + view, _ := testView(t) + m.View = view + m.process(args) + f := m.extendedFlagSet("test") + if err := f.Parse(args); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // metaBackend tests are verifying migrate actions + m.migrateState = true + + return &m +} diff --git a/pkg/command/meta_config.go b/pkg/command/meta_config.go new file mode 100644 index 00000000000..cf72f0405ac --- /dev/null +++ b/pkg/command/meta_config.go @@ -0,0 +1,539 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// normalizePath normalizes a given path so that it is, if possible, relative +// to the current working directory. This is primarily used to prepare +// paths used to load configuration, because we want to prefer recording +// relative paths in source code references within the configuration. +func (m *Meta) normalizePath(path string) string { + m.fixupMissingWorkingDir() + return m.WorkingDir.NormalizePath(path) +} + +// loadConfig reads a configuration from the given directory, which should +// contain a root module and have already have any required descendent modules +// installed. +func (m *Meta) loadConfig(rootDir string) (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + rootDir = m.normalizePath(rootDir) + + loader, err := m.initConfigLoader() + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + call, callDiags := m.rootModuleCall(rootDir) + diags = diags.Append(callDiags) + if callDiags.HasErrors() { + return nil, diags + } + + config, hclDiags := loader.LoadConfig(rootDir, call) + diags = diags.Append(hclDiags) + return config, diags +} + +// loadConfigWithTests matches loadConfig, except it also loads any test files +// into the config alongside the main configuration. 
func (m *Meta) loadConfigWithTests(rootDir, testDir string) (*configs.Config, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	rootDir = m.normalizePath(rootDir)

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	call, vDiags := m.rootModuleCall(rootDir)
	diags = diags.Append(vDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	config, hclDiags := loader.LoadConfigWithTests(rootDir, testDir, call)
	diags = diags.Append(hclDiags)
	return config, diags
}

// loadSingleModule reads configuration from the given directory and returns
// a description of that module only, without attempting to assemble a module
// tree for referenced child modules.
//
// Most callers should use loadConfig. This method exists to support early
// initialization use-cases where the root module must be inspected in order
// to determine what else needs to be installed before the full configuration
// can be used.
func (m *Meta) loadSingleModule(dir string, load configs.SelectiveLoader) (*configs.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	dir = m.normalizePath(dir)

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	call, vDiags := m.rootModuleCall(dir)
	diags = diags.Append(vDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	module, hclDiags := loader.Parser().LoadConfigDirSelective(dir, call, load)
	diags = diags.Append(hclDiags)
	return module, diags
}

// rootModuleCall constructs (and caches) the StaticModuleCall used to
// evaluate the root module, wiring CLI/tfvars/env variable values into
// variable resolution.
//
// The result is memoized on the Meta, so the rootDir and workspace of the
// first call win for the lifetime of this Meta value.
func (m *Meta) rootModuleCall(rootDir string) (configs.StaticModuleCall, tfdiags.Diagnostics) {
	if m.rootModuleCallCache != nil {
		return *m.rootModuleCallCache, nil
	}
	variables, diags := m.collectVariableValues()

	// A workspace lookup failure is recorded as a diagnostic but does not
	// prevent constructing the call; callers decide how to react.
	workspace, err := m.Workspace()
	if err != nil {
		diags = diags.Append(err)
	}

	call := configs.NewStaticModuleCall(addrs.RootModule, func(variable *configs.Variable) (cty.Value, hcl.Diagnostics) {
		name := variable.Name
		v, ok := variables[name]
		if !ok {
			// For now, we are simply failing when the user omits a required variable. This is due to complex interactions between variables in different code paths (apply existing plan for example)
			// Ideally, we should be able to use something like backend_local.go:interactiveCollectVariables() in the future to allow users to provide values if input is enabled
			// This is probably blocked by command package refactoring
			if variable.Required() {
				// Not specified on CLI or in var files, without a valid default.
				return cty.NilVal, hcl.Diagnostics{&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Variable not provided via -var, tfvars files, or env",
					Subject:  variable.DeclRange.Ptr(),
				}}
			}
			return variable.Default, nil
		}

		parsed, parsedDiags := v.ParseVariableValue(variable.ParsingMode)
		return parsed.Value, parsedDiags.ToHCL()
	}, rootDir, workspace)
	m.rootModuleCallCache = &call
	return call, diags
}

// loadSingleModuleWithTests matches loadSingleModule except it also loads any
// tests for the target module.
func (m *Meta) loadSingleModuleWithTests(dir string, testDir string) (*configs.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	dir = m.normalizePath(dir)

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	call, vDiags := m.rootModuleCall(dir)
	diags = diags.Append(vDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	module, hclDiags := loader.Parser().LoadConfigDirWithTests(dir, testDir, call)
	diags = diags.Append(hclDiags)
	return module, diags
}

// dirIsConfigPath checks if the given path is a directory that contains at
// least one OpenTofu configuration file (.tf or .tf.json), returning true
// if so.
//
// In the unlikely event that the underlying config loader cannot be initialized,
// this function optimistically returns true, assuming that the caller will
// then do some other operation that requires the config loader and get an
// error at that point.
func (m *Meta) dirIsConfigPath(dir string) bool {
	loader, err := m.initConfigLoader()
	if err != nil {
		return true
	}

	return loader.IsConfigDir(dir)
}

// loadBackendConfig reads configuration from the given directory and returns
// the backend configuration defined by that module, if any. Nil is returned
// if the specified module does not have an explicit backend configuration.
//
// This is a convenience method for command code that will delegate to the
// configured backend to do most of its work, since in that case it is the
// backend that will do the full configuration load.
//
// Although this method returns only the backend configuration, at present it
// actually loads and validates the entire configuration first. Therefore errors
// returned may be about other aspects of the configuration. This behavior may
// change in future, so callers must not rely on it. (That is, they must expect
// that a call to loadSingleModule or loadConfig could fail on the same
// directory even if loadBackendConfig succeeded.)
func (m *Meta) loadBackendConfig(rootDir string) (*configs.Backend, tfdiags.Diagnostics) {
	mod, diags := m.loadSingleModule(rootDir, configs.SelectiveLoadBackend)

	// Only return error diagnostics at this point. Any warnings will be caught
	// again later and duplicated in the output.
	if diags.HasErrors() {
		return nil, diags
	}

	// A "cloud" block takes precedence and is presented to callers as a
	// synthetic backend configuration.
	if mod.CloudConfig != nil {
		backendConfig := mod.CloudConfig.ToBackendConfig()
		return &backendConfig, nil
	}

	return mod.Backend, nil
}

// loadHCLFile reads an arbitrary HCL file and returns the unprocessed body
// representing its toplevel. Most callers should use one of the more
// specialized "load..."
// methods to get a higher-level representation.
func (m *Meta) loadHCLFile(filename string) (hcl.Body, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	filename = m.normalizePath(filename)

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	body, hclDiags := loader.Parser().LoadHCLFile(filename)
	diags = diags.Append(hclDiags)
	return body, diags
}

// installModules reads a root module from the given directory and attempts
// recursively to install all of its descendent modules.
//
// The given hooks object will be notified of installation progress, which
// can then be relayed to the end-user. The uiModuleInstallHooks type in
// this package has a reasonable implementation for displaying notifications
// via a provided cli.Ui.
//
// The returned abort flag is true when installation could not proceed (setup
// failure) or was interrupted; diagnostics describe the details either way.
func (m *Meta) installModules(ctx context.Context, rootDir, testsDir string, upgrade, installErrsOnly bool, hooks initwd.ModuleInstallHooks) (abort bool, diags tfdiags.Diagnostics) {
	ctx, span := tracer.Start(ctx, "install modules")
	defer span.End()

	rootDir = m.normalizePath(rootDir)

	err := os.MkdirAll(m.modulesDir(), os.ModePerm)
	if err != nil {
		diags = diags.Append(fmt.Errorf("failed to create local modules directory: %w", err))
		return true, diags
	}

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return true, diags
	}

	inst := initwd.NewModuleInstaller(m.modulesDir(), loader, m.registryClient())

	call, vDiags := m.rootModuleCall(rootDir)
	diags = diags.Append(vDiags)
	if diags.HasErrors() {
		return true, diags
	}

	_, moreDiags := inst.InstallModules(ctx, rootDir, testsDir, upgrade, installErrsOnly, hooks, call)
	diags = diags.Append(moreDiags)

	// Interrupt (Ctrl-C) during installation: report what we have so far
	// and signal the caller to abort.
	if ctx.Err() == context.Canceled {
		m.showDiagnostics(diags)
		m.Ui.Error("Module installation was canceled by an interrupt signal.")
		return true, diags
	}

	return false, diags
}

// initDirFromModule initializes the given directory (which should be
// pre-verified as empty by the caller) by copying the source code from the
// given module address.
//
// Internally this runs similar steps to installModules.
// The given hooks object will be notified of installation progress, which
// can then be relayed to the end-user. The uiModuleInstallHooks type in
// this package has a reasonable implementation for displaying notifications
// via a provided cli.Ui.
func (m *Meta) initDirFromModule(ctx context.Context, targetDir string, addr string, hooks initwd.ModuleInstallHooks) (abort bool, diags tfdiags.Diagnostics) {
	ctx, span := tracer.Start(ctx, "initialize directory from module", trace.WithAttributes(
		attribute.String("source_addr", addr),
	))
	defer span.End()

	loader, err := m.initConfigLoader()
	if err != nil {
		diags = diags.Append(err)
		return true, diags
	}

	targetDir = m.normalizePath(targetDir)
	moreDiags := initwd.DirFromModule(ctx, loader, targetDir, m.modulesDir(), addr, m.registryClient(), hooks)
	diags = diags.Append(moreDiags)
	// Same interrupt handling as installModules: show partial results and abort.
	if ctx.Err() == context.Canceled {
		m.showDiagnostics(diags)
		m.Ui.Error("Module initialization was canceled by an interrupt signal.")
		return true, diags
	}
	return false, diags
}

// inputForSchema uses interactive prompts to try to populate any
// not-yet-populated required attributes in the given object value to
// comply with the given schema.
//
// An error will be returned if input is disabled for this meta or if
// values cannot be obtained for some other operational reason. Errors are
// not returned for invalid input since the input loop itself will report
// that interactively.
//
// It is not guaranteed that the result will be valid, since certain attribute
// types and nested blocks are not supported for input.
//
// The given value must conform to the given schema. If not, this method will
// panic.
+func (m *Meta) inputForSchema(given cty.Value, schema *configschema.Block) (cty.Value, error) { + if given.IsNull() || !given.IsKnown() { + // This is not reasonable input, but we'll tolerate it anyway and + // just pass it through for the caller to handle downstream. + return given, nil + } + + retVals := given.AsValueMap() + names := make([]string, 0, len(schema.Attributes)) + for name, attrS := range schema.Attributes { + if attrS.Required && retVals[name].IsNull() && attrS.Type.IsPrimitiveType() { + names = append(names, name) + } + } + sort.Strings(names) + + input := m.UIInput() + for _, name := range names { + attrS := schema.Attributes[name] + + for { + strVal, err := input.Input(context.Background(), &tofu.InputOpts{ + Id: name, + Query: name, + Description: attrS.Description, + }) + if err != nil { + return cty.UnknownVal(schema.ImpliedType()), fmt.Errorf("%s: %w", name, err) + } + + val := cty.StringVal(strVal) + val, err = convert.Convert(val, attrS.Type) + if err != nil { + m.showDiagnostics(fmt.Errorf("Invalid value: %w", err)) + continue + } + + retVals[name] = val + break + } + } + + return cty.ObjectVal(retVals), nil +} + +// configSources returns the source cache from the receiver's config loader, +// which the caller must not modify. +// +// If a config loader has not yet been instantiated then no files could have +// been loaded already, so this method returns a nil map in that case. 
+func (m *Meta) configSources() map[string]*hcl.File { + if m.configLoader == nil { + return nil + } + + return m.configLoader.Sources() +} + +func (m *Meta) modulesDir() string { + return filepath.Join(m.DataDir(), "modules") +} + +// registerSynthConfigSource allows commands to add synthetic additional source +// buffers to the config loader's cache of sources (as returned by +// configSources), which is useful when a command is directly parsing something +// from the command line that may produce diagnostics, so that diagnostic +// snippets can still be produced. +// +// If this is called before a configLoader has been initialized then it will +// try to initialize the loader but ignore any initialization failure, turning +// the call into a no-op. (We presume that a caller will later call a different +// function that also initializes the config loader as a side effect, at which +// point those errors can be returned.) +func (m *Meta) registerSynthConfigSource(filename string, src []byte) { + loader, err := m.initConfigLoader() + if err != nil || loader == nil { + return // treated as no-op, since this is best-effort + } + loader.Parser().ForceFileSource(filename, src) +} + +// initConfigLoader initializes the shared configuration loader if it isn't +// already initialized. +// +// If the loader cannot be created for some reason then an error is returned +// and no loader is created. Subsequent calls will presumably see the same +// error. Loader initialization errors will tend to prevent any further use +// of most OpenTofu features, so callers should report any error and safely +// terminate. 
+func (m *Meta) initConfigLoader() (*configload.Loader, error) { + if m.configLoader == nil { + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: m.modulesDir(), + Services: m.Services, + }) + if err != nil { + return nil, err + } + loader.AllowLanguageExperiments(m.AllowExperimentalFeatures) + m.configLoader = loader + if m.View != nil { + m.View.SetConfigSources(loader.Sources) + } + } + return m.configLoader, nil +} + +// registryClient instantiates and returns a new Registry client. +func (m *Meta) registryClient() *registry.Client { + return registry.NewClient(m.Services, nil) +} + +// configValueFromCLI parses a configuration value that was provided in a +// context in the CLI where only strings can be provided, such as on the +// command line or in an environment variable, and returns the resulting +// value. +func configValueFromCLI(synthFilename, rawValue string, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + switch { + case wantType.IsPrimitiveType(): + // Primitive types are handled as conversions from string. 
+ val := cty.StringVal(rawValue) + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend configuration value", + fmt.Sprintf("Invalid backend configuration argument %s: %s", synthFilename, err), + )) + val = cty.DynamicVal // just so we return something valid-ish + } + return val, diags + default: + // Non-primitives are parsed as HCL expressions + src := []byte(rawValue) + expr, hclDiags := hclsyntax.ParseExpression(src, synthFilename, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + val = cty.DynamicVal + } + return val, diags + } +} + +// rawFlags is a flag.Value implementation that just appends raw flag +// names and values to a slice. +type rawFlags struct { + flagName string + items *[]rawFlag +} + +func newRawFlags(flagName string) rawFlags { + var items []rawFlag + return rawFlags{ + flagName: flagName, + items: &items, + } +} + +func (f rawFlags) Empty() bool { + if f.items == nil { + return true + } + return len(*f.items) == 0 +} + +func (f rawFlags) AllItems() []rawFlag { + if f.items == nil { + return nil + } + return *f.items +} + +func (f rawFlags) Alias(flagName string) rawFlags { + return rawFlags{ + flagName: flagName, + items: f.items, + } +} + +func (f rawFlags) String() string { + return "" +} + +func (f rawFlags) Set(str string) error { + *f.items = append(*f.items, rawFlag{ + Name: f.flagName, + Value: str, + }) + return nil +} + +type rawFlag struct { + Name string + Value string +} + +func (f rawFlag) String() string { + return fmt.Sprintf("%s=%q", f.Name, f.Value) +} diff --git a/pkg/command/meta_dependencies.go b/pkg/command/meta_dependencies.go new file mode 100644 index 00000000000..5a82e7e2c6a --- /dev/null +++ b/pkg/command/meta_dependencies.go @@ -0,0 +1,98 @@ +// 
Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"log"
	"os"

	"github.com/kubegems/opentofu/pkg/depsfile"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// dependencyLockFilename is the filename of the dependency lock file.
//
// This file should live in the same directory as the .tf files for the
// root module of the configuration, alongside the .terraform directory
// as long as that directory's path isn't overridden by the TF_DATA_DIR
// environment variable.
//
// We always expect to find this file in the current working directory
// because that should also be the root module directory.
//
// Some commands have legacy command line arguments that make the root module
// directory something other than the current working directory; when using
// those, the lock file will be written in the "wrong" place (the current
// working directory instead of the root module directory) but we do that
// intentionally to match where the ".terraform" directory would also be
// written in that case. Eventually we will phase out those legacy arguments
// in favor of the global -chdir=... option, which _does_ preserve the
// intended invariant that the root module directory is always the current
// working directory.
const dependencyLockFilename = ".terraform.lock.hcl"

// lockedDependencies reads the dependency lock information from the lock file
// in the current working directory.
//
// If the lock file doesn't exist at the time of the call, lockedDependencies
// indicates success and returns an empty Locks object. If the file does
// exist then the result is either a representation of the contents of that
// file at the instant of the call or error diagnostics explaining some way
// in which the lock file is invalid.
+// +// The result is a snapshot of the locked dependencies at the time of the call +// and does not update as a result of calling replaceLockedDependencies +// or any other modification method. +func (m *Meta) lockedDependencies() (*depsfile.Locks, tfdiags.Diagnostics) { + // We check that the file exists first, because the underlying HCL + // parser doesn't distinguish that error from other error types + // in a machine-readable way but we want to treat that as a success + // with no locks. There is in theory a race condition here in that + // the file could be created or removed in the meantime, but we're not + // promising to support two concurrent dependency installation processes. + _, err := os.Stat(dependencyLockFilename) + if os.IsNotExist(err) { + return m.annotateDependencyLocksWithOverrides(depsfile.NewLocks()), nil + } + + ret, diags := depsfile.LoadLocksFromFile(dependencyLockFilename) + return m.annotateDependencyLocksWithOverrides(ret), diags +} + +// replaceLockedDependencies creates or overwrites the lock file in the +// current working directory to contain the information recorded in the given +// locks object. +func (m *Meta) replaceLockedDependencies(new *depsfile.Locks) tfdiags.Diagnostics { + return depsfile.SaveLocksToFile(new, dependencyLockFilename) +} + +// annotateDependencyLocksWithOverrides modifies the given Locks object in-place +// to track as overridden any provider address that's subject to testing +// overrides, development overrides, or "unmanaged provider" status. +// +// This is just an implementation detail of the lockedDependencies method, +// not intended for use anywhere else. 
+func (m *Meta) annotateDependencyLocksWithOverrides(ret *depsfile.Locks) *depsfile.Locks { + if ret == nil { + return ret + } + + for addr := range m.ProviderDevOverrides { + log.Printf("[DEBUG] Provider %s is overridden by dev_overrides", addr) + ret.SetProviderOverridden(addr) + } + for addr := range m.UnmanagedProviders { + log.Printf("[DEBUG] Provider %s is overridden as an \"unmanaged provider\"", addr) + ret.SetProviderOverridden(addr) + } + if m.testingOverrides != nil { + for addr := range m.testingOverrides.Providers { + log.Printf("[DEBUG] Provider %s is overridden in Meta.testingOverrides", addr) + ret.SetProviderOverridden(addr) + } + } + + return ret +} diff --git a/pkg/command/meta_encryption.go b/pkg/command/meta_encryption.go new file mode 100644 index 00000000000..d257aa7778a --- /dev/null +++ b/pkg/command/meta_encryption.go @@ -0,0 +1,59 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const encryptionConfigEnvName = "TF_ENCRYPTION" + +func (m *Meta) Encryption() (encryption.Encryption, tfdiags.Diagnostics) { + path, err := os.Getwd() + if err != nil { + return nil, tfdiags.Diagnostics{}.Append(fmt.Errorf("Error getting pwd: %w", err)) + } + + return m.EncryptionFromPath(path) +} + +func (m *Meta) EncryptionFromPath(path string) (encryption.Encryption, tfdiags.Diagnostics) { + // This is not ideal, but given how fragmented the command package is, loading the root module here is our best option + // See other meta commands like version check which do that same. 
+ module, diags := m.loadSingleModule(path, configs.SelectiveLoadEncryption) + if diags.HasErrors() { + return nil, diags + } + enc, encDiags := m.EncryptionFromModule(module) + diags = diags.Append(encDiags) + return enc, diags +} + +func (m *Meta) EncryptionFromModule(module *configs.Module) (encryption.Encryption, tfdiags.Diagnostics) { + cfg := module.Encryption + var diags tfdiags.Diagnostics + + env := os.Getenv(encryptionConfigEnvName) + if len(env) != 0 { + envCfg, envDiags := config.LoadConfigFromString(encryptionConfigEnvName, env) + diags = diags.Append(envDiags) + if envDiags.HasErrors() { + return nil, diags + } + cfg = cfg.Merge(envCfg) + } + + enc, encDiags := encryption.New(encryption.DefaultRegistry, cfg, module.StaticEvaluator) + diags = diags.Append(encDiags) + + return enc, diags +} diff --git a/pkg/command/meta_new.go b/pkg/command/meta_new.go new file mode 100644 index 00000000000..d449fabd1eb --- /dev/null +++ b/pkg/command/meta_new.go @@ -0,0 +1,53 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "strconv" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans/planfile" +) + +// NOTE: Temporary file until this branch is cleaned up. + +// Input returns whether or not input asking is enabled. +func (m *Meta) Input() bool { + if test || !m.input { + return false + } + + if envVar := os.Getenv(InputModeEnvVar); envVar != "" { + if v, err := strconv.ParseBool(envVar); err == nil && !v { + return false + } + } + + return true +} + +// PlanFile loads the plan file at the given path, which might be either a local +// or cloud plan. +// +// If the return value and error are both nil, the given path exists but seems +// to be a configuration directory instead. 
+// +// Error will be non-nil if path refers to something which looks like a plan +// file and loading the file fails. +func (m *Meta) PlanFile(path string, enc encryption.PlanEncryption) (*planfile.WrappedPlanFile, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + // Looks like a configuration directory. + return nil, nil + } + + return planfile.OpenWrapped(path, enc) +} diff --git a/pkg/command/meta_providers.go b/pkg/command/meta_providers.go new file mode 100644 index 00000000000..deed1734bb3 --- /dev/null +++ b/pkg/command/meta_providers.go @@ -0,0 +1,513 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "os/exec" + "strings" + + plugin "github.com/hashicorp/go-plugin" + + "github.com/kubegems/opentofu/pkg/addrs" + terraformProvider "github.com/kubegems/opentofu/pkg/builtin/providers/tf" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/logging" + tfplugin "github.com/kubegems/opentofu/pkg/plugin" + tfplugin6 "github.com/kubegems/opentofu/pkg/plugin6" + "github.com/kubegems/opentofu/pkg/providercache" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +var errUnsupportedProtocolVersion = errors.New("unsupported protocol version") + +// The TF_DISABLE_PLUGIN_TLS environment variable is intended only for use by +// the plugin SDK test framework, to reduce startup overhead when rapidly +// launching and killing lots of instances of the same provider. +// +// This is not intended to be set by end-users. +var enableProviderAutoMTLS = os.Getenv("TF_DISABLE_PLUGIN_TLS") == "" + +// providerInstaller returns an object that knows how to install providers and +// how to recover the selections from a prior installation process. 
+// +// The resulting provider installer is constructed from the results of +// the other methods providerLocalCacheDir, providerGlobalCacheDir, and +// providerInstallSource. +// +// Only one object returned from this method should be live at any time, +// because objects inside contain caches that must be maintained properly. +// Because this method wraps a result from providerLocalCacheDir, that +// limitation applies also to results from that method. +func (m *Meta) providerInstaller() *providercache.Installer { + return m.providerInstallerCustomSource(m.providerInstallSource()) +} + +// providerInstallerCustomSource is a variant of providerInstaller that +// allows the caller to specify a different installation source than the one +// that would naturally be selected. +// +// The result of this method has the same dependencies and constraints as +// providerInstaller. +// +// The result of providerInstallerCustomSource differs from +// providerInstaller only in how it determines package installation locations +// during EnsureProviderVersions. A caller that doesn't call +// EnsureProviderVersions (anything other than "tofu init") can safely +// just use the providerInstaller method unconditionally. 
+func (m *Meta) providerInstallerCustomSource(source getproviders.Source) *providercache.Installer { + targetDir := m.providerLocalCacheDir() + globalCacheDir := m.providerGlobalCacheDir() + inst := providercache.NewInstaller(targetDir, source) + if globalCacheDir != nil { + inst.SetGlobalCacheDir(globalCacheDir) + inst.SetGlobalCacheDirMayBreakDependencyLockFile(m.PluginCacheMayBreakDependencyLockFile) + } + var builtinProviderTypes []string + for ty := range m.internalProviders() { + builtinProviderTypes = append(builtinProviderTypes, ty) + } + inst.SetBuiltInProviderTypes(builtinProviderTypes) + unmanagedProviderTypes := make(map[addrs.Provider]struct{}, len(m.UnmanagedProviders)) + for ty := range m.UnmanagedProviders { + unmanagedProviderTypes[ty] = struct{}{} + } + inst.SetUnmanagedProviderTypes(unmanagedProviderTypes) + return inst +} + +// providerCustomLocalDirectorySource produces a provider source that consults +// only the given local filesystem directories for plugins to install. +// +// This is used to implement the -plugin-dir option for "tofu init", where +// the result of this method is used instead of what would've been returned +// from m.providerInstallSource. +// +// If the given list of directories is empty then the resulting source will +// have no providers available for installation at all. +func (m *Meta) providerCustomLocalDirectorySource(dirs []string) getproviders.Source { + var ret getproviders.MultiSource + for _, dir := range dirs { + ret = append(ret, getproviders.MultiSourceSelector{ + Source: getproviders.NewFilesystemMirrorSource(dir), + }) + } + return ret +} + +// providerLocalCacheDir returns an object representing the +// configuration-specific local cache directory. This is the +// only location consulted for provider plugin packages for OpenTofu +// operations other than provider installation. +// +// Only the provider installer (in "tofu init") is permitted to make +// modifications to this cache directory. 
All other commands must treat it +// as read-only. +// +// Only one object returned from this method should be live at any time, +// because objects inside contain caches that must be maintained properly. +func (m *Meta) providerLocalCacheDir() *providercache.Dir { + m.fixupMissingWorkingDir() + dir := m.WorkingDir.ProviderLocalCacheDir() + return providercache.NewDir(dir) +} + +// providerGlobalCacheDir returns an object representing the shared global +// provider cache directory, used as a read-through cache when installing +// new provider plugin packages. +// +// This function may return nil, in which case there is no global cache +// configured and new packages should be downloaded directly into individual +// configuration-specific cache directories. +// +// Only one object returned from this method should be live at any time, +// because objects inside contain caches that must be maintained properly. +func (m *Meta) providerGlobalCacheDir() *providercache.Dir { + dir := m.PluginCacheDir + if dir == "" { + return nil // cache disabled + } + return providercache.NewDir(dir) +} + +// providerInstallSource returns an object that knows how to consult one or +// more external sources to determine the availability of and package +// locations for versions of OpenTofu providers that are available for +// automatic installation. +// +// This returns the standard provider install source that consults a number +// of directories selected either automatically or via the CLI configuration. +// Users may choose to override this during a "tofu init" command by +// specifying one or more -plugin-dir options, in which case the installation +// process will construct its own source consulting only those directories +// and use that instead. 
+func (m *Meta) providerInstallSource() getproviders.Source { + // A provider source should always be provided in normal use, but our + // unit tests might not always populate Meta fully and so we'll be robust + // by returning a non-nil source that just always answers that no plugins + // are available. + if m.ProviderSource == nil { + // A multi-source with no underlying sources is effectively an + // always-empty source. + return getproviders.MultiSource(nil) + } + return m.ProviderSource +} + +// providerDevOverrideInitWarnings returns a diagnostics that contains at +// least one warning if and only if there is at least one provider development +// override in effect. If not, the result is always empty. The result never +// contains error diagnostics. +// +// The init command can use this to include a warning that the results +// may differ from what's expected due to the development overrides. For +// other commands, providerDevOverrideRuntimeWarnings should be used. +func (m *Meta) providerDevOverrideInitWarnings() tfdiags.Diagnostics { + if len(m.ProviderDevOverrides) == 0 { + return nil + } + var detailMsg strings.Builder + detailMsg.WriteString("The following provider development overrides are set in the CLI configuration:\n") + for addr, path := range m.ProviderDevOverrides { + detailMsg.WriteString(fmt.Sprintf(" - %s in %s\n", addr.ForDisplay(), path)) + } + detailMsg.WriteString("\nSkip tofu init when using provider development overrides. It is not necessary and may error unexpectedly.") + return tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Provider development overrides are in effect", + detailMsg.String(), + ), + } +} + +// providerDevOverrideRuntimeWarnings returns a diagnostics that contains at +// least one warning if and only if there is at least one provider development +// override in effect. If not, the result is always empty. The result never +// contains error diagnostics. 
+// +// Certain commands can use this to include a warning that their results +// may differ from what's expected due to the development overrides. It's +// not necessary to bother the user with this warning on every command, but +// it's helpful to return it on commands that have externally-visible side +// effects and on commands that are used to verify conformance to schemas. +// +// See providerDevOverrideInitWarnings for warnings specific to the init +// command. +func (m *Meta) providerDevOverrideRuntimeWarnings() tfdiags.Diagnostics { + if len(m.ProviderDevOverrides) == 0 { + return nil + } + var detailMsg strings.Builder + detailMsg.WriteString("The following provider development overrides are set in the CLI configuration:\n") + for addr, path := range m.ProviderDevOverrides { + detailMsg.WriteString(fmt.Sprintf(" - %s in %s\n", addr.ForDisplay(), path)) + } + detailMsg.WriteString("\nThe behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases.") + return tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Provider development overrides are in effect", + detailMsg.String(), + ), + } +} + +// providerFactories uses the selections made previously by an installer in +// the local cache directory (m.providerLocalCacheDir) to produce a map +// from provider addresses to factory functions to create instances of +// those providers. +// +// providerFactories will return an error if the installer's selections cannot +// be honored with what is currently in the cache, such as if a selected +// package has been removed from the cache or if the contents of a selected +// package have been modified outside of the installer. If it returns an error, +// the returned map may be incomplete or invalid, but will be as complete +// as possible given the cause of the error. 
+func (m *Meta) providerFactories() (map[addrs.Provider]providers.Factory, error) { + locks, diags := m.lockedDependencies() + if diags.HasErrors() { + return nil, fmt.Errorf("failed to read dependency lock file: %w", diags.Err()) + } + + // We'll always run through all of our providers, even if one of them + // encounters an error, so that we can potentially report multiple errors + // where appropriate and so that callers can potentially make use of the + // partial result we return if e.g. they want to enumerate which providers + // are available, or call into one of the providers that didn't fail. + errs := make(map[addrs.Provider]error) + + // For the providers from the lock file, we expect them to be already + // available in the provider cache because "tofu init" should already + // have put them there. + providerLocks := locks.AllProviders() + cacheDir := m.providerLocalCacheDir() + + // The internal providers are _always_ available, even if the configuration + // doesn't request them, because they don't need any special installation + // and they'll just be ignored if not used. + internalFactories := m.internalProviders() + + // We have two different special cases aimed at provider development + // use-cases, which are not for "production" use: + // - The CLI config can specify that a particular provider should always + // use a plugin from a particular local directory, ignoring anything the + // lock file or cache directory might have to say about it. This is useful + // for manual testing of local development builds. + // - The Terraform SDK test harness (and possibly other callers in future) + // can ask that we use its own already-started provider servers, which we + // call "unmanaged" because OpenTofu isn't responsible for starting + // and stopping them. 
This is intended for automated testing where a + // calling harness is responsible both for starting the provider server + // and orchestrating one or more non-interactive OpenTofu runs that then + // exercise it. + // Unmanaged providers take precedence over overridden providers because + // overrides are typically a "session-level" setting while unmanaged + // providers are typically scoped to a single unattended command. + devOverrideProviders := m.ProviderDevOverrides + unmanagedProviders := m.UnmanagedProviders + + factories := make(map[addrs.Provider]providers.Factory, len(providerLocks)+len(internalFactories)+len(unmanagedProviders)) + for name, factory := range internalFactories { + factories[addrs.NewBuiltInProvider(name)] = factory + } + for provider, lock := range providerLocks { + reportError := func(thisErr error) { + errs[provider] = thisErr + // We'll populate a provider factory that just echoes our error + // again if called, which allows us to still report a helpful + // error even if it gets detected downstream somewhere from the + // caller using our partial result. + factories[provider] = providerFactoryError(thisErr) + } + + if locks.ProviderIsOverridden(provider) { + // Overridden providers we'll handle with the other separate + // loops below, for dev overrides etc. + continue + } + + version := lock.Version() + cached := cacheDir.ProviderVersion(provider, version) + if cached == nil { + reportError(fmt.Errorf( + "there is no package for %s %s cached in %s", + provider, version, cacheDir.BasePath(), + )) + continue + } + // The cached package must match one of the checksums recorded in + // the lock file, if any. 
+ if allowedHashes := lock.PreferredHashes(); len(allowedHashes) != 0 { + matched, err := cached.MatchesAnyHash(allowedHashes) + if err != nil { + reportError(fmt.Errorf( + "failed to verify checksum of %s %s package cached in in %s: %w", + provider, version, cacheDir.BasePath(), err, + )) + continue + } + if !matched { + reportError(fmt.Errorf( + "the cached package for %s %s (in %s) does not match any of the checksums recorded in the dependency lock file", + provider, version, cacheDir.BasePath(), + )) + continue + } + } + factories[provider] = providerFactory(cached) + } + for provider, localDir := range devOverrideProviders { + factories[provider] = devOverrideProviderFactory(provider, localDir) + } + for provider, reattach := range unmanagedProviders { + factories[provider] = unmanagedProviderFactory(provider, reattach) + } + + var err error + if len(errs) > 0 { + err = providerPluginErrors(errs) + } + return factories, err +} + +func (m *Meta) internalProviders() map[string]providers.Factory { + return map[string]providers.Factory{ + "terraform": func() (providers.Interface, error) { + return terraformProvider.NewProvider(), nil + }, + } +} + +// providerFactory produces a provider factory that runs up the executable +// file in the given cache package and uses go-plugin to implement +// providers.Interface against it. 
+func providerFactory(meta *providercache.CachedProvider) providers.Factory { + return func() (providers.Interface, error) { + execFile, err := meta.ExecutableFile() + if err != nil { + return nil, err + } + + config := &plugin.ClientConfig{ + HandshakeConfig: tfplugin.Handshake, + Logger: logging.NewProviderLogger(""), + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + Managed: true, + Cmd: exec.Command(execFile), + AutoMTLS: enableProviderAutoMTLS, + VersionedPlugins: tfplugin.VersionedPlugins, + SyncStdout: logging.PluginOutputMonitor(fmt.Sprintf("%s:stdout", meta.Provider)), + SyncStderr: logging.PluginOutputMonitor(fmt.Sprintf("%s:stderr", meta.Provider)), + } + + client := plugin.NewClient(config) + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) + if err != nil { + return nil, err + } + + protoVer := client.NegotiatedVersion() + p, err := initializeProviderInstance(raw, protoVer, client, meta.Provider) + if errors.Is(err, errUnsupportedProtocolVersion) { + panic(err) + } + + return p, err + } +} + +// initializeProviderInstance uses the plugin dispensed by the RPC client, and initializes a plugin instance +// per the protocol version +func initializeProviderInstance(plugin interface{}, protoVer int, pluginClient *plugin.Client, pluginAddr addrs.Provider) (providers.Interface, error) { + // store the client so that the plugin can kill the child process + switch protoVer { + case 5: + p := plugin.(*tfplugin.GRPCProvider) + p.PluginClient = pluginClient + p.Addr = pluginAddr + return p, nil + case 6: + p := plugin.(*tfplugin6.GRPCProvider) + p.PluginClient = pluginClient + p.Addr = pluginAddr + return p, nil + default: + return nil, errUnsupportedProtocolVersion + } +} + +func devOverrideProviderFactory(provider addrs.Provider, localDir getproviders.PackageLocalDir) providers.Factory { + // A dev override is essentially a synthetic cache entry for our purposes + 
// here, so that's how we'll construct it. The providerFactory function + // doesn't actually care about the version, so we can leave it + // unspecified: overridden providers are not explicitly versioned. + log.Printf("[DEBUG] Provider %s is overridden to load from %s", provider, localDir) + return providerFactory(&providercache.CachedProvider{ + Provider: provider, + Version: getproviders.UnspecifiedVersion, + PackageDir: string(localDir), + }) +} + +// unmanagedProviderFactory produces a provider factory that uses the passed +// reattach information to connect to go-plugin processes that are already +// running, and implements providers.Interface against it. +func unmanagedProviderFactory(provider addrs.Provider, reattach *plugin.ReattachConfig) providers.Factory { + return func() (providers.Interface, error) { + config := &plugin.ClientConfig{ + HandshakeConfig: tfplugin.Handshake, + Logger: logging.NewProviderLogger("unmanaged."), + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + Managed: false, + Reattach: reattach, + SyncStdout: logging.PluginOutputMonitor(fmt.Sprintf("%s:stdout", provider)), + SyncStderr: logging.PluginOutputMonitor(fmt.Sprintf("%s:stderr", provider)), + } + + if reattach.ProtocolVersion == 0 { + // As of the 0.15 release, sdk.v2 doesn't include the protocol + // version in the ReattachConfig (only recently added to + // go-plugin), so client.NegotiatedVersion() always returns 0. We + // assume that an unmanaged provider reporting protocol version 0 is + // actually using proto v5 for backwards compatibility. 
+ if defaultPlugins, ok := tfplugin.VersionedPlugins[5]; ok { + config.Plugins = defaultPlugins + } else { + return nil, errors.New("no supported plugins for protocol 0") + } + } else if plugins, ok := tfplugin.VersionedPlugins[reattach.ProtocolVersion]; !ok { + return nil, fmt.Errorf("no supported plugins for protocol %d", reattach.ProtocolVersion) + } else { + config.Plugins = plugins + } + + client := plugin.NewClient(config) + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) + if err != nil { + return nil, err + } + + protoVer := client.NegotiatedVersion() + if protoVer == 0 { + // As of the 0.15 release, sdk.v2 doesn't include the protocol + // version in the ReattachConfig (only recently added to + // go-plugin), so client.NegotiatedVersion() always returns 0. We + // assume that an unmanaged provider reporting protocol version 0 is + // actually using proto v5 for backwards compatibility. + protoVer = 5 + } + + return initializeProviderInstance(raw, protoVer, client, provider) + } +} + +// providerFactoryError is a stub providers.Factory that returns an error +// when called. It's used to allow providerFactories to still produce a +// factory for each available provider in an error case, for situations +// where the caller can do something useful with that partial result. +func providerFactoryError(err error) providers.Factory { + return func() (providers.Interface, error) { + return nil, err + } +} + +// providerPluginErrors is an error implementation we can return from +// Meta.providerFactories to capture potentially multiple errors about the +// locally-cached plugins (or lack thereof) for particular external providers. +// +// Some functions closer to the UI layer can sniff for this error type in order +// to return a more helpful error message. 
+type providerPluginErrors map[addrs.Provider]error + +func (errs providerPluginErrors) Error() string { + if len(errs) == 1 { + for addr, err := range errs { + return fmt.Sprintf("%s: %s", addr, err) + } + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "missing or corrupted provider plugins:") + for addr, err := range errs { + fmt.Fprintf(&buf, "\n - %s: %s", addr, err) + } + return buf.String() +} diff --git a/pkg/command/meta_test.go b/pkg/command/meta_test.go new file mode 100644 index 00000000000..1f3cd4389c8 --- /dev/null +++ b/pkg/command/meta_test.go @@ -0,0 +1,413 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/mitchellh/cli" +) + +func TestMetaColorize(t *testing.T) { + var m *Meta + var args, args2 []string + + // Test basic, color + m = new(Meta) + m.Color = true + args = []string{"foo", "bar"} + args2 = []string{"foo", "bar"} + args = m.process(args) + if !reflect.DeepEqual(args, args2) { + t.Fatalf("bad: %#v", args) + } + if m.Colorize().Disable { + t.Fatal("should not be disabled") + } + + // Test basic, no change + m = new(Meta) + args = []string{"foo", "bar"} + args2 = []string{"foo", "bar"} + args = m.process(args) + if !reflect.DeepEqual(args, args2) { + t.Fatalf("bad: %#v", args) + } + if !m.Colorize().Disable { + t.Fatal("should be disabled") + } + + // Test disable #1 + m = new(Meta) + m.Color = true + args = []string{"foo", "-no-color", "bar"} + args2 = []string{"foo", "bar"} + args = m.process(args) + if !reflect.DeepEqual(args, args2) { + t.Fatalf("bad: %#v", args) + } + if !m.Colorize().Disable { + t.Fatal("should be disabled") + } + + // Test disable 
#2 + // Verify multiple -no-color options are removed from args slice. + // E.g. an additional -no-color arg could be added by TF_CLI_ARGS. + m = new(Meta) + m.Color = true + args = []string{"foo", "-no-color", "bar", "-no-color"} + args2 = []string{"foo", "bar"} + args = m.process(args) + if !reflect.DeepEqual(args, args2) { + t.Fatalf("bad: %#v", args) + } + if !m.Colorize().Disable { + t.Fatal("should be disabled") + } +} + +func TestMetaInputMode(t *testing.T) { + test = false + defer func() { test = true }() + + m := new(Meta) + args := []string{} + + fs := m.extendedFlagSet("foo") + if err := fs.Parse(args); err != nil { + t.Fatalf("err: %s", err) + } + + if m.InputMode() != tofu.InputModeStd { + t.Fatalf("bad: %#v", m.InputMode()) + } +} + +func TestMetaInputMode_envVar(t *testing.T) { + test = false + defer func() { test = true }() + + m := new(Meta) + args := []string{} + + fs := m.extendedFlagSet("foo") + if err := fs.Parse(args); err != nil { + t.Fatalf("err: %s", err) + } + + off := tofu.InputMode(0) + on := tofu.InputModeStd + cases := []struct { + EnvVar string + Expected tofu.InputMode + }{ + {"false", off}, + {"0", off}, + {"true", on}, + {"1", on}, + } + + for _, tc := range cases { + t.Setenv(InputModeEnvVar, tc.EnvVar) + if m.InputMode() != tc.Expected { + t.Fatalf("expected InputMode: %#v, got: %#v", tc.Expected, m.InputMode()) + } + } +} + +func TestMetaInputMode_disable(t *testing.T) { + test = false + defer func() { test = true }() + + m := new(Meta) + args := []string{"-input=false"} + + fs := m.extendedFlagSet("foo") + if err := fs.Parse(args); err != nil { + t.Fatalf("err: %s", err) + } + + if m.InputMode() > 0 { + t.Fatalf("bad: %#v", m.InputMode()) + } +} + +func TestMeta_initStatePaths(t *testing.T) { + m := new(Meta) + m.initStatePaths() + + if m.statePath != DefaultStateFilename { + t.Fatalf("bad: %#v", m) + } + if m.stateOutPath != DefaultStateFilename { + t.Fatalf("bad: %#v", m) + } + if m.backupPath != 
DefaultStateFilename+DefaultBackupExtension { + t.Fatalf("bad: %#v", m) + } + + m = new(Meta) + m.statePath = "foo" + m.initStatePaths() + + if m.stateOutPath != "foo" { + t.Fatalf("bad: %#v", m) + } + if m.backupPath != "foo"+DefaultBackupExtension { + t.Fatalf("bad: %#v", m) + } + + m = new(Meta) + m.stateOutPath = "foo" + m.initStatePaths() + + if m.statePath != DefaultStateFilename { + t.Fatalf("bad: %#v", m) + } + if m.backupPath != "foo"+DefaultBackupExtension { + t.Fatalf("bad: %#v", m) + } +} + +func TestMeta_Env(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + m := new(Meta) + + env, err := m.Workspace() + if err != nil { + t.Fatal(err) + } + + if env != backend.DefaultStateName { + t.Fatalf("expected env %q, got env %q", backend.DefaultStateName, env) + } + + testEnv := "test_env" + if err := m.SetWorkspace(testEnv); err != nil { + t.Fatal("error setting env:", err) + } + + env, _ = m.Workspace() + if env != testEnv { + t.Fatalf("expected env %q, got env %q", testEnv, env) + } + + if err := m.SetWorkspace(backend.DefaultStateName); err != nil { + t.Fatal("error setting env:", err) + } + + env, _ = m.Workspace() + if env != backend.DefaultStateName { + t.Fatalf("expected env %q, got env %q", backend.DefaultStateName, env) + } +} + +func TestMeta_Workspace_override(t *testing.T) { + m := new(Meta) + + testCases := map[string]struct { + workspace string + err error + }{ + "": { + "default", + nil, + }, + "development": { + "development", + nil, + }, + "invalid name": { + "", + errInvalidWorkspaceNameEnvVar, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + t.Setenv(WorkspaceNameEnvVar, name) + workspace, err := m.Workspace() + if workspace != tc.workspace { + t.Errorf("Unexpected workspace\n got: %s\nwant: %s\n", workspace, tc.workspace) + } + if err != tc.err { + t.Errorf("Unexpected error\n got: %s\nwant: %s\n", err, tc.err) + } + }) + } +} + +func 
TestMeta_Workspace_invalidSelected(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // this is an invalid workspace name + workspace := "test workspace" + + // create the workspace directories + if err := os.MkdirAll(filepath.Join(local.DefaultWorkspaceDir, workspace), 0755); err != nil { + t.Fatal(err) + } + + // create the workspace file to select it + if err := os.MkdirAll(DefaultDataDir, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(DefaultDataDir, local.DefaultWorkspaceFile), []byte(workspace), 0644); err != nil { + t.Fatal(err) + } + + m := new(Meta) + + ws, err := m.Workspace() + if ws != workspace { + t.Errorf("Unexpected workspace\n got: %s\nwant: %s\n", ws, workspace) + } + if err != nil { + t.Errorf("Unexpected error: %s", err) + } +} + +func TestMeta_process(t *testing.T) { + test = false + defer func() { test = true }() + + // Create a temporary directory for our cwd + d := t.TempDir() + os.MkdirAll(d, 0755) + defer testChdir(t, d)() + + // At one point it was the responsibility of this process function to + // insert fake additional -var-file options into the command line + // if the automatic tfvars files were present. This is no longer the + // responsibility of process (it happens in collectVariableValues instead) + // but we're still testing with these files in place to verify that + // they _aren't_ being interpreted by process, since that could otherwise + // cause them to be added more than once and mess up the precedence order. 
+ defaultVarsfile := "terraform.tfvars" + err := os.WriteFile( + filepath.Join(d, defaultVarsfile), + []byte(""), + 0644) + if err != nil { + t.Fatalf("err: %s", err) + } + fileFirstAlphabetical := "a-file.auto.tfvars" + err = os.WriteFile( + filepath.Join(d, fileFirstAlphabetical), + []byte(""), + 0644) + if err != nil { + t.Fatalf("err: %s", err) + } + fileLastAlphabetical := "z-file.auto.tfvars" + err = os.WriteFile( + filepath.Join(d, fileLastAlphabetical), + []byte(""), + 0644) + if err != nil { + t.Fatalf("err: %s", err) + } + // Regular tfvars files will not be autoloaded + fileIgnored := "ignored.tfvars" + err = os.WriteFile( + filepath.Join(d, fileIgnored), + []byte(""), + 0644) + if err != nil { + t.Fatalf("err: %s", err) + } + + tests := []struct { + GivenArgs []string + FilteredArgs []string + ExtraCheck func(*testing.T, *Meta) + }{ + { + []string{}, + []string{}, + func(t *testing.T, m *Meta) { + if got, want := m.color, true; got != want { + t.Errorf("wrong m.color value %#v; want %#v", got, want) + } + if got, want := m.Color, true; got != want { + t.Errorf("wrong m.Color value %#v; want %#v", got, want) + } + }, + }, + { + []string{"-no-color"}, + []string{}, + func(t *testing.T, m *Meta) { + if got, want := m.color, false; got != want { + t.Errorf("wrong m.color value %#v; want %#v", got, want) + } + if got, want := m.Color, false; got != want { + t.Errorf("wrong m.Color value %#v; want %#v", got, want) + } + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s", test.GivenArgs), func(t *testing.T) { + m := new(Meta) + m.Color = true // this is the default also for normal use, overridden by -no-color + args := test.GivenArgs + args = m.process(args) + + if !cmp.Equal(test.FilteredArgs, args) { + t.Errorf("wrong filtered arguments\n%s", cmp.Diff(test.FilteredArgs, args)) + } + + if test.ExtraCheck != nil { + test.ExtraCheck(t, m) + } + }) + } +} + +func TestCommand_checkRequiredVersion(t *testing.T) { + // Create a temporary working 
directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + ui := cli.NewMockUi() + meta := Meta{ + Ui: ui, + } + + diags := meta.checkRequiredVersion() + if diags == nil { + t.Fatalf("diagnostics should contain unmet version constraint, but is nil") + } + + meta.showDiagnostics(diags) + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} diff --git a/pkg/command/meta_ui.go b/pkg/command/meta_ui.go new file mode 100644 index 00000000000..b3e4d70be7d --- /dev/null +++ b/pkg/command/meta_ui.go @@ -0,0 +1,65 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/command/views" +) + +// WrappedUi is a shim which adds json compatibility to those commands which +// have not yet been refactored to support output by views.View. +// +// For those not support json output command, all output is printed by cli.Ui. +// So we create WrappedUi, contains the old cli.Ui and views.JSONView, +// implement cli.Ui interface, so that we can make all command support json +// output in a short time. 
+type WrappedUi struct { + cliUi cli.Ui + jsonView *views.JSONView + outputInJSON bool +} + +func (m *WrappedUi) Ask(s string) (string, error) { + return m.cliUi.Ask(s) +} + +func (m *WrappedUi) AskSecret(s string) (string, error) { + return m.cliUi.AskSecret(s) +} + +func (m *WrappedUi) Output(s string) { + if m.outputInJSON { + m.jsonView.Output(s) + return + } + m.cliUi.Output(s) +} + +func (m *WrappedUi) Info(s string) { + if m.outputInJSON { + m.jsonView.Info(s) + return + } + m.cliUi.Info(s) +} + +func (m *WrappedUi) Error(s string) { + if m.outputInJSON { + m.jsonView.Error(s) + return + } + m.cliUi.Error(s) +} + +func (m *WrappedUi) Warn(s string) { + if m.outputInJSON { + m.jsonView.Warn(s) + return + } + m.cliUi.Warn(s) +} diff --git a/pkg/command/meta_vars.go b/pkg/command/meta_vars.go new file mode 100644 index 00000000000..3c6de1ad1ab --- /dev/null +++ b/pkg/command/meta_vars.go @@ -0,0 +1,294 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// VarEnvPrefix is the prefix for environment variables that represent values +// for root module input variables. +const VarEnvPrefix = "TF_VAR_" + +// collectVariableValues inspects the various places that root module input variable +// values can come from and constructs a map ready to be passed to the +// backend as part of a backend.Operation. +// +// This method returns diagnostics relating to the collection of the values, +// but the values themselves may produce additional diagnostics when finally +// parsed. 
+func (m *Meta) collectVariableValues() (map[string]backend.UnparsedVariableValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if m.inputVariableCache != nil { + return m.inputVariableCache, nil + } + + ret := map[string]backend.UnparsedVariableValue{} + + // First we'll deal with environment variables, since they have the lowest + // precedence. + { + env := os.Environ() + for _, raw := range env { + if !strings.HasPrefix(raw, VarEnvPrefix) { + continue + } + raw = raw[len(VarEnvPrefix):] // trim the prefix + + eq := strings.Index(raw, "=") + if eq == -1 { + // Seems invalid, so we'll ignore it. + continue + } + + name := raw[:eq] + rawVal := raw[eq+1:] + + ret[name] = unparsedVariableValueString{ + str: rawVal, + name: name, + sourceType: tofu.ValueFromEnvVar, + } + } + } + + // Next up we load implicit files from the specified directory (first root then tests dir + // as tests dir files have higher precendence). These files are automatically loaded if present. + // There's the original terraform.tfvars (DefaultVarsFilename) along with the later-added + // search for all files ending in .auto.tfvars. + diags = diags.Append(m.addVarsFromDir(".", ret)) + diags = diags.Append(m.addVarsFromDir("tests", ret)) + + // Finally we process values given explicitly on the command line, either + // as individual literal settings or as additional files to read. + for _, rawFlag := range m.variableArgs.AllItems() { + switch rawFlag.Name { + case "-var": + // Value should be in the form "name=value", where value is a + // raw string whose interpretation will depend on the variable's + // parsing mode. + raw := rawFlag.Value + eq := strings.Index(raw, "=") + if eq == -1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid -var option", + fmt.Sprintf("The given -var option %q is not correctly specified. 
Must be a variable name and value separated by an equals sign, like -var=\"key=value\".", raw), + )) + continue + } + name := raw[:eq] + rawVal := raw[eq+1:] + if strings.HasSuffix(name, " ") { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid -var option", + fmt.Sprintf("Variable name %q is invalid due to trailing space. Did you mean -var=\"%s=%s\"?", name, strings.TrimSuffix(name, " "), strings.TrimPrefix(rawVal, " ")), + )) + continue + } + ret[name] = unparsedVariableValueString{ + str: rawVal, + name: name, + sourceType: tofu.ValueFromCLIArg, + } + + case "-var-file": + moreDiags := m.addVarsFromFile(rawFlag.Value, tofu.ValueFromNamedFile, ret) + diags = diags.Append(moreDiags) + + default: + // Should never happen; always a bug in the code that built up + // the contents of m.variableArgs. + diags = diags.Append(fmt.Errorf("unsupported variable option name %q (this is a bug in OpenTofu)", rawFlag.Name)) + } + } + m.inputVariableCache = ret + + return ret, diags +} + +func (m *Meta) addVarsFromDir(currDir string, ret map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if _, err := os.Stat(filepath.Join(currDir, DefaultVarsFilename)); err == nil { + moreDiags := m.addVarsFromFile(filepath.Join(currDir, DefaultVarsFilename), tofu.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + const defaultVarsFilenameJSON = DefaultVarsFilename + ".json" + if _, err := os.Stat(filepath.Join(currDir, defaultVarsFilenameJSON)); err == nil { + moreDiags := m.addVarsFromFile(filepath.Join(currDir, defaultVarsFilenameJSON), tofu.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + if infos, err := os.ReadDir(currDir); err == nil { + // "infos" is already sorted by name, so we just need to filter it here. 
+ for _, info := range infos { + name := info.Name() + if !isAutoVarFile(name) { + continue + } + moreDiags := m.addVarsFromFile(filepath.Join(currDir, name), tofu.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + } + + return diags +} + +func (m *Meta) addVarsFromFile(filename string, sourceType tofu.ValueSourceType, to map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + src, err := os.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read variables file", + fmt.Sprintf("Given variables file %s does not exist.", filename), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read variables file", + fmt.Sprintf("Error while reading %s: %s.", filename, err), + )) + } + return diags + } + + loader, err := m.initConfigLoader() + if err != nil { + diags = diags.Append(err) + return diags + } + + // Record the file source code for snippets in diagnostic messages. 
+ loader.Parser().ForceFileSource(filename, src) + + var f *hcl.File + + extJSON := strings.HasSuffix(filename, ".json") + extTfvars := strings.HasSuffix(filename, DefaultVarsExtension) + + // Only try json detection if ambiguous + // Ex: -var-file=<(./scripts/vars.sh) + detectJSON := !extJSON && !extTfvars && strings.HasPrefix(strings.TrimSpace(string(src)), "{") + + if extJSON || detectJSON { + var hclDiags hcl.Diagnostics + f, hclDiags = hcljson.Parse(src, filename) + diags = diags.Append(hclDiags) + if f == nil || f.Body == nil { + return diags + } + } else { + var hclDiags hcl.Diagnostics + f, hclDiags = hclsyntax.ParseConfig(src, filename, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if f == nil || f.Body == nil { + return diags + } + } + + // Before we do our real decode, we'll probe to see if there are any blocks + // of type "variable" in this body, since it's a common mistake for new + // users to put variable declarations in tfvars rather than variable value + // definitions, and otherwise our error message for that case is not so + // helpful. + { + content, _, _ := f.Body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "variable", + LabelNames: []string{"name"}, + }, + }, + }) + for _, block := range content.Blocks { + name := block.Labels[0] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable declaration in .tfvars file", + Detail: fmt.Sprintf("A .tfvars file is used to assign values to variables that have already been declared in .tf files, not to declare new variables. 
To declare variable %q, place this block in one of your .tf files, such as variables.tf.\n\nTo set a value for this variable in %s, use the definition syntax instead:\n %s = ", name, block.TypeRange.Filename, name), + Subject: &block.TypeRange, + }) + } + if diags.HasErrors() { + // If we already found problems then JustAttributes below will find + // the same problems with less-helpful messages, so we'll bail for + // now to let the user focus on the immediate problem. + return diags + } + } + + attrs, hclDiags := f.Body.JustAttributes() + diags = diags.Append(hclDiags) + + for name, attr := range attrs { + to[name] = unparsedVariableValueExpression{ + expr: attr.Expr, + sourceType: sourceType, + } + } + return diags +} + +// unparsedVariableValueExpression is a backend.UnparsedVariableValue +// implementation that was actually already parsed (!). This is +// intended to deal with expressions inside "tfvars" files. +type unparsedVariableValueExpression struct { + expr hcl.Expression + sourceType tofu.ValueSourceType +} + +func (v unparsedVariableValueExpression) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + val, hclDiags := v.expr.Value(nil) // nil because no function calls or variable references are allowed here + diags = diags.Append(hclDiags) + + rng := tfdiags.SourceRangeFromHCL(v.expr.Range()) + + return &tofu.InputValue{ + Value: val, + SourceType: v.sourceType, + SourceRange: rng, + }, diags +} + +// unparsedVariableValueString is a backend.UnparsedVariableValue +// implementation that parses its value from a string. This can be used +// to deal with values given directly on the command line and via environment +// variables. 
+type unparsedVariableValueString struct { + str string + name string + sourceType tofu.ValueSourceType +} + +func (v unparsedVariableValueString) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + val, hclDiags := mode.Parse(v.name, v.str) + diags = diags.Append(hclDiags) + + return &tofu.InputValue{ + Value: val, + SourceType: v.sourceType, + }, diags +} diff --git a/pkg/command/meta_vars_test.go b/pkg/command/meta_vars_test.go new file mode 100644 index 00000000000..2fa71fd0bc2 --- /dev/null +++ b/pkg/command/meta_vars_test.go @@ -0,0 +1,78 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "path/filepath" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestMeta_addVarsFromFile(t *testing.T) { + d := t.TempDir() + defer testChdir(t, d)() + + hclData := `foo = "bar"` + jsonData := `{"foo": "bar"}` + + cases := []struct { + filename string + contents string + errors bool + }{ + { + filename: "input.tfvars", + contents: hclData, + errors: false, + }, + { + filename: "input.json", + contents: jsonData, + errors: false, + }, + { + filename: "input_a.unknown", + contents: hclData, + errors: false, + }, + { + filename: "input_b.unknown", + contents: jsonData, + errors: false, + }, + { + filename: "mismatch.tfvars", + contents: jsonData, + errors: true, + }, + { + filename: "mismatch.json", + contents: hclData, + errors: true, + }, + } + + for _, tc := range cases { + t.Run(tc.filename, func(t *testing.T) { + target := filepath.Join(d, tc.filename) + err := os.WriteFile(target, []byte(tc.contents), 0600) + if err != nil { + t.Fatalf("err: %s", err) + } + + m := new(Meta) + to := make(map[string]backend.UnparsedVariableValue) + diags := m.addVarsFromFile(target, tofu.ValueFromAutoFile, 
to) + if tc.errors != diags.HasErrors() { + t.Log(diags.Err()) + t.Errorf("Expected: %v, got %v", tc.errors, diags.HasErrors()) + } + }) + } +} diff --git a/pkg/command/metadata_command.go b/pkg/command/metadata_command.go new file mode 100644 index 00000000000..9a35e7dd942 --- /dev/null +++ b/pkg/command/metadata_command.go @@ -0,0 +1,36 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +// MetadataCommand is a Command implementation that just shows help for +// the subcommands nested below it. +type MetadataCommand struct { + Meta +} + +func (c *MetadataCommand) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *MetadataCommand) Help() string { + helpText := ` +Usage: tofu [global options] metadata [options] [args] + + This command has subcommands for metadata related purposes. + +` + return strings.TrimSpace(helpText) +} + +func (c *MetadataCommand) Synopsis() string { + return "Metadata related commands" +} diff --git a/pkg/command/metadata_functions.go b/pkg/command/metadata_functions.go new file mode 100644 index 00000000000..9284c80ecae --- /dev/null +++ b/pkg/command/metadata_functions.go @@ -0,0 +1,87 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/kubegems/opentofu/pkg/command/jsonfunction" + "github.com/kubegems/opentofu/pkg/lang" +) + +var ( + ignoredFunctions = []string{"map", "list"} +) + +// MetadataFunctionsCommand is a Command implementation that prints out information +// about the available functions in OpenTofu. 
+type MetadataFunctionsCommand struct { + Meta +} + +func (c *MetadataFunctionsCommand) Help() string { + return metadataFunctionsCommandHelp +} + +func (c *MetadataFunctionsCommand) Synopsis() string { + return "Show signatures and descriptions for the available functions" +} + +func (c *MetadataFunctionsCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("metadata functions") + var jsonOutput bool + cmdFlags.BoolVar(&jsonOutput, "json", false, "produce JSON output") + + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + if !jsonOutput { + c.Ui.Error( + "The `tofu metadata functions` command requires the `-json` flag.\n") + cmdFlags.Usage() + return 1 + } + + scope := &lang.Scope{} + funcs := scope.Functions() + filteredFuncs := make(map[string]function.Function) + for k, v := range funcs { + if isIgnoredFunction(k) { + continue + } + filteredFuncs[k] = v + } + + jsonFunctions, marshalDiags := jsonfunction.Marshal(filteredFuncs) + if marshalDiags.HasErrors() { + c.showDiagnostics(marshalDiags) + return 1 + } + c.Ui.Output(string(jsonFunctions)) + + return 0 +} + +const metadataFunctionsCommandHelp = ` +Usage: tofu [global options] metadata functions -json + + Prints out a json representation of the available function signatures. +` + +func isIgnoredFunction(name string) bool { + for _, i := range ignoredFunctions { + if i == name || lang.CoreNamespace+i == name { + return true + } + } + return false +} diff --git a/pkg/command/metadata_functions_test.go b/pkg/command/metadata_functions_test.go new file mode 100644 index 00000000000..1437bd563d1 --- /dev/null +++ b/pkg/command/metadata_functions_test.go @@ -0,0 +1,76 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "testing" + + "github.com/mitchellh/cli" +) + +func TestMetadataFunctions_error(t *testing.T) { + ui := new(cli.MockUi) + c := &MetadataFunctionsCommand{ + Meta: Meta{ + Ui: ui, + }, + } + + // This test will always error because it's missing the -json flag + if code := c.Run(nil); code != 1 { + t.Fatalf("expected error, got:\n%s", ui.OutputWriter.String()) + } +} + +func TestMetadataFunctions_output(t *testing.T) { + ui := new(cli.MockUi) + m := Meta{Ui: ui} + c := &MetadataFunctionsCommand{Meta: m} + + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("wrong exit status %d; want 0\nstderr: %s", code, ui.ErrorWriter.String()) + } + + var got functions + gotString := ui.OutputWriter.String() + err := json.Unmarshal([]byte(gotString), &got) + if err != nil { + t.Fatal(err) + } + + if len(got.Signatures) < 100 { + t.Fatalf("expected at least 100 function signatures, got %d", len(got.Signatures)) + } + + // check if one particular stable function is correct + gotMax, ok := got.Signatures["max"] + wantMax := "{\"description\":\"`max` takes one or more numbers and returns the greatest number from the set.\",\"return_type\":\"number\",\"variadic_parameter\":{\"name\":\"numbers\",\"type\":\"number\"}}" + if !ok { + t.Fatal(`missing function signature for "max"`) + } + if string(gotMax) != wantMax { + t.Fatalf("wrong function signature for \"max\":\ngot: %q\nwant: %q", gotMax, wantMax) + } + + stderr := ui.ErrorWriter.String() + if stderr != "" { + t.Fatalf("expected empty stderr, got:\n%s", stderr) + } + + // test that ignored functions are not part of the json + for _, v := range ignoredFunctions { + _, ok := got.Signatures[v] + if ok { + t.Fatalf("found ignored function %q inside output", v) + } + } +} + +type functions struct { + FormatVersion string `json:"format_version"` + Signatures map[string]json.RawMessage `json:"function_signatures,omitempty"` +} diff --git 
a/pkg/command/output.go b/pkg/command/output.go new file mode 100644 index 00000000000..71a4ac05bf8 --- /dev/null +++ b/pkg/command/output.go @@ -0,0 +1,170 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// OutputCommand is a Command implementation that reads an output +// from a OpenTofu state and prints it. +type OutputCommand struct { + Meta +} + +func (c *OutputCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseOutput(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("output") + return 1 + } + + c.View.SetShowSensitive(args.ShowSensitive) + + view := views.NewOutput(args.ViewType, c.View) + + // Inject variables from args into meta for static evaluation + c.GatherVariables(args.Vars) + + // Load the encryption configuration + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.View.Diagnostics(diags) + return 1 + } + + // Fetch data from state + outputs, diags := c.Outputs(args.StatePath, enc) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Render the view + viewDiags := view.Output(args.Name, outputs) + diags = diags.Append(viewDiags) + + view.Diagnostics(diags) + + if diags.HasErrors() { + return 1 + } + + return 0 +} + +func (c *OutputCommand) Outputs(statePath string, enc encryption.Encryption) (map[string]*states.OutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // 
Allow state path override + if statePath != "" { + c.Meta.statePath = statePath + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + return nil, diags + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + env, err := c.Workspace() + if err != nil { + diags = diags.Append(fmt.Errorf("Error selecting workspace: %w", err)) + return nil, diags + } + + // Get the state + stateStore, err := b.StateMgr(env) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to load state: %w", err)) + return nil, diags + } + + output, err := stateStore.GetRootOutputValues() + if err != nil { + return nil, diags.Append(err) + } + + return output, diags +} + +func (c *OutputCommand) GatherVariables(args *arguments.Vars) { + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. + + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *OutputCommand) Help() string { + helpText := ` +Usage: tofu [global options] output [options] [NAME] + + Reads an output variable from a OpenTofu state file and prints + the value. With no additional arguments, output will display all + the outputs for the root module. If NAME is not specified, all + outputs are printed. + +Options: + + -state=path Path to the state file to read. Defaults to + "terraform.tfstate". Ignored when remote + state is used. + + -no-color If specified, output won't contain any color. 
+ + -json If specified, machine readable output will be + printed in JSON format. + + -raw For value types that can be automatically + converted to a string, will print the raw + string directly, rather than a human-oriented + representation of the value. + + -show-sensitive If specified, sensitive values will be displayed. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` + return strings.TrimSpace(helpText) +} + +func (c *OutputCommand) Synopsis() string { + return "Show output values from your root module" +} diff --git a/pkg/command/output_test.go b/pkg/command/output_test.go new file mode 100644 index 00000000000..9a468c1e713 --- /dev/null +++ b/pkg/command/output_test.go @@ -0,0 +1,397 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestOutput(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + if actual != `"bar"` { + t.Fatalf("bad: %#v", actual) + } +} + +func TestOutput_json(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-json", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + expected := "{\n \"foo\": {\n \"sensitive\": false,\n \"type\": \"string\",\n \"value\": \"bar\"\n }\n}" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestOutput_emptyOutputs(t *testing.T) { + originalState := states.NewState() + statePath := testStateFile(t, originalState) + + p := testProvider() + view, done := testView(t) + c := 
&OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-no-color", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + // Warning diagnostics should go to stdout + if got, want := output.Stdout(), "Warning: No outputs found"; !strings.Contains(got, want) { + t.Fatalf("bad output: expected to contain %q, got:\n%s", want, got) + } +} + +func TestOutput_badVar(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "bar", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_blank(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + s.SetOutputValue( + addrs.OutputValue{Name: "name"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("john-doe"), + false, + ) + }) + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "", + } + + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + expectedOutput := "foo = \"bar\"\nname = \"john-doe\"\n" + if got := output.Stdout(); got != expectedOutput { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", got, 
expectedOutput) + } +} + +func TestOutput_manyArgs(t *testing.T) { + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "bad", + "bad", + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestOutput_noArgs(t *testing.T) { + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stdout()) + } +} + +func TestOutput_noState(t *testing.T) { + originalState := states.NewState() + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_noVars(t *testing.T) { + originalState := states.NewState() + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "bar", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } +} + +func TestOutput_stateDefault(t *testing.T) { + originalState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, + ) + }) + + // Write the state file in a temporary directory with the + // default filename. 
+ td := testTempDir(t) + statePath := filepath.Join(td, DefaultStateFilename) + + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + err = writeStateForTesting(originalState, f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Change to that directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(filepath.Dir(statePath)); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + if actual != `"bar"` { + t.Fatalf("bad: %#v", actual) + } +} + +func TestOutput_showSensitiveArg(t *testing.T) { + originalState := stateWithSensitiveValueForOutput() + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-show-sensitive", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + if actual != "foo = \"bar\"" { + t.Fatalf("bad: %#v", actual) + } +} + +func TestOutput_withoutShowSensitiveArg(t *testing.T) { + originalState := stateWithSensitiveValueForOutput() + + statePath := testStateFile(t, originalState) + + view, done := testView(t) + c := &OutputCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := 
strings.TrimSpace(output.Stdout()) + if actual != "foo = " { + t.Fatalf("bad: %#v", actual) + } +} + +// stateWithSensitiveValueForOutput return a state with an output value +// marked as sensitive. +func stateWithSensitiveValueForOutput() *states.State { + state := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + true, + ) + }) + return state +} diff --git a/pkg/command/plan.go b/pkg/command/plan.go new file mode 100644 index 00000000000..0da34a1c403 --- /dev/null +++ b/pkg/command/plan.go @@ -0,0 +1,301 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// PlanCommand is a Command implementation that compares a OpenTofu +// configuration to an actual infrastructure and shows the differences. +type PlanCommand struct { + Meta +} + +func (c *PlanCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Propagate -no-color for legacy use of Ui. The remote backend and + // cloud package use this; it should be removed when/if they are + // migrated to views. 
	c.Meta.color = !common.NoColor
	c.Meta.Color = c.Meta.color

	// Parse and validate flags
	args, diags := arguments.ParsePlan(rawArgs)

	c.View.SetShowSensitive(args.ShowSensitive)

	// Instantiate the view, even if there are flag errors, so that we render
	// diagnostics according to the desired view
	view := views.NewPlan(args.ViewType, c.View)

	if diags.HasErrors() {
		view.Diagnostics(diags)
		view.HelpPrompt()
		return 1
	}

	// Check for user-supplied plugin path
	var err error
	if c.pluginPath, err = c.loadPluginPath(); err != nil {
		diags = diags.Append(err)
		view.Diagnostics(diags)
		return 1
	}

	// FIXME: the -input flag value is needed to initialize the backend and the
	// operation, but there is no clear path to pass this value down, so we
	// continue to mutate the Meta object state for now.
	c.Meta.input = args.InputEnabled

	// FIXME: the -parallelism flag is used to control the concurrency of
	// OpenTofu operations. At the moment, this value is used both to
	// initialize the backend via the ContextOpts field inside CLIOpts, and to
	// set a largely unused field on the Operation request. Again, there is no
	// clear path to pass this value down, so we continue to mutate the Meta
	// object state for now.
	c.Meta.parallelism = args.Operation.Parallelism

	diags = diags.Append(c.providerDevOverrideRuntimeWarnings())

	// Inject variables from args into meta for static evaluation
	c.GatherVariables(args.Vars)

	// Load the encryption configuration
	enc, encDiags := c.Encryption()
	diags = diags.Append(encDiags)
	if encDiags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Prepare the backend with the backend-specific arguments
	be, beDiags := c.PrepareBackend(args.State, args.ViewType, enc)
	diags = diags.Append(beDiags)
	if diags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Build the operation request
	opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation, args.OutPath, args.GenerateConfigPath, enc)
	diags = diags.Append(opDiags)
	if diags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	// Before we delegate to the backend, we'll print any warning diagnostics
	// we've accumulated here, since the backend will start fresh with its own
	// diagnostics.
	view.Diagnostics(diags)
	diags = nil

	// Perform the operation
	op, diags := c.RunOperation(be, opReq)
	view.Diagnostics(diags)
	if diags.HasErrors() {
		return 1
	}

	if op.Result != backend.OperationSuccess {
		return op.Result.ExitStatus()
	}
	// With -detailed-exitcode, a successful plan that contains changes exits
	// with status 2 (see the -detailed-exitcode entry in Help).
	if args.DetailedExitCode && !op.PlanEmpty {
		return 2
	}

	return op.Result.ExitStatus()
}

// PrepareBackend applies the state-related CLI arguments to the Meta object,
// loads the backend configuration from the current directory, and initializes
// the enhanced backend that will run the plan operation.
func (c *PlanCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType, enc encryption.Encryption) (backend.Enhanced, tfdiags.Diagnostics) {
	// FIXME: we need to apply the state arguments to the meta object here
	// because they are later used when initializing the backend. Carving a
	// path to pass these arguments to the functions that need them is
	// difficult but would make their use easier to understand.
	c.Meta.applyStateArguments(args)

	backendConfig, diags := c.loadBackendConfig(".")
	if diags.HasErrors() {
		return nil, diags
	}

	// Load the backend
	be, beDiags := c.Backend(&BackendOpts{
		Config:   backendConfig,
		ViewType: viewType,
	}, enc.State())
	diags = diags.Append(beDiags)
	if beDiags.HasErrors() {
		return nil, diags
	}

	return be, diags
}

// OperationRequest builds the backend.Operation request for a plan operation
// from the parsed CLI arguments, wiring in the view's hooks and the config
// loader. Returns nil plus diagnostics if the config loader cannot be
// initialized.
func (c *PlanCommand) OperationRequest(
	be backend.Enhanced,
	view views.Plan,
	viewType arguments.ViewType,
	args *arguments.Operation,
	planOutPath string,
	generateConfigOut string,
	enc encryption.Encryption,
) (*backend.Operation, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Build the operation
	opReq := c.Operation(be, viewType, enc)
	opReq.ConfigDir = "."
	opReq.PlanMode = args.PlanMode
	opReq.Hooks = view.Hooks()
	opReq.PlanRefresh = args.Refresh
	opReq.PlanOutPath = planOutPath
	opReq.GenerateConfigOut = generateConfigOut
	opReq.Targets = args.Targets
	opReq.ForceReplace = args.ForceReplace
	opReq.Type = backend.OperationTypePlan
	opReq.View = view.Operation()

	var err error
	opReq.ConfigLoader, err = c.initConfigLoader()
	if err != nil {
		diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %w", err))
		return nil, diags
	}

	return opReq, diags
}

// GatherVariables copies the -var/-var-file arguments into the Meta object so
// they are available for static evaluation.
func (c *PlanCommand) GatherVariables(args *arguments.Vars) {
	// FIXME the arguments package currently trivially gathers variable related
	// arguments in a heterogeneous slice, in order to minimize the number of
	// code paths gathering variables during the transition to this structure.
	// Once all commands that gather variables have been converted to this
	// structure, we could move the variable gathering code to the arguments
	// package directly, removing this shim layer.
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *PlanCommand) Help() string { + helpText := ` +Usage: tofu [global options] plan [options] + + Generates a speculative execution plan, showing what actions OpenTofu + would take to apply the current configuration. This command will not + actually perform the planned actions. + + You can optionally save the plan to a file, which you can then pass to + the "apply" command to perform exactly the actions described in the plan. + +Plan Customization Options: + + The following options customize how OpenTofu will produce its plan. You + can also use these options when you run "tofu apply" without passing + it a saved plan, in order to plan and apply in a single command. + + -destroy Select the "destroy" planning mode, which creates a plan + to destroy all objects currently managed by this + OpenTofu configuration instead of the usual behavior. + + -refresh-only Select the "refresh only" planning mode, which checks + whether remote objects still match the outcome of the + most recent OpenTofu apply but does not propose any + actions to undo any changes made outside of OpenTofu. + + -refresh=false Skip checking for external changes to remote objects + while creating the plan. This can potentially make + planning faster, but at the expense of possibly planning + against a stale record of the remote system state. + + -replace=resource Force replacement of a particular resource instance using + its resource address. If the plan would've normally + produced an update or no-op action for this instance, + OpenTofu will plan to replace it instead. You can use + this option multiple times to replace more than one object. 
+ + -target=resource Limit the planning operation to only the given module, + resource, or resource instance and all of its + dependencies. You can use this option multiple times to + include more than one object. This is for exceptional + use only. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +Other Options: + + -compact-warnings If OpenTofu produces any warnings that are not + accompanied by errors, shows them in a more compact + form that includes only the summary messages. + + -detailed-exitcode Return detailed exit codes when the command exits. + This will change the meaning of exit codes to: + 0 - Succeeded, diff is empty (no changes) + 1 - Errored + 2 - Succeeded, there is a diff + + -generate-config-out=path (Experimental) If import blocks are present in + configuration, instructs OpenTofu to generate HCL + for any imported resources not already present. The + configuration is written to a new file at PATH, + which must not already exist. OpenTofu may still + attempt to write configuration if the plan errors. + + -input=true Ask for input for variables if not directly set. + + -lock=false Don't hold a state lock during the operation. This + is dangerous if others might concurrently run + commands against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -concise Displays plan output in a concise way, skipping the + refreshing log lines. + + -out=path Write a plan file to the given path. This can be + used as input to the "apply" command. + + -parallelism=n Limit the number of concurrent operations. Defaults + to 10. 
+ + -state=statefile A legacy option used for the local backend only. + See the local backend's documentation for more + information. + + -show-sensitive If specified, sensitive values will be displayed. +` + return strings.TrimSpace(helpText) +} + +func (c *PlanCommand) Synopsis() string { + return "Show changes required by the current configuration" +} diff --git a/pkg/command/plan_test.go b/pkg/command/plan_test.go new file mode 100644 index 00000000000..aac91176ccd --- /dev/null +++ b/pkg/command/plan_test.go @@ -0,0 +1,1812 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "fmt" + "os" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + backendinit "github.com/kubegems/opentofu/pkg/backend/init" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestPlan(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} +func TestPlan_conditionalSensitive(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-plan-conditional-sensitive"), td) + defer testChdir(t, td)() + + p := 
planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t).Stderr() + if code != 1 { + t.Fatalf("bad status code: %d\n\n%s", code, output) + } + + if strings.Count(output, "Output refers to sensitive values") != 9 { + t.Fatal("Not all outputs have issue with refer to sensitive value", output) + } +} + +func TestPlan_lockedState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + unlock, err := testLockState(t, testDataDir, filepath.Join(td, DefaultStateFilename)) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + if code == 0 { + t.Fatal("expected error", done(t).Stdout()) + } + + output := done(t).Stderr() + if !strings.Contains(output, "lock") { + t.Fatal("command output does not look like a lock error:", output) + } +} + +func TestPlan_plan(t *testing.T) { + testCwd(t) + + planPath := testPlanFileNoop(t) + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{planPath} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("wrong exit status %d; want 1\nstderr: %s", code, output.Stderr()) + } +} + +func TestPlan_destroy(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + 
&states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + outPath := testTempFile(t) + statePath := testStateFile(t, originalState) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-destroy", + "-out", outPath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + for _, rc := range plan.Changes.Resources { + if got, want := rc.Action, plans.Delete; got != want { + t.Fatalf("wrong action %s for %s; want %s\nplanned change: %s", got, rc.Addr, want, spew.Sdump(rc)) + } + } +} + +func TestPlan_noState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that refresh was called + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_instance"].Block.ImpliedType()) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_generatedConfigPath(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-import-config-gen"), td) + defer testChdir(t, td)() + + genPath := 
filepath.Join(td, "generated.tf") + + p := planFixtureProvider() + view, done := testView(t) + + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + Private: nil, + }, + }, + } + + args := []string{ + "-generate-config-out", genPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + testFileEquals(t, genPath, filepath.Join(td, "generated.tf.expected")) +} + +func TestPlan_outPath(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.NullVal(cty.EmptyObject), + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read +} + +func TestPlan_outPathNoChange(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + // Aside from "id" (which is computed) the values here must + // exactly match the values in the "plan" test fixture in 
order + // to produce the empty plan we need for this test. + AttrsJSON: []byte(`{"id":"bar","ami":"bar","network_interface":[{"description":"Main network interface","device_index":"0"}]}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-out", outPath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + if !plan.Changes.Empty() { + t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) + } +} + +func TestPlan_outPathWithError(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-fail-condition"), td) + defer testChdir(t, td)() + + outPath := filepath.Join(td, "test.plan") + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.NullVal(cty.EmptyObject), + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("expected non-zero exit status", output) + } + + plan := testReadPlan(t, outPath) // will call t.Fatal itself if the file cannot be read + if !plan.Errored { + t.Fatal("plan should be marked with Errored") + } + + if plan.Checks == nil { + t.Fatal("plan contains no checks") + } + + // the checks should only contain one failure + results := plan.Checks.ConfigResults.Elements() + if len(results) != 1 { + t.Fatal("incorrect number of check 
results", len(results)) + } + if results[0].Value.Status != checks.StatusFail { + t.Errorf("incorrect status, got %s", results[0].Value.Status) + } +} + +// When using "-out" with a backend, the plan should encode the backend config +func TestPlan_outBackend(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-out-backend"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","ami":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + // Set up our backend state + dataState, srv := testBackendState(t, originalState, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + outPath := "foo" + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "ami": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Logf("stdout: %s", output.Stdout()) + t.Fatalf("plan command failed with exit code 
%d\n\n%s", code, output.Stderr()) + } + + plan := testReadPlan(t, outPath) + if !plan.Changes.Empty() { + t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) + } + + if got, want := plan.Backend.Type, "http"; got != want { + t.Errorf("wrong backend type %q; want %q", got, want) + } + if got, want := plan.Backend.Workspace, "default"; got != want { + t.Errorf("wrong backend workspace %q; want %q", got, want) + } + { + httpBackend := backendinit.Backend("http")(encryption.StateEncryptionDisabled()) + schema := httpBackend.ConfigSchema() + got, err := plan.Backend.Config.Decode(schema.ImpliedType()) + if err != nil { + t.Fatalf("failed to decode backend config in plan: %s", err) + } + want, err := dataState.Backend.Config(schema) + if err != nil { + t.Fatalf("failed to decode cached config: %s", err) + } + if !want.RawEquals(got) { + t.Errorf("wrong backend config\ngot: %#v\nwant: %#v", got, want) + } + } +} + +func TestPlan_refreshFalse(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-existing-state"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-refresh=false", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not have been called") + } +} + +func TestPlan_refreshTrue(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-existing-state"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-refresh=true", + } 
+ code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatalf("ReadResource should have been called") + } +} + +// A consumer relies on the fact that running +// tofu plan -refresh=false -refresh=true gives the same result as +// tofu plan -refresh=true. +// While the flag logic itself is handled by the stdlib flags package (and code +// in main() that is tested elsewhere), we verify the overall plan command +// behaviour here in case we accidentally break this with additional logic. +func TestPlan_refreshFalseRefreshTrue(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-existing-state"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-refresh=false", + "-refresh=true", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } +} + +func TestPlan_state(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + originalState := testState() + statePath := testStateFile(t, originalState) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ 
+ "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "device_index": cty.String, + "description": cty.String, + })), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_stateDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + // Generate state and move it to the default path + originalState := testState() + statePath := testStateFile(t, originalState) + os.Rename(statePath, path.Join(td, "terraform.tfstate")) + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Verify that the provider was called with the existing state + actual := p.PlanResourceChangeRequest.PriorState + expected := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.NullVal(cty.String), + "network_interface": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "device_index": cty.String, + "description": cty.String, + })), + }) + if !expected.RawEquals(actual) { + t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestPlan_validate(t *testing.T) { + // This is triggered by not asking for input so we have to set this to false + test = false + defer func() { test = true }() + + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-invalid"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-no-color"} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + actual := output.Stderr() + if want := "Error: Invalid count argument"; !strings.Contains(actual, want) { + t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) + } + if want := "9: count = timestamp()"; !strings.Contains(actual, want) { + t.Fatalf("unexpected error output\ngot:\n%s\n\nshould contain: %s", actual, want) + } +} + +func TestPlan_vars(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{ + "-var", "foo=bar", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varsInvalid(t *testing.T) { + testCases := []struct { + args []string + wantErr string + }{ + { + []string{"-var", "foo"}, + `The given -var option "foo" is not correctly 
specified.`, + }, + { + []string{"-var", "foo = bar"}, + `Variable name "foo " is invalid due to trailing space.`, + }, + } + + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + for _, tc := range testCases { + t.Run(strings.Join(tc.args, " "), func(t *testing.T) { + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run(tc.args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + + got := output.Stderr() + if !strings.Contains(got, tc.wantErr) { + t.Fatalf("bad error output, want %q, got:\n%s", tc.wantErr, got) + } + }) + } +} + +func TestPlan_varsUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + // The plan command will prompt for interactive input of var.foo. + // We'll answer "bar" to that prompt, which should then allow this + // configuration to apply even though var.foo doesn't have a + // default value and there are no -var arguments on our command line. 
+ + // This will (helpfully) panic if more than one variable is requested during plan: + // https://github.com/hashicorp/terraform/issues/26027 + close := testInteractiveInput(t, []string{"bar"}) + defer close() + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +// This test adds a required argument to the test provider to validate +// processing of user input: +// https://github.com/hashicorp/terraform/issues/26035 +func TestPlan_providerArgumentUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // The plan command will prompt for interactive input of provider.test.region + defaultInputReader = bytes.NewBufferString("us-east-1\n") + + p := planFixtureProvider() + // override the planFixtureProvider schema to include a required provider argument + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: 
true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +// Test that tofu properly merges provider configuration that's split +// between config files and interactive input variables. +// https://github.com/hashicorp/terraform/issues/28956 +func TestPlan_providerConfigMerge(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-provider-input"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // The plan command will prompt for interactive input of provider.test.region + defaultInputReader = bytes.NewBufferString("us-east-1\n") + + p := planFixtureProvider() + // override the planFixtureProvider schema to include a required provider argument and a nested block + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + "url": {Type: cty.String, Required: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "auth": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "user": {Type: cty.String, Required: true}, + "password": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ 
+ "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure provider not called") + } + + // For this test, we want to confirm that we've sent the expected config + // value *to* the provider. + got := p.ConfigureProviderRequest.Config + want := cty.ObjectVal(map[string]cty.Value{ + "auth": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("one"), + "password": cty.StringVal("onepw"), + }), + cty.ObjectVal(map[string]cty.Value{ + "user": cty.StringVal("two"), + "password": cty.StringVal("twopw"), + }), + }), + "region": cty.StringVal("us-east-1"), + "url": cty.StringVal("example.com"), + }) + + if !got.RawEquals(want) { + t.Fatal("wrong provider config") + } + +} + +func TestPlan_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := testTempFile(t) + if err := os.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{ + "-var-file", varFilePath, + } + code := 
c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varFileDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := os.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + actual := "" + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + actual = req.ProposedNewState.GetAttr("value").AsString() + resp.PlannedState = req.ProposedNewState + return + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if actual != "bar" { + t.Fatal("didn't work") + } +} + +func TestPlan_varFileWithDecls(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-vars"), td) + defer testChdir(t, td)() + + varFilePath := testTempFile(t) + if err := os.WriteFile(varFilePath, []byte(planVarFileWithDecl), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + p := planVarsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-var-file", varFilePath, + } + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatalf("succeeded; want failure\n\n%s", output.Stdout()) + } + + msg := output.Stderr() + if got, want := msg, "Variable declaration in .tfvars file"; !strings.Contains(got, want) { + t.Fatalf("missing 
expected error message\nwant message containing %q\ngot:\n%s", want, got) + } +} + +func TestPlan_detailedExitcode(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + t.Run("return 1", func(t *testing.T) { + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + // Running plan without setting testingOverrides is similar to plan without init + View: view, + }, + } + code := c.Run([]string{"-detailed-exitcode"}) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + }) + + t.Run("return 2", func(t *testing.T) { + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-detailed-exitcode"}) + output := done(t) + if code != 2 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + }) +} + +func TestPlan_detailedExitcode_emptyDiff(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-emptydiff"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{"-detailed-exitcode"} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +func TestPlan_shutdown(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-shutdown"), td) + defer testChdir(t, td)() + + cancelled := make(chan struct{}) + shutdownCh := make(chan struct{}) + + p := testProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + ShutdownCh: shutdownCh, + }, + } + + p.StopFn = func() error { + close(cancelled) + return nil + } + + var once sync.Once + + p.PlanResourceChangeFn = 
func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + once.Do(func() { + shutdownCh <- struct{}{} + }) + + // Because of the internal lock in the MockProvider, we can't + // coordinate directly with the calling of Stop, and making the + // MockProvider concurrent is disruptive to a lot of existing tests. + // Wait here a moment to help make sure the main goroutine gets to the + // Stop call before we exit, or the plan may finish before it can be + // canceled. + time.Sleep(200 * time.Millisecond) + + s := req.ProposedNewState.AsValueMap() + s["ami"] = cty.StringVal("bar") + resp.PlannedState = cty.ObjectVal(s) + return + } + + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + code := c.Run([]string{}) + output := done(t) + if code != 1 { + t.Errorf("wrong exit code %d; want 1\noutput:\n%s", code, output.Stdout()) + } + + select { + case <-cancelled: + default: + t.Error("command not cancelled") + } +} + +func TestPlan_init_required(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + // Running plan without setting testingOverrides is similar to plan without init + View: view, + }, + } + + args := []string{"-no-color"} + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("expected error, got success") + } + got := output.Stderr() + if !(strings.Contains(got, "tofu init") && strings.Contains(got, "provider registry.opentofu.org/hashicorp/test: required by this configuration but no version is selected")) { + t.Fatal("wrong error message in output:", got) + } +} + +// Config with multiple resources, targeting plan of a subset +func TestPlan_targeted(t 
*testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if got, want := output.Stdout(), "3 to add, 0 to change, 0 to destroy"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } +} + +// Diagnostics for invalid -target flags +func TestPlan_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stdout()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, 
wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + +func TestPlan_replace(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-replace"), td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"hello"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-state", statePath, + "-no-color", + "-replace", "test_instance.a", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("wrong exit code %d\n\n%s", code, output.Stderr()) + } + + stdout := output.Stdout() + if got, want := stdout, "1 to add, 0 to change, 1 to destroy"; !strings.Contains(got, want) { + t.Errorf("wrong plan summary\ngot output:\n%s\n\nwant substring: %s", got, want) + } + if got, want := stdout, "test_instance.a will be replaced, as requested"; !strings.Contains(got, want) { + t.Errorf("missing replace explanation\ngot 
output:\n%s\n\nwant substring: %s", got, want) + } +} + +// Verify that the parallelism flag allows no more than the desired number of +// concurrent calls to PlanResourceChange. +func TestPlan_parallelism(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("parallelism"), td) + defer testChdir(t, td)() + + par := 4 + + // started is a semaphore that we use to ensure that we never have more + // than "par" plan operations happening concurrently + started := make(chan struct{}, par) + + // beginCtx is used as a starting gate to hold back PlanResourceChange + // calls until we reach the desired concurrency. The cancel func "begin" is + // called once we reach the desired concurrency, allowing all apply calls + // to proceed in unison. + beginCtx, begin := context.WithCancel(context.Background()) + + // Since our mock provider has its own mutex preventing concurrent calls + // to ApplyResourceChange, we need to use a number of separate providers + // here. They will all have the same mock implementation function assigned + // but crucially they will each have their own mutex. + providerFactories := map[addrs.Provider]providers.Factory{} + for i := 0; i < 10; i++ { + name := fmt.Sprintf("test%d", i) + provider := &tofu.MockProvider{} + provider.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + name + "_instance": {Block: &configschema.Block{}}, + }, + } + provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + // If we ever have more than our intended parallelism number of + // plan operations running concurrently, the semaphore will fail. 
+ select { + case started <- struct{}{}: + defer func() { + <-started + }() + default: + t.Fatal("too many concurrent apply operations") + } + + // If we never reach our intended parallelism, the context will + // never be canceled and the test will time out. + if len(started) >= par { + begin() + } + <-beginCtx.Done() + + // do some "work" + // Not required for correctness, but makes it easier to spot a + // failure when there is more overlap. + time.Sleep(10 * time.Millisecond) + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + providerFactories[addrs.NewDefaultProvider(name)] = providers.FactoryFixed(provider) + } + testingOverrides := &testingOverrides{ + Providers: providerFactories, + } + + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: testingOverrides, + View: view, + }, + } + + args := []string{ + fmt.Sprintf("-parallelism=%d", par), + } + + res := c.Run(args) + output := done(t) + if res != 0 { + t.Fatal(output.Stdout()) + } +} + +func TestPlan_warnings(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + t.Run("full warnings", func(t *testing.T) { + p := planWarningsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + code := c.Run([]string{}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) + wantWarnings := []string{ + "warning 1", + "warning 2", + "warning 3", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + p := planWarningsFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: 
metaOverridesForProvider(p), + View: view, + }, + } + code := c.Run([]string{"-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 3 warnings (returned by planWarningsFixtureProvider()) + // and the message that plan was run with -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "warning 3", + "To see the full warning notes, run OpenTofu without -compact-warnings.", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +func TestPlan_jsonGoldenReference(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("plan"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-json", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + checkGoldenReference(t, output, "plan") +} + +// planFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan . This schema should be +// assigned to a mock provider named "test". 
+func planFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func TestPlan_showSensitiveArg(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-sensitive-output"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-show-sensitive", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad status code: \n%s", output.Stderr()) + } + + if got, want := output.Stdout(), "sensitive = \"Hello world\""; !strings.Contains(got, want) { + t.Fatalf("got incorrect output, want %q, got:\n%s", want, got) + } +} + +func TestPlan_withoutShowSensitiveArg(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("plan-sensitive-output"), td) + defer testChdir(t, td)() + + p := planFixtureProvider() + view, done := testView(t) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := 
[]string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad status code: \n%s", output.Stderr()) + } + + if got, want := output.Stdout(), "sensitive = (sensitive value)"; !strings.Contains(got, want) { + t.Fatalf("got incorrect output, want %q, got:\n%s", want, got) + } +} + +// planFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan. This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan +// step just passing through the new object proposed by OpenTofu Core. +func planFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +// planVarsFixtureSchema returns a schema suitable for processing the +// configuration in testdata/plan-vars . This schema should be +// assigned to a mock provider named "test". +func planVarsFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// planVarsFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan-vars. 
This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan +// step just passing through the new object proposed by OpenTofu Core. +func planVarsFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planVarsFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +// planFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/plan. This mock has +// GetSchemaResponse and PlanResourceChangeFn populated, returning 3 warnings. +func planWarningsFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = planFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + tfdiags.SimpleWarning("warning 3"), + }, + PlannedState: req.ProposedNewState, + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("zzzzz"), + "valid": cty.BoolVal(true), + }), + } + } + return p +} + +const planVarFile = ` +foo = "bar" +` + +const planVarFileWithDecl = ` +foo = "bar" + +variable "nope" { +} +` diff --git a/pkg/command/plugins.go b/pkg/command/plugins.go new file mode 100644 index 00000000000..a60b8dd61c6 --- 
/dev/null +++ b/pkg/command/plugins.go @@ -0,0 +1,176 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "log" + "os/exec" + "path/filepath" + "runtime" + + plugin "github.com/hashicorp/go-plugin" + "github.com/kardianos/osext" + + fileprovisioner "github.com/kubegems/opentofu/pkg/builtin/provisioners/file" + localexec "github.com/kubegems/opentofu/pkg/builtin/provisioners/local-exec" + remoteexec "github.com/kubegems/opentofu/pkg/builtin/provisioners/remote-exec" + "github.com/kubegems/opentofu/pkg/logging" + tfplugin "github.com/kubegems/opentofu/pkg/plugin" + "github.com/kubegems/opentofu/pkg/plugin/discovery" + "github.com/kubegems/opentofu/pkg/provisioners" +) + +// NOTE WELL: The logic in this file is primarily about plugin types OTHER THAN +// providers, which use an older set of approaches implemented here. +// +// The provider-related functions live primarily in meta_providers.go, and +// lean on some different underlying mechanisms in order to support automatic +// installation and a hierarchical addressing namespace, neither of which +// are supported for other plugin types. + +// store the user-supplied path for plugin discovery +func (m *Meta) storePluginPath(pluginPath []string) error { + if len(pluginPath) == 0 { + return nil + } + + m.fixupMissingWorkingDir() + + // remove the plugin dir record if the path was set to an empty string + if len(pluginPath) == 1 && (pluginPath[0] == "") { + return m.WorkingDir.SetForcedPluginDirs(nil) + } + + return m.WorkingDir.SetForcedPluginDirs(pluginPath) +} + +// Load the user-defined plugin search path into Meta.pluginPath if the file +// exists. 
+func (m *Meta) loadPluginPath() ([]string, error) { + m.fixupMissingWorkingDir() + return m.WorkingDir.ForcedPluginDirs() +} + +// the default location for automatically installed plugins +func (m *Meta) pluginDir() string { + return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) +} + +// pluginDirs return a list of directories to search for plugins. +// +// Earlier entries in this slice get priority over later when multiple copies +// of the same plugin version are found, but newer versions always override +// older versions where both satisfy the provider version constraints. +func (m *Meta) pluginDirs(includeAutoInstalled bool) []string { + // user defined paths take precedence + if len(m.pluginPath) > 0 { + return m.pluginPath + } + + // When searching the following directories, earlier entries get precedence + // if the same plugin version is found twice, but newer versions will + // always get preference below regardless of where they are coming from. + // TODO: Add auto-install dir, default vendor dir and optional override + // vendor dir(s). + dirs := []string{"."} + + // Look in the same directory as the OpenTofu executable. + // If found, this replaces what we found in the config path. + exePath, err := osext.Executable() + if err != nil { + log.Printf("[ERROR] Error discovering exe directory: %s", err) + } else { + dirs = append(dirs, filepath.Dir(exePath)) + } + + // add the user vendor directory + dirs = append(dirs, DefaultPluginVendorDir) + + if includeAutoInstalled { + dirs = append(dirs, m.pluginDir()) + } + dirs = append(dirs, m.GlobalPluginDirs...) + + return dirs +} + +func (m *Meta) provisionerFactories() map[string]provisioners.Factory { + dirs := m.pluginDirs(true) + plugins := discovery.FindPlugins("provisioner", dirs) + plugins, _ = plugins.ValidateVersions() + + // For now our goal is to just find the latest version of each plugin + // we have on the system. 
All provisioners should be at version 0.0.0 + // currently, so there should actually only be one instance of each plugin + // name here, even though the discovery interface forces us to pretend + // that might not be true. + + factories := make(map[string]provisioners.Factory) + + // Wire up the internal provisioners first. These might be overridden + // by discovered provisioners below. + for name, factory := range internalProvisionerFactories() { + factories[name] = factory + } + + byName := plugins.ByName() + for name, metas := range byName { + // Since we validated versions above and we partitioned the sets + // by name, we're guaranteed that the metas in our set all have + // valid versions and that there's at least one meta. + newest := metas.Newest() + + factories[name] = provisionerFactory(newest) + } + + return factories +} + +func provisionerFactory(meta discovery.PluginMeta) provisioners.Factory { + return func() (provisioners.Interface, error) { + cfg := &plugin.ClientConfig{ + Cmd: exec.Command(meta.Path), + HandshakeConfig: tfplugin.Handshake, + VersionedPlugins: tfplugin.VersionedPlugins, + Managed: true, + Logger: logging.NewLogger("provisioner"), + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + AutoMTLS: enableProviderAutoMTLS, + SyncStdout: logging.PluginOutputMonitor(fmt.Sprintf("%s:stdout", meta.Name)), + SyncStderr: logging.PluginOutputMonitor(fmt.Sprintf("%s:stderr", meta.Name)), + } + client := plugin.NewClient(cfg) + return newProvisionerClient(client) + } +} + +func internalProvisionerFactories() map[string]provisioners.Factory { + return map[string]provisioners.Factory{ + "file": provisioners.FactoryFixed(fileprovisioner.New()), + "local-exec": provisioners.FactoryFixed(localexec.New()), + "remote-exec": provisioners.FactoryFixed(remoteexec.New()), + } +} + +func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) { + // Request the RPC client so we can get the provisioner + // so we can build the 
actual RPC-implemented provisioner. + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName) + if err != nil { + return nil, err + } + + // store the client so that the plugin can kill the child process + p := raw.(*tfplugin.GRPCProvisioner) + p.PluginClient = client + return p, nil +} diff --git a/pkg/command/plugins_test.go b/pkg/command/plugins_test.go new file mode 100644 index 00000000000..da8ffe3aa9b --- /dev/null +++ b/pkg/command/plugins_test.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "reflect" + "testing" +) + +func TestPluginPath(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + pluginPath := []string{"a", "b", "c"} + + m := Meta{} + if err := m.storePluginPath(pluginPath); err != nil { + t.Fatal(err) + } + + restoredPath, err := m.loadPluginPath() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(pluginPath, restoredPath) { + t.Fatalf("expected plugin path %#v, got %#v", pluginPath, restoredPath) + } +} + +func TestInternalProviders(t *testing.T) { + m := Meta{} + internal := m.internalProviders() + tfProvider, err := internal["terraform"]() + if err != nil { + t.Fatal(err) + } + + schema := tfProvider.GetProviderSchema() + _, found := schema.DataSources["terraform_remote_state"] + if !found { + t.Errorf("didn't find terraform_remote_state in internal \"terraform\" provider") + } +} diff --git a/pkg/command/providers.go b/pkg/command/providers.go new file mode 100644 index 00000000000..489ca29a1d0 --- /dev/null +++ b/pkg/command/providers.go @@ -0,0 +1,212 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/xlab/treeprint" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ProvidersCommand is a Command implementation that prints out information +// about the providers used in the current configuration/state. +type ProvidersCommand struct { + Meta +} + +func (c *ProvidersCommand) Help() string { + return providersCommandHelp +} + +func (c *ProvidersCommand) Synopsis() string { + return "Show the providers required for this configuration" +} + +func (c *ProvidersCommand) Run(args []string) int { + var testsDirectory string + + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("providers") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.StringVar(&testsDirectory, "test-directory", "tests", "test-directory") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + configPath, err := modulePath(cmdFlags.Args()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + empty, err := configs.IsEmptyDir(configPath) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error validating configuration directory", + fmt.Sprintf("OpenTofu encountered an unexpected error while verifying that the given configuration directory is valid: %s.", err), + )) + c.showDiagnostics(diags) + return 1 + } + if empty { + absPath, err := filepath.Abs(configPath) + if err != nil { + absPath = configPath + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + fmt.Sprintf("The directory %s contains no OpenTofu configuration files.", absPath), + )) + c.showDiagnostics(diags) + return 1 + } + + config, configDiags := 
c.loadConfigWithTests(configPath, testsDirectory) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromPath(configPath) + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: config.Module.Backend, + }, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + // Get the state + env, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + s, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + if err := s.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + reqs, reqDiags := config.ProviderRequirementsByModule() + diags = diags.Append(reqDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + state := s.State() + var stateReqs getproviders.Requirements + if state != nil { + stateReqs = state.ProviderRequirements() + } + + printRoot := treeprint.New() + c.populateTreeNode(printRoot, reqs) + + c.Ui.Output("\nProviders required by configuration:") + c.Ui.Output(printRoot.String()) + + if len(stateReqs) > 0 { + c.Ui.Output("Providers required by state:\n") + for fqn := range stateReqs { + c.Ui.Output(fmt.Sprintf(" provider[%s]\n", fqn.String())) + } + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + return 0 +} + +func (c *ProvidersCommand) populateTreeNode(tree treeprint.Tree, node *configs.ModuleRequirements) { + for fqn, dep := range node.Requirements { + versionsStr := getproviders.VersionConstraintsString(dep) + if versionsStr != "" { + versionsStr 
= " " + versionsStr + } + tree.AddNode(fmt.Sprintf("provider[%s]%s", fqn.String(), versionsStr)) + } + for name, testNode := range node.Tests { + name = strings.TrimSuffix(name, ".tftest.hcl") + name = strings.ReplaceAll(name, "/", ".") + branch := tree.AddBranch(fmt.Sprintf("test.%s", name)) + + for fqn, dep := range testNode.Requirements { + versionsStr := getproviders.VersionConstraintsString(dep) + if versionsStr != "" { + versionsStr = " " + versionsStr + } + branch.AddNode(fmt.Sprintf("provider[%s]%s", fqn.String(), versionsStr)) + } + + for _, run := range testNode.Runs { + branch := branch.AddBranch(fmt.Sprintf("run.%s", run.Name)) + c.populateTreeNode(branch, run) + } + } + for name, childNode := range node.Children { + branch := tree.AddBranch(fmt.Sprintf("module.%s", name)) + c.populateTreeNode(branch, childNode) + } +} + +const providersCommandHelp = ` +Usage: tofu [global options] providers [options] [DIR] + + Prints out a tree of modules in the referenced configuration annotated with + their provider requirements. + + This provides an overview of all of the provider requirements across all + referenced modules, as an aid to understanding why particular provider + plugins are needed and why particular versions are selected. + +Options: + + -test-directory=path Set the OpenTofu test directory, defaults to "tests". When set, the + test command will search for test files in the current directory and + in the one specified by the flag. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. 
+` diff --git a/pkg/command/providers_lock.go b/pkg/command/providers_lock.go new file mode 100644 index 00000000000..26b8c9c14ee --- /dev/null +++ b/pkg/command/providers_lock.go @@ -0,0 +1,442 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "net/url" + "os" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/providercache" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type providersLockChangeType string + +const ( + providersLockChangeTypeNoChange providersLockChangeType = "providersLockChangeTypeNoChange" + providersLockChangeTypeNewProvider providersLockChangeType = "providersLockChangeTypeNewProvider" + providersLockChangeTypeNewHashes providersLockChangeType = "providersLockChangeTypeNewHashes" +) + +// ProvidersLockCommand is a Command implementation that implements the +// "tofu providers lock" command, which creates or updates the current +// configuration's dependency lock file using information from upstream +// registries, regardless of the provider installation configuration that +// is configured for normal provider installation. 
+type ProvidersLockCommand struct { + Meta +} + +func (c *ProvidersLockCommand) Synopsis() string { + return "Write out dependency locks for the configured providers" +} + +func (c *ProvidersLockCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("providers lock") + c.Meta.varFlagSet(cmdFlags) + var optPlatforms FlagStringSlice + var fsMirrorDir string + var netMirrorURL string + cmdFlags.Var(&optPlatforms, "platform", "target platform") + cmdFlags.StringVar(&fsMirrorDir, "fs-mirror", "", "filesystem mirror directory") + cmdFlags.StringVar(&netMirrorURL, "net-mirror", "", "network mirror base URL") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + var diags tfdiags.Diagnostics + + if fsMirrorDir != "" && netMirrorURL != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid installation method options", + "The -fs-mirror and -net-mirror command line options are mutually-exclusive.", + )) + c.showDiagnostics(diags) + return 1 + } + + providerStrs := cmdFlags.Args() + + var platforms []getproviders.Platform + if len(optPlatforms) == 0 { + platforms = []getproviders.Platform{getproviders.CurrentPlatform} + } else { + platforms = make([]getproviders.Platform, 0, len(optPlatforms)) + for _, platformStr := range optPlatforms { + platform, err := getproviders.ParsePlatform(platformStr) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid target platform", + fmt.Sprintf("The string %q given in the -platform option is not a valid target platform: %s.", platformStr, err), + )) + continue + } + platforms = append(platforms, platform) + } + } + + // Installation steps can be cancelled by SIGINT and similar. 
+ ctx, done := c.InterruptibleContext(c.CommandContext()) + defer done() + + // Unlike other commands, this command ignores the installation methods + // selected in the CLI configuration and instead chooses an installation + // method based on CLI options. + // + // This is so that folks who use a local mirror for everyday use can + // use this command to populate their lock files from upstream so + // subsequent "tofu init" calls can then verify the local mirror + // against the upstream checksums. + var source getproviders.Source + switch { + case fsMirrorDir != "": + source = getproviders.NewFilesystemMirrorSource(fsMirrorDir) + case netMirrorURL != "": + u, err := url.Parse(netMirrorURL) + if err != nil || u.Scheme != "https" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid network mirror URL", + "The -net-mirror option requires a valid https: URL as the mirror base URL.", + )) + c.showDiagnostics(diags) + return 1 + } + source = getproviders.NewHTTPMirrorSource(u, c.Services.CredentialsSource()) + default: + // With no special options we consult upstream registries directly, + // because that gives us the most information to produce as complete + // and portable as possible a lock entry. + source = getproviders.NewRegistrySource(c.Services) + } + + config, confDiags := c.loadConfig(".") + diags = diags.Append(confDiags) + reqs, hclDiags := config.ProviderRequirements() + diags = diags.Append(hclDiags) + + // If we have explicit provider selections on the command line then + // we'll modify "reqs" to only include those. Modifying this is okay + // because config.ProviderRequirements generates a fresh map result + // for each call. 
+ if len(providerStrs) != 0 { + providers := map[addrs.Provider]struct{}{} + for _, raw := range providerStrs { + addr, moreDiags := addrs.ParseProviderSourceString(raw) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + continue + } + providers[addr] = struct{}{} + if _, exists := reqs[addr]; !exists { + // Can't request a provider that isn't required by the + // current configuration. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider argument", + fmt.Sprintf("The provider %s is not required by the current configuration.", addr.String()), + )) + } + } + + for addr := range reqs { + if _, exists := providers[addr]; !exists { + delete(reqs, addr) + } + } + } + + // We'll also ignore any providers that don't participate in locking. + for addr := range reqs { + if !depsfile.ProviderIsLockable(addr) { + delete(reqs, addr) + } + } + + // We'll start our work with whatever locks we already have, so that + // we'll honor any existing version selections and just add additional + // hashes for them. + oldLocks, moreDiags := c.lockedDependencies() + diags = diags.Append(moreDiags) + + // If we have any error diagnostics already then we won't proceed further. + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Our general strategy here is to install the requested providers into + // a separate temporary directory -- thus ensuring that the results won't + // ever be inadvertently executed by other OpenTofu commands -- and then + // use the results of that installation to update the lock file for the + // current working directory. Because we throwaway the packages we + // downloaded after completing our work, a subsequent "tofu init" will + // then respect the CLI configuration's provider installation strategies + // but will verify the packages against the hashes we found upstream. 
+ + // Because our Installer abstraction is a per-platform idea, we'll + // instantiate one for each of the platforms the user requested, and then + // merge all of the generated locks together at the end. + updatedLocks := map[getproviders.Platform]*depsfile.Locks{} + selectedVersions := map[addrs.Provider]getproviders.Version{} + for _, platform := range platforms { + tempDir, err := os.MkdirTemp("", "terraform-providers-lock") + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Could not create temporary directory", + fmt.Sprintf("Failed to create a temporary directory for staging the requested provider packages: %s.", err), + )) + break + } + defer os.RemoveAll(tempDir) + + evts := &providercache.InstallerEvents{ + // Our output from this command is minimal just to show that + // we're making progress, rather than just silently hanging. + FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, loc getproviders.PackageLocation) { + c.Ui.Output(fmt.Sprintf("- Fetching %s %s for %s...", provider.ForDisplay(), version, platform)) + if prevVersion, exists := selectedVersions[provider]; exists && version != prevVersion { + // This indicates a weird situation where we ended up + // selecting a different version for one platform than + // for another. We won't be able to merge the result + // in that case, so we'll generate an error. + // + // This could potentially happen if there's a provider + // we've not previously recorded in the lock file and + // the available versions change while we're running. To + // avoid that would require pre-locking all of the + // providers, which is complicated to do with the building + // blocks we have here, and so we'll wait to do it only + // if this situation arises often in practice. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Inconsistent provider versions", + fmt.Sprintf( + "The version constraint for %s selected inconsistent versions for different platforms, which is unexpected.\n\nThe upstream registry may have changed its available versions during OpenTofu's work. If so, re-running this command may produce a successful result.", + provider, + ), + )) + } + selectedVersions[provider] = version + }, + FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, auth *getproviders.PackageAuthenticationResult) { + var keyID string + if auth != nil && auth.Signed() { + keyID = auth.KeyID + } + if keyID != "" { + keyID = c.Colorize().Color(fmt.Sprintf(", key ID [reset][bold]%s[reset]", keyID)) + } + c.Ui.Output(fmt.Sprintf("- Retrieved %s %s for %s (%s%s)", provider.ForDisplay(), version, platform, auth, keyID)) + }, + } + ctx := evts.OnContext(ctx) + + dir := providercache.NewDirWithPlatform(tempDir, platform) + installer := providercache.NewInstaller(dir, source) + + newLocks, err := installer.EnsureProviderVersions(ctx, oldLocks, reqs, providercache.InstallNewProvidersForce) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Could not retrieve providers for locking", + fmt.Sprintf("OpenTofu failed to fetch the requested providers for %s in order to calculate their checksums: %s.", platform, err), + )) + break + } + updatedLocks[platform] = newLocks + } + + // If we have any error diagnostics from installation then we won't + // proceed to merging and updating the lock file on disk. + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Track whether we've made any changes to the lock file as part of this + // operation. We can customise the final message based on our actions. + madeAnyChange := false + + // We now have a separate updated locks object for each platform. 
We need + // to merge those all together so that the final result has the union of + // all of the checksums we saw for each of the providers we've worked on. + // + // We'll copy the old locks first because we want to retain any existing + // locks for providers that we _didn't_ visit above. + newLocks := oldLocks.DeepCopy() + for provider := range reqs { + oldLock := oldLocks.Provider(provider) + + var version getproviders.Version + var constraints getproviders.VersionConstraints + var hashes []getproviders.Hash + if oldLock != nil { + version = oldLock.Version() + constraints = oldLock.VersionConstraints() + hashes = append(hashes, oldLock.AllHashes()...) + } + for platform, platformLocks := range updatedLocks { + platformLock := platformLocks.Provider(provider) + if platformLock == nil { + continue // weird, but we'll tolerate it to avoid crashing + } + version = platformLock.Version() + constraints = platformLock.VersionConstraints() + + // We don't make any effort to deduplicate hashes between different + // platforms here, because the SetProvider method we call below + // handles that automatically. + hashes = append(hashes, platformLock.AllHashes()...) + + // At this point, we've merged all the hashes for this (provider, platform) + // combo into the combined hashes for this provider. Let's take this + // opportunity to print out a summary for this particular combination. 
+ switch providersLockCalculateChangeType(oldLock, platformLock) { + case providersLockChangeTypeNewProvider: + madeAnyChange = true + c.Ui.Output( + fmt.Sprintf( + "- Obtained %s checksums for %s; This was a new provider and the checksums for this platform are now tracked in the lock file", + provider.ForDisplay(), + platform)) + case providersLockChangeTypeNewHashes: + madeAnyChange = true + c.Ui.Output( + fmt.Sprintf( + "- Obtained %s checksums for %s; Additional checksums for this platform are now tracked in the lock file", + provider.ForDisplay(), + platform)) + case providersLockChangeTypeNoChange: + c.Ui.Output( + fmt.Sprintf( + "- Obtained %s checksums for %s; All checksums for this platform were already tracked in the lock file", + provider.ForDisplay(), + platform)) + } + } + newLocks.SetProvider(provider, version, constraints, hashes) + } + + moreDiags = c.replaceLockedDependencies(newLocks) + diags = diags.Append(moreDiags) + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + if madeAnyChange { + c.Ui.Output(c.Colorize().Color("\n[bold][green]Success![reset] [bold]OpenTofu has updated the lock file.[reset]")) + c.Ui.Output("\nReview the changes in .terraform.lock.hcl and then commit to your\nversion control system to retain the new checksums.\n") + } else { + c.Ui.Output(c.Colorize().Color("\n[bold][green]Success![reset] [bold]OpenTofu has validated the lock file and found no need for changes.[reset]")) + } + return 0 +} + +func (c *ProvidersLockCommand) Help() string { + return ` +Usage: tofu [global options] providers lock [options] [providers...] + + Normally the dependency lock file (.terraform.lock.hcl) is updated + automatically by "tofu init", but the information available to the + normal provider installer can be constrained when you're installing providers + from filesystem or network mirrors, and so the generated lock file can end + up incomplete. 
+ + The "providers lock" subcommand addresses that by updating the lock file + based on the official packages available in the origin registry, ignoring + the currently-configured installation strategy. + + After this command succeeds, the lock file will contain suitable checksums + to allow installation of the providers needed by the current configuration + on all of the selected platforms. + + By default this command updates the lock file for every provider declared + in the configuration. You can override that behavior by providing one or + more provider source addresses on the command line. + +Options: + + -fs-mirror=dir Consult the given filesystem mirror directory instead + of the origin registry for each of the given providers. + + This would be necessary to generate lock file entries for + a provider that is available only via a mirror, and not + published in an upstream registry. In this case, the set + of valid checksums will be limited only to what OpenTofu + can learn from the data in the mirror directory. + + -net-mirror=url Consult the given network mirror (given as a base URL) + instead of the origin registry for each of the given + providers. + + This would be necessary to generate lock file entries for + a provider that is available only via a mirror, and not + published in an upstream registry. In this case, the set + of valid checksums will be limited only to what OpenTofu + can learn from the data in the mirror indices. + + -platform=os_arch Choose a target platform to request package checksums + for. + + By default OpenTofu will request package checksums + suitable only for the platform where you run this + command. Use this option multiple times to include + checksums for multiple target systems. + + Target names consist of an operating system and a CPU + architecture. For example, "linux_amd64" selects the + Linux operating system running on an AMD64 or x86_64 + CPU. Each provider is available only for a limited + set of target platforms. 
+ + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` +} + +// providersLockCalculateChangeType works out whether there is any difference +// between oldLock and newLock and returns a variable the main function can use +// to decide on which message to print. +// +// One assumption made here that is not obvious without the context from the +// main function is that while platformLock contains the lock information for a +// single platform after the current run, oldLock contains the combined +// information of all platforms from when the versions were last checked. A +// simple equality check is not sufficient for deciding on change as we expect +// that oldLock will be a superset of platformLock if no new hashes have been +// found. +// +// We've separated this function out so we can write unit tests around the +// logic. This function assumes the platformLock is not nil, as the main +// function explicitly checks this before calling this function. +func providersLockCalculateChangeType(oldLock *depsfile.ProviderLock, platformLock *depsfile.ProviderLock) providersLockChangeType { + if oldLock == nil { + return providersLockChangeTypeNewProvider + } + if oldLock.ContainsAll(platformLock) { + return providersLockChangeTypeNoChange + } + return providersLockChangeTypeNewHashes +} diff --git a/pkg/command/providers_lock_test.go b/pkg/command/providers_lock_test.go new file mode 100644 index 00000000000..4024fe5e8f8 --- /dev/null +++ b/pkg/command/providers_lock_test.go @@ -0,0 +1,262 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestProvidersLock(t *testing.T) { + t.Run("noop", func(t *testing.T) { + // in the most basic case, running providers lock in a directory with no configuration at all should succeed. + // create an empty working directory + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + c := &ProvidersLockCommand{ + Meta: Meta{ + Ui: ui, + }, + } + code := c.Run([]string{}) + if code != 0 { + t.Fatalf("wrong exit code; expected 0, got %d", code) + } + }) + + // This test depends on the -fs-mirror argument, so we always know what results to expect + t.Run("basic", func(t *testing.T) { + testDirectory := "providers-lock/basic" + expected := `# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/test" { + version = "1.0.0" + hashes = [ + "h1:7MjN4eFisdTv4tlhXH5hL4QQd39Jy4baPhFxwAd/EFE=", + ] +} +` + runProviderLockGenericTest(t, testDirectory, expected) + }) + + // This test depends on the -fs-mirror argument, so we always know what results to expect + t.Run("append", func(t *testing.T) { + testDirectory := "providers-lock/append" + expected := `# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+
+provider "registry.opentofu.org/hashicorp/test" {
+  version = "1.0.0"
+  hashes = [
+    "h1:7MjN4eFisdTv4tlhXH5hL4QQd39Jy4baPhFxwAd/EFE=",
+    "h1:invalid",
+  ]
+}
+`
+		runProviderLockGenericTest(t, testDirectory, expected)
+	})
+}
+
+// runProviderLockGenericTest copies the given fixture directory into a temp
+// working directory, runs "providers lock" against its fs-mirror, and compares
+// the resulting .terraform.lock.hcl byte-for-byte against expected.
+func runProviderLockGenericTest(t *testing.T, testDirectory, expected string) {
+	td := t.TempDir()
+	testCopyDir(t, testFixturePath(testDirectory), td)
+	defer testChdir(t, td)()
+
+	// Our fixture dir has a generic os_arch dir, which we need to customize
+	// to the actual OS/arch where this test is running in order to get the
+	// desired result.
+	fixtMachineDir := filepath.Join(td, "fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/os_arch")
+	wantMachineDir := filepath.Join(td, "fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
+	err := os.Rename(fixtMachineDir, wantMachineDir)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	p := testProvider()
+	ui := new(cli.MockUi)
+	c := &ProvidersLockCommand{
+		Meta: Meta{
+			Ui:               ui,
+			testingOverrides: metaOverridesForProvider(p),
+		},
+	}
+
+	args := []string{"-fs-mirror=fs-mirror"}
+	code := c.Run(args)
+	if code != 0 {
+		t.Fatalf("wrong exit code; expected 0, got %d", code)
+	}
+
+	lockfile, err := os.ReadFile(".terraform.lock.hcl")
+	if err != nil {
+		t.Fatal("error reading lockfile")
+	}
+
+	if string(lockfile) != expected {
+		t.Fatalf("wrong lockfile content\ngot:\n%s\nwant:\n%s", lockfile, expected) // include got/want so failures are diagnosable
+	}
+}
+
+func TestProvidersLock_args(t *testing.T) {
+
+	t.Run("mirror collision", func(t *testing.T) {
+		ui := new(cli.MockUi)
+		c := &ProvidersLockCommand{
+			Meta: Meta{
+				Ui: ui,
+			},
+		}
+
+		// only one of these arguments can be used at a time
+		args := []string{
+			"-fs-mirror=/foo/",
+			"-net-mirror=www.foo.com",
+		}
+		code := c.Run(args)
+
+		if code != 1 {
+			t.Fatalf("wrong exit code; expected 1, got %d", code)
+		}
+		output := ui.ErrorWriter.String()
+		if !strings.Contains(output, "The -fs-mirror and -net-mirror command line options are 
mutually-exclusive.") { + t.Fatalf("missing expected error message: %s", output) + } + }) + + t.Run("invalid platform", func(t *testing.T) { + ui := new(cli.MockUi) + c := &ProvidersLockCommand{ + Meta: Meta{ + Ui: ui, + }, + } + + // not a valid platform + args := []string{"-platform=arbitrary_nonsense_that_isnt_valid"} + code := c.Run(args) + + if code != 1 { + t.Fatalf("wrong exit code; expected 1, got %d", code) + } + output := ui.ErrorWriter.String() + if !strings.Contains(output, "must be two words separated by an underscore.") { + t.Fatalf("missing expected error message: %s", output) + } + }) + + t.Run("invalid provider argument", func(t *testing.T) { + ui := new(cli.MockUi) + c := &ProvidersLockCommand{ + Meta: Meta{ + Ui: ui, + }, + } + + // There is no configuration, so it's not valid to use any provider argument + args := []string{"hashicorp/random"} + code := c.Run(args) + + if code != 1 { + t.Fatalf("wrong exit code; expected 1, got %d", code) + } + output := ui.ErrorWriter.String() + if !strings.Contains(output, "The provider registry.opentofu.org/hashicorp/random is not required by the\ncurrent configuration.") { + t.Fatalf("missing expected error message: %s", output) + } + }) +} + +func TestProvidersLockCalculateChangeType(t *testing.T) { + provider := addrs.NewDefaultProvider("provider") + v2 := getproviders.MustParseVersion("2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + + t.Run("oldLock == nil", func(t *testing.T) { + platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ + "9r3i9a9QmASqMnQM", + "K43RHM2klOoywtyW", + "swJPXfuCNhJsTM5c", + }) + + if ct := providersLockCalculateChangeType(nil, platformLock); ct != providersLockChangeTypeNewProvider { + t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNewProvider) + } + }) + + t.Run("oldLock == platformLock", func(t *testing.T) { + platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, 
[]getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNoChange {
+			t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNoChange)
+		}
+	})
+
+	t.Run("oldLock > platformLock", func(t *testing.T) {
+		platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"1ZAChGWUMWn4zmIk",
+			"K43RHM2klOoywtyW",
+			"HWjRvIuWZ1LVatnc",
+			"swJPXfuCNhJsTM5c",
+			"KwhJK4p/U2dqbKhI",
+		})
+
+		if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNoChange {
+			t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNoChange)
+		}
+	})
+
+	t.Run("oldLock < platformLock", func(t *testing.T) {
+		platformLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"1ZAChGWUMWn4zmIk",
+			"K43RHM2klOoywtyW",
+			"HWjRvIuWZ1LVatnc",
+			"swJPXfuCNhJsTM5c",
+			"KwhJK4p/U2dqbKhI",
+		})
+
+		oldLock := depsfile.NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		if ct := providersLockCalculateChangeType(oldLock, platformLock); ct != providersLockChangeTypeNewHashes {
+			t.Fatalf("output was %s but should be %s", ct, providersLockChangeTypeNewHashes) // fixed: this branch expects NewHashes, not NoChange
+		}
+	})
+}
diff --git a/pkg/command/providers_mirror.go b/pkg/command/providers_mirror.go
new file mode 100644
index 00000000000..a6dc01ced1c
--- /dev/null
+++ b/pkg/command/providers_mirror.go
@@ -0,0 +1,394 @@
+// Copyright (c) The OpenTofu 
Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + + "github.com/apparentlymart/go-versions/versions" + "github.com/hashicorp/go-getter" + + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ProvidersMirrorCommand is a Command implementation that implements the +// "tofu providers mirror" command, which populates a directory with +// local copies of provider plugins needed by the current configuration so +// that the mirror can be used to work offline, or similar. +type ProvidersMirrorCommand struct { + Meta +} + +func (c *ProvidersMirrorCommand) Synopsis() string { + return "Save local copies of all required provider plugins" +} + +func (c *ProvidersMirrorCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("providers mirror") + c.Meta.varFlagSet(cmdFlags) + var optPlatforms FlagStringSlice + cmdFlags.Var(&optPlatforms, "platform", "target platform") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + var diags tfdiags.Diagnostics + + args = cmdFlags.Args() + if len(args) != 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No output directory specified", + "The providers mirror command requires an output directory as a command-line argument.", + )) + c.showDiagnostics(diags) + return 1 + } + outputDir := args[0] + + var platforms []getproviders.Platform + if len(optPlatforms) == 0 { + platforms = []getproviders.Platform{getproviders.CurrentPlatform} + } else { + platforms = make([]getproviders.Platform, 0, len(optPlatforms)) + for _, platformStr := range optPlatforms { + platform, err := 
getproviders.ParsePlatform(platformStr) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid target platform", + fmt.Sprintf("The string %q given in the -platform option is not a valid target platform: %s.", platformStr, err), + )) + continue + } + platforms = append(platforms, platform) + } + } + + // Installation steps can be cancelled by SIGINT and similar. + ctx, done := c.InterruptibleContext(c.CommandContext()) + defer done() + + config, confDiags := c.loadConfig(".") + diags = diags.Append(confDiags) + reqs, moreDiags := config.ProviderRequirements() + diags = diags.Append(moreDiags) + + // Read lock file + lockedDeps, lockedDepsDiags := c.Meta.lockedDependencies() + diags = diags.Append(lockedDepsDiags) + + // If we have any error diagnostics already then we won't proceed further. + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // If lock file is present, validate it against configuration + if !lockedDeps.Empty() { + if errs := config.VerifyDependencySelections(lockedDeps); len(errs) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Inconsistent dependency lock file", + fmt.Sprintf("To update the locked dependency selections to match a changed configuration, run:\n tofu init -upgrade\n got:%v", errs), + )) + } + } + + // Unlike other commands, this command always consults the origin registry + // for every provider so that it can be used to update a local mirror + // directory without needing to first disable that local mirror + // in the CLI configuration. + source := getproviders.NewMemoizeSource( + getproviders.NewRegistrySource(c.Services), + ) + + // Providers from registries always use HTTP, so we don't need the full + // generality of go-getter but it's still handy to use the HTTP getter + // as an easy way to download over HTTP into a file on disk. 
+ httpGetter := getter.HttpGetter{ + Client: httpclient.New(), + Netrc: true, + XTerraformGetDisabled: true, + } + + // The following logic is similar to that used by the provider installer + // in package providercache, but different in a few ways: + // - It produces the packed directory layout rather than the unpacked + // layout we require in provider cache directories. + // - It generates JSON index files that can be read by the + // getproviders.HTTPMirrorSource installation method if the result were + // copied into the docroot of an HTTP server. + // - It can mirror packages for potentially many different target platforms, + // so that we can construct a multi-platform mirror regardless of which + // platform we run this command on. + // - It ignores what's already present and just always downloads everything + // that the configuration requires. This is a command intended to be run + // infrequently to update a mirror, so it doesn't need to optimize away + // fetches of packages that might already be present. + + for provider, constraints := range reqs { + if provider.IsBuiltIn() { + c.Ui.Output(fmt.Sprintf("- Skipping %s because it is built in to OpenTofu CLI", provider.ForDisplay())) + continue + } + constraintsStr := getproviders.VersionConstraintsString(constraints) + c.Ui.Output(fmt.Sprintf("- Mirroring %s...", provider.ForDisplay())) + // First we'll look for the latest version that matches the given + // constraint, which we'll then try to mirror for each target platform. 
+ acceptable := versions.MeetingConstraints(constraints) + avail, _, err := source.AvailableVersions(ctx, provider) + candidates := avail.Filter(acceptable) + if err == nil && len(candidates) == 0 { + err = fmt.Errorf("no releases match the given constraints %s", constraintsStr) + } + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider not available", + fmt.Sprintf("Failed to download %s from its origin registry: %s.", provider.String(), err), + )) + continue + } + selected := candidates.Newest() + if !lockedDeps.Empty() { + selected = lockedDeps.Provider(provider).Version() + c.Ui.Output(fmt.Sprintf(" - Selected v%s to match dependency lock file", selected.String())) + } else if len(constraintsStr) > 0 { + c.Ui.Output(fmt.Sprintf(" - Selected v%s to meet constraints %s", selected.String(), constraintsStr)) + } else { + c.Ui.Output(fmt.Sprintf(" - Selected v%s with no constraints", selected.String())) + } + for _, platform := range platforms { + c.Ui.Output(fmt.Sprintf(" - Downloading package for %s...", platform.String())) + meta, err := source.PackageMeta(ctx, provider, selected, platform) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider release not available", + fmt.Sprintf("Failed to download %s v%s for %s: %s.", provider.String(), selected.String(), platform.String(), err), + )) + continue + } + urlStr, ok := meta.Location.(getproviders.PackageHTTPURL) + if !ok { + // We don't expect to get non-HTTP locations here because we're + // using the registry source, so this seems like a bug in the + // registry source. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider release not available", + fmt.Sprintf("Failed to download %s v%s for %s: OpenTofu's provider registry client returned unexpected location type %T. 
This is a bug in OpenTofu.", provider.String(), selected.String(), platform.String(), meta.Location), + )) + continue + } + urlObj, err := url.Parse(string(urlStr)) + if err != nil { + // We don't expect to get non-HTTP locations here because we're + // using the registry source, so this seems like a bug in the + // registry source. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid URL for provider release", + fmt.Sprintf("The origin registry for %s returned an invalid URL for v%s on %s: %s.", provider.String(), selected.String(), platform.String(), err), + )) + continue + } + // targetPath is the path where we ultimately want to place the + // downloaded archive, but we'll place it initially at stagingPath + // so we can verify its checksums and signatures before making + // it discoverable to mirror clients. (stagingPath intentionally + // does not follow the filesystem mirror file naming convention.) + targetPath := meta.PackedFilePath(outputDir) + stagingPath := filepath.Join(filepath.Dir(targetPath), "."+filepath.Base(targetPath)) + err = httpGetter.GetFile(stagingPath, urlObj) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot download provider release", + fmt.Sprintf("Failed to download %s v%s for %s: %s.", provider.String(), selected.String(), platform.String(), err), + )) + continue + } + if meta.Authentication != nil { + result, err := meta.Authentication.AuthenticatePackage(getproviders.PackageLocalArchive(stagingPath)) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider package", + fmt.Sprintf("Failed to authenticate %s v%s for %s: %s.", provider.String(), selected.String(), platform.String(), err), + )) + continue + } + c.Ui.Output(fmt.Sprintf(" - Package authenticated: %s", result)) + } + os.Remove(targetPath) // okay if it fails because we're going to try to rename over it next anyway + err = os.Rename(stagingPath, targetPath) + if err != nil { + 
diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot download provider release", + fmt.Sprintf("Failed to place %s package into mirror directory: %s.", provider.String(), err), + )) + continue + } + } + } + + // Now we'll generate or update the JSON index files in the directory. + // We do this by scanning the directory to see what is present, rather than + // by relying on the selections we made above, because we want to still + // include in the indices any packages that were already present and + // not affected by the changes we just made. + available, err := getproviders.SearchLocalDirectory(outputDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update indexes", + fmt.Sprintf("Could not scan the output directory to get package metadata for the JSON indexes: %s.", err), + )) + available = nil // the following loop will be a no-op + } + for provider, metas := range available { + if len(metas) == 0 { + continue // should never happen, but we'll be resilient + } + // The index files live in the same directory as the package files, + // so to figure that out without duplicating the path-building logic + // we'll ask the getproviders package to build an archive filename + // for a fictitious package and then use the directory portion of it. + indexDir := filepath.Dir(getproviders.PackedFilePathForPackage( + outputDir, provider, versions.Unspecified, getproviders.CurrentPlatform, + )) + indexVersions := map[string]interface{}{} + indexArchives := map[getproviders.Version]map[string]interface{}{} + for _, meta := range metas { + archivePath, ok := meta.Location.(getproviders.PackageLocalArchive) + if !ok { + // only archive files are eligible to be included in JSON + // indices for a network mirror. 
+ continue + } + archiveFilename := filepath.Base(string(archivePath)) + version := meta.Version + platform := meta.TargetPlatform + hash, err := meta.Hash() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update indexes", + fmt.Sprintf("Failed to determine a hash value for %s v%s on %s: %s.", provider, version, platform, err), + )) + continue + } + indexVersions[meta.Version.String()] = map[string]interface{}{} + if _, ok := indexArchives[version]; !ok { + indexArchives[version] = map[string]interface{}{} + } + indexArchives[version][platform.String()] = map[string]interface{}{ + "url": archiveFilename, // a relative URL from the index file's URL + "hashes": []string{hash.String()}, // an array to allow for additional hash formats in future + } + } + mainIndex := map[string]interface{}{ + "versions": indexVersions, + } + mainIndexJSON, err := json.MarshalIndent(mainIndex, "", " ") + if err != nil { + // Should never happen because the input here is entirely under + // our control. + panic(fmt.Sprintf("failed to encode main index: %s", err)) + } + // TODO: Ideally we would do these updates as atomic swap operations by + // creating a new file and then renaming it over the old one, in case + // this directory is the docroot of a live mirror. An atomic swap + // requires platform-specific code though: os.Rename alone can't do it + // when running on Windows as of Go 1.13. We should revisit this once + // we're supporting network mirrors, to avoid having them briefly + // become corrupted during updates. 
// Help returns the long-form usage text for "tofu providers mirror",
// describing the mirror directory layout and the supported options.
func (c *ProvidersMirrorCommand) Help() string {
	return `
Usage: tofu [global options] providers mirror [options] <target-dir>

  Populates a local directory with copies of the provider plugins needed for
  the current configuration, so that the directory can be used either directly
  as a filesystem mirror or as the basis for a network mirror and thus obtain
  those providers without access to their origin registries in future.

  The mirror directory will contain JSON index files that can be published
  along with the mirrored packages on a static HTTP file server to produce
  a network mirror. Those index files will be ignored if the directory is
  used instead as a local filesystem mirror.

Options:

  -platform=os_arch  Choose which target platform to build a mirror for.
                     By default OpenTofu will obtain plugin packages
                     suitable for the platform where you run this command.
                     Use this flag multiple times to include packages for
                     multiple target systems.

                     Target names consist of an operating system and a CPU
                     architecture. For example, "linux_amd64" selects the
                     Linux operating system running on an AMD64 or x86_64
                     CPU. Each provider is available only for a limited
                     set of target platforms.

  -var 'foo=bar'     Set a value for one of the input variables in the root
                     module of the configuration. Use this option more than
                     once to set more than one variable.

  -var-file=filename Load variable values from the given file, in addition
                     to the default files terraform.tfvars and *.auto.tfvars.
                     Use this option more than once to include more than one
                     variables file.
`
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

// More thorough tests for providers mirror can be found in the e2etest
func TestProvidersMirror(t *testing.T) {
	// noop example: with no providers required, the command should succeed.
	t.Run("noop", func(t *testing.T) {
		c := &ProvidersMirrorCommand{}
		code := c.Run([]string{"."})
		if code != 0 {
			t.Fatalf("wrong exit code. expected 0, got %d", code)
		}
	})

	// The command requires an output directory argument; omitting it must
	// produce a non-zero exit code and a helpful error message.
	t.Run("missing arg error", func(t *testing.T) {
		ui := new(cli.MockUi)
		c := &ProvidersMirrorCommand{
			Meta: Meta{Ui: ui},
		}
		code := c.Run([]string{})
		if code != 1 {
			t.Fatalf("wrong exit code. expected 1, got %d", code)
		}

		got := ui.ErrorWriter.String()
		if !strings.Contains(got, "Error: No output directory specified") {
			t.Fatalf("missing directory error from output, got:\n%s\n", got)
		}
	})
}
expected 1, got %d", code) + } + + got := ui.ErrorWriter.String() + if !strings.Contains(got, "Error: No output directory specified") { + t.Fatalf("missing directory error from output, got:\n%s\n", got) + } + }) +} diff --git a/pkg/command/providers_schema.go b/pkg/command/providers_schema.go new file mode 100644 index 00000000000..68fc224ab27 --- /dev/null +++ b/pkg/command/providers_schema.go @@ -0,0 +1,154 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ProvidersSchemaCommand is a Command implementation that prints out information +// about the providers used in the current configuration/state. +type ProvidersSchemaCommand struct { + Meta +} + +func (c *ProvidersSchemaCommand) Help() string { + return providersSchemaCommandHelp +} + +func (c *ProvidersSchemaCommand) Synopsis() string { + return "Show schemas for the providers used in the configuration" +} + +func (c *ProvidersSchemaCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("providers schema") + c.Meta.varFlagSet(cmdFlags) + var jsonOutput bool + cmdFlags.BoolVar(&jsonOutput, "json", false, "produce JSON output") + + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + if !jsonOutput { + c.Ui.Error( + "The `tofu providers schema` command requires the `-json` flag.\n") + cmdFlags.Usage() + return 1 + } + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading 
plugin path: %s", err)) + return 1 + } + + var diags tfdiags.Diagnostics + + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // We require a local backend + local, ok := b.(backend.Local) + if !ok { + c.showDiagnostics(diags) // in case of any warnings in here + c.Ui.Error(ErrUnsupportedLocalOp) + return 1 + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + // we expect that the config dir is the cwd + cwd, err := os.Getwd() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error getting cwd: %s", err)) + return 1 + } + + // Build the operation + opReq := c.Operation(b, arguments.ViewJSON, enc) + opReq.ConfigDir = cwd + opReq.ConfigLoader, err = c.initConfigLoader() + var callDiags tfdiags.Diagnostics + opReq.RootCall, callDiags = c.rootModuleCall(opReq.ConfigDir) + diags = diags.Append(callDiags) + if callDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + opReq.AllowUnsetVariables = true + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + + // Get the context + lr, _, ctxDiags := local.LocalRun(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + jsonSchemas, err := jsonprovider.Marshal(schemas) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to marshal provider schemas to json: %s", err)) + return 1 + } + c.Ui.Output(string(jsonSchemas)) + + return 0 +} + +const providersSchemaCommandHelp = ` +Usage: tofu [global options] providers schema [options] -json + + Prints out a json 
representation of the schemas for all providers used + in the current configuration. + +Options: + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` diff --git a/pkg/command/providers_schema_test.go b/pkg/command/providers_schema_test.go new file mode 100644 index 00000000000..d5bae31afaa --- /dev/null +++ b/pkg/command/providers_schema_test.go @@ -0,0 +1,175 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +func TestProvidersSchema_error(t *testing.T) { + ui := new(cli.MockUi) + c := &ProvidersSchemaCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + + if code := c.Run(nil); code != 1 { + fmt.Println(ui.OutputWriter.String()) + t.Fatalf("expected error: \n%s", ui.OutputWriter.String()) + } +} + +func TestProvidersSchema_output(t *testing.T) { + fixtureDir := "testdata/providers-schema" + testDirs, err := os.ReadDir(fixtureDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := range testDirs { + if !entry.IsDir() { + continue + } + t.Run(entry.Name(), func(t *testing.T) { + td := t.TempDir() + inputDir := filepath.Join(fixtureDir, entry.Name()) + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := 
newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + p := providersSchemaFixtureProvider() + ui := new(cli.MockUi) + m := Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + } + + // `terrafrom init` + ic := &InitCommand{ + Meta: m, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // flush the init output from the mock ui + ui.OutputWriter.Reset() + + // `tofu provider schemas` command + pc := &ProvidersSchemaCommand{Meta: m} + if code := pc.Run([]string{"-json"}); code != 0 { + t.Fatalf("wrong exit status %d; want 0\nstderr: %s", code, ui.ErrorWriter.String()) + } + var got, want providerSchemas + + gotString := ui.OutputWriter.String() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("err: %s", err) + } + defer wantFile.Close() + byteValue, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } + }) + } +} + +type providerSchemas struct { + FormatVersion string `json:"format_version"` + Schemas map[string]providerSchema `json:"provider_schemas"` +} + +type providerSchema struct { + Provider interface{} `json:"provider,omitempty"` + ResourceSchemas map[string]interface{} `json:"resource_schemas,omitempty"` + DataSourceSchemas map[string]interface{} `json:"data_source_schemas,omitempty"` + Functions map[string]interface{} `json:"functions,omitempty"` +} + +// testProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/providers-schema. 
+func providersSchemaFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = providersSchemaFixtureSchema() + return p +} + +// providersSchemaFixtureSchema returns a schema suitable for processing the +// configuration in testdata/providers-schema.ß +func providersSchemaFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "volumes": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "size": {Type: cty.String, Required: true}, + "mount_point": {Type: cty.String, Required: true}, + }, + }, + Optional: true, + }, + }, + }, + }, + }, + Functions: map[string]providers.FunctionSpec{ + "test_func": { + Description: "a basic string function", + Return: cty.String, + Summary: "test", + Parameters: []providers.FunctionParameterSpec{{ + Name: "input", + Type: cty.Number, + }}, + VariadicParameter: &providers.FunctionParameterSpec{ + Name: "variadic_input", + Type: cty.List(cty.Bool), + }, + }, + }, + } +} diff --git a/pkg/command/providers_test.go b/pkg/command/providers_test.go new file mode 100644 index 00000000000..571d51cf34b --- /dev/null +++ b/pkg/command/providers_test.go @@ -0,0 +1,206 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"os"
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

// TestProviders checks that the basic fixture's implied provider
// requirements all appear in the command's output.
func TestProviders(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(testFixturePath("providers/basic")); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	ui := new(cli.MockUi)
	c := &ProvidersCommand{
		Meta: Meta{
			Ui: ui,
		},
	}

	args := []string{}
	if code := c.Run(args); code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
	}

	wantOutput := []string{
		"provider[registry.opentofu.org/hashicorp/foo]",
		"provider[registry.opentofu.org/hashicorp/bar]",
		"provider[registry.opentofu.org/hashicorp/baz]",
	}

	output := ui.OutputWriter.String()
	for _, want := range wantOutput {
		if !strings.Contains(output, want) {
			t.Errorf("output missing %s:\n%s", want, output)
		}
	}
}

// TestProviders_noConfigs verifies the command errors out when run in a
// directory with no configuration files.
func TestProviders_noConfigs(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(testFixturePath("")); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	ui := new(cli.MockUi)
	c := &ProvidersCommand{
		Meta: Meta{
			Ui: ui,
		},
	}

	args := []string{}
	if code := c.Run(args); code == 0 {
		t.Fatal("expected command to return non-zero exit code" +
			" when no configs are available")
	}

	output := ui.ErrorWriter.String()
	expectedErrMsg := "No configuration files"
	if !strings.Contains(output, expectedErrMsg) {
		t.Errorf("Expected error message: %s\nGiven output: %s", expectedErrMsg, output)
	}
}

// TestProviders_modules checks that providers required by child modules are
// reported under the module's tree node in the output.
func TestProviders_modules(t *testing.T) {
	td := t.TempDir()
	testCopyDir(t, testFixturePath("providers/modules"), td)
	defer testChdir(t, td)()

	// first run init with mock provider sources to install the module
	initUi := new(cli.MockUi)
	providerSource, close := newMockProviderSource(t, map[string][]string{
		"foo": {"1.0.0"},
		"bar": {"2.0.0"},
		"baz": {"1.2.2"},
	})
	defer close()
	m := Meta{
		testingOverrides: metaOverridesForProvider(testProvider()),
		Ui:               initUi,
		ProviderSource:   providerSource,
	}
	ic := &InitCommand{
		Meta: m,
	}
	if code := ic.Run([]string{}); code != 0 {
		t.Fatalf("init failed\n%s", initUi.ErrorWriter)
	}

	// Providers command
	ui := new(cli.MockUi)
	c := &ProvidersCommand{
		Meta: Meta{
			Ui: ui,
		},
	}

	args := []string{}
	if code := c.Run(args); code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
	}

	wantOutput := []string{
		"provider[registry.opentofu.org/hashicorp/foo] 1.0.0", // from required_providers
		"provider[registry.opentofu.org/hashicorp/bar] 2.0.0", // from provider config
		"── module.kiddo",                                // tree node for child module
		"provider[registry.opentofu.org/hashicorp/baz]", // implied by a resource in the child module
	}

	output := ui.OutputWriter.String()
	for _, want := range wantOutput {
		if !strings.Contains(output, want) {
			t.Errorf("output missing %s:\n%s", want, output)
		}
	}
}

// TestProviders_state checks that providers recorded only in state (not in
// configuration) are listed under the state providers header.
func TestProviders_state(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(testFixturePath("providers/state")); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	ui := new(cli.MockUi)
	c := &ProvidersCommand{
		Meta: Meta{
			Ui: ui,
		},
	}

	args := []string{}
	if code := c.Run(args); code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
	}

	wantOutput := []string{
		"provider[registry.opentofu.org/hashicorp/foo] 1.0.0", // from required_providers
		"provider[registry.opentofu.org/hashicorp/bar] 2.0.0", // from a provider config block
		"Providers required by state",                   // header for state providers
		"provider[registry.opentofu.org/hashicorp/baz]", // from a resource in state (only)
	}

	output := ui.OutputWriter.String()
	for _, want := range wantOutput {
		if !strings.Contains(output, want) {
			t.Errorf("output missing %s:\n%s", want, output)
		}
	}
}

// TestProviders_tests checks that providers required only by test files are
// reported under the test file's node in the output.
func TestProviders_tests(t *testing.T) {
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := os.Chdir(testFixturePath("providers/tests")); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.Chdir(cwd)

	ui := new(cli.MockUi)
	c := &ProvidersCommand{
		Meta: Meta{
			Ui: ui,
		},
	}

	args := []string{}
	if code := c.Run(args); code != 0 {
		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
	}

	wantOutput := []string{
		"provider[registry.opentofu.org/hashicorp/foo]",
		"test.main",
		"provider[registry.opentofu.org/hashicorp/bar]",
	}

	output := ui.OutputWriter.String()
	for _, want := range wantOutput {
		if !strings.Contains(output, want) {
			t.Errorf("output missing %s:\n%s", want, output)
		}
	}
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"strings"

	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// PushCommand is a stub retained only to explain that "tofu push" is gone.
type PushCommand struct {
	Meta
}

// Run always fails: the command targeted Terraform Enterprise legacy (v1),
// which reached end-of-life. It exists solely to give upgrading users a
// helpful next step.
func (c *PushCommand) Run(args []string) int {
	// This command is no longer supported, but we'll retain it just to
	// give the user some next-steps after upgrading.
	c.showDiagnostics(tfdiags.Sourceless(
		tfdiags.Error,
		"Command \"tofu push\" is no longer supported",
		"This command was used to push configuration to Terraform Enterprise legacy (v1), which has now reached end-of-life. To push configuration to a new cloud backend, use its REST API.",
	))
	return 1
}

func (c *PushCommand) Help() string {
	helpText := `
Usage: tofu [global options] push [options] [DIR]

  This command was for the legacy version of Terraform Enterprise (v1), which
  has now reached end-of-life. Therefore this command is no longer supported.
`
	return strings.TrimSpace(helpText)
}

func (c *PushCommand) Synopsis() string {
	return "Obsolete command for Terraform Enterprise legacy (v1)"
}
Therefore this command is no longer supported. +` + return strings.TrimSpace(helpText) +} + +func (c *PushCommand) Synopsis() string { + return "Obsolete command for Terraform Enterprise legacy (v1)" +} diff --git a/pkg/command/refresh.go b/pkg/command/refresh.go new file mode 100644 index 00000000000..f8e136f4f68 --- /dev/null +++ b/pkg/command/refresh.go @@ -0,0 +1,231 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// RefreshCommand is a cli.Command implementation that refreshes the state +// file. +type RefreshCommand struct { + Meta +} + +func (c *RefreshCommand) Run(rawArgs []string) int { + var diags tfdiags.Diagnostics + + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Propagate -no-color for legacy use of Ui. The remote backend and + // cloud package use this; it should be removed when/if they are + // migrated to views. 
+ c.Meta.color = !common.NoColor + c.Meta.Color = c.Meta.color + + // Parse and validate flags + args, diags := arguments.ParseRefresh(rawArgs) + + // Instantiate the view, even if there are flag errors, so that we render + // diagnostics according to the desired view + view := views.NewRefresh(args.ViewType, c.View) + + if diags.HasErrors() { + view.Diagnostics(diags) + view.HelpPrompt() + return 1 + } + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(err) + view.Diagnostics(diags) + return 1 + } + + // FIXME: the -input flag value is needed to initialize the backend and the + // operation, but there is no clear path to pass this value down, so we + // continue to mutate the Meta object state for now. + c.Meta.input = args.InputEnabled + + // FIXME: the -parallelism flag is used to control the concurrency of + // OpenTofu operations. At the moment, this value is used both to + // initialize the backend via the ContextOpts field inside CLIOpts, and to + // set a largely unused field on the Operation request. Again, there is no + // clear path to pass this value down, so we continue to mutate the Meta + // object state for now. 
+ c.Meta.parallelism = args.Operation.Parallelism + + // Inject variables from args into meta for static evaluation + c.GatherVariables(args.Vars) + + // Load the encryption configuration + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Prepare the backend with the backend-specific arguments + be, beDiags := c.PrepareBackend(args.State, args.ViewType, enc) + diags = diags.Append(beDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Build the operation request + opReq, opDiags := c.OperationRequest(be, view, args.ViewType, args.Operation, enc) + diags = diags.Append(opDiags) + if diags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + view.Diagnostics(diags) + diags = nil + + // Perform the operation + op, diags := c.RunOperation(be, opReq) + view.Diagnostics(diags) + if diags.HasErrors() { + return 1 + } + + if op.State != nil { + view.Outputs(op.State.RootModule().OutputValues) + } + + return op.Result.ExitStatus() +} + +func (c *RefreshCommand) PrepareBackend(args *arguments.State, viewType arguments.ViewType, enc encryption.Encryption) (backend.Enhanced, tfdiags.Diagnostics) { + // FIXME: we need to apply the state arguments to the meta object here + // because they are later used when initializing the backend. Carving a + // path to pass these arguments to the functions that need them is + // difficult but would make their use easier to understand. 
+ c.Meta.applyStateArguments(args) + + backendConfig, diags := c.loadBackendConfig(".") + if diags.HasErrors() { + return nil, diags + } + + // Load the backend + be, beDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + ViewType: viewType, + }, enc.State()) + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + return nil, diags + } + + return be, diags +} + +func (c *RefreshCommand) OperationRequest(be backend.Enhanced, view views.Refresh, viewType arguments.ViewType, args *arguments.Operation, enc encryption.Encryption, +) (*backend.Operation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Build the operation + opReq := c.Operation(be, viewType, enc) + opReq.ConfigDir = "." + opReq.Hooks = view.Hooks() + opReq.Targets = args.Targets + opReq.Type = backend.OperationTypeRefresh + opReq.View = view.Operation() + + var err error + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to initialize config loader: %w", err)) + return nil, diags + } + + return opReq, diags +} + +func (c *RefreshCommand) GatherVariables(args *arguments.Vars) { + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. 
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *RefreshCommand) Help() string { + helpText := ` +Usage: tofu [global options] refresh [options] + + Update the state file of your infrastructure with metadata that matches + the physical resources they are tracking. + + This will not modify your infrastructure, but it can modify your + state file to update metadata. This metadata might cause new changes + to occur when you generate a plan or call apply next. + +Options: + + -compact-warnings If OpenTofu produces any warnings that are not + accompanied by errors, show them in a more compact form + that includes only the summary messages. + + -input=true Ask for input for variables if not directly set. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -parallelism=n Limit the number of concurrent operations. Defaults to 10. + + -target=resource Resource to target. Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + + -var 'foo=bar' Set a variable in the OpenTofu configuration. This + flag can be set multiple times. + + -var-file=foo Set variables in the OpenTofu configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. 
+` + return strings.TrimSpace(helpText) +} + +func (c *RefreshCommand) Synopsis() string { + return "Update the state to match remote systems" +} diff --git a/pkg/command/refresh_test.go b/pkg/command/refresh_test.go new file mode 100644 index 00000000000..b17f9e06c85 --- /dev/null +++ b/pkg/command/refresh_test.go @@ -0,0 +1,980 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +var equateEmpty = cmpopts.EquateEmpty() + +func TestRefresh(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, 
output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newStateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newStateFile.State.String()) + expected := strings.TrimSpace(testRefreshStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestRefresh_empty(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-empty"), td) + defer testChdir(t, td)() + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not have been called") + } +} + +func TestRefresh_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ 
+ "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + + code := c.Run(args) + output := done(t) + if code == 0 { + t.Fatal("expected error") + } + + got := output.Stderr() + if !strings.Contains(got, "lock") { + t.Fatal("command output does not look like a lock error:", got) + } +} + +func TestRefresh_cwd(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(testFixturePath("refresh")); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newStateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newStateFile.State.String()) + expected := strings.TrimSpace(testRefreshCwdStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestRefresh_defaultState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + originalState := testState() + + // Write the state file in a temporary directory with the + // default filename. 
+ statePath := testStateFile(t, originalState) + + localState := statemgr.NewFilesystem(statePath, encryption.StateEncryptionDisabled()) + if err := localState.RefreshState(); err != nil { + t.Fatal(err) + } + s := localState.State() + if s == nil { + t.Fatal("empty test state") + } + + // Change to that directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(filepath.Dir(statePath)); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should have been called") + } + + newState := testStateRead(t, statePath) + + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"ami":null,"id":"yes"}`), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + backupState := testStateRead(t, statePath+DefaultBackupExtension) + + actual = backupState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected = originalState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), 
spew.Sdump(expected)) + } +} + +func TestRefresh_outPath(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := os.CreateTemp(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + outf.Close() + os.Remove(outPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !reflect.DeepEqual(newState, state) { + t.Fatalf("bad: %#v", newState) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"ami":null,"id":"yes"}`), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + if _, err := os.Stat(outPath + DefaultBackupExtension); !os.IsNotExist(err) { + if err != nil { + t.Fatalf("failed to test for backup file: %s", err) + } + t.Fatalf("backup file exists, but it should not because output file did not initially exist") + } +} + +func TestRefresh_var(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + 
testCopyDir(t, testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + args := []string{ + "-var", "foo=bar", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + varFilePath := testTempFile(t) + if err := os.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + args := []string{ + "-var-file", varFilePath, + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varFileDefault(t *testing.T) { + // Create a temporary 
working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-var"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = refreshVarFixtureSchema() + + varFilePath := filepath.Join(td, "terraform.tfvars") + if err := os.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { + t.Fatalf("err: %s", err) + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure should be called") + } + if got, want := p.ConfigureProviderRequest.Config.GetAttr("value"), cty.StringVal("bar"); !want.RawEquals(got) { + t.Fatalf("wrong provider configuration\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestRefresh_varsUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-unset-var"), td) + defer testChdir(t, td)() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + defaultInputReader = bytes.NewBufferString("bar\n") + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + 
args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } +} + +func TestRefresh_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := os.CreateTemp(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + defer outf.Close() + + // Need to put some state content in the output file so that there's + // something to back up. + err = statefile.Write(statefile.New(state, "baz", 0), outf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("error writing initial output state file %s", err) + } + + // Backup path + backupf, err := os.CreateTemp(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + backupPath := backupf.Name() + backupf.Close() + os.Remove(backupPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("changed"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + "-backup", backupPath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !cmp.Equal(newState, state, cmpopts.EquateEmpty()) { + t.Fatalf("got:\n%s\nexpected:\n%s\n", newState, state) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ 
+ Status: states.ObjectReady, + AttrsJSON: []byte(`{"ami":null,"id":"changed"}`), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + backupState := testStateRead(t, backupPath) + actualStr := strings.TrimSpace(backupState.String()) + expectedStr := strings.TrimSpace(state.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestRefresh_disableBackup(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + // Output path + outf, err := os.CreateTemp(td, "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + outPath := outf.Name() + outf.Close() + os.Remove(outPath) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + }), + } + + args := []string{ + "-state", statePath, + "-state-out", outPath, + "-backup", "-", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + newState := testStateRead(t, statePath) + if !cmp.Equal(state, newState, equateEmpty) { + spew.Config.DisableMethods = true + fmt.Println(cmp.Diff(state, newState, equateEmpty)) + t.Fatalf("bad: %s", newState) + } + + newState = testStateRead(t, outPath) + actual := newState.RootModule().Resources["test_instance.foo"].Instances[addrs.NoKey].Current + expected := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: 
[]byte(`{"ami":null,"id":"yes"}`), + Dependencies: []addrs.ConfigResource{}, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) + } + + // Ensure there is no backup + _, err = os.Stat(outPath + DefaultBackupExtension) + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } + _, err = os.Stat("-") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("backup should not exist") + } +} + +func TestRefresh_displaysOutputs(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-output"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + args := []string{ + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Test that outputs were displayed + outputValue := "foo.example.com" + actual := output.Stdout() + if !strings.Contains(actual, outputValue) { + t.Fatalf("Expected:\n%s\n\nTo include: %q", actual, outputValue) + } +} + +// Config with multiple resources, targeting refresh of a subset +func TestRefresh_targeted(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("refresh-targeted"), td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + 
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-state", statePath, + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stdout() + if want := "test_instance.foo: Refreshing"; !strings.Contains(got, want) { + t.Fatalf("expected output to contain %q, got:\n%s", want, got) + } + if doNotWant := "test_instance.bar: Refreshing"; strings.Contains(got, doNotWant) { + t.Fatalf("expected output not to contain %q, got:\n%s", doNotWant, got) + } +} + +// Diagnostics for invalid -target flags +func TestRefresh_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + View: view, + }, + } + + args := []string{ + "-target", target, + } + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + got := output.Stderr() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if 
!strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + +func TestRefresh_warnings(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("apply"), td) + defer testChdir(t, td)() + + p := testProvider() + p.GetProviderSchemaResponse = refreshFixtureSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.SimpleWarning("warning 1"), + tfdiags.SimpleWarning("warning 2"), + }, + } + } + + t.Run("full warnings", func(t *testing.T) { + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + wantWarnings := []string{ + "warning 1", + "warning 2", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) + + t.Run("compact warnings", func(t *testing.T) { + view, done := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + code := c.Run([]string{"-compact-warnings"}) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + // the output should contain 2 warnings and a message about -compact-warnings + wantWarnings := []string{ + "warning 1", + "warning 2", + "To see the full warning notes, run OpenTofu without -compact-warnings.", + } + for _, want := range wantWarnings { + if !strings.Contains(output.Stdout(), want) { + t.Errorf("missing warning %s", want) + } + } + }) +} + +// configuration in testdata/refresh . 
This schema should be +// assigned to a mock provider named "test". +func refreshFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// refreshVarFixtureSchema returns a schema suitable for processing the +// configuration in testdata/refresh-var . This schema should be +// assigned to a mock provider named "test". +func refreshVarFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + } +} + +const refreshVarFile = ` +foo = "bar" +` + +const testRefreshStr = ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] +` +const testRefreshCwdStr = ` +test_instance.foo: + ID = yes + provider = provider["registry.opentofu.org/hashicorp/test"] +` diff --git a/pkg/command/show.go b/pkg/command/show.go new file mode 100644 index 00000000000..a20f99fd98b --- /dev/null +++ b/pkg/command/show.go @@ -0,0 +1,441 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/cloud" + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/pkg/tofumigrate" +) + +// Many of the methods we get data from can emit special error types if they're +// pretty sure about the file type but still can't use it. But they can't all do +// that! So, we have to do a couple ourselves if we want to preserve that data. +type errUnusableDataMisc struct { + inner error + kind string +} + +func errUnusable(err error, kind string) *errUnusableDataMisc { + return &errUnusableDataMisc{inner: err, kind: kind} +} +func (e *errUnusableDataMisc) Error() string { + return e.inner.Error() +} +func (e *errUnusableDataMisc) Unwrap() error { + return e.inner +} + +// ShowCommand is a Command implementation that reads and outputs the +// contents of a OpenTofu plan or state file. 
+type ShowCommand struct { + Meta + viewType arguments.ViewType +} + +func (c *ShowCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseShow(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("show") + return 1 + } + c.viewType = args.ViewType + c.View.SetShowSensitive(args.ShowSensitive) + + // Set up view + view := views.NewShow(args.ViewType, c.View) + + // Check for user-supplied plugin path + var err error + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(fmt.Errorf("error loading plugin path: %w", err)) + view.Diagnostics(diags) + return 1 + } + + // Inject variables from args into meta for static evaluation + c.GatherVariables(args.Vars) + + // Load the encryption configuration + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get the data we need to display + plan, jsonPlan, stateFile, config, schemas, showDiags := c.show(args.Path, enc) + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + view.Diagnostics(diags) + return 1 + } + + // Display the data + return view.Display(config, plan, jsonPlan, stateFile, schemas) +} + +func (c *ShowCommand) Help() string { + helpText := ` +Usage: tofu [global options] show [options] [path] + + Reads and outputs a OpenTofu state or plan file in a human-readable + form. If no path is specified, the current state will be shown. + +Options: + + -no-color If specified, output won't contain any color. + + -json If specified, output the OpenTofu plan or state in + a machine-readable form. + + -show-sensitive If specified, sensitive values will be displayed. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. 
Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *ShowCommand) Synopsis() string { + return "Show the current state or a saved plan" +} + +func (c *ShowCommand) GatherVariables(args *arguments.Vars) { + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. + + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *ShowCommand) show(path string, enc encryption.Encryption) (*plans.Plan, *cloudplan.RemotePlanJSON, *statefile.File, *configs.Config, *tofu.Schemas, tfdiags.Diagnostics) { + var diags, showDiags, migrateDiags tfdiags.Diagnostics + var plan *plans.Plan + var jsonPlan *cloudplan.RemotePlanJSON + var stateFile *statefile.File + var config *configs.Config + var schemas *tofu.Schemas + + // No plan file or state file argument provided, + // so get the latest state snapshot + if path == "" { + stateFile, showDiags = c.showFromLatestStateSnapshot(enc) + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + return plan, jsonPlan, stateFile, config, schemas, diags + } + } + + // Plan file or state file argument provided, + // so try to load the argument as a plan file first. + // If that fails, try to load it as a statefile. 
+ if path != "" { + plan, jsonPlan, stateFile, config, showDiags = c.showFromPath(path, enc) + diags = diags.Append(showDiags) + if showDiags.HasErrors() { + return plan, jsonPlan, stateFile, config, schemas, diags + } + } + + if stateFile != nil { + stateFile.State, migrateDiags = tofumigrate.MigrateStateProviderAddresses(config, stateFile.State) + diags = diags.Append(migrateDiags) + if migrateDiags.HasErrors() { + return plan, jsonPlan, stateFile, config, schemas, diags + } + } + + // Get schemas, if possible + if config != nil || stateFile != nil { + schemas, diags = c.MaybeGetSchemas(stateFile.State, config) + if diags.HasErrors() { + return plan, jsonPlan, stateFile, config, schemas, diags + } + } + + return plan, jsonPlan, stateFile, config, schemas, diags +} +func (c *ShowCommand) showFromLatestStateSnapshot(enc encryption.Encryption) (*statefile.File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + return nil, diags + } + c.ignoreRemoteVersionConflict(b) + + // Load the workspace + workspace, err := c.Workspace() + if err != nil { + diags = diags.Append(fmt.Errorf("error selecting workspace: %w", err)) + return nil, diags + } + + // Get the latest state snapshot from the backend for the current workspace + stateFile, stateErr := getStateFromBackend(b, workspace) + if stateErr != nil { + diags = diags.Append(stateErr) + return nil, diags + } + + return stateFile, diags +} + +func (c *ShowCommand) showFromPath(path string, enc encryption.Encryption) (*plans.Plan, *cloudplan.RemotePlanJSON, *statefile.File, *configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var planErr, stateErr error + var plan *plans.Plan + var jsonPlan *cloudplan.RemotePlanJSON + var stateFile *statefile.File + var config *configs.Config + + rootCall, callDiags := c.rootModuleCall(".") + diags = diags.Append(callDiags) 
+ if diags.HasErrors() { + return nil, nil, nil, nil, diags + } + + // Path might be a local plan file, a bookmark to a saved cloud plan, or a + // state file. First, try to get a plan and associated data from a local + // plan file. If that fails, try to get a json plan from the path argument. + // If that fails, try to get the statefile from the path argument. + plan, jsonPlan, stateFile, config, planErr = c.getPlanFromPath(path, enc, rootCall) + if planErr != nil { + stateFile, stateErr = getStateFromPath(path, enc) + if stateErr != nil { + // To avoid spamming the user with irrelevant errors, first check to + // see if one of our errors happens to know for a fact what file + // type we were dealing with. If so, then we can ignore the other + // ones (which are likely to be something unhelpful like "not a + // valid zip file"). If not, we can fall back to dumping whatever + // we've got. + var unLocal *planfile.ErrUnusableLocalPlan + var unState *statefile.ErrUnusableState + var unMisc *errUnusableDataMisc + if errors.As(planErr, &unLocal) { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Couldn't show local plan", + fmt.Sprintf("Plan read error: %s", unLocal), + ), + ) + } else if errors.As(planErr, &unMisc) { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Couldn't show %s", unMisc.kind), + fmt.Sprintf("Plan read error: %s", unMisc), + ), + ) + } else if errors.As(stateErr, &unState) { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Couldn't show state file", + fmt.Sprintf("Plan read error: %s", unState), + ), + ) + } else if errors.As(stateErr, &unMisc) { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Couldn't show %s", unMisc.kind), + fmt.Sprintf("Plan read error: %s", unMisc), + ), + ) + } else { + // Ok, give up and show the really big error + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Failed to read the given file as a state or plan file", 
+ fmt.Sprintf("State read error: %s\n\nPlan read error: %s", stateErr, planErr), + ), + ) + } + + return nil, nil, nil, nil, diags + } + } + return plan, jsonPlan, stateFile, config, diags +} + +// getPlanFromPath returns a plan, json plan, statefile, and config if the +// user-supplied path points to either a local or cloud plan file. Note that +// some of the return values will be nil no matter what; local plan files do not +// yield a json plan, and cloud plans do not yield real plan/state/config +// structs. An error generally suggests that the given path is either a +// directory or a statefile. +func (c *ShowCommand) getPlanFromPath(path string, enc encryption.Encryption, rootCall configs.StaticModuleCall) (*plans.Plan, *cloudplan.RemotePlanJSON, *statefile.File, *configs.Config, error) { + var err error + var plan *plans.Plan + var jsonPlan *cloudplan.RemotePlanJSON + var stateFile *statefile.File + var config *configs.Config + + pf, err := planfile.OpenWrapped(path, enc.Plan()) + if err != nil { + return nil, nil, nil, nil, err + } + + if lp, ok := pf.Local(); ok { + plan, stateFile, config, err = getDataFromPlanfileReader(lp, rootCall) + } else if cp, ok := pf.Cloud(); ok { + redacted := c.viewType != arguments.ViewJSON + jsonPlan, err = c.getDataFromCloudPlan(cp, redacted, enc) + } + + return plan, jsonPlan, stateFile, config, err +} + +func (c *ShowCommand) getDataFromCloudPlan(plan *cloudplan.SavedPlanBookmark, redacted bool, enc encryption.Encryption) (*cloudplan.RemotePlanJSON, error) { + // Set up the backend + b, backendDiags := c.Backend(nil, enc.State()) + if backendDiags.HasErrors() { + return nil, errUnusable(backendDiags.Err(), "cloud plan") + } + // Cloud plans only work if we're cloud. 
+ cl, ok := b.(*cloud.Cloud) + if !ok { + return nil, errUnusable(fmt.Errorf("can't show a saved cloud plan unless the current root module is connected to Terraform Cloud"), "cloud plan") + } + + result, err := cl.ShowPlanForRun(context.Background(), plan.RunID, plan.Hostname, redacted) + if err != nil { + err = errUnusable(err, "cloud plan") + } + return result, err +} + +// getDataFromPlanfileReader returns a plan, statefile, and config, extracted from a local plan file. +func getDataFromPlanfileReader(planReader *planfile.Reader, rootCall configs.StaticModuleCall) (*plans.Plan, *statefile.File, *configs.Config, error) { + // Get plan + plan, err := planReader.ReadPlan() + if err != nil { + return nil, nil, nil, err + } + + // Get statefile + stateFile, err := planReader.ReadStateFile() + if err != nil { + return nil, nil, nil, err + } + + subCall := rootCall.WithVariables(func(variable *configs.Variable) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + name := variable.Name + v, ok := plan.VariableValues[name] + if !ok { + if variable.Required() { + // This should not happen... + return cty.DynamicVal, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing plan variable " + variable.Name, + }) + } + return variable.Default, nil + } + + parsed, parsedErr := v.Decode(cty.DynamicPseudoType) + if parsedErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: parsedErr.Error(), + }) + } + return parsed, diags + }) + + // Get config + config, diags := planReader.ReadConfig(subCall) + if diags.HasErrors() { + return nil, nil, nil, errUnusable(diags.Err(), "local plan") + } + + return plan, stateFile, config, err +} + +// getStateFromPath returns a statefile if the user-supplied path points to a statefile. 
+func getStateFromPath(path string, enc encryption.Encryption) (*statefile.File, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("Error loading statefile: %w", err) + } + defer file.Close() + + var stateFile *statefile.File + stateFile, err = statefile.Read(file, enc.State()) + if err != nil { + return nil, fmt.Errorf("Error reading %s as a statefile: %w", path, err) + } + return stateFile, nil +} + +// getStateFromBackend returns the State for the current workspace, if available. +func getStateFromBackend(b backend.Backend, workspace string) (*statefile.File, error) { + // Get the state store for the given workspace + stateStore, err := b.StateMgr(workspace) + if err != nil { + return nil, fmt.Errorf("Failed to load state manager: %w", err) + } + + // Refresh the state store with the latest state snapshot from persistent storage + if err := stateStore.RefreshState(); err != nil { + return nil, fmt.Errorf("Failed to load state: %w", err) + } + + // Get the latest state snapshot and return it + stateFile := statemgr.Export(stateStore) + return stateFile, nil +} diff --git a/pkg/command/show_test.go b/pkg/command/show_test.go new file mode 100644 index 00000000000..c04d7336050 --- /dev/null +++ b/pkg/command/show_test.go @@ -0,0 +1,1291 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" +) + +func TestShow_badArgs(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "bad", + "bad", + "-no-color", + } + + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } +} + +func TestShow_noArgsNoState(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No state.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_noArgsWithState(t *testing.T) { + // Get a temp cwd + testCwd(t) + // Create the default state + testStateFileDefault(t, testState()) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := 
output.Stdout() + want := `# test_instance.foo:` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_argsWithState(t *testing.T) { + // Create the default state + statePath := testStateFile(t, testState()) + stateDir := filepath.Dir(statePath) + defer os.RemoveAll(stateDir) + defer testChdir(t, stateDir)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + path := filepath.Base(statePath) + args := []string{ + path, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +// https://github.com/hashicorp/terraform/issues/21462 +func TestShow_argsWithStateAliasedProvider(t *testing.T) { + // Create the default state with aliased resource + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + // The weird whitespace here is reflective of how this would + // get written out in a real state file, due to the indentation + // of all of the containing wrapping objects and arrays. 
+ AttrsJSON: []byte("{\n \"id\": \"bar\"\n }"), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{}, + }, + addrs.RootModuleInstance.ProviderConfigAliased(addrs.NewDefaultProvider("test"), "alias"), + ) + }) + + statePath := testStateFile(t, testState) + stateDir := filepath.Dir(statePath) + defer os.RemoveAll(stateDir) + defer testChdir(t, stateDir)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + path := filepath.Base(statePath) + args := []string{ + path, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `# missing schema for provider \"test.alias\"` + if strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s", got) + } +} + +func TestShow_argsPlanFileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "doesNotExist.tfplan", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want1 := `Plan read error: couldn't load the provided path` + want2 := `open doesNotExist.tfplan: no such file or directory` + if !strings.Contains(got, want1) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want1) + } + if !strings.Contains(got, want2) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want2) + } +} + +func TestShow_argsStatefileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "doesNotExist.tfstate", + "-no-color", + } + code := 
c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `State read error: Error loading statefile:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_json_argsPlanFileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-json", + "doesNotExist.tfplan", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want1 := `Plan read error: couldn't load the provided path` + want2 := `open doesNotExist.tfplan: no such file or directory` + if !strings.Contains(got, want1) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want1) + } + if !strings.Contains(got, want2) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want2) + } +} + +func TestShow_json_argsStatefileDoesNotExist(t *testing.T) { + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-json", + "doesNotExist.tfstate", + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `State read error: Error loading statefile:` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_planNoop(t *testing.T) { + planPath := testPlanFileNoop(t) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + 
planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No changes. Your infrastructure matches the configuration.` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_planWithChanges(t *testing.T) { + planPathWithChanges := showFixturePlanFile(t, plans.DeleteThenCreate) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + planPathWithChanges, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `test_instance.foo must be replaced` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_planWithForceReplaceChange(t *testing.T) { + // The main goal of this test is to see that the "replace by request" + // resource instance action reason can round-trip through a plan file and + // be reflected correctly in the "tofu show" output, the same way + // as it would appear in "tofu plan" output. 
+ + _, snap := testModuleWithSnapshot(t, "show") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + Before: priorValRaw, + After: plannedValRaw, + }, + ActionReason: plans.ResourceInstanceReplaceByRequest, + }) + planFilePath := testPlanFile( + t, + snap, + states.NewState(), + plan, + ) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + planFilePath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `test_instance.foo will be replaced, as requested` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } + + want = `Plan: 1 to add, 0 to change, 1 to destroy.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_planErrored(t *testing.T) { + _, snap := testModuleWithSnapshot(t, "show") + plan := testPlan(t) + plan.Errored = true + planFilePath := testPlanFile( + t, + snap, + states.NewState(), + plan, + ) + + 
view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + planFilePath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `Planning failed. OpenTofu encountered an error while generating this plan.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_plan_json(t *testing.T) { + planPath := showFixturePlanFile(t, plans.Create) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + "-json", + planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +func TestShow_state(t *testing.T) { + originalState := testState() + root := originalState.RootModule() + root.SetOutputValue("test", cty.ObjectVal(map[string]cty.Value{ + "attr": cty.NullVal(cty.DynamicPseudoType), + "null": cty.NullVal(cty.String), + "list": cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), + }), false) + + statePath := testStateFile(t, originalState) + defer os.RemoveAll(filepath.Dir(statePath)) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(showFixtureProvider()), + View: view, + }, + } + + args := []string{ + statePath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } +} + +func TestShow_json_output(t *testing.T) { + fixtureDir := "testdata/show-json" + testDirs, err := os.ReadDir(fixtureDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := 
range testDirs { + if !entry.IsDir() { + continue + } + + t.Run(entry.Name(), func(t *testing.T) { + td := t.TempDir() + inputDir := filepath.Join(fixtureDir, entry.Name()) + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + expectError := strings.Contains(entry.Name(), "error") + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + "hashicorp2/test": {"1.2.3"}, + }) + defer close() + + p := showFixtureProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + if expectError { + // this should error, but not panic. + return + } + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // read expected output + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + var want plan + json.Unmarshal([]byte(byteValue), &want) + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-out=tofu.plan", + } + + code := pc.Run(args) + planOutput := planDone(t) + + var wantedCode int + if want.Errored { + wantedCode = 1 + } else { + wantedCode = 0 + } + + if code != wantedCode { + t.Fatalf("unexpected exit status %d; want %d\ngot: %s", code, wantedCode, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "tofu.plan", + } + defer os.Remove("tofu.plan") + code = sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + 
t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare view output to wanted output + var got plan + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + // Disregard format version to reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } + }) + } +} + +func TestShow_json_output_sensitive(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-json-sensitive" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) + defer close() + + p := showFixtureSensitiveProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-out=tofu.plan", + } + code := pc.Run(args) + planOutput := planDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "tofu.plan", + } + defer os.Remove("tofu.plan") + code = sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare ui output to wanted output + var got, want plan + + gotString := showOutput.Stdout() + 
json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + // Disregard format version to reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } +} + +// Failing conditions are only present in JSON output for refresh-only plans, +// so we test that separately here. +func TestShow_json_output_conditions_refresh_only(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-json/conditions" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{"test": {"1.2.3"}}) + defer close() + + p := showFixtureSensitiveProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // plan + planView, planDone := testView(t) + pc := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: planView, + ProviderSource: providerSource, + }, + } + + args := []string{ + "-refresh-only", + "-out=tofu.plan", + "-var=ami=bad-ami", + "-state=for-refresh.tfstate", + } + code := pc.Run(args) + planOutput := planDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, planOutput.Stderr()) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + args = []string{ + "-json", + "tofu.plan", + } + defer os.Remove("tofu.plan") + code 
= sc.Run(args) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, showOutput.Stderr()) + } + + // compare JSON output to wanted output + var got, want plan + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output-refresh-only.json") + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer wantFile.Close() + byteValue, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + // Disregard format version to reduce needless test fixture churn + want.FormatVersion = got.FormatVersion + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } +} + +// similar test as above, without the plan +func TestShow_json_output_state(t *testing.T) { + fixtureDir := "testdata/show-json-state" + testDirs, err := os.ReadDir(fixtureDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := range testDirs { + if !entry.IsDir() { + continue + } + + t.Run(entry.Name(), func(t *testing.T) { + td := t.TempDir() + inputDir := filepath.Join(fixtureDir, entry.Name()) + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.2.3"}, + }) + defer close() + + p := showFixtureProvider() + + // init + ui := new(cli.MockUi) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + ProviderSource: providerSource, + }, + } + if code := ic.Run([]string{}); code != 0 { + t.Fatalf("init failed\n%s", ui.ErrorWriter) + } + + // show + showView, showDone := testView(t) + sc := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: showView, + ProviderSource: providerSource, + }, + } + + code := sc.Run([]string{"-json"}) + showOutput := showDone(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 
0\ngot: %s", code, showOutput.Stderr()) + } + + // compare ui output to wanted output + type state struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version"` + Values map[string]interface{} `json:"values,omitempty"` + SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` + } + var got, want state + + gotString := showOutput.Stdout() + json.Unmarshal([]byte(gotString), &got) + + wantFile, err := os.Open("output.json") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer wantFile.Close() + byteValue, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + json.Unmarshal([]byte(byteValue), &want) + + if !cmp.Equal(got, want) { + t.Fatalf("wrong result:\n %v\n", cmp.Diff(got, want)) + } + }) + } +} + +func TestShow_planWithNonDefaultStateLineage(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("show"), td) + defer testChdir(t, td)() + + // Write default state file with a testing lineage ("fake-for-testing") + testStateFileDefault(t, testState()) + + // Create a plan with a different lineage, which we should still be able + // to show + _, snap := testModuleWithSnapshot(t, "show") + state := testState() + plan := testPlan(t) + stateMeta := statemgr.SnapshotMeta{ + Lineage: "fake-for-plan", + Serial: 1, + TerraformVersion: version.SemVer, + } + planPath := testPlanFileMatchState(t, snap, state, plan, stateMeta) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + planPath, + "-no-color", + } + code := c.Run(args) + output := done(t) + + if code != 0 { + t.Fatalf("unexpected exit status %d; want 0\ngot: %s", code, output.Stderr()) + } + + got := output.Stdout() + want := `No changes. 
Your infrastructure matches the configuration.` + if !strings.Contains(got, want) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } +} + +func TestShow_corruptStatefile(t *testing.T) { + td := t.TempDir() + inputDir := "testdata/show-corrupt-statefile" + testCopyDir(t, inputDir, td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + + if code != 1 { + t.Fatalf("unexpected exit status %d; want 1\ngot: %s", code, output.Stdout()) + } + + got := output.Stderr() + want := `Unsupported state file format` + if !strings.Contains(got, want) { + t.Errorf("unexpected output\ngot: %s\nwant:\n%s", got, want) + } +} + +func TestShow_showSensitiveArg(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + originalState := stateWithSensitiveValueForShow() + + testStateFileDefault(t, originalState) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{ + "-show-sensitive", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + expected := "Outputs:\n\nfoo = \"bar\"" + if actual != expected { + t.Fatalf("got incorrect output: %#v", actual) + } +} + +func TestShow_withoutShowSensitiveArg(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + originalState := stateWithSensitiveValueForShow() + + testStateFileDefault(t, originalState) + + view, done := testView(t) + c := &ShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + code := c.Run([]string{}) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + actual := strings.TrimSpace(output.Stdout()) + expected := 
"Outputs:\n\nfoo = (sensitive value)" + if actual != expected { + t.Fatalf("got incorrect output: %#v", actual) + } +} + +// stateWithSensitiveValueForShow return a state with an output value +// marked as sensitive. +func stateWithSensitiveValueForShow() *states.State { + state := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + true, + ) + }) + return state +} + +// showFixtureSchema returns a schema suitable for processing the configuration +// in testdata/show. This schema should be assigned to a mock provider +// named "test". +func showFixtureSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +// showFixtureSensitiveSchema returns a schema suitable for processing the configuration +// in testdata/show. This schema should be assigned to a mock provider +// named "test". It includes a sensitive attribute. 
+func showFixtureSensitiveSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "password": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + }, + }, + } +} + +// showFixtureProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/show. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// OpenTofu Core. +func showFixtureProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = showFixtureSchema() + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + idVal := req.PriorState.GetAttr("id") + amiVal := req.PriorState.GetAttr("ami") + if amiVal.RawEquals(cty.StringVal("refresh-me")) { + amiVal = cty.StringVal("refreshed") + } + return providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + Private: req.Private, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan, + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + idVal := req.ProposedNewState.GetAttr("id") + amiVal := req.ProposedNewState.GetAttr("ami") + if idVal.IsNull() { + idVal = cty.UnknownVal(cty.String) + } + var reqRep []cty.Path + if 
amiVal.RawEquals(cty.StringVal("force-replace")) { + reqRep = append(reqRep, cty.GetAttrPath("ami")) + } + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + RequiresReplace: reqRep, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + idVal := req.PlannedState.GetAttr("id") + amiVal := req.PlannedState.GetAttr("ami") + if !idVal.IsKnown() { + idVal = cty.StringVal("placeholder") + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": amiVal, + }), + } + } + return p +} + +// showFixtureSensitiveProvider returns a mock provider that is configured for basic +// operation with the configuration in testdata/show. This mock has +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// with the plan/apply steps just passing through the data determined by +// OpenTofu Core. It also has a sensitive attribute in the provider schema. 
+func showFixtureSensitiveProvider() *tofu.MockProvider { + p := testProvider() + p.GetProviderSchemaResponse = showFixtureSensitiveSchema() + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + idVal := req.ProposedNewState.GetAttr("id") + if idVal.IsNull() { + idVal = cty.UnknownVal(cty.String) + } + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": req.ProposedNewState.GetAttr("ami"), + "password": req.ProposedNewState.GetAttr("password"), + }), + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + idVal := req.PlannedState.GetAttr("id") + if !idVal.IsKnown() { + idVal = cty.StringVal("placeholder") + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": idVal, + "ami": req.PlannedState.GetAttr("ami"), + "password": req.PlannedState.GetAttr("password"), + }), + } + } + return p +} + +// showFixturePlanFile creates a plan file at a temporary location containing a +// single change to create or update the test_instance.foo that is included in the "show" +// test fixture, returning the location of that plan file. 
+// `action` is the planned change you would like to elicit +func showFixturePlanFile(t *testing.T, action plans.Action) string { + _, snap := testModuleWithSnapshot(t, "show") + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "ami": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plan := testPlan(t) + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: action, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + return testPlanFile( + t, + snap, + states.NewState(), + plan, + ) +} + +// this simplified plan struct allows us to preserve field order when marshaling +// the command output. NOTE: we are leaving "terraform_version" out of this test +// to avoid needing to constantly update the expected output; as a potential +// TODO we could write a jsonplan compare function. 
+type plan struct { + FormatVersion string `json:"format_version,omitempty"` + Variables map[string]interface{} `json:"variables,omitempty"` + PlannedValues map[string]interface{} `json:"planned_values,omitempty"` + ResourceDrift []interface{} `json:"resource_drift,omitempty"` + ResourceChanges []interface{} `json:"resource_changes,omitempty"` + OutputChanges map[string]interface{} `json:"output_changes,omitempty"` + PriorState priorState `json:"prior_state,omitempty"` + Config map[string]interface{} `json:"configuration,omitempty"` + Errored bool `json:"errored"` +} + +type priorState struct { + FormatVersion string `json:"format_version,omitempty"` + Values map[string]interface{} `json:"values,omitempty"` + SensitiveValues map[string]bool `json:"sensitive_values,omitempty"` +} diff --git a/pkg/command/state_command.go b/pkg/command/state_command.go new file mode 100644 index 00000000000..6fea3958d81 --- /dev/null +++ b/pkg/command/state_command.go @@ -0,0 +1,45 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +// StateCommand is a Command implementation that just shows help for +// the subcommands nested below it. +type StateCommand struct { + StateMeta +} + +func (c *StateCommand) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *StateCommand) Help() string { + helpText := ` +Usage: tofu [global options] state [options] [args] + + This command has subcommands for advanced state management. + + These subcommands can be used to slice and dice the OpenTofu state. + This is sometimes necessary in advanced cases. For your safety, all + state management commands that modify the state create a timestamped + backup of the state prior to making modifications. 
+ + The structure and output of the commands is specifically tailored to work + well with the common Unix utilities such as grep, awk, etc. We recommend + using those tools to perform more advanced state tasks. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateCommand) Synopsis() string { + return "Advanced state management" +} diff --git a/pkg/command/state_list.go b/pkg/command/state_list.go new file mode 100644 index 00000000000..6954bd96e75 --- /dev/null +++ b/pkg/command/state_list.go @@ -0,0 +1,165 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// StateListCommand is a Command implementation that lists the resources +// within a state file. +type StateListCommand struct { + Meta + StateMeta +} + +func (c *StateListCommand) Run(args []string) int { + args = c.Meta.process(args) + var statePath string + cmdFlags := c.Meta.defaultFlagSet("state list") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.StringVar(&statePath, "state", "", "path") + lookupId := cmdFlags.String("id", "", "Restrict output to paths with a resource having the specified ID.") + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return cli.RunResultHelp + } + args = cmdFlags.Args() + + if statePath != "" { + c.Meta.statePath = statePath + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) + return 1 + } + + // This is a read-only command + 
c.ignoreRemoteVersionConflict(b) + + // Get the state + env, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + stateMgr, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + state := stateMgr.State() + if state == nil { + c.Ui.Error(errStateNotFound) + return 1 + } + + var addrs []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + if len(args) == 0 { + addrs, diags = c.lookupAllResourceInstanceAddrs(state) + } else { + addrs, diags = c.lookupResourceInstanceAddrs(state, args...) + } + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + for _, addr := range addrs { + if is := state.ResourceInstance(addr); is != nil { + if *lookupId == "" || *lookupId == states.LegacyInstanceObjectID(is.Current) { + c.Ui.Output(addr.String()) + } + } + } + + c.showDiagnostics(diags) + + return 0 +} + +func (c *StateListCommand) Help() string { + helpText := ` +Usage: tofu [global options] state (list|ls) [options] [address...] + + List resources in the OpenTofu state. + + This command lists resource instances in the OpenTofu state. The address + argument can be used to filter the instances by resource or module. If + no pattern is given, all resource instances are listed. + + The addresses must either be module addresses or absolute resource + addresses, such as: + aws_instance.example + module.example + module.example.module.child + module.example.aws_instance.example + + An error will be returned if any of the resources or modules given as + filter addresses do not exist in the state. + +Options: + + -state=statefile Path to a OpenTofu state file to use to look + up OpenTofu-managed resources. By default, OpenTofu + will consult the state of the currently-selected + workspace. 
+ + -id=ID Filters the results to include only instances whose + resource types have an attribute named "id" whose value + equals the given id string. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateListCommand) Synopsis() string { + return "List resources in the state" +} + +const errStateLoadingState = `Error loading the state: %[1]s + +Please ensure that your OpenTofu state exists and that you've +configured it properly. You can use the "-state" flag to point +OpenTofu at another state file.` + +const errStateNotFound = `No state file was found! + +State management commands require a state file. Run this command +in a directory where OpenTofu has been run or use the -state flag +to point the command to a specific state location.` diff --git a/pkg/command/state_list_test.go b/pkg/command/state_list_test.go new file mode 100644 index 00000000000..0717f4fc9aa --- /dev/null +++ b/pkg/command/state_list_test.go @@ -0,0 +1,275 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestStateList(t *testing.T) { + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateListOutput) + "\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateListWithID(t *testing.T) { + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "-id", "bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateListOutput) + "\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateListWithNonExistentID(t *testing.T) { + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "-id", "baz", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that output is empty + if ui.OutputWriter != nil { + actual := ui.OutputWriter.String() + if actual != "" { + 
t.Fatalf("Expected an empty output but got: %q", actual) + } + } +} + +func TestStateList_backendDefaultState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-list-backend-default"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := "null_resource.a\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateList_backendCustomState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-list-backend-custom"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := "null_resource.a\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateList_backendOverrideState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-list-backend-custom"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + // This test is configured to use a local backend that has + // a custom path defined. 
So we test if we can still pass + // is a user defined state file that will then override the + // one configured in the backend. As this file does not exist + // it should exit with a no state found error. + args := []string{"-state=" + DefaultStateFilename} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + if !strings.Contains(ui.ErrorWriter.String(), "No state file was found!") { + t.Fatalf("expected a no state file error, got: %s", ui.ErrorWriter.String()) + } +} + +func TestStateList_noState(t *testing.T) { + testCwd(t) + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } +} + +func TestStateList_modules(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-list-nested-modules"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + t.Run("list resources in module and submodules", func(t *testing.T) { + args := []string{"module.nest"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + + // resources in the module and any submodules should be included in the outputs + expected := "module.nest.test_instance.nest\nmodule.nest.module.subnest.test_instance.subnest\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("submodule has resources only", func(t *testing.T) { + // now get the state for a module that has no resources, only another nested module + ui.OutputWriter.Reset() + args := []string{"module.nonexist"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + expected := 
"module.nonexist.module.child.test_instance.child\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("expanded module", func(t *testing.T) { + // finally get the state for a module with an index + ui.OutputWriter.Reset() + args := []string{"module.count"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + expected := "module.count[0].test_instance.count\nmodule.count[1].test_instance.count\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("completely nonexistent module", func(t *testing.T) { + // finally get the state for a module with an index + ui.OutputWriter.Reset() + args := []string{"module.notevenalittlebit"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + }) + +} + +const testStateListOutput = ` +test_instance.foo +` diff --git a/pkg/command/state_meta.go b/pkg/command/state_meta.go new file mode 100644 index 00000000000..991e3d59f84 --- /dev/null +++ b/pkg/command/state_meta.go @@ -0,0 +1,226 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "sort" + "time" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + + backendLocal "github.com/kubegems/opentofu/pkg/backend/local" +) + +// StateMeta is the meta struct that should be embedded in state subcommands. +type StateMeta struct { + Meta +} + +// State returns the state for this meta. This gets the appropriate state from +// the backend, but changes the way that backups are done. 
This configures +// backups to be timestamped rather than just the original state path plus a +// backup path. +func (c *StateMeta) State(enc encryption.Encryption) (statemgr.Full, error) { + var realState statemgr.Full + backupPath := c.backupPath + stateOutPath := c.statePath + + // use the specified state + if c.statePath != "" { + realState = statemgr.NewFilesystem(c.statePath, encryption.StateEncryptionDisabled()) // User specified state file should not be encrypted + } else { + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + if backendDiags.HasErrors() { + return nil, backendDiags.Err() + } + + workspace, err := c.Workspace() + if err != nil { + return nil, err + } + + // Check remote OpenTofu version is compatible + remoteVersionDiags := c.remoteVersionCheck(b, workspace) + c.showDiagnostics(remoteVersionDiags) + if remoteVersionDiags.HasErrors() { + return nil, fmt.Errorf("Error checking remote OpenTofu version") + } + + // Get the state + s, err := b.StateMgr(workspace) + if err != nil { + return nil, err + } + + // Get a local backend + localRaw, backendDiags := c.Backend(&BackendOpts{ForceLocal: true}, enc.State()) + if backendDiags.HasErrors() { + // This should never fail + panic(backendDiags.Err()) + } + localB := localRaw.(*backendLocal.Local) + _, stateOutPath, _ = localB.StatePaths(workspace) + + realState = s + } + + // We always backup state commands, so set the back if none was specified + // (the default is "-", but some tests bypass the flag parsing). + if backupPath == "-" || backupPath == "" { + // Determine the backup path. stateOutPath is set to the resulting + // file where state is written (cached in the case of remote state) + backupPath = fmt.Sprintf( + "%s.%d%s", + stateOutPath, + time.Now().UTC().Unix(), + DefaultBackupExtension) + } + + // If the backend is local (which it should always be, given our asserting + // of it above) we can now enable backups for it. 
+ if lb, ok := realState.(*statemgr.Filesystem); ok { + lb.SetBackupPath(backupPath) + } + + return realState, nil +} + +func (c *StateMeta) lookupResourceInstanceAddr(state *states.State, allowMissing bool, addrStr string) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + target, diags := addrs.ParseTargetStr(addrStr) + if diags.HasErrors() { + return nil, diags + } + + targetAddr := target.Subject + var ret []addrs.AbsResourceInstance + switch addr := targetAddr.(type) { + case addrs.ModuleInstance: + // Matches all instances within the indicated module and all of its + // descendent modules. + + // found is used to identify cases where the selected module has no + // resources, but one or more of its submodules does. + found := false + ms := state.Module(addr) + if ms != nil { + found = true + ret = append(ret, c.collectModuleResourceInstances(ms)...) + } + for _, cms := range state.Modules { + if !addr.Equal(cms.Addr) { + if addr.IsAncestor(cms.Addr) || addr.TargetContains(cms.Addr) { + found = true + ret = append(ret, c.collectModuleResourceInstances(cms)...) + } + } + } + + if !found && !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown module", + fmt.Sprintf(`The current state contains no module at %s. If you've just added this module to the configuration, you must run "tofu apply" first to create the module's entry in the state.`, addr), + )) + } + + case addrs.AbsResource: + // Matches all instances of the specific selected resource. + rs := state.Resource(addr) + if rs == nil { + if !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown resource", + fmt.Sprintf(`The current state contains no resource %s. If you've just added this resource to the configuration, you must run "tofu apply" first to create the resource's entry in the state.`, addr), + )) + } + break + } + ret = append(ret, c.collectResourceInstances(addr.Module, rs)...) 
+ case addrs.AbsResourceInstance: + is := state.ResourceInstance(addr) + if is == nil { + if !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown resource instance", + fmt.Sprintf(`The current state contains no resource instance %s. If you've just added its resource to the configuration or have changed the count or for_each arguments, you must run "tofu apply" first to update the resource's entry in the state.`, addr), + )) + } + break + } + ret = append(ret, addr) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + + return ret, diags +} + +func (c *StateMeta) lookupSingleStateObjectAddr(state *states.State, addrStr string) (addrs.Targetable, tfdiags.Diagnostics) { + target, diags := addrs.ParseTargetStr(addrStr) + if diags.HasErrors() { + return nil, diags + } + return target.Subject, diags +} + +func (c *StateMeta) lookupResourceInstanceAddrs(state *states.State, addrStrs ...string) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + var ret []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, addrStr := range addrStrs { + moreAddrs, moreDiags := c.lookupResourceInstanceAddr(state, false, addrStr) + ret = append(ret, moreAddrs...) + diags = diags.Append(moreDiags) + } + return ret, diags +} + +func (c *StateMeta) lookupAllResourceInstanceAddrs(state *states.State) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + var ret []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, ms := range state.Modules { + ret = append(ret, c.collectModuleResourceInstances(ms)...) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret, diags +} + +func (c *StateMeta) collectModuleResourceInstances(ms *states.Module) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + for _, rs := range ms.Resources { + ret = append(ret, c.collectResourceInstances(ms.Addr, rs)...) 
+ } + return ret +} + +func (c *StateMeta) collectResourceInstances(moduleAddr addrs.ModuleInstance, rs *states.Resource) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + for key := range rs.Instances { + ret = append(ret, rs.Addr.Instance(key)) + } + return ret +} + +func (c *StateMeta) lookupAllResources(state *states.State) ([]*states.Resource, tfdiags.Diagnostics) { + var ret []*states.Resource + var diags tfdiags.Diagnostics + for _, ms := range state.Modules { + for _, resource := range ms.Resources { + ret = append(ret, resource) + } + } + return ret, diags +} diff --git a/pkg/command/state_mv.go b/pkg/command/state_mv.go new file mode 100644 index 00000000000..2796bfb72f5 --- /dev/null +++ b/pkg/command/state_mv.go @@ -0,0 +1,586 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StateMvCommand is a Command implementation that shows a single resource. 
+type StateMvCommand struct { + StateMeta +} + +func (c *StateMvCommand) Run(args []string) int { + args = c.Meta.process(args) + // We create two metas to track the two states + var backupPathOut, statePathOut string + + var dryRun bool + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state mv") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock states") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.statePath, "state", "", "path") + cmdFlags.StringVar(&statePathOut, "state-out", "", "path") + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + args = cmdFlags.Args() + if len(args) != 2 { + c.Ui.Error("Exactly two arguments expected.\n") + return cli.RunResultHelp + } + + if diags := c.Meta.checkRequiredVersion(); diags != nil { + c.showDiagnostics(diags) + return 1 + } + + // If backup or backup-out options are set + // and the state option is not set, make sure + // the backend is local + backupOptionSetWithoutStateOption := c.backupPath != "-" && c.statePath == "" + backupOutOptionSetWithoutStateOption := backupPathOut != "-" && c.statePath == "" + + var setLegacyLocalBackendOptions []string + if backupOptionSetWithoutStateOption { + setLegacyLocalBackendOptions = append(setLegacyLocalBackendOptions, "-backup") + } + if backupOutOptionSetWithoutStateOption { + setLegacyLocalBackendOptions = append(setLegacyLocalBackendOptions, "-backup-out") + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + if len(setLegacyLocalBackendOptions) > 0 { + currentBackend, diags := c.backendFromConfig(&BackendOpts{}, enc.State()) + if 
diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // If currentBackend is nil and diags didn't have errors, + // this means we have an implicit local backend + _, isLocalBackend := currentBackend.(backend.Local) + if currentBackend != nil && !isLocalBackend { + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid command line options: %s", strings.Join(setLegacyLocalBackendOptions[:], ", ")), + "Command line options -backup and -backup-out are legacy options that operate on a local state file only. You must specify a local state file with the -state option or switch to the local backend.", + ), + ) + c.showDiagnostics(diags) + return 1 + } + } + + // Read the from state + stateFromMgr, err := c.State(enc) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateFromMgr, "state-mv"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + if err := stateFromMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh source state: %s", err)) + return 1 + } + + stateFrom := stateFromMgr.State() + if stateFrom == nil { + c.Ui.Error(errStateNotFound) + return 1 + } + + // Read the destination state + stateToMgr := stateFromMgr + stateTo := stateFrom + + if statePathOut != "" { + c.statePath = statePathOut + c.backupPath = backupPathOut + + stateToMgr, err = c.State(enc) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateToMgr, "state-mv"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } 
+ defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + if err := stateToMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) + return 1 + } + + stateTo = stateToMgr.State() + if stateTo == nil { + stateTo = states.NewState() + } + } + + var diags tfdiags.Diagnostics + sourceAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[0]) + diags = diags.Append(moreDiags) + destAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[1]) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + prefix := "Move" + if dryRun { + prefix = "Would move" + } + + const msgInvalidSource = "Invalid source address" + const msgInvalidTarget = "Invalid target address" + + var moved int + ssFrom := stateFrom.SyncWrapper() + sourceAddrs := c.sourceObjectAddrs(stateFrom, sourceAddr) + if len(sourceAddrs) == 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("Cannot move %s: does not match anything in the current state.", sourceAddr), + )) + c.showDiagnostics(diags) + return 1 + } + for _, rawAddrFrom := range sourceAddrs { + switch addrFrom := rawAddrFrom.(type) { + case addrs.ModuleInstance: + search := sourceAddr.(addrs.ModuleInstance) + addrTo, ok := destAddr.(addrs.ModuleInstance) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the target must also be a module.", addrFrom, destAddr), + )) + c.showDiagnostics(diags) + return 1 + } + + if len(search) < len(addrFrom) { + n := make(addrs.ModuleInstance, 0, len(addrTo)+len(addrFrom)-len(search)) + n = append(n, addrTo...) + n = append(n, addrFrom[len(search):]...) 
+ addrTo = n + } + + if stateTo.Module(addrTo) != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, "destination module already exists")) + return 1 + } + + ms := ssFrom.Module(addrFrom) + if ms == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveModule(addrFrom) + + // Update the address before adding it to the state. + ms.Addr = addrTo + stateTo.Modules[addrTo.String()] = ms + } + + case addrs.AbsResource: + addrTo, ok := destAddr.(addrs.AbsResource) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the source is a whole resource (not a resource instance) so the target must also be a whole resource.", addrFrom, destAddr), + )) + c.showDiagnostics(diags) + return 1 + } + diags = diags.Append(c.validateResourceMove(addrFrom, addrTo)) + + if stateTo.Resource(addrTo) != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: there is already a resource at that address in the current state.", addrTo), + )) + } + + rs := ssFrom.Resource(addrFrom) + if rs == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveResource(addrFrom) + + // Update the address before adding it to the state. 
+ rs.Addr = addrTo + stateTo.EnsureModule(addrTo.Module).Resources[addrTo.Resource.String()] = rs + } + + case addrs.AbsResourceInstance: + addrTo, ok := destAddr.(addrs.AbsResourceInstance) + if !ok { + ra, ok := destAddr.(addrs.AbsResource) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the target must also be a resource instance.", addrFrom, destAddr), + )) + c.showDiagnostics(diags) + return 1 + } + addrTo = ra.Instance(addrs.NoKey) + } + + diags = diags.Append(c.validateResourceMove(addrFrom.ContainingResource(), addrTo.ContainingResource())) + + if stateTo.Module(addrTo.Module) == nil { + // moving something to a mew module, so we need to ensure it exists + stateTo.EnsureModule(addrTo.Module) + } + if stateTo.ResourceInstance(addrTo) != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: there is already a resource instance at that address in the current state.", addrTo), + )) + } + + is := ssFrom.ResourceInstance(addrFrom) + if is == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), args[1])) + if !dryRun { + fromResourceAddr := addrFrom.ContainingResource() + fromResource := ssFrom.Resource(fromResourceAddr) + fromProviderAddr := fromResource.ProviderConfig + ssFrom.ForgetResourceInstanceAll(addrFrom) + ssFrom.RemoveResourceIfEmpty(fromResourceAddr) + + rs := stateTo.Resource(addrTo.ContainingResource()) + if rs == nil { + // If we're moving to an address without an index then that + // suggests the user's intent is to establish both the + // resource and the instance at the same time (since the + // address covers both). 
If there's an index in the + // target then allow creating the new instance here. + resourceAddr := addrTo.ContainingResource() + stateTo.SyncWrapper().SetResourceProvider( + resourceAddr, + fromProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource + ) + rs = stateTo.Resource(resourceAddr) + } + + rs.Instances[addrTo.Resource.Key] = is + } + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("Cannot move %s: OpenTofu doesn't know how to move this object.", rawAddrFrom), + )) + } + + // Look for any dependencies that may be effected and + // remove them to ensure they are recreated in full. + for _, mod := range stateTo.Modules { + for _, res := range mod.Resources { + for _, ins := range res.Instances { + if ins.Current == nil { + continue + } + + for _, dep := range ins.Current.Dependencies { + // check both directions here, since we may be moving + // an instance which is in a resource, or a module + // which can contain a resource. 
+ if dep.TargetContains(rawAddrFrom) || rawAddrFrom.TargetContains(dep) { + ins.Current.Dependencies = nil + break + } + } + } + } + } + } + + if dryRun { + if moved == 0 { + c.Ui.Output("Would have moved nothing.") + } + return 0 // This is as far as we go in dry-run mode + } + + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + if isCloudMode(b) { + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags = c.MaybeGetSchemas(stateTo, nil) + diags = diags.Append(schemaDiags) + } + + // Write the new state + if err := stateToMgr.WriteState(stateTo); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateToMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + + // Write the old state if it is different + if stateTo != stateFrom { + if err := stateFromMgr.WriteState(stateFrom); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateFromMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + } + + c.showDiagnostics(diags) + + if moved == 0 { + c.Ui.Output("No matching objects found.") + } else { + c.Ui.Output(fmt.Sprintf("Successfully moved %d object(s).", moved)) + } + return 0 +} + +// sourceObjectAddrs takes a single source object address and expands it to +// potentially multiple objects that need to be handled within it. +// +// In particular, this handles the case where a module is requested directly: +// if it has any child modules, then they must also be moved. 
It also resolves +// the ambiguity that an index-less resource address could either be a resource +// address or a resource instance address, by making a decision about which +// is intended based on the current state of the resource in question. +func (c *StateMvCommand) sourceObjectAddrs(state *states.State, matched addrs.Targetable) []addrs.Targetable { + var ret []addrs.Targetable + + switch addr := matched.(type) { + case addrs.ModuleInstance: + for _, mod := range state.Modules { + if len(mod.Addr) < len(addr) { + continue // can't possibly be our selection or a child of it + } + if !mod.Addr[:len(addr)].Equal(addr) { + continue + } + ret = append(ret, mod.Addr) + } + case addrs.AbsResource: + // If this refers to a resource without "count" or "for_each" set then + // we'll assume the user intended it to be a resource instance + // address instead, to allow for requests like this: + // tofu state mv aws_instance.foo aws_instance.bar[1] + // That wouldn't be allowed if aws_instance.foo had multiple instances + // since we can't move multiple instances into one. 
+ if rs := state.Resource(addr); rs != nil { + if _, ok := rs.Instances[addrs.NoKey]; ok { + ret = append(ret, addr.Instance(addrs.NoKey)) + } else { + ret = append(ret, addr) + } + } + default: + ret = append(ret, matched) + } + + return ret +} + +func (c *StateMvCommand) validateResourceMove(addrFrom, addrTo addrs.AbsResource) tfdiags.Diagnostics { + const msgInvalidRequest = "Invalid state move request" + + var diags tfdiags.Diagnostics + if addrFrom.Resource.Mode != addrTo.Resource.Mode { + switch addrFrom.Resource.Mode { + case addrs.ManagedResourceMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: a managed resource can be moved only to another managed resource address.", addrFrom, addrTo), + )) + case addrs.DataResourceMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: a data resource can be moved only to another data resource address.", addrFrom, addrTo), + )) + default: + // In case a new mode is added in future, this unhelpful error is better than nothing. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: cannot change resource mode.", addrFrom, addrTo), + )) + } + } + if addrFrom.Resource.Type != addrTo.Resource.Type { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: resource types don't match.", addrFrom, addrTo), + )) + } + return diags +} + +func (c *StateMvCommand) Help() string { + helpText := ` +Usage: tofu [global options] state (move|mv) [options] SOURCE DESTINATION + + This command will move an item matched by the address given to the + destination address. This command can also move to a destination address + in a completely different state file. + + This can be used for simple resource renaming, moving items to and from + a module, moving entire modules, and more. 
And because this command can also + move data to a completely new state, it can also be used for refactoring + one configuration into multiple separately managed OpenTofu configurations. + + This command will output a backup copy of the state prior to saving any + changes. The backup cannot be disabled. Due to the destructive nature + of this command, backups are required. + + If you're moving an item to a different state file, a backup will be created + for each state file. + +Options: + + -dry-run If set, prints out what would've been moved but doesn't + actually move anything. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -ignore-remote-version A rare option used for the remote backend only. See + the remote backend documentation for more information. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateMvCommand) Synopsis() string { + return "Move an item in the state" +} + +const errStateMv = `Error moving state: %s + +Please ensure your addresses and state paths are valid. No +state was persisted. 
Your existing states are untouched.` diff --git a/pkg/command/state_mv_test.go b/pkg/command/state_mv_test.go new file mode 100644 index 00000000000..fa035127e04 --- /dev/null +++ b/pkg/command/state_mv_test.go @@ -0,0 +1,2133 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestStateMv(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + 
"test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateMvOutput) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvOutputOriginal) + + // Change the single instance to a counted instance + args = []string{ + "-state", statePath, + "test_instance.bar", + "test_instance.bar[0]", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // extract the resource and verify the mode + s := testStateRead(t, statePath) + addr, diags := addrs.ParseAbsResourceStr("test_instance.bar") + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + for key := range s.Resource(addr).Instances { + if _, ok := key.(addrs.IntKey); !ok { + t.Fatalf("expected each mode List, got key %q", key) + } + } + + // change from list to map + args = []string{ + "-state", statePath, + "test_instance.bar[0]", + "test_instance.bar[\"baz\"]", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // extract the resource and verify the mode + s = testStateRead(t, statePath) + addr, diags = addrs.ParseAbsResourceStr("test_instance.bar") + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + for key := range s.Resource(addr).Instances { + if _, ok := key.(addrs.StringKey); !ok { + t.Fatalf("expected each mode map, found key %q", key) + } + } + + // change from from map back to single + args = []string{ + "-state", statePath, + "test_instance.bar[\"baz\"]", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // extract the resource and verify the mode + s = testStateRead(t, statePath) + addr, diags = 
addrs.ParseAbsResourceStr("test_instance.bar") + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + for key := range s.Resource(addr).Instances { + if key != addrs.NoKey { + t.Fatalf("expected no each mode, found key %q", key) + } + } + +} + +func TestStateMv_backupAndBackupOutOptionsWithNonLocalBackend(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + t.Run("backup option specified", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-http"), td) + defer testChdir(t, td)() + + backupPath := filepath.Join(td, "backup") + + // Set up our backend state using mock state + dataState, srv := testBackendState(t, state, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + gotErr := ui.ErrorWriter.String() + wantErr := ` +Error: Invalid command line options: -backup + +Command line options -backup and -backup-out are legacy options that operate +on a local state file only. You must specify a local state file with the +-state option or switch to the local backend. 
+ +` + if gotErr != wantErr { + t.Fatalf("expected error\ngot:%s\n\nwant:%s", gotErr, wantErr) + } + }) + + t.Run("backup-out option specified", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-http"), td) + defer testChdir(t, td)() + + backupOutPath := filepath.Join(td, "backup-out") + + // Set up our backend state using mock state + dataState, srv := testBackendState(t, state, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup-out", backupOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + gotErr := ui.ErrorWriter.String() + wantErr := ` +Error: Invalid command line options: -backup-out + +Command line options -backup and -backup-out are legacy options that operate +on a local state file only. You must specify a local state file with the +-state option or switch to the local backend. 
+ +` + if gotErr != wantErr { + t.Fatalf("expected error\ngot:%s\n\nwant:%s", gotErr, wantErr) + } + }) + + t.Run("backup and backup-out options specified", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-http"), td) + defer testChdir(t, td)() + + backupPath := filepath.Join(td, "backup") + backupOutPath := filepath.Join(td, "backup-out") + + // Set up our backend state using mock state + dataState, srv := testBackendState(t, state, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "-backup-out", backupOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + gotErr := ui.ErrorWriter.String() + wantErr := ` +Error: Invalid command line options: -backup, -backup-out + +Command line options -backup and -backup-out are legacy options that operate +on a local state file only. You must specify a local state file with the +-state option or switch to the local backend. 
+ +` + if gotErr != wantErr { + t.Fatalf("expected error\ngot:%s\n\nwant:%s", gotErr, wantErr) + } + }) + + t.Run("backup option specified with state option", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-http"), td) + defer testChdir(t, td)() + + statePath := testStateFile(t, state) + backupPath := filepath.Join(td, "backup") + + // Set up our backend state using mock state + dataState, srv := testBackendState(t, state, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-backup", backupPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateMvBackupAndBackupOutOptionsWithNonLocalBackendOutput) + }) + + t.Run("backup-out option specified with state option", func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend-http"), td) + defer testChdir(t, td)() + + statePath := testStateFile(t, state) + backupOutPath := filepath.Join(td, "backup-out") + + // Set up our backend state using mock state + dataState, srv := testBackendState(t, state, 200) + defer srv.Close() + testStateFileRemote(t, dataState) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-backup-out", backupOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + 
testStateOutput(t, statePath, testStateMvBackupAndBackupOutOptionsWithNonLocalBackendOutput) + }) +} + +func TestStateMv_resourceToInstance(t *testing.T) { + // A single resource (no count defined) + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceProvider( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance), + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_instance.bar[0]", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, ` 
+test_instance.bar.0: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvOutputOriginal) +} + +func TestStateMv_resourceToInstanceErr(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceProvider( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance), + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_instance.bar[0]", + } + + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + expectedErr := ` +Error: Invalid target address + +Cannot move test_instance.foo to test_instance.bar[0]: the source is a whole +resource (not a resource instance) so the target must also be a whole +resource. 
+ +` + errOutput := ui.ErrorWriter.String() + if errOutput != expectedErr { + t.Errorf("wrong output\n%s", cmp.Diff(errOutput, expectedErr)) + } +} + +func TestStateMv_resourceToInstanceErrInAutomation(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceProvider( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance), + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + RunningInAutomation: true, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_instance.bar[0]", + } + + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + expectedErr := ` +Error: Invalid target address + +Cannot move test_instance.foo to test_instance.bar[0]: the source is a whole +resource (not a resource instance) so the target must also be a whole +resource. 
+ +` + errOutput := ui.ErrorWriter.String() + if errOutput != expectedErr { + t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", errOutput, expectedErr) + t.Errorf("%s", cmp.Diff(errOutput, expectedErr)) + } +} + +func TestStateMv_instanceToResource(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo[0]", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) + + // Test we have backups + backups := 
testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], ` +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.0: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) +} + +func TestStateMv_instanceToNewResource(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo[0]", + "test_instance.bar[\"new\"]", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, ` +test_instance.bar["new"]: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) + + // now move the instance to a new resource in a new module + args = []string{ + "-state", statePath, + "test_instance.bar[\"new\"]", + "module.test.test_instance.baz[\"new\"]", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, ` + +module.test: + test_instance.baz["new"]: + ID = 
bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) +} + +func TestStateMv_differentResourceTypes(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_network.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + gotErr := ui.ErrorWriter.String() + wantErr := ` +Error: Invalid state move request + +Cannot move test_instance.foo to test_network.bar: resource types don't +match. 
+ +` + if gotErr != wantErr { + t.Fatalf("expected initialization error\ngot:\n%s\n\nwant:%s", gotErr, wantErr) + } +} + +// don't modify backend state is we supply a -state flag +func TestStateMv_explicitWithBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("init-backend"), td) + defer testChdir(t, td)() + + backupPath := filepath.Join(td, "backup") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + // init our backend + ui := new(cli.MockUi) + view, _ := testView(t) + ic := &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{} + if code := ic.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // only modify statePath + p := testProvider() + ui = new(cli.MockUi) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args = []string{ + "-backup", backupPath, + "-state", statePath, + "test_instance.foo", + "test_instance.bar", + 
} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateMvOutput) +} + +func TestStateMv_backupExplicit(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + backupPath := statePath + ".backup.test" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "-state", statePath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateMvOutput) + + // Test backup + testStateOutput(t, backupPath, testStateMvOutputOriginal) +} + +func TestStateMv_stateOutNew(t *testing.T) { + state := states.BuildState(func(s 
*states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + stateOutPath := statePath + ".out" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, stateOutPath, testStateMvOutput_stateOut) + testStateOutput(t, statePath, testStateMvOutput_stateOutSrc) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvOutput_stateOutOriginal) +} + +func TestStateMv_stateOutExisting(t *testing.T) { + stateSrc := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, stateSrc) + + stateDst := states.BuildState(func(s *states.SyncState) { 
+ s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "qux", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + stateOutPath := testStateFile(t, stateDst) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, stateOutPath, testStateMvExisting_stateDst) + testStateOutput(t, statePath, testStateMvExisting_stateSrc) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvExisting_stateSrcOriginal) + + backups = testStateBackups(t, filepath.Dir(stateOutPath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvExisting_stateDstOriginal) +} + +func TestStateMv_noState(t *testing.T) { + testCwd(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{"from", "to"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestStateMv_stateOutNew_count(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + 
s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + stateOutPath := statePath + ".out" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, stateOutPath, testStateMvCount_stateOut) + testStateOutput(t, statePath, testStateMvCount_stateOutSrc) + + // Test we have 
backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvCount_stateOutOriginal) +} + +// Modules with more than 10 resources were sorted lexically, causing the +// indexes in the new location to change. +func TestStateMv_stateOutNew_largeCount(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + // test_instance.foo has 11 instances, all the same except for their ids + for i := 0; i < 11; i++ { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(fmt.Sprintf(`{"id":"foo%d","foo":"value","bar":"value"}`, i)), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + } + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + stateOutPath := statePath + ".out" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, 
stateOutPath, testStateMvLargeCount_stateOut) + testStateOutput(t, statePath, testStateMvLargeCount_stateOutSrc) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvLargeCount_stateOutOriginal) +} + +func TestStateMv_stateOutNew_nestedModule(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("child1", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("child2", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + statePath := testStateFile(t, state) + stateOutPath := statePath + ".out" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath, + "module.foo", + "module.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, stateOutPath, 
testStateMvNestedModule_stateOut) + testStateOutput(t, statePath, testStateMvNestedModule_stateOutSrc) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvNestedModule_stateOutOriginal) +} + +func TestStateMv_toNewModule(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + statePath := testStateFile(t, state) + stateOutPath1 := statePath + ".out1" + stateOutPath2 := statePath + ".out2" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "-state-out", stateOutPath1, + "test_instance.bar", + "module.bar.test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, stateOutPath1, testStateMvNewModule_stateOut) + testStateOutput(t, statePath, testStateMvNestedModule_stateOutSrc) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvNewModule_stateOutOriginal) + + // now verify we can move the module itself + args = []string{ + "-state", stateOutPath1, + "-state-out", stateOutPath2, + "module.bar", + "module.foo", + } + if code := 
c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + testStateOutput(t, stateOutPath2, testStateMvModuleNewModule_stateOut) +} + +func TestStateMv_withinBackend(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-unchanged"), td) + defer testChdir(t, td)() + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + // the local backend state file is "foo" + statePath := "local-state.tfstate" + backupPath := "local-state.backup" + + f, err := os.Create(statePath) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if err := writeStateForTesting(state, f); err != nil { + t.Fatal(err) + } + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, 
ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testStateMvOutput) + testStateOutput(t, backupPath, testStateMvOutputOriginal) +} + +func TestStateMv_fromBackendToLocal(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-unchanged"), td) + defer testChdir(t, td)() + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( + mustResourceAddr("test_instance.foo").Resource.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( + mustResourceAddr("test_instance.baz").Resource.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // the local backend state file is "foo" + statePath := "local-state.tfstate" + + // real "local" state file + statePathOut := "real-local.tfstate" + + f, err := os.Create(statePath) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if err := writeStateForTesting(state, f); err != nil { + t.Fatal(err) + } + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state-out", statePathOut, + "test_instance.foo", + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePathOut, testStateMvCount_stateOutSrc) + + // the backend state should be left with only baz + testStateOutput(t, statePath, 
testStateMvOriginal_backend) +} + +// This test covers moving the only resource in a module to a new address in +// that module, which triggers the maybePruneModule functionality. This caused +// a panic report: https://github.com/hashicorp/terraform/issues/25520 +func TestStateMv_onlyResourceInModule(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("foo", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + statePath := testStateFile(t, state) + testStateOutput(t, statePath, testStateMvOnlyResourceInModule_original) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "module.foo.test_instance.foo", + "module.foo.test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateMvOnlyResourceInModule_output) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateMvOnlyResourceInModule_original) +} + +func TestStateMvHelp(t *testing.T) { + c := &StateMvCommand{} + if strings.ContainsRune(c.Help(), '\t') { + t.Fatal("help text contains tab character, which will result in poor formatting") + } +} + +func TestStateMvInvalidSourceAddress(t *testing.T) { + state := 
states.BuildState(func(s *states.SyncState) {}) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "foo.bar1", + "foo.bar2", + } + code := c.Run(args) + if code != 1 { + t.Fatalf("expected error code 1, got:\n%d", code) + } +} + +func TestStateMv_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", 
statePath, + "test_instance.foo", + "test_instance.bar", + } + + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // State is unchanged + testStateOutput(t, statePath, testStateMvOutputOriginal) + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +const testStateMvOutputOriginal = ` +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value + + Dependencies: + test_instance.foo +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvOutput = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvBackupAndBackupOutOptionsWithNonLocalBackendOutput = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvCount_stateOut = ` +test_instance.bar.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.1: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvCount_stateOutSrc = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const 
testStateMvCount_stateOutOriginal = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.1: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvLargeCount_stateOut = ` +test_instance.bar.0: + ID = foo0 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.1: + ID = foo1 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.2: + ID = foo2 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.3: + ID = foo3 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.4: + ID = foo4 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.5: + ID = foo5 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.6: + ID = foo6 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.7: + ID = foo7 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.8: + ID = foo8 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.9: + ID = foo9 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.bar.10: + ID = foo10 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvLargeCount_stateOutSrc = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + 
+const testStateMvLargeCount_stateOutOriginal = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.0: + ID = foo0 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.1: + ID = foo1 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.2: + ID = foo2 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.3: + ID = foo3 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.4: + ID = foo4 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.5: + ID = foo5 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.6: + ID = foo6 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.7: + ID = foo7 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.8: + ID = foo8 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.9: + ID = foo9 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo.10: + ID = foo10 + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvNestedModule_stateOut = ` + +module.bar.child1: + test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +module.bar.child2: + test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvNewModule_stateOut = ` + +module.bar: + test_instance.bar: + ID = bar + provider = 
provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvModuleNewModule_stateOut = ` + +module.foo: + test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvNewModule_stateOutOriginal = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvNestedModule_stateOutSrc = ` + +` + +const testStateMvNestedModule_stateOutOriginal = ` + +module.foo.child1: + test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +module.foo.child2: + test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvOutput_stateOut = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvOutput_stateOutSrc = ` + +` + +const testStateMvOutput_stateOutOriginal = ` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvExisting_stateSrc = ` + +` + +const testStateMvExisting_stateDst = ` +test_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.qux: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] +` + +const testStateMvExisting_stateSrcOriginal = ` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvExisting_stateDstOriginal = ` +test_instance.qux: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] +` + +const testStateMvOriginal_backend = ` +test_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value 
+ foo = value +` + +const testStateMvOnlyResourceInModule_original = ` + +module.foo: + test_instance.foo.0: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateMvOnlyResourceInModule_output = ` + +module.foo: + test_instance.bar.0: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` diff --git a/pkg/command/state_pull.go b/pkg/command/state_pull.go new file mode 100644 index 00000000000..48fceea8245 --- /dev/null +++ b/pkg/command/state_pull.go @@ -0,0 +1,118 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// StatePullCommand is a Command implementation that shows a single resource. 
+type StatePullCommand struct { + Meta + StateMeta +} + +func (c *StatePullCommand) Run(args []string) int { + args = c.Meta.process(args) + cmdFlags := c.Meta.defaultFlagSet("state pull") + c.Meta.varFlagSet(cmdFlags) + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + if diags := c.Meta.checkRequiredVersion(); diags != nil { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) + return 1 + } + + // This is a read-only command + c.ignoreRemoteVersionConflict(b) + + // Get the state manager for the current workspace + env, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + stateMgr, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) + return 1 + } + + // Get a statefile object representing the latest snapshot + stateFile := statemgr.Export(stateMgr) + + if stateFile != nil { // we produce no output if the statefile is nil + var buf bytes.Buffer + err = statefile.Write(stateFile, &buf, encryption.StateEncryptionDisabled()) // Don't encrypt to stdout + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } + + c.Ui.Output(buf.String()) + } + + return 0 +} + +func (c *StatePullCommand) Help() string { + helpText := ` +Usage: tofu [global options] state pull [options] + + Pull the state from its location, upgrade the local copy, and output it + to stdout. + + This command "pulls" the current state and outputs it to stdout. 
+ As part of this process, OpenTofu will upgrade the state format of the + local copy to the current version. + + The primary use of this is for state stored remotely. This command + will still work with local state but is less useful for this. + +Options: + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` + return strings.TrimSpace(helpText) +} + +func (c *StatePullCommand) Synopsis() string { + return "Pull current state and output to stdout" +} diff --git a/pkg/command/state_pull_test.go b/pkg/command/state_pull_test.go new file mode 100644 index 00000000000..c56dc02f75c --- /dev/null +++ b/pkg/command/state_pull_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestStatePull(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-pull-backend"), td) + defer testChdir(t, td)() + + expected, err := os.ReadFile("local-state.tfstate") + if err != nil { + t.Fatalf("error reading state: %v", err) + } + + p := testProvider() + ui := new(cli.MockUi) + c := &StatePullCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := ui.OutputWriter.Bytes() + if !bytes.Equal(actual, expected) { + t.Fatalf("expected:\n%s\n\nto include: %q", actual, expected) + } +} + +func TestStatePull_noState(t *testing.T) { + testCwd(t) + + p := testProvider() + ui := cli.NewMockUi() + c := &StatePullCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := ui.OutputWriter.String() + if actual != "" { + t.Fatalf("bad: %s", actual) + } +} + +func TestStatePull_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StatePullCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if 
!strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} diff --git a/pkg/command/state_push.go b/pkg/command/state_push.go new file mode 100644 index 00000000000..92c31caa07b --- /dev/null +++ b/pkg/command/state_push.go @@ -0,0 +1,210 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StatePushCommand is a Command implementation that shows a single resource. 
+type StatePushCommand struct { + Meta + StateMeta +} + +func (c *StatePushCommand) Run(args []string) int { + args = c.Meta.process(args) + var flagForce bool + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state push") + cmdFlags.BoolVar(&flagForce, "force", false, "") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + args = cmdFlags.Args() + + if len(args) != 1 { + c.Ui.Error("Exactly one argument expected.\n") + return cli.RunResultHelp + } + + if diags := c.Meta.checkRequiredVersion(); diags != nil { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + // Determine our reader for the input state. This is the filepath + // or stdin if "-" is given. + var r io.Reader = os.Stdin + if args[0] != "-" { + f, err := os.Open(args[0]) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // Note: we don't need to defer a Close here because we do a close + // automatically below directly after the read. + + r = f + } + + // Read the state + srcStateFile, err := statefile.Read(r, encryption.StateEncryptionDisabled()) // Assume the given statefile is not encrypted + if c, ok := r.(io.Closer); ok { + // Close the reader if possible right now since we're done with it. 
+ c.Close() + } + if err != nil { + c.Ui.Error(fmt.Sprintf("Error reading source state %q: %s", args[0], err)) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) + return 1 + } + + // Determine the workspace name + workspace, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + + // Check remote OpenTofu version is compatible + remoteVersionDiags := c.remoteVersionCheck(b, workspace) + c.showDiagnostics(remoteVersionDiags) + if remoteVersionDiags.HasErrors() { + return 1 + } + + // Get the state manager for the currently-selected workspace + stateMgr, err := b.StateMgr(workspace) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateMgr, "state-push"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) + return 1 + } + + if srcStateFile == nil { + // We'll push a new empty state instead + srcStateFile = statemgr.NewStateFile() + } + + // Import it, forcing through the lineage/serial if requested and possible. 
+ if err := statemgr.Import(srcStateFile, stateMgr, flagForce); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + var diags tfdiags.Diagnostics + if isCloudMode(b) { + schemas, diags = c.MaybeGetSchemas(srcStateFile.State, nil) + } + + if err := stateMgr.WriteState(srcStateFile.State); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } + if err := stateMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to persist state: %s", err)) + return 1 + } + + c.showDiagnostics(diags) + return 0 +} + +func (c *StatePushCommand) Help() string { + helpText := ` +Usage: tofu [global options] state push [options] PATH + + Update remote state from a local state file at PATH. + + This command "pushes" a local state and overwrites remote state + with a local state file. The command will protect you against writing + an older serial or a different state file lineage unless you specify the + "-force" flag. + + This command works with local state (it will overwrite the local + state), but is less useful for this use case. + + If PATH is "-", then this command will read the state to push from stdin. + Data from stdin is not streamed to the backend: it is loaded completely + (until pipe close), verified, and then pushed. + +Options: + + -force Write the state even if lineages don't match or the + remote serial is higher. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. 
+ + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *StatePushCommand) Synopsis() string { + return "Update remote state from a local state file" +} diff --git a/pkg/command/state_push_test.go b/pkg/command/state_push_test.go new file mode 100644 index 00000000000..7b9cb0eb29c --- /dev/null +++ b/pkg/command/state_push_test.go @@ -0,0 +1,322 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/mitchellh/cli" +) + +func TestStatePush_empty(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-good"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "replace.tfstate") + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-good"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := new(cli.MockUi) + view, _ 
:= testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + unlock, err := testLockState(t, testDataDir, "local-state.tfstate") + if err != nil { + t.Fatal(err) + } + defer unlock() + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + if !strings.Contains(ui.ErrorWriter.String(), "Error acquiring the state lock") { + t.Fatalf("expected a lock error, got: %s", ui.ErrorWriter.String()) + } +} + +func TestStatePush_replaceMatch(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-replace-match"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "replace.tfstate") + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_replaceMatchStdin(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-replace-match"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "replace.tfstate") + + // Set up the replacement to come from stdin + var buf bytes.Buffer + if err := writeStateForTesting(expected, &buf); err != nil { + t.Fatalf("err: %s", err) + } + defer testStdinPipe(t, &buf)() + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"-force", "-"} + if code := c.Run(args); 
code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_lineageMismatch(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-bad-lineage"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "local-state.tfstate") + + p := testProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_serialNewer(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-serial-newer"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "local-state.tfstate") + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_serialOlder(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("state-push-serial-older"), td) + defer testChdir(t, td)() + + expected := testStateRead(t, "replace.tfstate") + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := 
&StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testStateRead(t, "local-state.tfstate") + if !actual.Equal(expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestStatePush_forceRemoteState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("inmem-backend"), td) + defer testChdir(t, td)() + defer inmem.Reset() + + s := states.NewState() + statePath := testStateFile(t, s) + + // init the backend + ui := new(cli.MockUi) + view, _ := testView(t) + initCmd := &InitCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := initCmd.Run([]string{}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + // create a new workspace + ui = new(cli.MockUi) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := newCmd.Run([]string{"test"}); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + // put a dummy state in place, so we have something to force + b := backend.TestBackendConfig(t, inmem.New(encryption.StateEncryptionDisabled()), nil) + sMgr, err := b.StateMgr("test") + if err != nil { + t.Fatal(err) + } + if err := sMgr.WriteState(states.NewState()); err != nil { + t.Fatal(err) + } + if err := sMgr.PersistState(nil); err != nil { + t.Fatal(err) + } + + // push our local state to that new workspace + ui = new(cli.MockUi) + c := &StatePushCommand{ + Meta: Meta{Ui: ui, View: view}, + } + + args := []string{"-force", statePath} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestStatePush_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + p := 
testProvider() + ui := cli.NewMockUi() + view, _ := testView(t) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} diff --git a/pkg/command/state_replace_provider.go b/pkg/command/state_replace_provider.go new file mode 100644 index 00000000000..ad3b32ab371 --- /dev/null +++ b/pkg/command/state_replace_provider.go @@ -0,0 +1,245 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StateReplaceProviderCommand is a Command implementation that allows users +// to change the provider associated with existing resources. This is only +// likely to be useful if a provider is forked or changes its fully-qualified +// name. 
+ +type StateReplaceProviderCommand struct { + StateMeta +} + +func (c *StateReplaceProviderCommand) Run(args []string) int { + args = c.Meta.process(args) + + var autoApprove bool + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state replace-provider") + cmdFlags.BoolVar(&autoApprove, "auto-approve", false, "skip interactive approval of replacements") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock states") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.statePath, "state", "", "path") + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return cli.RunResultHelp + } + args = cmdFlags.Args() + if len(args) != 2 { + c.Ui.Error("Exactly two arguments expected.\n") + return cli.RunResultHelp + } + + if diags := c.Meta.checkRequiredVersion(); diags != nil { + c.showDiagnostics(diags) + return 1 + } + + var diags tfdiags.Diagnostics + + // Parse from/to arguments into providers + from, fromDiags := addrs.ParseProviderSourceString(args[0]) + if fromDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf(`Invalid "from" provider %q`, args[0]), + fromDiags.Err().Error(), + )) + } + to, toDiags := addrs.ParseProviderSourceString(args[1]) + if toDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf(`Invalid "to" provider %q`, args[1]), + toDiags.Err().Error(), + )) + } + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Initialize the state manager as configured + stateMgr, err := c.State(enc) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + + // Acquire lock 
if requested + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateMgr, "state-replace-provider"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + // Refresh and load state + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh source state: %s", err)) + return 1 + } + + state := stateMgr.State() + if state == nil { + c.Ui.Error(errStateNotFound) + return 1 + } + + // Fetch all resources from the state + resources, diags := c.lookupAllResources(state) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + var willReplace []*states.Resource + + // Update all matching resources with new provider + for _, resource := range resources { + if resource.ProviderConfig.Provider.Equals(from) { + willReplace = append(willReplace, resource) + } + } + c.showDiagnostics(diags) + + if len(willReplace) == 0 { + c.Ui.Output("No matching resources found.") + return 0 + } + + // Explain the changes + colorize := c.Colorize() + c.Ui.Output("OpenTofu will perform the following actions:\n") + c.Ui.Output(colorize.Color(" [yellow]~[reset] Updating provider:")) + c.Ui.Output(colorize.Color(fmt.Sprintf(" [red]-[reset] %s", from))) + c.Ui.Output(colorize.Color(fmt.Sprintf(" [green]+[reset] %s\n", to))) + + c.Ui.Output(colorize.Color(fmt.Sprintf("[bold]Changing[reset] %d resources:\n", len(willReplace)))) + for _, resource := range willReplace { + c.Ui.Output(colorize.Color(fmt.Sprintf(" %s", resource.Addr))) + } + + // Confirm + if !autoApprove { + c.Ui.Output(colorize.Color( + "\n[bold]Do you want to make these changes?[reset]\n" + + "Only 'yes' will be accepted to continue.\n", + )) + v, err := c.Ui.Ask("Enter a value:") + if err != nil { + c.Ui.Error(fmt.Sprintf("Error asking for approval: %s", 
err)) + return 1 + } + if v != "yes" { + c.Ui.Output("Cancelled replacing providers.") + return 0 + } + } + + // Update the provider for each resource + for _, resource := range willReplace { + resource.ProviderConfig.Provider = to + } + + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + if isCloudMode(b) { + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags = c.MaybeGetSchemas(state, nil) + diags = diags.Append(schemaDiags) + } + + // Write the updated state + if err := stateMgr.WriteState(state); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + + c.showDiagnostics(diags) + c.Ui.Output(fmt.Sprintf("\nSuccessfully replaced provider for %d resources.", len(willReplace))) + return 0 +} + +func (c *StateReplaceProviderCommand) Help() string { + helpText := ` +Usage: tofu [global options] state replace-provider [options] FROM_PROVIDER_FQN TO_PROVIDER_FQN + + Replace provider for resources in the OpenTofu state. + +Options: + + -auto-approve Skip interactive approval. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -ignore-remote-version A rare option used for the remote backend only. See + the remote backend documentation for more information. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. 
+ Use this option more than once to include more than one + variables file. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateReplaceProviderCommand) Synopsis() string { + return "Replace provider in the state" +} diff --git a/pkg/command/state_replace_provider_test.go b/pkg/command/state_replace_provider_test.go new file mode 100644 index 00000000000..bf7e5ded4bd --- /dev/null +++ b/pkg/command/state_replace_provider_test.go @@ -0,0 +1,427 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "path/filepath" + "strings" + "testing" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestStateReplaceProvider(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "alpha", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"alpha","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "beta", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"beta","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ 
+ Mode: addrs.ManagedResourceMode, + Type: "azurerm_virtual_machine", + Name: "gamma", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"gamma","baz":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewLegacyProvider("azurerm"), + Module: addrs.RootModule, + }, + ) + }) + + t.Run("happy path", func(t *testing.T) { + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + inputBuf := &bytes.Buffer{} + ui.InputReader = inputBuf + inputBuf.WriteString("yes\n") + + args := []string{ + "-state", statePath, + "hashicorp/aws", + "acmecorp/aws", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testStateReplaceProviderOutput) + + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("unexpected backups: %#v", backups) + } + testStateOutput(t, backups[0], testStateReplaceProviderOutputOriginal) + }) + + t.Run("auto approve", func(t *testing.T) { + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + inputBuf := &bytes.Buffer{} + ui.InputReader = inputBuf + + args := []string{ + "-state", statePath, + "-auto-approve", + "hashicorp/aws", + "acmecorp/aws", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testStateReplaceProviderOutput) + + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("unexpected backups: %#v", backups) + } + testStateOutput(t, backups[0], testStateReplaceProviderOutputOriginal) + }) + + 
t.Run("cancel at approval step", func(t *testing.T) { + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + inputBuf := &bytes.Buffer{} + ui.InputReader = inputBuf + inputBuf.WriteString("no\n") + + args := []string{ + "-state", statePath, + "hashicorp/aws", + "acmecorp/aws", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testStateReplaceProviderOutputOriginal) + + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 0 { + t.Fatalf("unexpected backups: %#v", backups) + } + }) + + t.Run("no matching provider found", func(t *testing.T) { + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "hashicorp/google", + "acmecorp/google", + } + if code := c.Run(args); code != 0 { + t.Fatalf("return code: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testStateReplaceProviderOutputOriginal) + + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 0 { + t.Fatalf("unexpected backups: %#v", backups) + } + }) + + t.Run("invalid flags", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-invalid", + "hashicorp/google", + "acmecorp/google", + } + if code := c.Run(args); code == 0 { + t.Fatalf("successful exit; want error") + } + + if got, want := ui.ErrorWriter.String(), "Error parsing command-line flags"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }) + + 
t.Run("wrong number of arguments", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + args := []string{"a", "b", "c", "d"} + if code := c.Run(args); code == 0 { + t.Fatalf("successful exit; want error") + } + + if got, want := ui.ErrorWriter.String(), "Exactly two arguments expected"; !strings.Contains(got, want) { + t.Fatalf("missing expected error message\nwant: %s\nfull output:\n%s", want, got) + } + }) + + t.Run("invalid provider strings", func(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "hashicorp/google_cloud", + "-/-/google", + } + if code := c.Run(args); code == 0 { + t.Fatalf("successful exit; want error") + } + + got := ui.ErrorWriter.String() + msgs := []string{ + `Invalid "from" provider "hashicorp/google_cloud"`, + "Invalid provider type", + `Invalid "to" provider "-/-/google"`, + "Invalid provider source hostname", + } + for _, msg := range msgs { + if !strings.Contains(got, msg) { + t.Errorf("missing expected error message\nwant: %s\nfull output:\n%s", msg, got) + } + } + }) +} + +func TestStateReplaceProvider_docs(t *testing.T) { + c := &StateReplaceProviderCommand{} + + if got, want := c.Help(), "Usage: tofu [global options] state replace-provider"; !strings.Contains(got, want) { + t.Fatalf("unexpected help text\nwant: %s\nfull output:\n%s", want, got) + } + + if got, want := c.Synopsis(), "Replace provider in the state"; got != want { + t.Fatalf("unexpected synopsis\nwant: %s\nfull output:\n%s", want, got) + } +} + +func TestStateReplaceProvider_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + state := 
states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "alpha", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"alpha","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "beta", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"beta","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "azurerm_virtual_machine", + Name: "gamma", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"gamma","baz":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewLegacyProvider("azurerm"), + Module: addrs.RootModule, + }, + ) + }) + + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateReplaceProviderCommand{ + StateMeta{ + Meta: Meta{ + Ui: ui, + View: view, + }, + }, + } + + inputBuf := &bytes.Buffer{} + ui.InputReader = inputBuf + inputBuf.WriteString("yes\n") + + args := []string{ + "-state", statePath, + "hashicorp/aws", + "acmecorp/aws", + } + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // State is unchanged + testStateOutput(t, statePath, testStateReplaceProviderOutputOriginal) + + 
// Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +const testStateReplaceProviderOutputOriginal = ` +aws_instance.alpha: + ID = alpha + provider = provider["registry.opentofu.org/hashicorp/aws"] + bar = value + foo = value +aws_instance.beta: + ID = beta + provider = provider["registry.opentofu.org/hashicorp/aws"] + bar = value + foo = value +azurerm_virtual_machine.gamma: + ID = gamma + provider = provider["registry.opentofu.org/-/azurerm"] + baz = value +` + +const testStateReplaceProviderOutput = ` +aws_instance.alpha: + ID = alpha + provider = provider["registry.opentofu.org/acmecorp/aws"] + bar = value + foo = value +aws_instance.beta: + ID = beta + provider = provider["registry.opentofu.org/acmecorp/aws"] + bar = value + foo = value +azurerm_virtual_machine.gamma: + ID = gamma + provider = provider["registry.opentofu.org/-/azurerm"] + baz = value +` diff --git a/pkg/command/state_rm.go b/pkg/command/state_rm.go new file mode 100644 index 00000000000..8ad89f39de6 --- /dev/null +++ b/pkg/command/state_rm.go @@ -0,0 +1,228 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// StateRmCommand is a Command implementation that shows a single resource. 
+type StateRmCommand struct { + StateMeta +} + +func (c *StateRmCommand) Run(args []string) int { + args = c.Meta.process(args) + var dryRun bool + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state rm") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.statePath, "state", "", "path") + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + args = cmdFlags.Args() + if len(args) < 1 { + c.Ui.Error("At least one address is required.\n") + return cli.RunResultHelp + } + + if diags := c.Meta.checkRequiredVersion(); diags != nil { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + // Get the state + stateMgr, err := c.State(enc) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateMgr, "state-rm"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) + return 1 + } + + state := stateMgr.State() + if state == nil { + c.Ui.Error(errStateNotFound) + return 1 + } + + // This command primarily works with resource instances, though it will + // also clean up any modules and resources left empty by actions it takes. 
+ var addrs []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, addrStr := range args { + moreAddrs, moreDiags := c.lookupResourceInstanceAddr(state, true, addrStr) + addrs = append(addrs, moreAddrs...) + diags = diags.Append(moreDiags) + } + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + prefix := "Removed " + if dryRun { + prefix = "Would remove " + } + + var isCount int + ss := state.SyncWrapper() + for _, addr := range addrs { + isCount++ + c.Ui.Output(prefix + addr.String()) + if !dryRun { + ss.ForgetResourceInstanceAll(addr) + ss.RemoveResourceIfEmpty(addr.ContainingResource()) + } + } + + if dryRun { + if isCount == 0 { + c.Ui.Output("Would have removed nothing.") + } + return 0 // This is as far as we go in dry-run mode + } + + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + if isCloudMode(b) { + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags = c.MaybeGetSchemas(state, nil) + diags = diags.Append(schemaDiags) + } + + if err := stateMgr.WriteState(state); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + + if len(diags) > 0 && isCount != 0 { + c.showDiagnostics(diags) + } + + if isCount == 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid target address", + "No matching objects found. To view the available instances, use \"tofu state list\". 
Please modify the address to reference a specific instance.", + )) + c.showDiagnostics(diags) + return 1 + } + + c.Ui.Output(fmt.Sprintf("Successfully removed %d resource instance(s).", isCount)) + return 0 +} + +func (c *StateRmCommand) Help() string { + helpText := ` +Usage: tofu [global options] state (remove|rm) [options] ADDRESS... + + Remove one or more items from the OpenTofu state, causing OpenTofu to + "forget" those items without first destroying them in the remote system. + + This command removes one or more resource instances from the OpenTofu state + based on the addresses given. You can view and list the available instances + with "tofu state list". + + If you give the address of an entire module then all of the instances in + that module and any of its child modules will be removed from the state. + + If you give the address of a resource that has "count" or "for_each" set, + all of the instances of that resource will be removed from the state. + +Options: + + -dry-run If set, prints out what would've been removed but + doesn't actually remove anything. + + -backup=PATH Path where OpenTofu should write the backup + state. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -state=PATH Path to the state file to update. Defaults to the + current workspace state. + + -ignore-remote-version Continue even if remote and local OpenTofu versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. 
+ Use this option more than once to include more than one + variables file. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateRmCommand) Synopsis() string { + return "Remove instances from the state" +} + +const errStateRmPersist = `Error saving the state: %s + +The state was not saved. No items were removed from the persisted +state. No backup was created since no modification occurred. Please +resolve the issue above and try again.` diff --git a/pkg/command/state_rm_test.go b/pkg/command/state_rm_test.go new file mode 100644 index 00000000000..ace28af95a3 --- /dev/null +++ b/pkg/command/state_rm_test.go @@ -0,0 +1,584 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestStateRm(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, 
state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateRmOutput) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], testStateRmOutputOriginal) +} + +func TestStateRmNotChildModule(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // This second instance has the same local address as the first but + // is in a child module. Older versions of Terraform would incorrectly + // remove this one too, since they failed to check the module address. 
+ s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, ` + +module.child: + test_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) + + // Test we have backups + backups := testStateBackups(t, filepath.Dir(statePath)) + if len(backups) != 1 { + t.Fatalf("bad: %#v", backups) + } + testStateOutput(t, backups[0], ` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value + +module.child: + test_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +`) +} + +func TestStateRmNoArgs(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + 
Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code == 0 { + t.Errorf("expected non-zero exit code, got: %d", code) + } + + if msg := ui.ErrorWriter.String(); !strings.Contains(msg, "At least one address") { + t.Errorf("not the error we were looking for:\n%s", msg) + } + +} + +func TestStateRmNonExist(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: 
addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.baz", // doesn't exist in the state constructed above + } + if code := c.Run(args); code != 1 { + t.Fatalf("expected exit status %d, got: %d", 1, code) + } +} + +func TestStateRm_backupExplicit(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + backupPath := statePath + ".backup.test" + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, 
ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateRmOutput) + + // Test backup + testStateOutput(t, backupPath, testStateRmOutputOriginal) +} + +func TestStateRm_noState(t *testing.T) { + testCwd(t) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{"foo"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestStateRm_needsInit(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-change"), td) + defer testChdir(t, td)() + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{"foo"} + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), "Backend initialization") { + t.Fatalf("expected initialization error, got:\n%s", ui.ErrorWriter.String()) + } +} + +func TestStateRm_backendState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-unchanged"), td) + defer testChdir(t, td)() + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: 
"test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + statePath := "local-state.tfstate" + backupPath := "local-state.backup" + + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("failed to create state file %s: %s", statePath, err) + } + defer f.Close() + + err = writeStateForTesting(state, f) + if err != nil { + t.Fatalf("failed to write state to file %s: %s", statePath, err) + } + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-backup", backupPath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test it is correct + testStateOutput(t, statePath, testStateRmOutput) + + // Test backup + testStateOutput(t, backupPath, testStateRmOutputOriginal) +} + +func TestStateRm_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + 
addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &StateRmCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // State is unchanged + testStateOutput(t, statePath, testStateRmOutputOriginal) + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +const testStateRmOutputOriginal = ` +test_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` + +const testStateRmOutput = ` +test_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + bar = value + foo = value +` diff --git a/pkg/command/state_show.go b/pkg/command/state_show.go new file mode 100644 index 00000000000..542686da155 --- /dev/null +++ b/pkg/command/state_show.go @@ -0,0 +1,246 @@ +// 
Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/cli"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/backend"
+	"github.com/kubegems/opentofu/pkg/command/arguments"
+	"github.com/kubegems/opentofu/pkg/command/jsonformat"
+	"github.com/kubegems/opentofu/pkg/command/jsonprovider"
+	"github.com/kubegems/opentofu/pkg/command/jsonstate"
+	"github.com/kubegems/opentofu/pkg/states"
+	"github.com/kubegems/opentofu/pkg/states/statefile"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+	"github.com/kubegems/opentofu/pkg/tofumigrate"
+)
+
+// StateShowCommand is a Command implementation that shows a single resource.
+type StateShowCommand struct {
+	Meta
+	StateMeta
+}
+
+// Run expects exactly one resource instance address argument, looks that
+// instance up in the current workspace's state, and renders it in
+// human-readable form. It returns 0 on success and 1 on any error.
+func (c *StateShowCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	cmdFlags := c.Meta.defaultFlagSet("state show")
+	c.Meta.varFlagSet(cmdFlags)
+	cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path")
+
+	showSensitive := false
+	cmdFlags.BoolVar(&showSensitive, "show-sensitive", false, "displays sensitive values")
+
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Streams.Eprintf("Error parsing command-line flags: %s\n", err.Error())
+		return 1
+	}
+	args = cmdFlags.Args()
+	if len(args) != 1 {
+		c.Streams.Eprint("Exactly one argument expected.\n")
+		return cli.RunResultHelp
+	}
+
+	// Check for user-supplied plugin path
+	var err error
+	if c.pluginPath, err = c.loadPluginPath(); err != nil {
+		// NOTE: the verb here was previously a bare "%", which is not a
+		// valid fmt directive and would print "%!(NOVERB)" instead of err.
+		c.Streams.Eprintf("Error loading plugin path: %s\n", err)
+		return 1
+	}
+
+	// Load the encryption configuration
+	enc, encDiags := c.Encryption()
+	if encDiags.HasErrors() {
+		c.showDiagnostics(encDiags)
+		return 1
+	}
+
+	// Load the backend
+	b, backendDiags := c.Backend(nil, enc.State())
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(backendDiags)
+		return 1
+	}
+
+	// We require a local backend
+	local, ok := b.(backend.Local)
+	if !ok {
+		c.Streams.Eprint(ErrUnsupportedLocalOp)
+		return 1
+	}
+
+	// This is a read-only command
+	c.ignoreRemoteVersionConflict(b)
+
+	// Check if the address can be parsed
+	addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0])
+	if addrDiags.HasErrors() {
+		c.Streams.Eprintln(fmt.Sprintf(errParsingAddress, args[0]))
+		return 1
+	}
+
+	// We expect the config dir to always be the cwd
+	cwd, err := os.Getwd()
+	if err != nil {
+		c.Streams.Eprintf("Error getting cwd: %s\n", err)
+		return 1
+	}
+
+	// Build the operation (required to get the schemas)
+	opReq := c.Operation(b, arguments.ViewHuman, enc)
+	opReq.AllowUnsetVariables = true
+	opReq.ConfigDir = cwd
+	var callDiags tfdiags.Diagnostics
+	opReq.RootCall, callDiags = c.rootModuleCall(opReq.ConfigDir)
+	if callDiags.HasErrors() {
+		c.showDiagnostics(callDiags)
+		return 1
+	}
+
+	opReq.ConfigLoader, err = c.initConfigLoader()
+	if err != nil {
+		c.Streams.Eprintf("Error initializing config loader: %s\n", err)
+		return 1
+	}
+
+	// Get the context (required to get the schemas)
+	lr, _, ctxDiags := local.LocalRun(opReq)
+	if ctxDiags.HasErrors() {
+		c.View.Diagnostics(ctxDiags)
+		return 1
+	}
+
+	// Get the schemas from the context
+	schemas, diags := lr.Core.Schemas(lr.Config, lr.InputState)
+	if diags.HasErrors() {
+		c.View.Diagnostics(diags)
+		return 1
+	}
+
+	// Get the state
+	env, err := c.Workspace()
+	if err != nil {
+		c.Streams.Eprintf("Error selecting workspace: %s\n", err)
+		return 1
+	}
+	stateMgr, err := b.StateMgr(env)
+	if err != nil {
+		c.Streams.Eprintln(fmt.Sprintf(errStateLoadingState, err))
+		return 1
+	}
+	if err := stateMgr.RefreshState(); err != nil {
+		c.Streams.Eprintf("Failed to refresh state: %s\n", err)
+		return 1
+	}
+
+	state := stateMgr.State()
+	if state == nil {
+		c.Streams.Eprintln(errStateNotFound)
+		return 1
+	}
+	migratedState, migrateDiags := tofumigrate.MigrateStateProviderAddresses(lr.Config, state)
+	diags = diags.Append(migrateDiags)
+	if migrateDiags.HasErrors() {
+		c.View.Diagnostics(diags)
+		return 1
+	}
+	state = migratedState
+
+	is := state.ResourceInstance(addr)
+	if !is.HasCurrent() {
+		c.Streams.Eprintln(errNoInstanceFound)
+		return 1
+	}
+
+	// check if the resource has a configured provider, otherwise this will use the default provider
+	rs := state.Resource(addr.ContainingResource())
+	absPc := addrs.AbsProviderConfig{
+		Provider: rs.ProviderConfig.Provider,
+		Alias:    rs.ProviderConfig.Alias,
+		Module:   addrs.RootModule,
+	}
+	// Build a synthetic state holding only the requested instance so the
+	// renderer shows just that one resource.
+	singleInstance := states.NewState()
+	singleInstance.EnsureModule(addr.Module).SetResourceInstanceCurrent(
+		addr.Resource,
+		is.Current,
+		absPc,
+	)
+
+	root, outputs, err := jsonstate.MarshalForRenderer(statefile.New(singleInstance, "", 0), schemas)
+	if err != nil {
+		c.Streams.Eprintf("Failed to marshal state to json: %s", err)
+	}
+
+	jstate := jsonformat.State{
+		StateFormatVersion:    jsonstate.FormatVersion,
+		ProviderFormatVersion: jsonprovider.FormatVersion,
+		RootModule:            root,
+		RootModuleOutputs:     outputs,
+		ProviderSchemas:       jsonprovider.MarshalForRenderer(schemas),
+	}
+
+	renderer := jsonformat.Renderer{
+		Streams:             c.Streams,
+		Colorize:            c.Colorize(),
+		RunningInAutomation: c.RunningInAutomation,
+		ShowSensitive:       showSensitive,
+	}
+
+	renderer.RenderHumanState(jstate)
+	return 0
+}
+
+// Help returns the long-form usage text for "tofu state show".
+func (c *StateShowCommand) Help() string {
+	helpText := `
+Usage: tofu [global options] state show [options] ADDRESS
+
+  Shows the attributes of a resource in the OpenTofu state.
+
+  This command shows the attributes of a single resource in the OpenTofu
+  state. The address argument must be used to specify a single resource.
+  You can view the list of available resources with "tofu state list".
+
+Options:
+
+  -state=statefile    Path to a OpenTofu state file to use to look
+                      up OpenTofu-managed resources. By default it will
+                      use the state "terraform.tfstate" if it exists.
+
+  -show-sensitive     If specified, sensitive values will be displayed.
+
+  -var 'foo=bar'      Set a value for one of the input variables in the root
+                      module of the configuration. Use this option more than
+                      once to set more than one variable.
+
+  -var-file=filename  Load variable values from the given file, in addition
+                      to the default files terraform.tfvars and *.auto.tfvars.
+                      Use this option more than once to include more than one
+                      variables file.
+
+`
+	return strings.TrimSpace(helpText)
+}
+
+// Synopsis returns the one-line description shown in the command index.
+func (c *StateShowCommand) Synopsis() string {
+	return "Show a resource in the state"
+}
+
+const errNoInstanceFound = `No instance found for the given address!
+
+This command requires that the address references one specific instance.
+To view the available instances, use "tofu state list". Please modify
+the address to reference a specific instance.`
+
+const errParsingAddress = `Error parsing instance address: %s
+
+This command requires that the address references one specific instance.
+To view the available instances, use "tofu state list". Please modify
+the address to reference a specific instance.`
diff --git a/pkg/command/state_show_test.go b/pkg/command/state_show_test.go
new file mode 100644
index 00000000000..61db9a8ad42
--- /dev/null
+++ b/pkg/command/state_show_test.go
@@ -0,0 +1,398 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/zclconf/go-cty/cty" +) + +func TestStateShow(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateShowOutput) + "\n" + actual := output.Stdout() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal:\n%q", actual, expected) + } +} + +func TestStateShow_multi(t 
*testing.T) { + submod, _ := addrs.ParseModuleInstanceStr("module.sub") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(submod), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: submod.Module(), + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateShowOutput) + "\n" + actual := output.Stdout() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal:\n%q", actual, 
expected) + } +} + +func TestStateShow_noState(t *testing.T) { + testCwd(t) + + p := testProvider() + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + output := done(t) + if !strings.Contains(output.Stderr(), "No state file was found!") { + t.Fatalf("expected a no state file error, got: %s", output.Stderr()) + } +} + +func TestStateShow_emptyState(t *testing.T) { + state := states.NewState() + statePath := testStateFile(t, state) + + p := testProvider() + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + output := done(t) + if !strings.Contains(output.Stderr(), "No instance found for the given address!") { + t.Fatalf("expected a no instance found error, got: %s", output.Stderr()) + } +} + +func TestStateShow_configured_provider(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test-beta"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: &testingOverrides{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test-beta"): providers.FactoryFixed(p), + }, + }, + Streams: streams, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: %d\n\n%s", code, output.Stderr()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateShowOutput) + "\n" + actual := output.Stdout() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal:\n%q", actual, expected) + } +} + +func TestStateShow_withoutShowSensitiveArg(t *testing.T) { + state := stateWithSensitiveValueForStateShow() + statePath := testStateFile(t, state) + + p := testProvider() + p.GetProviderSchemaResponse = providerWithSensitiveValueForStateShow() + + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + expected := `# test_instance.foo: +resource "test_instance" "foo" { + bar = "value" + foo = "value" + id = (sensitive value) +}` + actual := strings.TrimSpace(output.Stdout()) + if diff := cmp.Diff(actual, expected); len(diff) > 0 { + t.Fatalf("got incorrect output\n %v", diff) + } +} + +func TestStateShow_showSensitiveArg(t *testing.T) { + state := stateWithSensitiveValueForStateShow() + statePath := testStateFile(t, state) + + p := testProvider() + p.GetProviderSchemaResponse = 
providerWithSensitiveValueForStateShow() + + streams, done := terminal.StreamsForTesting(t) + c := &StateShowCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Streams: streams, + }, + } + + args := []string{ + "-show-sensitive", + "-state", statePath, + "test_instance.foo", + } + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) + } + + expected := `# test_instance.foo: +resource "test_instance" "foo" { + bar = "value" + foo = "value" + id = "bar" +}` + actual := strings.TrimSpace(output.Stdout()) + if diff := cmp.Diff(actual, expected); len(diff) > 0 { + t.Fatalf("got incorrect output\n %v", diff) + } +} + +// stateWithSensitiveValueForStateShow returns a state with a resource +// instance. +func stateWithSensitiveValueForStateShow() *states.State { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + return state +} + +// providerWithSensitiveValueForStateShow returns a provider schema response +// with the "id" attribute flagged as sensitive. 
+func providerWithSensitiveValueForStateShow() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true, Sensitive: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} + +const testStateShowOutput = ` +# test_instance.foo: +resource "test_instance" "foo" { + bar = "value" + foo = "value" + id = "bar" +} +` diff --git a/pkg/command/state_test.go b/pkg/command/state_test.go new file mode 100644 index 00000000000..a2d0042d7dc --- /dev/null +++ b/pkg/command/state_test.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "path/filepath" + "regexp" + "sort" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// testStateBackups returns the list of backups in order of creation +// (oldest first) in the given directory. 
+func testStateBackups(t *testing.T, dir string) []string { + // Find all the backups + list, err := filepath.Glob(filepath.Join(dir, "*"+DefaultBackupExtension)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Sort them which will put them naturally in the right order + sort.Strings(list) + + return list +} + +func TestStateDefaultBackupExtension(t *testing.T) { + testCwd(t) + + s, err := (&StateMeta{}).State(encryption.Disabled()) + if err != nil { + t.Fatal(err) + } + + backupPath := s.(*statemgr.Filesystem).BackupPath() + match := regexp.MustCompile(`terraform\.tfstate\.\d+\.backup$`).MatchString + if !match(backupPath) { + t.Fatal("Bad backup path:", backupPath) + } +} diff --git a/pkg/command/taint.go b/pkg/command/taint.go new file mode 100644 index 00000000000..bf425242cbe --- /dev/null +++ b/pkg/command/taint.go @@ -0,0 +1,271 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// TaintCommand is a cli.Command implementation that manually taints +// a resource, marking it for recreation. 
+type TaintCommand struct {
+	Meta
+}
+
+// Run expects exactly one managed-resource instance address argument and
+// marks that instance's current object as tainted in the state, so that the
+// next plan proposes to replace it. Returns 0 on success (including the
+// -allow-missing case) and 1 on any error.
+func (c *TaintCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	var allowMissing bool
+	cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("taint")
+	cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "allow missing")
+	cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path")
+	cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state")
+	cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout")
+	cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path")
+	cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path")
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	// Require the one argument for the resource to taint
+	args = cmdFlags.Args()
+	if len(args) != 1 {
+		c.Ui.Error("The taint command expects exactly one argument.")
+		cmdFlags.Usage()
+		return 1
+	}
+
+	addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0])
+	diags = diags.Append(addrDiags)
+	if addrDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Only managed resources can be tainted; data resources have no
+	// create/destroy lifecycle to re-run.
+	if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
+		c.Ui.Error(fmt.Sprintf("Resource instance %s cannot be tainted", addr))
+		return 1
+	}
+
+	if diags := c.Meta.checkRequiredVersion(); diags != nil {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Load the encryption configuration
+	enc, encDiags := c.Encryption()
+	if encDiags.HasErrors() {
+		c.showDiagnostics(encDiags)
+		return 1
+	}
+
+	// Load the backend
+	b, backendDiags := c.Backend(nil, enc.State())
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Determine the workspace name
+	workspace, err := c.Workspace()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err))
+		return 1
+	}
+
+	// Check remote OpenTofu version is compatible
+	remoteVersionDiags := c.remoteVersionCheck(b, workspace)
+	diags = diags.Append(remoteVersionDiags)
+	c.showDiagnostics(diags)
+	if diags.HasErrors() {
+		return 1
+	}
+
+	// Get the state
+	stateMgr, err := b.StateMgr(workspace)
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
+		return 1
+	}
+
+	if c.stateLock {
+		stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View))
+		if diags := stateLocker.Lock(stateMgr, "taint"); diags.HasErrors() {
+			c.showDiagnostics(diags)
+			return 1
+		}
+		defer func() {
+			if diags := stateLocker.Unlock(); diags.HasErrors() {
+				c.showDiagnostics(diags)
+			}
+		}()
+	}
+
+	if err := stateMgr.RefreshState(); err != nil {
+		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
+		return 1
+	}
+
+	// Get the actual state structure
+	state := stateMgr.State()
+	if state.Empty() {
+		if allowMissing {
+			return c.allowMissingExit(addr)
+		}
+
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"No such resource instance",
+			"The state currently contains no resource instances whatsoever. This may occur if the configuration has never been applied or if it has recently been destroyed.",
+		))
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Get schemas, if possible, before writing state
+	var schemas *tofu.Schemas
+	if isCloudMode(b) {
+		var schemaDiags tfdiags.Diagnostics
+		schemas, schemaDiags = c.MaybeGetSchemas(state, nil)
+		diags = diags.Append(schemaDiags)
+	}
+
+	ss := state.SyncWrapper()
+
+	// Get the resource and instance we're going to taint
+	rs := ss.Resource(addr.ContainingResource())
+	is := ss.ResourceInstance(addr)
+	if is == nil {
+		if allowMissing {
+			return c.allowMissingExit(addr)
+		}
+
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"No such resource instance",
+			fmt.Sprintf("There is no resource instance in the state with the address %s. If the resource configuration has just been added, you must run \"tofu apply\" once to create the corresponding instance(s) before they can be tainted.", addr),
+		))
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	obj := is.Current
+	if obj == nil {
+		if len(is.Deposed) != 0 {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"No such resource instance",
+				fmt.Sprintf("Resource instance %s is currently part-way through a create_before_destroy replacement action. Run \"tofu apply\" to complete its replacement before tainting it.", addr),
+			))
+		} else {
+			// Don't know why we're here, but we'll produce a generic error message anyway.
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"No such resource instance",
+				fmt.Sprintf("Resource instance %s does not currently have a remote object associated with it, so it cannot be tainted.", addr),
+			))
+		}
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	obj.Status = states.ObjectTainted
+	ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig)
+
+	if err := stateMgr.WriteState(state); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err))
+		return 1
+	}
+	if err := stateMgr.PersistState(schemas); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err))
+		return 1
+	}
+
+	c.showDiagnostics(diags)
+	c.Ui.Output(fmt.Sprintf("Resource instance %s has been marked as tainted.", addr))
+	return 0
+}
+
+// Help returns the long-form usage text for "tofu taint".
+func (c *TaintCommand) Help() string {
+	// NOTE: the usage line previously lacked the <address> argument
+	// (likely stripped as markup); restored to match the command's
+	// one-required-argument contract above.
+	helpText := `
+Usage: tofu [global options] taint [options] <address>
+
+  OpenTofu uses the term "tainted" to describe a resource instance
+  which may not be fully functional, either because its creation
+  partially failed or because you've manually marked it as such using
+  this command.
+
+  This will not modify your infrastructure directly, but subsequent
+  OpenTofu plans will include actions to destroy the remote object
+  and create a new object to replace it.
+
+  You can remove the "taint" state from a resource instance using
+  the "tofu untaint" command.
+
+  The address is in the usual resource address syntax, such as:
+    aws_instance.foo
+    aws_instance.bar[1]
+    module.foo.module.bar.aws_instance.baz
+
+  Use your shell's quoting or escaping syntax to ensure that the
+  address will reach OpenTofu correctly, without any special
+  interpretation.
+
+Options:
+
+  -allow-missing          If specified, the command will succeed (exit code 0)
+                          even if the resource is missing.
+
+  -lock=false             Don't hold a state lock during the operation. This is
+                          dangerous if others might concurrently run commands
+                          against the same workspace.
+
+  -lock-timeout=0s        Duration to retry a state lock.
+
+  -ignore-remote-version  A rare option used for the remote backend only. See
+                          the remote backend documentation for more information.
+
+  -var 'foo=bar'          Set a value for one of the input variables in the root
+                          module of the configuration. Use this option more than
+                          once to set more than one variable.
+
+  -var-file=filename      Load variable values from the given file, in addition
+                          to the default files terraform.tfvars and *.auto.tfvars.
+                          Use this option more than once to include more than one
+                          variables file.
+
+  -state, state-out, and -backup are legacy options supported for the local
+  backend only. For more information, see the local backend's documentation.
+
+`
+	return strings.TrimSpace(helpText)
+}
+
+// Synopsis returns the one-line description shown in the command index.
+func (c *TaintCommand) Synopsis() string {
+	return "Mark a resource instance as not fully functional"
+}
+
+// allowMissingExit emits the -allow-missing warning for a missing instance
+// and returns the success exit code.
+func (c *TaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int {
+	c.showDiagnostics(tfdiags.Sourceless(
+		tfdiags.Warning,
+		"No such resource instance",
+		fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name),
+	))
+	return 0
+}
diff --git a/pkg/command/taint_test.go b/pkg/command/taint_test.go
new file mode 100644
index 00000000000..9126e70967f
--- /dev/null
+++ b/pkg/command/taint_test.go
@@ -0,0 +1,569 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/mitchellh/cli"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/states"
+)
+
+func TestTaint(t *testing.T) {
+	state := states.BuildState(func(s *states.SyncState) {
+		s.SetResourceInstanceCurrent(
+			addrs.Resource{
+				Mode: addrs.ManagedResourceMode,
+				Type: "test_instance",
+				Name: "foo",
+			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
+			&states.ResourceInstanceObjectSrc{
+				AttrsJSON: []byte(`{"id":"bar"}`),
+				Status:    states.ObjectReady,
+			},
+			addrs.AbsProviderConfig{
+				Provider: addrs.NewDefaultProvider("test"),
+				Module:   addrs.RootModule,
+			},
+		)
+	})
+	statePath := testStateFile(t, state)
+
+	ui := new(cli.MockUi)
+	view, _ := testView(t)
+	c := &TaintCommand{
+		Meta: Meta{
+			Ui:   ui,
+			View: view,
+		},
+	}
+
+	args := []string{
+		"-state", statePath,
+		"test_instance.foo",
+	}
+	if code := c.Run(args); code != 0 {
+		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+	}
+
+	testStateOutput(t, statePath, testTaintStr)
+}
+
+func TestTaint_lockedState(t *testing.T) {
+	state := states.BuildState(func(s *states.SyncState)
{ + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code == 0 { + t.Fatal("expected error") + } + + output := ui.ErrorWriter.String() + if !strings.Contains(output, "lock") { + t.Fatal("command output does not look like a lock error:", output) + } +} + +func TestTaint_backup(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, DefaultStateFilename+".backup", testTaintDefaultStr) + testStateOutput(t, DefaultStateFilename, testTaintStr) +} + +func 
TestTaint_backupDisable(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-backup", "-", + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if _, err := os.Stat(DefaultStateFilename + ".backup"); err == nil { + t.Fatal("backup path should not exist") + } + + testStateOutput(t, DefaultStateFilename, testTaintStr) +} + +func TestTaint_badState(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", "i-should-not-exist-ever", + "foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestTaint_defaultState(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + 
testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, DefaultStateFilename, testTaintStr) +} + +func TestTaint_defaultWorkspaceState(t *testing.T) { + // Get a temp cwd + testCwd(t) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testWorkspace := "development" + path := testStateFileWorkspaceDefault(t, testWorkspace, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + meta := Meta{Ui: ui, View: view} + meta.SetWorkspace(testWorkspace) + c := &TaintCommand{ + Meta: meta, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, path, testTaintStr) +} + +func TestTaint_missing(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ 
+ Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.OutputWriter.String()) + } +} + +func TestTaint_missingAllow(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-allow-missing", + "-state", statePath, + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Check for the warning + actual := strings.TrimSpace(ui.ErrorWriter.String()) + expected := strings.TrimSpace(` +Warning: No such resource instance + +Resource instance test_instance.bar was not found, but this is not an error +because -allow-missing was set. 
+ +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } +} + +func TestTaint_stateOut(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state-out", "foo", + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, DefaultStateFilename, testTaintDefaultStr) + testStateOutput(t, "foo", testTaintStr) +} + +func TestTaint_module(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "blah", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"blah"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: 
addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "module.child.test_instance.blah", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, testTaintModuleStr) +} + +func TestTaint_checkRequiredVersion(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("command-check-required-version"), td) + defer testChdir(t, td)() + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + path := testStateFile(t, state) + + ui := cli.NewMockUi() + view, _ := testView(t) + c := &TaintCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + View: view, + }, + } + + args := []string{"test_instance.foo"} + if code := c.Run(args); code != 1 { + t.Fatalf("got exit status %d; want 1\nstderr:\n%s\n\nstdout:\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String()) + } + + // State is unchanged + testStateOutput(t, path, testTaintDefaultStr) + + // Required version diags are correct + errStr := ui.ErrorWriter.String() + if !strings.Contains(errStr, `required_version = "~> 0.9.0"`) { + t.Fatalf("output should point to unmet version constraint, but is:\n\n%s", errStr) + } + if strings.Contains(errStr, `required_version = ">= 
0.13.0"`) { + t.Fatalf("output should not point to met version constraint, but is:\n\n%s", errStr) + } +} + +const testTaintStr = ` +test_instance.foo: (tainted) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] +` + +const testTaintDefaultStr = ` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] +` + +const testTaintModuleStr = ` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + +module.child: + test_instance.blah: (tainted) + ID = blah + provider = provider["registry.opentofu.org/hashicorp/test"] +` diff --git a/pkg/command/telemetry.go b/pkg/command/telemetry.go new file mode 100644 index 00000000000..810d527f566 --- /dev/null +++ b/pkg/command/telemetry.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +var tracer trace.Tracer + +func init() { + tracer = otel.Tracer("github.com/kubegems/opentofu/pkg/command") +} diff --git a/pkg/command/test.go b/pkg/command/test.go new file mode 100644 index 00000000000..95a18523a4f --- /dev/null +++ b/pkg/command/test.go @@ -0,0 +1,1256 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "log" + "path" + "sort" + "strings" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "golang.org/x/exp/slices" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/moduletest" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +const ( + MainStateIdentifier = "" +) + +type TestCommand struct { + Meta +} + +func (c *TestCommand) Help() string { + helpText := ` +Usage: tofu [global options] test [options] + + Executes automated integration tests against the current OpenTofu + configuration. + + OpenTofu will search for .tftest.hcl files within the current configuration + and testing directories. OpenTofu will then execute the testing run blocks + within any testing files in order, and verify conditional checks and + assertions against the created infrastructure. + + This command creates real infrastructure and will attempt to clean up the + testing infrastructure on completion. Monitor the output carefully to ensure + this cleanup process is successful. + +Options: + + -filter=testfile If specified, OpenTofu will only execute the test files + specified by this flag. You can use this option multiple + times to execute more than one test file. + + -json If specified, machine readable output will be printed in + JSON format + + -no-color If specified, output won't contain any color. + + -test-directory=path Set the OpenTofu test directory, defaults to "tests". 
When set, the + test command will search for test files in the current directory and + in the one specified by the flag. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + + -verbose Print the plan or state for each test run block as it + executes. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` + return strings.TrimSpace(helpText) +} + +func (c *TestCommand) Synopsis() string { + return "Execute integration tests for OpenTofu modules" +} + +func (c *TestCommand) Run(rawArgs []string) int { + var diags tfdiags.Diagnostics + + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + args, diags := arguments.ParseTest(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("test") + return 1 + } + + view := views.NewTest(args.ViewType, c.View) + + // Users can also specify variables via the command line, so we'll parse + // all that here. 
+ var items []rawFlag + for _, variable := range args.Vars.All() { + items = append(items, rawFlag{ + Name: variable.Name, + Value: variable.Value, + }) + } + c.variableArgs = rawFlags{items: &items} + + variables, variableDiags := c.collectVariableValues() + diags = diags.Append(variableDiags) + if variableDiags.HasErrors() { + view.Diagnostics(nil, nil, diags) + return 1 + } + + config, configDiags := c.loadConfigWithTests(".", args.TestDirectory) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + view.Diagnostics(nil, nil, diags) + return 1 + } + + runCount := 0 + fileCount := 0 + + var fileDiags tfdiags.Diagnostics + suite := moduletest.Suite{ + Files: func() map[string]*moduletest.File { + files := make(map[string]*moduletest.File) + + if len(args.Filter) > 0 { + for _, name := range args.Filter { + file, ok := config.Module.Tests[name] + if !ok { + // If the filter is invalid, we'll simply skip this + // entry and print a warning. But we could still execute + // any other tests within the filter. + fileDiags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Unknown test file", + fmt.Sprintf("The specified test file, %s, could not be found.", name))) + continue + } + + fileCount++ + + var runs []*moduletest.Run + for ix, run := range file.Runs { + runs = append(runs, &moduletest.Run{ + Config: run, + Index: ix, + Name: run.Name, + }) + } + + runCount += len(runs) + files[name] = &moduletest.File{ + Config: file, + Name: name, + Runs: runs, + } + } + + return files + } + + // Otherwise, we'll just do all the tests in the directory! 
+ for name, file := range config.Module.Tests { + fileCount++ + + var runs []*moduletest.Run + for ix, run := range file.Runs { + runs = append(runs, &moduletest.Run{ + Config: run, + Index: ix, + Name: run.Name, + }) + } + + runCount += len(runs) + files[name] = &moduletest.File{ + Config: file, + Name: name, + Runs: runs, + } + } + return files + }(), + } + + log.Printf("[DEBUG] TestCommand: found %d files with %d run blocks", fileCount, runCount) + + diags = diags.Append(fileDiags) + if fileDiags.HasErrors() { + view.Diagnostics(nil, nil, diags) + return 1 + } + + opts, err := c.contextOpts() + if err != nil { + diags = diags.Append(err) + view.Diagnostics(nil, nil, diags) + return 1 + } + + // Don't use encryption during testing + opts.Encryption = encryption.Disabled() + + // Print out all the diagnostics we have from the setup. These will just be + // warnings, and we want them out of the way before we start the actual + // testing. + view.Diagnostics(nil, nil, diags) + + // We have two levels of interrupt here. A 'stop' and a 'cancel'. A 'stop' + // is a soft request to stop. We'll finish the current test, do the tidy up, + // but then skip all remaining tests and run blocks. A 'cancel' is a hard + // request to stop now. We'll cancel the current operation immediately + // even if it's a delete operation, and we won't clean up any infrastructure + // if we're halfway through a test. We'll print details explaining what was + // stopped so the user can do their best to recover from it. + + runningCtx, done := context.WithCancel(context.Background()) + stopCtx, stop := context.WithCancel(runningCtx) + cancelCtx, cancel := context.WithCancel(context.Background()) + + runner := &TestSuiteRunner{ + command: c, + + Suite: &suite, + Config: config, + View: view, + + GlobalVariables: variables, + Opts: opts, + + CancelledCtx: cancelCtx, + StoppedCtx: stopCtx, + + // Just to be explicit, we'll set the following fields even though they + // default to these values. 
+ Cancelled: false, + Stopped: false, + + Verbose: args.Verbose, + } + + view.Abstract(&suite) + + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer done() + defer stop() + defer cancel() + + runner.Start(variables) + }() + + // Wait for the operation to complete, or for an interrupt to occur. + select { + case <-c.ShutdownCh: + // Nice request to be cancelled. + + view.Interrupted() + runner.Stopped = true + stop() + + select { + case <-c.ShutdownCh: + // The user pressed it again, now we have to get it to stop as + // fast as possible. + + view.FatalInterrupt() + runner.Cancelled = true + cancel() + + // We'll wait 5 seconds for this operation to finish now, regardless + // of whether it finishes successfully or not. + select { + case <-runningCtx.Done(): + case <-time.After(5 * time.Second): + } + + case <-runningCtx.Done(): + // The application finished nicely after the request was stopped. + } + case <-runningCtx.Done(): + // tests finished normally with no interrupts. + } + + if runner.Cancelled { + // Don't print out the conclusion if the test was cancelled. + return 1 + } + + view.Conclusion(&suite) + + if suite.Status != moduletest.Pass { + return 1 + } + return 0 +} + +// test runner + +type TestSuiteRunner struct { + command *TestCommand + + Suite *moduletest.Suite + Config *configs.Config + + GlobalVariables map[string]backend.UnparsedVariableValue + Opts *tofu.ContextOpts + + View views.Test + + // Stopped and Cancelled track whether the user requested the testing + // process to be interrupted. Stopped is a nice graceful exit, we'll still + // tidy up any state that was created and mark the tests with relevant + // `skipped` status updates. Cancelled is a hard stop right now exit, we + // won't attempt to clean up any state left hanging, and tests will just + // be left showing `pending` as the status. 
We will still print out the + // destroy summary diagnostics that tell the user what state has been left + // behind and needs manual clean up. + Stopped bool + Cancelled bool + + // StoppedCtx and CancelledCtx allow in progress OpenTofu operations to + // respond to external calls from the test command. + StoppedCtx context.Context + CancelledCtx context.Context + + // Verbose tells the runner to print out plan files during each test run. + Verbose bool +} + +func (runner *TestSuiteRunner) Start(globals map[string]backend.UnparsedVariableValue) { + var files []string + for name := range runner.Suite.Files { + files = append(files, name) + } + sort.Strings(files) // execute the files in alphabetical order + + runner.Suite.Status = moduletest.Pass + for _, name := range files { + if runner.Cancelled { + return + } + + file := runner.Suite.Files[name] + + fileRunner := &TestFileRunner{ + Suite: runner, + States: map[string]*TestFileState{ + MainStateIdentifier: { + Run: nil, + State: states.NewState(), + }, + }, + } + + fileRunner.ExecuteTestFile(file) + fileRunner.Cleanup(file) + runner.Suite.Status = runner.Suite.Status.Merge(file.Status) + } +} + +type TestFileRunner struct { + Suite *TestSuiteRunner + + States map[string]*TestFileState +} + +type TestFileState struct { + Run *moduletest.Run + State *states.State +} + +func (runner *TestFileRunner) ExecuteTestFile(file *moduletest.File) { + log.Printf("[TRACE] TestFileRunner: executing test file %s", file.Name) + + file.Status = file.Status.Merge(moduletest.Pass) + for _, run := range file.Runs { + if runner.Suite.Cancelled { + // This means a hard stop has been requested, in this case we don't + // even stop to mark future tests as having been skipped. They'll + // just show up as pending in the printed summary. + return + } + + if runner.Suite.Stopped { + // Then the test was requested to be stopped, so we just mark each + // following test as skipped and move on. 
+ run.Status = moduletest.Skip + continue + } + + if file.Status == moduletest.Error { + // If the overall test file has errored, we don't keep trying to + // execute tests. Instead, we mark all remaining run blocks as + // skipped. + run.Status = moduletest.Skip + continue + } + + key := MainStateIdentifier + config := runner.Suite.Config + if run.Config.ConfigUnderTest != nil { + config = run.Config.ConfigUnderTest + // Then we need to load an alternate state and not the main one. + + key = run.Config.Module.Source.String() + if key == MainStateIdentifier { + // This is bad. It means somehow the module we're loading has + // the same key as main state and we're about to corrupt things. + + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source", + Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in OpenTofu - please report it!", key), + Subject: run.Config.Module.DeclRange.Ptr(), + }) + + run.Status = moduletest.Error + file.Status = moduletest.Error + continue // Abort! + } + + if _, exists := runner.States[key]; !exists { + runner.States[key] = &TestFileState{ + Run: nil, + State: states.NewState(), + } + } + } + + state, updatedState := runner.ExecuteTestRun(run, file, runner.States[key].State, config) + if updatedState { + // Only update the most recent run and state if the state was + // actually updated by this change. We want to use the run that + // most recently updated the tracked state as the cleanup + // configuration. 
+ runner.States[key].State = state + runner.States[key].Run = run + } + + file.Status = file.Status.Merge(run.Status) + } + + runner.Suite.View.File(file) + for _, run := range file.Runs { + runner.Suite.View.Run(run, file) + } +} + +func (runner *TestFileRunner) ExecuteTestRun(run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config) (*states.State, bool) { + log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name) + + if runner.Suite.Cancelled { + // Don't do anything, just give up and return immediately. + // The surrounding functions should stop this even being called, but in + // case of race conditions or something we can still verify this. + return state, false + } + + if runner.Suite.Stopped { + // Basically the same as above, except we'll be a bit nicer. + run.Status = moduletest.Skip + return state, false + } + + run.Diagnostics = run.Diagnostics.Append(file.Config.Validate()) + if run.Diagnostics.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + run.Diagnostics = run.Diagnostics.Append(run.Config.Validate()) + if run.Diagnostics.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + resetConfig, configDiags := config.TransformForTest(run.Config, file.Config) + defer resetConfig() + + run.Diagnostics = run.Diagnostics.Append(configDiags) + if configDiags.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + validateDiags := runner.validate(config, run, file) + run.Diagnostics = run.Diagnostics.Append(validateDiags) + if validateDiags.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + planCtx, plan, planDiags := runner.plan(config, state, run, file) + if run.Config.Command == configs.PlanTestCommand { + // Then we want to assess our conditions and diagnostics differently. 
+ planDiags = run.ValidateExpectedFailures(planDiags) + run.Diagnostics = run.Diagnostics.Append(planDiags) + if planDiags.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables) + defer resetVariables() + + run.Diagnostics = run.Diagnostics.Append(variableDiags) + if variableDiags.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + if runner.Suite.Verbose { + schemas, diags := planCtx.Schemas(config, plan.PlannedState) + + // If we're going to fail to render the plan, let's not fail the overall + // test. It can still have succeeded. So we'll add the diagnostics, but + // still report the test status as a success. + if diags.HasErrors() { + // This is very unlikely. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to print verbose output", + fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name)))) + } else { + run.Verbose = &moduletest.Verbose{ + Plan: plan, + State: plan.PlannedState, + Config: config, + Providers: schemas.Providers, + Provisioners: schemas.Provisioners, + } + } + + run.Diagnostics = run.Diagnostics.Append(diags) + } + + planCtx.TestContext(config, plan.PlannedState, plan, variables).EvaluateAgainstPlan(run) + return state, false + } + + // Otherwise any error during the planning prevents our apply from + // continuing which is an error. + run.Diagnostics = run.Diagnostics.Append(planDiags) + if planDiags.HasErrors() { + run.Status = moduletest.Error + return state, false + } + + // Since we're carrying on an executing the apply operation as well, we're + // just going to do some post processing of the diagnostics. 
We remove the + // warnings generated from check blocks, as the apply operation will either + // reproduce them or fix them and we don't want fixed diagnostics to be + // reported and we don't want duplicates either. + var filteredDiags tfdiags.Diagnostics + for _, diag := range run.Diagnostics { + if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok && rule.Container.CheckableKind() == addrs.CheckableCheck { + continue + } + filteredDiags = filteredDiags.Append(diag) + } + run.Diagnostics = filteredDiags + + applyCtx, updated, applyDiags := runner.apply(plan, state, config, run, file) + + // Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't. + applyDiags = run.ValidateExpectedFailures(applyDiags) + + run.Diagnostics = run.Diagnostics.Append(applyDiags) + if applyDiags.HasErrors() { + run.Status = moduletest.Error + // Even though the apply operation failed, the graph may have done + // partial updates and the returned state should reflect this. + return updated, true + } + + variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables) + defer resetVariables() + + run.Diagnostics = run.Diagnostics.Append(variableDiags) + if variableDiags.HasErrors() { + run.Status = moduletest.Error + return updated, true + } + + if runner.Suite.Verbose { + schemas, diags := planCtx.Schemas(config, plan.PlannedState) + + // If we're going to fail to render the plan, let's not fail the overall + // test. It can still have succeeded. So we'll add the diagnostics, but + // still report the test status as a success. + if diags.HasErrors() { + // This is very unlikely. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to print verbose output", + fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name)))) + } else { + run.Verbose = &moduletest.Verbose{ + Plan: plan, + State: updated, + Config: config, + Providers: schemas.Providers, + Provisioners: schemas.Provisioners, + } + } + + run.Diagnostics = run.Diagnostics.Append(diags) + } + + applyCtx.TestContext(config, updated, plan, variables).EvaluateAgainstState(run) + return updated, true +} + +func (runner *TestFileRunner) validate(config *configs.Config, run *moduletest.Run, file *moduletest.File) tfdiags.Diagnostics { + log.Printf("[TRACE] TestFileRunner: called validate for %s/%s", file.Name, run.Name) + + var diags tfdiags.Diagnostics + + tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return diags + } + + runningCtx, done := context.WithCancel(context.Background()) + + var validateDiags tfdiags.Diagnostics + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer done() + + log.Printf("[DEBUG] TestFileRunner: starting validate for %s/%s", file.Name, run.Name) + validateDiags = tfCtx.Validate(config) + log.Printf("[DEBUG] TestFileRunner: completed validate for %s/%s", file.Name, run.Name) + }() + waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil) + + if cancelled { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. 
Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources.")) + } + + diags = diags.Append(waitDiags) + diags = diags.Append(validateDiags) + + return diags +} + +func (runner *TestFileRunner) destroy(config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*states.State, tfdiags.Diagnostics) { + + log.Printf("[TRACE] TestFileRunner: called destroy for %s/%s", file.Name, run.Name) + + if state.Empty() { + // Nothing to do! + return state, nil + } + + var diags tfdiags.Diagnostics + + evalCtx, ctxDiags := getEvalContextForTest(runner.States, config, runner.Suite.GlobalVariables) + diags = diags.Append(ctxDiags) + + variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, evalCtx) + diags = diags.Append(variableDiags) + + if diags.HasErrors() { + return state, diags + } + + planOpts := &tofu.PlanOpts{ + Mode: plans.DestroyMode, + SetVariables: variables, + } + + tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return state, diags + } + + runningCtx, done := context.WithCancel(context.Background()) + + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer done() + + log.Printf("[DEBUG] TestFileRunner: starting destroy plan for %s/%s", file.Name, run.Name) + plan, planDiags = tfCtx.Plan(config, state, planOpts) + log.Printf("[DEBUG] TestFileRunner: completed destroy plan for %s/%s", file.Name, run.Name) + }() + waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil) + + if cancelled { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. 
Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources.")) + } + + diags = diags.Append(waitDiags) + diags = diags.Append(planDiags) + + if diags.HasErrors() { + return state, diags + } + + _, updated, applyDiags := runner.apply(plan, state, config, run, file) + diags = diags.Append(applyDiags) + return updated, diags +} + +func (runner *TestFileRunner) plan(config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *plans.Plan, tfdiags.Diagnostics) { + log.Printf("[TRACE] TestFileRunner: called plan for %s/%s", file.Name, run.Name) + + var diags tfdiags.Diagnostics + + targets, targetDiags := run.GetTargets() + diags = diags.Append(targetDiags) + + replaces, replaceDiags := run.GetReplaces() + diags = diags.Append(replaceDiags) + + references, referenceDiags := run.GetReferences() + diags = diags.Append(referenceDiags) + + evalCtx, ctxDiags := getEvalContextForTest(runner.States, config, runner.Suite.GlobalVariables) + diags = diags.Append(ctxDiags) + + variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, evalCtx) + diags = diags.Append(variableDiags) + + if diags.HasErrors() { + return nil, nil, diags + } + + planOpts := &tofu.PlanOpts{ + Mode: func() plans.Mode { + switch run.Config.Options.Mode { + case configs.RefreshOnlyTestMode: + return plans.RefreshOnlyMode + default: + return plans.NormalMode + } + }(), + Targets: targets, + ForceReplace: replaces, + SkipRefresh: !run.Config.Options.Refresh, + SetVariables: variables, + ExternalReferences: references, + } + + tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return nil, nil, diags + } + + runningCtx, done := context.WithCancel(context.Background()) + + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer 
panicHandler() + defer done() + + log.Printf("[DEBUG] TestFileRunner: starting plan for %s/%s", file.Name, run.Name) + plan, planDiags = tfCtx.Plan(config, state, planOpts) + log.Printf("[DEBUG] TestFileRunner: completed plan for %s/%s", file.Name, run.Name) + }() + waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil) + + if cancelled { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources.")) + } + + diags = diags.Append(waitDiags) + diags = diags.Append(planDiags) + + return tfCtx, plan, diags +} + +func (runner *TestFileRunner) apply(plan *plans.Plan, state *states.State, config *configs.Config, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *states.State, tfdiags.Diagnostics) { + log.Printf("[TRACE] TestFileRunner: called apply for %s/%s", file.Name, run.Name) + + var diags tfdiags.Diagnostics + + // If things get cancelled while we are executing the apply operation below + // we want to print out all the objects that we were creating so the user + // can verify we managed to tidy everything up possibly. + // + // Unfortunately, this creates a race condition as the apply operation can + // edit the plan (by removing changes once they are applied) while at the + // same time our cancellation process will try to read the plan. + // + // We take a quick copy of the changes we care about here, which will then + // be used in place of the plan when we print out the objects to be created + // as part of the cancellation process. 
+ var created []*plans.ResourceInstanceChangeSrc + for _, change := range plan.Changes.Resources { + if change.Action != plans.Create { + continue + } + created = append(created, change) + } + + tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return nil, state, diags + } + + runningCtx, done := context.WithCancel(context.Background()) + + var updated *states.State + var applyDiags tfdiags.Diagnostics + + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + defer done() + log.Printf("[DEBUG] TestFileRunner: starting apply for %s/%s", file.Name, run.Name) + updated, applyDiags = tfCtx.Apply(plan, config) + log.Printf("[DEBUG] TestFileRunner: completed apply for %s/%s", file.Name, run.Name) + }() + waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, created) + + if cancelled { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources.")) + } + + diags = diags.Append(waitDiags) + diags = diags.Append(applyDiags) + + return tfCtx, updated, diags +} + +func (runner *TestFileRunner) wait(ctx *tofu.Context, runningCtx context.Context, run *moduletest.Run, file *moduletest.File, created []*plans.ResourceInstanceChangeSrc) (diags tfdiags.Diagnostics, cancelled bool) { + var identifier string + if file == nil { + identifier = "validate" + } else { + identifier = file.Name + if run != nil { + identifier = fmt.Sprintf("%s/%s", identifier, run.Name) + } + } + log.Printf("[TRACE] TestFileRunner: waiting for execution during %s", identifier) + + // This function handles what happens when the user presses the second + // interrupt. This is a "hard cancel", we are going to stop doing whatever + // it is we're doing. 
This means even if we're halfway through creating or + // destroying infrastructure we just give up. + handleCancelled := func() { + log.Printf("[DEBUG] TestFileRunner: test execution cancelled during %s", identifier) + + states := make(map[*moduletest.Run]*states.State) + states[nil] = runner.States[MainStateIdentifier].State + for key, module := range runner.States { + if key == MainStateIdentifier { + continue + } + states[module.Run] = module.State + } + runner.Suite.View.FatalInterruptSummary(run, file, states, created) + + cancelled = true + go ctx.Stop() + + // Just wait for things to finish now, the overall test execution will + // exit early if this takes too long. + <-runningCtx.Done() + } + + // This function handles what happens when the user presses the first + // interrupt. This is essentially a "soft cancel", we're not going to do + // anything but just wait for things to finish safely. But, we do listen + // for the crucial second interrupt which will prompt a hard stop / cancel. + handleStopped := func() { + log.Printf("[DEBUG] TestFileRunner: test execution stopped during %s", identifier) + + select { + case <-runner.Suite.CancelledCtx.Done(): + // We've been asked again. This time we stop whatever we're doing + // and abandon all attempts to do anything reasonable. + handleCancelled() + case <-runningCtx.Done(): + // Do nothing, we finished safely and skipping the remaining tests + // will be handled elsewhere. + } + + } + + select { + case <-runner.Suite.StoppedCtx.Done(): + handleStopped() + case <-runner.Suite.CancelledCtx.Done(): + handleCancelled() + case <-runningCtx.Done(): + // The operation exited normally. + } + + return diags, cancelled +} + +func (runner *TestFileRunner) Cleanup(file *moduletest.File) { + log.Printf("[TRACE] TestStateManager: cleaning up state for %s", file.Name) + + if runner.Suite.Cancelled { + // Don't try and clean anything up if the execution has been cancelled. 
+ log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s due to cancellation", file.Name) + return + } + + var states []*TestFileState + for key, state := range runner.States { + if state.Run == nil { + if state.State.Empty() { + // We can see a run block being empty when the state is empty if + // a module was only used to execute plan commands. So this is + // okay, and means we have nothing to cleanup so we'll just + // skip it. + continue + } + + if key == MainStateIdentifier { + log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s", file.Name) + } else { + log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s for module %s", file.Name, key) + } + + // Otherwise something bad has happened, and we have no way to + // recover from it. This shouldn't happen in reality, but we'll + // print a diagnostic instead of panicking later. + + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Inconsistent state", fmt.Sprintf("Found inconsistent state while cleaning up %s. This is a bug in OpenTofu - please report it", file.Name))) + runner.Suite.View.DestroySummary(diags, nil, file, state.State) + continue + } + + states = append(states, state) + } + + slices.SortFunc(states, func(a, b *TestFileState) int { + // We want to clean up later run blocks first. So, we'll sort this in + // reverse according to index. This means larger indices first. + return b.Run.Index - a.Run.Index + }) + + // Clean up all the states (for main and custom modules) in reverse order. + for _, state := range states { + log.Printf("[DEBUG] TestStateManager: cleaning up state for %s/%s", file.Name, state.Run.Name) + + if runner.Suite.Cancelled { + // In case the cancellation came while a previous state was being + // destroyed. 
+ log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s/%s due to cancellation", file.Name, state.Run.Name) + return + } + + var diags tfdiags.Diagnostics + var runConfig *configs.Config + + isMainState := state.Run.Config.Module == nil + if isMainState { + runConfig = runner.Suite.Config + } else { + runConfig = state.Run.Config.ConfigUnderTest + } + + reset, configDiags := runConfig.TransformForTest(state.Run.Config, file.Config) + diags = diags.Append(configDiags) + + updated := state.State + if !diags.HasErrors() { + var destroyDiags tfdiags.Diagnostics + updated, destroyDiags = runner.destroy(runConfig, state.State, state.Run, file) + diags = diags.Append(destroyDiags) + } + runner.Suite.View.DestroySummary(diags, state.Run, file, updated) + + if updated.HasManagedResourceInstanceObjects() { + views.SaveErroredTestStateFile(updated, state.Run, file, runner.Suite.View) + } + reset() + } +} + +// helper functions + +// buildInputVariablesForTest creates a tofu.InputValues mapping for +// variable values that are relevant to the config being tested. +// +// Crucially, it differs from prepareInputVariablesForAssertions in that it only +// includes variables that are reference by the config and not everything that +// is defined within the test run block and test file. +func buildInputVariablesForTest(run *moduletest.Run, file *moduletest.File, config *configs.Config, globals map[string]backend.UnparsedVariableValue, evalCtx *hcl.EvalContext) (tofu.InputValues, tfdiags.Diagnostics) { + variables := make(map[string]backend.UnparsedVariableValue) + for name := range config.Module.Variables { + if run != nil { + if expr, exists := run.Config.Variables[name]; exists { + // Local variables take precedence. 
+ variables[name] = testVariableValueExpression{ + expr: expr, + sourceType: tofu.ValueFromConfig, + ctx: evalCtx, + } + continue + } + } + + if file != nil { + if expr, exists := file.Config.Variables[name]; exists { + // If it's not set locally, it maybe set for the entire file. + variables[name] = testVariableValueExpression{ + expr: expr, + sourceType: tofu.ValueFromConfig, + ctx: evalCtx, + } + continue + } + } + + if globals != nil { + // If it's not set locally or at the file level, maybe it was + // defined globally. + if variable, exists := globals[name]; exists { + variables[name] = variable + } + } + + // If it's not set at all that might be okay if the variable is optional + // so we'll just not add anything to the map. + } + + return backend.ParseVariableValues(variables, config.Module.Variables) +} + +// getEvalContextForTest constructs an hcl.EvalContext based on the provided map of +// TestFileState instances, configuration and global variables. +// It extracts the relevant information from the input parameters to create a +// context suitable for HCL evaluation. +func getEvalContextForTest(states map[string]*TestFileState, config *configs.Config, globals map[string]backend.UnparsedVariableValue) (*hcl.EvalContext, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + runCtx := make(map[string]cty.Value) + for _, state := range states { + if state.Run == nil { + continue + } + outputs := make(map[string]cty.Value) + mod := state.State.Modules[""] // Empty string is what is used by the module in the test runner + for outName, out := range mod.OutputValues { + outputs[outName] = out.Value + } + runCtx[state.Run.Name] = cty.ObjectVal(outputs) + } + + // If the variable is referenced in the tfvars file or TF_VAR_ environment variable, then lookup the value + // in global variables; otherwise, assign the default value. 
+ inputValues, diags := parseAndApplyDefaultValues(globals, config.Module.Variables) + diags.Append(diags) + + varCtx := make(map[string]cty.Value) + for name, val := range inputValues { + varCtx[name] = val.Value + } + + ctx := &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "run": cty.ObjectVal(runCtx), + "var": cty.ObjectVal(varCtx), + }, + } + return ctx, diags +} + +type testVariableValueExpression struct { + expr hcl.Expression + sourceType tofu.ValueSourceType + ctx *hcl.EvalContext +} + +func (v testVariableValueExpression) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + val, hclDiags := v.expr.Value(v.ctx) + diags = diags.Append(hclDiags) + + rng := tfdiags.SourceRangeFromHCL(v.expr.Range()) + + return &tofu.InputValue{ + Value: val, + SourceType: v.sourceType, + SourceRange: rng, + }, diags +} + +// parseAndApplyDefaultValues parses the given unparsed variables into tofu.InputValues +// and applies default values from the configuration variables where applicable. +// This ensures all variables are correctly initialized and returns the resulting tofu.InputValues. +func parseAndApplyDefaultValues(unparsedVariables map[string]backend.UnparsedVariableValue, configVariables map[string]*configs.Variable) (tofu.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + inputs := make(tofu.InputValues, len(unparsedVariables)) + for name, variable := range unparsedVariables { + value, valueDiags := variable.ParseVariableValue(configs.VariableParseLiteral) + diags = diags.Append(valueDiags) + inputs[name] = value + } + + // Now, we're going to apply any default values from the configuration. + // We do this after the conversion into tofu.InputValues, as the + // defaults have already been converted into cty.Value objects. 
+ for name, variable := range configVariables { + if _, exists := unparsedVariables[name]; exists { + // Then we don't want to apply the default for this variable as we + // already have a value. + continue + } + + if variable.Default != cty.NilVal { + inputs[name] = &tofu.InputValue{ + Value: variable.Default, + SourceType: tofu.ValueFromConfig, + SourceRange: tfdiags.SourceRangeFromHCL(variable.DeclRange), + } + } + } + + return inputs, diags +} + +// prepareInputVariablesForAssertions creates a tofu.InputValues mapping +// that contains all the variables defined for a given run and file, alongside +// any unset variables that have defaults within the provided config. +// +// Crucially, it differs from buildInputVariablesForTest in that the returned +// input values include all variables available even if they are not defined +// within the config. This allows the assertions to refer to variables defined +// solely within the test file, and not only those within the configuration. +// +// It also allows references to previously run test module's outputs as variable +// expressions. This relies upon the evaluation order and will not sort the test cases +// to run in the dependent order. +// +// In addition, it modifies the provided config so that any variables that are +// available are also defined in the config. It returns a function that resets +// the config which must be called so the config can be reused going forward. 
+func (runner *TestFileRunner) prepareInputVariablesForAssertions(config *configs.Config, run *moduletest.Run, file *moduletest.File, globals map[string]backend.UnparsedVariableValue) (tofu.InputValues, func(), tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ctx, ctxDiags := getEvalContextForTest(runner.States, config, globals) + diags = diags.Append(ctxDiags) + + variables := make(map[string]backend.UnparsedVariableValue) + + if run != nil { + for name, expr := range run.Config.Variables { + variables[name] = testVariableValueExpression{ + expr: expr, + sourceType: tofu.ValueFromConfig, + ctx: ctx, + } + } + } + + if file != nil { + for name, expr := range file.Config.Variables { + if _, exists := variables[name]; exists { + // Then this variable was defined at the run level and we want + // that value to take precedence. + continue + } + variables[name] = testVariableValueExpression{ + expr: expr, + sourceType: tofu.ValueFromConfig, + ctx: ctx, + } + } + } + + for name, variable := range globals { + if _, exists := variables[name]; exists { + // Then this value was already defined at either the run level + // or the file level, and we want those values to take + // precedence. + continue + } + variables[name] = variable + } + + // We've gathered all the values we have, let's convert them into + // tofu.InputValues so they can be passed into the OpenTofu graph. + // Also, apply default values from the configuration variables where applicable. + inputs, valDiags := parseAndApplyDefaultValues(variables, config.Module.Variables) + diags.Append(valDiags) + + // Finally, we're going to do a some modifications to the config. + // If we have got variable values from the test file we need to make sure + // they have an equivalent entry in the configuration. We're going to do + // that dynamically here. + + // First, take a backup of the existing configuration so we can easily + // restore it later. 
+ currentVars := make(map[string]*configs.Variable) + for name, variable := range config.Module.Variables { + currentVars[name] = variable + } + + // Next, let's go through our entire inputs and add any that aren't already + // defined into the config. + for name, value := range inputs { + if _, exists := config.Module.Variables[name]; exists { + continue + } + + config.Module.Variables[name] = &configs.Variable{ + Name: name, + Type: value.Value.Type(), + ConstraintType: value.Value.Type(), + DeclRange: value.SourceRange.ToHCL(), + } + } + + // We return our input values, a function that will reset the variables + // within the config so it can be used again, and any diagnostics reporting + // variables that we couldn't parse. + + return inputs, func() { + config.Module.Variables = currentVars + }, diags +} diff --git a/pkg/command/test_test.go b/pkg/command/test_test.go new file mode 100644 index 00000000000..32914e3bb11 --- /dev/null +++ b/pkg/command/test_test.go @@ -0,0 +1,1302 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "path" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + testing_command "github.com/kubegems/opentofu/pkg/command/testing" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/terminal" +) + +func TestTest(t *testing.T) { + tcs := map[string]struct { + override string + args []string + expected string + code int + skip bool + }{ + "simple_pass": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "simple_pass_nested": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "simple_pass_nested_alternate": { + args: []string{"-test-directory", "other"}, + expected: "1 passed, 0 failed.", + code: 0, + }, + "simple_pass_very_nested": { + args: []string{"-test-directory", "tests/subdir"}, + expected: "1 passed, 0 failed.", + code: 0, + }, + "simple_pass_very_nested_alternate": { + override: "simple_pass_very_nested", + args: []string{"-test-directory", "./tests/subdir"}, + expected: "1 passed, 0 failed.", + code: 0, + }, + "pass_with_locals": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "pass_with_outputs": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "pass_with_variables": { + expected: "2 passed, 0 failed.", + code: 0, + }, + "plan_then_apply": { + expected: "2 passed, 0 failed.", + code: 0, + }, + "expect_failures_checks": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "expect_failures_inputs": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "expect_failures_outputs": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "expect_runtime_check_fail": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "expect_runtime_check_pass_with_expect": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "expect_runtime_check_pass_command_plan_expected": { + expected: "1 
passed, 0 failed.", + code: 0, + }, + "expect_runtime_check_fail_command_plan": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "expect_failures_resources": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "multiple_files": { + expected: "2 passed, 0 failed", + code: 0, + }, + "multiple_files_with_filter": { + override: "multiple_files", + args: []string{"-filter=one.tftest.hcl"}, + expected: "1 passed, 0 failed", + code: 0, + }, + "variables": { + expected: "2 passed, 0 failed", + code: 0, + }, + "variables_overridden": { + override: "variables", + args: []string{"-var=input=foo"}, + expected: "1 passed, 1 failed", + code: 1, + }, + "simple_fail": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "custom_condition_checks": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "custom_condition_inputs": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "custom_condition_outputs": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "custom_condition_resources": { + expected: "0 passed, 1 failed.", + code: 1, + }, + "no_providers_in_main": { + expected: "1 passed, 0 failed", + code: 0, + }, + "default_variables": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "undefined_variables": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "refresh_only": { + expected: "3 passed, 0 failed.", + code: 0, + }, + "null_output": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "pass_with_tests_dir_variables": { + expected: "1 passed, 0 failed.", + code: 0, + }, + "override_with_tests_dir_variables": { + expected: "1 passed, 0 failed.", + code: 0, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + + file := name + if len(tc.override) > 0 { + file = tc.override + } + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: 
Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run(tc.args) + output := done(t) + + if code != tc.code { + t.Errorf("expected status code %d but got %d", tc.code, code) + } + + if !strings.Contains(output.Stdout(), tc.expected) { + t.Errorf("output didn't contain expected string:\n\n%s", output.All()) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } + }) + } +} +func TestTest_Full_Output(t *testing.T) { + tcs := map[string]struct { + override string + args []string + expected string + code int + skip bool + }{ + "broken_no_valid_hcl": { + expected: "Unsupported block type", + code: 1, + }, + "expect_runtime_check_fail_command_plan": { + expected: "Check block assertion known after apply", + code: 1, + }, + "broken_wrong_block_resource": { + expected: "Blocks of type \"resource\" are not expected here.", + code: 1, + }, + "broken_wrong_block_data": { + expected: "Blocks of type \"data\" are not expected here.", + code: 1, + }, + "broken_wrong_block_output": { + expected: "Blocks of type \"output\" are not expected here.", + code: 1, + }, + "broken_wrong_block_check": { + expected: "Blocks of type \"check\" are not expected here.", + code: 1, + }, + "not_exists_output": { + expected: "Error: Reference to undeclared output value", + args: []string{"-no-color"}, + code: 1, + }, + "refresh_conflicting_config": { + expected: "Incompatible plan options", + code: 1, + }, + "is_sorted": { + expected: "1.tftest.hcl... pass\n run \"a\"... pass\n2.tftest.hcl... pass\n run \"b\"... pass\n3.tftest.hcl... pass\n run \"c\"... 
pass", + code: 0, + args: []string{"-no-color"}, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + + file := name + if len(tc.override) > 0 { + file = tc.override + } + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run(tc.args) + output := done(t) + + if code != tc.code { + t.Errorf("expected status code %d but got %d", tc.code, code) + } + + if !strings.Contains(output.All(), tc.expected) { + t.Errorf("output didn't contain expected string:\n\n%s \n\n----\n\nexpected: %s", output.All(), tc.expected) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } + }) + } +} + +func TestTest_Interrupt(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "with_interrupt")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + interrupt := make(chan struct{}) + provider.Interrupt = interrupt + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + ShutdownCh: interrupt, + }, + } + + c.Run(nil) + output := done(t).All() + + if !strings.Contains(output, "Interrupt received") { + t.Errorf("output didn't produce the right output:\n\n%s", output) + } + + if provider.ResourceCount() > 0 { + // we asked for a nice stop in this one, so it should still have tidied everything up. 
+ t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + +func TestTest_DoubleInterrupt(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "with_double_interrupt")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + interrupt := make(chan struct{}) + provider.Interrupt = interrupt + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + ShutdownCh: interrupt, + }, + } + + c.Run(nil) + output := done(t).All() + + if !strings.Contains(output, "Two interrupts received") { + t.Errorf("output didn't produce the right output:\n\n%s", output) + } + + cleanupMessage := `OpenTofu was interrupted while executing main.tftest.hcl, and may not have +performed the expected cleanup operations. + +OpenTofu has already created the following resources from the module under +test: + - test_resource.primary + - test_resource.secondary + - test_resource.tertiary` + + // It's really important that the above message is printed, so we're testing + // for it specifically and making sure it contains all the resources. + if !strings.Contains(output, cleanupMessage) { + t.Errorf("output didn't produce the right output:\n\n%s", output) + } + + // This time the test command shouldn't have cleaned up the resource because + // of the hard interrupt. + if provider.ResourceCount() != 3 { + // we asked for a nice stop in this one, so it should still have tidied everything up. 
+ t.Errorf("should not have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + +func TestTest_ProviderAlias(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "with_provider_alias")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + command := &TestCommand{ + Meta: meta, + } + + code := command.Run(nil) + output := done(t) + + printedOutput := false + + if code != 0 { + printedOutput = true + t.Errorf("expected status code 0 but got %d: %s", code, output.All()) + } + + if provider.ResourceCount() > 0 { + if !printedOutput { + t.Errorf("should have deleted all resources on completion but left %s\n\n%s", provider.ResourceString(), output.All()) + } else { + t.Errorf("should have deleted all resources on completion but left %s", provider.ResourceString()) + } + } +} + +func TestTest_ModuleDependencies(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "with_setup_module")), td) + defer testChdir(t, td)() + + // Our two providers will share a common set of values to make things + // easier. + store := &testing_command.ResourceStore{ + Data: make(map[string]cty.Value), + } + + // We set it up so the module provider will update the data sources + // available to the core mock provider. 
+ test := testing_command.NewProvider(store) + setup := testing_command.NewProvider(store) + + test.SetDataPrefix("data") + test.SetResourcePrefix("resource") + + // Let's make the setup provider write into the data for test provider. + setup.SetResourcePrefix("data") + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + "setup": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + meta := Meta{ + testingOverrides: &testingOverrides{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): providers.FactoryFixed(test.Provider), + addrs.NewDefaultProvider("setup"): providers.FactoryFixed(setup.Provider), + }, + }, + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + command := &TestCommand{ + Meta: meta, + } + + code := command.Run(nil) + output := done(t) + + printedOutput := false + + if code != 0 { + printedOutput = true + t.Errorf("expected status code 0 but got %d: %s", code, output.All()) + } + + if test.ResourceCount() > 0 { + if !printedOutput { + printedOutput = true + t.Errorf("should have deleted all resources on completion but left %s\n\n%s", test.ResourceString(), output.All()) + } else { + t.Errorf("should have deleted all resources on completion but left %s", test.ResourceString()) + } + } + + if setup.ResourceCount() > 0 { + if !printedOutput { + t.Errorf("should have deleted all resources on completion but left %s\n\n%s", setup.ResourceString(), output.All()) + } else { + t.Errorf("should have deleted all resources on completion but left %s", setup.ResourceString()) + } + } +} + +func TestTest_CatchesErrorsBeforeDestroy(t *testing.T) { + td := t.TempDir() + testCopyDir(t, 
testFixturePath(path.Join("test", "invalid_default_state")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run([]string{"-no-color"}) + output := done(t) + + if code != 1 { + t.Errorf("expected status code 0 but got %d", code) + } + + expectedOut := `main.tftest.hcl... fail + run "test"... fail + +Failure! 0 passed, 1 failed. +` + + expectedErr := ` +Error: No value for required variable + + on main.tf line 2: + 2: variable "input" { + +The root module input variable "input" is not set, and has no default value. +Use a -var or -var-file command line argument to provide a value for this +variable. +` + + actualOut := output.Stdout() + actualErr := output.Stderr() + + if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 { + t.Errorf("std out didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff) + } + + if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 { + t.Errorf("std err didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + +func TestTest_Verbose(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "plan_then_apply")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run([]string{"-verbose", "-no-color"}) + output := done(t) + + if code != 0 { + t.Errorf("expected status code 0 but got %d", code) + } + + expected := `main.tftest.hcl... pass + run "validate_test_resource"... 
pass + +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create + +OpenTofu will perform the following actions: + + # test_resource.foo will be created + + resource "test_resource" "foo" { + + id = "constant_value" + + value = "bar" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + run "validate_test_resource"... pass +# test_resource.foo: +resource "test_resource" "foo" { + id = "constant_value" + value = "bar" +} + +Success! 2 passed, 0 failed. +` + + actual := output.All() + + if diff := cmp.Diff(actual, expected); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + +func TestTest_ValidatesBeforeExecution(t *testing.T) { + tcs := map[string]struct { + expectedOut string + expectedErr string + }{ + "invalid": { + expectedOut: `main.tftest.hcl... fail + run "invalid"... fail + +Failure! 0 passed, 1 failed. +`, + expectedErr: ` +Error: Invalid ` + "`expect_failures`" + ` reference + + on main.tftest.hcl line 5, in run "invalid": + 5: local.my_value, + +You cannot expect failures from local.my_value. You can only expect failures +from checkable objects such as input variables, output values, check blocks, +managed resources and data sources. +`, + }, + "invalid-module": { + expectedOut: `main.tftest.hcl... fail + run "invalid"... fail + run "test"... skip + +Failure! 0 passed, 1 failed, 1 skipped. +`, + expectedErr: ` +Error: Reference to undeclared input variable + + on setup/main.tf line 3, in resource "test_resource" "setup": + 3: value = var.not_real // Oh no! + +An input variable with the name "not_real" has not been declared. This +variable can be declared with a variable "not_real" {} block. 
+`, + }, + "missing-provider": { + expectedOut: `main.tftest.hcl... fail + run "passes_validation"... fail + +Failure! 0 passed, 1 failed. +`, + expectedErr: ` +Error: Provider configuration not present + +To work with test_resource.secondary its original provider configuration at +provider["registry.opentofu.org/hashicorp/test"].secondary is required, but +it has been removed. This occurs when a provider configuration is removed +while objects created by that provider still exist in the state. Re-add the +provider configuration to destroy test_resource.secondary, after which you +can remove the provider configuration again. +`, + }, + "missing-provider-in-run-block": { + expectedOut: `main.tftest.hcl... fail + run "passes_validation"... fail + +Failure! 0 passed, 1 failed. +`, + expectedErr: ` +Error: Provider configuration not present + +To work with test_resource.secondary its original provider configuration at +provider["registry.opentofu.org/hashicorp/test"].secondary is required, but +it has been removed. This occurs when a provider configuration is removed +while objects created by that provider still exist in the state. Re-add the +provider configuration to destroy test_resource.secondary, after which you +can remove the provider configuration again. +`, + }, + "missing-provider-in-test-module": { + expectedOut: `main.tftest.hcl... fail + run "passes_validation_primary"... pass + run "passes_validation_secondary"... fail + +Failure! 1 passed, 1 failed. +`, + expectedErr: ` +Error: Provider configuration not present + +To work with test_resource.secondary its original provider configuration at +provider["registry.opentofu.org/hashicorp/test"].secondary is required, but +it has been removed. This occurs when a provider configuration is removed +while objects created by that provider still exist in the state. Re-add the +provider configuration to destroy test_resource.secondary, after which you +can remove the provider configuration again. 
+`, + }, + } + + for file, tc := range tcs { + t.Run(file, func(t *testing.T) { + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + c := &TestCommand{ + Meta: meta, + } + + code := c.Run([]string{"-no-color"}) + output := done(t) + + if code != 1 { + t.Errorf("expected status code 1 but got %d", code) + } + + actualOut, expectedOut := output.Stdout(), tc.expectedOut + actualErr, expectedErr := output.Stderr(), tc.expectedErr + + if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff) + } + + if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 { + t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } + }) + } +} + +func TestTest_Modules(t *testing.T) { + tcs := map[string]struct { + expected string + code int + skip bool + }{ + "pass_module_with_no_resource": { + expected: "main.tftest.hcl... pass\n run \"run\"... pass\n\nSuccess! 1 passed, 0 failed.\n", + code: 0, + }, + "with_nested_setup_modules": { + expected: "main.tftest.hcl... pass\n run \"load_module\"... pass\n\nSuccess! 
1 passed, 0 failed.\n", + code: 0, + }, + "with_verify_module": { + expected: "main.tftest.hcl... pass\n run \"test\"... pass\n run \"verify\"... pass\n\nSuccess! 2 passed, 0 failed.\n", + code: 0, + }, + "only_modules": { + expected: "main.tftest.hcl... pass\n run \"first\"... pass\n run \"second\"... pass\n\nSuccess! 2 passed, 0 failed.\n", + code: 0, + }, + "variables_reference": { + expected: "main.tftest.hcl... pass\n run \"setup\"... pass\n run \"test\"... pass\n\nSuccess! 2 passed, 0 failed.\n", + code: 0, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + + file := name + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + command := &TestCommand{ + Meta: meta, + } + + code := command.Run([]string{"-no-color"}) + output := done(t) + printedOutput := false + + if code != tc.code { + printedOutput = true + t.Errorf("expected status code %d but got %d: %s", tc.code, code, output.All()) + } + + actual := output.All() + + if diff := cmp.Diff(actual, tc.expected); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", tc.expected, actual, diff) + } + + if provider.ResourceCount() > 0 { + if !printedOutput { + t.Errorf("should have deleted all resources on completion but left %s\n\n%s", provider.ResourceString(), 
output.All()) + } else { + t.Errorf("should have deleted all resources on completion but left %s", provider.ResourceString()) + } + } + + if provider.DataSourceCount() > 0 { + if !printedOutput { + t.Errorf("should have deleted all data sources on completion but left %s\n\n%s", provider.DataSourceString(), output.All()) + } else { + t.Errorf("should have deleted all data sources on completion but left %s", provider.DataSourceString()) + } + } + }) + } +} + +func TestTest_StatePropagation(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "state_propagation")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + c := &TestCommand{ + Meta: meta, + } + + code := c.Run([]string{"-verbose", "-no-color"}) + output := done(t) + + if code != 0 { + t.Errorf("expected status code 0 but got %d", code) + } + + expected := `main.tftest.hcl... pass + run "initial_apply_example"... pass +# test_resource.module_resource: +resource "test_resource" "module_resource" { + id = "df6h8as9" + value = "start" +} + run "initial_apply"... pass +# test_resource.resource: +resource "test_resource" "resource" { + id = "598318e0" + value = "start" +} + run "plan_second_example"... pass + +OpenTofu used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: + + create + +OpenTofu will perform the following actions: + + # test_resource.second_module_resource will be created + + resource "test_resource" "second_module_resource" { + + id = "b6a1d8cb" + + value = "start" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + run "plan_update"... pass + +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + ~ update in-place + +OpenTofu will perform the following actions: + + # test_resource.resource will be updated in-place + ~ resource "test_resource" "resource" { + id = "598318e0" + ~ value = "start" -> "update" + } + +Plan: 0 to add, 1 to change, 0 to destroy. + run "plan_update_example"... pass + +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + ~ update in-place + +OpenTofu will perform the following actions: + + # test_resource.module_resource will be updated in-place + ~ resource "test_resource" "module_resource" { + id = "df6h8as9" + ~ value = "start" -> "update" + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Success! 5 passed, 0 failed. +` + + actual := output.All() + + if diff := cmp.Diff(actual, expected); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + +func TestTest_PartialUpdates(t *testing.T) { + tcs := map[string]struct { + expectedOut string + expectedErr string + expectedCode int + }{ + "partial_updates": { + expectedOut: `main.tftest.hcl... pass + run "first"... 
pass + +Warning: Resource targeting is in effect + +You are creating a plan with the -target option, which means that the result +of this plan may not represent all of the changes requested by the current +configuration. + +The -target option is not for routine use, and is provided only for +exceptional situations such as recovering from errors or mistakes, or when +OpenTofu specifically suggests to use it as part of an error message. + +Warning: Applied changes may be incomplete + +The plan was created with the -target option in effect, so some changes +requested in the configuration may have been ignored and the output values +may not be fully updated. Run the following command to verify that no other +changes are pending: + tofu plan + +Note that the -target option is not suitable for routine use, and is provided +only for exceptional situations such as recovering from errors or mistakes, +or when OpenTofu specifically suggests to use it as part of an error message. + run "second"... pass + +Success! 2 passed, 0 failed. +`, + expectedCode: 0, + }, + "partial_update_failure": { + expectedOut: `main.tftest.hcl... fail + run "partial"... fail + +Warning: Resource targeting is in effect + +You are creating a plan with the -target option, which means that the result +of this plan may not represent all of the changes requested by the current +configuration. + +The -target option is not for routine use, and is provided only for +exceptional situations such as recovering from errors or mistakes, or when +OpenTofu specifically suggests to use it as part of an error message. + +Warning: Applied changes may be incomplete + +The plan was created with the -target option in effect, so some changes +requested in the configuration may have been ignored and the output values +may not be fully updated. 
Run the following command to verify that no other +changes are pending: + tofu plan + +Note that the -target option is not suitable for routine use, and is provided +only for exceptional situations such as recovering from errors or mistakes, +or when OpenTofu specifically suggests to use it as part of an error message. + +Failure! 0 passed, 1 failed. +`, + expectedErr: ` +Error: Unknown condition run + + on main.tftest.hcl line 7, in run "partial": + 7: condition = test_resource.bar.value == "bar" + +Condition expression could not be evaluated at this time. +`, + expectedCode: 1, + }, + } + + for file, tc := range tcs { + t.Run(file, func(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run([]string{"-no-color"}) + output := done(t) + + actualOut, expectedOut := output.Stdout(), tc.expectedOut + actualErr, expectedErr := output.Stderr(), tc.expectedErr + expectedCode := tc.expectedCode + + if code != expectedCode { + t.Errorf("expected status code %d but got %d", expectedCode, code) + } + + if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff) + } + + if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 { + t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } + }) + } +} + +func TestTest_LocalVariables(t *testing.T) { + tcs := map[string]struct { + expected string + code int + skip bool + }{ + "pass_with_local_variable": { + expected: 
`tests/test.tftest.hcl... pass + run "first"... pass + + +Outputs: + +foo = "bar" + run "second"... pass + +No changes. Your infrastructure matches the configuration. + +OpenTofu has compared your real infrastructure against your configuration and +found no differences, so no changes are needed. + +Success! 2 passed, 0 failed. +`, + code: 0, + }, + "pass_var_inside_variables": { + expected: `main.tftest.hcl... pass + run "first"... pass + + +Outputs: + +sss = "false" + +Success! 1 passed, 0 failed. +`, + code: 0, + }, + "pass_var_with_default_value_inside_variables": { + expected: `main.tftest.hcl... pass + run "first"... pass + + +Outputs: + +sss = "true" + +Success! 1 passed, 0 failed. +`, + code: 0, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + + file := name + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + providerSource, providerClose := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer providerClose() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + command := &TestCommand{ + Meta: meta, + } + + code := command.Run([]string{"-verbose", "-no-color"}) + output := done(t) + + if code != tc.code { + t.Errorf("expected status code %d but got %d: %s", tc.code, code, output.All()) + } + + actual := output.All() + + if diff := cmp.Diff(actual, tc.expected); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", tc.expected, actual, diff) + 
} + }) + } +} + +func TestTest_RunBlock(t *testing.T) { + tcs := map[string]struct { + expected string + code int + skip bool + }{ + "invalid_run_block_name": { + expected: ` +Error: Invalid run block name + + on tests/main.tftest.hcl line 1, in run "sample run": + 1: run "sample run" { + +A name must start with a letter or underscore and may contain only letters, +digits, underscores, and dashes. +`, + code: 1, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + + file := name + + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", file)), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + streams, _ := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != tc.code { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + }) + } +} diff --git a/pkg/command/testdata/apply-config-invalid/main.tf b/pkg/command/testdata/apply-config-invalid/main.tf new file mode 100644 index 00000000000..81ea8d18551 --- /dev/null +++ b/pkg/command/testdata/apply-config-invalid/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "${var.nope}" +} diff --git a/pkg/command/testdata/apply-destroy-targeted/main.tf b/pkg/command/testdata/apply-destroy-targeted/main.tf new file mode 100644 index 00000000000..0f249b384a3 --- /dev/null +++ b/pkg/command/testdata/apply-destroy-targeted/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + count = 3 +} + +resource "test_load_balancer" "foo" { + instances = test_instance.foo.*.id +} diff --git 
a/pkg/command/testdata/apply-error/main.tf b/pkg/command/testdata/apply-error/main.tf new file mode 100644 index 00000000000..a6d6cc0df5a --- /dev/null +++ b/pkg/command/testdata/apply-error/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +resource "test_instance" "bar" { + error = "true" +} diff --git a/pkg/command/testdata/apply-input-partial/main.tf b/pkg/command/testdata/apply-input-partial/main.tf new file mode 100644 index 00000000000..85ada15d2de --- /dev/null +++ b/pkg/command/testdata/apply-input-partial/main.tf @@ -0,0 +1,9 @@ +variable "foo" {} +variable "bar" {} + +output "foo" { + value = "${var.foo}" +} +output "bar" { + value = "${var.bar}" +} diff --git a/pkg/command/testdata/apply-input/main.tf b/pkg/command/testdata/apply-input/main.tf new file mode 100644 index 00000000000..b7f2958a5d0 --- /dev/null +++ b/pkg/command/testdata/apply-input/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +output "result" { + value = var.foo +} diff --git a/pkg/command/testdata/apply-plan-conditional-sensitive/main.tf b/pkg/command/testdata/apply-plan-conditional-sensitive/main.tf new file mode 100644 index 00000000000..288cf5a6263 --- /dev/null +++ b/pkg/command/testdata/apply-plan-conditional-sensitive/main.tf @@ -0,0 +1,89 @@ +output "string" { + value = var.string != null ? var.string : "" +} + +variable "string" { + type = string + default = null + sensitive = true +} + +output "list" { + value = var.list != null ? var.list : [] +} + +variable "list" { + type = list(string) + default = null + sensitive = true +} + +output "bool" { + value = var.bool != null ? var.bool : false +} + +variable "bool" { + type = bool + default = null + sensitive = true +} + +output "map" { + value = var.map != null ? var.map : { foo = "bar" } +} + +variable "map" { + type = map(string) + default = null + sensitive = true +} + +output "number" { + value = var.number != null ? 
var.number : 6 +} + +variable "number" { + type = number + default = null + sensitive = true +} + +output "object" { + value = var.object != null ? var.object : {} +} + +variable "object" { + type = object({}) + default = null + sensitive = true +} + +output "set" { + value = var.set != null ? var.set : [false] +} + +variable "set" { + type = set(bool) + default = null + sensitive = true +} + +output "tuple" { + value = var.tuple != null ? var.tuple :[] +} + +variable "tuple" { + type = tuple([string]) + default = null + sensitive = true +} + +output "any" { + value = var.any != null ? var.any : "" +} + +variable "any" { + type = any + default = null + sensitive = true +} \ No newline at end of file diff --git a/pkg/command/testdata/apply-plan-no-module/main.tf b/pkg/command/testdata/apply-plan-no-module/main.tf new file mode 100644 index 00000000000..deea30d6698 --- /dev/null +++ b/pkg/command/testdata/apply-plan-no-module/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "tmpl" { + foo = "${file("${path.module}/template.txt")}" +} + +output "template" { + value = "${test_instance.tmpl.foo}" +} diff --git a/pkg/command/testdata/apply-replace/main.tf b/pkg/command/testdata/apply-replace/main.tf new file mode 100644 index 00000000000..efc6729f90d --- /dev/null +++ b/pkg/command/testdata/apply-replace/main.tf @@ -0,0 +1,2 @@ +resource "test_instance" "a" { +} diff --git a/pkg/command/testdata/apply-sensitive-output/main.tf b/pkg/command/testdata/apply-sensitive-output/main.tf new file mode 100644 index 00000000000..87994ae9fcd --- /dev/null +++ b/pkg/command/testdata/apply-sensitive-output/main.tf @@ -0,0 +1,12 @@ +variable "input" { + default = "Hello world" +} + +output "notsensitive" { + value = "${var.input}" +} + +output "sensitive" { + sensitive = true + value = "${var.input}" +} diff --git a/pkg/command/testdata/apply-shutdown/main.tf b/pkg/command/testdata/apply-shutdown/main.tf new file mode 100644 index 00000000000..5fc966034f8 --- /dev/null +++ 
b/pkg/command/testdata/apply-shutdown/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +resource "test_instance" "bar" { + ami = "${test_instance.foo.ami}" +} diff --git a/pkg/command/testdata/apply-targeted/main.tf b/pkg/command/testdata/apply-targeted/main.tf new file mode 100644 index 00000000000..1b6c42450de --- /dev/null +++ b/pkg/command/testdata/apply-targeted/main.tf @@ -0,0 +1,9 @@ +resource "test_instance" "foo" { + count = 2 +} + +resource "test_instance" "bar" { +} + +resource "test_instance" "baz" { +} diff --git a/pkg/command/testdata/apply-tf-workspace/main.tf b/pkg/command/testdata/apply-tf-workspace/main.tf new file mode 100644 index 00000000000..87d60c29e66 --- /dev/null +++ b/pkg/command/testdata/apply-tf-workspace/main.tf @@ -0,0 +1,3 @@ +output "output" { + value = terraform.workspace +} \ No newline at end of file diff --git a/pkg/command/testdata/apply-tofu-workspace/main.tf b/pkg/command/testdata/apply-tofu-workspace/main.tf new file mode 100644 index 00000000000..12052fd023c --- /dev/null +++ b/pkg/command/testdata/apply-tofu-workspace/main.tf @@ -0,0 +1,3 @@ +output "output" { + value = tofu.workspace +} diff --git a/pkg/command/testdata/apply-vars/main.tf b/pkg/command/testdata/apply-vars/main.tf new file mode 100644 index 00000000000..1d6da85c3d1 --- /dev/null +++ b/pkg/command/testdata/apply-vars/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "test_instance" "foo" { + value = var.foo +} diff --git a/pkg/command/testdata/apply/main.tf b/pkg/command/testdata/apply/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/command/testdata/apply/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/apply/output.jsonlog b/pkg/command/testdata/apply/output.jsonlog new file mode 100644 index 00000000000..0c63ab56c8b --- /dev/null +++ b/pkg/command/testdata/apply/output.jsonlog @@ -0,0 +1,7 @@ 
+{"@level":"info","@message":"Terraform 0.15.0-dev","@module":"tofu.ui","terraform":"0.15.0-dev","type":"version","ui":"0.1.0"} +{"@level":"info","@message":"test_instance.foo: Plan to create","@module":"tofu.ui","change":{"resource":{"addr":"test_instance.foo","module":"","resource":"test_instance.foo","implied_provider":"test","resource_type":"test_instance","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"tofu.ui","changes":{"add":1,"import":0,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} +{"@level":"info","@message":"test_instance.foo: Creating...","@module":"tofu.ui","hook":{"resource":{"addr":"test_instance.foo","module":"","resource":"test_instance.foo","implied_provider":"test","resource_type":"test_instance","resource_name":"foo","resource_key":null},"action":"create"},"type":"apply_start"} +{"@level":"info","@message":"test_instance.foo: Creation complete after 0s","@module":"tofu.ui","hook":{"resource":{"addr":"test_instance.foo","module":"","resource":"test_instance.foo","implied_provider":"test","resource_type":"test_instance","resource_name":"foo","resource_key":null},"action":"create","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"Apply complete! 
Resources: 1 added, 0 changed, 0 destroyed.","@module":"tofu.ui","changes":{"add":1,"import":0,"change":0,"remove":0,"operation":"apply"},"type":"change_summary"} +{"@level":"info","@message":"Outputs: 0","@module":"tofu.ui","outputs":{},"type":"outputs"} diff --git a/pkg/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-default-to-single/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-multi-default-to-single/local-state.tfstate b/pkg/command/testdata/backend-change-multi-default-to-single/local-state.tfstate new file mode 100644 index 00000000000..60c275e9028 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-default-to-single/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-default-to-single/main.tf b/pkg/command/testdata/backend-change-multi-default-to-single/main.tf new file mode 100644 index 00000000000..2f67c6f1b27 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-default-to-single/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local-single" { + path = "local-state-2.tfstate" + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate 
b/pkg/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-multi/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-multi-to-multi/local-state.tfstate b/pkg/command/testdata/backend-change-multi-to-multi/local-state.tfstate new file mode 100644 index 00000000000..60c275e9028 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-multi/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-multi/main.tf b/pkg/command/testdata/backend-change-multi-to-multi/main.tf new file mode 100644 index 00000000000..c8d630d533e --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-multi/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + workspace_dir = "envdir-new" + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate new file mode 100644 index 00000000000..2847289f9e7 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change-env2", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git 
a/pkg/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate new file mode 100644 index 00000000000..d42eb03aab6 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change-env1", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-with-default/main.tf b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/main.tf new file mode 100644 index 00000000000..6328a4fb9aa --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local-no-default" { + workspace_dir = "envdir-new" + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate new file mode 100644 index 00000000000..2847289f9e7 --- /dev/null +++ 
b/pkg/command/testdata/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change-env2", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-without-default/main.tf b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/main.tf new file mode 100644 index 00000000000..6328a4fb9aa --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local-no-default" { + workspace_dir = "envdir-new" + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate new file mode 100644 index 00000000000..2847289f9e7 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": 
"backend-change-env2", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-single/.terraform/environment b/pkg/command/testdata/backend-change-multi-to-single/.terraform/environment new file mode 100644 index 00000000000..e5e60109566 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-single/.terraform/environment @@ -0,0 +1 @@ +env1 diff --git a/pkg/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-single/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-multi-to-single/main.tf b/pkg/command/testdata/backend-change-multi-to-single/main.tf new file mode 100644 index 00000000000..2f67c6f1b27 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-single/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local-single" { + path = "local-state-2.tfstate" + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate new file mode 100644 index 00000000000..60c275e9028 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change", + "outputs": { + "foo": { + "type": "string", + 
"value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate b/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate new file mode 100644 index 00000000000..2847289f9e7 --- /dev/null +++ b/pkg/command/testdata/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change-env2", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate new file mode 100644 index 00000000000..be6ed26d987 --- /dev/null +++ b/pkg/command/testdata/backend-change-single-to-single/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local-single", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change-single-to-single/local-state.tfstate b/pkg/command/testdata/backend-change-single-to-single/local-state.tfstate new file mode 100644 index 00000000000..60c275e9028 --- /dev/null +++ b/pkg/command/testdata/backend-change-single-to-single/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change-single-to-single/main.tf b/pkg/command/testdata/backend-change-single-to-single/main.tf new file mode 100644 index 00000000000..2f67c6f1b27 --- /dev/null +++ 
b/pkg/command/testdata/backend-change-single-to-single/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local-single" { + path = "local-state-2.tfstate" + } +} diff --git a/pkg/command/testdata/backend-change/.terraform/terraform.tfstate b/pkg/command/testdata/backend-change/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-change/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-change/local-state.tfstate b/pkg/command/testdata/backend-change/local-state.tfstate new file mode 100644 index 00000000000..60c275e9028 --- /dev/null +++ b/pkg/command/testdata/backend-change/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-change", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-change/main.tf b/pkg/command/testdata/backend-change/main.tf new file mode 100644 index 00000000000..0277003a5f9 --- /dev/null +++ b/pkg/command/testdata/backend-change/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state-2.tfstate" + } +} diff --git a/pkg/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate b/pkg/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate new file mode 100644 index 00000000000..1e8c0a17d44 --- /dev/null +++ b/pkg/command/testdata/backend-changed-with-legacy/.terraform/terraform.tfstate @@ -0,0 +1,28 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": 
{ + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "remote": { + "type": "local", + "config": { + "path": "local-state-old.tfstate" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-changed-with-legacy/local-state-old.tfstate b/pkg/command/testdata/backend-changed-with-legacy/local-state-old.tfstate new file mode 100644 index 00000000000..e8cb7d8166a --- /dev/null +++ b/pkg/command/testdata/backend-changed-with-legacy/local-state-old.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "legacy" +} diff --git a/pkg/command/testdata/backend-changed-with-legacy/local-state.tfstate b/pkg/command/testdata/backend-changed-with-legacy/local-state.tfstate new file mode 100644 index 00000000000..a3b08cacaa3 --- /dev/null +++ b/pkg/command/testdata/backend-changed-with-legacy/local-state.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "configured" +} diff --git a/pkg/command/testdata/backend-changed-with-legacy/main.tf b/pkg/command/testdata/backend-changed-with-legacy/main.tf new file mode 100644 index 00000000000..0277003a5f9 --- /dev/null +++ b/pkg/command/testdata/backend-changed-with-legacy/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state-2.tfstate" + } +} diff --git a/pkg/command/testdata/backend-from-state/terraform.tfstate b/pkg/command/testdata/backend-from-state/terraform.tfstate new file mode 100644 index 00000000000..091ecc19a63 --- /dev/null +++ b/pkg/command/testdata/backend-from-state/terraform.tfstate @@ -0,0 +1,10 @@ +{ + "version": 3, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configured", + "backend": { + "type": "inmem", + "config": {} + } +} diff --git a/pkg/command/testdata/backend-inmem-locked/main.tf b/pkg/command/testdata/backend-inmem-locked/main.tf new file 
mode 100644 index 00000000000..9fb065d7ef9 --- /dev/null +++ b/pkg/command/testdata/backend-inmem-locked/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "inmem" { + lock_id = "2b6a6738-5dd5-50d6-c0ae-f6352977666b" + } +} diff --git a/pkg/command/testdata/backend-new-interp/main.tf b/pkg/command/testdata/backend-new-interp/main.tf new file mode 100644 index 00000000000..136d0f3baa6 --- /dev/null +++ b/pkg/command/testdata/backend-new-interp/main.tf @@ -0,0 +1,7 @@ +variable "foo" { default = "bar" } + +terraform { + backend "local" { + path = "${var.foo}" + } +} diff --git a/pkg/command/testdata/backend-new-legacy/.terraform/terraform.tfstate b/pkg/command/testdata/backend-new-legacy/.terraform/terraform.tfstate new file mode 100644 index 00000000000..481edc635b9 --- /dev/null +++ b/pkg/command/testdata/backend-new-legacy/.terraform/terraform.tfstate @@ -0,0 +1,21 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "remote": { + "type": "local", + "config": { + "path": "local-state-old.tfstate" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-new-legacy/local-state-old.tfstate b/pkg/command/testdata/backend-new-legacy/local-state-old.tfstate new file mode 100644 index 00000000000..5f491bcdec9 --- /dev/null +++ b/pkg/command/testdata/backend-new-legacy/local-state-old.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-new-legacy", + "remote": { + "type": "local", + "config": { + "path": "local-state-old.tfstate" + } + } +} diff --git a/pkg/command/testdata/backend-new-legacy/main.tf b/pkg/command/testdata/backend-new-legacy/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-new-legacy/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git 
a/pkg/command/testdata/backend-new-migrate-existing/local-state.tfstate b/pkg/command/testdata/backend-new-migrate-existing/local-state.tfstate new file mode 100644 index 00000000000..d9c0d27e243 --- /dev/null +++ b/pkg/command/testdata/backend-new-migrate-existing/local-state.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 8, + "lineage": "remote" +} diff --git a/pkg/command/testdata/backend-new-migrate-existing/main.tf b/pkg/command/testdata/backend-new-migrate-existing/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-new-migrate-existing/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-new-migrate-existing/terraform.tfstate b/pkg/command/testdata/backend-new-migrate-existing/terraform.tfstate new file mode 100644 index 00000000000..f90be0a399c --- /dev/null +++ b/pkg/command/testdata/backend-new-migrate-existing/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.8.2", + "serial": 8, + "lineage": "local", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-new-migrate/main.tf b/pkg/command/testdata/backend-new-migrate/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-new-migrate/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-new-migrate/terraform.tfstate b/pkg/command/testdata/backend-new-migrate/terraform.tfstate new file mode 100644 index 00000000000..fe645dedbac --- /dev/null +++ b/pkg/command/testdata/backend-new-migrate/terraform.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 8, + "lineage": "backend-new-migrate", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff 
--git a/pkg/command/testdata/backend-new/main.tf b/pkg/command/testdata/backend-new/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-new/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate b/pkg/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-empty-config/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-plan-backend-empty-config/local-state.tfstate b/pkg/command/testdata/backend-plan-backend-empty-config/local-state.tfstate new file mode 100644 index 00000000000..8f4112c93c8 --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-empty-config/local-state.tfstate @@ -0,0 +1,5 @@ +{ + "version": 4, + "serial": 0, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-backend-empty-config/main.tf b/pkg/command/testdata/backend-plan-backend-empty-config/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-empty-config/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-plan-backend-empty/readme.txt b/pkg/command/testdata/backend-plan-backend-empty/readme.txt new file mode 100644 index 00000000000..e2d6fa2091f --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-empty/readme.txt @@ -0,0 +1 @@ +This 
directory is empty on purpose. diff --git a/pkg/command/testdata/backend-plan-backend-match/local-state.tfstate b/pkg/command/testdata/backend-plan-backend-match/local-state.tfstate new file mode 100644 index 00000000000..8f4112c93c8 --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-match/local-state.tfstate @@ -0,0 +1,5 @@ +{ + "version": 4, + "serial": 0, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-backend-match/readme.txt b/pkg/command/testdata/backend-plan-backend-match/readme.txt new file mode 100644 index 00000000000..b3817536d8e --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-match/readme.txt @@ -0,0 +1 @@ +This directory has no configuration on purpose. diff --git a/pkg/command/testdata/backend-plan-backend-mismatch/local-state.tfstate b/pkg/command/testdata/backend-plan-backend-mismatch/local-state.tfstate new file mode 100644 index 00000000000..3ea735804eb --- /dev/null +++ b/pkg/command/testdata/backend-plan-backend-mismatch/local-state.tfstate @@ -0,0 +1,5 @@ +{ + "version": 4, + "serial": 0, + "lineage": "different" +} diff --git a/pkg/command/testdata/backend-plan-legacy-data/local-state.tfstate b/pkg/command/testdata/backend-plan-legacy-data/local-state.tfstate new file mode 100644 index 00000000000..8f4112c93c8 --- /dev/null +++ b/pkg/command/testdata/backend-plan-legacy-data/local-state.tfstate @@ -0,0 +1,5 @@ +{ + "version": 4, + "serial": 0, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-legacy-data/main.tf b/pkg/command/testdata/backend-plan-legacy-data/main.tf new file mode 100644 index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/backend-plan-legacy-data/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/backend-plan-legacy-data/state.tfstate b/pkg/command/testdata/backend-plan-legacy-data/state.tfstate new file mode 100644 index 00000000000..b5e51935298 --- /dev/null +++ b/pkg/command/testdata/backend-plan-legacy-data/state.tfstate 
@@ -0,0 +1,11 @@ +{ + "version": 4, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "remote": { + "type": "local", + "config": { + "path": "local-state.tfstate" + } + } +} diff --git a/pkg/command/testdata/backend-plan-legacy/readme.txt b/pkg/command/testdata/backend-plan-legacy/readme.txt new file mode 100644 index 00000000000..08c2a3505e0 --- /dev/null +++ b/pkg/command/testdata/backend-plan-legacy/readme.txt @@ -0,0 +1 @@ +No configs on purpose diff --git a/pkg/command/testdata/backend-plan-local-match/main.tf b/pkg/command/testdata/backend-plan-local-match/main.tf new file mode 100644 index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-match/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/backend-plan-local-match/terraform.tfstate b/pkg/command/testdata/backend-plan-local-match/terraform.tfstate new file mode 100644 index 00000000000..55a5b747ff7 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-match/terraform.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-local-mismatch-lineage/main.tf b/pkg/command/testdata/backend-plan-local-mismatch-lineage/main.tf new file mode 100644 index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-mismatch-lineage/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate b/pkg/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate new file mode 100644 index 00000000000..55a5b747ff7 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-mismatch-lineage/terraform.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-local-newer/main.tf b/pkg/command/testdata/backend-plan-local-newer/main.tf new file mode 100644 
index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-newer/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/backend-plan-local-newer/terraform.tfstate b/pkg/command/testdata/backend-plan-local-newer/terraform.tfstate new file mode 100644 index 00000000000..e7ff8f6612c --- /dev/null +++ b/pkg/command/testdata/backend-plan-local-newer/terraform.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 10, + "lineage": "hello" +} diff --git a/pkg/command/testdata/backend-plan-local/main.tf b/pkg/command/testdata/backend-plan-local/main.tf new file mode 100644 index 00000000000..fec56017dc1 --- /dev/null +++ b/pkg/command/testdata/backend-plan-local/main.tf @@ -0,0 +1 @@ +# Hello diff --git a/pkg/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate b/pkg/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate new file mode 100644 index 00000000000..1e8c0a17d44 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged-with-legacy/.terraform/terraform.tfstate @@ -0,0 +1,28 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "remote": { + "type": "local", + "config": { + "path": "local-state-old.tfstate" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate b/pkg/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate new file mode 100644 index 00000000000..59c7336a321 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged-with-legacy/local-state-old.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend-unchanged-with-legacy" +} diff --git 
a/pkg/command/testdata/backend-unchanged-with-legacy/local-state.tfstate b/pkg/command/testdata/backend-unchanged-with-legacy/local-state.tfstate new file mode 100644 index 00000000000..a3b08cacaa3 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged-with-legacy/local-state.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "configured" +} diff --git a/pkg/command/testdata/backend-unchanged-with-legacy/main.tf b/pkg/command/testdata/backend-unchanged-with-legacy/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged-with-legacy/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-unchanged/.terraform/terraform.tfstate b/pkg/command/testdata/backend-unchanged/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-unchanged/local-state.tfstate b/pkg/command/testdata/backend-unchanged/local-state.tfstate new file mode 100644 index 00000000000..3e61bf44fb9 --- /dev/null +++ b/pkg/command/testdata/backend-unchanged/local-state.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "configuredUnchanged" +} diff --git a/pkg/command/testdata/backend-unchanged/main.tf b/pkg/command/testdata/backend-unchanged/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ 
b/pkg/command/testdata/backend-unchanged/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate b/pkg/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate new file mode 100644 index 00000000000..1e8c0a17d44 --- /dev/null +++ b/pkg/command/testdata/backend-unset-with-legacy/.terraform/terraform.tfstate @@ -0,0 +1,28 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "remote": { + "type": "local", + "config": { + "path": "local-state-old.tfstate" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-unset-with-legacy/local-state-old.tfstate b/pkg/command/testdata/backend-unset-with-legacy/local-state-old.tfstate new file mode 100644 index 00000000000..e8cb7d8166a --- /dev/null +++ b/pkg/command/testdata/backend-unset-with-legacy/local-state-old.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "legacy" +} diff --git a/pkg/command/testdata/backend-unset-with-legacy/local-state.tfstate b/pkg/command/testdata/backend-unset-with-legacy/local-state.tfstate new file mode 100644 index 00000000000..1ea457cc1b1 --- /dev/null +++ b/pkg/command/testdata/backend-unset-with-legacy/local-state.tfstate @@ -0,0 +1,6 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "backend" +} diff --git a/pkg/command/testdata/backend-unset-with-legacy/main.tf b/pkg/command/testdata/backend-unset-with-legacy/main.tf new file mode 100644 index 00000000000..0422cd4b622 --- /dev/null +++ b/pkg/command/testdata/backend-unset-with-legacy/main.tf @@ -0,0 +1 @@ +# Empty, we're unsetting diff --git 
a/pkg/command/testdata/backend-unset/.terraform/terraform.tfstate b/pkg/command/testdata/backend-unset/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/backend-unset/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/backend-unset/local-state.tfstate b/pkg/command/testdata/backend-unset/local-state.tfstate new file mode 100644 index 00000000000..45964a3866e --- /dev/null +++ b/pkg/command/testdata/backend-unset/local-state.tfstate @@ -0,0 +1,12 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 7, + "lineage": "configuredUnset", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/backend-unset/main.tf b/pkg/command/testdata/backend-unset/main.tf new file mode 100644 index 00000000000..3571c40e36f --- /dev/null +++ b/pkg/command/testdata/backend-unset/main.tf @@ -0,0 +1 @@ +# Empty, unset! 
diff --git a/pkg/command/testdata/command-check-required-version/main.tf b/pkg/command/testdata/command-check-required-version/main.tf new file mode 100644 index 00000000000..f3f71a6ead3 --- /dev/null +++ b/pkg/command/testdata/command-check-required-version/main.tf @@ -0,0 +1,7 @@ +terraform { + required_version = "~> 0.9.0" +} + +terraform { + required_version = ">= 0.13.0" +} diff --git a/pkg/command/testdata/console-multiline-vars/main.tf b/pkg/command/testdata/console-multiline-vars/main.tf new file mode 100644 index 00000000000..4d230ac3481 --- /dev/null +++ b/pkg/command/testdata/console-multiline-vars/main.tf @@ -0,0 +1,13 @@ +variable "bar" { + default = "baz" +} + +variable "foo" {} + +variable "counts" { + type = map(any) + default = { + "lalala" = 1, + "lololo" = 2, + } +} diff --git a/pkg/command/testdata/empty-file b/pkg/command/testdata/empty-file new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/empty/README b/pkg/command/testdata/empty/README new file mode 100644 index 00000000000..8f55f302df3 --- /dev/null +++ b/pkg/command/testdata/empty/README @@ -0,0 +1,2 @@ +This directory is intentionally empty, for testing any specialized error +messages that deal with empty configuration directories. diff --git a/pkg/command/testdata/fmt/general_in.tf b/pkg/command/testdata/fmt/general_in.tf new file mode 100644 index 00000000000..94db1893fdb --- /dev/null +++ b/pkg/command/testdata/fmt/general_in.tf @@ -0,0 +1,53 @@ +# This test case is intended to cover many of the main formatting +# rules of "terraform fmt" at once. It's fine to add new stuff in +# here, but you can also add other _in.tf/_out.tf pairs in the +# same directory if you want to test something complicated that, +# for example, requires specific nested context. +# +# The input file of this test intentionally has strange whitespace +# alignment, because the goal is to see the fmt command fix it. 
+# If you're applying batch formatting to all .tf files in the +# repository (or similar), be sure to skip this one to avoid +# invalidating the test. + +terraform { +required_providers { +foo = { version = "1.0.0" } +barbaz = { + version = "2.0.0" +} +} +} + +variable instance_type { + +} + +resource foo_instance foo { + instance_type = "${var.instance_type}" +} + +resource foo_instance "bar" { + instance_type = "${var.instance_type}-2" +} + +resource "foo_instance" /* ... */ "baz" { + instance_type = "${var.instance_type}${var.instance_type}" + + beep boop {} + beep blep { + thingy = "${var.instance_type}" + } +} + + provider "" { +} + +locals { + name = "${contains(["foo"], var.my_var) ? "${var.my_var}-bar" : + contains(["baz"], var.my_var) ? "baz-${var.my_var}" : + file("ERROR: unsupported type ${var.my_var}")}" + wrapped = "${(var.my_var == null ? 1 : + var.your_var == null ? 2 : + 3)}" +} diff --git a/pkg/command/testdata/fmt/general_out.tf b/pkg/command/testdata/fmt/general_out.tf new file mode 100644 index 00000000000..1fe6b5b3cb9 --- /dev/null +++ b/pkg/command/testdata/fmt/general_out.tf @@ -0,0 +1,53 @@ +# This test case is intended to cover many of the main formatting +# rules of "terraform fmt" at once. It's fine to add new stuff in +# here, but you can also add other _in.tf/_out.tf pairs in the +# same directory if you want to test something complicated that, +# for example, requires specific nested context. +# +# The input file of this test intentionally has strange whitespace +# alignment, because the goal is to see the fmt command fix it. +# If you're applying batch formatting to all .tf files in the +# repository (or similar), be sure to skip this one to avoid +# invalidating the test. 
+ +terraform { + required_providers { + foo = { version = "1.0.0" } + barbaz = { + version = "2.0.0" + } + } +} + +variable "instance_type" { + +} + +resource "foo_instance" "foo" { + instance_type = var.instance_type +} + +resource "foo_instance" "bar" { + instance_type = "${var.instance_type}-2" +} + +resource "foo_instance" "baz" { + instance_type = "${var.instance_type}${var.instance_type}" + + beep "boop" {} + beep "blep" { + thingy = var.instance_type + } +} + +provider "" { +} + +locals { + name = (contains(["foo"], var.my_var) ? "${var.my_var}-bar" : + contains(["baz"], var.my_var) ? "baz-${var.my_var}" : + file("ERROR: unsupported type ${var.my_var}")) + wrapped = (var.my_var == null ? 1 : + var.your_var == null ? 2 : + 3) +} diff --git a/pkg/command/testdata/fmt/variable_type_in.tf b/pkg/command/testdata/fmt/variable_type_in.tf new file mode 100644 index 00000000000..3d6781202c8 --- /dev/null +++ b/pkg/command/testdata/fmt/variable_type_in.tf @@ -0,0 +1,57 @@ +variable "a" { + type = string +} + +variable "b" { + type = list +} + +variable "c" { + type = map +} + +variable "d" { + type = set +} + +variable "e" { + type = "string" +} + +variable "f" { + type = "list" +} + +variable "g" { + type = "map" +} + +variable "h" { + type = object({}) +} + +variable "i" { + type = object({ + foo = string + }) +} + +variable "j" { + type = tuple([]) +} + +variable "k" { + type = tuple([number]) +} + +variable "l" { + type = list(string) +} + +variable "m" { + type = list( + object({ + foo = bool + }) + ) +} diff --git a/pkg/command/testdata/fmt/variable_type_out.tf b/pkg/command/testdata/fmt/variable_type_out.tf new file mode 100644 index 00000000000..f4f2df2923d --- /dev/null +++ b/pkg/command/testdata/fmt/variable_type_out.tf @@ -0,0 +1,57 @@ +variable "a" { + type = string +} + +variable "b" { + type = list(any) +} + +variable "c" { + type = map(any) +} + +variable "d" { + type = set(any) +} + +variable "e" { + type = string +} + +variable "f" { + type = 
list(string) +} + +variable "g" { + type = map(string) +} + +variable "h" { + type = object({}) +} + +variable "i" { + type = object({ + foo = string + }) +} + +variable "j" { + type = tuple([]) +} + +variable "k" { + type = tuple([number]) +} + +variable "l" { + type = list(string) +} + +variable "m" { + type = list( + object({ + foo = bool + }) + ) +} diff --git a/pkg/command/testdata/get/foo/main.tf b/pkg/command/testdata/get/foo/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/get/main.tf b/pkg/command/testdata/get/main.tf new file mode 100644 index 00000000000..0ce1c38d3ba --- /dev/null +++ b/pkg/command/testdata/get/main.tf @@ -0,0 +1,3 @@ +module "foo" { + source = "./foo" +} diff --git a/pkg/command/testdata/graph/main.tf b/pkg/command/testdata/graph/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/command/testdata/graph/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/import-missing-resource-config/main.tf b/pkg/command/testdata/import-missing-resource-config/main.tf new file mode 100644 index 00000000000..d644bad3191 --- /dev/null +++ b/pkg/command/testdata/import-missing-resource-config/main.tf @@ -0,0 +1,5 @@ +provider "test" { + +} + +# No resource block present, so import fails diff --git a/pkg/command/testdata/import-module-input-variable/child/main.tf b/pkg/command/testdata/import-module-input-variable/child/main.tf new file mode 100644 index 00000000000..6327b7c1e86 --- /dev/null +++ b/pkg/command/testdata/import-module-input-variable/child/main.tf @@ -0,0 +1,11 @@ +variable "foo" { + default = {} +} + +locals { + baz = var.foo.bar.baz +} + +resource "test_instance" "foo" { + foo = local.baz +} diff --git a/pkg/command/testdata/import-module-input-variable/main.tf b/pkg/command/testdata/import-module-input-variable/main.tf new file mode 100644 index 00000000000..13334c11e1d --- /dev/null +++ 
b/pkg/command/testdata/import-module-input-variable/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + default = {} +} + +module "child" { + source = "./child" + foo = var.foo +} diff --git a/pkg/command/testdata/import-module-input-variable/terraform.tfvars b/pkg/command/testdata/import-module-input-variable/terraform.tfvars new file mode 100644 index 00000000000..39ab5cb5dae --- /dev/null +++ b/pkg/command/testdata/import-module-input-variable/terraform.tfvars @@ -0,0 +1 @@ +foo = { bar = { baz = true } } diff --git a/pkg/command/testdata/import-module-var-file/child/main.tf b/pkg/command/testdata/import-module-var-file/child/main.tf new file mode 100644 index 00000000000..6a4ae0d096e --- /dev/null +++ b/pkg/command/testdata/import-module-var-file/child/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "test_instance" "foo" { + foo = var.foo +} diff --git a/pkg/command/testdata/import-module-var-file/main.tf b/pkg/command/testdata/import-module-var-file/main.tf new file mode 100644 index 00000000000..222348f9f16 --- /dev/null +++ b/pkg/command/testdata/import-module-var-file/main.tf @@ -0,0 +1,6 @@ +variable "foo" {} + +module "child" { + source = "./child" + foo = var.foo +} diff --git a/pkg/command/testdata/import-module-var-file/terraform.tfvars b/pkg/command/testdata/import-module-var-file/terraform.tfvars new file mode 100644 index 00000000000..5abc475eb91 --- /dev/null +++ b/pkg/command/testdata/import-module-var-file/terraform.tfvars @@ -0,0 +1 @@ +foo = "bar" diff --git a/pkg/command/testdata/import-provider-aliased/main.tf b/pkg/command/testdata/import-provider-aliased/main.tf new file mode 100644 index 00000000000..9ef6de2ce45 --- /dev/null +++ b/pkg/command/testdata/import-provider-aliased/main.tf @@ -0,0 +1,8 @@ +provider "test" { + foo = "bar" + + alias = "alias" +} + +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/import-provider-datasource/main.tf b/pkg/command/testdata/import-provider-datasource/main.tf new file mode 100644 
index 00000000000..3b9fa3a3e83 --- /dev/null +++ b/pkg/command/testdata/import-provider-datasource/main.tf @@ -0,0 +1,13 @@ +provider "test" { + foo = data.test_data.key.id +} + +provider "test" { + alias = "credentials" +} + +data "test_data" "key" { + provider = test.credentials +} + +resource "test_instance" "foo" {} diff --git a/pkg/command/testdata/import-provider-implicit/main.tf b/pkg/command/testdata/import-provider-implicit/main.tf new file mode 100644 index 00000000000..02ffc5bc775 --- /dev/null +++ b/pkg/command/testdata/import-provider-implicit/main.tf @@ -0,0 +1,4 @@ +# Declaring this resource implies that we depend on the +# "test" provider, making it available for import. +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/import-provider-invalid/main.tf b/pkg/command/testdata/import-provider-invalid/main.tf new file mode 100644 index 00000000000..c156850d3da --- /dev/null +++ b/pkg/command/testdata/import-provider-invalid/main.tf @@ -0,0 +1,15 @@ +terraform { + backend "local" { + path = "imported.tfstate" + } +} + +provider "test" { + foo = "bar" +} + +resource "test_instance" "foo" { +} + +resource "unknown_instance" "baz" { +} diff --git a/pkg/command/testdata/import-provider-remote-state/main.tf b/pkg/command/testdata/import-provider-remote-state/main.tf new file mode 100644 index 00000000000..23ebfb4c62b --- /dev/null +++ b/pkg/command/testdata/import-provider-remote-state/main.tf @@ -0,0 +1,12 @@ +terraform { + backend "local" { + path = "imported.tfstate" + } +} + +provider "test" { + foo = "bar" +} + +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/import-provider-var-default/main.tf b/pkg/command/testdata/import-provider-var-default/main.tf new file mode 100644 index 00000000000..c63b4c063b4 --- /dev/null +++ b/pkg/command/testdata/import-provider-var-default/main.tf @@ -0,0 +1,8 @@ +variable "foo" {} + +provider "test" { + foo = "${var.foo}" +} + +resource "test_instance" "foo" { +} diff --git 
a/pkg/command/testdata/import-provider-var-default/terraform.tfvars b/pkg/command/testdata/import-provider-var-default/terraform.tfvars new file mode 100644 index 00000000000..5abc475eb91 --- /dev/null +++ b/pkg/command/testdata/import-provider-var-default/terraform.tfvars @@ -0,0 +1 @@ +foo = "bar" diff --git a/pkg/command/testdata/import-provider-var-file/blah.tfvars b/pkg/command/testdata/import-provider-var-file/blah.tfvars new file mode 100644 index 00000000000..5abc475eb91 --- /dev/null +++ b/pkg/command/testdata/import-provider-var-file/blah.tfvars @@ -0,0 +1 @@ +foo = "bar" diff --git a/pkg/command/testdata/import-provider-var-file/main.tf b/pkg/command/testdata/import-provider-var-file/main.tf new file mode 100644 index 00000000000..c63b4c063b4 --- /dev/null +++ b/pkg/command/testdata/import-provider-var-file/main.tf @@ -0,0 +1,8 @@ +variable "foo" {} + +provider "test" { + foo = "${var.foo}" +} + +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/import-provider-var/main.tf b/pkg/command/testdata/import-provider-var/main.tf new file mode 100644 index 00000000000..c63b4c063b4 --- /dev/null +++ b/pkg/command/testdata/import-provider-var/main.tf @@ -0,0 +1,8 @@ +variable "foo" {} + +provider "test" { + foo = "${var.foo}" +} + +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/import-provider/main.tf b/pkg/command/testdata/import-provider/main.tf new file mode 100644 index 00000000000..943e8b33f49 --- /dev/null +++ b/pkg/command/testdata/import-provider/main.tf @@ -0,0 +1,6 @@ +provider "test" { + foo = "bar" +} + +resource "test_instance" "foo" { +} diff --git a/pkg/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ 
b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/init-backend-config-file-change-migrate-existing/input.config b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/input.config new file mode 100644 index 00000000000..6cd14f4a3d0 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/input.config @@ -0,0 +1 @@ +path = "hello" diff --git a/pkg/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate new file mode 100644 index 00000000000..ce8d954f497 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/local-state.tfstate @@ -0,0 +1,21 @@ +{ + "version": 3, + "terraform_version": "0.8.2", + "serial": 8, + "lineage": "local", + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/init-backend-config-file-change-migrate-existing/main.tf b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change-migrate-existing/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate 
b/pkg/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/init-backend-config-file-change/input.config b/pkg/command/testdata/init-backend-config-file-change/input.config new file mode 100644 index 00000000000..6cd14f4a3d0 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change/input.config @@ -0,0 +1 @@ +path = "hello" diff --git a/pkg/command/testdata/init-backend-config-file-change/main.tf b/pkg/command/testdata/init-backend-config-file-change/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file-change/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/init-backend-config-file/backend.config b/pkg/command/testdata/init-backend-config-file/backend.config new file mode 100644 index 00000000000..c3d7524bc71 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file/backend.config @@ -0,0 +1,7 @@ +// the -backend-config flag on init cannot be used to point to a "full" backend +// block +terraform { + backend "local" { + path = "hello" + } +} diff --git a/pkg/command/testdata/init-backend-config-file/input.config b/pkg/command/testdata/init-backend-config-file/input.config new file mode 100644 index 00000000000..6cd14f4a3d0 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file/input.config @@ -0,0 +1 @@ +path = "hello" diff --git 
a/pkg/command/testdata/init-backend-config-file/invalid.config b/pkg/command/testdata/init-backend-config-file/invalid.config new file mode 100644 index 00000000000..39f97f5bf92 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file/invalid.config @@ -0,0 +1,2 @@ +path = "hello" +foo = "bar" diff --git a/pkg/command/testdata/init-backend-config-file/main.tf b/pkg/command/testdata/init-backend-config-file/main.tf new file mode 100644 index 00000000000..c08b42fb036 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-file/main.tf @@ -0,0 +1,3 @@ +terraform { + backend "local" {} +} diff --git a/pkg/command/testdata/init-backend-config-kv/main.tf b/pkg/command/testdata/init-backend-config-kv/main.tf new file mode 100644 index 00000000000..c08b42fb036 --- /dev/null +++ b/pkg/command/testdata/init-backend-config-kv/main.tf @@ -0,0 +1,3 @@ +terraform { + backend "local" {} +} diff --git a/pkg/command/testdata/init-backend-empty/main.tf b/pkg/command/testdata/init-backend-empty/main.tf new file mode 100644 index 00000000000..7f62e0e1972 --- /dev/null +++ b/pkg/command/testdata/init-backend-empty/main.tf @@ -0,0 +1,4 @@ +terraform { + backend "local" { + } +} diff --git a/pkg/command/testdata/init-backend-http/main.tf b/pkg/command/testdata/init-backend-http/main.tf new file mode 100644 index 00000000000..4ca44e9b5b4 --- /dev/null +++ b/pkg/command/testdata/init-backend-http/main.tf @@ -0,0 +1,4 @@ +terraform { + backend "http" { + } +} diff --git a/pkg/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate b/pkg/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/init-backend-migrate-while-locked/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, 
+ "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/init-backend-migrate-while-locked/input.config b/pkg/command/testdata/init-backend-migrate-while-locked/input.config new file mode 100644 index 00000000000..6cd14f4a3d0 --- /dev/null +++ b/pkg/command/testdata/init-backend-migrate-while-locked/input.config @@ -0,0 +1 @@ +path = "hello" diff --git a/pkg/command/testdata/init-backend-migrate-while-locked/main.tf b/pkg/command/testdata/init-backend-migrate-while-locked/main.tf new file mode 100644 index 00000000000..bea8e789f8b --- /dev/null +++ b/pkg/command/testdata/init-backend-migrate-while-locked/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment new file mode 100644 index 00000000000..5716ca5987c --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/environment @@ -0,0 +1 @@ +bar diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate new file mode 100644 index 00000000000..19a90cc6b89 --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 2, + "lineage": "2f3864a6-1d3e-1999-0f84-36cdb61179d3", + "backend": { + "type": "local", + "config": { + "path": null, + "workspace_dir": null + }, + "hash": 666019178 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git 
a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf new file mode 100644 index 00000000000..da6f209e14e --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/main.tf @@ -0,0 +1,7 @@ +terraform { + backend "local" {} +} + +output "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate new file mode 100644 index 00000000000..47de0a47e7d --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate @@ -0,0 +1,13 @@ +{ + "version": 4, + "terraform_version": "1.1.0", + "serial": 1, + "lineage": "cc4bb587-aa35-87ad-b3b7-7abdb574f2a1", + "outputs": { + "foo": { + "value": "bar", + "type": "string" + } + }, + "resources": [] +} diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate new file mode 100644 index 00000000000..70021d04ad4 --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-multi/terraform.tfstate.d/foo/terraform.tfstate @@ -0,0 +1,13 @@ +{ + "version": 4, + "terraform_version": "1.1.0", + "serial": 1, + "lineage": "8ad3c77d-51aa-d90a-4f12-176f538b6e8b", + "outputs": { + "foo": { + "value": "bar", + "type": "string" + } + }, + "resources": [] +} diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment new file mode 100644 index 00000000000..5716ca5987c --- /dev/null +++ 
b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/environment @@ -0,0 +1 @@ +bar diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate new file mode 100644 index 00000000000..19a90cc6b89 --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 2, + "lineage": "2f3864a6-1d3e-1999-0f84-36cdb61179d3", + "backend": { + "type": "local", + "config": { + "path": null, + "workspace_dir": null + }, + "hash": 666019178 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf new file mode 100644 index 00000000000..da6f209e14e --- /dev/null +++ b/pkg/command/testdata/init-backend-selected-workspace-doesnt-exist-single/main.tf @@ -0,0 +1,7 @@ +terraform { + backend "local" {} +} + +output "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/init-backend/main.tf b/pkg/command/testdata/init-backend/main.tf new file mode 100644 index 00000000000..a6bafdab8d2 --- /dev/null +++ b/pkg/command/testdata/init-backend/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "foo" + } +} diff --git a/pkg/command/testdata/init-check-required-version-first-module/main.tf b/pkg/command/testdata/init-check-required-version-first-module/main.tf new file mode 100644 index 00000000000..ba846846994 --- /dev/null +++ b/pkg/command/testdata/init-check-required-version-first-module/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/pkg/command/testdata/init-check-required-version-first-module/mod/main.tf 
b/pkg/command/testdata/init-check-required-version-first-module/mod/main.tf new file mode 100644 index 00000000000..ab311d06695 --- /dev/null +++ b/pkg/command/testdata/init-check-required-version-first-module/mod/main.tf @@ -0,0 +1,17 @@ +terraform { + required_version = ">200.0.0" + + bad { + block = "false" + } + + required_providers { + bang = { + oops = "boom" + } + } +} + +nope { + boom {} +} diff --git a/pkg/command/testdata/init-check-required-version-first/main.tf b/pkg/command/testdata/init-check-required-version-first/main.tf new file mode 100644 index 00000000000..ab311d06695 --- /dev/null +++ b/pkg/command/testdata/init-check-required-version-first/main.tf @@ -0,0 +1,17 @@ +terraform { + required_version = ">200.0.0" + + bad { + block = "false" + } + + required_providers { + bang = { + oops = "boom" + } + } +} + +nope { + boom {} +} diff --git a/pkg/command/testdata/init-check-required-version/main.tf b/pkg/command/testdata/init-check-required-version/main.tf new file mode 100644 index 00000000000..00725e8991f --- /dev/null +++ b/pkg/command/testdata/init-check-required-version/main.tf @@ -0,0 +1,7 @@ +terraform { + required_version = "~> 0.9.0" +} + +terraform { + required_version = ">= 0.13.0" +} diff --git a/pkg/command/testdata/init-cloud-simple/init-cloud-simple.tf b/pkg/command/testdata/init-cloud-simple/init-cloud-simple.tf new file mode 100644 index 00000000000..2493abe6b08 --- /dev/null +++ b/pkg/command/testdata/init-cloud-simple/init-cloud-simple.tf @@ -0,0 +1,13 @@ +# This is a simple configuration with Terraform Cloud mode minimally +# activated, but it's suitable only for testing things that we can exercise +# without actually accessing Terraform Cloud, such as checking of invalid +# command-line options to "terraform init". 
+ +terraform { + cloud { + organization = "PLACEHOLDER" + workspaces { + name = "PLACEHOLDER" + } + } +} diff --git a/pkg/command/testdata/init-get-provider-detected-duplicate/child/main.tf b/pkg/command/testdata/init-get-provider-detected-duplicate/child/main.tf new file mode 100644 index 00000000000..f2cea5e5b96 --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-duplicate/child/main.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + dupechild = { + source = "hashicorp/bar" + } + } +} + +// This will try to install hashicorp/foo +provider foo {} diff --git a/pkg/command/testdata/init-get-provider-detected-duplicate/main.tf b/pkg/command/testdata/init-get-provider-detected-duplicate/main.tf new file mode 100644 index 00000000000..4b6e9f22fa0 --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-duplicate/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + // This will conflict with the child modules hashicorp/foo + source = "opentofu/foo" + } + dupe = { + // This should not conflict with the child modules hashicorp/bar + source = "bar" + } + } +} +module "some-baz-stuff" { + source = "./child" +} diff --git a/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf b/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf new file mode 100644 index 00000000000..ae4c998a2ef --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/dicerolls/terraform-random-bar-1.0.0/main.tf @@ -0,0 +1,7 @@ +// This will try to install hashicorp/baz, fail, and then suggest +// terraform-providers/baz +provider baz {} + +output "d6" { + value = 4 // chosen by fair dice roll, guaranteed to be random +} diff --git a/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json 
b/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json new file mode 100644 index 00000000000..8ee988105ac --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-legacy/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"dicerolls","Source":"acme/bar/random","Version":"1.0.0","Dir":".terraform/modules/dicerolls/terraform-random-bar-1.0.0"},{"Key":"","Source":"","Dir":"."}]} diff --git a/pkg/command/testdata/init-get-provider-detected-legacy/child/main.tf b/pkg/command/testdata/init-get-provider-detected-legacy/child/main.tf new file mode 100644 index 00000000000..6c8b883f425 --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-legacy/child/main.tf @@ -0,0 +1,3 @@ +// This will try to install hashicorp/baz, fail, and then suggest +// terraform-providers/baz +provider baz {} diff --git a/pkg/command/testdata/init-get-provider-detected-legacy/main.tf b/pkg/command/testdata/init-get-provider-detected-legacy/main.tf new file mode 100644 index 00000000000..4ba7ef4d32e --- /dev/null +++ b/pkg/command/testdata/init-get-provider-detected-legacy/main.tf @@ -0,0 +1,18 @@ +// This should result in installing hashicorp/foo +provider foo {} + +// This will try to install hashicorp/baz, fail, and then suggest +// terraform-providers/baz +provider baz {} + +// This will try to install hashicrop/frob, fail, find no suggestions, and +// result in an error +provider frob {} + +module "some-baz-stuff" { + source = "./child" +} + +module "dicerolls" { + source = "acme/bar/random" +} diff --git a/pkg/command/testdata/init-get-provider-invalid-package/main.tf b/pkg/command/testdata/init-get-provider-invalid-package/main.tf new file mode 100644 index 00000000000..5f93ef7fc8c --- /dev/null +++ b/pkg/command/testdata/init-get-provider-invalid-package/main.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + package = { + source = "invalid/package" + version = "1.0.0" + } + } +} diff --git 
a/pkg/command/testdata/init-get-provider-legacy-from-state/main.tf b/pkg/command/testdata/init-get-provider-legacy-from-state/main.tf new file mode 100644 index 00000000000..36651819251 --- /dev/null +++ b/pkg/command/testdata/init-get-provider-legacy-from-state/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + alpha = { + source = "acme/alpha" + version = "1.2.3" + } + } +} + +resource "alpha_resource" "a" { + index = 1 +} diff --git a/pkg/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate b/pkg/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate new file mode 100644 index 00000000000..33b2fb0c5d4 --- /dev/null +++ b/pkg/command/testdata/init-get-provider-legacy-from-state/terraform.tfstate @@ -0,0 +1,25 @@ +{ + "version": 4, + "terraform_version": "0.12.28", + "serial": 1, + "lineage": "481bf512-f245-4c60-42dc-7005f4fa9181", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "alpha_resource", + "name": "a", + "provider": "provider.alpha", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "a", + "index": 1 + }, + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/command/testdata/init-get-provider-source/main.tf b/pkg/command/testdata/init-get-provider-source/main.tf new file mode 100644 index 00000000000..a434a9788ec --- /dev/null +++ b/pkg/command/testdata/init-get-provider-source/main.tf @@ -0,0 +1,21 @@ +provider alpha { + version = "1.2.3" +} + +resource beta_resource b {} +resource gamma_resource g {} + +terraform { + required_providers { + alpha = { + source = "acme/alpha" + } + beta = { + source = "registry.example.com/acme/beta" + } + } +} + +provider beta { + region = "foo" +} diff --git a/pkg/command/testdata/init-get-providers/main.tf b/pkg/command/testdata/init-get-providers/main.tf new file mode 100644 index 00000000000..aa288acaa74 --- /dev/null +++ b/pkg/command/testdata/init-get-providers/main.tf @@ -0,0 +1,14 @@ +provider "exact" { + version = "1.2.3" 
+} + +provider "greater-than" { + version = ">= 2.3.3" +} + +provider "between" { + # The second constraint here intentionally has + # no space after the < operator to make sure + # that we can parse that form too. + version = "> 1.0.0 , <3.0.0" +} diff --git a/pkg/command/testdata/init-get/foo/main.tf b/pkg/command/testdata/init-get/foo/main.tf new file mode 100644 index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/init-get/foo/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/init-get/main.tf b/pkg/command/testdata/init-get/main.tf new file mode 100644 index 00000000000..0ce1c38d3ba --- /dev/null +++ b/pkg/command/testdata/init-get/main.tf @@ -0,0 +1,3 @@ +module "foo" { + source = "./foo" +} diff --git a/pkg/command/testdata/init-internal-invalid/main.tf b/pkg/command/testdata/init-internal-invalid/main.tf new file mode 100644 index 00000000000..109242bc9b2 --- /dev/null +++ b/pkg/command/testdata/init-internal-invalid/main.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + nonexist = { + source = "terraform.io/builtin/nonexist" + } + terraform = { + version = "1.2.0" + } + } +} diff --git a/pkg/command/testdata/init-internal/main.tf b/pkg/command/testdata/init-internal/main.tf new file mode 100644 index 00000000000..962d7114bd5 --- /dev/null +++ b/pkg/command/testdata/init-internal/main.tf @@ -0,0 +1 @@ +provider "terraform" {} diff --git a/pkg/command/testdata/init-legacy-rc/main.tf b/pkg/command/testdata/init-legacy-rc/main.tf new file mode 100644 index 00000000000..4b04a89e431 --- /dev/null +++ b/pkg/command/testdata/init-legacy-rc/main.tf @@ -0,0 +1 @@ +provider "legacy" {} diff --git a/pkg/command/testdata/init-module-variable-source/main.tf b/pkg/command/testdata/init-module-variable-source/main.tf new file mode 100644 index 00000000000..6f0d0723b72 --- /dev/null +++ b/pkg/command/testdata/init-module-variable-source/main.tf @@ -0,0 +1,7 @@ +variable "src" { + type = string +} + +module "mod" { + source = var.src 
+} diff --git a/pkg/command/testdata/init-module-variable-source/mod/mod.tf b/pkg/command/testdata/init-module-variable-source/mod/mod.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/init-module-variable-version/main.tf b/pkg/command/testdata/init-module-variable-version/main.tf new file mode 100644 index 00000000000..7fba2664ede --- /dev/null +++ b/pkg/command/testdata/init-module-variable-version/main.tf @@ -0,0 +1,10 @@ +# See internal/initwd/testdata/registry-modules/root.tf for more information on the module required + +variable "modver" { + type = string +} + +module "acctest_root" { + source = "hashicorp/module-installer-acctest/aws" + version = nonsensitive(var.modver) +} diff --git a/pkg/command/testdata/init-provider-lock-file-readonly-add/main.tf b/pkg/command/testdata/init-provider-lock-file-readonly-add/main.tf new file mode 100644 index 00000000000..a706a538382 --- /dev/null +++ b/pkg/command/testdata/init-provider-lock-file-readonly-add/main.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + test = { + version = "1.2.3" + } + foo = { + version = "1.0.0" + } + } +} diff --git a/pkg/command/testdata/init-provider-lock-file/main.tf b/pkg/command/testdata/init-provider-lock-file/main.tf new file mode 100644 index 00000000000..7eed7c5613b --- /dev/null +++ b/pkg/command/testdata/init-provider-lock-file/main.tf @@ -0,0 +1,3 @@ +provider "test" { + version = "1.2.3" +} diff --git a/pkg/command/testdata/init-provider-now-unused/main.tf b/pkg/command/testdata/init-provider-now-unused/main.tf new file mode 100644 index 00000000000..e1f1ec18b93 --- /dev/null +++ b/pkg/command/testdata/init-provider-now-unused/main.tf @@ -0,0 +1,3 @@ +# Intentionally blank, but intended to be used in a test case which +# uses an input lock file which already had an entry for the hashicorp/test +# provider, and should therefore detect it as no longer used. 
diff --git a/pkg/command/testdata/init-providers-lock/main.tf b/pkg/command/testdata/init-providers-lock/main.tf new file mode 100644 index 00000000000..7eed7c5613b --- /dev/null +++ b/pkg/command/testdata/init-providers-lock/main.tf @@ -0,0 +1,3 @@ +provider "test" { + version = "1.2.3" +} diff --git a/pkg/command/testdata/init-registry-module/main.tf b/pkg/command/testdata/init-registry-module/main.tf new file mode 100644 index 00000000000..cc388d7c59d --- /dev/null +++ b/pkg/command/testdata/init-registry-module/main.tf @@ -0,0 +1,4 @@ +module "foo" { + source = "registry.does.not.exist/example_corp/foo/bar" + version = "0.1.0" +} diff --git a/pkg/command/testdata/init-required-providers/main.tf b/pkg/command/testdata/init-required-providers/main.tf new file mode 100644 index 00000000000..20dac12b896 --- /dev/null +++ b/pkg/command/testdata/init-required-providers/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + test = "1.2.3" + source = { + version = "1.2.3" + } + test-beta = { + version = "1.2.4" + } + } +} diff --git a/pkg/command/testdata/init-syntax-invalid-backend-attribute-invalid/main.tf b/pkg/command/testdata/init-syntax-invalid-backend-attribute-invalid/main.tf new file mode 100644 index 00000000000..8e50cf0b0ed --- /dev/null +++ b/pkg/command/testdata/init-syntax-invalid-backend-attribute-invalid/main.tf @@ -0,0 +1,10 @@ + +terraform { + backend "local" { + path = $invalid + } +} + +variable "input" { + type = string +} diff --git a/pkg/command/testdata/init-syntax-invalid-backend-invalid/main.tf b/pkg/command/testdata/init-syntax-invalid-backend-invalid/main.tf new file mode 100644 index 00000000000..4fb3f33692d --- /dev/null +++ b/pkg/command/testdata/init-syntax-invalid-backend-invalid/main.tf @@ -0,0 +1,7 @@ +terraform { + backend "nonexistent" {} +} + +bad_block { +} + diff --git a/pkg/command/testdata/init-syntax-invalid-no-backend/main.tf b/pkg/command/testdata/init-syntax-invalid-no-backend/main.tf new file mode 100644 index 
00000000000..5f1451d26c4 --- /dev/null +++ b/pkg/command/testdata/init-syntax-invalid-no-backend/main.tf @@ -0,0 +1,3 @@ +bad_block { +} + diff --git a/pkg/command/testdata/init-syntax-invalid-with-backend/main.tf b/pkg/command/testdata/init-syntax-invalid-with-backend/main.tf new file mode 100644 index 00000000000..2ea4406cc13 --- /dev/null +++ b/pkg/command/testdata/init-syntax-invalid-with-backend/main.tf @@ -0,0 +1,7 @@ +terraform { + backend "local" {} +} + +bad_block { +} + diff --git a/pkg/command/testdata/init-with-tests-with-module/main.tf b/pkg/command/testdata/init-with-tests-with-module/main.tf new file mode 100644 index 00000000000..2b976525ac0 --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-module/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/init-with-tests-with-module/main.tftest.hcl b/pkg/command/testdata/init-with-tests-with-module/main.tftest.hcl new file mode 100644 index 00000000000..8b0776e1a79 --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-module/main.tftest.hcl @@ -0,0 +1,12 @@ +run "setup" { + module { + source = "./setup" + } +} + +run "test" { + assert { + condition = test_instance.foo.ami == "bar" + error_message = "incorrect value" + } +} diff --git a/pkg/command/testdata/init-with-tests-with-module/setup/main.tf b/pkg/command/testdata/init-with-tests-with-module/setup/main.tf new file mode 100644 index 00000000000..f1017765c50 --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-module/setup/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "baz" { + ami = "baz" +} diff --git a/pkg/command/testdata/init-with-tests-with-provider/main.tf b/pkg/command/testdata/init-with-tests-with-provider/main.tf new file mode 100644 index 00000000000..fe11fd160e1 --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-provider/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + version = "1.0.2" + 
} + } +} + +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/init-with-tests-with-provider/main.tftest.hcl b/pkg/command/testdata/init-with-tests-with-provider/main.tftest.hcl new file mode 100644 index 00000000000..8b0776e1a79 --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-provider/main.tftest.hcl @@ -0,0 +1,12 @@ +run "setup" { + module { + source = "./setup" + } +} + +run "test" { + assert { + condition = test_instance.foo.ami == "bar" + error_message = "incorrect value" + } +} diff --git a/pkg/command/testdata/init-with-tests-with-provider/setup/main.tf b/pkg/command/testdata/init-with-tests-with-provider/setup/main.tf new file mode 100644 index 00000000000..b0d3436f4dd --- /dev/null +++ b/pkg/command/testdata/init-with-tests-with-provider/setup/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + version = "1.0.1" + } + } +} + +resource "test_instance" "baz" { + ami = "baz" +} diff --git a/pkg/command/testdata/init-with-tests/main.tf b/pkg/command/testdata/init-with-tests/main.tf new file mode 100644 index 00000000000..2b976525ac0 --- /dev/null +++ b/pkg/command/testdata/init-with-tests/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/init-with-tests/main.tftest.hcl b/pkg/command/testdata/init-with-tests/main.tftest.hcl new file mode 100644 index 00000000000..b68725876b2 --- /dev/null +++ b/pkg/command/testdata/init-with-tests/main.tftest.hcl @@ -0,0 +1,6 @@ +run "test" { + assert { + condition = test_instance.foo.ami == "bar" + error_message = "incorrect value" + } +} diff --git a/pkg/command/testdata/init/hello.tf b/pkg/command/testdata/init/hello.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/inmem-backend/main.tf b/pkg/command/testdata/inmem-backend/main.tf new file mode 100644 index 00000000000..df9309a5c46 --- /dev/null +++ 
b/pkg/command/testdata/inmem-backend/main.tf @@ -0,0 +1,3 @@ +terraform { + backend "inmem" {} +} diff --git a/pkg/command/testdata/login-oauth-server/main.go b/pkg/command/testdata/login-oauth-server/main.go new file mode 100644 index 00000000000..b4b6ade159f --- /dev/null +++ b/pkg/command/testdata/login-oauth-server/main.go @@ -0,0 +1,72 @@ +//go:build ignore +// +build ignore + +// This file is a helper for those doing _manual_ testing of "tofu login" +// and/or "tofu logout" and want to start up a test OAuth server in a +// separate process for convenience: +// +// go run ./command/testdata/login-oauth-server/main.go :8080 +// +// This is _not_ the main way to use this oauthserver package. For automated +// test code, import it as a normal Go package instead: +// +// import oauthserver "github.com/kubegems/opentofu/pkg/command/testdata/login-oauth-server" + +package main + +import ( + "fmt" + "net" + "net/http" + "os" + + oauthserver "github.com/kubegems/opentofu/pkg/command/testdata/login-oauth-server" +) + +func main() { + if len(os.Args) < 2 { + fmt.Fprintln(os.Stderr, "Usage: go run ./command/testdata/login-oauth-server/main.go ") + os.Exit(1) + } + + host, port, err := net.SplitHostPort(os.Args[1]) + if err != nil { + fmt.Fprintln(os.Stderr, "Invalid address: %s", err) + os.Exit(1) + } + + if host == "" { + host = "127.0.0.1" + } + addr := fmt.Sprintf("%s:%s", host, port) + + fmt.Printf("Will listen on %s...\n", addr) + fmt.Printf( + configExampleFmt, + fmt.Sprintf("http://%s:%s/authz", host, port), + fmt.Sprintf("http://%s:%s/token", host, port), + fmt.Sprintf("http://%s:%s/revoke", host, port), + ) + + server := &http.Server{ + Addr: addr, + Handler: oauthserver.Handler, + } + err = server.ListenAndServe() + fmt.Fprintln(os.Stderr, err.Error()) +} + +const configExampleFmt = ` +host "login-test.example.com" { + services = { + "login.v1" = { + authz = %q + token = %q + client = "placeholder" + grant_types = ["code", "password"] + } + "logout.v1" = %q + } 
+} + +` diff --git a/pkg/command/testdata/login-oauth-server/oauthserver.go b/pkg/command/testdata/login-oauth-server/oauthserver.go new file mode 100644 index 00000000000..de0c80d739e --- /dev/null +++ b/pkg/command/testdata/login-oauth-server/oauthserver.go @@ -0,0 +1,179 @@ +// Package oauthserver is a very simplistic OAuth server used only for +// the testing of the "tofu login" and "tofu logout" commands. +package oauthserver + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "html" + "log" + "net/http" + "net/url" + "strings" +) + +// Handler is an implementation of net/http.Handler that provides a stub +// OAuth server implementation with the following endpoints: +// +// /authz - authorization endpoint +// /token - token endpoint +// /revoke - token revocation (logout) endpoint +// +// The authorization endpoint returns HTML per normal OAuth conventions, but +// it also includes an HTTP header X-Redirect-To giving the same URL that the +// link in the HTML indicates, allowing a non-browser user-agent to traverse +// this robotically in automated tests. 
+var Handler http.Handler + +type handler struct{} + +func (h handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/authz": + h.serveAuthz(resp, req) + case "/token": + h.serveToken(resp, req) + case "/revoke": + h.serveRevoke(resp, req) + default: + resp.WriteHeader(404) + } +} + +func (h handler) serveAuthz(resp http.ResponseWriter, req *http.Request) { + args := req.URL.Query() + if rt := args.Get("response_type"); rt != "code" { + resp.WriteHeader(400) + resp.Write([]byte("wrong response_type")) + log.Printf("/authz: incorrect response type %q", rt) + return + } + redirectURL, err := url.Parse(args.Get("redirect_uri")) + if err != nil { + resp.WriteHeader(400) + resp.Write([]byte(fmt.Sprintf("invalid redirect_uri %s: %s", args.Get("redirect_uri"), err))) + return + } + + state := args.Get("state") + challenge := args.Get("code_challenge") + challengeMethod := args.Get("code_challenge_method") + if challengeMethod == "" { + challengeMethod = "plain" + } + + // NOTE: This is not a suitable implementation for a real OAuth server + // because the code challenge is providing no security whatsoever. This + // is just a simple implementation for this stub server. 
+ code := fmt.Sprintf("%s:%s", challengeMethod, challenge) + + redirectQuery := redirectURL.Query() + redirectQuery.Set("code", code) + if state != "" { + redirectQuery.Set("state", state) + } + redirectURL.RawQuery = redirectQuery.Encode() + + respBody := fmt.Sprintf(`Log In and Consent`, html.EscapeString(redirectURL.String())) + resp.Header().Set("Content-Type", "text/html") + resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(respBody))) + resp.Header().Set("X-Redirect-To", redirectURL.String()) // For robotic clients, using webbrowser.MockLauncher + resp.WriteHeader(200) + resp.Write([]byte(respBody)) +} + +func (h handler) serveToken(resp http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + resp.WriteHeader(405) + log.Printf("/token: unsupported request method %q", req.Method) + return + } + + if err := req.ParseForm(); err != nil { + resp.WriteHeader(500) + log.Printf("/token: error parsing body: %s", err) + return + } + + grantType := req.Form.Get("grant_type") + log.Printf("/token: grant_type is %q", grantType) + switch grantType { + + case "authorization_code": + code := req.Form.Get("code") + codeParts := strings.SplitN(code, ":", 2) + if len(codeParts) != 2 { + log.Printf("/token: invalid code %q", code) + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(400) + resp.Write([]byte(`{"error":"invalid_grant"}`)) + return + } + + codeVerifier := req.Form.Get("code_verifier") + + switch codeParts[0] { + case "plain": + if codeParts[1] != codeVerifier { + log.Printf("/token: incorrect code verifier %q; want %q", codeParts[1], codeVerifier) + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(400) + resp.Write([]byte(`{"error":"invalid_grant"}`)) + return + } + case "S256": + h := sha256.New() + h.Write([]byte(codeVerifier)) + encVerifier := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) + if codeParts[1] != encVerifier { + log.Printf("/token: incorrect code verifier %q; want %q", 
codeParts[1], encVerifier) + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(400) + resp.Write([]byte(`{"error":"invalid_grant"}`)) + return + } + default: + log.Printf("/token: unsupported challenge method %q", codeParts[0]) + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(400) + resp.Write([]byte(`{"error":"invalid_grant"}`)) + return + } + + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"access_token":"good-token","token_type":"bearer"}`)) + log.Println("/token: successful request") + + case "password": + username := req.Form.Get("username") + password := req.Form.Get("password") + + if username == "wrong" || password == "wrong" { + // These special "credentials" allow testing for the error case. + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(400) + resp.Write([]byte(`{"error":"invalid_grant"}`)) + log.Println("/token: 'wrong' credentials") + return + } + + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"access_token":"good-token","token_type":"bearer"}`)) + log.Println("/token: successful request") + + default: + resp.WriteHeader(400) + log.Printf("/token: unsupported grant type %q", grantType) + } +} + +func (h handler) serveRevoke(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(404) +} + +func init() { + Handler = handler{} +} diff --git a/pkg/command/testdata/login-tfe-server/tfeserver.go b/pkg/command/testdata/login-tfe-server/tfeserver.go new file mode 100644 index 00000000000..cb8bb8d27ec --- /dev/null +++ b/pkg/command/testdata/login-tfe-server/tfeserver.go @@ -0,0 +1,62 @@ +// Package tfeserver is a test stub implementing a subset of the TFE API used +// only for the testing of the "tofu login" command. 
+package tfeserver + +import ( + "fmt" + "net/http" + "strings" +) + +const ( + goodToken = "good-token" + accountDetails = `{"data":{"id":"user-abc123","type":"users","attributes":{"username":"testuser","email":"testuser@example.com"}}}` + MOTD = `{"msg":"Welcome to the cloud backend!"}` +) + +// Handler is an implementation of net/http.Handler that provides a stub +// TFE API server implementation with the following endpoints: +// +// /ping - API existence endpoint +// /account/details - current user endpoint +var Handler http.Handler + +type handler struct{} + +func (h handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + resp.Header().Set("Content-Type", "application/vnd.api+json") + switch req.URL.Path { + case "/api/v2/ping": + h.servePing(resp, req) + case "/api/v2/account/details": + h.serveAccountDetails(resp, req) + case "/api/terraform/motd": + h.serveMOTD(resp, req) + default: + fmt.Printf("404 when fetching %s\n", req.URL.String()) + http.Error(resp, `{"errors":[{"status":"404","title":"not found"}]}`, http.StatusNotFound) + } +} + +func (h handler) servePing(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(http.StatusNoContent) +} + +func (h handler) serveAccountDetails(resp http.ResponseWriter, req *http.Request) { + if !strings.Contains(req.Header.Get("Authorization"), goodToken) { + http.Error(resp, `{"errors":[{"status":"401","title":"unauthorized"}]}`, http.StatusUnauthorized) + return + } + + resp.WriteHeader(http.StatusOK) + resp.Write([]byte(accountDetails)) +} + +func (h handler) serveMOTD(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(http.StatusOK) + resp.Write([]byte(MOTD)) +} + +func init() { + Handler = handler{} +} diff --git a/pkg/command/testdata/modules/.terraform/modules/modules.json b/pkg/command/testdata/modules/.terraform/modules/modules.json new file mode 100644 index 00000000000..b812559fd0f --- /dev/null +++ b/pkg/command/testdata/modules/.terraform/modules/modules.json @@ -0,0 
+1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"child","Source":"./child","Dir":"child"},{"Key":"count_child","Source":"./child","Dir":"child"}]} \ No newline at end of file diff --git a/pkg/command/testdata/modules/child/main.tf b/pkg/command/testdata/modules/child/main.tf new file mode 100644 index 00000000000..f059e25f9e9 --- /dev/null +++ b/pkg/command/testdata/modules/child/main.tf @@ -0,0 +1,5 @@ +resource "test_instance" "test" { +} +output "myoutput" { + value = "bar" +} diff --git a/pkg/command/testdata/modules/main.tf b/pkg/command/testdata/modules/main.tf new file mode 100644 index 00000000000..4802b5639cd --- /dev/null +++ b/pkg/command/testdata/modules/main.tf @@ -0,0 +1,11 @@ +locals { + foo = 3 +} + +module "child" { + source = "./child" +} +module "count_child" { + count = 1 + source = "./child" +} \ No newline at end of file diff --git a/pkg/command/testdata/modules/terraform.tfstate b/pkg/command/testdata/modules/terraform.tfstate new file mode 100644 index 00000000000..64337f1b37a --- /dev/null +++ b/pkg/command/testdata/modules/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 7, + "lineage": "9cb740e3-d64d-e53e-a8e4-99b9bcacf24b", + "outputs": {}, + "resources": [ + { + "module": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": {} + } + ] + } + ] + } + \ No newline at end of file diff --git a/pkg/command/testdata/parallelism/main.tf b/pkg/command/testdata/parallelism/main.tf new file mode 100644 index 00000000000..6032c62d154 --- /dev/null +++ b/pkg/command/testdata/parallelism/main.tf @@ -0,0 +1,10 @@ +resource "test0_instance" "foo" {} +resource "test1_instance" "foo" {} +resource "test2_instance" "foo" {} +resource "test3_instance" "foo" {} +resource "test4_instance" "foo" {} +resource "test5_instance" "foo" {} +resource 
"test6_instance" "foo" {} +resource "test7_instance" "foo" {} +resource "test8_instance" "foo" {} +resource "test9_instance" "foo" {} diff --git a/pkg/command/testdata/plan-emptydiff/main.tf b/pkg/command/testdata/plan-emptydiff/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/plan-existing-state/main.tf b/pkg/command/testdata/plan-existing-state/main.tf new file mode 100644 index 00000000000..7b30915731c --- /dev/null +++ b/pkg/command/testdata/plan-existing-state/main.tf @@ -0,0 +1,13 @@ +resource "test_instance" "foo" { + ami = "bar" + + # This is here because at some point it caused a test failure + network_interface { + device_index = 0 + description = "Main network interface" + } +} + +data "test_data_source" "a" { + id = "zzzzz" +} diff --git a/pkg/command/testdata/plan-existing-state/terraform.tfstate b/pkg/command/testdata/plan-existing-state/terraform.tfstate new file mode 100644 index 00000000000..44f62a8d040 --- /dev/null +++ b/pkg/command/testdata/plan-existing-state/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 4, + "terraform_version": "1.6.0", + "serial": 1, + "lineage": "d496625c-bde2-aebc-f5f4-ebbf54eabed2", + "outputs": {}, + "resources": [ + { + "module": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": {} + } + ] + } + ], + "check_results": null +} diff --git a/pkg/command/testdata/plan-fail-condition/main.tf b/pkg/command/testdata/plan-fail-condition/main.tf new file mode 100644 index 00000000000..5cee0ffb5bb --- /dev/null +++ b/pkg/command/testdata/plan-fail-condition/main.tf @@ -0,0 +1,15 @@ +locals { + ami = "bar" +} + +resource "test_instance" "foo" { + ami = local.ami + + lifecycle { + precondition { + // failing condition + condition = local.ami != "bar" + error_message = "ami is bar" + } + } +} diff --git 
a/pkg/command/testdata/plan-import-config-gen/generated.tf.expected b/pkg/command/testdata/plan-import-config-gen/generated.tf.expected new file mode 100644 index 00000000000..790f9f6c73c --- /dev/null +++ b/pkg/command/testdata/plan-import-config-gen/generated.tf.expected @@ -0,0 +1,7 @@ +# __generated__ by OpenTofu +# Please review these resources and move them into your main configuration files. + +# __generated__ by OpenTofu from "bar" +resource "test_instance" "foo" { + ami = null +} diff --git a/pkg/command/testdata/plan-import-config-gen/main.tf b/pkg/command/testdata/plan-import-config-gen/main.tf new file mode 100644 index 00000000000..ea89e71b0f0 --- /dev/null +++ b/pkg/command/testdata/plan-import-config-gen/main.tf @@ -0,0 +1,4 @@ +import { + id = "bar" + to = test_instance.foo +} diff --git a/pkg/command/testdata/plan-invalid/main.tf b/pkg/command/testdata/plan-invalid/main.tf new file mode 100644 index 00000000000..a81c80e914f --- /dev/null +++ b/pkg/command/testdata/plan-invalid/main.tf @@ -0,0 +1,10 @@ +resource "test_instance" "foo" { + count = 5 +} + +resource "test_instance" "bar" { + # This is invalid because timestamp() returns an unknown value during plan, + # but the "count" argument in particular must always be known during plan + # so we can predict how many instances we will operate on. 
+ count = timestamp() +} diff --git a/pkg/command/testdata/plan-out-backend-legacy/main.tf b/pkg/command/testdata/plan-out-backend-legacy/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/command/testdata/plan-out-backend-legacy/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/plan-out-backend/main.tf b/pkg/command/testdata/plan-out-backend/main.tf new file mode 100644 index 00000000000..38ba171da4c --- /dev/null +++ b/pkg/command/testdata/plan-out-backend/main.tf @@ -0,0 +1,8 @@ +terraform { + backend "http" { + } +} + +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/plan-provider-input/main.tf b/pkg/command/testdata/plan-provider-input/main.tf new file mode 100644 index 00000000000..4211ba35246 --- /dev/null +++ b/pkg/command/testdata/plan-provider-input/main.tf @@ -0,0 +1,20 @@ +variable "users" { + default = { + one = "onepw" + two = "twopw" + } +} + +provider "test" { + url = "example.com" + + dynamic "auth" { + for_each = var.users + content { + user = auth.key + password = auth.value + } + } +} + +resource "test_instance" "test" {} \ No newline at end of file diff --git a/pkg/command/testdata/plan-replace/main.tf b/pkg/command/testdata/plan-replace/main.tf new file mode 100644 index 00000000000..efc6729f90d --- /dev/null +++ b/pkg/command/testdata/plan-replace/main.tf @@ -0,0 +1,2 @@ +resource "test_instance" "a" { +} diff --git a/pkg/command/testdata/plan-sensitive-output/main.tf b/pkg/command/testdata/plan-sensitive-output/main.tf new file mode 100644 index 00000000000..87994ae9fcd --- /dev/null +++ b/pkg/command/testdata/plan-sensitive-output/main.tf @@ -0,0 +1,12 @@ +variable "input" { + default = "Hello world" +} + +output "notsensitive" { + value = "${var.input}" +} + +output "sensitive" { + sensitive = true + value = "${var.input}" +} diff --git a/pkg/command/testdata/plan-vars/main.tf 
b/pkg/command/testdata/plan-vars/main.tf new file mode 100644 index 00000000000..005abad095f --- /dev/null +++ b/pkg/command/testdata/plan-vars/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "test_instance" "foo" { + value = "${var.foo}" +} diff --git a/pkg/command/testdata/plan/main.tf b/pkg/command/testdata/plan/main.tf new file mode 100644 index 00000000000..7b30915731c --- /dev/null +++ b/pkg/command/testdata/plan/main.tf @@ -0,0 +1,13 @@ +resource "test_instance" "foo" { + ami = "bar" + + # This is here because at some point it caused a test failure + network_interface { + device_index = 0 + description = "Main network interface" + } +} + +data "test_data_source" "a" { + id = "zzzzz" +} diff --git a/pkg/command/testdata/plan/output.jsonlog b/pkg/command/testdata/plan/output.jsonlog new file mode 100644 index 00000000000..4a54f369302 --- /dev/null +++ b/pkg/command/testdata/plan/output.jsonlog @@ -0,0 +1,5 @@ +{"@level":"info","@message":"Terraform 1.3.0-dev","@module":"tofu.ui","terraform":"1.3.0-dev","type":"version","ui":"1.0"} +{"@level":"info","@message":"data.test_data_source.a: Refreshing...","@module":"tofu.ui","hook":{"resource":{"addr":"data.test_data_source.a","module":"","resource":"data.test_data_source.a","implied_provider":"test","resource_type":"test_data_source","resource_name":"a","resource_key":null},"action":"read"},"type":"apply_start"} +{"@level":"info","@message":"data.test_data_source.a: Refresh complete after 0s [id=zzzzz]","@module":"tofu.ui","hook":{"resource":{"addr":"data.test_data_source.a","module":"","resource":"data.test_data_source.a","implied_provider":"test","resource_type":"test_data_source","resource_name":"a","resource_key":null},"action":"read","id_key":"id","id_value":"zzzzz","elapsed_seconds":0},"type":"apply_complete"} +{"@level":"info","@message":"test_instance.foo: Plan to 
create","@module":"tofu.ui","change":{"resource":{"addr":"test_instance.foo","module":"","resource":"test_instance.foo","implied_provider":"test","resource_type":"test_instance","resource_name":"foo","resource_key":null},"action":"create"},"type":"planned_change"} +{"@level":"info","@message":"Plan: 1 to add, 0 to change, 0 to destroy.","@module":"tofu.ui","changes":{"add":1,"import":0,"change":0,"remove":0,"operation":"plan"},"type":"change_summary"} diff --git a/pkg/command/testdata/providers-lock/append/.terraform.lock.hcl b/pkg/command/testdata/providers-lock/append/.terraform.lock.hcl new file mode 100644 index 00000000000..28f5d874ec1 --- /dev/null +++ b/pkg/command/testdata/providers-lock/append/.terraform.lock.hcl @@ -0,0 +1,9 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/test" { + version = "1.0.0" + hashes = [ + "h1:invalid", + ] +} diff --git a/pkg/command/testdata/providers-lock/append/fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/os_arch/terraform-provider-test b/pkg/command/testdata/providers-lock/append/fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/os_arch/terraform-provider-test new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/providers-lock/append/main.tf b/pkg/command/testdata/providers-lock/append/main.tf new file mode 100644 index 00000000000..d3de379059f --- /dev/null +++ b/pkg/command/testdata/providers-lock/append/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} diff --git a/pkg/command/testdata/providers-lock/basic/fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/os_arch/terraform-provider-test b/pkg/command/testdata/providers-lock/basic/fs-mirror/registry.opentofu.org/hashicorp/test/1.0.0/os_arch/terraform-provider-test new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/pkg/command/testdata/providers-lock/basic/main.tf b/pkg/command/testdata/providers-lock/basic/main.tf new file mode 100644 index 00000000000..41b211f2688 --- /dev/null +++ b/pkg/command/testdata/providers-lock/basic/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} \ No newline at end of file diff --git a/pkg/command/testdata/providers-schema/basic/output.json b/pkg/command/testdata/providers-schema/basic/output.json new file mode 100644 index 00000000000..579e49e1747 --- /dev/null +++ b/pkg/command/testdata/providers-schema/basic/output.json @@ -0,0 +1,82 @@ +{ + "format_version": "1.0", + "provider_schemas": { + "registry.opentofu.org/hashicorp/test": { + "provider": { + "version": 0, + "block": { + "attributes": { + "region": { + "description_kind": "plain", + "optional": true, + "type": "string" + } + }, + "description_kind": "plain" + } + }, + "resource_schemas": { + "test_instance": { + "version": 0, + "block": { + "attributes": { + "ami": { + "type": "string", + "optional": true, + "description_kind": "plain" + }, + "id": { + "type": "string", + "optional": true, + "computed": true, + "description_kind": "plain" + }, + "volumes": { + "nested_type": { + "nesting_mode": "list", + "attributes": { + "size": { + "type": "string", + "required": true, + "description_kind": "plain" + }, + "mount_point": { + "type": "string", + "required": true, + "description_kind": "plain" + } + } + }, + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "functions": { + "test_func": { + "description": "a basic string function", + "summary": "test", + "return_type": "string", + "parameters": [ + { + "name": "input", + "description": "", + "type": "number" + } + ], + "variadic_parameter": { + "name": "variadic_input", + "description": "", + "type": [ + "list", + "bool" + ] + } + } + } + } + } +} \ No newline at end of file diff --git 
a/pkg/command/testdata/providers-schema/basic/provider.tf b/pkg/command/testdata/providers-schema/basic/provider.tf new file mode 100644 index 00000000000..06e511104e1 --- /dev/null +++ b/pkg/command/testdata/providers-schema/basic/provider.tf @@ -0,0 +1,3 @@ +provider "test" { + +} diff --git a/pkg/command/testdata/providers-schema/empty/main.tf b/pkg/command/testdata/providers-schema/empty/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/providers-schema/empty/output.json b/pkg/command/testdata/providers-schema/empty/output.json new file mode 100644 index 00000000000..381450cade5 --- /dev/null +++ b/pkg/command/testdata/providers-schema/empty/output.json @@ -0,0 +1,3 @@ +{ + "format_version": "1.0" +} diff --git a/pkg/command/testdata/providers-schema/required/output.json b/pkg/command/testdata/providers-schema/required/output.json new file mode 100644 index 00000000000..579e49e1747 --- /dev/null +++ b/pkg/command/testdata/providers-schema/required/output.json @@ -0,0 +1,82 @@ +{ + "format_version": "1.0", + "provider_schemas": { + "registry.opentofu.org/hashicorp/test": { + "provider": { + "version": 0, + "block": { + "attributes": { + "region": { + "description_kind": "plain", + "optional": true, + "type": "string" + } + }, + "description_kind": "plain" + } + }, + "resource_schemas": { + "test_instance": { + "version": 0, + "block": { + "attributes": { + "ami": { + "type": "string", + "optional": true, + "description_kind": "plain" + }, + "id": { + "type": "string", + "optional": true, + "computed": true, + "description_kind": "plain" + }, + "volumes": { + "nested_type": { + "nesting_mode": "list", + "attributes": { + "size": { + "type": "string", + "required": true, + "description_kind": "plain" + }, + "mount_point": { + "type": "string", + "required": true, + "description_kind": "plain" + } + } + }, + "description_kind": "plain", + "optional": true + } + }, + "description_kind": "plain" + } + } + }, + "functions": 
{ + "test_func": { + "description": "a basic string function", + "summary": "test", + "return_type": "string", + "parameters": [ + { + "name": "input", + "description": "", + "type": "number" + } + ], + "variadic_parameter": { + "name": "variadic_input", + "description": "", + "type": [ + "list", + "bool" + ] + } + } + } + } + } +} \ No newline at end of file diff --git a/pkg/command/testdata/providers-schema/required/provider.tf b/pkg/command/testdata/providers-schema/required/provider.tf new file mode 100644 index 00000000000..a6475e1bcf8 --- /dev/null +++ b/pkg/command/testdata/providers-schema/required/provider.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} diff --git a/pkg/command/testdata/providers/basic/main.tf b/pkg/command/testdata/providers/basic/main.tf new file mode 100644 index 00000000000..d22a2b50974 --- /dev/null +++ b/pkg/command/testdata/providers/basic/main.tf @@ -0,0 +1,11 @@ +provider "foo" { + +} + +resource "bar_instance" "test" { + +} + +provider "baz" { + version = "1.2.0" +} diff --git a/pkg/command/testdata/providers/modules/child/main.tf b/pkg/command/testdata/providers/modules/child/main.tf new file mode 100644 index 00000000000..e5bd70220bf --- /dev/null +++ b/pkg/command/testdata/providers/modules/child/main.tf @@ -0,0 +1 @@ +resource "baz_resource" "baz" {} diff --git a/pkg/command/testdata/providers/modules/main.tf b/pkg/command/testdata/providers/modules/main.tf new file mode 100644 index 00000000000..37e179307e6 --- /dev/null +++ b/pkg/command/testdata/providers/modules/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + version = "1.0" + } + } +} + +provider "bar" { + version = "2.0.0" +} + +module "kiddo" { + source = "./child" +} diff --git a/pkg/command/testdata/providers/state/main.tf b/pkg/command/testdata/providers/state/main.tf new file mode 100644 index 00000000000..b34c855be63 --- /dev/null +++ b/pkg/command/testdata/providers/state/main.tf @@ 
-0,0 +1,11 @@ +terraform { + required_providers { + foo = { + version = "1.0" + } + } +} + +provider "bar" { + version = "2.0.0" +} diff --git a/pkg/command/testdata/providers/state/terraform.tfstate b/pkg/command/testdata/providers/state/terraform.tfstate new file mode 100644 index 00000000000..bca2d8510d0 --- /dev/null +++ b/pkg/command/testdata/providers/state/terraform.tfstate @@ -0,0 +1,24 @@ +{ + "version": 4, + "terraform_version": "0.13.0", + "serial": 1, + "lineage": "00bfda35-ad61-ec8d-c013-14b0320bc416", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "baz_instance", + "name": "example", + "provider": "provider[\"registry.opentofu.org/hashicorp/baz\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "621124146446964903" + }, + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/command/testdata/providers/tests/main.tf b/pkg/command/testdata/providers/tests/main.tf new file mode 100644 index 00000000000..f2388a802f2 --- /dev/null +++ b/pkg/command/testdata/providers/tests/main.tf @@ -0,0 +1,3 @@ +resource "bar_instance" "test" { + +} diff --git a/pkg/command/testdata/providers/tests/main.tftest.hcl b/pkg/command/testdata/providers/tests/main.tftest.hcl new file mode 100644 index 00000000000..08de6964a10 --- /dev/null +++ b/pkg/command/testdata/providers/tests/main.tftest.hcl @@ -0,0 +1,3 @@ +provider "foo" { + +} diff --git a/pkg/command/testdata/push-backend-new/main.tf b/pkg/command/testdata/push-backend-new/main.tf new file mode 100644 index 00000000000..68a49b44a54 --- /dev/null +++ b/pkg/command/testdata/push-backend-new/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "inmem" {} +} + +atlas { name = "hello" } diff --git a/pkg/command/testdata/push-input-partial/main.tf b/pkg/command/testdata/push-input-partial/main.tf new file mode 100644 index 00000000000..8285c1ada86 --- /dev/null +++ b/pkg/command/testdata/push-input-partial/main.tf @@ -0,0 +1,8 @@ +variable "foo" {} +variable "bar" {} + +resource 
"test_instance" "foo" {} + +atlas { + name = "foo" +} diff --git a/pkg/command/testdata/push-input/main.tf b/pkg/command/testdata/push-input/main.tf new file mode 100644 index 00000000000..3bd930cf320 --- /dev/null +++ b/pkg/command/testdata/push-input/main.tf @@ -0,0 +1,7 @@ +variable "foo" {} + +resource "test_instance" "foo" {} + +atlas { + name = "foo" +} diff --git a/pkg/command/testdata/push-no-remote/main.tf b/pkg/command/testdata/push-no-remote/main.tf new file mode 100644 index 00000000000..2651626363b --- /dev/null +++ b/pkg/command/testdata/push-no-remote/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" {} + +atlas { + name = "foo" +} diff --git a/pkg/command/testdata/push-no-upload/child/main.tf b/pkg/command/testdata/push-no-upload/child/main.tf new file mode 100644 index 00000000000..b7db25411d0 --- /dev/null +++ b/pkg/command/testdata/push-no-upload/child/main.tf @@ -0,0 +1 @@ +# Empty diff --git a/pkg/command/testdata/push-no-upload/main.tf b/pkg/command/testdata/push-no-upload/main.tf new file mode 100644 index 00000000000..c70c8b6111d --- /dev/null +++ b/pkg/command/testdata/push-no-upload/main.tf @@ -0,0 +1 @@ +module "example" { source = "./child" } diff --git a/pkg/command/testdata/push-tfvars/main.tf b/pkg/command/testdata/push-tfvars/main.tf new file mode 100644 index 00000000000..2699d50eec2 --- /dev/null +++ b/pkg/command/testdata/push-tfvars/main.tf @@ -0,0 +1,22 @@ +variable "foo" {} + +variable "bar" {} + +variable "baz" { + type = "map" + + default = { + "A" = "a" + } +} + +variable "fob" { + type = "list" + default = ["a", "quotes \"in\" quotes"] +} + +resource "test_instance" "foo" {} + +atlas { + name = "foo" +} diff --git a/pkg/command/testdata/push-tfvars/terraform.tfvars b/pkg/command/testdata/push-tfvars/terraform.tfvars new file mode 100644 index 00000000000..92292f024a1 --- /dev/null +++ b/pkg/command/testdata/push-tfvars/terraform.tfvars @@ -0,0 +1,2 @@ +foo = "bar" +bar = "foo" diff --git 
a/pkg/command/testdata/push/main.tf b/pkg/command/testdata/push/main.tf new file mode 100644 index 00000000000..28f267cd26b --- /dev/null +++ b/pkg/command/testdata/push/main.tf @@ -0,0 +1,5 @@ +resource "test_instance" "foo" {} + +atlas { + name = "foo" +} diff --git a/pkg/command/testdata/refresh-empty/main.tf b/pkg/command/testdata/refresh-empty/main.tf new file mode 100644 index 00000000000..fec56017dc1 --- /dev/null +++ b/pkg/command/testdata/refresh-empty/main.tf @@ -0,0 +1 @@ +# Hello diff --git a/pkg/command/testdata/refresh-output/main.tf b/pkg/command/testdata/refresh-output/main.tf new file mode 100644 index 00000000000..d7efff6e4ee --- /dev/null +++ b/pkg/command/testdata/refresh-output/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +output "endpoint" { + value = "foo.example.com" +} diff --git a/pkg/command/testdata/refresh-targeted/main.tf b/pkg/command/testdata/refresh-targeted/main.tf new file mode 100644 index 00000000000..734f585494d --- /dev/null +++ b/pkg/command/testdata/refresh-targeted/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + id = "foo" +} + +resource "test_instance" "bar" { + id = "bar" +} diff --git a/pkg/command/testdata/refresh-unset-var/main.tf b/pkg/command/testdata/refresh-unset-var/main.tf new file mode 100644 index 00000000000..446cf70e2cb --- /dev/null +++ b/pkg/command/testdata/refresh-unset-var/main.tf @@ -0,0 +1,7 @@ +variable "should_ask" {} + +provider "test" {} + +resource "test_instance" "foo" { + ami = "${var.should_ask}" +} diff --git a/pkg/command/testdata/refresh-var/main.tf b/pkg/command/testdata/refresh-var/main.tf new file mode 100644 index 00000000000..af73b1c1b7e --- /dev/null +++ b/pkg/command/testdata/refresh-var/main.tf @@ -0,0 +1,7 @@ +variable "foo" {} + +provider "test" { + value = "${var.foo}" +} + +resource "test_instance" "foo" {} diff --git a/pkg/command/testdata/refresh/main.tf b/pkg/command/testdata/refresh/main.tf new file mode 100644 index 
00000000000..1b101299190 --- /dev/null +++ b/pkg/command/testdata/refresh/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/show-corrupt-statefile/terraform.tfstate b/pkg/command/testdata/show-corrupt-statefile/terraform.tfstate new file mode 100644 index 00000000000..9977a2836c1 --- /dev/null +++ b/pkg/command/testdata/show-corrupt-statefile/terraform.tfstate @@ -0,0 +1 @@ +invalid diff --git a/pkg/command/testdata/show-json-sensitive/main.tf b/pkg/command/testdata/show-json-sensitive/main.tf new file mode 100644 index 00000000000..a980e4dc4fb --- /dev/null +++ b/pkg/command/testdata/show-json-sensitive/main.tf @@ -0,0 +1,21 @@ +provider "test" { + region = "somewhere" +} + +variable "test_var" { + default = "bar" + sensitive = true +} + +resource "test_instance" "test" { + // this variable is sensitive + ami = var.test_var + // the password attribute is sensitive in the showFixtureSensitiveProvider schema. + password = "secret" + count = 3 +} + +output "test" { + value = var.test_var + sensitive = true +} diff --git a/pkg/command/testdata/show-json-sensitive/output.json b/pkg/command/testdata/show-json-sensitive/output.json new file mode 100644 index 00000000000..67c8d380e95 --- /dev/null +++ b/pkg/command/testdata/show-json-sensitive/output.json @@ -0,0 +1,220 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": true, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "password": "secret" + }, + "sensitive_values": { + "ami": true, + "password": true + } + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + 
"type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "password": "secret" + }, + "sensitive_values": { + "ami": true, + "password": true + } + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "password": "secret" + }, + "sensitive_values": { + "ami": true, + "password": true + } + } + ] + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": true, + "type": "string", + "value": "bar" + } + }, + "root_module": {} + } + }, + "resource_changes": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar", + "password": "secret" + }, + "after_sensitive": {"ami": true, "password": true}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar", + "password": "secret" + }, + "after_sensitive": {"ami": true, "password": true}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar", + "password": "secret" + }, 
+ "after_sensitive": {"ami": true, "password": true}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "bar", + "after_unknown": false, + "before_sensitive": true, + "after_sensitive": true + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + } + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + }, + "sensitive": true + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + }, + "password": {"constant_value": "secret"} + }, + "count_expression": { + "constant_value": 3 + } + } + ], + "variables": { + "test_var": { + "default": "bar", + "sensitive": true + } + } + } + } +} diff --git a/pkg/command/testdata/show-json-state/basic/output.json b/pkg/command/testdata/show-json-state/basic/output.json new file mode 100644 index 00000000000..47dc8611b8f --- /dev/null +++ b/pkg/command/testdata/show-json-state/basic/output.json @@ -0,0 +1,38 @@ +{ + "format_version": "1.0", + "terraform_version": "0.12.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.example[0]", + "mode": "managed", + "type": "test_instance", + "name": "example", + "index": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": null, + "id": "621124146446964903" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.example[1]", + "mode": "managed", + "type": "test_instance", + "name": "example", + "index": 1, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + 
"ami": null, + "id": "4330206298367988603" + }, + "sensitive_values": {} + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json-state/basic/terraform.tfstate b/pkg/command/testdata/show-json-state/basic/terraform.tfstate new file mode 100644 index 00000000000..7477557af1b --- /dev/null +++ b/pkg/command/testdata/show-json-state/basic/terraform.tfstate @@ -0,0 +1,34 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 1, + "lineage": "00bfda35-ad61-ec8d-c013-14b0320bc416", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "example", + "each": "list", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "621124146446964903" + }, + "private": "bnVsbA==" + }, + { + "index_key": 1, + "schema_version": 0, + "attributes": { + "id": "4330206298367988603" + }, + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json-state/empty/output.json b/pkg/command/testdata/show-json-state/empty/output.json new file mode 100644 index 00000000000..381450cade5 --- /dev/null +++ b/pkg/command/testdata/show-json-state/empty/output.json @@ -0,0 +1,3 @@ +{ + "format_version": "1.0" +} diff --git a/pkg/command/testdata/show-json-state/empty/terraform.tfstate b/pkg/command/testdata/show-json-state/empty/terraform.tfstate new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/show-json-state/modules/bar/main.tf b/pkg/command/testdata/show-json-state/modules/bar/main.tf new file mode 100644 index 00000000000..5d1788a0e81 --- /dev/null +++ b/pkg/command/testdata/show-json-state/modules/bar/main.tf @@ -0,0 +1,11 @@ +variable "test_var" { + default = "bar-var" +} + +output "test" { + value = var.test_var +} + +resource "test_instance" "test" { + ami = var.test_var +} diff --git a/pkg/command/testdata/show-json-state/modules/foo/main.tf 
b/pkg/command/testdata/show-json-state/modules/foo/main.tf new file mode 100644 index 00000000000..d1ae7929a45 --- /dev/null +++ b/pkg/command/testdata/show-json-state/modules/foo/main.tf @@ -0,0 +1,14 @@ +variable "test_var" { + default = "foo-var" +} + +resource "test_instance" "test" { + ami = var.test_var + count = 1 +} + +output "test" { + value = var.test_var +} + +provider "test" {} diff --git a/pkg/command/testdata/show-json-state/modules/main.tf b/pkg/command/testdata/show-json-state/modules/main.tf new file mode 100644 index 00000000000..d1cabad3ec5 --- /dev/null +++ b/pkg/command/testdata/show-json-state/modules/main.tf @@ -0,0 +1,13 @@ +module "module_test_foo" { + source = "./foo" + test_var = "baz" +} + +module "module_test_bar" { + source = "./bar" +} + +output "test" { + value = module.module_test_foo.test + depends_on = [module.module_test_foo] +} diff --git a/pkg/command/testdata/show-json-state/modules/output.json b/pkg/command/testdata/show-json-state/modules/output.json new file mode 100644 index 00000000000..4024aa284ad --- /dev/null +++ b/pkg/command/testdata/show-json-state/modules/output.json @@ -0,0 +1,54 @@ +{ + "format_version": "1.0", + "terraform_version": "0.12.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "baz" + } + }, + "root_module": { + "child_modules": [ + { + "resources": [ + { + "address": "module.module_test_bar.test_instance.example", + "mode": "managed", + "type": "test_instance", + "name": "example", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar-var", + "id": null + }, + "sensitive_values": {} + } + ], + "address": "module.module_test_bar" + }, + { + "resources": [ + { + "address": "module.module_test_foo.test_instance.example[0]", + "mode": "managed", + "type": "test_instance", + "name": "example", + "index": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + 
"ami": "foo-var", + "id": null + }, + "sensitive_values": {} + } + ], + "address": "module.module_test_foo" + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json-state/modules/terraform.tfstate b/pkg/command/testdata/show-json-state/modules/terraform.tfstate new file mode 100644 index 00000000000..74f1daf2979 --- /dev/null +++ b/pkg/command/testdata/show-json-state/modules/terraform.tfstate @@ -0,0 +1,48 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 8, + "lineage": "00bfda35-ad61-ec8d-c013-14b0320bc416", + "outputs": { + "test": { + "value": "baz", + "type": "string" + } + }, + "resources": [ + { + "module": "module.module_test_foo", + "mode": "managed", + "type": "test_instance", + "name": "example", + "each": "list", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "ami": "foo-var" + }, + "private": "bnVsbA==" + } + ] + }, + { + "module": "module.module_test_bar", + "mode": "managed", + "type": "test_instance", + "name": "example", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "bar-var" + }, + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json-state/no-state/output.json b/pkg/command/testdata/show-json-state/no-state/output.json new file mode 100644 index 00000000000..381450cade5 --- /dev/null +++ b/pkg/command/testdata/show-json-state/no-state/output.json @@ -0,0 +1,3 @@ +{ + "format_version": "1.0" +} diff --git a/pkg/command/testdata/show-json-state/sensitive-variables/output.json b/pkg/command/testdata/show-json-state/sensitive-variables/output.json new file mode 100644 index 00000000000..c0e0423d855 --- /dev/null +++ b/pkg/command/testdata/show-json-state/sensitive-variables/output.json @@ -0,0 +1,25 @@ +{ + "format_version": "1.0", + "terraform_version": "0.14.0", + "values": { + "root_module": { + 
"resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "id": "621124146446964903", + "ami": "abc" + }, + "sensitive_values": { + "ami": true + } + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json-state/sensitive-variables/terraform.tfstate b/pkg/command/testdata/show-json-state/sensitive-variables/terraform.tfstate new file mode 100644 index 00000000000..dd132cc6704 --- /dev/null +++ b/pkg/command/testdata/show-json-state/sensitive-variables/terraform.tfstate @@ -0,0 +1,33 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 1, + "lineage": "d7a6880b-6875-288f-13a9-696a65c73036", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "621124146446964903", + "ami": "abc" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "ami" + } + ] + ], + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/basic-create/main.tf b/pkg/command/testdata/show-json/basic-create/main.tf new file mode 100644 index 00000000000..2f41f530483 --- /dev/null +++ b/pkg/command/testdata/show-json/basic-create/main.tf @@ -0,0 +1,16 @@ +provider "test" { + region = "somewhere" +} + +variable "test_var" { + default = "bar" +} + +resource "test_instance" "test" { + ami = var.test_var + count = 3 +} + +output "test" { + value = var.test_var +} diff --git a/pkg/command/testdata/show-json/basic-create/output.json b/pkg/command/testdata/show-json/basic-create/output.json new file mode 100644 index 00000000000..a128533a6bd --- /dev/null +++ b/pkg/command/testdata/show-json/basic-create/output.json @@ -0,0 +1,202 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + 
"value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ] + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": {} + } + }, + "resource_changes": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[2]", 
+ "index": 2, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + } + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "count_expression": { + "constant_value": 3 + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/basic-delete/main.tf b/pkg/command/testdata/show-json/basic-delete/main.tf new file mode 100644 index 00000000000..52fea375d5d --- /dev/null +++ b/pkg/command/testdata/show-json/basic-delete/main.tf @@ -0,0 +1,10 @@ +variable "test_var" { + default = "bar" +} +resource "test_instance" "test" { + ami = var.test_var +} + +output "test" { + value = var.test_var +} \ No newline at end of file diff --git a/pkg/command/testdata/show-json/basic-delete/output.json b/pkg/command/testdata/show-json/basic-delete/output.json new file mode 100644 index 00000000000..4d5f69dcc3d --- /dev/null +++ b/pkg/command/testdata/show-json/basic-delete/output.json @@ -0,0 +1,175 @@ +{ + "format_version": "1.0", + "variables": 
{ + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "foo", + "id": "placeholder" + }, + "after": { + "ami": "bar", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + }, + { + "address": "test_instance.test-delete", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test-delete", + "action_reason": "delete_because_no_resource_config", + "change": { + "actions": [ + "delete" + ], + "before": { + "ami": "foo", + "id": "placeholder" + }, + "after": null, + "after_unknown": {}, + "after_sensitive": false, + "before_sensitive": {} + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "schema_version": 0, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "foo", + "id": "placeholder" + }, + 
"sensitive_values": {} + }, + { + "address": "test_instance.test-delete", + "schema_version": 0, + "mode": "managed", + "type": "test_instance", + "name": "test-delete", + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "foo", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/basic-delete/terraform.tfstate b/pkg/command/testdata/show-json/basic-delete/terraform.tfstate new file mode 100644 index 00000000000..01148b75611 --- /dev/null +++ b/pkg/command/testdata/show-json/basic-delete/terraform.tfstate @@ -0,0 +1,39 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "foo", + "id": "placeholder" + } + } + ] + }, + { + "mode": "managed", + "type": "test_instance", + "name": "test-delete", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "foo", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/basic-update/main.tf b/pkg/command/testdata/show-json/basic-update/main.tf new file 
mode 100644 index 00000000000..52fea375d5d --- /dev/null +++ b/pkg/command/testdata/show-json/basic-update/main.tf @@ -0,0 +1,10 @@ +variable "test_var" { + default = "bar" +} +resource "test_instance" "test" { + ami = var.test_var +} + +output "test" { + value = var.test_var +} \ No newline at end of file diff --git a/pkg/command/testdata/show-json/basic-update/output.json b/pkg/command/testdata/show-json/basic-update/output.json new file mode 100644 index 00000000000..0619b7efc8c --- /dev/null +++ b/pkg/command/testdata/show-json/basic-update/output.json @@ -0,0 +1,141 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "no-op" + ], + "before": { + "ami": "bar", + "id": "placeholder" + }, + "after": { + "ami": "bar", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + } + ], + "output_changes": { + "test": { + "actions": [ + "no-op" + ], + "before": "bar", + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": 
"test_instance", + "name": "test", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/basic-update/terraform.tfstate b/pkg/command/testdata/show-json/basic-update/terraform.tfstate new file mode 100644 index 00000000000..48e52b3fb46 --- /dev/null +++ b/pkg/command/testdata/show-json/basic-update/terraform.tfstate @@ -0,0 +1,29 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": { + "test": { + "value": "bar", + "type": "string" + } + }, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "bar", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/conditions/for-refresh.tfstate b/pkg/command/testdata/show-json/conditions/for-refresh.tfstate new file mode 100644 index 00000000000..b5146137e60 --- /dev/null +++ b/pkg/command/testdata/show-json/conditions/for-refresh.tfstate @@ -0,0 +1,55 @@ +{ + "version": 4, + "terraform_version": "1.2.0-dev", + "serial": 1, + "lineage": "no", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": 
"test_instance", + "name": "foo", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "ami-test", + "id": "placeholder" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "password" + } + ] + ] + } + ] + }, + { + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "ami-test", + "id": "placeheld" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "password" + } + ] + ] + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/conditions/main.tf b/pkg/command/testdata/show-json/conditions/main.tf new file mode 100644 index 00000000000..f63dca9fcd0 --- /dev/null +++ b/pkg/command/testdata/show-json/conditions/main.tf @@ -0,0 +1,40 @@ +variable "ami" { + type = string + default = "ami-test" +} + +variable "id_minimum_length" { + type = number + default = 10 +} + +resource "test_instance" "foo" { + ami = var.ami + + lifecycle { + precondition { + condition = can(regex("^ami-", var.ami)) + error_message = "Invalid AMI ID: must start with \"ami-\"." + } + } +} + +resource "test_instance" "bar" { + ami = "ami-boop" + + lifecycle { + postcondition { + condition = length(self.id) >= var.id_minimum_length + error_message = "Resource ID is unacceptably short (${length(self.id)} characters)." + } + } +} + +output "foo_id" { + value = test_instance.foo.id + + precondition { + condition = test_instance.foo.ami != "ami-bad" + error_message = "Foo has a bad AMI again!" 
+ } +} diff --git a/pkg/command/testdata/show-json/conditions/output-refresh-only.json b/pkg/command/testdata/show-json/conditions/output-refresh-only.json new file mode 100644 index 00000000000..9e13ab3cb19 --- /dev/null +++ b/pkg/command/testdata/show-json/conditions/output-refresh-only.json @@ -0,0 +1,171 @@ +{ + "format_version": "1.1", + "terraform_version": "1.2.0-dev", + "variables": { + "ami": { + "value": "bad-ami" + }, + "id_minimum_length": { + "value": 10 + } + }, + "planned_values": { + "outputs": { + "foo_id": { + "sensitive": false, + "type": "string", + "value": "placeholder" + } + }, + "root_module": {} + }, + "output_changes": { + "foo_id": { + "actions": [ + "create" + ], + "before": null, + "after": "placeholder", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "prior_state": { + "format_version": "1.0", + "terraform_version": "1.1.0", + "values": { + "outputs": { + "foo_id": { + "sensitive": false, + "type": "string", + "value": "placeholder" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.bar", + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "ami-test", + "id": "placeheld", + "password": null + }, + "sensitive_values": { + "password": true + } + }, + { + "address": "test_instance.foo", + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "ami-test", + "id": "placeholder", + "password": null + }, + "sensitive_values": { + "password": true + } + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "foo_id": { + "expression": { + "references": [ + "test_instance.foo.id", + "test_instance.foo" + ] + } + 
} + }, + "resources": [ + { + "address": "test_instance.bar", + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "ami-boop" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.foo", + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider_config_key": "test", + "expressions": { + "ami": { + "references": [ + "var.ami" + ] + } + }, + "schema_version": 0 + } + ], + "variables": { + "ami": { + "default": "ami-test" + }, + "id_minimum_length": { + "default": 10 + } + } + } + }, + "relevant_attributes": [ + { + "resource": "test_instance.foo", + "attribute": [ + "id" + ] + } + ], + "condition_results": [ + { + "address": "output.foo_id", + "condition_type": "OutputPrecondition", + "result": true, + "unknown": false + }, + { + "address": "test_instance.bar", + "condition_type": "ResourcePostcondition", + "result": false, + "unknown": false, + "error_message": "Resource ID is unacceptably short (9 characters)." + }, + { + "address": "test_instance.foo", + "condition_type": "ResourcePrecondition", + "result": false, + "unknown": false, + "error_message": "Invalid AMI ID: must start with \"ami-\"." 
+ } + ] +} diff --git a/pkg/command/testdata/show-json/conditions/output.json b/pkg/command/testdata/show-json/conditions/output.json new file mode 100644 index 00000000000..fa43118be26 --- /dev/null +++ b/pkg/command/testdata/show-json/conditions/output.json @@ -0,0 +1,188 @@ +{ + "format_version": "1.1", + "terraform_version": "1.2.0-dev", + "variables": { + "ami": { + "value": "ami-test" + }, + "id_minimum_length": { + "value": 10 + } + }, + "planned_values": { + "outputs": { + "foo_id": { + "sensitive": false + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.bar", + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "ami-boop" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.foo", + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "ami-test" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.bar", + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "ami-boop" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "test_instance.foo", + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "ami-test" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "output_changes": { + "foo_id": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": true, + "before_sensitive": false, + "after_sensitive": 
false + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "foo_id": { + "expression": { + "references": [ + "test_instance.foo.id", + "test_instance.foo" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.bar", + "mode": "managed", + "type": "test_instance", + "name": "bar", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "ami-boop" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.foo", + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider_config_key": "test", + "expressions": { + "ami": { + "references": [ + "var.ami" + ] + } + }, + "schema_version": 0 + } + ], + "variables": { + "ami": { + "default": "ami-test" + }, + "id_minimum_length": { + "default": 10 + } + } + } + }, + "relevant_attributes": [ + { + "resource": "test_instance.foo", + "attribute": [ + "id" + ] + } + ], + "condition_results": [ + { + "address": "output.foo_id", + "condition_type": "OutputPrecondition", + "result": true, + "unknown": false + }, + { + "address": "test_instance.bar", + "condition_type": "ResourcePostcondition", + "result": false, + "unknown": true + }, + { + "address": "test_instance.foo", + "condition_type": "ResourcePrecondition", + "result": true, + "unknown": false + } + ] +} diff --git a/pkg/command/testdata/show-json/drift/main.tf b/pkg/command/testdata/show-json/drift/main.tf new file mode 100644 index 00000000000..18995de7a5c --- /dev/null +++ b/pkg/command/testdata/show-json/drift/main.tf @@ -0,0 +1,13 @@ +# In state with `ami = "foo"`, so this should be a regular update. The provider +# should not detect changes on refresh. +resource "test_instance" "no_refresh" { + ami = "bar" +} + +# In state with `ami = "refresh-me"`, but the provider will return +# `"refreshed"` after the refresh phase. 
The plan should show the drift +# (`"refresh-me"` to `"refreshed"`) and plan the update (`"refreshed"` to +# `"baz"`). +resource "test_instance" "should_refresh" { + ami = "baz" +} diff --git a/pkg/command/testdata/show-json/drift/output.json b/pkg/command/testdata/show-json/drift/output.json new file mode 100644 index 00000000000..cde927e6435 --- /dev/null +++ b/pkg/command/testdata/show-json/drift/output.json @@ -0,0 +1,181 @@ +{ + "format_version": "1.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.should_refresh", + "mode": "managed", + "type": "test_instance", + "name": "should_refresh", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_drift": [ + { + "address": "test_instance.should_refresh", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "should_refresh", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "refresh-me", + "id": "placeholder" + }, + "after": { + "ami": "refreshed", + "id": "placeholder" + }, + "after_sensitive": {}, + "after_unknown": {}, + "before_sensitive": {} + } + } + ], + "resource_changes": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "no_refresh", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "foo", + "id": "placeholder" + }, + "after": { + "ami": "bar", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + 
"before_sensitive": {} + } + }, + { + "address": "test_instance.should_refresh", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "should_refresh", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "refreshed", + "id": "placeholder" + }, + "after": { + "ami": "baz", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + } + ], + "prior_state": { + "format_version": "1.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "foo", + "id": "placeholder" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.should_refresh", + "mode": "managed", + "type": "test_instance", + "name": "should_refresh", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "refreshed", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "bar" + } + } + }, + { + "address": "test_instance.should_refresh", + "mode": "managed", + "type": "test_instance", + "name": "should_refresh", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "baz" + } + } + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json/drift/terraform.tfstate b/pkg/command/testdata/show-json/drift/terraform.tfstate new file mode 100644 index 
00000000000..94a0b40ffbe --- /dev/null +++ b/pkg/command/testdata/show-json/drift/terraform.tfstate @@ -0,0 +1,38 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "foo", + "id": "placeholder" + } + } + ] + }, + { + "mode": "managed", + "type": "test_instance", + "name": "should_refresh", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "refresh-me", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/module-depends-on/foo/main.tf b/pkg/command/testdata/show-json/module-depends-on/foo/main.tf new file mode 100644 index 00000000000..50e1c122744 --- /dev/null +++ b/pkg/command/testdata/show-json/module-depends-on/foo/main.tf @@ -0,0 +1,3 @@ +variable "test_var" { + default = "foo-var" +} diff --git a/pkg/command/testdata/show-json/module-depends-on/main.tf b/pkg/command/testdata/show-json/module-depends-on/main.tf new file mode 100644 index 00000000000..f7185149031 --- /dev/null +++ b/pkg/command/testdata/show-json/module-depends-on/main.tf @@ -0,0 +1,11 @@ +module "foo" { + source = "./foo" + + depends_on = [ + test_instance.test + ] +} + +resource "test_instance" "test" { + ami = "foo-bar" +} diff --git a/pkg/command/testdata/show-json/module-depends-on/output.json b/pkg/command/testdata/show-json/module-depends-on/output.json new file mode 100644 index 00000000000..2679681b834 --- /dev/null +++ b/pkg/command/testdata/show-json/module-depends-on/output.json @@ -0,0 +1,85 @@ +{ + "format_version": "1.0", + "terraform_version": "0.13.1-dev", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + 
"type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "foo-bar" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "foo-bar" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "foo-bar" + } + }, + "schema_version": 0 + } + ], + "module_calls": { + "foo": { + "depends_on": [ + "test_instance.test" + ], + "source": "./foo", + "module": { + "variables": { + "test_var": { + "default": "foo-var" + } + } + } + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/modules/bar/main.tf b/pkg/command/testdata/show-json/modules/bar/main.tf new file mode 100644 index 00000000000..5d1788a0e81 --- /dev/null +++ b/pkg/command/testdata/show-json/modules/bar/main.tf @@ -0,0 +1,11 @@ +variable "test_var" { + default = "bar-var" +} + +output "test" { + value = var.test_var +} + +resource "test_instance" "test" { + ami = var.test_var +} diff --git a/pkg/command/testdata/show-json/modules/foo/main.tf b/pkg/command/testdata/show-json/modules/foo/main.tf new file mode 100644 index 00000000000..2694ca85b04 --- /dev/null +++ b/pkg/command/testdata/show-json/modules/foo/main.tf @@ -0,0 +1,13 @@ +variable "test_var" { + default = "foo-var" +} +resource "test_instance" "test" { + ami = var.test_var + count = 3 +} + 
+output "test" { + value = var.test_var +} + +provider "test" {} diff --git a/pkg/command/testdata/show-json/modules/main.tf b/pkg/command/testdata/show-json/modules/main.tf new file mode 100644 index 00000000000..d1cabad3ec5 --- /dev/null +++ b/pkg/command/testdata/show-json/modules/main.tf @@ -0,0 +1,13 @@ +module "module_test_foo" { + source = "./foo" + test_var = "baz" +} + +module "module_test_bar" { + source = "./bar" +} + +output "test" { + value = module.module_test_foo.test + depends_on = [module.module_test_foo] +} diff --git a/pkg/command/testdata/show-json/modules/output.json b/pkg/command/testdata/show-json/modules/output.json new file mode 100644 index 00000000000..40e0f7e7e4e --- /dev/null +++ b/pkg/command/testdata/show-json/modules/output.json @@ -0,0 +1,306 @@ +{ + "format_version": "1.0", + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "baz" + } + }, + "root_module": { + "child_modules": [ + { + "resources": [ + { + "address": "module.module_test_bar.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar-var" + }, + "sensitive_values": {} + } + ], + "address": "module.module_test_bar" + }, + { + "resources": [ + { + "address": "module.module_test_foo.test_instance.test[0]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz" + }, + "sensitive_values": {} + }, + { + "address": "module.module_test_foo.test_instance.test[1]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 1, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz" + }, + "sensitive_values": {} + }, + { + "address": "module.module_test_foo.test_instance.test[2]", + "mode": 
"managed", + "type": "test_instance", + "name": "test", + "index": 2, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz" + }, + "sensitive_values": {} + } + ], + "address": "module.module_test_foo" + } + ] + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "baz" + } + }, + "root_module": {} + } + }, + "resource_changes": [ + { + "address": "module.module_test_bar.test_instance.test", + "module_address": "module.module_test_bar", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "bar-var" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "module.module_test_foo.test_instance.test[0]", + "module_address": "module.module_test_foo", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "index": 0, + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "baz" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "module.module_test_foo.test_instance.test[1]", + "module_address": "module.module_test_foo", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "index": 1, + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "baz" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "module.module_test_foo.test_instance.test[2]", + "module_address": "module.module_test_foo", + "mode": "managed", + "type": "test_instance", + "provider_name": 
"registry.opentofu.org/hashicorp/test", + "name": "test", + "index": 2, + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "baz" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "baz", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "configuration": { + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "module.module_test_foo.test", + "module.module_test_foo" + ] + }, + "depends_on": [ + "module.module_test_foo" + ] + } + }, + "module_calls": { + "module_test_bar": { + "source": "./bar", + "module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "module.module_test_bar:test", + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "schema_version": 0 + } + ], + "variables": { + "test_var": { + "default": "bar-var" + } + } + } + }, + "module_test_foo": { + "source": "./foo", + "expressions": { + "test_var": { + "constant_value": "baz" + } + }, + "module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "module.module_test_foo:test", + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "schema_version": 0, + "count_expression": { + "constant_value": 3 + } + } + ], + "variables": { + "test_var": { + "default": "foo-var" + } + } + } + } + } + }, + "provider_config": { + "module.module_test_foo:test": { + "module_address": "module.module_test_foo", + "name": "test", + 
"full_name": "registry.opentofu.org/hashicorp/test" + }, + "module.module_test_bar:test": { + "module_address": "module.module_test_bar", + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + } + } +} diff --git a/pkg/command/testdata/show-json/moved-drift/main.tf b/pkg/command/testdata/show-json/moved-drift/main.tf new file mode 100644 index 00000000000..9f93a109f8a --- /dev/null +++ b/pkg/command/testdata/show-json/moved-drift/main.tf @@ -0,0 +1,18 @@ +# In state with `ami = "foo"`, so this should be a regular update. The provider +# should not detect changes on refresh. +resource "test_instance" "no_refresh" { + ami = "bar" +} + +# In state with `ami = "refresh-me"`, but the provider will return +# `"refreshed"` after the refresh phase. The plan should show the drift +# (`"refresh-me"` to `"refreshed"`) and plan the update (`"refreshed"` to +# `"baz"`). +resource "test_instance" "should_refresh_with_move" { + ami = "baz" +} + +moved { + from = test_instance.should_refresh + to = test_instance.should_refresh_with_move +} diff --git a/pkg/command/testdata/show-json/moved-drift/output.json b/pkg/command/testdata/show-json/moved-drift/output.json new file mode 100644 index 00000000000..1fd80e5e73f --- /dev/null +++ b/pkg/command/testdata/show-json/moved-drift/output.json @@ -0,0 +1,183 @@ +{ + "format_version": "1.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.should_refresh_with_move", + "mode": "managed", + "type": "test_instance", + "name": "should_refresh_with_move", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz", + "id": "placeholder" + }, + 
"sensitive_values": {} + } + ] + } + }, + "resource_drift": [ + { + "address": "test_instance.should_refresh_with_move", + "mode": "managed", + "type": "test_instance", + "previous_address": "test_instance.should_refresh", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "should_refresh_with_move", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "refresh-me", + "id": "placeholder" + }, + "after": { + "ami": "refreshed", + "id": "placeholder" + }, + "after_sensitive": {}, + "after_unknown": {}, + "before_sensitive": {} + } + } + ], + "resource_changes": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "no_refresh", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "foo", + "id": "placeholder" + }, + "after": { + "ami": "bar", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + }, + { + "address": "test_instance.should_refresh_with_move", + "mode": "managed", + "type": "test_instance", + "previous_address": "test_instance.should_refresh", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "should_refresh_with_move", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "refreshed", + "id": "placeholder" + }, + "after": { + "ami": "baz", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + } + ], + "prior_state": { + "format_version": "1.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "foo", + "id": "placeholder" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.should_refresh_with_move", + "mode": "managed", + "type": 
"test_instance", + "name": "should_refresh_with_move", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "refreshed", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.no_refresh", + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "bar" + } + } + }, + { + "address": "test_instance.should_refresh_with_move", + "mode": "managed", + "type": "test_instance", + "name": "should_refresh_with_move", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "baz" + } + } + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json/moved-drift/terraform.tfstate b/pkg/command/testdata/show-json/moved-drift/terraform.tfstate new file mode 100644 index 00000000000..94a0b40ffbe --- /dev/null +++ b/pkg/command/testdata/show-json/moved-drift/terraform.tfstate @@ -0,0 +1,38 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "no_refresh", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "foo", + "id": "placeholder" + } + } + ] + }, + { + "mode": "managed", + "type": "test_instance", + "name": "should_refresh", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "refresh-me", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/moved/main.tf b/pkg/command/testdata/show-json/moved/main.tf new 
file mode 100644 index 00000000000..0f11c2c0a4e --- /dev/null +++ b/pkg/command/testdata/show-json/moved/main.tf @@ -0,0 +1,8 @@ +resource "test_instance" "baz" { + ami = "baz" +} + +moved { + from = test_instance.foo + to = test_instance.baz +} diff --git a/pkg/command/testdata/show-json/moved/output.json b/pkg/command/testdata/show-json/moved/output.json new file mode 100644 index 00000000000..9b340d49f61 --- /dev/null +++ b/pkg/command/testdata/show-json/moved/output.json @@ -0,0 +1,95 @@ +{ + "format_version": "1.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.baz", + "mode": "managed", + "type": "test_instance", + "name": "baz", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.baz", + "mode": "managed", + "type": "test_instance", + "previous_address": "test_instance.foo", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "baz", + "change": { + "actions": [ + "update" + ], + "before": { + "ami": "foo", + "id": "placeholder" + }, + "after": { + "ami": "baz", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + } + ], + "prior_state": { + "format_version": "1.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.baz", + "mode": "managed", + "type": "test_instance", + "name": "baz", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "foo", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.baz", + "mode": "managed", + "type": "test_instance", + "name": 
"baz", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "baz" + } + } + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json/moved/terraform.tfstate b/pkg/command/testdata/show-json/moved/terraform.tfstate new file mode 100644 index 00000000000..8130d4c71dd --- /dev/null +++ b/pkg/command/testdata/show-json/moved/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "foo", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "foo", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/multi-resource-update/main.tf b/pkg/command/testdata/show-json/multi-resource-update/main.tf new file mode 100644 index 00000000000..3ead9dd32b7 --- /dev/null +++ b/pkg/command/testdata/show-json/multi-resource-update/main.tf @@ -0,0 +1,13 @@ +variable "test_var" { + default = "bar" +} + +// There is a single instance in state. The plan will add a resource. 
+resource "test_instance" "test" { + ami = var.test_var + count = 2 +} + +output "test" { + value = var.test_var +} diff --git a/pkg/command/testdata/show-json/multi-resource-update/output.json b/pkg/command/testdata/show-json/multi-resource-update/output.json new file mode 100644 index 00000000000..0f6b26d1304 --- /dev/null +++ b/pkg/command/testdata/show-json/multi-resource-update/output.json @@ -0,0 +1,185 @@ +{ + "format_version": "1.0", + "terraform_version": "0.13.0", + "variables": { + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[1]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 1, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test[0]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 0, + "previous_address": "test_instance.test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "no-op" + ], + "before": { + "ami": "bar", + "id": "placeholder" + }, + "after": { + "ami": "bar", + "id": "placeholder" + }, + "after_unknown": {}, + "after_sensitive": {}, + "before_sensitive": {} + } + }, + { + "address": "test_instance.test[1]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 1, + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + 
"after": { + "ami": "bar" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "no-op" + ], + "before": "bar", + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "prior_state": { + "format_version": "1.0", + "terraform_version": "0.13.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "mode": "managed", + "type": "test_instance", + "name": "test", + "index": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "schema_version": 0, + "count_expression": { + "constant_value": 2 + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/multi-resource-update/terraform.tfstate b/pkg/command/testdata/show-json/multi-resource-update/terraform.tfstate new file mode 100644 index 00000000000..48e52b3fb46 --- /dev/null +++ b/pkg/command/testdata/show-json/multi-resource-update/terraform.tfstate @@ -0,0 +1,29 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": { + "test": { + "value": "bar", + "type": "string" + } + }, + 
"resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "ami": "bar", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/nested-module-error/main.tf b/pkg/command/testdata/show-json/nested-module-error/main.tf new file mode 100644 index 00000000000..ef0bad2bbcd --- /dev/null +++ b/pkg/command/testdata/show-json/nested-module-error/main.tf @@ -0,0 +1,3 @@ +module "my_module" { + source = "./modules" +} diff --git a/pkg/command/testdata/show-json/nested-module-error/modules/main.tf b/pkg/command/testdata/show-json/nested-module-error/modules/main.tf new file mode 100644 index 00000000000..990155ecb33 --- /dev/null +++ b/pkg/command/testdata/show-json/nested-module-error/modules/main.tf @@ -0,0 +1,3 @@ +module "more" { + source = "./more-modules" +} diff --git a/pkg/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf b/pkg/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf new file mode 100644 index 00000000000..488a2719307 --- /dev/null +++ b/pkg/command/testdata/show-json/nested-module-error/modules/more-modules/main.tf @@ -0,0 +1,4 @@ +variable "misspelled" { + default = "ehllo" + descriptoni = "I am a misspelled attribute" +} diff --git a/pkg/command/testdata/show-json/nested-modules/main.tf b/pkg/command/testdata/show-json/nested-modules/main.tf new file mode 100644 index 00000000000..ef0bad2bbcd --- /dev/null +++ b/pkg/command/testdata/show-json/nested-modules/main.tf @@ -0,0 +1,3 @@ +module "my_module" { + source = "./modules" +} diff --git a/pkg/command/testdata/show-json/nested-modules/modules/main.tf b/pkg/command/testdata/show-json/nested-modules/modules/main.tf new file mode 100644 index 00000000000..990155ecb33 --- /dev/null +++ b/pkg/command/testdata/show-json/nested-modules/modules/main.tf @@ -0,0 +1,3 @@ 
+module "more" { + source = "./more-modules" +} diff --git a/pkg/command/testdata/show-json/nested-modules/modules/more-modules/main.tf b/pkg/command/testdata/show-json/nested-modules/modules/more-modules/main.tf new file mode 100644 index 00000000000..2e5273a5726 --- /dev/null +++ b/pkg/command/testdata/show-json/nested-modules/modules/more-modules/main.tf @@ -0,0 +1,7 @@ +variable "test_var" { + default = "bar-var" +} + +resource "test_instance" "test" { + ami = var.test_var +} diff --git a/pkg/command/testdata/show-json/nested-modules/output.json b/pkg/command/testdata/show-json/nested-modules/output.json new file mode 100644 index 00000000000..43d9dae9ce9 --- /dev/null +++ b/pkg/command/testdata/show-json/nested-modules/output.json @@ -0,0 +1,98 @@ +{ + "format_version": "1.0", + "planned_values": { + "root_module": { + "child_modules": [ + { + "address": "module.my_module", + "child_modules": [ + { + "resources": [ + { + "address": "module.my_module.module.more.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar-var" + }, + "sensitive_values": {} + } + ], + "address": "module.my_module.module.more" + } + ] + } + ] + } + }, + "resource_changes": [ + { + "address": "module.my_module.module.more.test_instance.test", + "module_address": "module.my_module.module.more", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": ["create"], + "before": null, + "after": { + "ami": "bar-var" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "configuration": { + "provider_config": { + "module.my_module.module.more:test": { + "module_address": "module.my_module.module.more", + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + 
"module_calls": { + "my_module": { + "source": "./modules", + "module": { + "module_calls": { + "more": { + "source": "./more-modules", + "module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "module.my_module.module.more:test", + "expressions": { + "ami": { + "references": ["var.test_var"] + } + }, + "schema_version": 0 + } + ], + "variables": { + "test_var": { + "default": "bar-var" + } + } + } + } + } + } + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/plan-error/main.tf b/pkg/command/testdata/show-json/plan-error/main.tf new file mode 100644 index 00000000000..c26c1a0aa4a --- /dev/null +++ b/pkg/command/testdata/show-json/plan-error/main.tf @@ -0,0 +1,15 @@ +locals { + ami = "bar" +} + +resource "test_instance" "test" { + ami = local.ami + + lifecycle { + precondition { + // failing condition + condition = local.ami != "bar" + error_message = "ami is bar" + } + } +} \ No newline at end of file diff --git a/pkg/command/testdata/show-json/plan-error/output.json b/pkg/command/testdata/show-json/plan-error/output.json new file mode 100644 index 00000000000..50167b5e2c1 --- /dev/null +++ b/pkg/command/testdata/show-json/plan-error/output.json @@ -0,0 +1,35 @@ +{ + "format_version": "1.2", + "planned_values": { + "root_module": {} + }, + "prior_state": {}, + "configuration": { + "provider_config": { + "test": { + "full_name": "registry.opentofu.org/hashicorp/test", + "name": "test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "expressions": { + "ami": { + "references": [ + "local.ami" + ] + } + }, + "mode": "managed", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "type": "test_instance" + } + ] + } + }, + "errored": true +} \ No newline at end of file diff --git a/pkg/command/testdata/show-json/provider-aliasing-conflict/child/main.tf 
b/pkg/command/testdata/show-json/provider-aliasing-conflict/child/main.tf new file mode 100644 index 00000000000..2479df3359f --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-conflict/child/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + test = { + source = "hashicorp2/test" + } + } +} + +resource "test_instance" "test" { + ami = "bar" +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-conflict/main.tf b/pkg/command/testdata/show-json/provider-aliasing-conflict/main.tf new file mode 100644 index 00000000000..2d8bc0b9012 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-conflict/main.tf @@ -0,0 +1,11 @@ +provider "test" { + region = "somewhere" +} + +resource "test_instance" "test" { + ami = "foo" +} + +module "child" { + source = "./child" +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-conflict/output.json b/pkg/command/testdata/show-json/provider-aliasing-conflict/output.json new file mode 100644 index 00000000000..18a70b31f56 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-conflict/output.json @@ -0,0 +1,143 @@ +{ + "format_version": "1.0", + "terraform_version": "1.1.0-dev", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "foo" + }, + "sensitive_values": {} + } + ], + "child_modules": [ + { + "resources": [ + { + "address": "module.child.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp2/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ], + "address": "module.child" + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", 
+ "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "foo" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.test_instance.test", + "module_address": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp2/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "bar" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + } + }, + "module.child:test": { + "module_address": "module.child", + "name": "test", + "full_name": "registry.opentofu.org/hashicorp2/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "foo" + } + }, + "schema_version": 0 + } + ], + "module_calls": { + "child": { + "source": "./child", + "module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "module.child:test", + "expressions": { + "ami": { + "constant_value": "bar" + } + }, + "schema_version": 0 + } + ] + } + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-default/child/main.tf b/pkg/command/testdata/show-json/provider-aliasing-default/child/main.tf new file mode 100644 index 00000000000..b865e3e14a9 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-default/child/main.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { 
+ test = { + source = "hashicorp/test" + } + } +} + +resource "test_instance" "test" { + ami = "bar" +} + +module "with_requirement" { + source = "./nested" + depends_on = [module.no_requirements] +} + +module "no_requirements" { + source = "./nested-no-requirements" +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf b/pkg/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf new file mode 100644 index 00000000000..20781869684 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-default/child/nested-no-requirements/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "test" { + ami = "qux" +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf b/pkg/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf new file mode 100644 index 00000000000..1590c5cea37 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-default/child/nested/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} + +resource "test_instance" "test" { + ami = "baz" +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-default/main.tf b/pkg/command/testdata/show-json/provider-aliasing-default/main.tf new file mode 100644 index 00000000000..f5e63f0aa3b --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing-default/main.tf @@ -0,0 +1,19 @@ +provider "test" { + region = "somewhere" +} + +provider "test" { + alias = "backup" + region = "elsewhere" +} + +resource "test_instance" "test" { + ami = "foo" +} + +module "child" { + source = "./child" + providers = { + test = test.backup + } +} diff --git a/pkg/command/testdata/show-json/provider-aliasing-default/output.json b/pkg/command/testdata/show-json/provider-aliasing-default/output.json new file mode 100644 index 00000000000..2172f2a7d10 --- /dev/null +++ 
b/pkg/command/testdata/show-json/provider-aliasing-default/output.json @@ -0,0 +1,271 @@ +{ + "format_version": "1.0", + "terraform_version": "1.1.0-dev", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "foo" + }, + "sensitive_values": {} + } + ], + "child_modules": [ + { + "resources": [ + { + "address": "module.child.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ], + "address": "module.child", + "child_modules": [ + { + "resources": [ + { + "address": "module.child.module.no_requirements.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "qux" + }, + "sensitive_values": {} + } + ], + "address": "module.child.module.no_requirements" + }, + { + "resources": [ + { + "address": "module.child.module.with_requirement.test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "baz" + }, + "sensitive_values": {} + } + ], + "address": "module.child.module.with_requirement" + } + ] + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "foo" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": 
"module.child.test_instance.test", + "module_address": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "bar" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.module.no_requirements.test_instance.test", + "module_address": "module.child.module.no_requirements", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "qux" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.module.with_requirement.test_instance.test", + "module_address": "module.child.module.with_requirement", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "baz" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + } + }, + "test.backup": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "alias": "backup", + "expressions": { + "region": { + "constant_value": "elsewhere" + } + } + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "foo" + } + }, + "schema_version": 0 + } + ], + 
"module_calls": { + "child": { + "source": "./child", + "module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "bar" + } + }, + "schema_version": 0 + } + ], + "module_calls": { + "no_requirements": { + "source": "./nested-no-requirements", + "module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "qux" + } + }, + "schema_version": 0 + } + ] + } + }, + "with_requirement": { + "source": "./nested", + "depends_on": ["module.no_requirements"], + "module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "baz" + } + }, + "schema_version": 0 + } + ] + } + } + } + } + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/provider-aliasing/child/main.tf b/pkg/command/testdata/show-json/provider-aliasing/child/main.tf new file mode 100644 index 00000000000..42555752cfd --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing/child/main.tf @@ -0,0 +1,26 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [test, test.second] + } + } +} + +resource "test_instance" "test_primary" { + ami = "primary" + provider = test +} + +resource "test_instance" "test_secondary" { + ami = "secondary" + provider = test.second +} + +module "grandchild" { + source = "./nested" + providers = { + test = test + test.alt = test.second + } +} diff --git a/pkg/command/testdata/show-json/provider-aliasing/child/nested/main.tf b/pkg/command/testdata/show-json/provider-aliasing/child/nested/main.tf new file mode 100644 index 
00000000000..ff1fe9a1afb --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing/child/nested/main.tf @@ -0,0 +1,18 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [test, test.alt] + } + } +} + +resource "test_instance" "test_main" { + ami = "main" + provider = test +} + +resource "test_instance" "test_alternate" { + ami = "secondary" + provider = test.alt +} diff --git a/pkg/command/testdata/show-json/provider-aliasing/main.tf b/pkg/command/testdata/show-json/provider-aliasing/main.tf new file mode 100644 index 00000000000..7f6b0a3e337 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing/main.tf @@ -0,0 +1,34 @@ +provider "test" { + region = "somewhere" +} + +provider "test" { + alias = "backup" + region = "elsewhere" +} + +resource "test_instance" "test" { + ami = "foo" + provider = test +} + +resource "test_instance" "test_backup" { + ami = "foo-backup" + provider = test.backup +} + +module "child" { + source = "./child" + providers = { + test = test + test.second = test.backup + } +} + +module "sibling" { + source = "./child" + providers = { + test = test + test.second = test + } +} diff --git a/pkg/command/testdata/show-json/provider-aliasing/output.json b/pkg/command/testdata/show-json/provider-aliasing/output.json new file mode 100755 index 00000000000..a246e121390 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-aliasing/output.json @@ -0,0 +1,567 @@ +{ + "format_version": "1.0", + "terraform_version": "1.1.0-dev", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "foo" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test_backup", + "mode": "managed", + "type": "test_instance", + "name": "test_backup", + "provider_name": 
"registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "foo-backup" + }, + "sensitive_values": {} + } + ], + "child_modules": [ + { + "resources": [ + { + "address": "module.child.test_instance.test_primary", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "primary" + }, + "sensitive_values": {} + }, + { + "address": "module.child.test_instance.test_secondary", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "secondary" + }, + "sensitive_values": {} + } + ], + "address": "module.child", + "child_modules": [ + { + "resources": [ + { + "address": "module.child.module.grandchild.test_instance.test_alternate", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "secondary" + }, + "sensitive_values": {} + }, + { + "address": "module.child.module.grandchild.test_instance.test_main", + "mode": "managed", + "type": "test_instance", + "name": "test_main", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "main" + }, + "sensitive_values": {} + } + ], + "address": "module.child.module.grandchild" + } + ] + }, + { + "resources": [ + { + "address": "module.sibling.test_instance.test_primary", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "primary" + }, + "sensitive_values": {} + }, + { + "address": "module.sibling.test_instance.test_secondary", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_name": "registry.opentofu.org/hashicorp/test", 
+ "schema_version": 0, + "values": { + "ami": "secondary" + }, + "sensitive_values": {} + } + ], + "address": "module.sibling", + "child_modules": [ + { + "resources": [ + { + "address": "module.sibling.module.grandchild.test_instance.test_alternate", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "secondary" + }, + "sensitive_values": {} + }, + { + "address": "module.sibling.module.grandchild.test_instance.test_main", + "mode": "managed", + "type": "test_instance", + "name": "test_main", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "main" + }, + "sensitive_values": {} + } + ], + "address": "module.sibling.module.grandchild" + } + ] + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "foo" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "test_instance.test_backup", + "mode": "managed", + "type": "test_instance", + "name": "test_backup", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "foo-backup" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.test_instance.test_primary", + "module_address": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "primary" + }, + "after_unknown": { + "id": true + }, + 
"before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.test_instance.test_secondary", + "module_address": "module.child", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "secondary" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.sibling.test_instance.test_primary", + "module_address": "module.sibling", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "primary" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.sibling.test_instance.test_secondary", + "module_address": "module.sibling", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "secondary" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.module.grandchild.test_instance.test_alternate", + "module_address": "module.child.module.grandchild", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "secondary" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.child.module.grandchild.test_instance.test_main", + "module_address": "module.child.module.grandchild", + "mode": "managed", + 
"type": "test_instance", + "name": "test_main", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "main" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.sibling.module.grandchild.test_instance.test_alternate", + "module_address": "module.sibling.module.grandchild", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "secondary" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + }, + { + "address": "module.sibling.module.grandchild.test_instance.test_main", + "module_address": "module.sibling.module.grandchild", + "mode": "managed", + "type": "test_instance", + "name": "test_main", + "provider_name": "registry.opentofu.org/hashicorp/test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "main" + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + } + }, + "test.backup": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "alias": "backup", + "expressions": { + "region": { + "constant_value": "elsewhere" + } + } + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "foo" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.test_backup", + "mode": "managed", + "type": 
"test_instance", + "name": "test_backup", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "foo-backup" + } + }, + "schema_version": 0 + } + ], + "module_calls": { + "child": { + "source": "./child", + "module": { + "resources": [ + { + "address": "test_instance.test_primary", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "primary" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.test_secondary", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "secondary" + } + }, + "schema_version": 0 + } + ], + "module_calls": { + "grandchild": { + "source": "./nested", + "module": { + "resources": [ + { + "address": "test_instance.test_alternate", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_config_key": "test.backup", + "expressions": { + "ami": { + "constant_value": "secondary" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.test_main", + "mode": "managed", + "type": "test_instance", + "name": "test_main", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "main" + } + }, + "schema_version": 0 + } + ] + } + } + } + } + }, + "sibling": { + "source": "./child", + "module": { + "resources": [ + { + "address": "test_instance.test_primary", + "mode": "managed", + "type": "test_instance", + "name": "test_primary", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "primary" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.test_secondary", + "mode": "managed", + "type": "test_instance", + "name": "test_secondary", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "secondary" + } + }, + "schema_version": 0 + } + ], + 
"module_calls": { + "grandchild": { + "source": "./nested", + "module": { + "resources": [ + { + "address": "test_instance.test_alternate", + "mode": "managed", + "type": "test_instance", + "name": "test_alternate", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "secondary" + } + }, + "schema_version": 0 + }, + { + "address": "test_instance.test_main", + "mode": "managed", + "type": "test_instance", + "name": "test_main", + "provider_config_key": "test", + "expressions": { + "ami": { + "constant_value": "main" + } + }, + "schema_version": 0 + } + ] + } + } + } + } + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/provider-version-no-config/main.tf b/pkg/command/testdata/show-json/provider-version-no-config/main.tf new file mode 100644 index 00000000000..c5df9bf79ce --- /dev/null +++ b/pkg/command/testdata/show-json/provider-version-no-config/main.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + version = ">= 1.2.3" + } + } +} + +variable "test_var" { + default = "bar" +} + +resource "test_instance" "test" { + ami = var.test_var + count = 3 +} + +output "test" { + value = var.test_var +} diff --git a/pkg/command/testdata/show-json/provider-version-no-config/output.json b/pkg/command/testdata/show-json/provider-version-no-config/output.json new file mode 100644 index 00000000000..79a8e8ba473 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-version-no-config/output.json @@ -0,0 +1,198 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + 
"sensitive_values": {} + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ] + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": {} + } + }, + "resource_changes": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": 
null, + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "version_constraint": ">= 1.2.3" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "count_expression": { + "constant_value": 3 + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/provider-version/main.tf b/pkg/command/testdata/show-json/provider-version/main.tf new file mode 100644 index 00000000000..b6d3cb9745b --- /dev/null +++ b/pkg/command/testdata/show-json/provider-version/main.tf @@ -0,0 +1,26 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + version = ">= 1.2.3" + } + } +} + +provider "test" { + region = "somewhere" + version = "1.2.3" +} + +variable "test_var" { + default = "bar" +} + +resource "test_instance" "test" { + ami = var.test_var + count = 3 +} + +output "test" { + value = var.test_var +} diff --git a/pkg/command/testdata/show-json/provider-version/output.json b/pkg/command/testdata/show-json/provider-version/output.json new file mode 100644 index 00000000000..1db796b6a89 --- /dev/null +++ b/pkg/command/testdata/show-json/provider-version/output.json @@ -0,0 +1,203 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + "value": "bar" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": 
"managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "bar" + }, + "sensitive_values": {} + } + ] + } + }, + "prior_state": { + "format_version": "1.0", + "values": { + "outputs": { + "test": { + "sensitive": false, + "type": "string", + "value": "bar" + } + }, + "root_module": {} + } + }, + "resource_changes": [ + { + "address": "test_instance.test[0]", + "index": 0, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[1]", + "index": 1, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + "after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + }, + { + "address": "test_instance.test[2]", + "index": 2, + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after_unknown": { + "id": true + }, + 
"after": { + "ami": "bar" + }, + "after_sensitive": {}, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "bar", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test", + "expressions": { + "region": { + "constant_value": "somewhere" + } + }, + "version_constraint": ">= 1.2.3, 1.2.3" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "var.test_var" + ] + } + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + }, + "count_expression": { + "constant_value": 3 + } + } + ], + "variables": { + "test_var": { + "default": "bar" + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/requires-replace/main.tf b/pkg/command/testdata/show-json/requires-replace/main.tf new file mode 100644 index 00000000000..6be6611c670 --- /dev/null +++ b/pkg/command/testdata/show-json/requires-replace/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "test" { + ami = "force-replace" +} diff --git a/pkg/command/testdata/show-json/requires-replace/output.json b/pkg/command/testdata/show-json/requires-replace/output.json new file mode 100644 index 00000000000..47ea143b702 --- /dev/null +++ b/pkg/command/testdata/show-json/requires-replace/output.json @@ -0,0 +1,97 @@ +{ + "format_version": "1.0", + "planned_values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "force-replace" + }, + 
"sensitive_values": {} + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "delete", + "create" + ], + "before": { + "ami": "bar", + "id": "placeholder" + }, + "after": { + "ami": "force-replace" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": {}, + "before_sensitive": {}, + "replace_paths": [["ami"]] + }, + "action_reason": "replace_because_cannot_update" + } + ], + "prior_state": { + "format_version": "1.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "schema_version": 0, + "provider_name": "registry.opentofu.org/hashicorp/test", + "values": { + "ami": "bar", + "id": "placeholder" + }, + "sensitive_values": {} + } + ] + } + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "constant_value": "force-replace" + } + } + } + ] + } + } +} diff --git a/pkg/command/testdata/show-json/requires-replace/terraform.tfstate b/pkg/command/testdata/show-json/requires-replace/terraform.tfstate new file mode 100644 index 00000000000..02df6f17584 --- /dev/null +++ b/pkg/command/testdata/show-json/requires-replace/terraform.tfstate @@ -0,0 +1,24 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "schema_version": 
0, + "attributes": { + "ami": "bar", + "id": "placeholder" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/show-json/sensitive-values/main.tf b/pkg/command/testdata/show-json/sensitive-values/main.tf new file mode 100644 index 00000000000..3f8ba824c98 --- /dev/null +++ b/pkg/command/testdata/show-json/sensitive-values/main.tf @@ -0,0 +1,13 @@ +variable "test_var" { + default = "boop" + sensitive = true +} + +resource "test_instance" "test" { + ami = var.test_var +} + +output "test" { + value = test_instance.test.ami + sensitive = true +} diff --git a/pkg/command/testdata/show-json/sensitive-values/output.json b/pkg/command/testdata/show-json/sensitive-values/output.json new file mode 100644 index 00000000000..bba81db04f5 --- /dev/null +++ b/pkg/command/testdata/show-json/sensitive-values/output.json @@ -0,0 +1,129 @@ +{ + "format_version": "1.0", + "variables": { + "test_var": { + "value": "boop" + } + }, + "planned_values": { + "outputs": { + "test": { + "sensitive": true, + "type": "string", + "value": "boop" + } + }, + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0, + "values": { + "ami": "boop" + }, + "sensitive_values": { + "ami": true + } + } + ] + } + }, + "resource_changes": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "provider_name": "registry.opentofu.org/hashicorp/test", + "name": "test", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "ami": "boop" + }, + "after_unknown": { + "id": true + }, + "after_sensitive": { + "ami": true + }, + "before_sensitive": false + } + } + ], + "output_changes": { + "test": { + "actions": [ + "create" + ], + "before": null, + "after": "boop", + "after_unknown": false, + "before_sensitive": true, + "after_sensitive": true + } + }, + "prior_state": { + "format_version": 
"1.0", + "values": { + "outputs": { + "test": { + "sensitive": true, + "type": "string", + "value": "boop" + } + }, + "root_module": {} + } + }, + "configuration": { + "provider_config": { + "test": { + "name": "test", + "full_name": "registry.opentofu.org/hashicorp/test" + } + }, + "root_module": { + "outputs": { + "test": { + "expression": { + "references": [ + "test_instance.test.ami", + "test_instance.test" + ] + }, + "sensitive": true + } + }, + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_config_key": "test", + "schema_version": 0, + "expressions": { + "ami": { + "references": [ + "var.test_var" + ] + } + } + } + ], + "variables": { + "test_var": { + "default": "boop", + "sensitive": true + } + } + } + } +} diff --git a/pkg/command/testdata/show-json/unknown-output/main.tf b/pkg/command/testdata/show-json/unknown-output/main.tf new file mode 100644 index 00000000000..d97891e2246 --- /dev/null +++ b/pkg/command/testdata/show-json/unknown-output/main.tf @@ -0,0 +1,19 @@ +output "foo" { + value = "hello" +} + +output "bar" { + value = tolist([ + "hello", + timestamp(), + "world", + ]) +} + +output "baz" { + value = { + greeting: "hello", + time: timestamp(), + subject: "world", + } +} diff --git a/pkg/command/testdata/show-json/unknown-output/output.json b/pkg/command/testdata/show-json/unknown-output/output.json new file mode 100644 index 00000000000..8a52b8dc57c --- /dev/null +++ b/pkg/command/testdata/show-json/unknown-output/output.json @@ -0,0 +1,96 @@ +{ + "format_version": "1.1", + "terraform_version": "1.3.0-dev", + "planned_values": { + "outputs": { + "bar": { + "sensitive": false + }, + "baz": { + "sensitive": false + }, + "foo": { + "sensitive": false, + "type": "string", + "value": "hello" + } + }, + "root_module": {} + }, + "output_changes": { + "bar": { + "actions": [ + "create" + ], + "before": null, + "after": [ + "hello", + null, + "world" + ], + 
"after_unknown": [ + false, + true, + false + ], + "before_sensitive": false, + "after_sensitive": false + }, + "baz": { + "actions": [ + "create" + ], + "before": null, + "after": { + "greeting": "hello", + "subject": "world" + }, + "after_unknown": { + "time": true + }, + "before_sensitive": false, + "after_sensitive": false + }, + "foo": { + "actions": [ + "create" + ], + "before": null, + "after": "hello", + "after_unknown": false, + "before_sensitive": false, + "after_sensitive": false + } + }, + "prior_state": { + "format_version": "1.0", + "terraform_version": "1.3.0", + "values": { + "outputs": { + "foo": { + "sensitive": false, + "value": "hello", + "type": "string" + } + }, + "root_module": {} + } + }, + "configuration": { + "root_module": { + "outputs": { + "bar": { + "expression": {} + }, + "baz": { + "expression": {} + }, + "foo": { + "expression": { + "constant_value": "hello" + } + } + } + } + } +} diff --git a/pkg/command/testdata/show/main.tf b/pkg/command/testdata/show/main.tf new file mode 100644 index 00000000000..1b101299190 --- /dev/null +++ b/pkg/command/testdata/show/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + ami = "bar" +} diff --git a/pkg/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate b/pkg/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-list-backend-custom/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-list-backend-custom/local-state.tfstate b/pkg/command/testdata/state-list-backend-custom/local-state.tfstate 
new file mode 100644 index 00000000000..f24330c7598 --- /dev/null +++ b/pkg/command/testdata/state-list-backend-custom/local-state.tfstate @@ -0,0 +1,24 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "a", + "provider": "provider[\"registry.opentofu.org/-/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "8521602373864259745", + "triggers": null + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-list-backend-custom/main.tf b/pkg/command/testdata/state-list-backend-custom/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-list-backend-custom/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-list-backend-default/.terraform/terraform.tfstate b/pkg/command/testdata/state-list-backend-default/.terraform/terraform.tfstate new file mode 100644 index 00000000000..44ed4f6726d --- /dev/null +++ b/pkg/command/testdata/state-list-backend-default/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": null, + "workspace_dir": null + }, + "hash": 666019178 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-list-backend-default/main.tf b/pkg/command/testdata/state-list-backend-default/main.tf new file mode 100644 index 00000000000..7f62e0e1972 --- /dev/null +++ b/pkg/command/testdata/state-list-backend-default/main.tf @@ -0,0 +1,4 @@ +terraform { + backend "local" { + } +} diff --git a/pkg/command/testdata/state-list-backend-default/terraform.tfstate 
b/pkg/command/testdata/state-list-backend-default/terraform.tfstate new file mode 100644 index 00000000000..f24330c7598 --- /dev/null +++ b/pkg/command/testdata/state-list-backend-default/terraform.tfstate @@ -0,0 +1,24 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "a", + "provider": "provider[\"registry.opentofu.org/-/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "8521602373864259745", + "triggers": null + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-list-nested-modules/terraform.tfstate b/pkg/command/testdata/state-list-nested-modules/terraform.tfstate new file mode 100644 index 00000000000..4c08afdcf49 --- /dev/null +++ b/pkg/command/testdata/state-list-nested-modules/terraform.tfstate @@ -0,0 +1,91 @@ +{ + "version": 4, + "terraform_version": "0.15.0", + "serial": 8, + "lineage": "00bfda35-ad61-ec8d-c013-14b0320bc416", + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "root", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "root" + } + } + ] + }, + { + "module": "module.nest", + "mode": "managed", + "type": "test_instance", + "name": "nest", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "ami": "nested" + } + } + ] + }, + { + "module": "module.nest.module.subnest", + "mode": "managed", + "type": "test_instance", + "name": "subnest", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "subnested" + } + } + ] + }, + { + "module": "module.nonexist.module.child", + "mode": "managed", + "type": "test_instance", + "name": "child", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": 
"child" + } + } + ] + }, + { + "module": "module.count[0]", + "mode": "managed", + "type": "test_instance", + "name": "count", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "zero" + } + } + ] + }, + { + "module": "module.count[1]", + "mode": "managed", + "type": "test_instance", + "name": "count", + "provider": "provider[\"registry.opentofu.org/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "one" + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-pull-backend/.terraform/terraform.tfstate b/pkg/command/testdata/state-pull-backend/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-pull-backend/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-pull-backend/local-state.tfstate b/pkg/command/testdata/state-pull-backend/local-state.tfstate new file mode 100644 index 00000000000..db3d0b7c7d7 --- /dev/null +++ b/pkg/command/testdata/state-pull-backend/local-state.tfstate @@ -0,0 +1,24 @@ +{ + "version": 4, + "terraform_version": "0.12.0", + "serial": 7, + "lineage": "configuredUnchanged", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "a", + "provider": "provider.null", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "8521602373864259745", + "triggers": null + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-pull-backend/main.tf b/pkg/command/testdata/state-pull-backend/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ 
b/pkg/command/testdata/state-pull-backend/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate b/pkg/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate new file mode 100644 index 00000000000..073bd7a8223 --- /dev/null +++ b/pkg/command/testdata/state-push-bad-lineage/.terraform/terraform.tfstate @@ -0,0 +1,22 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate" + }, + "hash": 9073424445967744180 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-push-bad-lineage/local-state.tfstate b/pkg/command/testdata/state-push-bad-lineage/local-state.tfstate new file mode 100644 index 00000000000..d4317e76bb5 --- /dev/null +++ b/pkg/command/testdata/state-push-bad-lineage/local-state.tfstate @@ -0,0 +1,11 @@ +{ + "version": 4, + "serial": 1, + "lineage": "mismatch", + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + } +} diff --git a/pkg/command/testdata/state-push-bad-lineage/main.tf b/pkg/command/testdata/state-push-bad-lineage/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-push-bad-lineage/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-bad-lineage/replace.tfstate b/pkg/command/testdata/state-push-bad-lineage/replace.tfstate new file mode 100644 index 00000000000..670f0cc0544 --- /dev/null +++ b/pkg/command/testdata/state-push-bad-lineage/replace.tfstate @@ -0,0 +1,11 @@ +{ + "version": 4, + "serial": 2, + "lineage": "hello", + "outputs": { + "foo": { + "type": "string", + "value": "baz" + } + } +} diff --git 
a/pkg/command/testdata/state-push-good/.terraform/terraform.tfstate b/pkg/command/testdata/state-push-good/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-push-good/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-push-good/main.tf b/pkg/command/testdata/state-push-good/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-push-good/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-good/replace.tfstate b/pkg/command/testdata/state-push-good/replace.tfstate new file mode 100644 index 00000000000..914425d600b --- /dev/null +++ b/pkg/command/testdata/state-push-good/replace.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"hello","outputs":{},"resources":[{"mode":"managed","type":"null_resource","name":"b","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"9051675049789185374","triggers":null}}]}]} diff --git a/pkg/command/testdata/state-push-replace-match/.terraform/terraform.tfstate b/pkg/command/testdata/state-push-replace-match/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-push-replace-match/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + 
"modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-push-replace-match/local-state.tfstate b/pkg/command/testdata/state-push-replace-match/local-state.tfstate new file mode 100644 index 00000000000..0d191d52888 --- /dev/null +++ b/pkg/command/testdata/state-push-replace-match/local-state.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":1,"lineage":"hello","outputs":{},"resources":[{"mode":"managed","type":"null_resource","name":"a","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"8521602373864259745","triggers":null}}]}]} diff --git a/pkg/command/testdata/state-push-replace-match/main.tf b/pkg/command/testdata/state-push-replace-match/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-push-replace-match/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-replace-match/replace.tfstate b/pkg/command/testdata/state-push-replace-match/replace.tfstate new file mode 100644 index 00000000000..206685b25e8 --- /dev/null +++ b/pkg/command/testdata/state-push-replace-match/replace.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":2,"lineage":"hello","outputs":{},"resources":[{"mode":"managed","type":"null_resource","name":"b","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"9051675049789185374","triggers":null}}]}]} diff --git a/pkg/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate b/pkg/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate new file mode 100644 index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-newer/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": 
"local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-push-serial-newer/local-state.tfstate b/pkg/command/testdata/state-push-serial-newer/local-state.tfstate new file mode 100644 index 00000000000..ad336a6bb4a --- /dev/null +++ b/pkg/command/testdata/state-push-serial-newer/local-state.tfstate @@ -0,0 +1,23 @@ +{ + "version": 4, + "serial": 3, + "lineage": "hello", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "a", + "provider": "provider[\"registry.opentofu.org/-/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "8521602373864259745", + "triggers": null + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-push-serial-newer/main.tf b/pkg/command/testdata/state-push-serial-newer/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-newer/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-serial-newer/replace.tfstate b/pkg/command/testdata/state-push-serial-newer/replace.tfstate new file mode 100644 index 00000000000..614ae72480f --- /dev/null +++ b/pkg/command/testdata/state-push-serial-newer/replace.tfstate @@ -0,0 +1,23 @@ +{ + "version": 4, + "serial": 2, + "lineage": "hello", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "b", + "provider": "provider[\"registry.opentofu.org/-/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "9051675049789185374", + "triggers": null + } + } + ] + } + ] +} diff --git a/pkg/command/testdata/state-push-serial-older/.terraform/terraform.tfstate b/pkg/command/testdata/state-push-serial-older/.terraform/terraform.tfstate new file mode 100644 
index 00000000000..122adb81231 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-older/.terraform/terraform.tfstate @@ -0,0 +1,23 @@ +{ + "version": 3, + "serial": 0, + "lineage": "666f9301-7e65-4b19-ae23-71184bb19b03", + "backend": { + "type": "local", + "config": { + "path": "local-state.tfstate", + "workspace_dir": null + }, + "hash": 4282859327 + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {}, + "depends_on": [] + } + ] +} diff --git a/pkg/command/testdata/state-push-serial-older/local-state.tfstate b/pkg/command/testdata/state-push-serial-older/local-state.tfstate new file mode 100644 index 00000000000..0d191d52888 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-older/local-state.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":1,"lineage":"hello","outputs":{},"resources":[{"mode":"managed","type":"null_resource","name":"a","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"8521602373864259745","triggers":null}}]}]} diff --git a/pkg/command/testdata/state-push-serial-older/main.tf b/pkg/command/testdata/state-push-serial-older/main.tf new file mode 100644 index 00000000000..ca1bd3921e1 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-older/main.tf @@ -0,0 +1,5 @@ +terraform { + backend "local" { + path = "local-state.tfstate" + } +} diff --git a/pkg/command/testdata/state-push-serial-older/replace.tfstate b/pkg/command/testdata/state-push-serial-older/replace.tfstate new file mode 100644 index 00000000000..206685b25e8 --- /dev/null +++ b/pkg/command/testdata/state-push-serial-older/replace.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":2,"lineage":"hello","outputs":{},"resources":[{"mode":"managed","type":"null_resource","name":"b","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"9051675049789185374","triggers":null}}]}]} diff --git a/pkg/command/testdata/statelocker.go b/pkg/command/testdata/statelocker.go new file mode 100644 index 
00000000000..413c5ab1c0e --- /dev/null +++ b/pkg/command/testdata/statelocker.go @@ -0,0 +1,54 @@ +// statelocker use used for testing command with a locked state. +// This will lock the state file at a given path, then wait for a sigal. On +// SIGINT and SIGTERM the state will be Unlocked before exit. +package main + +import ( + "io" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +func main() { + if len(os.Args) != 2 { + log.Fatal(os.Args[0], "statefile") + } + + s := &clistate.LocalState{ + Path: os.Args[1], + } + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Info = "state locker" + + lockID, err := s.Lock(info) + if err != nil { + io.WriteString(os.Stderr, err.Error()) + return + } + + // signal to the caller that we're locked + io.WriteString(os.Stdout, "LOCKID "+lockID) + + defer func() { + if err := s.Unlock(lockID); err != nil { + io.WriteString(os.Stderr, err.Error()) + } + }() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + + // timeout after 10 second in case we don't get cleaned up by the test + select { + case <-time.After(10 * time.Second): + case <-c: + } +} diff --git a/pkg/command/testdata/test/broken_no_valid_hcl/main.tf b/pkg/command/testdata/test/broken_no_valid_hcl/main.tf new file mode 100644 index 00000000000..818161eba82 --- /dev/null +++ b/pkg/command/testdata/test/broken_no_valid_hcl/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} diff --git a/pkg/command/testdata/test/broken_no_valid_hcl/main.tftest.hcl b/pkg/command/testdata/test/broken_no_valid_hcl/main.tftest.hcl new file mode 100644 index 00000000000..8d329bc469a --- /dev/null +++ b/pkg/command/testdata/test/broken_no_valid_hcl/main.tftest.hcl @@ -0,0 +1,2 @@ +This is not valid HCL +run "test" {} diff --git a/pkg/command/testdata/test/broken_wrong_block_check/main.tf 
b/pkg/command/testdata/test/broken_wrong_block_check/main.tf new file mode 100644 index 00000000000..818161eba82 --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_check/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} diff --git a/pkg/command/testdata/test/broken_wrong_block_check/main.tftest.hcl b/pkg/command/testdata/test/broken_wrong_block_check/main.tftest.hcl new file mode 100644 index 00000000000..e624432ab9c --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_check/main.tftest.hcl @@ -0,0 +1,8 @@ +check "expected_to_fail" { + assert { + condition = test_resource.resource.value != "value" + error_message = "something" + } +} + +run "test" {} diff --git a/pkg/command/testdata/test/broken_wrong_block_data/main.tf b/pkg/command/testdata/test/broken_wrong_block_data/main.tf new file mode 100644 index 00000000000..818161eba82 --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_data/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} diff --git a/pkg/command/testdata/test/broken_wrong_block_data/main.tftest.hcl b/pkg/command/testdata/test/broken_wrong_block_data/main.tftest.hcl new file mode 100644 index 00000000000..6822b600321 --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_data/main.tftest.hcl @@ -0,0 +1,4 @@ +data "test_resource" "i_cant_write_that_here" { +} + +run "test" {} diff --git a/pkg/command/testdata/test/broken_wrong_block_output/main.tf b/pkg/command/testdata/test/broken_wrong_block_output/main.tf new file mode 100644 index 00000000000..818161eba82 --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_output/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} diff --git a/pkg/command/testdata/test/broken_wrong_block_output/main.tftest.hcl b/pkg/command/testdata/test/broken_wrong_block_output/main.tftest.hcl new file mode 100644 index 00000000000..b453c1fc5b8 --- /dev/null +++ 
b/pkg/command/testdata/test/broken_wrong_block_output/main.tftest.hcl @@ -0,0 +1,5 @@ +output "abc" { + value = "1" +} + +run "test" {} diff --git a/pkg/command/testdata/test/broken_wrong_block_resource/main.tf b/pkg/command/testdata/test/broken_wrong_block_resource/main.tf new file mode 100644 index 00000000000..818161eba82 --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_resource/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} diff --git a/pkg/command/testdata/test/broken_wrong_block_resource/main.tftest.hcl b/pkg/command/testdata/test/broken_wrong_block_resource/main.tftest.hcl new file mode 100644 index 00000000000..1a21307f68d --- /dev/null +++ b/pkg/command/testdata/test/broken_wrong_block_resource/main.tftest.hcl @@ -0,0 +1,4 @@ +resource "test_resource" "i_cant_write_that_here" { +} + +run "test" {} diff --git a/pkg/command/testdata/test/custom_condition_checks/main.tf b/pkg/command/testdata/test/custom_condition_checks/main.tf new file mode 100644 index 00000000000..d010847f854 --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_checks/main.tf @@ -0,0 +1,15 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + value = var.input +} + +check "expected_to_fail" { + assert { + condition = test_resource.resource.value != var.input + error_message = "this really should fail" + } +} diff --git a/pkg/command/testdata/test/custom_condition_checks/main.tftest.hcl b/pkg/command/testdata/test/custom_condition_checks/main.tftest.hcl new file mode 100644 index 00000000000..d3ead1fe15f --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_checks/main.tftest.hcl @@ -0,0 +1,5 @@ +variables { + input = "some value" +} + +run "test" {} diff --git a/pkg/command/testdata/test/custom_condition_inputs/main.tf b/pkg/command/testdata/test/custom_condition_inputs/main.tf new file mode 100644 index 00000000000..027c8e8fdfc --- /dev/null +++ 
b/pkg/command/testdata/test/custom_condition_inputs/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string + + validation { + condition = var.input == "something very specific" + error_message = "this should definitely fail" + } +} + +resource "test_resource" "resource" { + value = var.input +} diff --git a/pkg/command/testdata/test/custom_condition_inputs/main.tftest.hcl b/pkg/command/testdata/test/custom_condition_inputs/main.tftest.hcl new file mode 100644 index 00000000000..d3ead1fe15f --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_inputs/main.tftest.hcl @@ -0,0 +1,5 @@ +variables { + input = "some value" +} + +run "test" {} diff --git a/pkg/command/testdata/test/custom_condition_outputs/main.tf b/pkg/command/testdata/test/custom_condition_outputs/main.tf new file mode 100644 index 00000000000..af05c486a04 --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_outputs/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string +} + +output "output" { + value = var.input + + precondition { + condition = var.input == "something incredibly specific" + error_message = "this should fail" + } +} diff --git a/pkg/command/testdata/test/custom_condition_outputs/main.tftest.hcl b/pkg/command/testdata/test/custom_condition_outputs/main.tftest.hcl new file mode 100644 index 00000000000..d3ead1fe15f --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_outputs/main.tftest.hcl @@ -0,0 +1,5 @@ +variables { + input = "some value" +} + +run "test" {} diff --git a/pkg/command/testdata/test/custom_condition_resources/main.tf b/pkg/command/testdata/test/custom_condition_resources/main.tf new file mode 100644 index 00000000000..0b12d8cda06 --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_resources/main.tf @@ -0,0 +1,15 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + value = var.input + + lifecycle { + postcondition { + condition = self.value != var.input + error_message = "this really 
should fail" + } + } +} diff --git a/pkg/command/testdata/test/custom_condition_resources/main.tftest.hcl b/pkg/command/testdata/test/custom_condition_resources/main.tftest.hcl new file mode 100644 index 00000000000..d3ead1fe15f --- /dev/null +++ b/pkg/command/testdata/test/custom_condition_resources/main.tftest.hcl @@ -0,0 +1,5 @@ +variables { + input = "some value" +} + +run "test" {} diff --git a/pkg/command/testdata/test/default_variables/main.tf b/pkg/command/testdata/test/default_variables/main.tf new file mode 100644 index 00000000000..ce7d9e83d82 --- /dev/null +++ b/pkg/command/testdata/test/default_variables/main.tf @@ -0,0 +1,5 @@ + +variable "input" { + type = string + default = "Hello, world!" +} diff --git a/pkg/command/testdata/test/default_variables/main.tftest.hcl b/pkg/command/testdata/test/default_variables/main.tftest.hcl new file mode 100644 index 00000000000..a6292d09235 --- /dev/null +++ b/pkg/command/testdata/test/default_variables/main.tftest.hcl @@ -0,0 +1,7 @@ + +run "applies_defaults" { + assert { + condition = var.input == "Hello, world!" 
+ error_message = "should have applied default value" + } +} diff --git a/pkg/command/testdata/test/expect_failures_checks/main.tf b/pkg/command/testdata/test/expect_failures_checks/main.tf new file mode 100644 index 00000000000..d010847f854 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_checks/main.tf @@ -0,0 +1,15 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + value = var.input +} + +check "expected_to_fail" { + assert { + condition = test_resource.resource.value != var.input + error_message = "this really should fail" + } +} diff --git a/pkg/command/testdata/test/expect_failures_checks/main.tftest.hcl b/pkg/command/testdata/test/expect_failures_checks/main.tftest.hcl new file mode 100644 index 00000000000..bcac23b6532 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_checks/main.tftest.hcl @@ -0,0 +1,9 @@ +variables { + input = "some value" +} + +run "test" { + expect_failures = [ + check.expected_to_fail + ] +} diff --git a/pkg/command/testdata/test/expect_failures_inputs/main.tf b/pkg/command/testdata/test/expect_failures_inputs/main.tf new file mode 100644 index 00000000000..027c8e8fdfc --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_inputs/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string + + validation { + condition = var.input == "something very specific" + error_message = "this should definitely fail" + } +} + +resource "test_resource" "resource" { + value = var.input +} diff --git a/pkg/command/testdata/test/expect_failures_inputs/main.tftest.hcl b/pkg/command/testdata/test/expect_failures_inputs/main.tftest.hcl new file mode 100644 index 00000000000..ec603112b99 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_inputs/main.tftest.hcl @@ -0,0 +1,11 @@ +variables { + input = "some value" +} + +run "test" { + command = plan + + expect_failures = [ + var.input + ] +} diff --git a/pkg/command/testdata/test/expect_failures_outputs/main.tf 
b/pkg/command/testdata/test/expect_failures_outputs/main.tf new file mode 100644 index 00000000000..af05c486a04 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_outputs/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string +} + +output "output" { + value = var.input + + precondition { + condition = var.input == "something incredibly specific" + error_message = "this should fail" + } +} diff --git a/pkg/command/testdata/test/expect_failures_outputs/main.tftest.hcl b/pkg/command/testdata/test/expect_failures_outputs/main.tftest.hcl new file mode 100644 index 00000000000..49b2f169734 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_outputs/main.tftest.hcl @@ -0,0 +1,12 @@ +variables { + input = "some value" +} + +run "test" { + + command = plan + + expect_failures = [ + output.output + ] +} diff --git a/pkg/command/testdata/test/expect_failures_resources/main.tf b/pkg/command/testdata/test/expect_failures_resources/main.tf new file mode 100644 index 00000000000..0b12d8cda06 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_resources/main.tf @@ -0,0 +1,15 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + value = var.input + + lifecycle { + postcondition { + condition = self.value != var.input + error_message = "this really should fail" + } + } +} diff --git a/pkg/command/testdata/test/expect_failures_resources/main.tftest.hcl b/pkg/command/testdata/test/expect_failures_resources/main.tftest.hcl new file mode 100644 index 00000000000..899469dd882 --- /dev/null +++ b/pkg/command/testdata/test/expect_failures_resources/main.tftest.hcl @@ -0,0 +1,11 @@ +variables { + input = "some value" +} + +run "test" { + command = plan + + expect_failures = [ + test_resource.resource + ] +} diff --git a/pkg/command/testdata/test/expect_runtime_check_fail/main.tf b/pkg/command/testdata/test/expect_runtime_check_fail/main.tf new file mode 100644 index 00000000000..879a901c0ef --- /dev/null +++ 
b/pkg/command/testdata/test/expect_runtime_check_fail/main.tf @@ -0,0 +1,8 @@ +resource "test_resource" "resource" {} + +check "check" { + assert { + condition = test_resource.resource.id == "" + error_message = "check block: resource has no id" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/expect_runtime_check_fail/main.tftest.hcl b/pkg/command/testdata/test/expect_runtime_check_fail/main.tftest.hcl new file mode 100644 index 00000000000..eb3013a8779 --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_fail/main.tftest.hcl @@ -0,0 +1 @@ +run "test" {} diff --git a/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tf b/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tf new file mode 100644 index 00000000000..879a901c0ef --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tf @@ -0,0 +1,8 @@ +resource "test_resource" "resource" {} + +check "check" { + assert { + condition = test_resource.resource.id == "" + error_message = "check block: resource has no id" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tftest.hcl b/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tftest.hcl new file mode 100644 index 00000000000..ae482b633c0 --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_fail_command_plan/main.tftest.hcl @@ -0,0 +1,3 @@ +run "test" { + command = plan +} diff --git a/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tf b/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tf new file mode 100644 index 00000000000..879a901c0ef --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tf @@ -0,0 +1,8 @@ +resource "test_resource" "resource" {} + +check "check" { + assert { + condition = test_resource.resource.id == "" + error_message = "check 
block: resource has no id" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tftest.hcl b/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tftest.hcl new file mode 100644 index 00000000000..8bc2e3f9f1c --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_pass_command_plan_expected/main.tftest.hcl @@ -0,0 +1,6 @@ +run "test" { + command = plan + expect_failures = [ + check.check + ] +} diff --git a/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tf b/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tf new file mode 100644 index 00000000000..879a901c0ef --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tf @@ -0,0 +1,8 @@ +resource "test_resource" "resource" {} + +check "check" { + assert { + condition = test_resource.resource.id == "" + error_message = "check block: resource has no id" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tftest.hcl b/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tftest.hcl new file mode 100644 index 00000000000..22123703457 --- /dev/null +++ b/pkg/command/testdata/test/expect_runtime_check_pass_with_expect/main.tftest.hcl @@ -0,0 +1,5 @@ +run "test" { + expect_failures = [ + check.check + ] +} diff --git a/pkg/command/testdata/test/invalid-module/main.tf b/pkg/command/testdata/test/invalid-module/main.tf new file mode 100644 index 00000000000..0b62a12f691 --- /dev/null +++ b/pkg/command/testdata/test/invalid-module/main.tf @@ -0,0 +1,8 @@ + +locals { + my_value = "Hello, world!" 
+} + +resource "test_resource" "example" { + value = local.my_value +} diff --git a/pkg/command/testdata/test/invalid-module/main.tftest.hcl b/pkg/command/testdata/test/invalid-module/main.tftest.hcl new file mode 100644 index 00000000000..4d5a1efa0a6 --- /dev/null +++ b/pkg/command/testdata/test/invalid-module/main.tftest.hcl @@ -0,0 +1,8 @@ + +run "invalid" { + module { + source = "./setup" + } +} + +run "test" {} diff --git a/pkg/command/testdata/test/invalid-module/setup/main.tf b/pkg/command/testdata/test/invalid-module/setup/main.tf new file mode 100644 index 00000000000..2573bf34d46 --- /dev/null +++ b/pkg/command/testdata/test/invalid-module/setup/main.tf @@ -0,0 +1,4 @@ + +resource "test_resource" "setup" { + value = var.not_real // Oh no! +} diff --git a/pkg/command/testdata/test/invalid/main.tf b/pkg/command/testdata/test/invalid/main.tf new file mode 100644 index 00000000000..0b62a12f691 --- /dev/null +++ b/pkg/command/testdata/test/invalid/main.tf @@ -0,0 +1,8 @@ + +locals { + my_value = "Hello, world!" 
+} + +resource "test_resource" "example" { + value = local.my_value +} diff --git a/pkg/command/testdata/test/invalid/main.tftest.hcl b/pkg/command/testdata/test/invalid/main.tftest.hcl new file mode 100644 index 00000000000..336f52b56eb --- /dev/null +++ b/pkg/command/testdata/test/invalid/main.tftest.hcl @@ -0,0 +1,8 @@ + +run "invalid" { + + expect_failures = [ + local.my_value, + ] + +} diff --git a/pkg/command/testdata/test/invalid_default_state/main.tf b/pkg/command/testdata/test/invalid_default_state/main.tf new file mode 100644 index 00000000000..5cbdbbedcf3 --- /dev/null +++ b/pkg/command/testdata/test/invalid_default_state/main.tf @@ -0,0 +1,8 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + value = var.input +} diff --git a/pkg/command/testdata/test/invalid_default_state/main.tftest.hcl b/pkg/command/testdata/test/invalid_default_state/main.tftest.hcl new file mode 100644 index 00000000000..2b9dbba21d3 --- /dev/null +++ b/pkg/command/testdata/test/invalid_default_state/main.tftest.hcl @@ -0,0 +1,6 @@ +run "test" { + assert { + condition = test_resource.resource.value == "Hello, world!" + error_message = "wrong condition" + } +} diff --git a/pkg/command/testdata/test/invalid_run_block_name/main.tf b/pkg/command/testdata/test/invalid_run_block_name/main.tf new file mode 100644 index 00000000000..f9c3e7081b1 --- /dev/null +++ b/pkg/command/testdata/test/invalid_run_block_name/main.tf @@ -0,0 +1,3 @@ +provider "test" { + value = "foo" +} \ No newline at end of file diff --git a/pkg/command/testdata/test/invalid_run_block_name/tests/main.tftest.hcl b/pkg/command/testdata/test/invalid_run_block_name/tests/main.tftest.hcl new file mode 100644 index 00000000000..18c2d875e0d --- /dev/null +++ b/pkg/command/testdata/test/invalid_run_block_name/tests/main.tftest.hcl @@ -0,0 +1,5 @@ +run "sample run" { + module { + source = "./.." 
+ } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/is_sorted/1.tftest.hcl b/pkg/command/testdata/test/is_sorted/1.tftest.hcl new file mode 100644 index 00000000000..1374421a989 --- /dev/null +++ b/pkg/command/testdata/test/is_sorted/1.tftest.hcl @@ -0,0 +1,6 @@ +run "a" { + assert { + condition = test_resource.resource.value == null + error_message = "should pass" + } +} diff --git a/pkg/command/testdata/test/is_sorted/2.tftest.hcl b/pkg/command/testdata/test/is_sorted/2.tftest.hcl new file mode 100644 index 00000000000..a0c45332f52 --- /dev/null +++ b/pkg/command/testdata/test/is_sorted/2.tftest.hcl @@ -0,0 +1,6 @@ +run "b" { + assert { + condition = test_resource.resource.value == null + error_message = "should pass" + } +} diff --git a/pkg/command/testdata/test/is_sorted/3.tftest.hcl b/pkg/command/testdata/test/is_sorted/3.tftest.hcl new file mode 100644 index 00000000000..b1c1cda2e60 --- /dev/null +++ b/pkg/command/testdata/test/is_sorted/3.tftest.hcl @@ -0,0 +1,6 @@ +run "c" { + assert { + condition = test_resource.resource.value == null + error_message = "should pass" + } +} diff --git a/pkg/command/testdata/test/is_sorted/main.tf b/pkg/command/testdata/test/is_sorted/main.tf new file mode 100644 index 00000000000..8e8f142df02 --- /dev/null +++ b/pkg/command/testdata/test/is_sorted/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} \ No newline at end of file diff --git a/pkg/command/testdata/test/missing-provider-in-run-block/main.tf b/pkg/command/testdata/test/missing-provider-in-run-block/main.tf new file mode 100644 index 00000000000..ded2238470e --- /dev/null +++ b/pkg/command/testdata/test/missing-provider-in-run-block/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [ test.secondary ] + } + } +} + +resource "test_resource" "primary" { + value = "foo" +} + +resource "test_resource" "secondary" { + provider = test.secondary + value = "bar" 
+} diff --git a/pkg/command/testdata/test/missing-provider-in-run-block/main.tftest.hcl b/pkg/command/testdata/test/missing-provider-in-run-block/main.tftest.hcl new file mode 100644 index 00000000000..ec840ccba7f --- /dev/null +++ b/pkg/command/testdata/test/missing-provider-in-run-block/main.tftest.hcl @@ -0,0 +1,23 @@ + +provider "test" {} + +provider "test" { + alias = "secondary" +} + +run "passes_validation" { + + providers = { + test = test + } + + assert { + condition = test_resource.primary.value == "foo" + error_message = "primary contains invalid value" + } + + assert { + condition = test_resource.secondary.value == "bar" + error_message = "secondary contains invalid value" + } +} diff --git a/pkg/command/testdata/test/missing-provider-in-test-module/main.tf b/pkg/command/testdata/test/missing-provider-in-test-module/main.tf new file mode 100644 index 00000000000..de4efc04cbf --- /dev/null +++ b/pkg/command/testdata/test/missing-provider-in-test-module/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "primary" { + value = "foo" +} diff --git a/pkg/command/testdata/test/missing-provider-in-test-module/main.tftest.hcl b/pkg/command/testdata/test/missing-provider-in-test-module/main.tftest.hcl new file mode 100644 index 00000000000..98dd73ae232 --- /dev/null +++ b/pkg/command/testdata/test/missing-provider-in-test-module/main.tftest.hcl @@ -0,0 +1,40 @@ + +provider "test" {} + +provider "test" { + alias = "secondary" +} + +run "passes_validation_primary" { + + providers = { + test = test + } + + assert { + condition = test_resource.primary.value == "foo" + error_message = "primary contains invalid value" + } + +} + +run "passes_validation_secondary" { + + providers = { + test = test + } + + module { + source = "./setup" + } + + assert { + condition = test_resource.primary.value == "foo" + error_message = "primary contains invalid value" + } + + assert { + condition = test_resource.secondary.value == "bar" + error_message = "secondary contains invalid 
value" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/missing-provider-in-test-module/setup/main.tf b/pkg/command/testdata/test/missing-provider-in-test-module/setup/main.tf new file mode 100644 index 00000000000..ded2238470e --- /dev/null +++ b/pkg/command/testdata/test/missing-provider-in-test-module/setup/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [ test.secondary ] + } + } +} + +resource "test_resource" "primary" { + value = "foo" +} + +resource "test_resource" "secondary" { + provider = test.secondary + value = "bar" +} diff --git a/pkg/command/testdata/test/missing-provider/main.tf b/pkg/command/testdata/test/missing-provider/main.tf new file mode 100644 index 00000000000..ded2238470e --- /dev/null +++ b/pkg/command/testdata/test/missing-provider/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [ test.secondary ] + } + } +} + +resource "test_resource" "primary" { + value = "foo" +} + +resource "test_resource" "secondary" { + provider = test.secondary + value = "bar" +} diff --git a/pkg/command/testdata/test/missing-provider/main.tftest.hcl b/pkg/command/testdata/test/missing-provider/main.tftest.hcl new file mode 100644 index 00000000000..4901cdc8027 --- /dev/null +++ b/pkg/command/testdata/test/missing-provider/main.tftest.hcl @@ -0,0 +1,14 @@ + +provider "test" {} + +run "passes_validation" { + assert { + condition = test_resource.primary.value == "foo" + error_message = "primary contains invalid value" + } + + assert { + condition = test_resource.secondary.value == "bar" + error_message = "secondary contains invalid value" + } +} diff --git a/pkg/command/testdata/test/multiple_files/main.tf b/pkg/command/testdata/test/multiple_files/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/multiple_files/main.tf @@ -0,0 +1,3 
@@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/multiple_files/one.tftest.hcl b/pkg/command/testdata/test/multiple_files/one.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/multiple_files/one.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/multiple_files/two.tftest.hcl b/pkg/command/testdata/test/multiple_files/two.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/multiple_files/two.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/no_providers_in_main/main.tf b/pkg/command/testdata/test/no_providers_in_main/main.tf new file mode 100644 index 00000000000..fe07905e589 --- /dev/null +++ b/pkg/command/testdata/test/no_providers_in_main/main.tf @@ -0,0 +1,19 @@ + +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [test.primary, test.secondary] + } + } +} + +resource "test_resource" "primary" { + provider = test.primary + value = "foo" +} + +resource "test_resource" "secondary" { + provider = test.secondary + value = "bar" +} diff --git a/pkg/command/testdata/test/no_providers_in_main/main.tftest.hcl b/pkg/command/testdata/test/no_providers_in_main/main.tftest.hcl new file mode 100644 index 00000000000..6d888d177c1 --- /dev/null +++ b/pkg/command/testdata/test/no_providers_in_main/main.tftest.hcl @@ -0,0 +1,20 @@ + +provider "test" { + alias = "primary" +} + +provider "test" { + alias = "secondary" +} + +run "passes_validation" { + assert { + condition = test_resource.primary.value == "foo" + error_message = "primary contains invalid value" + } + + assert { + 
condition = test_resource.secondary.value == "bar" + error_message = "secondary contains invalid value" + } +} diff --git a/pkg/command/testdata/test/not_exists_output/main.tf b/pkg/command/testdata/test/not_exists_output/main.tf new file mode 100644 index 00000000000..8e8f142df02 --- /dev/null +++ b/pkg/command/testdata/test/not_exists_output/main.tf @@ -0,0 +1,2 @@ +resource "test_resource" "resource" { +} \ No newline at end of file diff --git a/pkg/command/testdata/test/not_exists_output/main.tftest.hcl b/pkg/command/testdata/test/not_exists_output/main.tftest.hcl new file mode 100644 index 00000000000..53c4ca064e5 --- /dev/null +++ b/pkg/command/testdata/test/not_exists_output/main.tftest.hcl @@ -0,0 +1,6 @@ +run "not_exists" { + assert { + condition = output.something_that_does_not_exist == null + error_message = "Should fail for Reference to undeclared output value" + } +} diff --git a/pkg/command/testdata/test/null_output/main.tf b/pkg/command/testdata/test/null_output/main.tf new file mode 100644 index 00000000000..012bae927f0 --- /dev/null +++ b/pkg/command/testdata/test/null_output/main.tf @@ -0,0 +1,3 @@ +output "my_null_output" { + value = null +} \ No newline at end of file diff --git a/pkg/command/testdata/test/null_output/main.tftest.hcl b/pkg/command/testdata/test/null_output/main.tftest.hcl new file mode 100644 index 00000000000..aacc8d7c946 --- /dev/null +++ b/pkg/command/testdata/test/null_output/main.tftest.hcl @@ -0,0 +1,6 @@ +run "null" { + assert { + condition = output.my_null_output == null + error_message = "Should work" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/only_modules/example/main.tf b/pkg/command/testdata/test/only_modules/example/main.tf new file mode 100644 index 00000000000..3e6809599ca --- /dev/null +++ b/pkg/command/testdata/test/only_modules/example/main.tf @@ -0,0 +1,9 @@ + +variable "input" { + type = string +} + +resource "test_resource" "module_resource" { + id = "df6h8as9" + value = 
var.input +} diff --git a/pkg/command/testdata/test/only_modules/main.tf b/pkg/command/testdata/test/only_modules/main.tf new file mode 100644 index 00000000000..0cad6cfbbc4 --- /dev/null +++ b/pkg/command/testdata/test/only_modules/main.tf @@ -0,0 +1,9 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + id = "598318e0" + value = var.input +} diff --git a/pkg/command/testdata/test/only_modules/main.tftest.hcl b/pkg/command/testdata/test/only_modules/main.tftest.hcl new file mode 100644 index 00000000000..480e8bb93e5 --- /dev/null +++ b/pkg/command/testdata/test/only_modules/main.tftest.hcl @@ -0,0 +1,23 @@ +# This is an example test file from a use case requested by a user. We only +# refer to alternate modules and not the main configuration. This means we +# shouldn't have to provide any data for the main configuration. + +run "first" { + module { + source = "./example" + } + + variables { + input = "start" + } +} + +run "second" { + module { + source = "./example" + } + + variables { + input = "update" + } +} diff --git a/pkg/command/testdata/test/override_with_tests_dir_variables/main.tf b/pkg/command/testdata/test/override_with_tests_dir_variables/main.tf new file mode 100644 index 00000000000..ff87bac8e8f --- /dev/null +++ b/pkg/command/testdata/test/override_with_tests_dir_variables/main.tf @@ -0,0 +1,7 @@ +variable "testVar" { + type = string +} + +resource "test_resource" "testRes" { + value = var.testVar +} diff --git a/pkg/command/testdata/test/override_with_tests_dir_variables/terraform.tfvars b/pkg/command/testdata/test/override_with_tests_dir_variables/terraform.tfvars new file mode 100644 index 00000000000..1f3399c64c7 --- /dev/null +++ b/pkg/command/testdata/test/override_with_tests_dir_variables/terraform.tfvars @@ -0,0 +1 @@ +testVar = "ValueFROM./tfvars" diff --git a/pkg/command/testdata/test/override_with_tests_dir_variables/tests/main.tftest.hcl 
b/pkg/command/testdata/test/override_with_tests_dir_variables/tests/main.tftest.hcl new file mode 100644 index 00000000000..832360947c2 --- /dev/null +++ b/pkg/command/testdata/test/override_with_tests_dir_variables/tests/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.testRes.value == "ValueFROMtests/tfvars" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/override_with_tests_dir_variables/tests/terraform.tfvars b/pkg/command/testdata/test/override_with_tests_dir_variables/tests/terraform.tfvars new file mode 100644 index 00000000000..917e0d98133 --- /dev/null +++ b/pkg/command/testdata/test/override_with_tests_dir_variables/tests/terraform.tfvars @@ -0,0 +1 @@ +testVar = "ValueFROMtests/tfvars" diff --git a/pkg/command/testdata/test/partial_update_failure/main.tf b/pkg/command/testdata/test/partial_update_failure/main.tf new file mode 100644 index 00000000000..6675af5dfb0 --- /dev/null +++ b/pkg/command/testdata/test/partial_update_failure/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "foo" { + value = "foo" +} + +resource "test_resource" "bar" { + value = "bar" +} diff --git a/pkg/command/testdata/test/partial_update_failure/main.tftest.hcl b/pkg/command/testdata/test/partial_update_failure/main.tftest.hcl new file mode 100644 index 00000000000..5a1fd096df0 --- /dev/null +++ b/pkg/command/testdata/test/partial_update_failure/main.tftest.hcl @@ -0,0 +1,10 @@ +run "partial" { + plan_options { + target = [test_resource.foo] + } + + assert { + condition = test_resource.bar.value == "bar" + error_message = "should fail" + } +} diff --git a/pkg/command/testdata/test/partial_updates/main.tf b/pkg/command/testdata/test/partial_updates/main.tf new file mode 100644 index 00000000000..0da3bb1ff03 --- /dev/null +++ b/pkg/command/testdata/test/partial_updates/main.tf @@ -0,0 +1,15 @@ + +resource "test_resource" "resource" {} + +locals { + follow = { + (test_resource.resource.id): 
"follow" + } +} + +resource "test_resource" "follow" { + for_each = local.follow + + id = each.key + value = each.value +} diff --git a/pkg/command/testdata/test/partial_updates/main.tftest.hcl b/pkg/command/testdata/test/partial_updates/main.tftest.hcl new file mode 100644 index 00000000000..04cdfa4034e --- /dev/null +++ b/pkg/command/testdata/test/partial_updates/main.tftest.hcl @@ -0,0 +1,10 @@ + +run "first" { + plan_options { + target = [ + test_resource.resource, + ] + } +} + +run "second" {} diff --git a/pkg/command/testdata/test/pass_module_with_no_resource/first/main.tf b/pkg/command/testdata/test/pass_module_with_no_resource/first/main.tf new file mode 100644 index 00000000000..25408ebd8d7 --- /dev/null +++ b/pkg/command/testdata/test/pass_module_with_no_resource/first/main.tf @@ -0,0 +1,7 @@ +module "second" { + source = "../second" +} + +output "id" { + value = module.second.id +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_module_with_no_resource/main.tf b/pkg/command/testdata/test/pass_module_with_no_resource/main.tf new file mode 100644 index 00000000000..1d945425af0 --- /dev/null +++ b/pkg/command/testdata/test/pass_module_with_no_resource/main.tf @@ -0,0 +1,5 @@ +module "first" { + source = "./first" +} + +resource "test_resource" "resource" {} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_module_with_no_resource/main.tftest.hcl b/pkg/command/testdata/test/pass_module_with_no_resource/main.tftest.hcl new file mode 100644 index 00000000000..43d4f75f3b9 --- /dev/null +++ b/pkg/command/testdata/test/pass_module_with_no_resource/main.tftest.hcl @@ -0,0 +1,8 @@ +run "run" { + command = apply + + assert { + condition = module.first.id != 0 + error_message = "Fail" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_module_with_no_resource/second/main.tf b/pkg/command/testdata/test/pass_module_with_no_resource/second/main.tf new file mode 100644 index 00000000000..0b3cd15c2d5 
--- /dev/null +++ b/pkg/command/testdata/test/pass_module_with_no_resource/second/main.tf @@ -0,0 +1,5 @@ +resource "test_resource" "resource" {} + +output "id" { + value = test_resource.resource.id +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_var_inside_variables/main.tf b/pkg/command/testdata/test/pass_var_inside_variables/main.tf new file mode 100644 index 00000000000..6a9de968dee --- /dev/null +++ b/pkg/command/testdata/test/pass_var_inside_variables/main.tf @@ -0,0 +1,10 @@ +variable "var1" { + default = true +} + +variable "var2" { +} + +output "sss" { + value = var.var2 +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_var_inside_variables/main.tftest.hcl b/pkg/command/testdata/test/pass_var_inside_variables/main.tftest.hcl new file mode 100644 index 00000000000..292b7a29f30 --- /dev/null +++ b/pkg/command/testdata/test/pass_var_inside_variables/main.tftest.hcl @@ -0,0 +1,10 @@ +variables { + var2 = var.var1 ? "true" : "false" +} + +run "first" { + assert { + condition = output.sss == "false" + error_message = "Should work" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_var_inside_variables/terraform.tfvars b/pkg/command/testdata/test/pass_var_inside_variables/terraform.tfvars new file mode 100644 index 00000000000..fbd49b98796 --- /dev/null +++ b/pkg/command/testdata/test/pass_var_inside_variables/terraform.tfvars @@ -0,0 +1 @@ +var1 = false \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tf b/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tf new file mode 100644 index 00000000000..6a9de968dee --- /dev/null +++ b/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tf @@ -0,0 +1,10 @@ +variable "var1" { + default = true +} + +variable "var2" { +} + +output "sss" { + value = var.var2 +} \ No newline at end of file diff --git 
a/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tftest.hcl b/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tftest.hcl new file mode 100644 index 00000000000..a469b7c1718 --- /dev/null +++ b/pkg/command/testdata/test/pass_var_with_default_value_inside_variables/main.tftest.hcl @@ -0,0 +1,10 @@ +variables { + var2 = var.var1 ? "true" : "false" +} + +run "first" { + assert { + condition = output.sss == "true" + error_message = "Should work" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_with_local_variable/main.tf b/pkg/command/testdata/test/pass_with_local_variable/main.tf new file mode 100644 index 00000000000..548fe68384f --- /dev/null +++ b/pkg/command/testdata/test/pass_with_local_variable/main.tf @@ -0,0 +1,4 @@ +output "foo" { + description = "Output" + value = "bar" +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_with_local_variable/tests/test.tftest.hcl b/pkg/command/testdata/test/pass_with_local_variable/tests/test.tftest.hcl new file mode 100644 index 00000000000..4ccdf202257 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_local_variable/tests/test.tftest.hcl @@ -0,0 +1,15 @@ +run "first" { + command = apply +} + +run "second" { + command = plan + + module { + source = "./tests/testmodule" + } + + variables { + foo = run.first.foo + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/pass_with_local_variable/tests/testmodule/main.tf b/pkg/command/testdata/test/pass_with_local_variable/tests/testmodule/main.tf new file mode 100644 index 00000000000..7fe30007837 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_local_variable/tests/testmodule/main.tf @@ -0,0 +1,3 @@ +variable "foo" { + type = string +} diff --git a/pkg/command/testdata/test/pass_with_locals/main.tf b/pkg/command/testdata/test/pass_with_locals/main.tf new file mode 100644 index 00000000000..d398a7b8fa1 --- /dev/null +++ 
b/pkg/command/testdata/test/pass_with_locals/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "foo" { + value = "bar" +} + +locals { + value = test_resource.foo.value +} diff --git a/pkg/command/testdata/test/pass_with_locals/main.tftest.hcl b/pkg/command/testdata/test/pass_with_locals/main.tftest.hcl new file mode 100644 index 00000000000..396eb3021a3 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_locals/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = local.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/pass_with_outputs/main.tf b/pkg/command/testdata/test/pass_with_outputs/main.tf new file mode 100644 index 00000000000..7354f944a11 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_outputs/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "foo" { + value = "bar" +} + +output "value" { + value = test_resource.foo.value +} diff --git a/pkg/command/testdata/test/pass_with_outputs/main.tftest.hcl b/pkg/command/testdata/test/pass_with_outputs/main.tftest.hcl new file mode 100644 index 00000000000..bdf84aa5566 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_outputs/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = output.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/pass_with_tests_dir_variables/main.tf b/pkg/command/testdata/test/pass_with_tests_dir_variables/main.tf new file mode 100644 index 00000000000..ff87bac8e8f --- /dev/null +++ b/pkg/command/testdata/test/pass_with_tests_dir_variables/main.tf @@ -0,0 +1,7 @@ +variable "testVar" { + type = string +} + +resource "test_resource" "testRes" { + value = var.testVar +} diff --git a/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/main.tftest.hcl b/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/main.tftest.hcl new file mode 100644 index 00000000000..832360947c2 --- /dev/null +++ 
b/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.testRes.value == "ValueFROMtests/tfvars" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/terraform.tfvars b/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/terraform.tfvars new file mode 100644 index 00000000000..917e0d98133 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_tests_dir_variables/tests/terraform.tfvars @@ -0,0 +1 @@ +testVar = "ValueFROMtests/tfvars" diff --git a/pkg/command/testdata/test/pass_with_variables/main.tf b/pkg/command/testdata/test/pass_with_variables/main.tf new file mode 100644 index 00000000000..3d98070d876 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_variables/main.tf @@ -0,0 +1,7 @@ +variable "input" { + type = string +} + +resource "test_resource" "foo" { + value = var.input +} diff --git a/pkg/command/testdata/test/pass_with_variables/main.tftest.hcl b/pkg/command/testdata/test/pass_with_variables/main.tftest.hcl new file mode 100644 index 00000000000..80689560945 --- /dev/null +++ b/pkg/command/testdata/test/pass_with_variables/main.tftest.hcl @@ -0,0 +1,21 @@ +variables { + input = "bar" +} + +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} + +run "validate_test_resource" { + variables { + input = "zap" + } + + assert { + condition = test_resource.foo.value == "zap" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/plan_then_apply/main.tf b/pkg/command/testdata/test/plan_then_apply/main.tf new file mode 100644 index 00000000000..52edae3fd50 --- /dev/null +++ b/pkg/command/testdata/test/plan_then_apply/main.tf @@ -0,0 +1,4 @@ +resource "test_resource" "foo" { + id = "constant_value" + value = "bar" +} diff --git 
a/pkg/command/testdata/test/plan_then_apply/main.tftest.hcl b/pkg/command/testdata/test/plan_then_apply/main.tftest.hcl new file mode 100644 index 00000000000..1eda6d4d7c7 --- /dev/null +++ b/pkg/command/testdata/test/plan_then_apply/main.tftest.hcl @@ -0,0 +1,16 @@ +run "validate_test_resource" { + + command = plan + + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} + +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/refresh_conflicting_config/main.tf b/pkg/command/testdata/test/refresh_conflicting_config/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/refresh_conflicting_config/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/refresh_conflicting_config/main.tftest.hcl b/pkg/command/testdata/test/refresh_conflicting_config/main.tftest.hcl new file mode 100644 index 00000000000..4d3f683b24a --- /dev/null +++ b/pkg/command/testdata/test/refresh_conflicting_config/main.tftest.hcl @@ -0,0 +1,6 @@ +run "apply" { + plan_options { + mode=refresh-only + refresh=false + } +} diff --git a/pkg/command/testdata/test/refresh_only/main.tf b/pkg/command/testdata/test/refresh_only/main.tf new file mode 100644 index 00000000000..f2ac6b7fcb6 --- /dev/null +++ b/pkg/command/testdata/test/refresh_only/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "test_resource" "foo" { + value = var.input +} diff --git a/pkg/command/testdata/test/refresh_only/main.tftest.hcl b/pkg/command/testdata/test/refresh_only/main.tftest.hcl new file mode 100644 index 00000000000..2801049e8ec --- /dev/null +++ b/pkg/command/testdata/test/refresh_only/main.tftest.hcl @@ -0,0 +1,42 @@ +run "first" { + variables { + input = "first" + } + + assert { + condition = test_resource.foo.value == "first" + error_message = 
"invalid value" + } +} + +run "second" { + command=plan + plan_options { + mode=refresh-only + } + + variables { + input = "second" + } + + assert { + condition = test_resource.foo.value == "first" + error_message = "invalid value" + } +} + +run "third" { + command=plan + plan_options { + mode=normal + } + + variables { + input = "second" + } + + assert { + condition = test_resource.foo.value == "second" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/simple_fail/main.tf b/pkg/command/testdata/test/simple_fail/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/simple_fail/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/simple_fail/main.tftest.hcl b/pkg/command/testdata/test/simple_fail/main.tftest.hcl new file mode 100644 index 00000000000..319a217673a --- /dev/null +++ b/pkg/command/testdata/test/simple_fail/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "zap" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/simple_pass/main.tf b/pkg/command/testdata/test/simple_pass/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/simple_pass/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/simple_pass/main.tftest.hcl b/pkg/command/testdata/test/simple_pass/main.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/simple_pass/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/simple_pass_nested/main.tf b/pkg/command/testdata/test/simple_pass_nested/main.tf new file mode 100644 index 
00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/simple_pass_nested/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/simple_pass_nested/tests/main.tftest.hcl b/pkg/command/testdata/test/simple_pass_nested/tests/main.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/simple_pass_nested/tests/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/simple_pass_nested_alternate/main.tf b/pkg/command/testdata/test/simple_pass_nested_alternate/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/simple_pass_nested_alternate/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/simple_pass_nested_alternate/other/main.tftest.hcl b/pkg/command/testdata/test/simple_pass_nested_alternate/other/main.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/simple_pass_nested_alternate/other/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/simple_pass_very_nested/main.tf b/pkg/command/testdata/test/simple_pass_very_nested/main.tf new file mode 100644 index 00000000000..41cc84e5c4e --- /dev/null +++ b/pkg/command/testdata/test/simple_pass_very_nested/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + value = "bar" +} diff --git a/pkg/command/testdata/test/simple_pass_very_nested/tests/subdir/main.tftest.hcl b/pkg/command/testdata/test/simple_pass_very_nested/tests/subdir/main.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ 
b/pkg/command/testdata/test/simple_pass_very_nested/tests/subdir/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/state_propagation/example/main.tf b/pkg/command/testdata/test/state_propagation/example/main.tf new file mode 100644 index 00000000000..3e6809599ca --- /dev/null +++ b/pkg/command/testdata/test/state_propagation/example/main.tf @@ -0,0 +1,9 @@ + +variable "input" { + type = string +} + +resource "test_resource" "module_resource" { + id = "df6h8as9" + value = var.input +} diff --git a/pkg/command/testdata/test/state_propagation/main.tf b/pkg/command/testdata/test/state_propagation/main.tf new file mode 100644 index 00000000000..0cad6cfbbc4 --- /dev/null +++ b/pkg/command/testdata/test/state_propagation/main.tf @@ -0,0 +1,9 @@ + +variable "input" { + type = string +} + +resource "test_resource" "resource" { + id = "598318e0" + value = var.input +} diff --git a/pkg/command/testdata/test/state_propagation/main.tftest.hcl b/pkg/command/testdata/test/state_propagation/main.tftest.hcl new file mode 100644 index 00000000000..e4fca75561e --- /dev/null +++ b/pkg/command/testdata/test/state_propagation/main.tftest.hcl @@ -0,0 +1,54 @@ +# Our test will run this in verbose mode and we should see the plan output for +# the second run block showing the resource being updated as the state should +# be propagated from the first one to the second one. +# +# We also interweave alternate modules to test the handling of multiple states +# within the file. 
+ +run "initial_apply_example" { + module { + source = "./example" + } + + variables { + input = "start" + } +} + +run "initial_apply" { + variables { + input = "start" + } +} + +run "plan_second_example" { + command = plan + + module { + source = "./second_example" + } + + variables { + input = "start" + } +} + +run "plan_update" { + command = plan + + variables { + input = "update" + } +} + +run "plan_update_example" { + command = plan + + module { + source = "./example" + } + + variables { + input = "update" + } +} diff --git a/pkg/command/testdata/test/state_propagation/second_example/main.tf b/pkg/command/testdata/test/state_propagation/second_example/main.tf new file mode 100644 index 00000000000..f61e0cf2e7d --- /dev/null +++ b/pkg/command/testdata/test/state_propagation/second_example/main.tf @@ -0,0 +1,9 @@ + +variable "input" { + type = string +} + +resource "test_resource" "second_module_resource" { + id = "b6a1d8cb" + value = var.input +} diff --git a/pkg/command/testdata/test/undefined_variables/main.tf b/pkg/command/testdata/test/undefined_variables/main.tf new file mode 100644 index 00000000000..ce7d9e83d82 --- /dev/null +++ b/pkg/command/testdata/test/undefined_variables/main.tf @@ -0,0 +1,5 @@ + +variable "input" { + type = string + default = "Hello, world!" +} diff --git a/pkg/command/testdata/test/undefined_variables/main.tftest.hcl b/pkg/command/testdata/test/undefined_variables/main.tftest.hcl new file mode 100644 index 00000000000..d3009c37bc1 --- /dev/null +++ b/pkg/command/testdata/test/undefined_variables/main.tftest.hcl @@ -0,0 +1,13 @@ + +variables { + # config_free isn't defined in the config, but we'll + # still let users refer to it within the assertions. + config_free = "Hello, world!" 
+} + +run "applies_defaults" { + assert { + condition = var.input == var.config_free + error_message = "should have applied default value" + } +} diff --git a/pkg/command/testdata/test/variables/main.tf b/pkg/command/testdata/test/variables/main.tf new file mode 100644 index 00000000000..bf4c24b14df --- /dev/null +++ b/pkg/command/testdata/test/variables/main.tf @@ -0,0 +1,8 @@ +variable "input" { + type = string + default = "bar" +} + +resource "test_resource" "foo" { + value = var.input +} diff --git a/pkg/command/testdata/test/variables/main.tftest.hcl b/pkg/command/testdata/test/variables/main.tftest.hcl new file mode 100644 index 00000000000..6feaf3cc5ce --- /dev/null +++ b/pkg/command/testdata/test/variables/main.tftest.hcl @@ -0,0 +1,6 @@ +run "validate_test_resource" { + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/variables/set_variables.tftest.hcl b/pkg/command/testdata/test/variables/set_variables.tftest.hcl new file mode 100644 index 00000000000..7f6a9cee656 --- /dev/null +++ b/pkg/command/testdata/test/variables/set_variables.tftest.hcl @@ -0,0 +1,10 @@ +run "validate_test_resource" { + variables { + input = "bar" + } + + assert { + condition = test_resource.foo.value == "bar" + error_message = "invalid value" + } +} diff --git a/pkg/command/testdata/test/variables_reference/main.tf b/pkg/command/testdata/test/variables_reference/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/command/testdata/test/variables_reference/main.tftest.hcl b/pkg/command/testdata/test/variables_reference/main.tftest.hcl new file mode 100644 index 00000000000..c72e27259e7 --- /dev/null +++ b/pkg/command/testdata/test/variables_reference/main.tftest.hcl @@ -0,0 +1,15 @@ +variables { + content = "some value" +} + +run "setup" { + module { + source = "./setup" + } +} + +run "test" { + variables { + file_name = run.setup.file_name + } +} diff --git 
a/pkg/command/testdata/test/variables_reference/setup/main.tf b/pkg/command/testdata/test/variables_reference/setup/main.tf new file mode 100644 index 00000000000..0bf33f4f1f6 --- /dev/null +++ b/pkg/command/testdata/test/variables_reference/setup/main.tf @@ -0,0 +1,7 @@ +variable "content" { + type = string +} + +output "file_name" { + value = "output_value" +} diff --git a/pkg/command/testdata/test/with_double_interrupt/main.tf b/pkg/command/testdata/test/with_double_interrupt/main.tf new file mode 100644 index 00000000000..92846111d77 --- /dev/null +++ b/pkg/command/testdata/test/with_double_interrupt/main.tf @@ -0,0 +1,25 @@ + +variable "interrupts" { + type = number +} + +resource "test_resource" "primary" { + value = "primary" +} + +resource "test_resource" "secondary" { + value = "secondary" + interrupt_count = var.interrupts + + depends_on = [ + test_resource.primary + ] +} + +resource "test_resource" "tertiary" { + value = "tertiary" + + depends_on = [ + test_resource.secondary + ] +} diff --git a/pkg/command/testdata/test/with_double_interrupt/main.tftest.hcl b/pkg/command/testdata/test/with_double_interrupt/main.tftest.hcl new file mode 100644 index 00000000000..47439767099 --- /dev/null +++ b/pkg/command/testdata/test/with_double_interrupt/main.tftest.hcl @@ -0,0 +1,17 @@ +variables { + interrupts = 0 +} + +run "primary" { + +} + +run "secondary" { + variables { + interrupts = 2 + } +} + +run "tertiary" { + +} diff --git a/pkg/command/testdata/test/with_interrupt/main.tf b/pkg/command/testdata/test/with_interrupt/main.tf new file mode 100644 index 00000000000..92846111d77 --- /dev/null +++ b/pkg/command/testdata/test/with_interrupt/main.tf @@ -0,0 +1,25 @@ + +variable "interrupts" { + type = number +} + +resource "test_resource" "primary" { + value = "primary" +} + +resource "test_resource" "secondary" { + value = "secondary" + interrupt_count = var.interrupts + + depends_on = [ + test_resource.primary + ] +} + +resource "test_resource" "tertiary" { + 
value = "tertiary" + + depends_on = [ + test_resource.secondary + ] +} diff --git a/pkg/command/testdata/test/with_interrupt/main.tftest.hcl b/pkg/command/testdata/test/with_interrupt/main.tftest.hcl new file mode 100644 index 00000000000..e2260f96a79 --- /dev/null +++ b/pkg/command/testdata/test/with_interrupt/main.tftest.hcl @@ -0,0 +1,17 @@ +variables { + interrupts = 0 +} + +run "primary" { + +} + +run "secondary" { + variables { + interrupts = 1 + } +} + +run "tertiary" { + +} diff --git a/pkg/command/testdata/test/with_nested_setup_modules/main.tf b/pkg/command/testdata/test/with_nested_setup_modules/main.tf new file mode 100644 index 00000000000..f304def8b1b --- /dev/null +++ b/pkg/command/testdata/test/with_nested_setup_modules/main.tf @@ -0,0 +1,2 @@ + +resource "test_resource" "resource" {} diff --git a/pkg/command/testdata/test/with_nested_setup_modules/main.tftest.hcl b/pkg/command/testdata/test/with_nested_setup_modules/main.tftest.hcl new file mode 100644 index 00000000000..3172b8c6dc4 --- /dev/null +++ b/pkg/command/testdata/test/with_nested_setup_modules/main.tftest.hcl @@ -0,0 +1,14 @@ +variables { + value = "Hello, world!" +} + +run "load_module" { + module { + source = "./setup" + } + + assert { + condition = output.value == "Hello, world!" 
+ error_message = "invalid value" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/with_nested_setup_modules/setup/main.tf b/pkg/command/testdata/test/with_nested_setup_modules/setup/main.tf new file mode 100644 index 00000000000..54e468b7dba --- /dev/null +++ b/pkg/command/testdata/test/with_nested_setup_modules/setup/main.tf @@ -0,0 +1,14 @@ + +variable "value" { + type = string +} + +module "child" { + source = "./other" + + value = var.value +} + +output "value" { + value = module.child.value +} diff --git a/pkg/command/testdata/test/with_nested_setup_modules/setup/other/main.tf b/pkg/command/testdata/test/with_nested_setup_modules/setup/other/main.tf new file mode 100644 index 00000000000..e1f6e52f3f1 --- /dev/null +++ b/pkg/command/testdata/test/with_nested_setup_modules/setup/other/main.tf @@ -0,0 +1,12 @@ + +variable "value" { + type = string +} + +resource "test_resource" "resource" { + value = var.value +} + +output "value" { + value = test_resource.resource.value +} diff --git a/pkg/command/testdata/test/with_provider_alias/main.tf b/pkg/command/testdata/test/with_provider_alias/main.tf new file mode 100644 index 00000000000..3b60dc01cc7 --- /dev/null +++ b/pkg/command/testdata/test/with_provider_alias/main.tf @@ -0,0 +1,12 @@ + +variable "managed_id" { + type = string +} + +data "test_data_source" "managed_data" { + id = var.managed_id +} + +resource "test_resource" "created" { + value = data.test_data_source.managed_data.value +} diff --git a/pkg/command/testdata/test/with_provider_alias/main.tftest.hcl b/pkg/command/testdata/test/with_provider_alias/main.tftest.hcl new file mode 100644 index 00000000000..7529a5ef42f --- /dev/null +++ b/pkg/command/testdata/test/with_provider_alias/main.tftest.hcl @@ -0,0 +1,37 @@ +provider "test" { + data_prefix = "data" + resource_prefix = "resource" +} + +provider "test" { + alias = "setup" + + # The setup provider will write into the main providers data sources. 
+ resource_prefix = "data" +} + +variables { + managed_id = "B853C121" +} + +run "setup" { + module { + source = "./setup" + } + + variables { + value = "Hello, world!" + id = "B853C121" + } + + providers = { + test = test.setup + } +} + +run "test" { + assert { + condition = test_resource.created.value == "Hello, world!" + error_message = "bad value" + } +} diff --git a/pkg/command/testdata/test/with_provider_alias/setup/main.tf b/pkg/command/testdata/test/with_provider_alias/setup/main.tf new file mode 100644 index 00000000000..03a99362628 --- /dev/null +++ b/pkg/command/testdata/test/with_provider_alias/setup/main.tf @@ -0,0 +1,12 @@ +variable "value" { + type = string +} + +variable "id" { + type = string +} + +resource "test_resource" "managed" { + id = var.id + value = var.value +} diff --git a/pkg/command/testdata/test/with_setup_module/main.tf b/pkg/command/testdata/test/with_setup_module/main.tf new file mode 100644 index 00000000000..3b60dc01cc7 --- /dev/null +++ b/pkg/command/testdata/test/with_setup_module/main.tf @@ -0,0 +1,12 @@ + +variable "managed_id" { + type = string +} + +data "test_data_source" "managed_data" { + id = var.managed_id +} + +resource "test_resource" "created" { + value = data.test_data_source.managed_data.value +} diff --git a/pkg/command/testdata/test/with_setup_module/main.tftest.hcl b/pkg/command/testdata/test/with_setup_module/main.tftest.hcl new file mode 100644 index 00000000000..1f2a6a94aa9 --- /dev/null +++ b/pkg/command/testdata/test/with_setup_module/main.tftest.hcl @@ -0,0 +1,21 @@ +variables { + managed_id = "B853C121" +} + +run "setup" { + module { + source = "./setup" + } + + variables { + value = "Hello, world!" + id = "B853C121" + } +} + +run "test" { + assert { + condition = test_resource.created.value == "Hello, world!" 
+ error_message = "bad value" + } +} diff --git a/pkg/command/testdata/test/with_setup_module/setup/main.tf b/pkg/command/testdata/test/with_setup_module/setup/main.tf new file mode 100644 index 00000000000..49056bbea73 --- /dev/null +++ b/pkg/command/testdata/test/with_setup_module/setup/main.tf @@ -0,0 +1,13 @@ +variable "value" { + type = string +} + +variable "id" { + type = string +} + +resource "test_resource" "managed" { + provider = setup + id = var.id + value = var.value +} diff --git a/pkg/command/testdata/test/with_verify_module/main.tf b/pkg/command/testdata/test/with_verify_module/main.tf new file mode 100644 index 00000000000..eb75736ea75 --- /dev/null +++ b/pkg/command/testdata/test/with_verify_module/main.tf @@ -0,0 +1,12 @@ +variable "id" { + type = string +} + +variable "value" { + type = string +} + +resource "test_resource" "resource" { + id = var.id + value = var.value +} \ No newline at end of file diff --git a/pkg/command/testdata/test/with_verify_module/main.tftest.hcl b/pkg/command/testdata/test/with_verify_module/main.tftest.hcl new file mode 100644 index 00000000000..2aa7f372443 --- /dev/null +++ b/pkg/command/testdata/test/with_verify_module/main.tftest.hcl @@ -0,0 +1,23 @@ +variables { + id = "resource" + value = "Hello, world!" +} + +run "test" { +} + +run "verify" { + module { + source = "./verify" + } + + assert { + condition = data.test_data_source.resource_data.value == "Hello, world!" 
+ error_message = "bad value" + } + + assert { + condition = test_resource.another_resource.id == "hi" + error_message = "bad value" + } +} \ No newline at end of file diff --git a/pkg/command/testdata/test/with_verify_module/verify/main.tf b/pkg/command/testdata/test/with_verify_module/verify/main.tf new file mode 100644 index 00000000000..f44a0199e3f --- /dev/null +++ b/pkg/command/testdata/test/with_verify_module/verify/main.tf @@ -0,0 +1,11 @@ +variable "id" { + type = string +} + +data "test_data_source" "resource_data" { + id = var.id +} + +resource "test_resource" "another_resource" { + id = "hi" +} \ No newline at end of file diff --git a/pkg/command/testdata/tftest-fmt/main_in.tftest.hcl b/pkg/command/testdata/tftest-fmt/main_in.tftest.hcl new file mode 100644 index 00000000000..c8c5fb17c71 --- /dev/null +++ b/pkg/command/testdata/tftest-fmt/main_in.tftest.hcl @@ -0,0 +1,18 @@ + +variables { + first = "value" +second = "value" +} + +run "some_run_block" { + command=plan + + plan_options={ + refresh=false + } + + assert { + condition = var.input == 12 + error_message = "something" + } +} diff --git a/pkg/command/testdata/tftest-fmt/main_out.tftest.hcl b/pkg/command/testdata/tftest-fmt/main_out.tftest.hcl new file mode 100644 index 00000000000..c6de0ddd73d --- /dev/null +++ b/pkg/command/testdata/tftest-fmt/main_out.tftest.hcl @@ -0,0 +1,18 @@ + +variables { + first = "value" + second = "value" +} + +run "some_run_block" { + command = plan + + plan_options = { + refresh = false + } + + assert { + condition = var.input == 12 + error_message = "something" + } +} diff --git a/pkg/command/testdata/validate-invalid/duplicate_import_targets/main.tf b/pkg/command/testdata/validate-invalid/duplicate_import_targets/main.tf new file mode 100644 index 00000000000..3c663bd105f --- /dev/null +++ b/pkg/command/testdata/validate-invalid/duplicate_import_targets/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "web" { +} + +import { + to = aws_instance.web + id = "test" +} 
+ +import { + to = aws_instance.web + id = "test2" +} diff --git a/pkg/command/testdata/validate-invalid/duplicate_import_targets/output.json b/pkg/command/testdata/validate-invalid/duplicate_import_targets/output.json new file mode 100644 index 00000000000..bad183b0c9c --- /dev/null +++ b/pkg/command/testdata/validate-invalid/duplicate_import_targets/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate import configuration for \"aws_instance.web\"", + "detail": "An import block for the resource \"aws_instance.web\" was already declared at testdata/validate-invalid/duplicate_import_targets/main.tf:4,1-7. A resource can have only one import block.", + "range": { + "filename": "testdata/validate-invalid/duplicate_import_targets/main.tf", + "start": { + "line": 9, + "column": 1, + "byte": 85 + }, + "end": { + "line": 9, + "column": 7, + "byte": 91 + } + }, + "snippet": { + "context": null, + "code": "import {", + "start_line": 9, + "highlight_start_offset": 0, + "highlight_end_offset": 6, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/incorrectmodulename/main.tf b/pkg/command/testdata/validate-invalid/incorrectmodulename/main.tf new file mode 100644 index 00000000000..45509406c3d --- /dev/null +++ b/pkg/command/testdata/validate-invalid/incorrectmodulename/main.tf @@ -0,0 +1,6 @@ +module "super#module" { +} + +module "super" { + source = var.modulename +} diff --git a/pkg/command/testdata/validate-invalid/incorrectmodulename/output.json b/pkg/command/testdata/validate-invalid/incorrectmodulename/output.json new file mode 100644 index 00000000000..bf94e2b93b2 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/incorrectmodulename/output.json @@ -0,0 +1,86 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 3, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + 
"summary": "Missing required argument", + "detail": "The argument \"source\" is required, but no definition was found.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 1, + "column": 23, + "byte": 22 + }, + "end": { + "line": 1, + "column": 24, + "byte": 23 + } + }, + "snippet": { + "context": "module \"super#module\"", + "code": "module \"super#module\" {", + "start_line": 1, + "highlight_start_offset": 22, + "highlight_end_offset": 23, + "values": [] + } + }, + { + "severity": "error", + "summary": "Invalid module instance name", + "detail": "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 1, + "column": 8, + "byte": 7 + }, + "end": { + "line": 1, + "column": 22, + "byte": 21 + } + }, + "snippet": { + "context": "module \"super#module\"", + "code": "module \"super#module\" {", + "start_line": 1, + "highlight_start_offset": 7, + "highlight_end_offset": 21, + "values": [] + } + }, + { + "severity": "error", + "summary": "Undefined variable", + "detail": "Undefined variable var.modulename", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 5, + "column": 12, + "byte": 55 + }, + "end": { + "line": 5, + "column": 26, + "byte": 69 + } + }, + "snippet": { + "context": "module \"super\"", + "code": " source = var.modulename", + "start_line": 5, + "highlight_start_offset": 11, + "highlight_end_offset": 25, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/interpolation/main.tf b/pkg/command/testdata/validate-invalid/interpolation/main.tf new file mode 100644 index 00000000000..bbb8e6978c4 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/interpolation/main.tf @@ -0,0 +1,11 @@ +variable "otherresourcename" { + default = "aws_instance.web1" +} + 
+variable "vairable_with_interpolation" { + default = "${var.otherresourcename}" +} + +resource "aws_instance" "web" { + depends_on = ["${var.otherresourcename}}"] +} diff --git a/pkg/command/testdata/validate-invalid/interpolation/output.json b/pkg/command/testdata/validate-invalid/interpolation/output.json new file mode 100644 index 00000000000..2843b19121f --- /dev/null +++ b/pkg/command/testdata/validate-invalid/interpolation/output.json @@ -0,0 +1,60 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 2, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Variables not allowed", + "detail": "Variables may not be used here.", + "range": { + "filename": "testdata/validate-invalid/interpolation/main.tf", + "start": { + "line": 6, + "column": 16, + "byte": 122 + }, + "end": { + "line": 6, + "column": 19, + "byte": 125 + } + }, + "snippet": { + "context": "variable \"vairable_with_interpolation\"", + "code": " default = \"${var.otherresourcename}\"", + "start_line": 6, + "highlight_start_offset": 15, + "highlight_end_offset": 18, + "values": [] + } + }, + { + "severity": "error", + "summary": "Invalid expression", + "detail": "A single static variable reference is required: only attribute access and indexing with constant keys. 
No calculations, function calls, template expressions, etc are allowed here.", + "range": { + "filename": "testdata/validate-invalid/interpolation/main.tf", + "start": { + "line": 10, + "column": 17, + "byte": 197 + }, + "end": { + "line": 10, + "column": 44, + "byte": 224 + } + }, + "snippet": { + "context": "resource \"aws_instance\" \"web\"", + "code": " depends_on = [\"${var.otherresourcename}}\"]", + "start_line": 10, + "highlight_start_offset": 16, + "highlight_end_offset": 43, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/main.tf b/pkg/command/testdata/validate-invalid/main.tf new file mode 100644 index 00000000000..b1d63348a66 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/main.tf @@ -0,0 +1,8 @@ +resorce "test_instance" "foo" { # Intentional typo to test error reporting + ami = "bar" + + network_interface { + device_index = 0 + description = "Main network interface" + } +} diff --git a/pkg/command/testdata/validate-invalid/missing_defined_var/main.tf b/pkg/command/testdata/validate-invalid/missing_defined_var/main.tf new file mode 100644 index 00000000000..b3e12217257 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_defined_var/main.tf @@ -0,0 +1,10 @@ +resource "test_instance" "foo" { + ami = "bar" + + network_interface { + device_index = 0 + description = "Main network interface ${var.name}" + } +} + +variable "name" {} diff --git a/pkg/command/testdata/validate-invalid/missing_defined_var/output.json b/pkg/command/testdata/validate-invalid/missing_defined_var/output.json new file mode 100644 index 00000000000..40258a98cd2 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_defined_var/output.json @@ -0,0 +1,7 @@ +{ + "format_version": "1.0", + "valid": true, + "error_count": 0, + "warning_count": 0, + "diagnostics": [] +} diff --git a/pkg/command/testdata/validate-invalid/missing_quote/main.tf b/pkg/command/testdata/validate-invalid/missing_quote/main.tf new file mode 100644 
index 00000000000..c8e0785ec27 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_quote/main.tf @@ -0,0 +1,9 @@ +resource "test_instance" "foo" { + ami = "bar" + + network_interface { + device_index = 0 + name = test + description = "Main network interface" + } +} diff --git a/pkg/command/testdata/validate-invalid/missing_quote/output.json b/pkg/command/testdata/validate-invalid/missing_quote/output.json new file mode 100644 index 00000000000..87aeca8b781 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_quote/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Invalid reference", + "detail": "A reference to a resource type must be followed by at least one attribute access, specifying the resource name.", + "range": { + "filename": "testdata/validate-invalid/missing_quote/main.tf", + "start": { + "line": 6, + "column": 14, + "byte": 110 + }, + "end": { + "line": 6, + "column": 18, + "byte": 114 + } + }, + "snippet": { + "context": "resource \"test_instance\" \"foo\"", + "code": " name = test", + "start_line": 6, + "highlight_start_offset": 13, + "highlight_end_offset": 17, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/missing_var/main.tf b/pkg/command/testdata/validate-invalid/missing_var/main.tf new file mode 100644 index 00000000000..37a77555e01 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_var/main.tf @@ -0,0 +1,8 @@ +resource "test_instance" "foo" { + ami = "bar" + + network_interface { + device_index = 0 + description = var.description + } +} diff --git a/pkg/command/testdata/validate-invalid/missing_var/output.json b/pkg/command/testdata/validate-invalid/missing_var/output.json new file mode 100644 index 00000000000..6f0b9d5d4c8 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/missing_var/output.json @@ -0,0 +1,34 @@ +{ + "format_version": 
"1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Reference to undeclared input variable", + "detail": "An input variable with the name \"description\" has not been declared. This variable can be declared with a variable \"description\" {} block.", + "range": { + "filename": "testdata/validate-invalid/missing_var/main.tf", + "start": { + "line": 6, + "column": 21, + "byte": 117 + }, + "end": { + "line": 6, + "column": 36, + "byte": 132 + } + }, + "snippet": { + "context": "resource \"test_instance\" \"foo\"", + "code": " description = var.description", + "start_line": 6, + "highlight_start_offset": 20, + "highlight_end_offset": 35, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/multiple_modules/main.tf b/pkg/command/testdata/validate-invalid/multiple_modules/main.tf new file mode 100644 index 00000000000..28b339e12de --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_modules/main.tf @@ -0,0 +1,7 @@ +module "multi_module" { + source = "./foo" +} + +module "multi_module" { + source = "./foo" +} diff --git a/pkg/command/testdata/validate-invalid/multiple_modules/output.json b/pkg/command/testdata/validate-invalid/multiple_modules/output.json new file mode 100644 index 00000000000..1aeaf929a91 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_modules/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate module call", + "detail": "A module call named \"multi_module\" was already defined at testdata/validate-invalid/multiple_modules/main.tf:1,1-22. 
Module calls must have unique names within a module.", + "range": { + "filename": "testdata/validate-invalid/multiple_modules/main.tf", + "start": { + "line": 5, + "column": 1, + "byte": 46 + }, + "end": { + "line": 5, + "column": 22, + "byte": 67 + } + }, + "snippet": { + "context": null, + "code": "module \"multi_module\" {", + "start_line": 5, + "highlight_start_offset": 0, + "highlight_end_offset": 21, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/multiple_providers/main.tf b/pkg/command/testdata/validate-invalid/multiple_providers/main.tf new file mode 100644 index 00000000000..e1df9c99544 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_providers/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + access_key = "123" + secret_key = "233" + region = "us-east-1" +} + +provider "aws" { + access_key = "123" + secret_key = "233" + region = "us-east-1" +} diff --git a/pkg/command/testdata/validate-invalid/multiple_providers/output.json b/pkg/command/testdata/validate-invalid/multiple_providers/output.json new file mode 100644 index 00000000000..309cf0ea7c3 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_providers/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate provider configuration", + "detail": "A default (non-aliased) provider configuration for \"aws\" was already given at testdata/validate-invalid/multiple_providers/main.tf:1,1-15. 
If multiple configurations are required, set the \"alias\" argument for alternative configurations.", + "range": { + "filename": "testdata/validate-invalid/multiple_providers/main.tf", + "start": { + "line": 7, + "column": 1, + "byte": 85 + }, + "end": { + "line": 7, + "column": 15, + "byte": 99 + } + }, + "snippet": { + "context": null, + "code": "provider \"aws\" {", + "start_line": 7, + "highlight_start_offset": 0, + "highlight_end_offset": 14, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/multiple_resources/main.tf b/pkg/command/testdata/validate-invalid/multiple_resources/main.tf new file mode 100644 index 00000000000..7866b4844d4 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_resources/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "web" { +} + +resource "aws_instance" "web" { +} diff --git a/pkg/command/testdata/validate-invalid/multiple_resources/output.json b/pkg/command/testdata/validate-invalid/multiple_resources/output.json new file mode 100644 index 00000000000..ded584e6846 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/multiple_resources/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate resource \"aws_instance\" configuration", + "detail": "A aws_instance resource named \"web\" was already declared at testdata/validate-invalid/multiple_resources/main.tf:1,1-30. 
Resource names must be unique per type in each module.", + "range": { + "filename": "testdata/validate-invalid/multiple_resources/main.tf", + "start": { + "line": 4, + "column": 1, + "byte": 35 + }, + "end": { + "line": 4, + "column": 30, + "byte": 64 + } + }, + "snippet": { + "context": null, + "code": "resource \"aws_instance\" \"web\" {", + "start_line": 4, + "highlight_start_offset": 0, + "highlight_end_offset": 29, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/output.json b/pkg/command/testdata/validate-invalid/output.json new file mode 100644 index 00000000000..73254853932 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/output.json @@ -0,0 +1,34 @@ +{ + "format_version": "1.0", + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Unsupported block type", + "detail": "Blocks of type \"resorce\" are not expected here. Did you mean \"resource\"?", + "range": { + "filename": "testdata/validate-invalid/main.tf", + "start": { + "line": 1, + "column": 1, + "byte": 0 + }, + "end": { + "line": 1, + "column": 8, + "byte": 7 + } + }, + "snippet": { + "context": null, + "code": "resorce \"test_instance\" \"foo\" { # Intentional typo to test error reporting", + "start_line": 1, + "highlight_start_offset": 0, + "highlight_end_offset": 7, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-invalid/outputs/main.tf b/pkg/command/testdata/validate-invalid/outputs/main.tf new file mode 100644 index 00000000000..fa35d2a383e --- /dev/null +++ b/pkg/command/testdata/validate-invalid/outputs/main.tf @@ -0,0 +1,3 @@ +output "myvalue" { + values = "Some value" +} diff --git a/pkg/command/testdata/validate-invalid/outputs/output.json b/pkg/command/testdata/validate-invalid/outputs/output.json new file mode 100644 index 00000000000..f774b458be4 --- /dev/null +++ b/pkg/command/testdata/validate-invalid/outputs/output.json @@ -0,0 +1,60 @@ +{ + 
"format_version": "1.0", + "valid": false, + "error_count": 2, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Missing required argument", + "detail": "The argument \"value\" is required, but no definition was found.", + "range": { + "filename": "testdata/validate-invalid/outputs/main.tf", + "start": { + "line": 1, + "column": 18, + "byte": 17 + }, + "end": { + "line": 1, + "column": 19, + "byte": 18 + } + }, + "snippet": { + "context": "output \"myvalue\"", + "code": "output \"myvalue\" {", + "start_line": 1, + "highlight_start_offset": 17, + "highlight_end_offset": 18, + "values": [] + } + }, + { + "severity": "error", + "summary": "Unsupported argument", + "detail": "An argument named \"values\" is not expected here. Did you mean \"value\"?", + "range": { + "filename": "testdata/validate-invalid/outputs/main.tf", + "start": { + "line": 2, + "column": 3, + "byte": 21 + }, + "end": { + "line": 2, + "column": 9, + "byte": 27 + } + }, + "snippet": { + "context": "output \"myvalue\"", + "code": " values = \"Some value\"", + "start_line": 2, + "highlight_start_offset": 2, + "highlight_end_offset": 8, + "values": [] + } + } + ] +} diff --git a/pkg/command/testdata/validate-valid/main.tf b/pkg/command/testdata/validate-valid/main.tf new file mode 100644 index 00000000000..2dcb1eccd0c --- /dev/null +++ b/pkg/command/testdata/validate-valid/main.tf @@ -0,0 +1,14 @@ +variable "var_with_escaped_interp" { + # This is here because in the past it failed. 
See Github #13001 + default = "foo-$${bar.baz}" +} + +resource "test_instance" "foo" { + ami = "bar" + + # This is here because at some point it caused a test failure + network_interface { + device_index = 0 + description = "Main network interface" + } +} diff --git a/pkg/command/testdata/validate-valid/output.json b/pkg/command/testdata/validate-valid/output.json new file mode 100644 index 00000000000..40258a98cd2 --- /dev/null +++ b/pkg/command/testdata/validate-valid/output.json @@ -0,0 +1,7 @@ +{ + "format_version": "1.0", + "valid": true, + "error_count": 0, + "warning_count": 0, + "diagnostics": [] +} diff --git a/pkg/command/testdata/validate-valid/with-tfvars-file/main.tf b/pkg/command/testdata/validate-valid/with-tfvars-file/main.tf new file mode 100644 index 00000000000..b318fbf0c46 --- /dev/null +++ b/pkg/command/testdata/validate-valid/with-tfvars-file/main.tf @@ -0,0 +1,4 @@ +variable "var_without_default" { + type = string +} + diff --git a/pkg/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars b/pkg/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars new file mode 100644 index 00000000000..5b6bd874a0e --- /dev/null +++ b/pkg/command/testdata/validate-valid/with-tfvars-file/terraform.tfvars @@ -0,0 +1 @@ +var_without_default = "foo" diff --git a/pkg/command/testdata/variables/main.tf b/pkg/command/testdata/variables/main.tf new file mode 100644 index 00000000000..84a3f9ad9f7 --- /dev/null +++ b/pkg/command/testdata/variables/main.tf @@ -0,0 +1,16 @@ +variable "foo" { + default = "bar" +} + +variable "snack" { + default = "popcorn" +} + +variable "secret_snack" { + default = "seaweed snacks" + sensitive = true +} + +locals { + snack_bar = [var.snack, var.secret_snack] +} diff --git a/pkg/command/testing/test_provider.go b/pkg/command/testing/test_provider.go new file mode 100644 index 00000000000..1ada45074ba --- /dev/null +++ b/pkg/command/testing/test_provider.go @@ -0,0 +1,316 @@ +// Copyright (c) The OpenTofu Authors 
+// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testing + +import ( + "fmt" + "path" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +var ( + ProviderSchema = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "data_prefix": {Type: cty.String, Optional: true}, + "resource_prefix": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "value": {Type: cty.String, Optional: true}, + "interrupt_count": {Type: cty.Number, Optional: true}, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "value": {Type: cty.String, Computed: true}, + "interrupt_count": {Type: cty.Number, Computed: true}, + }, + }, + }, + }, + } +) + +// TestProvider is a wrapper around tofu.MockProvider that defines dynamic +// schemas, and keeps track of the resources and data sources that it contains. 
+type TestProvider struct { + Provider *tofu.MockProvider + + data, resource cty.Value + + Interrupt chan<- struct{} + + Store *ResourceStore +} + +func NewProvider(store *ResourceStore) *TestProvider { + if store == nil { + store = &ResourceStore{ + Data: make(map[string]cty.Value), + } + } + + provider := &TestProvider{ + Provider: new(tofu.MockProvider), + Store: store, + } + + provider.Provider.GetProviderSchemaResponse = ProviderSchema + provider.Provider.ConfigureProviderFn = provider.ConfigureProvider + provider.Provider.PlanResourceChangeFn = provider.PlanResourceChange + provider.Provider.ApplyResourceChangeFn = provider.ApplyResourceChange + provider.Provider.ReadResourceFn = provider.ReadResource + provider.Provider.ReadDataSourceFn = provider.ReadDataSource + + return provider +} + +func (provider *TestProvider) DataPrefix() string { + var prefix string + if !provider.data.IsNull() && provider.data.IsKnown() { + prefix = provider.data.AsString() + } + return prefix +} + +func (provider *TestProvider) SetDataPrefix(prefix string) { + provider.data = cty.StringVal(prefix) +} + +func (provider *TestProvider) GetDataKey(id string) string { + if !provider.data.IsNull() && provider.data.IsKnown() { + return path.Join(provider.data.AsString(), id) + } + return id +} + +func (provider *TestProvider) ResourcePrefix() string { + var prefix string + if !provider.resource.IsNull() && provider.resource.IsKnown() { + prefix = provider.resource.AsString() + } + return prefix +} + +func (provider *TestProvider) SetResourcePrefix(prefix string) { + provider.resource = cty.StringVal(prefix) +} + +func (provider *TestProvider) GetResourceKey(id string) string { + if !provider.resource.IsNull() && provider.resource.IsKnown() { + return path.Join(provider.resource.AsString(), id) + } + return id +} + +func (provider *TestProvider) ResourceString() string { + return provider.string(provider.ResourcePrefix()) +} + +func (provider *TestProvider) ResourceCount() int { + return 
provider.count(provider.ResourcePrefix()) +} + +func (provider *TestProvider) DataSourceString() string { + return provider.string(provider.DataPrefix()) +} + +func (provider *TestProvider) DataSourceCount() int { + return provider.count(provider.DataPrefix()) +} + +func (provider *TestProvider) count(prefix string) int { + provider.Store.mutex.RLock() + defer provider.Store.mutex.RUnlock() + + if len(prefix) == 0 { + return len(provider.Store.Data) + } + + count := 0 + for key := range provider.Store.Data { + if strings.HasPrefix(key, prefix) { + count++ + } + } + return count +} + +func (provider *TestProvider) string(prefix string) string { + provider.Store.mutex.RLock() + defer provider.Store.mutex.RUnlock() + + var keys []string + for key := range provider.Store.Data { + if strings.HasPrefix(key, prefix) { + keys = append(keys, key) + } + } + return strings.Join(keys, ", ") +} + +func (provider *TestProvider) ConfigureProvider(request providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + provider.resource = request.Config.GetAttr("resource_prefix") + provider.data = request.Config.GetAttr("data_prefix") + return providers.ConfigureProviderResponse{} +} + +func (provider *TestProvider) PlanResourceChange(request providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + if request.ProposedNewState.IsNull() { + // Then this is a delete operation. 
+ return providers.PlanResourceChangeResponse{ + PlannedState: request.ProposedNewState, + } + } + + resource := request.ProposedNewState + if id := resource.GetAttr("id"); !id.IsKnown() || id.IsNull() { + vals := resource.AsValueMap() + vals["id"] = cty.UnknownVal(cty.String) + resource = cty.ObjectVal(vals) + } + + return providers.PlanResourceChangeResponse{ + PlannedState: resource, + } +} + +func (provider *TestProvider) ApplyResourceChange(request providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + if request.PlannedState.IsNull() { + // Then this is a delete operation. + provider.Store.Delete(provider.GetResourceKey(request.PriorState.GetAttr("id").AsString())) + return providers.ApplyResourceChangeResponse{ + NewState: request.PlannedState, + } + } + + resource := request.PlannedState + id := resource.GetAttr("id") + if !id.IsKnown() { + val, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("failed to generate uuid: %w", err)) + } + + id = cty.StringVal(val) + + vals := resource.AsValueMap() + vals["id"] = id + resource = cty.ObjectVal(vals) + } + + interrupts := resource.GetAttr("interrupt_count") + if !interrupts.IsNull() && interrupts.IsKnown() && provider.Interrupt != nil { + count, _ := interrupts.AsBigFloat().Int64() + for ix := 0; ix < int(count); ix++ { + provider.Interrupt <- struct{}{} + } + + // Wait for a second to make sure the interrupts are processed by + // OpenTofu before the provider finishes. This is an attempt to ensure + // the output of any tests that rely on this behaviour is deterministic. 
+ time.Sleep(time.Second) + } + + provider.Store.Put(provider.GetResourceKey(id.AsString()), resource) + return providers.ApplyResourceChangeResponse{ + NewState: resource, + } +} + +func (provider *TestProvider) ReadResource(request providers.ReadResourceRequest) providers.ReadResourceResponse { + var diags tfdiags.Diagnostics + + id := request.PriorState.GetAttr("id").AsString() + resource := provider.Store.Get(provider.GetResourceKey(id)) + if resource == cty.NilVal { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "not found", fmt.Sprintf("%s does not exist", id))) + } + + return providers.ReadResourceResponse{ + NewState: resource, + Diagnostics: diags, + } +} + +func (provider *TestProvider) ReadDataSource(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + var diags tfdiags.Diagnostics + + id := request.Config.GetAttr("id").AsString() + resource := provider.Store.Get(provider.GetDataKey(id)) + if resource == cty.NilVal { + diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "not found", fmt.Sprintf("%s does not exist", id))) + } + + return providers.ReadDataSourceResponse{ + State: resource, + Diagnostics: diags, + } +} + +// ResourceStore manages a set of cty.Value resources that can be shared between +// TestProvider providers. 
+type ResourceStore struct { + mutex sync.RWMutex + + Data map[string]cty.Value +} + +func (store *ResourceStore) Delete(key string) cty.Value { + store.mutex.Lock() + defer store.mutex.Unlock() + + if resource, ok := store.Data[key]; ok { + delete(store.Data, key) + return resource + } + return cty.NilVal +} + +func (store *ResourceStore) Get(key string) cty.Value { + store.mutex.RLock() + defer store.mutex.RUnlock() + + return store.get(key) +} + +func (store *ResourceStore) Put(key string, resource cty.Value) cty.Value { + store.mutex.Lock() + defer store.mutex.Unlock() + + old := store.get(key) + store.Data[key] = resource + return old +} + +func (store *ResourceStore) get(key string) cty.Value { + if resource, ok := store.Data[key]; ok { + return resource + } + return cty.NilVal +} diff --git a/pkg/command/ui_input.go b/pkg/command/ui_input.go new file mode 100644 index 00000000000..041ab0c2349 --- /dev/null +++ b/pkg/command/ui_input.go @@ -0,0 +1,196 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "os" + "os/signal" + "strings" + "sync" + "sync/atomic" + "unicode" + + "github.com/bgentry/speakeasy" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/mattn/go-isatty" + "github.com/mitchellh/colorstring" +) + +var defaultInputReader io.Reader +var defaultInputWriter io.Writer +var testInputResponse []string +var testInputResponseMap map[string]string + +// UIInput is an implementation of tofu.UIInput that asks the CLI +// for input stdin. +type UIInput struct { + // Colorize will color the output. + Colorize *colorstring.Colorize + + // Reader and Writer for IO. If these aren't set, they will default to + // Stdin and Stdout respectively. 
+ Reader io.Reader + Writer io.Writer + + listening int32 + result chan string + err chan string + + interrupted bool + l sync.Mutex + once sync.Once +} + +func (i *UIInput) Input(ctx context.Context, opts *tofu.InputOpts) (string, error) { + i.once.Do(i.init) + + r := i.Reader + w := i.Writer + if r == nil { + r = defaultInputReader + } + if w == nil { + w = defaultInputWriter + } + if r == nil { + r = os.Stdin + } + if w == nil { + w = os.Stdout + } + + // Make sure we only ask for input once at a time. OpenTofu + // should enforce this, but it doesn't hurt to verify. + i.l.Lock() + defer i.l.Unlock() + + // If we're interrupted, then don't ask for input + if i.interrupted { + return "", errors.New("interrupted") + } + + // If we have test results, return those. testInputResponse is the + // "old" way of doing it and we should remove that. + if testInputResponse != nil { + v := testInputResponse[0] + testInputResponse = testInputResponse[1:] + return v, nil + } + + // testInputResponseMap is the new way for test responses, based on + // the query ID. 
+ if testInputResponseMap != nil { + v, ok := testInputResponseMap[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + + delete(testInputResponseMap, opts.Id) + return v, nil + } + + log.Printf("[DEBUG] command: asking for input: %q", opts.Query) + + // Listen for interrupts so we can cancel the input ask + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Build the output format for asking + var buf bytes.Buffer + buf.WriteString("[reset]") + buf.WriteString(fmt.Sprintf("[bold]%s[reset]\n", opts.Query)) + if opts.Description != "" { + s := bufio.NewScanner(strings.NewReader(opts.Description)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + buf.WriteString("\n") + } + if opts.Default != "" { + buf.WriteString(" [bold]Default:[reset] ") + buf.WriteString(opts.Default) + buf.WriteString("\n") + } + buf.WriteString(" [bold]Enter a value:[reset] ") + + // Ask the user for their input + if _, err := fmt.Fprint(w, i.Colorize.Color(buf.String())); err != nil { + return "", err + } + + // Listen for the input in a goroutine. This will allow us to + // interrupt this if we are interrupted (SIGINT). + go func() { + if !atomic.CompareAndSwapInt32(&i.listening, 0, 1) { + return // We are already listening for input. 
+ } + defer atomic.CompareAndSwapInt32(&i.listening, 1, 0) + + var line string + var err error + if opts.Secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + buf := bufio.NewReader(r) + line, err = buf.ReadString('\n') + } + if err != nil { + log.Printf("[ERR] UIInput scan err: %s", err) + i.err <- string(err.Error()) + } else { + i.result <- strings.TrimRightFunc(line, unicode.IsSpace) + } + }() + + select { + case err := <-i.err: + return "", errors.New(err) + + case line := <-i.result: + fmt.Fprint(w, "\n") + + if line == "" { + line = opts.Default + } + + return line, nil + case <-ctx.Done(): + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(w) + + return "", ctx.Err() + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(w) + + // Mark that we were interrupted so future Ask calls fail. + i.interrupted = true + + return "", errors.New("interrupted") + } +} + +func (i *UIInput) init() { + i.result = make(chan string) + i.err = make(chan string) + + if i.Colorize == nil { + i.Colorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } + } +} diff --git a/pkg/command/ui_input_test.go b/pkg/command/ui_input_test.go new file mode 100644 index 00000000000..d117d4c3999 --- /dev/null +++ b/pkg/command/ui_input_test.go @@ -0,0 +1,124 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "fmt" + "io" + "sync/atomic" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestUIInput_impl(t *testing.T) { + var _ tofu.UIInput = new(UIInput) +} + +func TestUIInputInput(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBufferString("foo\n"), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &tofu.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo" { + t.Fatalf("unexpected input: %s", v) + } +} + +func TestUIInputInput_canceled(t *testing.T) { + r, w := io.Pipe() + i := &UIInput{ + Reader: r, + Writer: bytes.NewBuffer(nil), + } + + // Make a context that can be canceled. + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + // Cancel the context after 2 seconds. + time.Sleep(2 * time.Second) + cancel() + }() + + // Get input until the context is canceled. + v, err := i.Input(ctx, &tofu.InputOpts{}) + if err != context.Canceled { + t.Fatalf("expected a context.Canceled error, got: %v", err) + } + + // As the context was canceled v should be empty. + if v != "" { + t.Fatalf("unexpected input: %s", v) + } + + // As the context was canceled we should still be listening. + listening := atomic.LoadInt32(&i.listening) + if listening != 1 { + t.Fatalf("expected listening to be 1, got: %d", listening) + } + + go func() { + // Fake input is given after 1 second. 
+ time.Sleep(time.Second) + fmt.Fprint(w, "foo\n") + w.Close() + }() + + v, err = i.Input(context.Background(), &tofu.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo" { + t.Fatalf("unexpected input: %s", v) + } +} + +func TestUIInputInput_spaces(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBufferString("foo bar\n"), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &tofu.InputOpts{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if v != "foo bar" { + t.Fatalf("unexpected input: %s", v) + } +} + +func TestUIInputInput_Error(t *testing.T) { + i := &UIInput{ + Reader: bytes.NewBuffer(nil), + Writer: bytes.NewBuffer(nil), + } + + v, err := i.Input(context.Background(), &tofu.InputOpts{}) + if err == nil { + t.Fatalf("Error is not 'nil'") + } + + if err.Error() != "EOF" { + t.Fatalf("unexpected error: %v", err) + } + + if v != "" { + t.Fatalf("input must be empty") + } +} diff --git a/pkg/command/unlock.go b/pkg/command/unlock.go new file mode 100644 index 00000000000..d35f10592fc --- /dev/null +++ b/pkg/command/unlock.go @@ -0,0 +1,170 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/states/statemgr" + + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// UnlockCommand is a cli.Command implementation that manually unlocks +// the state. 
+type UnlockCommand struct { + Meta +} + +func (c *UnlockCommand) Run(args []string) int { + args = c.Meta.process(args) + var force bool + cmdFlags := c.Meta.defaultFlagSet("force-unlock") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.BoolVar(&force, "force", false, "force") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error("Expected a single argument: LOCK_ID") + return cli.RunResultHelp + } + + lockID := args[0] + args = args[1:] + + // assume everything is initialized. The user can manually init if this is + // required. + configPath, err := modulePath(args) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromPath(configPath) + if encDiags.HasErrors() { + c.showDiagnostics(encDiags) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // unlocking is read only when looking at state data + c.ignoreRemoteVersionConflict(b) + + env, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + stateMgr, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + _, isLocal := stateMgr.(*statemgr.Filesystem) + + if !force { + // Forcing this doesn't do anything, but doesn't break anything either, + // and allows us to run the basic command test too. 
+ if isLocal { + c.Ui.Error("Local state cannot be unlocked by another process") + return 1 + } + + desc := "OpenTofu will remove the lock on the remote state.\n" + + "This will allow local OpenTofu commands to modify this state, even though it\n" + + "may still be in use. Only 'yes' will be accepted to confirm." + + v, err := c.UIInput().Input(context.Background(), &tofu.InputOpts{ + Id: "force-unlock", + Query: "Do you really want to force-unlock?", + Description: desc, + }) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error asking for confirmation: %s", err)) + return 1 + } + if v != "yes" { + c.Ui.Output("force-unlock cancelled.") + return 1 + } + } + + if err := stateMgr.Unlock(lockID); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to unlock state: %s", err)) + return 1 + } + + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputUnlockSuccess))) + return 0 +} + +func (c *UnlockCommand) Help() string { + helpText := ` +Usage: tofu [global options] force-unlock [options] LOCK_ID + + Manually unlock the state for the defined configuration. + + This will not modify your infrastructure. This command removes the lock on the + state for the current workspace. The behavior of this lock is dependent + on the backend being used. Local state files cannot be unlocked by another + process. + +Options: + + -force Don't ask for input for unlock confirmation. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. 
+` + return strings.TrimSpace(helpText) +} + +func (c *UnlockCommand) Synopsis() string { + return "Release a stuck lock on the current workspace" +} + +const outputUnlockSuccess = ` +[reset][bold][green]OpenTofu state has been successfully unlocked![reset][green] + +The state has been unlocked, and OpenTofu commands should now be able to +obtain a new lock on the remote state. +` diff --git a/pkg/command/unlock_test.go b/pkg/command/unlock_test.go new file mode 100644 index 00000000000..6763ce3b005 --- /dev/null +++ b/pkg/command/unlock_test.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "testing" + + "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" + "github.com/mitchellh/cli" + + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// Since we can't unlock a local state file, just test that calling unlock +// doesn't fail. 
+func TestUnlock(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // Write the legacy state + statePath := DefaultStateFilename + { + f, err := os.Create(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + err = legacy.WriteState(legacy.NewState(), f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + } + + p := testProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UnlockCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-force", + "LOCK_ID", + } + + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n%s\n%s", code, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + // make sure we don't crash with arguments in the wrong order + args = []string{ + "LOCK_ID", + "-force", + } + + if code := c.Run(args); code != cli.RunResultHelp { + t.Fatalf("bad: %d\n%s\n%s", code, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } +} + +// Newly configured backend +func TestUnlock_inmemBackend(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + testCopyDir(t, testFixturePath("backend-inmem-locked"), td) + defer testChdir(t, td)() + defer inmem.Reset() + + // init backend + ui := new(cli.MockUi) + view, _ := testView(t) + ci := &InitCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + if code := ci.Run(nil); code != 0 { + t.Fatalf("bad: %d\n%s", code, ui.ErrorWriter) + } + + ui = new(cli.MockUi) + c := &UnlockCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + // run with the incorrect lock ID + args := []string{ + "-force", + "LOCK_ID", + } + + if code := c.Run(args); code == 0 { + t.Fatalf("bad: %d\n%s\n%s", code, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + ui = new(cli.MockUi) + c = &UnlockCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + // lockID set in the test fixture + args[1] = "2b6a6738-5dd5-50d6-c0ae-f6352977666b" + if 
code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n%s\n%s", code, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + +} diff --git a/pkg/command/untaint.go b/pkg/command/untaint.go new file mode 100644 index 00000000000..363e18fd0d4 --- /dev/null +++ b/pkg/command/untaint.go @@ -0,0 +1,263 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// UntaintCommand is a cli.Command implementation that manually untaints +// a resource, marking it as primary and ready for service. +type UntaintCommand struct { + Meta +} + +func (c *UntaintCommand) Run(args []string) int { + args = c.Meta.process(args) + var allowMissing bool + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("untaint") + cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "allow missing") + cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") + cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + var diags tfdiags.Diagnostics + + // Require the one argument for the resource to untaint + args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error("The untaint command expects exactly one 
argument.") + cmdFlags.Usage() + return 1 + } + + addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.Encryption() + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(nil, enc.State()) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Determine the workspace name + workspace, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + + // Check remote OpenTofu version is compatible + remoteVersionDiags := c.remoteVersionCheck(b, workspace) + diags = diags.Append(remoteVersionDiags) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + // Get the state + stateMgr, err := b.StateMgr(workspace) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateMgr, "untaint"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + defer func() { + if diags := stateLocker.Unlock(); diags.HasErrors() { + c.showDiagnostics(diags) + } + }() + } + + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + // Get the actual state structure + state := stateMgr.State() + if state.Empty() { + if allowMissing { + return c.allowMissingExit(addr) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + "The state currently contains no resource instances whatsoever. 
This may occur if the configuration has never been applied or if it has recently been destroyed.", + )) + c.showDiagnostics(diags) + return 1 + } + + ss := state.SyncWrapper() + + // Get the resource and instance we're going to taint + rs := ss.Resource(addr.ContainingResource()) + is := ss.ResourceInstance(addr) + if is == nil { + if allowMissing { + return c.allowMissingExit(addr) + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("There is no resource instance in the state with the address %s. If the resource configuration has just been added, you must run \"tofu apply\" once to create the corresponding instance(s) before they can be tainted.", addr), + )) + c.showDiagnostics(diags) + return 1 + } + + obj := is.Current + if obj == nil { + if len(is.Deposed) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("Resource instance %s is currently part-way through a create_before_destroy replacement action. Run \"tofu apply\" to complete its replacement before tainting it.", addr), + )) + } else { + // Don't know why we're here, but we'll produce a generic error message anyway. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("Resource instance %s does not currently have a remote object associated with it, so it cannot be tainted.", addr), + )) + } + c.showDiagnostics(diags) + return 1 + } + + if obj.Status != states.ObjectTainted { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance is not tainted", + fmt.Sprintf("Resource instance %s is not currently tainted, and so it cannot be untainted.", addr), + )) + c.showDiagnostics(diags) + return 1 + } + + // Get schemas, if possible, before writing state + var schemas *tofu.Schemas + if isCloudMode(b) { + var schemaDiags tfdiags.Diagnostics + schemas, schemaDiags = c.MaybeGetSchemas(state, nil) + diags = diags.Append(schemaDiags) + } + + obj.Status = states.ObjectReady + ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) + + if err := stateMgr.WriteState(state); err != nil { + c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) + return 1 + } + if err := stateMgr.PersistState(schemas); err != nil { + c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) + return 1 + } + + c.showDiagnostics(diags) + c.Ui.Output(fmt.Sprintf("Resource instance %s has been successfully untainted.", addr)) + return 0 +} + +func (c *UntaintCommand) Help() string { + helpText := ` +Usage: tofu [global options] untaint [options] name + + OpenTofu uses the term "tainted" to describe a resource instance + which may not be fully functional, either because its creation + partially failed or because you've manually marked it as such using + the "tofu taint" command. + + This command removes that state from a resource instance, causing + OpenTofu to see it as fully-functional and not in need of + replacement. + + This will not modify your infrastructure directly. It only avoids + OpenTofu planning to replace a tainted instance in a future operation. 
+ +Options: + + -allow-missing If specified, the command will succeed (exit code 0) + even if the resource is missing. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -ignore-remote-version A rare option used for the remote backend only. See + the remote backend documentation for more information. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. + +` + return strings.TrimSpace(helpText) +} + +func (c *UntaintCommand) Synopsis() string { + return "Remove the 'tainted' state from a resource instance" +} + +func (c *UntaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int { + c.showDiagnostics(tfdiags.Sourceless( + tfdiags.Warning, + "No such resource instance", + fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name), + )) + return 0 +} diff --git a/pkg/command/untaint_test.go b/pkg/command/untaint_test.go new file mode 100644 index 00000000000..08317ca6106 --- /dev/null +++ b/pkg/command/untaint_test.go @@ -0,0 +1,537 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/mitchellh/cli" +) + +func TestUntaint(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + expected := strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `) + testStateOutput(t, statePath, expected) +} + +func TestUntaint_lockedState(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + unlock, err := testLockState(t, testDataDir, statePath) + if err != nil { + t.Fatal(err) + } + defer unlock() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := 
&UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code == 0 { + t.Fatal("expected error") + } + + output := ui.ErrorWriter.String() + if !strings.Contains(output, "lock") { + t.Fatal("command output does not look like a lock error:", output) + } +} + +func TestUntaint_backup(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Backup is still tainted + testStateOutput(t, DefaultStateFilename+".backup", strings.TrimSpace(` +test_instance.foo: (tainted) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) + + // State is untainted + testStateOutput(t, DefaultStateFilename, strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} + +func TestUntaint_backupDisable(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), 
+ &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-backup", "-", + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if _, err := os.Stat(DefaultStateFilename + ".backup"); err == nil { + t.Fatal("backup path should not exist") + } + + testStateOutput(t, DefaultStateFilename, strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} + +func TestUntaint_badState(t *testing.T) { + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", "i-should-not-exist-ever", + "foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestUntaint_defaultState(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { 
+ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, DefaultStateFilename, strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} + +func TestUntaint_defaultWorkspaceState(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testWorkspace := "development" + path := testStateFileWorkspaceDefault(t, testWorkspace, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + meta := Meta{Ui: ui, View: view} + meta.SetWorkspace(testWorkspace) + c := &UntaintCommand{ + Meta: meta, + } + + args := []string{ + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, path, strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} + +func TestUntaint_missing(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := 
&UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.OutputWriter.String()) + } +} + +func TestUntaint_missingAllow(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-allow-missing", + "-state", statePath, + "test_instance.bar", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Check for the warning + actual := strings.TrimSpace(ui.ErrorWriter.String()) + expected := strings.TrimSpace(` +Warning: No such resource instance + +Resource instance test_instance.bar was not found, but this is not an error +because -allow-missing was set. 
+ +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } +} + +func TestUntaint_stateOut(t *testing.T) { + // Get a temp cwd + testCwd(t) + + // Write the temp state + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + testStateFileDefault(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state-out", "foo", + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, DefaultStateFilename, strings.TrimSpace(` +test_instance.foo: (tainted) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) + testStateOutput(t, "foo", strings.TrimSpace(` +test_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} + +func TestUntaint_module(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "blah", + 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectTainted, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &UntaintCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + "module.child.test_instance.blah", + } + if code := c.Run(args); code != 0 { + t.Fatalf("command exited with status code %d; want 0\n\n%s", code, ui.ErrorWriter.String()) + } + + testStateOutput(t, statePath, strings.TrimSpace(` +test_instance.foo: (tainted) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + +module.child: + test_instance.blah: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/test"] + `)) +} diff --git a/pkg/command/validate.go b/pkg/command/validate.go new file mode 100644 index 00000000000..1537879ed9d --- /dev/null +++ b/pkg/command/validate.go @@ -0,0 +1,221 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// ValidateCommand is a Command implementation that validates the tofu files +type ValidateCommand struct { + Meta +} + +func (c *ValidateCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseValidate(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("validate") + return 1 + } + + view := views.NewValidate(args.ViewType, c.View) + + // After this point, we must only produce JSON output if JSON mode is + // enabled, so all errors should be accumulated into diags and we'll + // print out a suitable result at the end, depending on the format + // selection. All returns from this point on must be tail-calls into + // view.Results in order to produce the expected output. 
+ + dir, err := filepath.Abs(args.Path) + if err != nil { + diags = diags.Append(fmt.Errorf("unable to locate module: %w", err)) + return view.Results(diags) + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(fmt.Errorf("error loading plugin path: %w", err)) + return view.Results(diags) + } + + // Inject variables from args into meta for static evaluation + c.GatherVariables(args.Vars) + + validateDiags := c.validate(dir, args.TestDirectory, args.NoTests) + diags = diags.Append(validateDiags) + + // Validating with dev overrides in effect means that the result might + // not be valid for a stable release, so we'll warn about that in case + // the user is trying to use "tofu validate" as a sort of pre-flight + // check before submitting a change. + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) + + return view.Results(diags) +} + +func (c *ValidateCommand) GatherVariables(args *arguments.Vars) { + // FIXME the arguments package currently trivially gathers variable related + // arguments in a heterogenous slice, in order to minimize the number of + // code paths gathering variables during the transition to this structure. + // Once all commands that gather variables have been converted to this + // structure, we could move the variable gathering code to the arguments + // package directly, removing this shim layer. 
+ + varArgs := args.All() + items := make([]rawFlag, len(varArgs)) + for i := range varArgs { + items[i].Name = varArgs[i].Name + items[i].Value = varArgs[i].Value + } + c.Meta.variableArgs = rawFlags{items: &items} +} + +func (c *ValidateCommand) validate(dir, testDir string, noTests bool) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + var cfg *configs.Config + + if noTests { + cfg, diags = c.loadConfig(dir) + } else { + cfg, diags = c.loadConfigWithTests(dir, testDir) + } + if diags.HasErrors() { + return diags + } + + validate := func(cfg *configs.Config) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + opts, err := c.contextOpts() + if err != nil { + diags = diags.Append(err) + return diags + } + + tfCtx, ctxDiags := tofu.NewContext(opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return diags + } + + return diags.Append(tfCtx.Validate(cfg)) + } + + diags = diags.Append(validate(cfg)) + + if noTests { + return diags + } + + validatedModules := make(map[string]bool) + + // We'll also do a quick validation of the OpenTofu test files. These live + // outside the OpenTofu graph so we have to do this separately. + for _, file := range cfg.Module.Tests { + + diags = diags.Append(file.Validate()) + + for _, run := range file.Runs { + + if run.Module != nil { + // Then we can also validate the referenced modules, but we are + // only going to do this is if they are local modules. + // + // Basically, local testing modules are something the user can + // reasonably go and fix. If it's a module being downloaded from + // the registry, the expectation is that the author of the + // module should have ran `tofu validate` themselves. + if _, ok := run.Module.Source.(addrs.ModuleSourceLocal); ok { + + if validated := validatedModules[run.Module.Source.String()]; !validated { + + // Since we can reference the same module twice, let's + // not validate the same thing multiple times. 
+ + validatedModules[run.Module.Source.String()] = true + diags = diags.Append(validate(run.ConfigUnderTest)) + } + + } + } + + diags = diags.Append(run.Validate()) + } + } + + return diags +} + +func (c *ValidateCommand) Synopsis() string { + return "Check whether the configuration is valid" +} + +func (c *ValidateCommand) Help() string { + helpText := ` +Usage: tofu [global options] validate [options] + + Validate the configuration files in a directory, referring only to the + configuration and not accessing any remote services such as remote state, + provider APIs, etc. + + Validate runs checks that verify whether a configuration is syntactically + valid and internally consistent, regardless of any provided variables or + existing state. It is thus primarily useful for general verification of + reusable modules, including correctness of attribute names and value types. + + It is safe to run this command automatically, for example as a post-save + check in a text editor or as a test step for a re-usable module in a CI + system. + + Validation requires an initialized working directory with any referenced + plugins and modules installed. To initialize a working directory for + validation without accessing any configured remote backend, use: + tofu init -backend=false + + To verify configuration in the context of a particular run (a particular + target workspace, input variable values, etc), use the 'tofu plan' + command instead, which includes an implied validation check. + +Options: + + -json Produce output in a machine-readable JSON format, + suitable for use in text editor integrations and other + automated systems. Always disables color. + + -no-color If specified, output won't contain any color. + + -no-tests If specified, OpenTofu will not validate test files. + + -test-directory=path Set the OpenTofu test directory, defaults to "tests". When set, the + test command will search for test files in the current directory and + in the one specified by the flag. 
+ + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. +` + return strings.TrimSpace(helpText) +} diff --git a/pkg/command/validate_test.go b/pkg/command/validate_test.go new file mode 100644 index 00000000000..eedf0ebed6e --- /dev/null +++ b/pkg/command/validate_test.go @@ -0,0 +1,371 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "io" + "os" + "path" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + testing_command "github.com/kubegems/opentofu/pkg/command/testing" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/terminal" +) + +func setupTest(t *testing.T, fixturepath string, args ...string) (*terminal.TestOutput, int) { + view, done := testView(t) + p := testProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, 
+ }, + }, + }, + }, + }, + }, + }, + } + c := &ValidateCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + View: view, + }, + } + + args = append(args, "-no-color") + args = append(args, testFixturePath(fixturepath)) + + code := c.Run(args) + return done(t), code +} + +func TestValidateCommand(t *testing.T) { + if output, code := setupTest(t, "validate-valid"); code != 0 { + t.Fatalf("unexpected non-successful exit code %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateCommandWithTfvarsFile(t *testing.T) { + // Create a temporary working directory that is empty because this test + // requires scanning the current working directory by validate command. + td := t.TempDir() + testCopyDir(t, testFixturePath("validate-valid/with-tfvars-file"), td) + defer testChdir(t, td)() + + view, done := testView(t) + c := &ValidateCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + View: view, + }, + } + + args := []string{} + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateFailingCommand(t *testing.T) { + if output, code := setupTest(t, "validate-invalid"); code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateFailingCommandMissingQuote(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_quote") + + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Invalid reference" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestValidateFailingCommandMissingVariable(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_var") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Reference to undeclared input variable" + if 
!strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameProviderMutipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_providers") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Duplicate provider configuration" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameModuleMultipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_modules") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := "Error: Duplicate module call" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameResourceMultipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/multiple_resources") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := `Error: Duplicate resource "aws_instance" configuration` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestSameImportTargetMultipleTimesShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/duplicate_import_targets") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + wantError := `Error: Duplicate import configuration for "aws_instance.web"` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestOutputWithoutValueShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/outputs") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, 
output.Stderr()) + } + wantError := `The argument "value" is required, but no definition was found.` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } + wantError = `An argument named "values" is not expected here. Did you mean "value"?` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestModuleWithIncorrectNameShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/incorrectmodulename") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := `Error: Invalid module instance name` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestWronglyUsedInterpolationShouldFail(t *testing.T) { + output, code := setupTest(t, "validate-invalid/interpolation") + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := `Error: Variables not allowed` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } + wantError = `A single static variable reference is required` + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestMissingDefinedVar(t *testing.T) { + output, code := setupTest(t, "validate-invalid/missing_defined_var") + // This is allowed because validate tests only that variables are referenced + // correctly, not that they all have defined values. 
+ if code != 0 { + t.Fatalf("Should have passed: %d\n\n%s", code, output.Stderr()) + } +} + +func TestValidateWithInvalidTestFile(t *testing.T) { + + // We're reusing some testing configs that were written for testing the + // test command here, so we have to initalise things slightly differently + // to the other tests. + + view, done := testView(t) + provider := testing_command.NewProvider(nil) + c := &ValidateCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + var args []string + args = append(args, "-no-color") + args = append(args, testFixturePath("test/invalid")) + + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := "Error: Invalid `expect_failures` reference" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestValidateWithInvalidTestModule(t *testing.T) { + + // We're reusing some testing configs that were written for testing the + // test command here, so we have to initalise things slightly differently + // to the other tests. 
+ + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "invalid-module")), td) + defer testChdir(t, td)() + + streams, done := terminal.StreamsForTesting(t) + view := views.NewView(streams) + ui := new(cli.MockUi) + + provider := testing_command.NewProvider(nil) + + providerSource, close := newMockProviderSource(t, map[string][]string{ + "test": {"1.0.0"}, + }) + defer close() + + meta := Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + Ui: ui, + View: view, + Streams: streams, + ProviderSource: providerSource, + } + + init := &InitCommand{ + Meta: meta, + } + + if code := init.Run(nil); code != 0 { + t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter) + } + + c := &ValidateCommand{ + Meta: meta, + } + + var args []string + args = append(args, "-no-color") + + code := c.Run(args) + output := done(t) + + if code != 1 { + t.Fatalf("Should have failed: %d\n\n%s", code, output.Stderr()) + } + + wantError := "Error: Reference to undeclared input variable" + if !strings.Contains(output.Stderr(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, output.Stderr()) + } +} + +func TestValidate_json(t *testing.T) { + tests := []struct { + path string + valid bool + }{ + {"validate-valid", true}, + {"validate-invalid", false}, + {"validate-invalid/missing_quote", false}, + {"validate-invalid/missing_var", false}, + {"validate-invalid/multiple_providers", false}, + {"validate-invalid/multiple_modules", false}, + {"validate-invalid/multiple_resources", false}, + {"validate-invalid/duplicate_import_targets", false}, + {"validate-invalid/outputs", false}, + {"validate-invalid/incorrectmodulename", false}, + {"validate-invalid/interpolation", false}, + {"validate-invalid/missing_defined_var", true}, + } + + for _, tc := range tests { + t.Run(tc.path, func(t *testing.T) { + var want, got map[string]interface{} + + wantFile, err := os.Open(path.Join(testFixturePath(tc.path), "output.json")) + if err != 
nil { + t.Fatalf("failed to open output file: %s", err) + } + defer wantFile.Close() + wantBytes, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + err = json.Unmarshal([]byte(wantBytes), &want) + if err != nil { + t.Fatalf("failed to unmarshal expected JSON: %s", err) + } + + output, code := setupTest(t, tc.path, "-json") + + gotString := output.Stdout() + err = json.Unmarshal([]byte(gotString), &got) + if err != nil { + t.Fatalf("failed to unmarshal actual JSON: %s", err) + } + + if !cmp.Equal(got, want) { + t.Errorf("wrong output:\n %v\n", cmp.Diff(got, want)) + t.Errorf("raw output:\n%s\n", gotString) + } + + if tc.valid && code != 0 { + t.Errorf("wrong exit code: want 0, got %d", code) + } else if !tc.valid && code != 1 { + t.Errorf("wrong exit code: want 1, got %d", code) + } + + if errorOutput := output.Stderr(); errorOutput != "" { + t.Errorf("unexpected error output:\n%s", errorOutput) + } + }) + } +} diff --git a/pkg/command/version.go b/pkg/command/version.go new file mode 100644 index 00000000000..feb617fc063 --- /dev/null +++ b/pkg/command/version.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// VersionCommand is a Command implementation prints the version. 
+type VersionCommand struct { + Meta + + Version string + VersionPrerelease string + Platform getproviders.Platform +} + +type VersionOutput struct { + Version string `json:"terraform_version"` + Platform string `json:"platform"` + ProviderSelections map[string]string `json:"provider_selections"` +} + +func (c *VersionCommand) Help() string { + helpText := ` +Usage: tofu [global options] version [options] + + Displays the version of OpenTofu and all installed plugins + +Options: + + -json Output the version information as a JSON object. +` + return strings.TrimSpace(helpText) +} + +func (c *VersionCommand) Run(args []string) int { + var versionString bytes.Buffer + args = c.Meta.process(args) + var jsonOutput bool + cmdFlags := c.Meta.defaultFlagSet("version") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + // Enable but ignore the global version flags. In main.go, if any of the + // arguments are -v, -version, or --version, this command will be called + // with the rest of the arguments, so we need to be able to cope with + // those. + cmdFlags.Bool("v", true, "version") + cmdFlags.Bool("version", true, "version") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + fmt.Fprintf(&versionString, "OpenTofu v%s", c.Version) + if c.VersionPrerelease != "" { + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) + } + + // We'll also attempt to print out the selected plugin versions. We do + // this based on the dependency lock file, and so the result might be + // empty or incomplete if the user hasn't successfully run "tofu init" + // since the most recent change to dependencies. + // + // Generally-speaking this is a best-effort thing that will give us a good + // result in the usual case where the user successfully ran "tofu init" + // and then hit a problem running _another_ command. 
+ var providerVersions []string + var providerLocks map[addrs.Provider]*depsfile.ProviderLock + if locks, err := c.lockedDependencies(); err == nil { + providerLocks = locks.AllProviders() + for providerAddr, lock := range providerLocks { + version := lock.Version().String() + if version == "0.0.0" { + providerVersions = append(providerVersions, fmt.Sprintf("+ provider %s (unversioned)", providerAddr)) + } else { + providerVersions = append(providerVersions, fmt.Sprintf("+ provider %s v%s", providerAddr, version)) + } + } + } + + if jsonOutput { + selectionsOutput := make(map[string]string) + for providerAddr, lock := range providerLocks { + version := lock.Version().String() + selectionsOutput[providerAddr.String()] = version + } + + var versionOutput string + if c.VersionPrerelease != "" { + versionOutput = c.Version + "-" + c.VersionPrerelease + } else { + versionOutput = c.Version + } + + output := VersionOutput{ + Version: versionOutput, + Platform: c.Platform.String(), + ProviderSelections: selectionsOutput, + } + + jsonOutput, err := json.MarshalIndent(output, "", " ") + if err != nil { + c.Ui.Error(fmt.Sprintf("\nError marshalling JSON: %s", err)) + return 1 + } + c.Ui.Output(string(jsonOutput)) + return 0 + } else { + c.Ui.Output(versionString.String()) + c.Ui.Output(fmt.Sprintf("on %s", c.Platform)) + + if len(providerVersions) != 0 { + sort.Strings(providerVersions) + for _, str := range providerVersions { + c.Ui.Output(str) + } + } + } + + return 0 +} + +func (c *VersionCommand) Synopsis() string { + return "Show the current OpenTofu version" +} diff --git a/pkg/command/version_test.go b/pkg/command/version_test.go new file mode 100644 index 00000000000..d1fe60e6140 --- /dev/null +++ b/pkg/command/version_test.go @@ -0,0 +1,174 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/mitchellh/cli" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestVersionCommand_implements(t *testing.T) { + var _ cli.Command = &VersionCommand{} +} + +func TestVersion(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + // We'll create a fixed dependency lock file in our working directory + // so we can verify that the version command shows the information + // from it. + locks := depsfile.NewLocks() + locks.SetProvider( + addrs.NewDefaultProvider("test2"), + getproviders.MustParseVersion("1.2.3"), + nil, + nil, + ) + locks.SetProvider( + addrs.NewDefaultProvider("test1"), + getproviders.MustParseVersion("7.8.9-beta.2"), + nil, + nil, + ) + + ui := cli.NewMockUi() + c := &VersionCommand{ + Meta: Meta{ + Ui: ui, + }, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if err := c.replaceLockedDependencies(locks); err != nil { + t.Fatal(err) + } + if code := c.Run([]string{}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "OpenTofu v4.5.6-foo\non aros_riscv64\n+ provider registry.opentofu.org/hashicorp/test1 v7.8.9-beta.2\n+ provider registry.opentofu.org/hashicorp/test2 v1.2.3" + if actual != expected { + t.Fatalf("wrong output\ngot:\n%s\nwant:\n%s", actual, expected) + } + +} + +func TestVersion_flags(t *testing.T) { + ui := new(cli.MockUi) + m := Meta{ + Ui: ui, + } + + // `tofu version` + c := &VersionCommand{ + Meta: m, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + + if code := c.Run([]string{"-v", "-version"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + 
+ actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "OpenTofu v4.5.6-foo\non aros_riscv64" + if actual != expected { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestVersion_json(t *testing.T) { + td := t.TempDir() + defer testChdir(t, td)() + + ui := cli.NewMockUi() + meta := Meta{ + Ui: ui, + } + + // `tofu version -json` without prerelease + c := &VersionCommand{ + Meta: meta, + Version: "4.5.6", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := strings.TrimSpace(` +{ + "terraform_version": "4.5.6", + "platform": "aros_riscv64", + "provider_selections": {} +} +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } + + // flush the output from the mock ui + ui.OutputWriter.Reset() + + // Now we'll create a fixed dependency lock file in our working directory + // so we can verify that the version command shows the information + // from it. 
+ locks := depsfile.NewLocks() + locks.SetProvider( + addrs.NewDefaultProvider("test2"), + getproviders.MustParseVersion("1.2.3"), + nil, + nil, + ) + locks.SetProvider( + addrs.NewDefaultProvider("test1"), + getproviders.MustParseVersion("7.8.9-beta.2"), + nil, + nil, + ) + + // `tofu version -json` with prerelease and provider dependencies + c = &VersionCommand{ + Meta: meta, + Version: "4.5.6", + VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, + } + if err := c.replaceLockedDependencies(locks); err != nil { + t.Fatal(err) + } + if code := c.Run([]string{"-json"}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual = strings.TrimSpace(ui.OutputWriter.String()) + expected = strings.TrimSpace(` +{ + "terraform_version": "4.5.6-foo", + "platform": "aros_riscv64", + "provider_selections": { + "registry.opentofu.org/hashicorp/test1": "7.8.9-beta.2", + "registry.opentofu.org/hashicorp/test2": "1.2.3" + } +} +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } + +} diff --git a/pkg/command/views/apply.go b/pkg/command/views/apply.go new file mode 100644 index 00000000000..6c841b7553f --- /dev/null +++ b/pkg/command/views/apply.go @@ -0,0 +1,176 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// The Apply view is used for the apply command. 
+type Apply interface { + ResourceCount(stateOutPath string) + Outputs(outputValues map[string]*states.OutputValue) + + Operation() Operation + Hooks() []tofu.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewApply returns an initialized Apply implementation for the given ViewType. +func NewApply(vt arguments.ViewType, destroy bool, view *View) Apply { + switch vt { + case arguments.ViewJSON: + return &ApplyJSON{ + view: NewJSONView(view), + destroy: destroy, + countHook: &countHook{}, + } + case arguments.ViewHuman: + return &ApplyHuman{ + view: view, + destroy: destroy, + inAutomation: view.RunningInAutomation(), + countHook: &countHook{}, + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The ApplyHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. +type ApplyHuman struct { + view *View + + destroy bool + inAutomation bool + + countHook *countHook +} + +var _ Apply = (*ApplyHuman)(nil) + +func (v *ApplyHuman) ResourceCount(stateOutPath string) { + if v.destroy { + v.view.streams.Printf( + v.view.colorize.Color("[reset][bold][green]\nDestroy complete! Resources: %d destroyed.\n"), + v.countHook.Removed, + ) + } else if v.countHook.Imported > 0 { + v.view.streams.Printf( + v.view.colorize.Color("[reset][bold][green]\nApply complete! Resources: %d imported, %d added, %d changed, %d destroyed.\n"), + v.countHook.Imported, + v.countHook.Added, + v.countHook.Changed, + v.countHook.Removed, + ) + } else { + v.view.streams.Printf( + v.view.colorize.Color("[reset][bold][green]\nApply complete! 
Resources: %d added, %d changed, %d destroyed.\n"), + v.countHook.Added, + v.countHook.Changed, + v.countHook.Removed, + ) + } + if (v.countHook.Added > 0 || v.countHook.Changed > 0) && stateOutPath != "" { + v.view.streams.Printf("\n%s\n\n", format.WordWrap(stateOutPathPostApply, v.view.outputColumns())) + v.view.streams.Printf("State path: %s\n", stateOutPath) + } +} + +func (v *ApplyHuman) Outputs(outputValues map[string]*states.OutputValue) { + if len(outputValues) > 0 { + v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) + NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) + } +} + +func (v *ApplyHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *ApplyHuman) Hooks() []tofu.Hook { + return []tofu.Hook{ + v.countHook, + NewUiHook(v.view), + } +} + +func (v *ApplyHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *ApplyHuman) HelpPrompt() { + command := "apply" + if v.destroy { + command = "destroy" + } + v.view.HelpPrompt(command) +} + +const stateOutPathPostApply = "The state of your infrastructure has been saved to the path below. This state is required to modify and destroy your infrastructure, so keep it safe. To inspect the complete state use the `tofu show` command." + +// The ApplyJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. 
+type ApplyJSON struct { + view *JSONView + + destroy bool + + countHook *countHook +} + +var _ Apply = (*ApplyJSON)(nil) + +func (v *ApplyJSON) ResourceCount(stateOutPath string) { + operation := json.OperationApplied + if v.destroy { + operation = json.OperationDestroyed + } + v.view.ChangeSummary(&json.ChangeSummary{ + Add: v.countHook.Added, + Change: v.countHook.Changed, + Remove: v.countHook.Removed, + Import: v.countHook.Imported, + Operation: operation, + }) +} + +func (v *ApplyJSON) Outputs(outputValues map[string]*states.OutputValue) { + outputs, diags := json.OutputsFromMap(outputValues) + if diags.HasErrors() { + v.Diagnostics(diags) + } else { + v.view.Outputs(outputs) + } +} + +func (v *ApplyJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *ApplyJSON) Hooks() []tofu.Hook { + return []tofu.Hook{ + v.countHook, + newJSONHook(v.view), + } +} + +func (v *ApplyJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *ApplyJSON) HelpPrompt() { +} diff --git a/pkg/command/views/apply_test.go b/pkg/command/views/apply_test.go new file mode 100644 index 00000000000..b48b1722a2f --- /dev/null +++ b/pkg/command/views/apply_test.go @@ -0,0 +1,272 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/zclconf/go-cty/cty" +) + +// This test is mostly because I am paranoid about having two consecutive +// boolean arguments. 
+func TestApply_new(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)) + hv, ok := v.(*ApplyHuman) + if !ok { + t.Fatalf("unexpected return type %t", v) + } + + if hv.destroy != false { + t.Fatalf("unexpected destroy value") + } + + if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value") + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. +func TestApplyHuman_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "foo": {Value: cty.StringVal("secret")}, + }) + + got := done(t).Stdout() + for _, want := range []string{"Outputs:", `foo = "secret"`} { + if !strings.Contains(got, want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + } +} + +// Outputs should do nothing if there are no outputs to render. +func TestApplyHuman_outputsEmpty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{}) + + got := done(t).Stdout() + if got != "" { + t.Errorf("output should be empty, but got: %q", got) + } +} + +// Ensure that the correct view type and in-automation settings propagate to the +// Operation view. +func TestApplyHuman_operation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams).SetRunningInAutomation(true)).Operation() + if hv, ok := v.(*OperationHuman); !ok { + t.Fatalf("unexpected return type %t", v) + } else if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value on Operation view") + } +} + +// This view is used for both apply and destroy commands, so the help output +// needs to cover both. 
+func TestApplyHuman_help(t *testing.T) { + testCases := map[string]bool{ + "apply": false, + "destroy": true, + } + + for name, destroy := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, destroy, NewView(streams)) + v.HelpPrompt() + got := done(t).Stderr() + if !strings.Contains(got, name) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, name) + } + }) + } +} + +// Hooks and ResourceCount are tangled up and easiest to test together. +func TestApply_resourceCount(t *testing.T) { + testCases := map[string]struct { + destroy bool + want string + importing bool + }{ + "apply": { + false, + "Apply complete! Resources: 1 added, 2 changed, 3 destroyed.", + false, + }, + "destroy": { + true, + "Destroy complete! Resources: 3 destroyed.", + false, + }, + "import": { + false, + "Apply complete! Resources: 1 imported, 1 added, 2 changed, 3 destroyed.", + true, + }, + } + + // For compatibility reasons, these tests should hold true for both human + // and JSON output modes + views := []arguments.ViewType{arguments.ViewHuman, arguments.ViewJSON} + + for name, tc := range testCases { + for _, viewType := range views { + t.Run(fmt.Sprintf("%s (%s view)", name, viewType), func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(viewType, tc.destroy, NewView(streams)) + hooks := v.Hooks() + + var count *countHook + for _, hook := range hooks { + if ch, ok := hook.(*countHook); ok { + count = ch + } + } + if count == nil { + t.Fatalf("expected Hooks to include a countHook: %#v", hooks) + } + + count.Added = 1 + count.Changed = 2 + count.Removed = 3 + + if tc.importing { + count.Imported = 1 + } + + v.ResourceCount("") + + got := done(t).Stdout() + if !strings.Contains(got, tc.want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) + } + }) + } + } +} + +func TestApplyHuman_resourceCountStatePath(t *testing.T) { + testCases := map[string]struct { + 
added int + changed int + removed int + statePath string + wantContains bool + }{ + "default state path": { + added: 1, + changed: 2, + removed: 3, + statePath: "", + wantContains: false, + }, + "only removed": { + added: 0, + changed: 0, + removed: 5, + statePath: "foo.tfstate", + wantContains: false, + }, + "added": { + added: 5, + changed: 0, + removed: 0, + statePath: "foo.tfstate", + wantContains: true, + }, + "changed": { + added: 0, + changed: 5, + removed: 0, + statePath: "foo.tfstate", + wantContains: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewHuman, false, NewView(streams)) + hooks := v.Hooks() + + var count *countHook + for _, hook := range hooks { + if ch, ok := hook.(*countHook); ok { + count = ch + } + } + if count == nil { + t.Fatalf("expected Hooks to include a countHook: %#v", hooks) + } + + count.Added = tc.added + count.Changed = tc.changed + count.Removed = tc.removed + + v.ResourceCount(tc.statePath) + + got := done(t).Stdout() + want := "State path: " + tc.statePath + contains := strings.Contains(got, want) + if contains && !tc.wantContains { + t.Errorf("wrong result\ngot: %q\nshould not contain: %q", got, want) + } else if !contains && tc.wantContains { + t.Errorf("wrong result\ngot: %q\nshould contain: %q", got, want) + } + }) + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. 
+func TestApplyJSON_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewApply(arguments.ViewJSON, false, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "boop_count": {Value: cty.NumberIntVal(92)}, + "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Outputs: 2", + "@module": "tofu.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop_count": map[string]interface{}{ + "sensitive": false, + "value": float64(92), + "type": "number", + }, + "password": map[string]interface{}{ + "sensitive": true, + "type": "string", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} diff --git a/pkg/command/views/hook_count.go b/pkg/command/views/hook_count.go new file mode 100644 index 00000000000..50e40a33938 --- /dev/null +++ b/pkg/command/views/hook_count.go @@ -0,0 +1,121 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// countHook is a hook that counts the number of resources +// added, removed, changed during the course of an apply. 
+type countHook struct { + Added int + Changed int + Removed int + Imported int + + ToAdd int + ToChange int + ToRemove int + ToRemoveAndAdd int + + sync.Mutex + pending map[string]plans.Action + + tofu.NilHook +} + +var _ tofu.Hook = (*countHook)(nil) + +func (h *countHook) Reset() { + h.Lock() + defer h.Unlock() + + h.pending = nil + h.Added = 0 + h.Changed = 0 + h.Removed = 0 + h.Imported = 0 +} + +func (h *countHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (tofu.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.pending == nil { + h.pending = make(map[string]plans.Action) + } + + h.pending[addr.String()] = action + + return tofu.HookActionContinue, nil +} + +func (h *countHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (tofu.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.pending != nil { + pendingKey := addr.String() + if action, ok := h.pending[pendingKey]; ok { + delete(h.pending, pendingKey) + + if err == nil { + switch action { + case plans.CreateThenDelete, plans.DeleteThenCreate: + h.Added++ + h.Removed++ + case plans.Create: + h.Added++ + case plans.Delete: + h.Removed++ + case plans.Update: + h.Changed++ + } + } + } + } + + return tofu.HookActionContinue, nil +} + +func (h *countHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (tofu.HookAction, error) { + h.Lock() + defer h.Unlock() + + // We don't count anything for data resources + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + return tofu.HookActionContinue, nil + } + + switch action { + case plans.CreateThenDelete, plans.DeleteThenCreate: + h.ToRemoveAndAdd += 1 + case plans.Create: + h.ToAdd += 1 + case plans.Delete: + h.ToRemove += 1 + case plans.Update: + h.ToChange += 1 + } + + return tofu.HookActionContinue, nil +} + +func (h *countHook) 
PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (tofu.HookAction, error) { + h.Lock() + defer h.Unlock() + + h.Imported++ + return tofu.HookActionContinue, nil +} diff --git a/pkg/command/views/hook_count_test.go b/pkg/command/views/hook_count_test.go new file mode 100644 index 00000000000..45b75ff1c66 --- /dev/null +++ b/pkg/command/views/hook_count_test.go @@ -0,0 +1,342 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "reflect" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" + + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestCountHook_impl(t *testing.T) { + var _ tofu.Hook = new(countHook) +} + +func TestCountHookPostDiff_DestroyDeposed(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "lorem": &legacy.InstanceDiff{DestroyDeposed: true}, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.DeposedKey("deadbeef"), plans.Delete, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 0 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 1 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", expected, h) + } +} + +func TestCountHookPostDiff_DestroyOnly(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{Destroy: true}, + "bar": &legacy.InstanceDiff{Destroy: true}, + "lorem": &legacy.InstanceDiff{Destroy: true}, + "ipsum": &legacy.InstanceDiff{Destroy: 
true}, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, plans.Delete, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 0 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 4 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", expected, h) + } +} + +func TestCountHookPostDiff_AddOnly(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, + }, + }, + "bar": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, + }, + }, + "lorem": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, + }, + }, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, plans.Create, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 3 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 0 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", expected, h) + } +} + +func TestCountHookPostDiff_ChangeOnly(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, + }, + }, + "bar": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": 
&legacy.ResourceAttrDiff{}, + }, + }, + "lorem": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, + }, + }, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, plans.Update, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 0 + expected.ToChange = 3 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 0 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", expected, h) + } +} + +func TestCountHookPostDiff_Mixed(t *testing.T) { + h := new(countHook) + + resources := map[string]plans.Action{ + "foo": plans.Delete, + "bar": plans.NoOp, + "lorem": plans.Update, + "ipsum": plans.Delete, + } + + for k, a := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, a, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 0 + expected.ToChange = 1 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 2 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", + expected, h) + } +} + +func TestCountHookPostDiff_NoChange(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{}, + "bar": &legacy.InstanceDiff{}, + "lorem": &legacy.InstanceDiff{}, + "ipsum": &legacy.InstanceDiff{}, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, plans.NoOp, cty.DynamicVal, cty.DynamicVal) + } + + expected := 
new(countHook) + expected.ToAdd = 0 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 0 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", + expected, h) + } +} + +func TestCountHookPostDiff_DataSource(t *testing.T) { + h := new(countHook) + + resources := map[string]plans.Action{ + "foo": plans.Delete, + "bar": plans.NoOp, + "lorem": plans.Update, + "ipsum": plans.Delete, + } + + for k, a := range resources { + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PostDiff(addr, states.CurrentGen, a, cty.DynamicVal, cty.DynamicVal) + } + + expected := new(countHook) + expected.ToAdd = 0 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 0 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", + expected, h) + } +} + +func TestCountHookApply_ChangeOnly(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, + }, + }, + "bar": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, + }, + }, + "lorem": &legacy.InstanceDiff{ + Destroy: false, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, + }, + }, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PreApply(addr, states.CurrentGen, plans.Update, cty.DynamicVal, cty.DynamicVal) + h.PostApply(addr, states.CurrentGen, cty.DynamicVal, nil) + } + + expected := &countHook{pending: make(map[string]plans.Action)} + expected.Added = 0 + expected.Changed = 3 + expected.Removed = 0 + + if 
!reflect.DeepEqual(expected, h) { + t.Fatalf("Expected:\n%#v\nGot:\n%#v\n", expected, h) + } +} + +func TestCountHookApply_DestroyOnly(t *testing.T) { + h := new(countHook) + + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{Destroy: true}, + "bar": &legacy.InstanceDiff{Destroy: true}, + "lorem": &legacy.InstanceDiff{Destroy: true}, + "ipsum": &legacy.InstanceDiff{Destroy: true}, + } + + for k := range resources { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: k, + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + h.PreApply(addr, states.CurrentGen, plans.Delete, cty.DynamicVal, cty.DynamicVal) + h.PostApply(addr, states.CurrentGen, cty.DynamicVal, nil) + } + + expected := &countHook{pending: make(map[string]plans.Action)} + expected.Added = 0 + expected.Changed = 0 + expected.Removed = 4 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected:\n%#v\nGot:\n%#v\n", expected, h) + } +} diff --git a/pkg/command/views/hook_json.go b/pkg/command/views/hook_json.go new file mode 100644 index 00000000000..bd0f51fa8b8 --- /dev/null +++ b/pkg/command/views/hook_json.go @@ -0,0 +1,176 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bufio" + "strings" + "sync" + "time" + "unicode" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// How long to wait between sending heartbeat/progress messages +const heartbeatInterval = 10 * time.Second + +func newJSONHook(view *JSONView) *jsonHook { + return &jsonHook{ + view: view, + applying: make(map[string]applyProgress), + timeNow: time.Now, + timeAfter: time.After, + } +} + +type jsonHook struct { + tofu.NilHook + + view *JSONView + + applyingLock sync.Mutex + // Concurrent map of resource addresses to allow the sequence of pre-apply, + // progress, and post-apply messages to share data about the resource + applying map[string]applyProgress + + // Mockable functions for testing the progress timer goroutine + timeNow func() time.Time + timeAfter func(time.Duration) <-chan time.Time +} + +var _ tofu.Hook = (*jsonHook)(nil) + +type applyProgress struct { + addr addrs.AbsResourceInstance + action plans.Action + start time.Time + + // done is used for post-apply to stop the progress goroutine + done chan struct{} + + // heartbeatDone is used to allow tests to safely wait for the progress + // goroutine to finish + heartbeatDone chan struct{} + + // elapsed is used to allow tests to safely check for heartbeat executions + elapsed chan time.Duration +} + +func (h *jsonHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (tofu.HookAction, error) { + if action != plans.NoOp { + idKey, idValue := format.ObjectValueIDOrName(priorState) + h.view.Hook(json.NewApplyStart(addr, action, idKey, idValue)) + } + + progress := applyProgress{ + addr: addr, + 
action: action, + start: h.timeNow().Round(time.Second), + elapsed: make(chan time.Duration), + done: make(chan struct{}), + heartbeatDone: make(chan struct{}), + } + h.applyingLock.Lock() + h.applying[addr.String()] = progress + h.applyingLock.Unlock() + + if action != plans.NoOp { + go h.applyingHeartbeat(progress) + } + return tofu.HookActionContinue, nil +} + +func (h *jsonHook) applyingHeartbeat(progress applyProgress) { + defer close(progress.heartbeatDone) + defer close(progress.elapsed) + for { + select { + case <-progress.done: + return + case <-h.timeAfter(heartbeatInterval): + } + + elapsed := h.timeNow().Round(time.Second).Sub(progress.start) + h.view.Hook(json.NewApplyProgress(progress.addr, progress.action, elapsed)) + progress.elapsed <- elapsed + } +} + +func (h *jsonHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (tofu.HookAction, error) { + key := addr.String() + h.applyingLock.Lock() + progress := h.applying[key] + if progress.done != nil { + close(progress.done) + } + delete(h.applying, key) + h.applyingLock.Unlock() + + if progress.action == plans.NoOp { + return tofu.HookActionContinue, nil + } + + elapsed := h.timeNow().Round(time.Second).Sub(progress.start) + + if err != nil { + // Errors are collected and displayed post-apply, so no need to + // re-render them here. Instead just signal that this resource failed + // to apply. 
+ h.view.Hook(json.NewApplyErrored(addr, progress.action, elapsed)) + } else { + idKey, idValue := format.ObjectValueID(newState) + h.view.Hook(json.NewApplyComplete(addr, progress.action, idKey, idValue, elapsed)) + } + return tofu.HookActionContinue, nil +} + +func (h *jsonHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (tofu.HookAction, error) { + h.view.Hook(json.NewProvisionStart(addr, typeName)) + return tofu.HookActionContinue, nil +} + +func (h *jsonHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (tofu.HookAction, error) { + if err != nil { + // Errors are collected and displayed post-apply, so no need to + // re-render them here. Instead just signal that this provisioner step + // failed. + h.view.Hook(json.NewProvisionErrored(addr, typeName)) + } else { + h.view.Hook(json.NewProvisionComplete(addr, typeName)) + } + return tofu.HookActionContinue, nil +} + +func (h *jsonHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, msg string) { + s := bufio.NewScanner(strings.NewReader(msg)) + s.Split(scanLines) + for s.Scan() { + line := strings.TrimRightFunc(s.Text(), unicode.IsSpace) + if line != "" { + h.view.Hook(json.NewProvisionProgress(addr, typeName, line)) + } + } +} + +func (h *jsonHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (tofu.HookAction, error) { + idKey, idValue := format.ObjectValueID(priorState) + h.view.Hook(json.NewRefreshStart(addr, idKey, idValue)) + return tofu.HookActionContinue, nil +} + +func (h *jsonHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (tofu.HookAction, error) { + idKey, idValue := format.ObjectValueID(newState) + h.view.Hook(json.NewRefreshComplete(addr, idKey, idValue)) + return tofu.HookActionContinue, nil +} diff --git a/pkg/command/views/hook_json_test.go b/pkg/command/views/hook_json_test.go new file mode 100644 
index 00000000000..583b44bf735 --- /dev/null +++ b/pkg/command/views/hook_json_test.go @@ -0,0 +1,359 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +// Test a sequence of hooks associated with creating a resource +func TestJSONHook_create(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + hook := newJSONHook(NewJSONView(NewView(streams))) + + var nowMu sync.Mutex + now := time.Now() + hook.timeNow = func() time.Time { + nowMu.Lock() + defer nowMu.Unlock() + return now + } + + after := make(chan time.Time, 1) + hook.timeAfter = func(time.Duration) <-chan time.Time { return after } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "boop", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + priorState := cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "bar": cty.List(cty.String), + })) + plannedNewState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListVal([]cty.Value{ + cty.StringVal("baz"), + }), + }) + + action, err := hook.PreApply(addr, states.CurrentGen, plans.Create, priorState, plannedNewState) + testHookReturnValues(t, action, err) + + action, err = hook.PreProvisionInstanceStep(addr, "local-exec") + testHookReturnValues(t, action, err) + + hook.ProvisionOutput(addr, "local-exec", `Executing: ["/bin/sh" "-c" "touch /etc/motd"]`) + + action, err = hook.PostProvisionInstanceStep(addr, "local-exec", nil) + testHookReturnValues(t, action, err) + + elapsedChan := 
hook.applying[addr.String()].elapsed + + // Travel 10s into the future, notify the progress goroutine, and wait + // for execution via 'elapsed' progress + nowMu.Lock() + now = now.Add(10 * time.Second) + after <- now + nowMu.Unlock() + elapsed := <-elapsedChan + testDurationEqual(t, 10*time.Second, elapsed) + + // Travel 10s into the future, notify the progress goroutine, and wait + // for execution via 'elapsed' progress + nowMu.Lock() + now = now.Add(10 * time.Second) + after <- now + nowMu.Unlock() + elapsed = <-elapsedChan + testDurationEqual(t, 20*time.Second, elapsed) + + // Travel 2s into the future. We have arrived! + nowMu.Lock() + now = now.Add(2 * time.Second) + nowMu.Unlock() + + action, err = hook.PostApply(addr, states.CurrentGen, plannedNewState, nil) + testHookReturnValues(t, action, err) + + // Shut down the progress goroutine if still active + hook.applyingLock.Lock() + for key, progress := range hook.applying { + close(progress.done) + close(progress.elapsed) + <-progress.heartbeatDone + delete(hook.applying, key) + } + hook.applyingLock.Unlock() + + wantResource := map[string]interface{}{ + "addr": string("test_instance.boop"), + "implied_provider": string("test"), + "module": string(""), + "resource": string("test_instance.boop"), + "resource_key": nil, + "resource_name": string("boop"), + "resource_type": string("test_instance"), + } + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "test_instance.boop: Creating...", + "@module": "tofu.ui", + "type": "apply_start", + "hook": map[string]interface{}{ + "action": string("create"), + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: Provisioning with 'local-exec'...", + "@module": "tofu.ui", + "type": "provision_start", + "hook": map[string]interface{}{ + "provisioner": "local-exec", + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": `test_instance.boop: (local-exec): Executing: ["/bin/sh" "-c" "touch 
/etc/motd"]`, + "@module": "tofu.ui", + "type": "provision_progress", + "hook": map[string]interface{}{ + "output": `Executing: ["/bin/sh" "-c" "touch /etc/motd"]`, + "provisioner": "local-exec", + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: (local-exec) Provisioning complete", + "@module": "tofu.ui", + "type": "provision_complete", + "hook": map[string]interface{}{ + "provisioner": "local-exec", + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: Still creating... [10s elapsed]", + "@module": "tofu.ui", + "type": "apply_progress", + "hook": map[string]interface{}{ + "action": string("create"), + "elapsed_seconds": float64(10), + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: Still creating... [20s elapsed]", + "@module": "tofu.ui", + "type": "apply_progress", + "hook": map[string]interface{}{ + "action": string("create"), + "elapsed_seconds": float64(20), + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: Creation complete after 22s [id=test]", + "@module": "tofu.ui", + "type": "apply_complete", + "hook": map[string]interface{}{ + "action": string("create"), + "elapsed_seconds": float64(22), + "id_key": "id", + "id_value": "test", + "resource": wantResource, + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONHook_errors(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + hook := newJSONHook(NewJSONView(NewView(streams))) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "boop", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + priorState := cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "bar": cty.List(cty.String), + })) + plannedNewState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListVal([]cty.Value{ + 
cty.StringVal("baz"), + }), + }) + + action, err := hook.PreApply(addr, states.CurrentGen, plans.Delete, priorState, plannedNewState) + testHookReturnValues(t, action, err) + + provisionError := fmt.Errorf("provisioner didn't want to") + action, err = hook.PostProvisionInstanceStep(addr, "local-exec", provisionError) + testHookReturnValues(t, action, err) + + applyError := fmt.Errorf("provider was sad") + action, err = hook.PostApply(addr, states.CurrentGen, plannedNewState, applyError) + testHookReturnValues(t, action, err) + + // Shut down the progress goroutine + hook.applyingLock.Lock() + for key, progress := range hook.applying { + close(progress.done) + close(progress.elapsed) + <-progress.heartbeatDone + delete(hook.applying, key) + } + hook.applyingLock.Unlock() + + wantResource := map[string]interface{}{ + "addr": string("test_instance.boop"), + "implied_provider": string("test"), + "module": string(""), + "resource": string("test_instance.boop"), + "resource_key": nil, + "resource_name": string("boop"), + "resource_type": string("test_instance"), + } + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "test_instance.boop: Destroying...", + "@module": "tofu.ui", + "type": "apply_start", + "hook": map[string]interface{}{ + "action": string("delete"), + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: (local-exec) Provisioning errored", + "@module": "tofu.ui", + "type": "provision_errored", + "hook": map[string]interface{}{ + "provisioner": "local-exec", + "resource": wantResource, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop: Destruction errored after 0s", + "@module": "tofu.ui", + "type": "apply_errored", + "hook": map[string]interface{}{ + "action": string("delete"), + "elapsed_seconds": float64(0), + "resource": wantResource, + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONHook_refresh(t *testing.T) { + streams, done := 
terminal.StreamsForTesting(t) + hook := newJSONHook(NewJSONView(NewView(streams))) + + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "beep", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + state := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("honk"), + "bar": cty.ListVal([]cty.Value{ + cty.StringVal("baz"), + }), + }) + + action, err := hook.PreRefresh(addr, states.CurrentGen, state) + testHookReturnValues(t, action, err) + + action, err = hook.PostRefresh(addr, states.CurrentGen, state, state) + testHookReturnValues(t, action, err) + + wantResource := map[string]interface{}{ + "addr": string("data.test_data_source.beep"), + "implied_provider": string("test"), + "module": string(""), + "resource": string("data.test_data_source.beep"), + "resource_key": nil, + "resource_name": string("beep"), + "resource_type": string("test_data_source"), + } + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "data.test_data_source.beep: Refreshing state... 
[id=honk]", + "@module": "tofu.ui", + "type": "refresh_start", + "hook": map[string]interface{}{ + "resource": wantResource, + "id_key": "id", + "id_value": "honk", + }, + }, + { + "@level": "info", + "@message": "data.test_data_source.beep: Refresh complete [id=honk]", + "@module": "tofu.ui", + "type": "refresh_complete", + "hook": map[string]interface{}{ + "resource": wantResource, + "id_key": "id", + "id_value": "honk", + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func testHookReturnValues(t *testing.T, action tofu.HookAction, err error) { + t.Helper() + + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } +} + +func testDurationEqual(t *testing.T, wantedDuration time.Duration, gotDuration time.Duration) { + t.Helper() + + if !cmp.Equal(wantedDuration, gotDuration) { + t.Errorf("unexpected time elapsed:%s\n", cmp.Diff(wantedDuration, gotDuration)) + } +} diff --git a/pkg/command/views/hook_ui.go b/pkg/command/views/hook_ui.go new file mode 100644 index 00000000000..84da9537a6e --- /dev/null +++ b/pkg/command/views/hook_ui.go @@ -0,0 +1,404 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "sync" + "time" + "unicode" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +const defaultPeriodicUiTimer = 10 * time.Second +const maxIdLen = 80 + +func NewUiHook(view *View) *UiHook { + return &UiHook{ + view: view, + periodicUiTimer: defaultPeriodicUiTimer, + resources: make(map[string]uiResourceState), + } +} + +type UiHook struct { + tofu.NilHook + + viewLock sync.Mutex + view *View + + periodicUiTimer time.Duration + + resourcesLock sync.Mutex + resources map[string]uiResourceState +} + +var _ tofu.Hook = (*UiHook)(nil) + +// uiResourceState tracks the state of a single resource +type uiResourceState struct { + DispAddr string + IDKey, IDValue string + Op uiResourceOp + Start time.Time + + DoneCh chan struct{} // To be used for cancellation + + done chan struct{} // used to coordinate tests +} + +// uiResourceOp is an enum for operations on a resource +type uiResourceOp byte + +const ( + uiResourceUnknown uiResourceOp = iota + uiResourceCreate + uiResourceModify + uiResourceDestroy + uiResourceRead + uiResourceNoOp +) + +func (h *UiHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (tofu.HookAction, error) { + dispAddr := addr.String() + if gen != states.CurrentGen { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, gen) + } + + var operation string + var op uiResourceOp + idKey, idValue := format.ObjectValueIDOrName(priorState) + switch action { + case plans.Delete: + operation = "Destroying..." + op = uiResourceDestroy + case plans.Create: + operation = "Creating..." 
+ op = uiResourceCreate + case plans.Update: + operation = "Modifying..." + op = uiResourceModify + case plans.Read: + operation = "Reading..." + op = uiResourceRead + case plans.NoOp: + op = uiResourceNoOp + default: + // We don't expect any other actions in here, so anything else is a + // bug in the caller but we'll ignore it in order to be robust. + h.println(fmt.Sprintf("(Unknown action %s for %s)", action, dispAddr)) + return tofu.HookActionContinue, nil + } + + var stateIdSuffix string + if idKey != "" && idValue != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", idKey, idValue) + } else { + // Make sure they are both empty so we can deal with this more + // easily in the other hook methods. + idKey = "" + idValue = "" + } + + if operation != "" { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: %s%s[reset]"), + dispAddr, + operation, + stateIdSuffix, + )) + } + + key := addr.String() + uiState := uiResourceState{ + DispAddr: key, + IDKey: idKey, + IDValue: idValue, + Op: op, + Start: time.Now().Round(time.Second), + DoneCh: make(chan struct{}), + done: make(chan struct{}), + } + + h.resourcesLock.Lock() + h.resources[key] = uiState + h.resourcesLock.Unlock() + + // Start goroutine that shows progress + if op != uiResourceNoOp { + go h.stillApplying(uiState) + } + + return tofu.HookActionContinue, nil +} + +func (h *UiHook) stillApplying(state uiResourceState) { + defer close(state.done) + for { + select { + case <-state.DoneCh: + return + + case <-time.After(h.periodicUiTimer): + // Timer up, show status + } + + var msg string + switch state.Op { + case uiResourceModify: + msg = "Still modifying..." + case uiResourceDestroy: + msg = "Still destroying..." + case uiResourceCreate: + msg = "Still creating..." + case uiResourceRead: + msg = "Still reading..." 
+ case uiResourceUnknown: + return + } + + idSuffix := "" + if state.IDKey != "" { + idSuffix = fmt.Sprintf("%s=%s, ", state.IDKey, truncateId(state.IDValue, maxIdLen)) + } + + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: %s [%s%s elapsed][reset]"), + state.DispAddr, + msg, + idSuffix, + time.Now().Round(time.Second).Sub(state.Start), + )) + } +} + +func (h *UiHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, applyerr error) (tofu.HookAction, error) { + id := addr.String() + + h.resourcesLock.Lock() + state := h.resources[id] + if state.DoneCh != nil { + close(state.DoneCh) + } + + delete(h.resources, id) + h.resourcesLock.Unlock() + + var stateIdSuffix string + if k, v := format.ObjectValueID(newState); k != "" && v != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v) + } + + var msg string + switch state.Op { + case uiResourceModify: + msg = "Modifications complete" + case uiResourceDestroy: + msg = "Destruction complete" + case uiResourceCreate: + msg = "Creation complete" + case uiResourceRead: + msg = "Read complete" + case uiResourceNoOp: + // We don't make any announcements about no-op changes + return tofu.HookActionContinue, nil + case uiResourceUnknown: + return tofu.HookActionContinue, nil + } + + if applyerr != nil { + // Errors are collected and printed in ApplyCommand, no need to duplicate + return tofu.HookActionContinue, nil + } + + addrStr := addr.String() + if depKey, ok := gen.(states.DeposedKey); ok { + addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey) + } + + colorized := fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: %s after %s%s"), + addrStr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix) + + h.println(colorized) + + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + 
h.view.colorize.Color("[reset][bold]%s: Provisioning with '%s'...[reset]"), + addr, typeName, + )) + return tofu.HookActionContinue, nil +} + +func (h *UiHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, msg string) { + var buf bytes.Buffer + + prefix := fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s (%s):[reset] "), + addr, typeName, + ) + s := bufio.NewScanner(strings.NewReader(msg)) + s.Split(scanLines) + for s.Scan() { + line := strings.TrimRightFunc(s.Text(), unicode.IsSpace) + if line != "" { + buf.WriteString(fmt.Sprintf("%s%s\n", prefix, line)) + } + } + + h.println(strings.TrimSpace(buf.String())) +} + +func (h *UiHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (tofu.HookAction, error) { + var stateIdSuffix string + if k, v := format.ObjectValueID(priorState); k != "" && v != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v) + } + + addrStr := addr.String() + if depKey, ok := gen.(states.DeposedKey); ok { + addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey) + } + if !h.view.concise { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: Refreshing state...%s"), + addrStr, stateIdSuffix)) + } + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: Importing from ID %q..."), + addr, importID, + )) + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold][green]%s: Import prepared!"), + addr, + )) + for _, s := range imported { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][green] Prepared %s for import"), + s.TypeName, + )) + } + + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PrePlanImport(addr 
addrs.AbsResourceInstance, importID string) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: Preparing import... [id=%s]"), + addr, importID, + )) + + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: Importing... [id=%s]"), + addr, importing.ID, + )) + + return tofu.HookActionContinue, nil +} + +func (h *UiHook) PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (tofu.HookAction, error) { + h.println(fmt.Sprintf( + h.view.colorize.Color("[reset][bold]%s: Import complete [id=%s]"), + addr, importing.ID, + )) + + return tofu.HookActionContinue, nil +} + +// Wrap calls to the view so that concurrent calls do not interleave println. +func (h *UiHook) println(s string) { + h.viewLock.Lock() + defer h.viewLock.Unlock() + h.view.streams.Println(s) +} + +// scanLines is basically copied from the Go standard library except +// we've modified it to also find `\r`. +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + // We have a full newline-terminated line. + return i + 1, dropCR(data[0:i]), nil + } + if i := bytes.IndexByte(data, '\r'); i >= 0 { + // We have a full carriage-return-terminated line. + return i + 1, dropCR(data[0:i]), nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), dropCR(data), nil + } + // Request more data. + return 0, nil, nil +} + +// dropCR drops a terminal \r from the data. 
+func dropCR(data []byte) []byte { + if len(data) > 0 && data[len(data)-1] == '\r' { + return data[0 : len(data)-1] + } + return data +} + +func truncateId(id string, maxLen int) string { + // Note that the id may contain multibyte characters. + // We need to truncate it to maxLen characters, not maxLen bytes. + rid := []rune(id) + totalLength := len(rid) + if totalLength <= maxLen { + return id + } + if maxLen < 5 { + // We don't shorten to less than 5 chars + // as that would be pointless with ... (3 chars) + maxLen = 5 + } + + dots := []rune("...") + partLen := maxLen / 2 + + leftIdx := partLen - 1 + leftPart := rid[0:leftIdx] + + rightIdx := totalLength - partLen - 1 + + overlap := maxLen - (partLen*2 + len(dots)) + if overlap < 0 { + rightIdx -= overlap + } + + rightPart := rid[rightIdx:] + + return string(leftPart) + string(dots) + string(rightPart) +} diff --git a/pkg/command/views/hook_ui_test.go b/pkg/command/views/hook_ui_test.go new file mode 100644 index 00000000000..f6f14777744 --- /dev/null +++ b/pkg/command/views/hook_ui_test.go @@ -0,0 +1,693 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + "regexp" + "testing" + "time" + + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Test the PreApply hook for creating a new resource +func TestUiHookPreApply_create(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + h.resources = map[string]uiResourceState{ + "test_instance.foo": { + Op: uiResourceCreate, + Start: time.Now(), + }, + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "bar": cty.List(cty.String), + })) + plannedNewState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListVal([]cty.Value{ + cty.StringVal("baz"), + }), + }) + + action, err := h.PreApply(addr, states.CurrentGen, plans.Create, priorState, plannedNewState) + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + + // stop the background writer + uiState := h.resources[addr.String()] + close(uiState.DoneCh) + <-uiState.done + + expectedOutput := "test_instance.foo: Creating...\n" + result := done(t) + output := result.Stdout() + if output != expectedOutput { + t.Fatalf("Output didn't match.\nExpected: %q\nGiven: %q", expectedOutput, output) + } + + expectedErrOutput := "" + errOutput := result.Stderr() + if errOutput != expectedErrOutput { + t.Fatalf("Error output didn't match.\nExpected: %q\nGiven: %q", 
expectedErrOutput, errOutput) + } +} + +// Test the PreApply hook's use of a periodic timer to display "still working" +// log lines +func TestUiHookPreApply_periodicTimer(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + h.periodicUiTimer = 1 * time.Second + h.resources = map[string]uiResourceState{ + "test_instance.foo": { + Op: uiResourceModify, + Start: time.Now(), + }, + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListValEmpty(cty.String), + }) + plannedNewState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListVal([]cty.Value{ + cty.StringVal("baz"), + }), + }) + + action, err := h.PreApply(addr, states.CurrentGen, plans.Update, priorState, plannedNewState) + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + + time.Sleep(3100 * time.Millisecond) + + // stop the background writer + uiState := h.resources[addr.String()] + close(uiState.DoneCh) + <-uiState.done + + expectedOutput := `test_instance.foo: Modifying... [id=test] +test_instance.foo: Still modifying... [id=test, 1s elapsed] +test_instance.foo: Still modifying... [id=test, 2s elapsed] +test_instance.foo: Still modifying... 
[id=test, 3s elapsed] +` + result := done(t) + output := result.Stdout() + if output != expectedOutput { + t.Fatalf("Output didn't match.\nExpected: %q\nGiven: %q", expectedOutput, output) + } + + expectedErrOutput := "" + errOutput := result.Stderr() + if errOutput != expectedErrOutput { + t.Fatalf("Error output didn't match.\nExpected: %q\nGiven: %q", expectedErrOutput, errOutput) + } +} + +// Test the PreApply hook's destroy path, including passing a deposed key as +// the gen argument. +func TestUiHookPreApply_destroy(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + h.resources = map[string]uiResourceState{ + "test_instance.foo": { + Op: uiResourceDestroy, + Start: time.Now(), + }, + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("abc123"), + "verbs": cty.ListVal([]cty.Value{ + cty.StringVal("boop"), + }), + }) + plannedNewState := cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "verbs": cty.List(cty.String), + })) + + key := states.NewDeposedKey() + action, err := h.PreApply(addr, key, plans.Delete, priorState, plannedNewState) + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + + // stop the background writer + uiState := h.resources[addr.String()] + close(uiState.DoneCh) + <-uiState.done + + result := done(t) + expectedOutput := fmt.Sprintf("test_instance.foo (deposed object %s): Destroying... 
[id=abc123]\n", key) + output := result.Stdout() + if output != expectedOutput { + t.Fatalf("Output didn't match.\nExpected: %q\nGiven: %q", expectedOutput, output) + } + + expectedErrOutput := "" + errOutput := result.Stderr() + if errOutput != expectedErrOutput { + t.Fatalf("Error output didn't match.\nExpected: %q\nGiven: %q", expectedErrOutput, errOutput) + } +} + +// Verify that colorize is called on format strings, not user input, by adding +// valid color codes as resource names and IDs. +func TestUiHookPostApply_colorInterpolation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: false}) + h := NewUiHook(view) + h.resources = map[string]uiResourceState{ + "test_instance.foo[\"[red]\"]": { + Op: uiResourceCreate, + Start: time.Now(), + }, + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.StringKey("[red]")).Absolute(addrs.RootModuleInstance) + + newState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("[blue]"), + }) + + action, err := h.PostApply(addr, states.CurrentGen, newState, nil) + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + reset := "\x1b[0m" + bold := "\x1b[1m" + wantPrefix := reset + bold + `test_instance.foo["[red]"]: Creation complete after` + wantSuffix := "[id=[blue]]" + reset + "\n" + output := result.Stdout() + + if !strings.HasPrefix(output, wantPrefix) { + t.Fatalf("wrong output prefix\n got: %#v\nwant: %#v", output, wantPrefix) + } + + if !strings.HasSuffix(output, wantSuffix) { + t.Fatalf("wrong output suffix\n got: %#v\nwant: %#v", output, wantSuffix) + } + + expectedErrOutput := "" + errOutput := result.Stderr() + if errOutput != expectedErrOutput { + t.Fatalf("Error output didn't match.\nExpected: %q\nGiven: %q", expectedErrOutput, errOutput) + } 
+} + +// Test that the PostApply hook renders a total time. +func TestUiHookPostApply_emptyState(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + h.resources = map[string]uiResourceState{ + "data.google_compute_zones.available": { + Op: uiResourceDestroy, + Start: time.Now(), + }, + } + + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "google_compute_zones", + Name: "available", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + newState := cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "names": cty.List(cty.String), + })) + + action, err := h.PostApply(addr, states.CurrentGen, newState, nil) + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + expectedRegexp := "^data.google_compute_zones.available: Destruction complete after -?[a-z0-9µ.]+\n$" + output := result.Stdout() + if matched, _ := regexp.MatchString(expectedRegexp, output); !matched { + t.Fatalf("Output didn't match regexp.\nExpected: %q\nGiven: %q", expectedRegexp, output) + } + + expectedErrOutput := "" + errOutput := result.Stderr() + if errOutput != expectedErrOutput { + t.Fatalf("Error output didn't match.\nExpected: %q\nGiven: %q", expectedErrOutput, errOutput) + } +} + +func TestPreProvisionInstanceStep(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + action, err := h.PreProvisionInstanceStep(addr, "local-exec") + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + if got, want := result.Stdout(), "test_instance.foo: Provisioning 
with 'local-exec'...\n"; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +// Test ProvisionOutput, including lots of edge cases for the output +// whitespace/line ending logic. +func TestProvisionOutput(t *testing.T) { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + testCases := map[string]struct { + provisioner string + input string + wantOutput string + }{ + "single line": { + "local-exec", + "foo\n", + "test_instance.foo (local-exec): foo\n", + }, + "multiple lines": { + "x", + `foo +bar +baz +`, + `test_instance.foo (x): foo +test_instance.foo (x): bar +test_instance.foo (x): baz +`, + }, + "trailing whitespace": { + "x", + "foo \nbar\n", + "test_instance.foo (x): foo\ntest_instance.foo (x): bar\n", + }, + "blank lines": { + "x", + "foo\n\nbar\n\n\nbaz\n", + `test_instance.foo (x): foo +test_instance.foo (x): bar +test_instance.foo (x): baz +`, + }, + "no final newline": { + "x", + `foo +bar`, + `test_instance.foo (x): foo +test_instance.foo (x): bar +`, + }, + "CR, no LF": { + "MacOS 9?", + "foo\rbar\r", + `test_instance.foo (MacOS 9?): foo +test_instance.foo (MacOS 9?): bar +`, + }, + "CRLF": { + "winrm", + "foo\r\nbar\r\n", + `test_instance.foo (winrm): foo +test_instance.foo (winrm): bar +`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + h.ProvisionOutput(addr, tc.provisioner, tc.input) + result := done(t) + + if got := result.Stdout(); got != tc.wantOutput { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, tc.wantOutput) + } + }) + } +} + +// Test the PreRefresh hook in the normal path where the resource exists with +// an ID key and value in the state. 
+func TestPreRefresh(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListValEmpty(cty.String), + }) + + action, err := h.PreRefresh(addr, states.CurrentGen, priorState) + + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + if got, want := result.Stdout(), "test_instance.foo: Refreshing state... [id=test]\n"; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +func TestPreRefresh_concise(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.concise = true + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "bar": cty.ListValEmpty(cty.String), + }) + + _, err := h.PreRefresh(addr, states.CurrentGen, priorState) + + if err != nil { + t.Fatal(err) + } + + result := done(t) + + if got, want := result.Stdout(), ""; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +// Test that PreRefresh still works if no ID key and value can be determined +// from state. 
+func TestPreRefresh_noID(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + priorState := cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListValEmpty(cty.String), + }) + + action, err := h.PreRefresh(addr, states.CurrentGen, priorState) + + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + if got, want := result.Stdout(), "test_instance.foo: Refreshing state...\n"; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +// Test the very simple PreImportState hook. +func TestPreImportState(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + action, err := h.PreImportState(addr, "test") + + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + if got, want := result.Stdout(), "test_instance.foo: Importing from ID \"test\"...\n"; got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +// Test the PostImportState UI hook. Again, this hook behaviour seems odd to +// me (see below), so please don't consider these tests as justification for +// keeping this behaviour. 
+func TestPostImportState(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + h := NewUiHook(view) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + // The "Prepared [...] for import" lines display the type name of each of + // the imported resources passed to the hook. I'm not sure how it's + // possible for an import to result in a different resource type name than + // the target address, but the hook works like this so we're covering it. + imported := []providers.ImportedResource{ + { + TypeName: "test_some_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + }), + }, + { + TypeName: "test_other_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + }), + }, + } + + action, err := h.PostImportState(addr, imported) + + if err != nil { + t.Fatal(err) + } + if action != tofu.HookActionContinue { + t.Fatalf("Expected hook to continue, given: %#v", action) + } + result := done(t) + + want := `test_instance.foo: Import prepared! 
+ Prepared test_some_instance for import + Prepared test_other_instance for import +` + if got := result.Stdout(); got != want { + t.Fatalf("unexpected output\n got: %q\nwant: %q", got, want) + } +} + +func TestTruncateId(t *testing.T) { + testCases := []struct { + Input string + Expected string + MaxLen int + }{ + { + Input: "Hello world", + Expected: "H...d", + MaxLen: 3, + }, + { + Input: "Hello world", + Expected: "H...d", + MaxLen: 5, + }, + { + Input: "Hello world", + Expected: "He...d", + MaxLen: 6, + }, + { + Input: "Hello world", + Expected: "He...ld", + MaxLen: 7, + }, + { + Input: "Hello world", + Expected: "Hel...ld", + MaxLen: 8, + }, + { + Input: "Hello world", + Expected: "Hel...rld", + MaxLen: 9, + }, + { + Input: "Hello world", + Expected: "Hell...rld", + MaxLen: 10, + }, + { + Input: "Hello world", + Expected: "Hello world", + MaxLen: 11, + }, + { + Input: "Hello world", + Expected: "Hello world", + MaxLen: 12, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あ...さ", + MaxLen: 3, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あ...さ", + MaxLen: 5, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あい...さ", + MaxLen: 6, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あい...こさ", + MaxLen: 7, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あいう...こさ", + MaxLen: 8, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あいう...けこさ", + MaxLen: 9, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あいうえ...けこさ", + MaxLen: 10, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あいうえおかきくけこさ", + MaxLen: 11, + }, + { + Input: "あいうえおかきくけこさ", + Expected: "あいうえおかきくけこさ", + MaxLen: 12, + }, + } + for i, tc := range testCases { + testName := fmt.Sprintf("%d", i) + t.Run(testName, func(t *testing.T) { + out := truncateId(tc.Input, tc.MaxLen) + if out != tc.Expected { + t.Fatalf("Expected %q to be shortened to %d as %q (given: %q)", + tc.Input, tc.MaxLen, tc.Expected, out) + } + }) + } +} diff --git a/pkg/command/views/json/change.go b/pkg/command/views/json/change.go new file mode 100644 index 
00000000000..edf7f5d25c9 --- /dev/null +++ b/pkg/command/views/json/change.go @@ -0,0 +1,156 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/plans" +) + +func NewResourceInstanceChange(change *plans.ResourceInstanceChangeSrc) *ResourceInstanceChange { + c := &ResourceInstanceChange{ + Resource: newResourceAddr(change.Addr), + Action: changeAction(change.Action), + Reason: changeReason(change.ActionReason), + GeneratedConfig: change.GeneratedConfig, + } + + // The order here matters, we want the moved action to take precedence over + // the import action. We're basically taking "the most recent action" as the + // primary action in the streamed logs. That is to say, that if a resource + // is imported and then moved in a single operation then the change for that + // resource will be reported as ActionMove while the Importing flag will + // still be set to true. + // + // Since both the moved and imported actions only overwrite a NoOp this + // behaviour is consistent across the other actions as well. Something that + // is imported and then updated, or moved and then updated, will have the + // ActionUpdate as the recognised action for the change. 
+ + if !change.Addr.Equal(change.PrevRunAddr) { + if c.Action == ActionNoOp { + c.Action = ActionMove + } + pr := newResourceAddr(change.PrevRunAddr) + c.PreviousResource = &pr + } + if change.Importing != nil { + if c.Action == ActionNoOp { + c.Action = ActionImport + } + c.Importing = &Importing{ID: change.Importing.ID} + } + + return c +} + +type ResourceInstanceChange struct { + Resource ResourceAddr `json:"resource"` + PreviousResource *ResourceAddr `json:"previous_resource,omitempty"` + Action ChangeAction `json:"action"` + Reason ChangeReason `json:"reason,omitempty"` + Importing *Importing `json:"importing,omitempty"` + GeneratedConfig string `json:"generated_config,omitempty"` +} + +func (c *ResourceInstanceChange) String() string { + return fmt.Sprintf("%s: Plan to %s", c.Resource.Addr, c.Action) +} + +type ChangeAction string + +const ( + ActionNoOp ChangeAction = "noop" + ActionMove ChangeAction = "move" + ActionCreate ChangeAction = "create" + ActionRead ChangeAction = "read" + ActionUpdate ChangeAction = "update" + ActionReplace ChangeAction = "replace" + ActionDelete ChangeAction = "delete" + ActionImport ChangeAction = "import" + ActionForget ChangeAction = "remove" +) + +func changeAction(action plans.Action) ChangeAction { + switch action { + case plans.NoOp: + return ActionNoOp + case plans.Create: + return ActionCreate + case plans.Read: + return ActionRead + case plans.Update: + return ActionUpdate + case plans.DeleteThenCreate, plans.CreateThenDelete: + return ActionReplace + case plans.Delete: + return ActionDelete + case plans.Forget: + return ActionForget + default: + return ActionNoOp + } +} + +type ChangeReason string + +const ( + ReasonNone ChangeReason = "" + ReasonTainted ChangeReason = "tainted" + ReasonRequested ChangeReason = "requested" + ReasonReplaceTriggeredBy ChangeReason = "replace_triggered_by" + ReasonCannotUpdate ChangeReason = "cannot_update" + ReasonUnknown ChangeReason = "unknown" + + ReasonDeleteBecauseNoResourceConfig 
ChangeReason = "delete_because_no_resource_config" + ReasonDeleteBecauseWrongRepetition ChangeReason = "delete_because_wrong_repetition" + ReasonDeleteBecauseCountIndex ChangeReason = "delete_because_count_index" + ReasonDeleteBecauseEachKey ChangeReason = "delete_because_each_key" + ReasonDeleteBecauseNoModule ChangeReason = "delete_because_no_module" + ReasonDeleteBecauseNoMoveTarget ChangeReason = "delete_because_no_move_target" + ReasonReadBecauseConfigUnknown ChangeReason = "read_because_config_unknown" + ReasonReadBecauseDependencyPending ChangeReason = "read_because_dependency_pending" + ReasonReadBecauseCheckNested ChangeReason = "read_because_check_nested" +) + +func changeReason(reason plans.ResourceInstanceChangeActionReason) ChangeReason { + switch reason { + case plans.ResourceInstanceChangeNoReason: + return ReasonNone + case plans.ResourceInstanceReplaceBecauseTainted: + return ReasonTainted + case plans.ResourceInstanceReplaceByRequest: + return ReasonRequested + case plans.ResourceInstanceReplaceBecauseCannotUpdate: + return ReasonCannotUpdate + case plans.ResourceInstanceReplaceByTriggers: + return ReasonReplaceTriggeredBy + case plans.ResourceInstanceDeleteBecauseNoResourceConfig: + return ReasonDeleteBecauseNoResourceConfig + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + return ReasonDeleteBecauseWrongRepetition + case plans.ResourceInstanceDeleteBecauseCountIndex: + return ReasonDeleteBecauseCountIndex + case plans.ResourceInstanceDeleteBecauseEachKey: + return ReasonDeleteBecauseEachKey + case plans.ResourceInstanceDeleteBecauseNoModule: + return ReasonDeleteBecauseNoModule + case plans.ResourceInstanceReadBecauseConfigUnknown: + return ReasonReadBecauseConfigUnknown + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + return ReasonDeleteBecauseNoMoveTarget + case plans.ResourceInstanceReadBecauseDependencyPending: + return ReasonReadBecauseDependencyPending + case plans.ResourceInstanceReadBecauseCheckNested: + return 
ReasonReadBecauseCheckNested + default: + // This should never happen, but there's no good way to guarantee + // exhaustive handling of the enum, so a generic fall back is better + // than a misleading result or a panic + return ReasonUnknown + } +} diff --git a/pkg/command/views/json/change_summary.go b/pkg/command/views/json/change_summary.go new file mode 100644 index 00000000000..2083efc8a1f --- /dev/null +++ b/pkg/command/views/json/change_summary.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import "fmt" + +type Operation string + +const ( + OperationApplied Operation = "apply" + OperationDestroyed Operation = "destroy" + OperationPlanned Operation = "plan" +) + +type ChangeSummary struct { + Add int `json:"add"` + Change int `json:"change"` + Import int `json:"import"` + Remove int `json:"remove"` + Operation Operation `json:"operation"` +} + +// The summary strings for apply and plan are accidentally a public interface +// used by Terraform Cloud and Terraform Enterprise, so the exact formats of +// these strings are important. +func (cs *ChangeSummary) String() string { + switch cs.Operation { + case OperationApplied: + if cs.Import > 0 { + return fmt.Sprintf("Apply complete! Resources: %d imported, %d added, %d changed, %d destroyed.", cs.Import, cs.Add, cs.Change, cs.Remove) + } + return fmt.Sprintf("Apply complete! Resources: %d added, %d changed, %d destroyed.", cs.Add, cs.Change, cs.Remove) + case OperationDestroyed: + return fmt.Sprintf("Destroy complete! 
Resources: %d destroyed.", cs.Remove) + case OperationPlanned: + if cs.Import > 0 { + return fmt.Sprintf("Plan: %d to import, %d to add, %d to change, %d to destroy.", cs.Import, cs.Add, cs.Change, cs.Remove) + } + return fmt.Sprintf("Plan: %d to add, %d to change, %d to destroy.", cs.Add, cs.Change, cs.Remove) + default: + return fmt.Sprintf("%s: %d add, %d change, %d destroy", cs.Operation, cs.Add, cs.Change, cs.Remove) + } +} diff --git a/pkg/command/views/json/diagnostic.go b/pkg/command/views/json/diagnostic.go new file mode 100644 index 00000000000..a335c39888b --- /dev/null +++ b/pkg/command/views/json/diagnostic.go @@ -0,0 +1,517 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcled" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// These severities map to the tfdiags.Severity values, plus an explicit +// unknown in case that enum grows without us noticing here. +const ( + DiagnosticSeverityUnknown = "unknown" + DiagnosticSeverityError = "error" + DiagnosticSeverityWarning = "warning" +) + +// Diagnostic represents any tfdiags.Diagnostic value. The simplest form has +// just a severity, single line summary, and optional detail. If there is more +// information about the source of the diagnostic, this is represented in the +// range field. 
+type Diagnostic struct { + Severity string `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail"` + Address string `json:"address,omitempty"` + Range *DiagnosticRange `json:"range,omitempty"` + Snippet *DiagnosticSnippet `json:"snippet,omitempty"` +} + +// Pos represents a position in the source code. +type Pos struct { + // Line is a one-based count for the line in the indicated file. + Line int `json:"line"` + + // Column is a one-based count of Unicode characters from the start of the line. + Column int `json:"column"` + + // Byte is a zero-based offset into the indicated file. + Byte int `json:"byte"` +} + +// DiagnosticRange represents the filename and position of the diagnostic +// subject. This defines the range of the source to be highlighted in the +// output. Note that the snippet may include additional surrounding source code +// if the diagnostic has a context range. +// +// The Start position is inclusive, and the End position is exclusive. Exact +// positions are intended for highlighting for human interpretation only and +// are subject to change. +type DiagnosticRange struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` +} + +// DiagnosticSnippet represents source code information about the diagnostic. +// It is possible for a diagnostic to have a source (and therefore a range) but +// no source code can be found. In this case, the range field will be present and +// the snippet field will not. +type DiagnosticSnippet struct { + // Context is derived from HCL's hcled.ContextString output. This gives a + // high-level summary of the root context of the diagnostic: for example, + // the resource block in which an expression causes an error. + Context *string `json:"context"` + + // Code is a possibly-multi-line string of OpenTofu configuration, which + // includes both the diagnostic source and any relevant context as defined + // by the diagnostic. 
+ Code string `json:"code"` + + // StartLine is the line number in the source file for the first line of + // the snippet code block. This is not necessarily the same as the value of + // Range.Start.Line, as it is possible to have zero or more lines of + // context source code before the diagnostic range starts. + StartLine int `json:"start_line"` + + // HighlightStartOffset is the character offset into Code at which the + // diagnostic source range starts, which ought to be highlighted as such by + // the consumer of this data. + HighlightStartOffset int `json:"highlight_start_offset"` + + // HighlightEndOffset is the character offset into Code at which the + // diagnostic source range ends. + HighlightEndOffset int `json:"highlight_end_offset"` + + // Values is a sorted slice of expression values which may be useful in + // understanding the source of an error in a complex expression. + Values []DiagnosticExpressionValue `json:"values"` + + // FunctionCall is information about a function call whose failure is + // being reported by this diagnostic, if any. + FunctionCall *DiagnosticFunctionCall `json:"function_call,omitempty"` +} + +// DiagnosticExpressionValue represents an HCL traversal string (e.g. +// "var.foo") and a statement about its value while the expression was +// evaluated (e.g. "is a string", "will be known only after apply"). These are +// intended to help the consumer diagnose why an expression caused a diagnostic +// to be emitted. +type DiagnosticExpressionValue struct { + Traversal string `json:"traversal"` + Statement string `json:"statement"` +} + +// DiagnosticFunctionCall represents a function call whose information is +// being included as part of a diagnostic snippet. +type DiagnosticFunctionCall struct { + // CalledAs is the full name that was used to call this function, + // potentially including namespace prefixes if the function does not belong + // to the default function namespace. 
+ CalledAs string `json:"called_as"` + + // Signature is a description of the signature of the function that was + // called, if any. Might be omitted if we're reporting that a call failed + // because the given function name isn't known, for example. + Signature *Function `json:"signature,omitempty"` +} + +// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources, +// and returns a Diagnostic struct. +func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string]*hcl.File) *Diagnostic { //nolint:funlen,gocognit,gocyclo,cyclop // TODO(1818): Refactor this function. + var sev string + switch diag.Severity() { + case tfdiags.Error: + sev = DiagnosticSeverityError + case tfdiags.Warning: + sev = DiagnosticSeverityWarning + default: + sev = DiagnosticSeverityUnknown + } + + desc := diag.Description() + + diagnostic := &Diagnostic{ + Severity: sev, + Summary: desc.Summary, + Detail: desc.Detail, + Address: desc.Address, + } + + sourceRefs := diag.Source() + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + + // Some diagnostic sources fail to set the end of the subject range. + if highlightRange.End == (hcl.Pos{}) { + highlightRange.End = highlightRange.Start + } + + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. + snippetRange = hcl.RangeOver(snippetRange, highlightRange) + + // Empty ranges result in odd diagnostic output, so extend the end to + // ensure there's at least one byte in the snippet or highlight. 
+ if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + diagnostic.Range = &DiagnosticRange{ + Filename: highlightRange.Filename, + Start: Pos{ + Line: highlightRange.Start.Line, + Column: highlightRange.Start.Column, + Byte: highlightRange.Start.Byte, + }, + End: Pos{ + Line: highlightRange.End.Line, + Column: highlightRange.End.Column, + Byte: highlightRange.End.Byte, + }, + } + + var src []byte + if sources != nil { + if f, ok := sources[highlightRange.Filename]; ok { + src = f.Bytes + } + } + + // If we have a source file for the diagnostic, we can emit a code + // snippet. + if src != nil { + diagnostic.Snippet = &DiagnosticSnippet{ + StartLine: snippetRange.Start.Line, + + // Ensure that the default Values struct is an empty array, as this + // makes consuming the JSON structure easier in most languages. + Values: []DiagnosticExpressionValue{}, + } + + file, offset := parseRange(src, highlightRange) + + // Some diagnostics may have a useful top-level context to add to + // the code snippet output. + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + diagnostic.Snippet.Context = &contextStr + } + + // Build the string of the code snippet, tracking at which byte of + // the file the snippet starts. + var codeStartByte int + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + var code strings.Builder + for sc.Scan() { + lineRange := sc.Range() + if lineRange.Overlaps(snippetRange) { + if codeStartByte == 0 && code.Len() == 0 { + codeStartByte = lineRange.Start.Byte + } + code.Write(lineRange.SliceBytes(src)) + code.WriteRune('\n') + } + } + codeStr := strings.TrimSuffix(code.String(), "\n") + diagnostic.Snippet.Code = codeStr + + // Calculate the start and end byte of the highlight range relative + // to the code snippet string. 
+ start := highlightRange.Start.Byte - codeStartByte + end := start + (highlightRange.End.Byte - highlightRange.Start.Byte) + + // We can end up with some quirky results here in edge cases like + // when a source range starts or ends at a newline character, + // so we'll cap the results at the bounds of the highlight range + // so that consumers of this data don't need to contend with + // out-of-bounds errors themselves. + if start < 0 { + start = 0 + } else if start > len(codeStr) { + start = len(codeStr) + } + if end < 0 { + end = 0 + } else if end > len(codeStr) { + end = len(codeStr) + } + + diagnostic.Snippet.HighlightStartOffset = start + diagnostic.Snippet.HighlightEndOffset = end + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + values := make([]DiagnosticExpressionValue, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag) + includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. 
+ traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + value := DiagnosticExpressionValue{ + Traversal: traversalStr, + } + switch { + case val.HasMark(marks.Sensitive): + // We only mention a sensitive value if the diagnostic + // we're rendering is explicitly marked as being + // caused by sensitive values, because otherwise + // readers tend to be misled into thinking the error + // is caused by the sensitive value even when it isn't. + if !includeSensitive { + continue Traversals + } + // Even when we do mention one, we keep it vague + // in order to minimize the chance of giving away + // whatever was sensitive about it. + value.Statement = "has a sensitive value" + case !val.IsKnown(): + // We'll avoid saying anything about unknown or + // "known after apply" unless the diagnostic is + // explicitly marked as being caused by unknown + // values, because otherwise readers tend to be + // misled into thinking the error is caused by the + // unknown value even when it isn't. 
+ if ty := val.Type(); ty != cty.DynamicPseudoType { + if includeUnknown { + switch { + case ty.IsCollectionType(): + valRng := val.Range() + minLen := valRng.LengthLowerBound() + maxLen := valRng.LengthUpperBound() + const maxLimit = 1024 // (upper limit is just an arbitrary value to avoid showing distracting large numbers in the UI) + switch { + case minLen == maxLen: + value.Statement = fmt.Sprintf("is a %s of length %d, known only after apply", ty.FriendlyName(), minLen) + case minLen != 0 && maxLen <= maxLimit: + value.Statement = fmt.Sprintf("is a %s with between %d and %d elements, known only after apply", ty.FriendlyName(), minLen, maxLen) + case minLen != 0: + value.Statement = fmt.Sprintf("is a %s with at least %d elements, known only after apply", ty.FriendlyName(), minLen) + case maxLen <= maxLimit: + value.Statement = fmt.Sprintf("is a %s with up to %d elements, known only after apply", ty.FriendlyName(), maxLen) + default: + value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName()) + } + default: + value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName()) + } + } else { + value.Statement = fmt.Sprintf("is a %s", ty.FriendlyName()) + } + } else { + if !includeUnknown { + continue Traversals + } + value.Statement = "will be known only after apply" + } + default: + value.Statement = fmt.Sprintf("is %s", compactValueStr(val)) + } + values = append(values, value) + seen[traversalStr] = struct{}{} + } + } + sort.Slice(values, func(i, j int) bool { + return values[i].Traversal < values[j].Traversal + }) + diagnostic.Snippet.Values = values + + if callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag); callInfo != nil && callInfo.CalledFunctionName() != "" { + calledAs := callInfo.CalledFunctionName() + baseName := calledAs + if idx := strings.LastIndex(baseName, "::"); idx >= 0 { + baseName = baseName[idx+2:] + } + callInfo := &DiagnosticFunctionCall{ + CalledAs: calledAs, + } + if f, ok := 
ctx.Functions[calledAs]; ok { + callInfo.Signature = DescribeFunction(baseName, f) + } + diagnostic.Snippet.FunctionCall = callInfo + } + + } + + } + } + + return diagnostic +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. + parser := hclparse.NewParser() + var file *hcl.File + + // Ignore diagnostics here as there is nothing we can do with them. + if strings.HasSuffix(filename, ".json") { + file, _ = parser.ParseJSON(src, filename) + } else { + file, _ = parser.ParseHCL(src, filename) + } + + return file, offset +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + if val.HasMark(marks.Sensitive) { + // We check this in here just to make sure, but note that the caller + // of compactValueStr ought to have already checked this and skipped + // calling into compactValueStr anyway, so this shouldn't actually + // be reachable. 
+ return "(sensitive value)" + } + + // WARNING: We've only checked that the value isn't sensitive _shallowly_ + // here, and so we must never show any element values from complex types + // in here. However, it's fine to show map keys and attribute names because + // those are never sensitive in isolation: the entire value would be + // sensitive in that case. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. + return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. 
+func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} diff --git a/pkg/command/views/json/diagnostic_test.go b/pkg/command/views/json/diagnostic_test.go new file mode 100644 index 00000000000..f4756c0154b --- /dev/null +++ b/pkg/command/views/json/diagnostic_test.go @@ -0,0 +1,961 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestNewDiagnostic(t *testing.T) { + // Common HCL for diags with source ranges. 
This does not have any real + // semantic errors, but we can synthesize fake HCL errors which will + // exercise the diagnostic rendering code using this + sources := map[string]*hcl.File{ + "test.tf": {Bytes: []byte(`resource "test_resource" "test" { + foo = var.boop["hello!"] + bar = { + baz = maybe + } +} +`)}, + "short.tf": {Bytes: []byte("bad source code")}, + "odd-comment.tf": {Bytes: []byte("foo\n\n#\n")}, + "values.tf": {Bytes: []byte(`[ + var.a, + var.b, + var.c, + var.d, + var.e, + var.f, + var.g, + var.h, + var.i, + var.j, + var.k, +] +`)}, + } + testCases := map[string]struct { + diag interface{} // allow various kinds of diags + want *Diagnostic + }{ + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Oh no", + "Something is broken", + ), + &Diagnostic{ + Severity: "warning", + Summary: "Oh no", + Detail: "Something is broken", + }, + }, + "error with source code unavailable": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad news", + Detail: "It went wrong", + Subject: &hcl.Range{ + Filename: "modules/oops/missing.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 2, Column: 12, Byte: 33}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Bad news", + Detail: "It went wrong", + Range: &DiagnosticRange{ + Filename: "modules/oops/missing.tf", + Start: Pos{ + Line: 1, + Column: 6, + Byte: 5, + }, + End: Pos{ + Line: 2, + Column: 12, + Byte: 33, + }, + }, + }, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Tiny explosion", + Detail: "Unexpected detonation while parsing", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + End: hcl.Pos{Line: 1, Column: 25, Byte: 24}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Tiny explosion", + Detail: "Unexpected detonation while parsing", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 1, + Column: 10, + Byte: 9, + }, + End: 
Pos{ + Line: 1, + Column: 25, + Byte: 24, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: `resource "test_resource" "test" {`, + StartLine: 1, + HighlightStartOffset: 9, + HighlightEndOffset: 24, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with source code subject but no context": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nonsense input", + Detail: "What you wrote makes no sense", + Subject: &hcl.Range{ + Filename: "short.tf", + Start: hcl.Pos{Line: 1, Column: 5, Byte: 4}, + End: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Nonsense input", + Detail: "What you wrote makes no sense", + Range: &DiagnosticRange{ + Filename: "short.tf", + Start: Pos{ + Line: 1, + Column: 5, + Byte: 4, + }, + End: Pos{ + Line: 1, + Column: 10, + Byte: 9, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: nil, + Code: (`bad source code`), + StartLine: (1), + HighlightStartOffset: (4), + HighlightEndOffset: (9), + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with multi-line snippet": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "In this house we respect booleans", + Detail: "True or false, there is no maybe", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 11, Byte: 81}, + End: hcl.Pos{Line: 4, Column: 16, Byte: 86}, + }, + Context: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 3, Byte: 63}, + End: hcl.Pos{Line: 5, Column: 4, Byte: 90}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "In this house we respect booleans", + Detail: "True or false, there is no maybe", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 4, + Column: 11, + Byte: 81, + }, + End: Pos{ + Line: 4, + Column: 16, + Byte: 86, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: " bar = {\n baz = maybe\n }", + 
StartLine: 3, + HighlightStartOffset: 20, + HighlightEndOffset: 25, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with empty highlight range at end of source code": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "You forgot something", + Detail: "Please finish your thought", + Subject: &hcl.Range{ + Filename: "short.tf", + Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + End: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "You forgot something", + Detail: "Please finish your thought", + Range: &DiagnosticRange{ + Filename: "short.tf", + Start: Pos{ + Line: 1, + Column: 16, + Byte: 15, + }, + End: Pos{ + Line: 1, + Column: 17, + Byte: 16, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: ("bad source code"), + StartLine: (1), + HighlightStartOffset: (15), + HighlightEndOffset: (15), + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error with unset highlight end position": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "There is no end", + Detail: "But there is a beginning", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 16, Byte: 15}, + End: hcl.Pos{Line: 0, Column: 0, Byte: 0}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "There is no end", + Detail: "But there is a beginning", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 1, + Column: 16, + Byte: 15, + }, + End: Pos{ + Line: 1, + Column: 17, + Byte: 16, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: `resource "test_resource" "test" {`, + StartLine: 1, + HighlightStartOffset: 15, + HighlightEndOffset: 16, + Values: []DiagnosticExpressionValue{}, + }, + }, + }, + "error whose range starts at a newline": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid newline", + Detail: "How awkward!", + Subject: &hcl.Range{ + Filename: "odd-comment.tf", + Start: hcl.Pos{Line: 2, Column: 
5, Byte: 4}, + End: hcl.Pos{Line: 3, Column: 1, Byte: 6}, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Invalid newline", + Detail: "How awkward!", + Range: &DiagnosticRange{ + Filename: "odd-comment.tf", + Start: Pos{ + Line: 2, + Column: 5, + Byte: 4, + }, + End: Pos{ + Line: 3, + Column: 1, + Byte: 6, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: `#`, + StartLine: 2, + Values: []DiagnosticExpressionValue{}, + + // Due to the range starting at a newline on a blank + // line, we end up stripping off the initial newline + // to produce only a one-line snippet. That would + // therefore cause the start offset to naturally be + // -1, just before the Code we returned, but then we + // force it to zero so that the result will still be + // in range for a byte-oriented slice of Code. + HighlightStartOffset: 0, + HighlightEndOffset: 1, + }, + }, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh"), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + 
StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `is "bleurgh"`, + }, + }, + }, + }, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `has a sensitive value`, + }, + }, + }, + }, + }, + "error with source code subject and expression referring to sensitive value when not caused by sensitive values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 
26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + // The sensitive value is filtered out because this is + // not a sensitive-value-related diagnostic message. 
+ }, + }, + }, + }, + "error with source code subject and expression referring to a collection containing a sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.StringVal("bleurgh").Mark(marks.Sensitive), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop`, + Statement: `is map of string with 1 element`, + }, + }, + }, + }, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": 
cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.String), + }), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `is a string, known only after apply`, + }, + }, + }, + }, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.DynamicPseudoType), + }), + }), + }, + }, + Extra: diagnosticCausedByUnknown(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = 
var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.boop["hello!"]`, + Statement: `will be known only after apply`, + }, + }, + }, + }, + }, + "error with source code subject and unknown expression of unknown type when not caused by unknown values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 2, Column: 9, Byte: 42}, + End: hcl.Pos{Line: 2, Column: 26, Byte: 59}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "boop"}, + hcl.TraverseIndex{Key: cty.StringVal("hello!")}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "boop": cty.MapVal(map[string]cty.Value{ + "hello!": cty.UnknownVal(cty.DynamicPseudoType), + }), + }), + }, + }, + }, + &Diagnostic{ + Severity: "error", + Summary: "Wrong noises", + Detail: "Biological sounds are not allowed", + Range: &DiagnosticRange{ + Filename: "test.tf", + Start: Pos{ + Line: 2, + Column: 9, + Byte: 42, + }, + End: Pos{ + Line: 2, + Column: 26, + Byte: 59, + }, + }, + Snippet: &DiagnosticSnippet{ + Context: strPtr(`resource "test_resource" "test"`), + Code: (` foo = var.boop["hello!"]`), + StartLine: (2), + HighlightStartOffset: (8), + HighlightEndOffset: (25), + Values: []DiagnosticExpressionValue{ + // The unknown value is filtered out because this is + // not an unknown-value-related diagnostic message. 
+ }, + }, + }, + }, + "error with source code subject with multiple expression values": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Catastrophic failure", + Detail: "Basically, everything went wrong", + Subject: &hcl.Range{ + Filename: "values.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 13, Column: 2, Byte: 102}, + }, + Expression: hcltest.MockExprList([]hcl.Expression{ + hcltest.MockExprTraversalSrc("var.a"), + hcltest.MockExprTraversalSrc("var.b"), + hcltest.MockExprTraversalSrc("var.c"), + hcltest.MockExprTraversalSrc("var.d"), + hcltest.MockExprTraversalSrc("var.e"), + hcltest.MockExprTraversalSrc("var.f"), + hcltest.MockExprTraversalSrc("var.g"), + hcltest.MockExprTraversalSrc("var.h"), + hcltest.MockExprTraversalSrc("var.i"), + hcltest.MockExprTraversalSrc("var.j"), + hcltest.MockExprTraversalSrc("var.k"), + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + "b": cty.NumberFloatVal(123.45), + "c": cty.NullVal(cty.String), + "d": cty.StringVal("secret").Mark(marks.Sensitive), + "e": cty.False, + "f": cty.ListValEmpty(cty.String), + "g": cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }), + "h": cty.ListVal([]cty.Value{ + cty.StringVal("boop"), + cty.StringVal("beep"), + cty.StringVal("blorp"), + }), + "i": cty.EmptyObjectVal, + "j": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "k": cty.ObjectVal(map[string]cty.Value{ + "a": cty.True, + "b": cty.False, + }), + }), + }, + }, + Extra: diagnosticCausedBySensitive(true), + }, + &Diagnostic{ + Severity: "error", + Summary: "Catastrophic failure", + Detail: "Basically, everything went wrong", + Range: &DiagnosticRange{ + Filename: "values.tf", + Start: Pos{ + Line: 1, + Column: 1, + Byte: 0, + }, + End: Pos{ + Line: 13, + Column: 2, + Byte: 102, + }, + }, + Snippet: &DiagnosticSnippet{ + Code: `[ + var.a, + var.b, + var.c, + var.d, + var.e, 
+ var.f, + var.g, + var.h, + var.i, + var.j, + var.k, +]`, + StartLine: (1), + HighlightStartOffset: (0), + HighlightEndOffset: (102), + Values: []DiagnosticExpressionValue{ + { + Traversal: `var.a`, + Statement: `is true`, + }, + { + Traversal: `var.b`, + Statement: `is 123.45`, + }, + { + Traversal: `var.c`, + Statement: `is null`, + }, + { + Traversal: `var.d`, + Statement: `has a sensitive value`, + }, + { + Traversal: `var.e`, + Statement: `is false`, + }, + { + Traversal: `var.f`, + Statement: `is empty list of string`, + }, + { + Traversal: `var.g`, + Statement: `is map of string with 1 element`, + }, + { + Traversal: `var.h`, + Statement: `is list of string with 3 elements`, + }, + { + Traversal: `var.i`, + Statement: `is object with no attributes`, + }, + { + Traversal: `var.j`, + Statement: `is object with 1 attribute "foo"`, + }, + { + Traversal: `var.k`, + Statement: `is object with 2 attributes`, + }, + }, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Convert the diag into a tfdiags.Diagnostic + var diags tfdiags.Diagnostics + diags = diags.Append(tc.diag) + + got := NewDiagnostic(diags[0], sources) + if !cmp.Equal(tc.want, got) { + t.Fatalf("wrong result\n:%s", cmp.Diff(tc.want, got)) + } + }) + + t.Run(fmt.Sprintf("golden test for %s", name), func(t *testing.T) { + // Convert the diag into a tfdiags.Diagnostic + var diags tfdiags.Diagnostics + diags = diags.Append(tc.diag) + + got := NewDiagnostic(diags[0], sources) + + // Render the diagnostic to indented JSON + gotBytes, err := json.MarshalIndent(got, "", " ") + if err != nil { + t.Fatal(err) + } + + // Compare against the golden reference + filename := path.Join( + "testdata", + "diagnostic", + fmt.Sprintf("%s.json", strings.ReplaceAll(name, " ", "-")), + ) + + // Generate golden reference by uncommenting the next two lines: + // gotBytes = append(gotBytes, '\n') + // os.WriteFile(filename, gotBytes, 0644) + + wantFile, err := os.Open(filename) + 
if err != nil { + t.Fatalf("failed to open golden file: %s", err) + } + defer wantFile.Close() + wantBytes, err := io.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + + // Don't care about leading or trailing whitespace + gotString := normaliseNewlines(strings.TrimSpace(string(gotBytes))) + wantString := normaliseNewlines(strings.TrimSpace(string(wantBytes))) + + if !cmp.Equal(wantString, gotString) { + t.Fatalf("wrong result\n:%s", cmp.Diff(wantString, gotString)) + } + }) + } +} + +// Function to normalise newlines in a string for Windows +func normaliseNewlines(s string) string { + return strings.ReplaceAll(s, "\r\n", "\n") +} + +// Helper function to make constructing literal Diagnostics easier. There +// are fields which are pointer-to-string to ensure that the rendered JSON +// results in `null` for an empty value, rather than `""`. +func strPtr(s string) *string { return &s } + +// diagnosticCausedByUnknown is a testing helper for exercising our logic +// for selectively showing unknown values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by unknown values. +type diagnosticCausedByUnknown bool + +var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true) + +func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool { + return bool(e) +} + +// diagnosticCausedBySensitive is a testing helper for exercising our logic +// for selectively showing sensitive values alongside our source snippets for +// diagnostics that are explicitly marked as being caused by sensitive values. 
+type diagnosticCausedBySensitive bool + +var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true) + +func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool { + return bool(e) +} diff --git a/pkg/command/views/json/function.go b/pkg/command/views/json/function.go new file mode 100644 index 00000000000..5cfe2f85b09 --- /dev/null +++ b/pkg/command/views/json/function.go @@ -0,0 +1,117 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "encoding/json" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// Function is a description of the JSON representation of the signature of +// a function callable from the OpenTofu language. +type Function struct { + // Name is the leaf name of the function, without any namespace prefix. + Name string `json:"name"` + + Params []FunctionParam `json:"params"` + VariadicParam *FunctionParam `json:"variadic_param,omitempty"` + + // ReturnType is a type constraint which is a static approximation of the + // possibly-dynamic return type of the function. + ReturnType json.RawMessage `json:"return_type"` + + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` +} + +// FunctionParam represents a single parameter to a function, as represented +// by type Function. +type FunctionParam struct { + // Name is a name for the parameter which is used primarily for + // documentation purposes, because function arguments are positional + // and therefore don't appear directly in configuration source code. + Name string `json:"name"` + + // Type is a type constraint which is a static approximation of the + // possibly-dynamic type of the parameter. Particular functions may + // have additional requirements that a type constraint alone cannot + // represent. 
+ Type json.RawMessage `json:"type"` + + // Maybe some of the other fields in function.Parameter would be + // interesting to describe here too, but we'll wait to see if there + // is a use-case first. + + Description string `json:"description,omitempty"` + DescriptionKind string `json:"description_kind,omitempty"` +} + +// DescribeFunction returns a description of the signature of the given cty +// function, as a pointer to this package's serializable type Function. +func DescribeFunction(name string, f function.Function) *Function { + ret := &Function{ + Name: name, + } + + params := f.Params() + ret.Params = make([]FunctionParam, len(params)) + typeCheckArgs := make([]cty.Type, len(params), len(params)+1) + for i, param := range params { + ret.Params[i] = describeFunctionParam(¶m) + typeCheckArgs[i] = param.Type + } + if varParam := f.VarParam(); varParam != nil { + descParam := describeFunctionParam(varParam) + ret.VariadicParam = &descParam + typeCheckArgs = append(typeCheckArgs, varParam.Type) + } + + retType, err := f.ReturnType(typeCheckArgs) + if err != nil { + // Getting an error when type-checking with exactly the type constraints + // the function called for is weird, so we'll just treat it as if it + // has a dynamic return type instead, for our purposes here. + // One reason this can happen is for a function which has a variadic + // parameter but has logic inside it which considers it invalid to + // specify exactly one argument for that parameter (since that's what + // we did in typeCheckArgs as an approximation of a valid call above.) + retType = cty.DynamicPseudoType + } + + if raw, err := retType.MarshalJSON(); err != nil { + // Again, we'll treat any errors as if the function is dynamically + // typed because it would be weird to get here. 
+ ret.ReturnType = json.RawMessage(`"dynamic"`) + } else { + ret.ReturnType = json.RawMessage(raw) + } + + // We don't currently have any sense of descriptions for functions and + // their parameters, so we'll just leave those fields unpopulated for now. + + return ret +} + +func describeFunctionParam(p *function.Parameter) FunctionParam { + ret := FunctionParam{ + Name: p.Name, + } + + if raw, err := p.Type.MarshalJSON(); err != nil { + // We'll treat any errors as if the function is dynamically + // typed because it would be weird to get here. + ret.Type = json.RawMessage(`"dynamic"`) + } else { + ret.Type = json.RawMessage(raw) + } + + // We don't currently have any sense of descriptions for functions and + // their parameters, so we'll just leave those fields unpopulated for now. + + return ret +} diff --git a/pkg/command/views/json/function_test.go b/pkg/command/views/json/function_test.go new file mode 100644 index 00000000000..b029ac32d7f --- /dev/null +++ b/pkg/command/views/json/function_test.go @@ -0,0 +1,97 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +func TestDescribeFunction(t *testing.T) { + // NOTE: This test case is referring to some real functions in other + // packages, and so if those functions change signature later it will + // probably make some cases here fail. If that is the cause of the failure, + // it's fine to update the test here to match rather than to revert the + // change to the function signature, as long as the change to the + // function signature is otherwise within the bounds of our compatibility + // promises. 
+ + tests := map[string]struct { + Function function.Function + Want *Function + }{ + "upper": { + Function: stdlib.UpperFunc, + Want: &Function{ + Name: "upper", + Params: []FunctionParam{ + { + Name: "str", + Type: json.RawMessage(`"string"`), + }, + }, + ReturnType: json.RawMessage(`"string"`), + }, + }, + "coalesce": { + Function: stdlib.CoalesceFunc, + Want: &Function{ + Name: "coalesce", + Params: []FunctionParam{}, + VariadicParam: &FunctionParam{ + Name: "vals", + Type: json.RawMessage(`"dynamic"`), + }, + ReturnType: json.RawMessage(`"dynamic"`), + }, + }, + "join": { + Function: stdlib.JoinFunc, + Want: &Function{ + Name: "join", + Params: []FunctionParam{ + { + Name: "separator", + Type: json.RawMessage(`"string"`), + }, + }, + VariadicParam: &FunctionParam{ + Name: "lists", + Type: json.RawMessage(`["list","string"]`), + }, + ReturnType: json.RawMessage(`"string"`), + }, + }, + "jsonencode": { + Function: stdlib.JSONEncodeFunc, + Want: &Function{ + Name: "jsonencode", + Params: []FunctionParam{ + { + Name: "val", + Type: json.RawMessage(`"dynamic"`), + }, + }, + ReturnType: json.RawMessage(`"string"`), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := DescribeFunction(name, test.Function) + want := test.Want + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/pkg/command/views/json/hook.go b/pkg/command/views/json/hook.go new file mode 100644 index 00000000000..9db0ee44e2f --- /dev/null +++ b/pkg/command/views/json/hook.go @@ -0,0 +1,387 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "fmt" + "time" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +type Hook interface { + HookType() MessageType + String() string +} + +// ApplyStart: triggered by PreApply hook +type applyStart struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` + actionVerb string +} + +var _ Hook = (*applyStart)(nil) + +func (h *applyStart) HookType() MessageType { + return MessageApplyStart +} + +func (h *applyStart) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: %s...%s", h.Resource.Addr, h.actionVerb, id) +} + +func NewApplyStart(addr addrs.AbsResourceInstance, action plans.Action, idKey string, idValue string) Hook { + hook := &applyStart{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + IDKey: idKey, + IDValue: idValue, + actionVerb: startActionVerb(action), + } + + return hook +} + +// ApplyProgress: currently triggered by a timer started on PreApply. In +// future, this might also be triggered by provider progress reporting. +type applyProgress struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + Elapsed float64 `json:"elapsed_seconds"` + actionVerb string + elapsed time.Duration +} + +var _ Hook = (*applyProgress)(nil) + +func (h *applyProgress) HookType() MessageType { + return MessageApplyProgress +} + +func (h *applyProgress) String() string { + return fmt.Sprintf("%s: Still %s... 
[%s elapsed]", h.Resource.Addr, h.actionVerb, h.elapsed) +} + +func NewApplyProgress(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { + return &applyProgress{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + Elapsed: elapsed.Seconds(), + actionVerb: progressActionVerb(action), + elapsed: elapsed, + } +} + +// ApplyComplete: triggered by PostApply hook +type applyComplete struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` + Elapsed float64 `json:"elapsed_seconds"` + actionNoun string + elapsed time.Duration +} + +var _ Hook = (*applyComplete)(nil) + +func (h *applyComplete) HookType() MessageType { + return MessageApplyComplete +} + +func (h *applyComplete) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: %s complete after %s%s", h.Resource.Addr, h.actionNoun, h.elapsed, id) +} + +func NewApplyComplete(addr addrs.AbsResourceInstance, action plans.Action, idKey, idValue string, elapsed time.Duration) Hook { + return &applyComplete{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + IDKey: idKey, + IDValue: idValue, + Elapsed: elapsed.Seconds(), + actionNoun: actionNoun(action), + elapsed: elapsed, + } +} + +// ApplyErrored: triggered by PostApply hook on failure. This will be followed +// by diagnostics when the apply finishes. 
+type applyErrored struct { + Resource ResourceAddr `json:"resource"` + Action ChangeAction `json:"action"` + Elapsed float64 `json:"elapsed_seconds"` + actionNoun string + elapsed time.Duration +} + +var _ Hook = (*applyErrored)(nil) + +func (h *applyErrored) HookType() MessageType { + return MessageApplyErrored +} + +func (h *applyErrored) String() string { + return fmt.Sprintf("%s: %s errored after %s", h.Resource.Addr, h.actionNoun, h.elapsed) +} + +func NewApplyErrored(addr addrs.AbsResourceInstance, action plans.Action, elapsed time.Duration) Hook { + return &applyErrored{ + Resource: newResourceAddr(addr), + Action: changeAction(action), + Elapsed: elapsed.Seconds(), + actionNoun: actionNoun(action), + elapsed: elapsed, + } +} + +// ProvisionStart: triggered by PreProvisionInstanceStep hook +type provisionStart struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionStart)(nil) + +func (h *provisionStart) HookType() MessageType { + return MessageProvisionStart +} + +func (h *provisionStart) String() string { + return fmt.Sprintf("%s: Provisioning with '%s'...", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionStart(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionStart{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// ProvisionProgress: triggered by ProvisionOutput hook +type provisionProgress struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` + Output string `json:"output"` +} + +var _ Hook = (*provisionProgress)(nil) + +func (h *provisionProgress) HookType() MessageType { + return MessageProvisionProgress +} + +func (h *provisionProgress) String() string { + return fmt.Sprintf("%s: (%s): %s", h.Resource.Addr, h.Provisioner, h.Output) +} + +func NewProvisionProgress(addr addrs.AbsResourceInstance, provisioner string, output string) Hook { + return &provisionProgress{ + Resource: 
newResourceAddr(addr), + Provisioner: provisioner, + Output: output, + } +} + +// ProvisionComplete: triggered by PostProvisionInstanceStep hook +type provisionComplete struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionComplete)(nil) + +func (h *provisionComplete) HookType() MessageType { + return MessageProvisionComplete +} + +func (h *provisionComplete) String() string { + return fmt.Sprintf("%s: (%s) Provisioning complete", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionComplete(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionComplete{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// ProvisionErrored: triggered by PostProvisionInstanceStep hook on failure. +// This will be followed by diagnostics when the apply finishes. +type provisionErrored struct { + Resource ResourceAddr `json:"resource"` + Provisioner string `json:"provisioner"` +} + +var _ Hook = (*provisionErrored)(nil) + +func (h *provisionErrored) HookType() MessageType { + return MessageProvisionErrored +} + +func (h *provisionErrored) String() string { + return fmt.Sprintf("%s: (%s) Provisioning errored", h.Resource.Addr, h.Provisioner) +} + +func NewProvisionErrored(addr addrs.AbsResourceInstance, provisioner string) Hook { + return &provisionErrored{ + Resource: newResourceAddr(addr), + Provisioner: provisioner, + } +} + +// RefreshStart: triggered by PreRefresh hook +type refreshStart struct { + Resource ResourceAddr `json:"resource"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` +} + +var _ Hook = (*refreshStart)(nil) + +func (h *refreshStart) HookType() MessageType { + return MessageRefreshStart +} + +func (h *refreshStart) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: Refreshing state...%s", h.Resource.Addr, id) 
+} + +func NewRefreshStart(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { + return &refreshStart{ + Resource: newResourceAddr(addr), + IDKey: idKey, + IDValue: idValue, + } +} + +// RefreshComplete: triggered by PostRefresh hook +type refreshComplete struct { + Resource ResourceAddr `json:"resource"` + IDKey string `json:"id_key,omitempty"` + IDValue string `json:"id_value,omitempty"` +} + +var _ Hook = (*refreshComplete)(nil) + +func (h *refreshComplete) HookType() MessageType { + return MessageRefreshComplete +} + +func (h *refreshComplete) String() string { + var id string + if h.IDKey != "" && h.IDValue != "" { + id = fmt.Sprintf(" [%s=%s]", h.IDKey, h.IDValue) + } + return fmt.Sprintf("%s: Refresh complete%s", h.Resource.Addr, id) +} + +func NewRefreshComplete(addr addrs.AbsResourceInstance, idKey, idValue string) Hook { + return &refreshComplete{ + Resource: newResourceAddr(addr), + IDKey: idKey, + IDValue: idValue, + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// present-tense verb for the applyStart hook message. +func startActionVerb(action plans.Action) string { + switch action { + case plans.Create: + return "Creating" + case plans.Update: + return "Modifying" + case plans.Delete: + return "Destroying" + case plans.Read: + return "Refreshing" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "Replacing" + case plans.Forget: + return "Removing" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "Applying". + fallthrough + default: + return "Applying" + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// present-tense verb for the applyProgress hook message. This will be +// prefixed with "Still ", so it is lower-case. 
+func progressActionVerb(action plans.Action) string { + switch action { + case plans.Create: + return "creating" + case plans.Update: + return "modifying" + case plans.Delete: + return "destroying" + case plans.Read: + return "refreshing" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "replacing" + case plans.Forget: + return "removing" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "applying". + fallthrough + default: + return "applying" + } +} + +// Convert the subset of plans.Action values we expect to receive into a +// noun for the applyComplete and applyErrored hook messages. This will be +// combined into a phrase like "Creation complete after 1m4s". +func actionNoun(action plans.Action) string { + switch action { + case plans.Create: + return "Creation" + case plans.Update: + return "Modifications" + case plans.Delete: + return "Destruction" + case plans.Read: + return "Refresh" + case plans.CreateThenDelete, plans.DeleteThenCreate: + // This is not currently possible to reach, as we receive separate + // passes for create and delete + return "Replacement" + case plans.Forget: + return "Removal" + case plans.NoOp: + // This should never be possible: a no-op planned change should not + // be applied. We'll fall back to "Apply". + fallthrough + default: + return "Apply" + } +} diff --git a/pkg/command/views/json/importing.go b/pkg/command/views/json/importing.go new file mode 100644 index 00000000000..cbf5322ec65 --- /dev/null +++ b/pkg/command/views/json/importing.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +// Importing contains metadata about a resource change that includes an import +// action. 
+// +// Every field in here should be treated as optional as future versions do not +// make a guarantee that they will retain the format of this change. +// +// Consumers should be capable of rendering/parsing the Importing struct even +// if it does not have the ID field set. +type Importing struct { + ID string `json:"id,omitempty"` +} diff --git a/pkg/command/views/json/message_types.go b/pkg/command/views/json/message_types.go new file mode 100644 index 00000000000..5eccdc63a59 --- /dev/null +++ b/pkg/command/views/json/message_types.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +type MessageType string + +const ( + // Generic messages + MessageVersion MessageType = "version" + MessageLog MessageType = "log" + MessageDiagnostic MessageType = "diagnostic" + + // Operation results + MessageResourceDrift MessageType = "resource_drift" + MessagePlannedChange MessageType = "planned_change" + MessageChangeSummary MessageType = "change_summary" + MessageOutputs MessageType = "outputs" + + // Hook-driven messages + MessageApplyStart MessageType = "apply_start" + MessageApplyProgress MessageType = "apply_progress" + MessageApplyComplete MessageType = "apply_complete" + MessageApplyErrored MessageType = "apply_errored" + MessageProvisionStart MessageType = "provision_start" + MessageProvisionProgress MessageType = "provision_progress" + MessageProvisionComplete MessageType = "provision_complete" + MessageProvisionErrored MessageType = "provision_errored" + MessageRefreshStart MessageType = "refresh_start" + MessageRefreshComplete MessageType = "refresh_complete" + + // Test messages + MessageTestAbstract MessageType = "test_abstract" + MessageTestFile MessageType = "test_file" + MessageTestRun MessageType = "test_run" + MessageTestPlan MessageType = "test_plan" + MessageTestState MessageType = "test_state" + MessageTestSummary 
MessageType = "test_summary" + MessageTestCleanup MessageType = "test_cleanup" + MessageTestInterrupt MessageType = "test_interrupt" +) diff --git a/pkg/command/views/json/output.go b/pkg/command/views/json/output.go new file mode 100644 index 00000000000..1b3e975eee0 --- /dev/null +++ b/pkg/command/views/json/output.go @@ -0,0 +1,80 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "encoding/json" + "fmt" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type Output struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type,omitempty"` + Value json.RawMessage `json:"value,omitempty"` + Action ChangeAction `json:"action,omitempty"` +} + +type Outputs map[string]Output + +func OutputsFromMap(outputValues map[string]*states.OutputValue) (Outputs, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + outputs := make(map[string]Output, len(outputValues)) + + for name, ov := range outputValues { + unmarked, _ := ov.Value.UnmarkDeep() + value, err := ctyjson.Marshal(unmarked, unmarked.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Error serializing output %q", name), + fmt.Sprintf("Error: %s", err), + )) + return nil, diags + } + valueType, err := ctyjson.MarshalType(unmarked.Type()) + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + var redactedValue json.RawMessage + if !ov.Sensitive { + redactedValue = json.RawMessage(value) + } + + outputs[name] = Output{ + Sensitive: ov.Sensitive, + Type: json.RawMessage(valueType), + Value: redactedValue, + } + } + + return outputs, nil +} + +func OutputsFromChanges(changes []*plans.OutputChangeSrc) Outputs { + outputs := make(map[string]Output, len(changes)) + 
+ for _, change := range changes { + outputs[change.Addr.OutputValue.Name] = Output{ + Sensitive: change.Sensitive, + Action: changeAction(change.Action), + } + } + + return outputs +} + +func (o Outputs) String() string { + return fmt.Sprintf("Outputs: %d", len(o)) +} diff --git a/pkg/command/views/json/output_test.go b/pkg/command/views/json/output_test.go new file mode 100644 index 00000000000..2d1143e0ccb --- /dev/null +++ b/pkg/command/views/json/output_test.go @@ -0,0 +1,185 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +func TestOutputsFromMap(t *testing.T) { + got, diags := OutputsFromMap(map[string]*states.OutputValue{ + // Normal non-sensitive output + "boop": { + Value: cty.NumberIntVal(1234), + }, + // Sensitive string output + "beep": { + Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), + Sensitive: true, + }, + // Sensitive object output which is marked at the leaf + "blorp": { + Value: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("oh, hi").Mark(marks.Sensitive), + }), + }), + }), + Sensitive: true, + }, + // Null value + "honk": { + Value: cty.NullVal(cty.Map(cty.Bool)), + }, + }) + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + + want := Outputs{ + "boop": { + Sensitive: false, + Type: json.RawMessage(`"number"`), + Value: json.RawMessage(`1234`), + }, + "beep": { + Sensitive: true, + Type: json.RawMessage(`"string"`), + }, + "blorp": { + Sensitive: true, + Type: 
json.RawMessage(`["object",{"a":["object",{"b":["object",{"c":"string"}]}]}]`), + }, + "honk": { + Sensitive: false, + Type: json.RawMessage(`["map","bool"]`), + Value: json.RawMessage(`null`), + }, + } + + if !cmp.Equal(want, got) { + t.Fatalf("unexpected result\n%s", cmp.Diff(want, got)) + } +} + +func TestOutputsFromChanges(t *testing.T) { + root := addrs.RootModuleInstance + num, err := plans.NewDynamicValue(cty.NumberIntVal(1234), cty.Number) + if err != nil { + t.Fatalf("unexpected error creating dynamic value: %v", err) + } + str, err := plans.NewDynamicValue(cty.StringVal("1234"), cty.String) + if err != nil { + t.Fatalf("unexpected error creating dynamic value: %v", err) + } + + got := OutputsFromChanges([]*plans.OutputChangeSrc{ + // Unchanged output "boop", value 1234 + { + Addr: root.OutputValue("boop"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: num, + After: num, + }, + Sensitive: false, + }, + // New output "beep", value 1234 + { + Addr: root.OutputValue("beep"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: num, + }, + Sensitive: false, + }, + // Deleted output "blorp", prior value 1234 + { + Addr: root.OutputValue("blorp"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: num, + After: nil, + }, + Sensitive: false, + }, + // Updated output "honk", prior value 1234, new value "1234" + { + Addr: root.OutputValue("honk"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: num, + After: str, + }, + Sensitive: false, + }, + // New sensitive output "secret", value "1234" + { + Addr: root.OutputValue("secret"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: str, + }, + Sensitive: true, + }, + }) + + want := Outputs{ + "boop": { + Action: "noop", + Sensitive: false, + }, + "beep": { + Action: "create", + Sensitive: false, + }, + "blorp": { + Action: "delete", + Sensitive: false, + }, + "honk": { + Action: "update", + Sensitive: false, + }, + 
"secret": { + Action: "create", + Sensitive: true, + }, + } + + if !cmp.Equal(want, got) { + t.Fatalf("unexpected result\n%s", cmp.Diff(want, got)) + } +} + +func TestOutputs_String(t *testing.T) { + outputs := Outputs{ + "boop": { + Sensitive: false, + Type: json.RawMessage(`"number"`), + Value: json.RawMessage(`1234`), + }, + "beep": { + Sensitive: true, + Type: json.RawMessage(`"string"`), + Value: json.RawMessage(`"horse-battery"`), + }, + } + if got, want := outputs.String(), "Outputs: 2"; got != want { + t.Fatalf("unexpected value\n got: %q\nwant: %q", got, want) + } +} diff --git a/pkg/command/views/json/resource_addr.go b/pkg/command/views/json/resource_addr.go new file mode 100644 index 00000000000..c1223396116 --- /dev/null +++ b/pkg/command/views/json/resource_addr.go @@ -0,0 +1,39 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +type ResourceAddr struct { + Addr string `json:"addr"` + Module string `json:"module"` + Resource string `json:"resource"` + ImpliedProvider string `json:"implied_provider"` + ResourceType string `json:"resource_type"` + ResourceName string `json:"resource_name"` + ResourceKey ctyjson.SimpleJSONValue `json:"resource_key"` +} + +func newResourceAddr(addr addrs.AbsResourceInstance) ResourceAddr { + resourceKey := ctyjson.SimpleJSONValue{Value: cty.NilVal} + if addr.Resource.Key != nil { + resourceKey.Value = addr.Resource.Key.Value() + } + return ResourceAddr{ + Addr: addr.String(), + Module: addr.Module.String(), + Resource: addr.Resource.String(), + ImpliedProvider: addr.Resource.Resource.ImpliedProvider(), + ResourceType: addr.Resource.Resource.Type, + ResourceName: addr.Resource.Resource.Name, + ResourceKey: resourceKey, + } +} diff --git 
a/pkg/command/views/json/test.go b/pkg/command/views/json/test.go new file mode 100644 index 00000000000..9ae8bc54d7d --- /dev/null +++ b/pkg/command/views/json/test.go @@ -0,0 +1,54 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "strings" + + "github.com/kubegems/opentofu/pkg/moduletest" +) + +type TestSuiteAbstract map[string][]string + +type TestStatus string + +type TestFileStatus struct { + Path string `json:"path"` + Status TestStatus `json:"status"` +} + +type TestRunStatus struct { + Path string `json:"path"` + Run string `json:"run"` + Status TestStatus `json:"status"` +} + +type TestSuiteSummary struct { + Status TestStatus `json:"status"` + Passed int `json:"passed"` + Failed int `json:"failed"` + Errored int `json:"errored"` + Skipped int `json:"skipped"` +} + +type TestFileCleanup struct { + FailedResources []TestFailedResource `json:"failed_resources"` +} + +type TestFailedResource struct { + Instance string `json:"instance"` + DeposedKey string `json:"deposed_key,omitempty"` +} + +type TestFatalInterrupt struct { + State []TestFailedResource `json:"state,omitempty"` + States map[string][]TestFailedResource `json:"states,omitempty"` + Planned []string `json:"planned,omitempty"` +} + +func ToTestStatus(status moduletest.Status) TestStatus { + return TestStatus(strings.ToLower(status.String())) +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json b/pkg/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json new file mode 100644 index 00000000000..27c9d1131bf --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-whose-range-starts-at-a-newline.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "Invalid newline", + "detail": "How awkward!", + "range": { + "filename": "odd-comment.tf", + "start": { + "line": 2, + "column": 
5, + "byte": 4 + }, + "end": { + "line": 3, + "column": 1, + "byte": 6 + } + }, + "snippet": { + "context": null, + "code": "#", + "start_line": 2, + "highlight_start_offset": 0, + "highlight_end_offset": 1, + "values": [] + } +} \ No newline at end of file diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json b/pkg/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json new file mode 100644 index 00000000000..e8cd00991b6 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-empty-highlight-range-at-end-of-source-code.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "You forgot something", + "detail": "Please finish your thought", + "range": { + "filename": "short.tf", + "start": { + "line": 1, + "column": 16, + "byte": 15 + }, + "end": { + "line": 1, + "column": 17, + "byte": 16 + } + }, + "snippet": { + "context": null, + "code": "bad source code", + "start_line": 1, + "highlight_start_offset": 15, + "highlight_end_offset": 15, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json b/pkg/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json new file mode 100644 index 00000000000..5acb67d0834 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-multi-line-snippet.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "In this house we respect booleans", + "detail": "True or false, there is no maybe", + "range": { + "filename": "test.tf", + "start": { + "line": 4, + "column": 11, + "byte": 81 + }, + "end": { + "line": 4, + "column": 16, + "byte": 86 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " bar = {\n baz = maybe\n }", + "start_line": 3, + "highlight_start_offset": 20, + "highlight_end_offset": 25, + "values": [] + } +} diff --git 
a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json new file mode 100644 index 00000000000..e6599faf5a6 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-a-collection-containing-a-sensitive-value.json @@ -0,0 +1,31 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [ + { + "traversal": "var.boop", + "statement": "is map of string with 1 element" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json new file mode 100644 index 00000000000..8d3625dbe78 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value-when-not-caused-by-sensitive-values.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" 
\"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json new file mode 100644 index 00000000000..abffcdb33e8 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-expression-referring-to-sensitive-value.json @@ -0,0 +1,31 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [ + { + "traversal": "var.boop[\"hello!\"]", + "statement": "has a sensitive value" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json new file mode 100644 index 00000000000..dde961cdf51 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-known-expression.json @@ -0,0 +1,31 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + 
"highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [ + { + "traversal": "var.boop[\"hello!\"]", + "statement": "is \"bleurgh\"" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json new file mode 100644 index 00000000000..8d3625dbe78 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type-when-not-caused-by-unknown-values.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json new file mode 100644 index 00000000000..d8aa3c4ca8b --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-expression-of-unknown-type.json @@ -0,0 +1,31 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo 
= var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [ + { + "traversal": "var.boop[\"hello!\"]", + "statement": "will be known only after apply" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json new file mode 100644 index 00000000000..8255af8f6ea --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-and-unknown-string-expression.json @@ -0,0 +1,31 @@ +{ + "severity": "error", + "summary": "Wrong noises", + "detail": "Biological sounds are not allowed", + "range": { + "filename": "test.tf", + "start": { + "line": 2, + "column": 9, + "byte": 42 + }, + "end": { + "line": 2, + "column": 26, + "byte": 59 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": " foo = var.boop[\"hello!\"]", + "start_line": 2, + "highlight_start_offset": 8, + "highlight_end_offset": 25, + "values": [ + { + "traversal": "var.boop[\"hello!\"]", + "statement": "is a string, known only after apply" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json new file mode 100644 index 00000000000..9cfafbc43c4 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-but-no-context.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "Nonsense input", + "detail": "What you wrote makes no sense", + "range": { + "filename": "short.tf", + "start": { + "line": 1, + "column": 5, + "byte": 4 + }, + "end": { + "line": 1, + "column": 10, + "byte": 9 + } + }, + "snippet": { + "context": null, + "code": "bad source code", + "start_line": 1, + "highlight_start_offset": 4, + 
"highlight_end_offset": 9, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json new file mode 100644 index 00000000000..b88e4905918 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject-with-multiple-expression-values.json @@ -0,0 +1,71 @@ +{ + "severity": "error", + "summary": "Catastrophic failure", + "detail": "Basically, everything went wrong", + "range": { + "filename": "values.tf", + "start": { + "line": 1, + "column": 1, + "byte": 0 + }, + "end": { + "line": 13, + "column": 2, + "byte": 102 + } + }, + "snippet": { + "context": null, + "code": "[\n var.a,\n var.b,\n var.c,\n var.d,\n var.e,\n var.f,\n var.g,\n var.h,\n var.i,\n var.j,\n var.k,\n]", + "start_line": 1, + "highlight_start_offset": 0, + "highlight_end_offset": 102, + "values": [ + { + "traversal": "var.a", + "statement": "is true" + }, + { + "traversal": "var.b", + "statement": "is 123.45" + }, + { + "traversal": "var.c", + "statement": "is null" + }, + { + "traversal": "var.d", + "statement": "has a sensitive value" + }, + { + "traversal": "var.e", + "statement": "is false" + }, + { + "traversal": "var.f", + "statement": "is empty list of string" + }, + { + "traversal": "var.g", + "statement": "is map of string with 1 element" + }, + { + "traversal": "var.h", + "statement": "is list of string with 3 elements" + }, + { + "traversal": "var.i", + "statement": "is object with no attributes" + }, + { + "traversal": "var.j", + "statement": "is object with 1 attribute \"foo\"" + }, + { + "traversal": "var.k", + "statement": "is object with 2 attributes" + } + ] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject.json new file mode 100644 
index 00000000000..762d811db31 --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-subject.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "Tiny explosion", + "detail": "Unexpected detonation while parsing", + "range": { + "filename": "test.tf", + "start": { + "line": 1, + "column": 10, + "byte": 9 + }, + "end": { + "line": 1, + "column": 25, + "byte": 24 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": "resource \"test_resource\" \"test\" {", + "start_line": 1, + "highlight_start_offset": 9, + "highlight_end_offset": 24, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json new file mode 100644 index 00000000000..2646f474b0f --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-source-code-unavailable.json @@ -0,0 +1,18 @@ +{ + "severity": "error", + "summary": "Bad news", + "detail": "It went wrong", + "range": { + "filename": "modules/oops/missing.tf", + "start": { + "line": 1, + "column": 6, + "byte": 5 + }, + "end": { + "line": 2, + "column": 12, + "byte": 33 + } + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json b/pkg/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json new file mode 100644 index 00000000000..1f7351f093f --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/error-with-unset-highlight-end-position.json @@ -0,0 +1,26 @@ +{ + "severity": "error", + "summary": "There is no end", + "detail": "But there is a beginning", + "range": { + "filename": "test.tf", + "start": { + "line": 1, + "column": 16, + "byte": 15 + }, + "end": { + "line": 1, + "column": 17, + "byte": 16 + } + }, + "snippet": { + "context": "resource \"test_resource\" \"test\"", + "code": "resource \"test_resource\" \"test\" {", + 
"start_line": 1, + "highlight_start_offset": 15, + "highlight_end_offset": 16, + "values": [] + } +} diff --git a/pkg/command/views/json/testdata/diagnostic/sourceless-warning.json b/pkg/command/views/json/testdata/diagnostic/sourceless-warning.json new file mode 100644 index 00000000000..56e171852ec --- /dev/null +++ b/pkg/command/views/json/testdata/diagnostic/sourceless-warning.json @@ -0,0 +1,5 @@ +{ + "severity": "warning", + "summary": "Oh no", + "detail": "Something is broken" +} diff --git a/pkg/command/views/json_view.go b/pkg/command/views/json_view.go new file mode 100644 index 00000000000..9edcfbcdd8a --- /dev/null +++ b/pkg/command/views/json_view.go @@ -0,0 +1,151 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + encJson "encoding/json" + "fmt" + + "github.com/hashicorp/go-hclog" + + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" +) + +// This version describes the schema of JSON UI messages. This version must be +// updated after making any changes to this view, the jsonHook, or any of the +// command/views/json package. +const JSON_UI_VERSION = "1.2" + +func NewJSONView(view *View) *JSONView { + log := hclog.New(&hclog.LoggerOptions{ + Name: "tofu.ui", + Output: view.streams.Stdout.File, + JSONFormat: true, + JSONEscapeDisabled: true, + }) + jv := &JSONView{ + log: log, + view: view, + } + jv.Version() + return jv +} + +type JSONView struct { + // hclog is used for all output in JSON UI mode. The logger has an internal + // mutex to ensure that messages are not interleaved. + log hclog.Logger + + // We hold a reference to the view entirely to allow us to access the + // ConfigSources function pointer, in order to render source snippets into + // diagnostics. 
This is even more unfortunate than the same reference in the + // view. + // + // Do not be tempted to dereference the configSource value upon logger init, + // as it will likely be updated later. + view *View +} + +func (v *JSONView) Version() { + version := tfversion.String() + v.log.Info( + fmt.Sprintf("OpenTofu %s", version), + "type", json.MessageVersion, + "tofu", version, + "ui", JSON_UI_VERSION, + ) +} + +func (v *JSONView) Log(message string) { + v.log.Info(message, "type", json.MessageLog) +} + +func (v *JSONView) StateDump(state string) { + v.log.Info( + "Emergency state dump", + "type", json.MessageLog, + "state", encJson.RawMessage(state), + ) +} + +func (v *JSONView) Diagnostics(diags tfdiags.Diagnostics, metadata ...interface{}) { + sources := v.view.configSources() + for _, diag := range diags { + diagnostic := json.NewDiagnostic(diag, sources) + + args := []interface{}{"type", json.MessageDiagnostic, "diagnostic", diagnostic} + args = append(args, metadata...) + + switch diag.Severity() { + case tfdiags.Warning: + v.log.Warn(fmt.Sprintf("Warning: %s", diag.Description().Summary), args...) + default: + v.log.Error(fmt.Sprintf("Error: %s", diag.Description().Summary), args...) 
+ } + } +} + +func (v *JSONView) PlannedChange(c *json.ResourceInstanceChange) { + v.log.Info( + c.String(), + "type", json.MessagePlannedChange, + "change", c, + ) +} + +func (v *JSONView) ResourceDrift(c *json.ResourceInstanceChange) { + v.log.Info( + fmt.Sprintf("%s: Drift detected (%s)", c.Resource.Addr, c.Action), + "type", json.MessageResourceDrift, + "change", c, + ) +} + +func (v *JSONView) ChangeSummary(cs *json.ChangeSummary) { + v.log.Info( + cs.String(), + "type", json.MessageChangeSummary, + "changes", cs, + ) +} + +func (v *JSONView) Hook(h json.Hook) { + v.log.Info( + h.String(), + "type", h.HookType(), + "hook", h, + ) +} + +func (v *JSONView) Outputs(outputs json.Outputs) { + v.log.Info( + outputs.String(), + "type", json.MessageOutputs, + "outputs", outputs, + ) +} + +// Output is designed for supporting command.WrappedUi +func (v *JSONView) Output(message string) { + v.log.Info(message, "type", "output") +} + +// Info is designed for supporting command.WrappedUi +func (v *JSONView) Info(message string) { + v.log.Info(message) +} + +// Warn is designed for supporting command.WrappedUi +func (v *JSONView) Warn(message string) { + v.log.Warn(message) +} + +// Error is designed for supporting command.WrappedUi +func (v *JSONView) Error(message string) { + v.log.Error(message) +} diff --git a/pkg/command/views/json_view_test.go b/pkg/command/views/json_view_test.go new file mode 100644 index 00000000000..582a788cf39 --- /dev/null +++ b/pkg/command/views/json_view_test.go @@ -0,0 +1,461 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" +) + +// Calling NewJSONView should also always output a version message, which is a +// convenient way to test that NewJSONView works. +func TestNewJSONView(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + NewJSONView(NewView(streams)) + + version := tfversion.String() + want := []map[string]interface{}{ + { + "@level": "info", + "@message": fmt.Sprintf("OpenTofu %s", version), + "@module": "tofu.ui", + "type": "version", + "tofu": version, + "ui": JSON_UI_VERSION, + }, + } + + testJSONViewOutputEqualsFull(t, done(t).Stdout(), want) +} + +func TestJSONView_Log(t *testing.T) { + testCases := []struct { + caseName string + input string + want []map[string]interface{} + }{ + { + "log with regular character", + "hello, world", + []map[string]interface{}{ + { + "@level": "info", + "@message": "hello, world", + "@module": "tofu.ui", + "type": "log", + }, + }, + }, + { + "log with special character", + "hello, special char, <>&", + []map[string]interface{}{ + { + "@level": "info", + "@message": "hello, special char, <>&", + "@module": "tofu.ui", + "type": "log", + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.caseName, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + jv.Log(tc.input) + testJSONViewOutputEquals(t, done(t).Stdout(), tc.want) + }) + } +} + +// This test covers only the basics of JSON diagnostic rendering, as more +// complex diagnostics are tested elsewhere. 
+func TestJSONView_Diagnostics(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + `Improper use of "less"`, + `You probably mean "10 buckets or fewer"`, + )) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unusually stripey cat detected", + "Are you sure this random_pet isn't a cheetah?", + )) + + jv.Diagnostics(diags) + + want := []map[string]interface{}{ + { + "@level": "warn", + "@message": `Warning: Improper use of "less"`, + "@module": "tofu.ui", + "type": "diagnostic", + "diagnostic": map[string]interface{}{ + "severity": "warning", + "summary": `Improper use of "less"`, + "detail": `You probably mean "10 buckets or fewer"`, + }, + }, + { + "@level": "error", + "@message": "Error: Unusually stripey cat detected", + "@module": "tofu.ui", + "type": "diagnostic", + "diagnostic": map[string]interface{}{ + "severity": "error", + "summary": "Unusually stripey cat detected", + "detail": "Are you sure this random_pet isn't a cheetah?", + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_DiagnosticsWithMetadata(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + `Improper use of "less"`, + `You probably mean "10 buckets or fewer"`, + )) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unusually stripey cat detected", + "Are you sure this random_pet isn't a cheetah?", + )) + + jv.Diagnostics(diags, "@meta", "extra_info") + + want := []map[string]interface{}{ + { + "@level": "warn", + "@message": `Warning: Improper use of "less"`, + "@module": "tofu.ui", + "type": "diagnostic", + "diagnostic": map[string]interface{}{ + "severity": "warning", + "summary": `Improper use of "less"`, + "detail": `You probably 
mean "10 buckets or fewer"`, + }, + "@meta": "extra_info", + }, + { + "@level": "error", + "@message": "Error: Unusually stripey cat detected", + "@module": "tofu.ui", + "type": "diagnostic", + "diagnostic": map[string]interface{}{ + "severity": "error", + "summary": "Unusually stripey cat detected", + "detail": "Are you sure this random_pet isn't a cheetah?", + }, + "@meta": "extra_info", + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_PlannedChange(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + foo, diags := addrs.ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + managed := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_instance", Name: "bar"} + cs := &plans.ResourceInstanceChangeSrc{ + Addr: managed.Instance(addrs.StringKey("boop")).Absolute(foo), + PrevRunAddr: managed.Instance(addrs.StringKey("boop")).Absolute(foo), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + } + jv.PlannedChange(viewsjson.NewResourceInstanceChange(cs)) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": `module.foo.test_instance.bar["boop"]: Plan to create`, + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "create", + "resource": map[string]interface{}{ + "addr": `module.foo.test_instance.bar["boop"]`, + "implied_provider": "test", + "module": "module.foo", + "resource": `test_instance.bar["boop"]`, + "resource_key": "boop", + "resource_name": "bar", + "resource_type": "test_instance", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_ResourceDrift(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + foo, diags := addrs.ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + managed := addrs.Resource{Mode: 
addrs.ManagedResourceMode, Type: "test_instance", Name: "bar"} + cs := &plans.ResourceInstanceChangeSrc{ + Addr: managed.Instance(addrs.StringKey("boop")).Absolute(foo), + PrevRunAddr: managed.Instance(addrs.StringKey("boop")).Absolute(foo), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + } + jv.ResourceDrift(viewsjson.NewResourceInstanceChange(cs)) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": `module.foo.test_instance.bar["boop"]: Drift detected (update)`, + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "update", + "resource": map[string]interface{}{ + "addr": `module.foo.test_instance.bar["boop"]`, + "implied_provider": "test", + "module": "module.foo", + "resource": `test_instance.bar["boop"]`, + "resource_key": "boop", + "resource_name": "bar", + "resource_type": "test_instance", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_ChangeSummary(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + jv.ChangeSummary(&viewsjson.ChangeSummary{ + Add: 1, + Change: 2, + Remove: 3, + Operation: viewsjson.OperationApplied, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Apply complete! 
Resources: 1 added, 2 changed, 3 destroyed.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "add": float64(1), + "import": float64(0), + "change": float64(2), + "remove": float64(3), + "operation": "apply", + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_ChangeSummaryWithImport(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + jv.ChangeSummary(&viewsjson.ChangeSummary{ + Add: 1, + Change: 2, + Remove: 3, + Import: 1, + Operation: viewsjson.OperationApplied, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Apply complete! Resources: 1 imported, 1 added, 2 changed, 3 destroyed.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "add": float64(1), + "change": float64(2), + "remove": float64(3), + "import": float64(1), + "operation": "apply", + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_Hook(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + foo, diags := addrs.ParseModuleInstanceStr("module.foo") + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + managed := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_instance", Name: "bar"} + addr := managed.Instance(addrs.StringKey("boop")).Absolute(foo) + hook := viewsjson.NewApplyComplete(addr, plans.Create, "id", "boop-beep", 34*time.Second) + + jv.Hook(hook) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": `module.foo.test_instance.bar["boop"]: Creation complete after 34s [id=boop-beep]`, + "@module": "tofu.ui", + "type": "apply_complete", + "hook": map[string]interface{}{ + "resource": map[string]interface{}{ + "addr": `module.foo.test_instance.bar["boop"]`, + "implied_provider": "test", + "module": "module.foo", + "resource": `test_instance.bar["boop"]`, + 
"resource_key": "boop", + "resource_name": "bar", + "resource_type": "test_instance", + }, + "action": "create", + "id_key": "id", + "id_value": "boop-beep", + "elapsed_seconds": float64(34), + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestJSONView_Outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + jv := NewJSONView(NewView(streams)) + + jv.Outputs(viewsjson.Outputs{ + "boop_count": { + Sensitive: false, + Value: json.RawMessage(`92`), + Type: json.RawMessage(`"number"`), + }, + "password": { + Sensitive: true, + Value: json.RawMessage(`"horse-battery"`), + Type: json.RawMessage(`"string"`), + }, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Outputs: 2", + "@module": "tofu.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop_count": map[string]interface{}{ + "sensitive": false, + "value": float64(92), + "type": "number", + }, + "password": map[string]interface{}{ + "sensitive": true, + "value": "horse-battery", + "type": "string", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +// This helper function tests a possibly multi-line JSONView output string +// against a slice of structs representing the desired log messages. It +// verifies that the output of JSONView is in JSON log format, one message per +// line. +func testJSONViewOutputEqualsFull(t *testing.T, output string, want []map[string]interface{}, options ...cmp.Option) { + t.Helper() + + // Remove final trailing newline + output = strings.TrimSuffix(output, "\n") + + // Split log into lines, each of which should be a JSON log message + gotLines := strings.Split(output, "\n") + + if len(gotLines) != len(want) { + t.Errorf("unexpected number of messages. 
got %d, want %d", len(gotLines), len(want)) + } + + // Unmarshal each line and compare to the expected value + for i := range gotLines { + var gotStruct map[string]interface{} + if i >= len(want) { + t.Error("reached end of want messages too soon") + break + } + wantStruct := want[i] + + if err := json.Unmarshal([]byte(gotLines[i]), &gotStruct); err != nil { + t.Fatal(err) + } + + if timestamp, ok := gotStruct["@timestamp"]; !ok { + t.Errorf("message has no timestamp: %#v", gotStruct) + } else { + // Remove the timestamp value from the struct to allow comparison + delete(gotStruct, "@timestamp") + + // Verify the timestamp format + if _, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", timestamp.(string)); err != nil { + t.Errorf("error parsing timestamp on line %d: %s", i, err) + } + } + + if !cmp.Equal(wantStruct, gotStruct, options...) { + t.Errorf("unexpected output on line %d:\n%s", i, cmp.Diff(wantStruct, gotStruct)) + } + } +} + +// testJSONViewOutputEquals skips the first line of output, since it ought to +// be a version message that we don't care about for most of our tests. +func testJSONViewOutputEquals(t *testing.T, output string, want []map[string]interface{}, options ...cmp.Option) { + t.Helper() + + // Remove up to the first newline + index := strings.Index(output, "\n") + if index >= 0 { + output = output[index+1:] + } + testJSONViewOutputEqualsFull(t, output, want, options...) +} diff --git a/pkg/command/views/operation.go b/pkg/command/views/operation.go new file mode 100644 index 00000000000..89dbfb21d19 --- /dev/null +++ b/pkg/command/views/operation.go @@ -0,0 +1,311 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bytes" + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +type Operation interface { + Interrupted() + FatalInterrupt() + Stopping() + Cancelled(planMode plans.Mode) + + EmergencyDumpState(stateFile *statefile.File, enc encryption.StateEncryption) error + + PlannedChange(change *plans.ResourceInstanceChangeSrc) + Plan(plan *plans.Plan, schemas *tofu.Schemas) + PlanNextStep(planPath string, genConfigPath string) + + Diagnostics(diags tfdiags.Diagnostics) +} + +func NewOperation(vt arguments.ViewType, inAutomation bool, view *View) Operation { + switch vt { + case arguments.ViewHuman: + return &OperationHuman{view: view, inAutomation: inAutomation} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +type OperationHuman struct { + view *View + + // inAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because OpenTofu is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. 
+ inAutomation bool +} + +var _ Operation = (*OperationHuman)(nil) + +func (v *OperationHuman) Interrupted() { + v.view.streams.Println(format.WordWrap(interrupted, v.view.outputColumns())) +} + +func (v *OperationHuman) FatalInterrupt() { + v.view.streams.Eprintln(format.WordWrap(fatalInterrupt, v.view.errorColumns())) +} + +func (v *OperationHuman) Stopping() { + v.view.streams.Println("Stopping operation...") +} + +func (v *OperationHuman) Cancelled(planMode plans.Mode) { + switch planMode { + case plans.DestroyMode: + v.view.streams.Println("Destroy cancelled.") + default: + v.view.streams.Println("Apply cancelled.") + } +} + +func (v *OperationHuman) EmergencyDumpState(stateFile *statefile.File, enc encryption.StateEncryption) error { + stateBuf := new(bytes.Buffer) + jsonErr := statefile.Write(stateFile, stateBuf, enc) + if jsonErr != nil { + return jsonErr + } + v.view.streams.Eprintln(stateBuf) + return nil +} + +func (v *OperationHuman) Plan(plan *plans.Plan, schemas *tofu.Schemas) { + outputs, changed, drift, attrs, err := jsonplan.MarshalForRenderer(plan, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) + return + } + + renderer := jsonformat.Renderer{ + Colorize: v.view.colorize, + Streams: v.view.streams, + RunningInAutomation: v.inAutomation, + ShowSensitive: v.view.showSensitive, + } + + jplan := jsonformat.Plan{ + PlanFormatVersion: jsonplan.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + OutputChanges: outputs, + ResourceChanges: changed, + ResourceDrift: drift, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + RelevantAttributes: attrs, + } + + // Side load some data that we can't extract from the JSON plan. + var opts []plans.Quality + if !plan.CanApply() { + opts = append(opts, plans.NoChanges) + } + if plan.Errored { + opts = append(opts, plans.Errored) + } + + renderer.RenderHumanPlan(jplan, plan.UIMode, opts...) 
+} + +func (v *OperationHuman) PlannedChange(change *plans.ResourceInstanceChangeSrc) { + // PlannedChange is primarily for machine-readable output in order to + // get a per-resource-instance change description. We don't use it + // with OperationHuman because the output of Plan already includes the + // change details for all resource instances. +} + +// PlanNextStep gives the user some next-steps, unless we're running in an +// automation tool which is presumed to provide its own UI for further actions. +func (v *OperationHuman) PlanNextStep(planPath string, genConfigPath string) { + if v.inAutomation { + return + } + v.view.outputHorizRule() + + if genConfigPath != "" { + v.view.streams.Printf( + format.WordWrap( + "\n"+strings.TrimSpace(fmt.Sprintf(planHeaderGenConfig, genConfigPath)), + v.view.outputColumns(), + ) + "\n") + } + + if planPath == "" { + v.view.streams.Print( + format.WordWrap( + "\n"+strings.TrimSpace(planHeaderNoOutput), + v.view.outputColumns(), + ) + "\n", + ) + } else { + v.view.streams.Printf( + format.WordWrap( + "\n"+strings.TrimSpace(fmt.Sprintf(planHeaderYesOutput, planPath, planPath)), + v.view.outputColumns(), + ) + "\n", + ) + } +} + +func (v *OperationHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +type OperationJSON struct { + view *JSONView +} + +var _ Operation = (*OperationJSON)(nil) + +func (v *OperationJSON) Interrupted() { + v.view.Log(interrupted) +} + +func (v *OperationJSON) FatalInterrupt() { + v.view.Log(fatalInterrupt) +} + +func (v *OperationJSON) Stopping() { + v.view.Log("Stopping operation...") +} + +func (v *OperationJSON) Cancelled(planMode plans.Mode) { + switch planMode { + case plans.DestroyMode: + v.view.Log("Destroy cancelled") + default: + v.view.Log("Apply cancelled") + } +} + +func (v *OperationJSON) EmergencyDumpState(stateFile *statefile.File, enc encryption.StateEncryption) error { + stateBuf := new(bytes.Buffer) + jsonErr := statefile.Write(stateFile, stateBuf, enc) + 
if jsonErr != nil { + return jsonErr + } + v.view.StateDump(stateBuf.String()) + return nil +} + +// Log a change summary and a series of "planned" messages for the changes in +// the plan. +func (v *OperationJSON) Plan(plan *plans.Plan, schemas *tofu.Schemas) { + for _, dr := range plan.DriftedResources { + // In refresh-only mode, we output all resources marked as drifted, + // including those which have moved without other changes. In other plan + // modes, move-only changes will be included in the planned changes, so + // we skip them here. + if dr.Action != plans.NoOp || plan.UIMode == plans.RefreshOnlyMode { + v.view.ResourceDrift(json.NewResourceInstanceChange(dr)) + } + } + + cs := &json.ChangeSummary{ + Operation: json.OperationPlanned, + } + for _, change := range plan.Changes.Resources { + if change.Action == plans.Delete && change.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + // Avoid rendering data sources on deletion + continue + } + + if change.Importing != nil { + cs.Import++ + } + + switch change.Action { + case plans.Create: + cs.Add++ + case plans.Delete: + cs.Remove++ + case plans.Update: + cs.Change++ + case plans.CreateThenDelete, plans.DeleteThenCreate: + cs.Add++ + cs.Remove++ + } + + if change.Action != plans.NoOp || !change.Addr.Equal(change.PrevRunAddr) || change.Importing != nil { + v.view.PlannedChange(json.NewResourceInstanceChange(change)) + } + } + + v.view.ChangeSummary(cs) + + var rootModuleOutputs []*plans.OutputChangeSrc + for _, output := range plan.Changes.Outputs { + if !output.Addr.Module.IsRoot() { + continue + } + rootModuleOutputs = append(rootModuleOutputs, output) + } + if len(rootModuleOutputs) > 0 { + v.view.Outputs(json.OutputsFromChanges(rootModuleOutputs)) + } +} + +func (v *OperationJSON) PlannedChange(change *plans.ResourceInstanceChangeSrc) { + if change.Action == plans.Delete && change.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + // Avoid rendering data sources on deletion + return + } + 
v.view.PlannedChange(json.NewResourceInstanceChange(change)) +} + +// PlanNextStep does nothing for the JSON view as it is a hook for user-facing +// output only applicable to human-readable UI. +func (v *OperationJSON) PlanNextStep(planPath string, genConfigPath string) { +} + +func (v *OperationJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +const fatalInterrupt = ` +Two interrupts received. Exiting immediately. Note that data loss may have occurred. +` + +const interrupted = ` +Interrupt received. +Please wait for OpenTofu to exit or data loss may occur. +Gracefully shutting down... +` + +const planHeaderNoOutput = ` +Note: You didn't use the -out option to save this plan, so OpenTofu can't guarantee to take exactly these actions if you run "tofu apply" now. +` + +const planHeaderYesOutput = ` +Saved the plan to: %s + +To perform exactly these actions, run the following command to apply: + tofu apply %q +` + +const planHeaderGenConfig = ` +OpenTofu has generated configuration and written it to %s. Please review the configuration and edit it as necessary before adding it to version control. +` diff --git a/pkg/command/views/operation_test.go b/pkg/command/views/operation_test.go new file mode 100644 index 00000000000..890232d9cec --- /dev/null +++ b/pkg/command/views/operation_test.go @@ -0,0 +1,1332 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/lang/globalref" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +func TestOperation_stopping(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, false, NewView(streams)) + + v.Stopping() + + if got, want := done(t).Stdout(), "Stopping operation...\n"; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } +} + +func TestOperation_cancelled(t *testing.T) { + testCases := map[string]struct { + planMode plans.Mode + want string + }{ + "apply": { + planMode: plans.NormalMode, + want: "Apply cancelled.\n", + }, + "destroy": { + planMode: plans.DestroyMode, + want: "Destroy cancelled.\n", + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, false, NewView(streams)) + + v.Cancelled(tc.planMode) + + if got, want := done(t).Stdout(), tc.want; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +func TestOperation_emergencyDumpState(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, false, NewView(streams)) + + stateFile := statefile.New(nil, "foo", 1) + + err := v.EmergencyDumpState(stateFile, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error dumping state: %s", err) + } + + // Check that the result (on stderr) looks like JSON state + raw := 
done(t).Stderr() + var state map[string]interface{} + if err := json.Unmarshal([]byte(raw), &state); err != nil { + t.Fatalf("unexpected error parsing dumped state: %s\nraw:\n%s", err, raw) + } +} + +func TestOperation_planNoChanges(t *testing.T) { + + tests := map[string]struct { + plan func(schemas *tofu.Schemas) *plans.Plan + wantText string + }{ + "nothing at all in normal mode": { + func(schemas *tofu.Schemas) *plans.Plan { + return &plans.Plan{ + UIMode: plans.NormalMode, + Changes: plans.NewChanges(), + } + }, + "no differences, so no changes are needed.", + }, + "nothing at all in refresh-only mode": { + func(schemas *tofu.Schemas) *plans.Plan { + return &plans.Plan{ + UIMode: plans.RefreshOnlyMode, + Changes: plans.NewChanges(), + } + }, + "OpenTofu has checked that the real remote objects still match", + }, + "nothing at all in destroy mode": { + func(schemas *tofu.Schemas) *plans.Plan { + return &plans.Plan{ + UIMode: plans.DestroyMode, + Changes: plans.NewChanges(), + } + }, + "No objects need to be destroyed.", + }, + "no drift detected in normal noop": { + func(schemas *tofu.Schemas) *plans.Plan { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "somewhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + schema, _ := schemas.ResourceTypeConfig( + addrs.NewDefaultProvider("test"), + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + ty := schema.ImpliedType() + rc := &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault( + addrs.NewDefaultProvider("test"), + ), + Change: plans.Change{ + Action: plans.Update, + Before: cty.NullVal(ty), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1234"), + "foo": cty.StringVal("bar"), + }), + }, + } + rcs, err := rc.Encode(ty) + if err != nil { + panic(err) + } + drs := []*plans.ResourceInstanceChangeSrc{rcs} + return &plans.Plan{ + UIMode: 
plans.NormalMode, + Changes: plans.NewChanges(), + DriftedResources: drs, + } + }, + "No changes", + }, + "drift detected in normal mode": { + func(schemas *tofu.Schemas) *plans.Plan { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "somewhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + schema, _ := schemas.ResourceTypeConfig( + addrs.NewDefaultProvider("test"), + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + ty := schema.ImpliedType() + rc := &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault( + addrs.NewDefaultProvider("test"), + ), + Change: plans.Change{ + Action: plans.Update, + Before: cty.NullVal(ty), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1234"), + "foo": cty.StringVal("bar"), + }), + }, + } + rcs, err := rc.Encode(ty) + if err != nil { + panic(err) + } + drs := []*plans.ResourceInstanceChangeSrc{rcs} + changes := plans.NewChanges() + changes.Resources = drs + return &plans.Plan{ + UIMode: plans.NormalMode, + Changes: changes, + DriftedResources: drs, + RelevantAttributes: []globalref.ResourceAttr{{ + Resource: addr, + Attr: cty.GetAttrPath("id"), + }}, + } + }, + "Objects have changed outside of OpenTofu", + }, + "drift detected in refresh-only mode": { + func(schemas *tofu.Schemas) *plans.Plan { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "somewhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + schema, _ := schemas.ResourceTypeConfig( + addrs.NewDefaultProvider("test"), + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + ty := schema.ImpliedType() + rc := &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault( + addrs.NewDefaultProvider("test"), + ), + Change: plans.Change{ + Action: plans.Update, + Before: 
cty.NullVal(ty), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1234"), + "foo": cty.StringVal("bar"), + }), + }, + } + rcs, err := rc.Encode(ty) + if err != nil { + panic(err) + } + drs := []*plans.ResourceInstanceChangeSrc{rcs} + return &plans.Plan{ + UIMode: plans.RefreshOnlyMode, + Changes: plans.NewChanges(), + DriftedResources: drs, + } + }, + "If you were expecting these changes then you can apply this plan", + }, + "move-only changes in refresh-only mode": { + func(schemas *tofu.Schemas) *plans.Plan { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "somewhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + addrPrev := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "anywhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + schema, _ := schemas.ResourceTypeConfig( + addrs.NewDefaultProvider("test"), + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + ty := schema.ImpliedType() + rc := &plans.ResourceInstanceChange{ + Addr: addr, + PrevRunAddr: addrPrev, + ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault( + addrs.NewDefaultProvider("test"), + ), + Change: plans.Change{ + Action: plans.NoOp, + Before: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1234"), + "foo": cty.StringVal("bar"), + }), + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("1234"), + "foo": cty.StringVal("bar"), + }), + }, + } + rcs, err := rc.Encode(ty) + if err != nil { + panic(err) + } + drs := []*plans.ResourceInstanceChangeSrc{rcs} + return &plans.Plan{ + UIMode: plans.RefreshOnlyMode, + Changes: plans.NewChanges(), + DriftedResources: drs, + } + }, + "test_resource.anywhere has moved to test_resource.somewhere", + }, + "drift detected in destroy mode": { + func(schemas *tofu.Schemas) *plans.Plan { + return &plans.Plan{ + UIMode: plans.DestroyMode, + Changes: plans.NewChanges(), + PrevRunState: 
states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "somewhere", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + addrs.RootModuleInstance.ProviderConfigDefault(addrs.NewDefaultProvider("test")), + ) + }), + PriorState: states.NewState(), + } + }, + "No objects need to be destroyed.", + }, + } + + schemas := testSchemas() + for name, test := range tests { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, false, NewView(streams)) + plan := test.plan(schemas) + v.Plan(plan, schemas) + got := done(t).Stdout() + if want := test.wantText; want != "" && !strings.Contains(got, want) { + t.Errorf("missing expected message\ngot:\n%s\n\nwant substring: %s", got, want) + } + }) + } +} + +func TestOperation_plan(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, true, NewView(streams)) + + plan := testPlan(t) + schemas := testSchemas() + v.Plan(plan, schemas) + + want := ` +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create + +OpenTofu will perform the following actions: + + # test_resource.foo will be created + + resource "test_resource" "foo" { + + foo = "bar" + + id = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. 
+` + + if got := done(t).Stdout(); got != want { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s", got, want) + } +} + +func TestOperation_planWithDatasource(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, true, NewView(streams)) + + plan := testPlanWithDatasource(t) + schemas := testSchemas() + v.Plan(plan, schemas) + + want := ` +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create + <= read (data resources) + +OpenTofu will perform the following actions: + + # data.test_data_source.bar will be read during apply + <= data "test_data_source" "bar" { + + bar = "foo" + + id = "C6743020-40BD-4591-81E6-CD08494341D3" + } + + # test_resource.foo will be created + + resource "test_resource" "foo" { + + foo = "bar" + + id = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. +` + + if got := done(t).Stdout(); got != want { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s", got, want) + } +} + +func TestOperation_planWithDatasourceAndDrift(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, true, NewView(streams)) + + plan := testPlanWithDatasource(t) + schemas := testSchemas() + v.Plan(plan, schemas) + + want := ` +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create + <= read (data resources) + +OpenTofu will perform the following actions: + + # data.test_data_source.bar will be read during apply + <= data "test_data_source" "bar" { + + bar = "foo" + + id = "C6743020-40BD-4591-81E6-CD08494341D3" + } + + # test_resource.foo will be created + + resource "test_resource" "foo" { + + foo = "bar" + + id = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. 
+` + + if got := done(t).Stdout(); got != want { + t.Errorf("unexpected output\ngot:\n%s\nwant:\n%s", got, want) + } +} + +func TestOperation_planNextStep(t *testing.T) { + testCases := map[string]struct { + path string + want string + }{ + "no state path": { + path: "", + want: "You didn't use the -out option", + }, + "state path": { + path: "good plan.tfplan", + want: `tofu apply "good plan.tfplan"`, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, false, NewView(streams)) + + v.PlanNextStep(tc.path, "") + + if got := done(t).Stdout(); !strings.Contains(got, tc.want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) + } + }) + } +} + +// The in-automation state is on the view itself, so testing it separately is +// clearer. +func TestOperation_planNextStepInAutomation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOperation(arguments.ViewHuman, true, NewView(streams)) + + v.PlanNextStep("", "") + + if got := done(t).Stdout(); got != "" { + t.Errorf("unexpected output\ngot: %q", got) + } +} + +// Test all the trivial OperationJSON methods together. Y'know, for brevity. +// This test is not a realistic stream of messages. 
+func TestOperationJSON_logs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + v.Cancelled(plans.NormalMode) + v.Cancelled(plans.DestroyMode) + v.Stopping() + v.Interrupted() + v.FatalInterrupt() + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Apply cancelled", + "@module": "tofu.ui", + "type": "log", + }, + { + "@level": "info", + "@message": "Destroy cancelled", + "@module": "tofu.ui", + "type": "log", + }, + { + "@level": "info", + "@message": "Stopping operation...", + "@module": "tofu.ui", + "type": "log", + }, + { + "@level": "info", + "@message": interrupted, + "@module": "tofu.ui", + "type": "log", + }, + { + "@level": "info", + "@message": fatalInterrupt, + "@module": "tofu.ui", + "type": "log", + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +// This is a fairly circular test, but it's such a rarely executed code path +// that I think it's probably still worth having. We're not testing against +// a fixed state JSON output because this test ought not fail just because +// we upgrade state format in the future. 
+func TestOperationJSON_emergencyDumpState(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + stateFile := statefile.New(nil, "foo", 1) + stateBuf := new(bytes.Buffer) + err := statefile.Write(stateFile, stateBuf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatal(err) + } + var stateJSON map[string]interface{} + err = json.Unmarshal(stateBuf.Bytes(), &stateJSON) + if err != nil { + t.Fatal(err) + } + + err = v.EmergencyDumpState(stateFile, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error dumping state: %s", err) + } + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Emergency state dump", + "@module": "tofu.ui", + "type": "log", + "state": stateJSON, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_planNoChanges(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + plan := &plans.Plan{ + Changes: plans.NewChanges(), + } + v.Plan(plan, nil) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Plan: 0 to add, 0 to change, 0 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(0), + "import": float64(0), + "change": float64(0), + "remove": float64(0), + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_plan(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + vpc, diags := addrs.ParseModuleInstanceStr("module.vpc") + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + boop := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "boop"} + beep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: 
"test_resource", Name: "beep"} + derp := addrs.Resource{Mode: addrs.DataResourceMode, Type: "test_source", Name: "derp"} + + plan := &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: boop.Instance(addrs.IntKey(0)).Absolute(root), + PrevRunAddr: boop.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.CreateThenDelete}, + }, + { + Addr: boop.Instance(addrs.IntKey(1)).Absolute(root), + PrevRunAddr: boop.Instance(addrs.IntKey(1)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Create}, + }, + { + Addr: boop.Instance(addrs.IntKey(0)).Absolute(vpc), + PrevRunAddr: boop.Instance(addrs.IntKey(0)).Absolute(vpc), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete}, + }, + { + Addr: beep.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: beep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.DeleteThenCreate}, + }, + { + Addr: beep.Instance(addrs.NoKey).Absolute(vpc), + PrevRunAddr: beep.Instance(addrs.NoKey).Absolute(vpc), + ChangeSrc: plans.ChangeSrc{Action: plans.Update}, + }, + // Data source deletion should not show up in the logs + { + Addr: derp.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: derp.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete}, + }, + }, + }, + } + v.Plan(plan, testSchemas()) + + want := []map[string]interface{}{ + // Create-then-delete should result in replace + { + "@level": "info", + "@message": "test_resource.boop[0]: Plan to replace", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "replace", + "resource": map[string]interface{}{ + "addr": `test_resource.boop[0]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.boop[0]`, + "resource_key": float64(0), + "resource_name": "boop", + "resource_type": "test_resource", + }, + }, + }, + // Simple create + { + "@level": "info", + "@message": 
"test_resource.boop[1]: Plan to create", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "create", + "resource": map[string]interface{}{ + "addr": `test_resource.boop[1]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.boop[1]`, + "resource_key": float64(1), + "resource_name": "boop", + "resource_type": "test_resource", + }, + }, + }, + // Simple delete + { + "@level": "info", + "@message": "module.vpc.test_resource.boop[0]: Plan to delete", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "delete", + "resource": map[string]interface{}{ + "addr": `module.vpc.test_resource.boop[0]`, + "implied_provider": "test", + "module": "module.vpc", + "resource": `test_resource.boop[0]`, + "resource_key": float64(0), + "resource_name": "boop", + "resource_type": "test_resource", + }, + }, + }, + // Delete-then-create is also a replace + { + "@level": "info", + "@message": "test_resource.beep: Plan to replace", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "replace", + "resource": map[string]interface{}{ + "addr": `test_resource.beep`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.beep`, + "resource_key": nil, + "resource_name": "beep", + "resource_type": "test_resource", + }, + }, + }, + // Simple update + { + "@level": "info", + "@message": "module.vpc.test_resource.beep: Plan to update", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "update", + "resource": map[string]interface{}{ + "addr": `module.vpc.test_resource.beep`, + "implied_provider": "test", + "module": "module.vpc", + "resource": `test_resource.beep`, + "resource_key": nil, + "resource_name": "beep", + "resource_type": "test_resource", + }, + }, + }, + // These counts are 3 add/1 change/3 destroy because the replace + // changes result in both add and 
destroy counts. + { + "@level": "info", + "@message": "Plan: 3 to add, 1 to change, 3 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(3), + "import": float64(0), + "change": float64(1), + "remove": float64(3), + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_planWithImport(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + vpc, diags := addrs.ParseModuleInstanceStr("module.vpc") + if len(diags) > 0 { + t.Fatal(diags.Err()) + } + boop := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "boop"} + beep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "beep"} + + plan := &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: boop.Instance(addrs.IntKey(0)).Absolute(vpc), + PrevRunAddr: boop.Instance(addrs.IntKey(0)).Absolute(vpc), + ChangeSrc: plans.ChangeSrc{Action: plans.NoOp, Importing: &plans.ImportingSrc{ID: "DECD6D77"}}, + }, + { + Addr: boop.Instance(addrs.IntKey(1)).Absolute(vpc), + PrevRunAddr: boop.Instance(addrs.IntKey(1)).Absolute(vpc), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete, Importing: &plans.ImportingSrc{ID: "DECD6D77"}}, + }, + { + Addr: boop.Instance(addrs.IntKey(0)).Absolute(root), + PrevRunAddr: boop.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.CreateThenDelete, Importing: &plans.ImportingSrc{ID: "DECD6D77"}}, + }, + { + Addr: beep.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: beep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Update, Importing: &plans.ImportingSrc{ID: "DECD6D77"}}, + }, + }, + }, + } + v.Plan(plan, testSchemas()) + + want := []map[string]interface{}{ + // Simple import + { + "@level": 
"info", + "@message": "module.vpc.test_resource.boop[0]: Plan to import", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "import", + "resource": map[string]interface{}{ + "addr": `module.vpc.test_resource.boop[0]`, + "implied_provider": "test", + "module": "module.vpc", + "resource": `test_resource.boop[0]`, + "resource_key": float64(0), + "resource_name": "boop", + "resource_type": "test_resource", + }, + "importing": map[string]interface{}{ + "id": "DECD6D77", + }, + }, + }, + // Delete after importing + { + "@level": "info", + "@message": "module.vpc.test_resource.boop[1]: Plan to delete", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "delete", + "resource": map[string]interface{}{ + "addr": `module.vpc.test_resource.boop[1]`, + "implied_provider": "test", + "module": "module.vpc", + "resource": `test_resource.boop[1]`, + "resource_key": float64(1), + "resource_name": "boop", + "resource_type": "test_resource", + }, + "importing": map[string]interface{}{ + "id": "DECD6D77", + }, + }, + }, + // Create-then-delete after importing. 
+ { + "@level": "info", + "@message": "test_resource.boop[0]: Plan to replace", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "replace", + "resource": map[string]interface{}{ + "addr": `test_resource.boop[0]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.boop[0]`, + "resource_key": float64(0), + "resource_name": "boop", + "resource_type": "test_resource", + }, + "importing": map[string]interface{}{ + "id": "DECD6D77", + }, + }, + }, + // Update after importing + { + "@level": "info", + "@message": "test_resource.beep: Plan to update", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "update", + "resource": map[string]interface{}{ + "addr": `test_resource.beep`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.beep`, + "resource_key": nil, + "resource_name": "beep", + "resource_type": "test_resource", + }, + "importing": map[string]interface{}{ + "id": "DECD6D77", + }, + }, + }, + { + "@level": "info", + "@message": "Plan: 4 to import, 1 to add, 1 to change, 2 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(1), + "import": float64(4), + "change": float64(1), + "remove": float64(2), + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_planDriftWithMove(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + boop := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "boop"} + beep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "beep"} + blep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "blep"} + honk := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", 
Name: "honk"} + + plan := &plans.Plan{ + UIMode: plans.NormalMode, + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: honk.Instance(addrs.StringKey("bonk")).Absolute(root), + PrevRunAddr: honk.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.NoOp}, + }, + }, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: beep.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: beep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete}, + }, + { + Addr: boop.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: blep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Update}, + }, + // Move-only resource drift should not be present in normal mode plans + { + Addr: honk.Instance(addrs.StringKey("bonk")).Absolute(root), + PrevRunAddr: honk.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.NoOp}, + }, + }, + } + v.Plan(plan, testSchemas()) + + want := []map[string]interface{}{ + // Drift detected: delete + { + "@level": "info", + "@message": "test_resource.beep: Drift detected (delete)", + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "delete", + "resource": map[string]interface{}{ + "addr": "test_resource.beep", + "implied_provider": "test", + "module": "", + "resource": "test_resource.beep", + "resource_key": nil, + "resource_name": "beep", + "resource_type": "test_resource", + }, + }, + }, + // Drift detected: update with move + { + "@level": "info", + "@message": "test_resource.boop: Drift detected (update)", + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "update", + "resource": map[string]interface{}{ + "addr": "test_resource.boop", + "implied_provider": "test", + "module": "", + "resource": "test_resource.boop", + "resource_key": nil, + "resource_name": "boop", + "resource_type": 
"test_resource", + }, + "previous_resource": map[string]interface{}{ + "addr": "test_resource.blep", + "implied_provider": "test", + "module": "", + "resource": "test_resource.blep", + "resource_key": nil, + "resource_name": "blep", + "resource_type": "test_resource", + }, + }, + }, + // Move-only change + { + "@level": "info", + "@message": `test_resource.honk["bonk"]: Plan to move`, + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "move", + "resource": map[string]interface{}{ + "addr": `test_resource.honk["bonk"]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.honk["bonk"]`, + "resource_key": "bonk", + "resource_name": "honk", + "resource_type": "test_resource", + }, + "previous_resource": map[string]interface{}{ + "addr": `test_resource.honk[0]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.honk[0]`, + "resource_key": float64(0), + "resource_name": "honk", + "resource_type": "test_resource", + }, + }, + }, + // No changes + { + "@level": "info", + "@message": "Plan: 0 to add, 0 to change, 0 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(0), + "import": float64(0), + "change": float64(0), + "remove": float64(0), + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_planDriftWithMoveRefreshOnly(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + boop := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "boop"} + beep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "beep"} + blep := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: "blep"} + honk := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_resource", Name: 
"honk"} + + plan := &plans.Plan{ + UIMode: plans.RefreshOnlyMode, + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{}, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: beep.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: beep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete}, + }, + { + Addr: boop.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: blep.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Update}, + }, + // Move-only resource drift should be present in refresh-only plans + { + Addr: honk.Instance(addrs.StringKey("bonk")).Absolute(root), + PrevRunAddr: honk.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.NoOp}, + }, + }, + } + v.Plan(plan, testSchemas()) + + want := []map[string]interface{}{ + // Drift detected: delete + { + "@level": "info", + "@message": "test_resource.beep: Drift detected (delete)", + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "delete", + "resource": map[string]interface{}{ + "addr": "test_resource.beep", + "implied_provider": "test", + "module": "", + "resource": "test_resource.beep", + "resource_key": nil, + "resource_name": "beep", + "resource_type": "test_resource", + }, + }, + }, + // Drift detected: update + { + "@level": "info", + "@message": "test_resource.boop: Drift detected (update)", + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "update", + "resource": map[string]interface{}{ + "addr": "test_resource.boop", + "implied_provider": "test", + "module": "", + "resource": "test_resource.boop", + "resource_key": nil, + "resource_name": "boop", + "resource_type": "test_resource", + }, + "previous_resource": map[string]interface{}{ + "addr": "test_resource.blep", + "implied_provider": "test", + "module": "", + "resource": "test_resource.blep", + "resource_key": 
nil, + "resource_name": "blep", + "resource_type": "test_resource", + }, + }, + }, + // Drift detected: Move-only change + { + "@level": "info", + "@message": `test_resource.honk["bonk"]: Drift detected (move)`, + "@module": "tofu.ui", + "type": "resource_drift", + "change": map[string]interface{}{ + "action": "move", + "resource": map[string]interface{}{ + "addr": `test_resource.honk["bonk"]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.honk["bonk"]`, + "resource_key": "bonk", + "resource_name": "honk", + "resource_type": "test_resource", + }, + "previous_resource": map[string]interface{}{ + "addr": `test_resource.honk[0]`, + "implied_provider": "test", + "module": "", + "resource": `test_resource.honk[0]`, + "resource_key": float64(0), + "resource_name": "honk", + "resource_type": "test_resource", + }, + }, + }, + // No changes + { + "@level": "info", + "@message": "Plan: 0 to add, 0 to change, 0 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(0), + "import": float64(0), + "change": float64(0), + "remove": float64(0), + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_planOutputChanges(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + + plan := &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{}, + Outputs: []*plans.OutputChangeSrc{ + { + Addr: root.OutputValue("boop"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + }, + }, + { + Addr: root.OutputValue("beep"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: root.OutputValue("bonk"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: root.OutputValue("honk"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + Sensitive: true, 
+ }, + }, + }, + } + v.Plan(plan, testSchemas()) + + want := []map[string]interface{}{ + // No resource changes + { + "@level": "info", + "@message": "Plan: 0 to add, 0 to change, 0 to destroy.", + "@module": "tofu.ui", + "type": "change_summary", + "changes": map[string]interface{}{ + "operation": "plan", + "add": float64(0), + "import": float64(0), + "change": float64(0), + "remove": float64(0), + }, + }, + // Output changes + { + "@level": "info", + "@message": "Outputs: 4", + "@module": "tofu.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop": map[string]interface{}{ + "action": "noop", + "sensitive": false, + }, + "beep": map[string]interface{}{ + "action": "create", + "sensitive": false, + }, + "bonk": map[string]interface{}{ + "action": "delete", + "sensitive": false, + }, + "honk": map[string]interface{}{ + "action": "update", + "sensitive": true, + }, + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} + +func TestOperationJSON_plannedChange(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := &OperationJSON{view: NewJSONView(NewView(streams))} + + root := addrs.RootModuleInstance + boop := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_instance", Name: "boop"} + derp := addrs.Resource{Mode: addrs.DataResourceMode, Type: "test_source", Name: "derp"} + + // Replace requested by user + v.PlannedChange(&plans.ResourceInstanceChangeSrc{ + Addr: boop.Instance(addrs.IntKey(0)).Absolute(root), + PrevRunAddr: boop.Instance(addrs.IntKey(0)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.DeleteThenCreate}, + ActionReason: plans.ResourceInstanceReplaceByRequest, + }) + + // Simple create + v.PlannedChange(&plans.ResourceInstanceChangeSrc{ + Addr: boop.Instance(addrs.IntKey(1)).Absolute(root), + PrevRunAddr: boop.Instance(addrs.IntKey(1)).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Create}, + }) + + // Data source deletion + 
v.PlannedChange(&plans.ResourceInstanceChangeSrc{ + Addr: derp.Instance(addrs.NoKey).Absolute(root), + PrevRunAddr: derp.Instance(addrs.NoKey).Absolute(root), + ChangeSrc: plans.ChangeSrc{Action: plans.Delete}, + }) + + // Expect only two messages, as the data source deletion should be a no-op + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "test_instance.boop[0]: Plan to replace", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "replace", + "reason": "requested", + "resource": map[string]interface{}{ + "addr": `test_instance.boop[0]`, + "implied_provider": "test", + "module": "", + "resource": `test_instance.boop[0]`, + "resource_key": float64(0), + "resource_name": "boop", + "resource_type": "test_instance", + }, + }, + }, + { + "@level": "info", + "@message": "test_instance.boop[1]: Plan to create", + "@module": "tofu.ui", + "type": "planned_change", + "change": map[string]interface{}{ + "action": "create", + "resource": map[string]interface{}{ + "addr": `test_instance.boop[1]`, + "implied_provider": "test", + "module": "", + "resource": `test_instance.boop[1]`, + "resource_key": float64(1), + "resource_name": "boop", + "resource_type": "test_instance", + }, + }, + }, + } + + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} diff --git a/pkg/command/views/output.go b/pkg/command/views/output.go new file mode 100644 index 00000000000..e97f1f9557d --- /dev/null +++ b/pkg/command/views/output.go @@ -0,0 +1,290 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/repl" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// The Output view renders either one or all outputs, depending on whether or +// not the name argument is empty. +type Output interface { + Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics + Diagnostics(diags tfdiags.Diagnostics) +} + +// NewOutput returns an initialized Output implementation for the given ViewType. +func NewOutput(vt arguments.ViewType, view *View) Output { + switch vt { + case arguments.ViewJSON: + return &OutputJSON{view: view} + case arguments.ViewRaw: + return &OutputRaw{view: view} + case arguments.ViewHuman: + return &OutputHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The OutputHuman implementation renders outputs in a format equivalent to HCL +// source. This uses the same formatting logic as in the console REPL. 
+type OutputHuman struct { + view *View +} + +var _ Output = (*OutputHuman)(nil) + +func (v *OutputHuman) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + result := repl.FormatValue(output.Value, 0) + v.view.streams.Println(result) + return nil + } + + outputBuf := new(bytes.Buffer) + if len(outputs) > 0 { + // Output the outputs in alphabetical order + keyLen := 0 + ks := make([]string, 0, len(outputs)) + for key := range outputs { + ks = append(ks, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(ks) + + for _, k := range ks { + vs := outputs[k] + if vs.Sensitive && !v.view.showSensitive { + outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) + continue + } + + result := repl.FormatValue(vs.Value, 0) + outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) + } + } + + v.view.streams.Println(strings.TrimSpace(outputBuf.String())) + + return nil +} + +func (v *OutputHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The OutputRaw implementation renders single string, number, or boolean +// output values directly and without quotes or other formatting. This is +// intended for use in shell scripting or other environments where the exact +// type of an output value is not important. 
+type OutputRaw struct { + view *View +} + +var _ Output = (*OutputRaw)(nil) + +func (v *OutputRaw) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name == "" { + diags = diags.Append(fmt.Errorf("Raw output format is only supported for single outputs")) + return diags + } + + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + + strV, err := convert.Convert(output.Value, cty.String) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The -raw option only supports strings, numbers, and boolean values, but output value %q is %s.\n\nUse the -json option for machine-readable representations of output values that have complex types.", + name, output.Value.Type().FriendlyName(), + ), + )) + return diags + } + if strV.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q is null, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + if !strV.IsKnown() { + // Since we're working with values from the state it would be very + // odd to end up in here, but we'll handle it anyway to avoid a + // panic in case our rules somehow change in future. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q won't be known until after a successful tofu apply, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + // If we get out here then we should have a valid string to print. + // We're writing it using Print here so that a shell caller will get + // exactly the value and no extra whitespace (including trailing newline). 
+ v.view.streams.Print(strV.AsString()) + return nil +} + +func (v *OutputRaw) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The OutputJSON implementation renders outputs as JSON values. When rendering +// a single output, only the value is displayed. When rendering all outputs, +// the result is a JSON object with keys matching the output names and object +// values including type and sensitivity metadata. +type OutputJSON struct { + view *View +} + +var _ Output = (*OutputJSON)(nil) + +func (v *OutputJSON) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + value := output.Value + + jsonOutput, err := ctyjson.Marshal(value, value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + + v.view.streams.Println(string(jsonOutput)) + + return nil + } + + // Due to a historical accident, the switch from state version 2 to + // 3 caused our JSON output here to be the full metadata about the + // outputs rather than just the output values themselves as we'd + // show in the single value case. We must now maintain that behavior + // for compatibility, so this is an emulation of the JSON + // serialization of outputs used in state format version 3. 
+ type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` + } + outputMetas := map[string]OutputMeta{} + + for n, os := range outputs { + jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + jsonType, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + outputMetas[n] = OutputMeta{ + Sensitive: os.Sensitive, + Type: json.RawMessage(jsonType), + Value: json.RawMessage(jsonVal), + } + } + + jsonOutputs, err := json.MarshalIndent(outputMetas, "", " ") + if err != nil { + diags = diags.Append(err) + return diags + } + + v.view.streams.Println(string(jsonOutputs)) + + return nil +} + +func (v *OutputJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// For text and raw output modes, an empty map of outputs is considered a +// separate and higher priority failure mode than an output not being present +// in a non-empty map. This warning diagnostic explains how this might have +// happened. +func noOutputsWarning() tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Warning, + "No outputs found", + "The state file either has no outputs defined, or all the defined "+ + "outputs are empty. Please define an output in your configuration "+ + "with the `output` keyword and run `tofu refresh` for it to "+ + "become available. If you are using interpolation, please verify "+ + "the interpolated value is not empty. You can use the "+ + "`tofu console` command to assist.", + ) +} + +// Attempting to display a missing output results in this failure, which +// includes suggestions on how to rectify the problem. +func missingOutputError(name string) tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Output %q not found", name), + "The output variable requested could not be found in the state "+ + "file. 
If you recently added this to your configuration, be "+ + "sure to run `tofu apply`, since the state won't be updated "+ + "with new output variables until that command is run.", + ) +} diff --git a/pkg/command/views/output_test.go b/pkg/command/views/output_test.go new file mode 100644 index 00000000000..e368dfe4e63 --- /dev/null +++ b/pkg/command/views/output_test.go @@ -0,0 +1,368 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/zclconf/go-cty/cty" +) + +// Test various single output values for human-readable UI. Note that since +// OutputHuman defers to repl.FormatValue to render a single value, most of the +// test coverage should be in that package. +func TestOutputHuman_single(t *testing.T) { + testCases := map[string]struct { + value cty.Value + want string + wantErr bool + }{ + "string": { + value: cty.StringVal("hello"), + want: "\"hello\"\n", + }, + "list of maps": { + value: cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + "key2": cty.StringVal("value2"), + }), + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("value"), + }), + }), + want: `tolist([ + tomap({ + "key" = "value" + "key2" = "value2" + }), + tomap({ + "key" = "value" + }), +]) +`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewHuman, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": {Value: tc.value}, + } + diags := v.Output("foo", outputs) + + if diags.HasErrors() { + if !tc.wantErr { + t.Fatalf("unexpected diagnostics: %s", diags) + } + } else if tc.wantErr { + t.Fatalf("succeeded, but 
want error") + } + + if got, want := done(t).Stdout(), tc.want; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Sensitive output values are rendered to the console intentionally when +// requesting a single output. +func TestOutput_sensitive(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "json": arguments.ViewJSON, + "raw": arguments.ViewRaw, + } + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": { + Value: cty.StringVal("secret"), + Sensitive: true, + }, + } + diags := v.Output("foo", outputs) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + // Test for substring match here because we don't care about exact + // output format in this test, just the presence of the sensitive + // value. + if got, want := done(t).Stdout(), "secret"; !strings.Contains(got, want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Showing all outputs is supported by human and JSON output format. 
+func TestOutput_all(t *testing.T) { + outputs := map[string]*states.OutputValue{ + "foo": { + Value: cty.StringVal("secret"), + Sensitive: true, + }, + "bar": { + Value: cty.ListVal([]cty.Value{cty.True, cty.False, cty.True}), + }, + "baz": { + Value: cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NumberIntVal(5), + "beep": cty.StringVal("true"), + }), + }, + } + + testCases := map[string]struct { + vt arguments.ViewType + want string + }{ + "human": { + arguments.ViewHuman, + `bar = tolist([ + true, + false, + true, +]) +baz = { + "beep" = "true" + "boop" = 5 +} +foo = +`, + }, + "json": { + arguments.ViewJSON, + `{ + "bar": { + "sensitive": false, + "type": [ + "list", + "bool" + ], + "value": [ + true, + false, + true + ] + }, + "baz": { + "sensitive": false, + "type": [ + "object", + { + "beep": "string", + "boop": "number" + } + ], + "value": { + "beep": "true", + "boop": 5 + } + }, + "foo": { + "sensitive": true, + "type": "string", + "value": "secret" + } +} +`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(tc.vt, NewView(streams)) + diags := v.Output("", outputs) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + if got := done(t).Stdout(); got != tc.want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, tc.want) + } + }) + } +} + +// JSON output format supports empty outputs by rendering an empty object +// without diagnostics. +func TestOutputJSON_empty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewJSON, NewView(streams)) + + diags := v.Output("", map[string]*states.OutputValue{}) + + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %s", diags) + } + + if got, want := done(t).Stdout(), "{}\n"; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } +} + +// Human and raw formats render a warning if there are no outputs. 
+func TestOutput_emptyWarning(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "raw": arguments.ViewRaw, + } + + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + diags := v.Output("", map[string]*states.OutputValue{}) + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + + if len(diags) != 1 { + t.Fatalf("expected 1 diagnostic, got %d", len(diags)) + } + + if diags.HasErrors() { + t.Fatalf("unexpected error diagnostics: %s", diags) + } + + if got, want := diags[0].Description().Summary, "No outputs found"; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } + }) + } +} + +// Raw output is a simple unquoted output format designed for shell scripts, +// which relies on the cty.AsString() implementation. This test covers +// formatting for supported value types. 
+func TestOutputRaw(t *testing.T) { + values := map[string]cty.Value{ + "str": cty.StringVal("bar"), + "multistr": cty.StringVal("bar\nbaz"), + "num": cty.NumberIntVal(2), + "bool": cty.True, + "obj": cty.EmptyObjectVal, + "null": cty.NullVal(cty.String), + "unknown": cty.UnknownVal(cty.String), + } + + tests := map[string]struct { + WantOutput string + WantErr bool + }{ + "str": {WantOutput: "bar"}, + "multistr": {WantOutput: "bar\nbaz"}, + "num": {WantOutput: "2"}, + "bool": {WantOutput: "true"}, + "obj": {WantErr: true}, + "null": {WantErr: true}, + "unknown": {WantErr: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewRaw, NewView(streams)) + + value := values[name] + outputs := map[string]*states.OutputValue{ + name: {Value: value}, + } + diags := v.Output(name, outputs) + + if diags.HasErrors() { + if !test.WantErr { + t.Fatalf("unexpected diagnostics: %s", diags) + } + } else if test.WantErr { + t.Fatalf("succeeded, but want error") + } + + if got, want := done(t).Stdout(), test.WantOutput; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} + +// Raw cannot render all outputs. 
+func TestOutputRaw_all(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(arguments.ViewRaw, NewView(streams)) + + outputs := map[string]*states.OutputValue{ + "foo": {Value: cty.StringVal("secret")}, + "bar": {Value: cty.True}, + } + diags := v.Output("", outputs) + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + + if !diags.HasErrors() { + t.Fatalf("expected diagnostics, got %s", diags) + } + + if got, want := diags.Err().Error(), "Raw output format is only supported for single outputs"; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } +} + +// All outputs render an error if a specific output is requested which is +// missing from the map of outputs. +func TestOutput_missing(t *testing.T) { + testCases := map[string]arguments.ViewType{ + "human": arguments.ViewHuman, + "json": arguments.ViewJSON, + "raw": arguments.ViewRaw, + } + + for name, vt := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewOutput(vt, NewView(streams)) + + diags := v.Output("foo", map[string]*states.OutputValue{ + "bar": {Value: cty.StringVal("boop")}, + }) + + if len(diags) != 1 { + t.Fatalf("expected 1 diagnostic, got %d", len(diags)) + } + + if !diags.HasErrors() { + t.Fatalf("expected error diagnostics, got %s", diags) + } + + if got, want := diags[0].Description().Summary, `Output "foo" not found`; got != want { + t.Errorf("unexpected diagnostics: %s", diags) + } + + if got, want := done(t).Stdout(), ""; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} diff --git a/pkg/command/views/plan.go b/pkg/command/views/plan.go new file mode 100644 index 00000000000..a6c2823cf3f --- /dev/null +++ b/pkg/command/views/plan.go @@ -0,0 +1,93 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// The Plan view is used for the plan command. +type Plan interface { + Operation() Operation + Hooks() []tofu.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewPlan returns an initialized Plan implementation for the given ViewType. +func NewPlan(vt arguments.ViewType, view *View) Plan { + switch vt { + case arguments.ViewJSON: + return &PlanJSON{ + view: NewJSONView(view), + } + case arguments.ViewHuman: + return &PlanHuman{ + view: view, + inAutomation: view.RunningInAutomation(), + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The PlanHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. +type PlanHuman struct { + view *View + + inAutomation bool +} + +var _ Plan = (*PlanHuman)(nil) + +func (v *PlanHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *PlanHuman) Hooks() []tofu.Hook { + return []tofu.Hook{ + NewUiHook(v.view), + } +} + +func (v *PlanHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *PlanHuman) HelpPrompt() { + v.view.HelpPrompt("plan") +} + +// The PlanJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. 
+type PlanJSON struct { + view *JSONView +} + +var _ Plan = (*PlanJSON)(nil) + +func (v *PlanJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *PlanJSON) Hooks() []tofu.Hook { + return []tofu.Hook{ + newJSONHook(v.view), + } +} + +func (v *PlanJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *PlanJSON) HelpPrompt() { +} diff --git a/pkg/command/views/plan_test.go b/pkg/command/views/plan_test.go new file mode 100644 index 00000000000..eea31f42223 --- /dev/null +++ b/pkg/command/views/plan_test.go @@ -0,0 +1,181 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/zclconf/go-cty/cty" +) + +// Ensure that the correct view type and in-automation settings propagate to the +// Operation view. 
+func TestPlanHuman_operation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)).Operation() + if hv, ok := v.(*OperationHuman); !ok { + t.Fatalf("unexpected return type %t", v) + } else if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value on Operation view") + } +} + +// Verify that Hooks includes a UI hook +func TestPlanHuman_hooks(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewPlan(arguments.ViewHuman, NewView(streams).SetRunningInAutomation((true))) + hooks := v.Hooks() + + var uiHook *UiHook + for _, hook := range hooks { + if ch, ok := hook.(*UiHook); ok { + uiHook = ch + } + } + if uiHook == nil { + t.Fatalf("expected Hooks to include a UiHook: %#v", hooks) + } +} + +// Helper functions to build a trivial test plan, to exercise the plan +// renderer. +func testPlan(t *testing.T) *plans.Plan { + t.Helper() + + plannedVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("bar"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(plannedVal.Type()), plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(plannedVal, plannedVal.Type()) + if err != nil { + t.Fatal(err) + } + + changes := plans.NewChanges() + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + + return &plans.Plan{ + Changes: changes, + } +} + +func 
testPlanWithDatasource(t *testing.T) *plans.Plan { + plan := testPlan(t) + + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + dataVal := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("C6743020-40BD-4591-81E6-CD08494341D3"), + "bar": cty.StringVal("foo"), + }) + priorValRaw, err := plans.NewDynamicValue(cty.NullVal(dataVal.Type()), dataVal.Type()) + if err != nil { + t.Fatal(err) + } + plannedValRaw, err := plans.NewDynamicValue(dataVal, dataVal.Type()) + if err != nil { + t.Fatal(err) + } + + plan.Changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: addr, + PrevRunAddr: addr, + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Read, + Before: priorValRaw, + After: plannedValRaw, + }, + }) + + return plan +} + +func testSchemas() *tofu.Schemas { + provider := testProvider() + return &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.GetProviderSchema(), + }, + } +} + +func testProvider() *tofu.MockProvider { + p := new(tofu.MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + + p.GetProviderSchemaResponse = testProviderSchema() + + return p +} + +func testProviderSchema() *providers.GetProviderSchemaResponse { + return &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{}, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + DataSources: 
map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Required: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } +} diff --git a/pkg/command/views/refresh.go b/pkg/command/views/refresh.go new file mode 100644 index 00000000000..5a068922e32 --- /dev/null +++ b/pkg/command/views/refresh.go @@ -0,0 +1,117 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// The Refresh view is used for the refresh command. +type Refresh interface { + Outputs(outputValues map[string]*states.OutputValue) + + Operation() Operation + Hooks() []tofu.Hook + + Diagnostics(diags tfdiags.Diagnostics) + HelpPrompt() +} + +// NewRefresh returns an initialized Refresh implementation for the given ViewType. +func NewRefresh(vt arguments.ViewType, view *View) Refresh { + switch vt { + case arguments.ViewJSON: + return &RefreshJSON{ + view: NewJSONView(view), + } + case arguments.ViewHuman: + return &RefreshHuman{ + view: view, + inAutomation: view.RunningInAutomation(), + countHook: &countHook{}, + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The RefreshHuman implementation renders human-readable text logs, suitable for +// a scrolling terminal. 
+type RefreshHuman struct { + view *View + + inAutomation bool + + countHook *countHook +} + +var _ Refresh = (*RefreshHuman)(nil) + +func (v *RefreshHuman) Outputs(outputValues map[string]*states.OutputValue) { + if len(outputValues) > 0 { + v.view.streams.Print(v.view.colorize.Color("[reset][bold][green]\nOutputs:\n\n")) + NewOutput(arguments.ViewHuman, v.view).Output("", outputValues) + } +} + +func (v *RefreshHuman) Operation() Operation { + return NewOperation(arguments.ViewHuman, v.inAutomation, v.view) +} + +func (v *RefreshHuman) Hooks() []tofu.Hook { + return []tofu.Hook{ + v.countHook, + NewUiHook(v.view), + } +} + +func (v *RefreshHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *RefreshHuman) HelpPrompt() { + v.view.HelpPrompt("refresh") +} + +// The RefreshJSON implementation renders streaming JSON logs, suitable for +// integrating with other software. +type RefreshJSON struct { + view *JSONView +} + +var _ Refresh = (*RefreshJSON)(nil) + +func (v *RefreshJSON) Outputs(outputValues map[string]*states.OutputValue) { + outputs, diags := json.OutputsFromMap(outputValues) + if diags.HasErrors() { + v.Diagnostics(diags) + } else { + v.view.Outputs(outputs) + } +} + +func (v *RefreshJSON) Operation() Operation { + return &OperationJSON{view: v.view} +} + +func (v *RefreshJSON) Hooks() []tofu.Hook { + return []tofu.Hook{ + newJSONHook(v.view), + } +} + +func (v *RefreshJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +func (v *RefreshJSON) HelpPrompt() { +} diff --git a/pkg/command/views/refresh_test.go b/pkg/command/views/refresh_test.go new file mode 100644 index 00000000000..99706af31c1 --- /dev/null +++ b/pkg/command/views/refresh_test.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/zclconf/go-cty/cty" +) + +// Ensure that the correct view type and in-automation settings propagate to the +// Operation view. +func TestRefreshHuman_operation(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewRefresh(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)).Operation() + if hv, ok := v.(*OperationHuman); !ok { + t.Fatalf("unexpected return type %T", v) + } else if hv.inAutomation != true { + t.Fatalf("unexpected inAutomation value on Operation view") + } +} + +// Verify that Hooks includes a UI hook +func TestRefreshHuman_hooks(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + defer done(t) + v := NewRefresh(arguments.ViewHuman, NewView(streams).SetRunningInAutomation(true)) + hooks := v.Hooks() + + var uiHook *UiHook + for _, hook := range hooks { + if ch, ok := hook.(*UiHook); ok { + uiHook = ch + } + } + if uiHook == nil { + t.Fatalf("expected Hooks to include a UiHook: %#v", hooks) + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. +func TestRefreshHuman_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewRefresh(arguments.ViewHuman, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "foo": {Value: cty.StringVal("secret")}, + }) + + got := done(t).Stdout() + for _, want := range []string{"Outputs:", `foo = "secret"`} { + if !strings.Contains(got, want) { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + } +} + +// Outputs should do nothing if there are no outputs to render. 
+func TestRefreshHuman_outputsEmpty(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewRefresh(arguments.ViewHuman, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{}) + + got := done(t).Stdout() + if got != "" { + t.Errorf("output should be empty, but got: %q", got) + } +} + +// Basic test coverage of Outputs, since most of its functionality is tested +// elsewhere. +func TestRefreshJSON_outputs(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + v := NewRefresh(arguments.ViewJSON, NewView(streams)) + + v.Outputs(map[string]*states.OutputValue{ + "boop_count": {Value: cty.NumberIntVal(92)}, + "password": {Value: cty.StringVal("horse-battery").Mark(marks.Sensitive), Sensitive: true}, + }) + + want := []map[string]interface{}{ + { + "@level": "info", + "@message": "Outputs: 2", + "@module": "tofu.ui", + "type": "outputs", + "outputs": map[string]interface{}{ + "boop_count": map[string]interface{}{ + "sensitive": false, + "value": float64(92), + "type": "number", + }, + "password": map[string]interface{}{ + "sensitive": true, + "type": "string", + }, + }, + }, + } + testJSONViewOutputEquals(t, done(t).Stdout(), want) +} diff --git a/pkg/command/views/show.go b/pkg/command/views/show.go new file mode 100644 index 00000000000..bd366563a08 --- /dev/null +++ b/pkg/command/views/show.go @@ -0,0 +1,172 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +type Show interface { + // Display renders the plan, if it is available. If plan is nil, it renders the statefile. + Display(config *configs.Config, plan *plans.Plan, planJSON *cloudplan.RemotePlanJSON, stateFile *statefile.File, schemas *tofu.Schemas) int + + // Diagnostics renders early diagnostics, resulting from argument parsing. + Diagnostics(diags tfdiags.Diagnostics) +} + +func NewShow(vt arguments.ViewType, view *View) Show { + switch vt { + case arguments.ViewJSON: + return &ShowJSON{view: view} + case arguments.ViewHuman: + return &ShowHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +type ShowHuman struct { + view *View +} + +var _ Show = (*ShowHuman)(nil) + +func (v *ShowHuman) Display(config *configs.Config, plan *plans.Plan, planJSON *cloudplan.RemotePlanJSON, stateFile *statefile.File, schemas *tofu.Schemas) int { + renderer := jsonformat.Renderer{ + Colorize: v.view.colorize, + Streams: v.view.streams, + RunningInAutomation: v.view.runningInAutomation, + ShowSensitive: v.view.showSensitive, + } + + // Prefer to display a pre-built JSON plan, if we got one; then, fall back + // to building one ourselves. 
+ if planJSON != nil { + if !planJSON.Redacted { + v.view.streams.Eprintf("Didn't get renderable JSON plan format for human display") + return 1 + } + // The redacted json plan format can be decoded into a jsonformat.Plan + p := jsonformat.Plan{} + r := bytes.NewReader(planJSON.JSONBytes) + if err := json.NewDecoder(r).Decode(&p); err != nil { + v.view.streams.Eprintf("Couldn't decode renderable JSON plan format: %s", err) + } + + v.view.streams.Print(v.view.colorize.Color(planJSON.RunHeader + "\n")) + renderer.RenderHumanPlan(p, planJSON.Mode, planJSON.Qualities...) + v.view.streams.Print(v.view.colorize.Color("\n" + planJSON.RunFooter + "\n")) + } else if plan != nil { + outputs, changed, drift, attrs, err := jsonplan.MarshalForRenderer(plan, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) + return 1 + } + + jplan := jsonformat.Plan{ + PlanFormatVersion: jsonplan.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + OutputChanges: outputs, + ResourceChanges: changed, + ResourceDrift: drift, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + RelevantAttributes: attrs, + } + + var opts []plans.Quality + if !plan.CanApply() { + opts = append(opts, plans.NoChanges) + } + if plan.Errored { + opts = append(opts, plans.Errored) + } + + renderer.RenderHumanPlan(jplan, plan.UIMode, opts...) 
+ } else { + if stateFile == nil { + v.view.streams.Println("No state.") + return 0 + } + + root, outputs, err := jsonstate.MarshalForRenderer(stateFile, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal state to json: %s", err) + return 1 + } + + jstate := jsonformat.State{ + StateFormatVersion: jsonstate.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + RootModule: root, + RootModuleOutputs: outputs, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + } + + renderer.RenderHumanState(jstate) + } + return 0 +} + +func (v *ShowHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +type ShowJSON struct { + view *View +} + +var _ Show = (*ShowJSON)(nil) + +func (v *ShowJSON) Display(config *configs.Config, plan *plans.Plan, planJSON *cloudplan.RemotePlanJSON, stateFile *statefile.File, schemas *tofu.Schemas) int { + // Prefer to display a pre-built JSON plan, if we got one; then, fall back + // to building one ourselves. + if planJSON != nil { + if planJSON.Redacted { + v.view.streams.Eprintf("Didn't get external JSON plan format") + return 1 + } + v.view.streams.Println(string(planJSON.JSONBytes)) + } else if plan != nil { + planJSON, err := jsonplan.Marshal(config, plan, stateFile, schemas) + + if err != nil { + v.view.streams.Eprintf("Failed to marshal plan to json: %s", err) + return 1 + } + v.view.streams.Println(string(planJSON)) + } else { + // It is possible that there is neither state nor a plan. + // That's ok, we'll just return an empty object. + jsonState, err := jsonstate.Marshal(stateFile, schemas) + if err != nil { + v.view.streams.Eprintf("Failed to marshal state to json: %s", err) + return 1 + } + v.view.streams.Println(string(jsonState)) + } + return 0 +} + +// Diagnostics should only be called if show cannot be executed. +// In this case, we choose to render human-readable diagnostic output, +// primarily for backwards compatibility. 
+func (v *ShowJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} diff --git a/pkg/command/views/show_test.go b/pkg/command/views/show_test.go new file mode 100644 index 00000000000..677504c013b --- /dev/null +++ b/pkg/command/views/show_test.go @@ -0,0 +1,241 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "encoding/json" + "os" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tofu" + + "github.com/zclconf/go-cty/cty" +) + +func TestShowHuman(t *testing.T) { + redactedPath := "./testdata/plans/redacted-plan.json" + redactedPlanJson, err := os.ReadFile(redactedPath) + if err != nil { + t.Fatalf("couldn't read json plan test data at %s for showing a cloud plan. 
Did the file get moved?", redactedPath) + } + testCases := map[string]struct { + plan *plans.Plan + jsonPlan *cloudplan.RemotePlanJSON + stateFile *statefile.File + schemas *tofu.Schemas + wantExact bool + wantString string + }{ + "plan file": { + testPlan(t), + nil, + nil, + testSchemas(), + false, + "# test_resource.foo will be created", + }, + "cloud plan file": { + nil, + &cloudplan.RemotePlanJSON{ + JSONBytes: redactedPlanJson, + Redacted: true, + Mode: plans.NormalMode, + Qualities: []plans.Quality{}, + RunHeader: "[reset][yellow]To view this run in a browser, visit:\nhttps://app.example.com/app/example_org/example_workspace/runs/run-run-bugsBUGSbugsBUGS[reset]", + RunFooter: "[reset][green]Run status: planned and saved (confirmable)[reset]\n[green]Workspace is unlocked[reset]", + }, + nil, + nil, + false, + "# null_resource.foo will be created", + }, + "statefile": { + nil, + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: testState(), + }, + testSchemas(), + false, + "# test_resource.foo:", + }, + "empty statefile": { + nil, + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: states.NewState(), + }, + testSchemas(), + true, + "The state file is empty. 
No resources are represented.\n", + }, + "nothing": { + nil, + nil, + nil, + nil, + true, + "No state.\n", + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewShow(arguments.ViewHuman, view) + + code := v.Display(nil, testCase.plan, testCase.jsonPlan, testCase.stateFile, testCase.schemas) + if code != 0 { + t.Errorf("expected 0 return code, got %d", code) + } + + output := done(t) + got := output.Stdout() + want := testCase.wantString + if (testCase.wantExact && got != want) || (!testCase.wantExact && !strings.Contains(got, want)) { + t.Fatalf("unexpected output\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestShowJSON(t *testing.T) { + unredactedPath := "../testdata/show-json/basic-create/output.json" + unredactedPlanJson, err := os.ReadFile(unredactedPath) + if err != nil { + t.Fatalf("couldn't read json plan test data at %s for showing a cloud plan. 
Did the file get moved?", unredactedPath) + } + testCases := map[string]struct { + plan *plans.Plan + jsonPlan *cloudplan.RemotePlanJSON + stateFile *statefile.File + }{ + "plan file": { + testPlan(t), + nil, + nil, + }, + "cloud plan file": { + nil, + &cloudplan.RemotePlanJSON{ + JSONBytes: unredactedPlanJson, + Redacted: false, + Mode: plans.NormalMode, + Qualities: []plans.Quality{}, + RunHeader: "[reset][yellow]To view this run in a browser, visit:\nhttps://app.example.com/app/example_org/example_workspace/runs/run-run-bugsBUGSbugsBUGS[reset]", + RunFooter: "[reset][green]Run status: planned and saved (confirmable)[reset]\n[green]Workspace is unlocked[reset]", + }, + nil, + }, + "statefile": { + nil, + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: testState(), + }, + }, + "empty statefile": { + nil, + nil, + &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: states.NewState(), + }, + }, + "nothing": { + nil, + nil, + nil, + }, + } + + config, _, configCleanup := initwd.MustLoadConfigForTests(t, "./testdata/show", "tests") + defer configCleanup() + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewShow(arguments.ViewJSON, view) + + schemas := &tofu.Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + }, + } + + code := v.Display(config, testCase.plan, testCase.jsonPlan, testCase.stateFile, schemas) + + if code != 0 { + t.Errorf("expected 0 return code, got %d", code) + } + + // Make sure the result looks like JSON; we 
comprehensively test + // the structure of this output in the command package tests. + var result map[string]interface{} + got := done(t).All() + t.Logf("output: %s", got) + if err := json.Unmarshal([]byte(got), &result); err != nil { + t.Fatal(err) + } + }) + } +} + +// testState returns a test State structure. +func testState() *states.State { + return states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // DeepCopy is used here to ensure our synthetic state matches exactly + // with a state that will have been copied during the command + // operation, and all fields have been copied correctly. + }).DeepCopy() +} diff --git a/pkg/command/views/state_locker.go b/pkg/command/views/state_locker.go new file mode 100644 index 00000000000..fea0cb7b93b --- /dev/null +++ b/pkg/command/views/state_locker.go @@ -0,0 +1,84 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/kubegems/opentofu/pkg/command/arguments" +) + +// The StateLocker view is used to display locking/unlocking status messages +// if the state lock process takes longer than expected. +type StateLocker interface { + Locking() + Unlocking() +} + +// NewStateLocker returns an initialized StateLocker implementation for the given ViewType. 
+func NewStateLocker(vt arguments.ViewType, view *View) StateLocker { + switch vt { + case arguments.ViewHuman: + return &StateLockerHuman{view: view} + case arguments.ViewJSON: + return &StateLockerJSON{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// StateLockerHuman is an implementation of StateLocker which prints status to +// a terminal. +type StateLockerHuman struct { + view *View +} + +var _ StateLocker = (*StateLockerHuman)(nil) +var _ StateLocker = (*StateLockerJSON)(nil) + +func (v *StateLockerHuman) Locking() { + v.view.streams.Println("Acquiring state lock. This may take a few moments...") +} + +func (v *StateLockerHuman) Unlocking() { + v.view.streams.Println("Releasing state lock. This may take a few moments...") +} + +// StateLockerJSON is an implementation of StateLocker which prints the state lock status +// to a terminal in machine-readable JSON form. +type StateLockerJSON struct { + view *View +} + +func (v *StateLockerJSON) Locking() { + current_timestamp := time.Now().Format(time.RFC3339) + + json_data := map[string]string{ + "@level": "info", + "@message": "Acquiring state lock. This may take a few moments...", + "@module": "tofu.ui", + "@timestamp": current_timestamp, + "type": "state_lock_acquire"} + + lock_info_message, _ := json.Marshal(json_data) + v.view.streams.Println(string(lock_info_message)) +} + +func (v *StateLockerJSON) Unlocking() { + current_timestamp := time.Now().Format(time.RFC3339) + + json_data := map[string]string{ + "@level": "info", + "@message": "Releasing state lock. 
This may take a few moments...", + "@module": "tofu.ui", + "@timestamp": current_timestamp, + "type": "state_lock_release"} + + lock_info_message, _ := json.Marshal(json_data) + v.view.streams.Println(string(lock_info_message)) +} diff --git a/pkg/command/views/test.go b/pkg/command/views/test.go new file mode 100644 index 00000000000..eab2e52aa2a --- /dev/null +++ b/pkg/command/views/test.go @@ -0,0 +1,630 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "bytes" + "fmt" + + "github.com/mitchellh/colorstring" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/command/jsonformat" + "github.com/kubegems/opentofu/pkg/command/jsonplan" + "github.com/kubegems/opentofu/pkg/command/jsonprovider" + "github.com/kubegems/opentofu/pkg/command/jsonstate" + "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/moduletest" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Test renders outputs for test executions. +type Test interface { + // Abstract should print an early summary of the tests that will be + // executed. This will be called before the tests have been executed so + // the status for everything within suite will be test.Pending. + // + // This should be used to state what is going to be tested. + Abstract(suite *moduletest.Suite) + + // Conclusion should print out a summary of the tests including their + // completed status. 
+ Conclusion(suite *moduletest.Suite) + + // File prints out the summary for an entire test file. + File(file *moduletest.File) + + // Run prints out the summary for a single test run block. + Run(run *moduletest.Run, file *moduletest.File) + + // DestroySummary prints out the summary of the destroy step of each test + // file. If everything goes well, this should be empty. + DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) + + // Diagnostics prints out the provided diagnostics. + Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) + + // Interrupted prints out a message stating that an interrupt has been + // received and testing will stop. + Interrupted() + + // FatalInterrupt prints out a message stating that a hard interrupt has + // been received and testing will stop and cleanup will be skipped. + FatalInterrupt() + + // FatalInterruptSummary prints out the resources that were held in state + // and were being created at the time the FatalInterrupt was received. + // + // This will typically be called in place of DestroySummary, as there is no + // guarantee that this function will be called during a FatalInterrupt. In + // addition, this function prints additional details about the current + // operation alongside the current state as the state will be missing newly + // created resources that also need to be handled manually. 
+ FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, states map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) +} + +func NewTest(vt arguments.ViewType, view *View) Test { + switch vt { + case arguments.ViewJSON: + return &TestJSON{ + view: NewJSONView(view), + } + case arguments.ViewHuman: + return &TestHuman{ + view: view, + } + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +type TestHuman struct { + view *View +} + +var _ Test = (*TestHuman)(nil) + +func (t *TestHuman) Abstract(_ *moduletest.Suite) { + // Do nothing, we don't print an abstract for the human view. +} + +func (t *TestHuman) Conclusion(suite *moduletest.Suite) { + t.view.streams.Println() + + counts := make(map[moduletest.Status]int) + for _, file := range suite.Files { + for _, run := range file.Runs { + count := counts[run.Status] + counts[run.Status] = count + 1 + } + } + + if suite.Status <= moduletest.Skip { + // Then no tests. + t.view.streams.Print("Executed 0 tests") + if counts[moduletest.Skip] > 0 { + t.view.streams.Printf(", %d skipped.\n", counts[moduletest.Skip]) + } else { + t.view.streams.Println(".") + } + return + } + + if suite.Status == moduletest.Pass { + t.view.streams.Print(t.view.colorize.Color("[green]Success![reset]")) + } else { + t.view.streams.Print(t.view.colorize.Color("[red]Failure![reset]")) + } + + t.view.streams.Printf(" %d passed, %d failed", counts[moduletest.Pass], counts[moduletest.Fail]+counts[moduletest.Error]) + if counts[moduletest.Skip] > 0 { + t.view.streams.Printf(", %d skipped.\n", counts[moduletest.Skip]) + } else { + t.view.streams.Println(".") + } +} + +func (t *TestHuman) File(file *moduletest.File) { + t.view.streams.Printf("%s... %s\n", file.Name, colorizeTestStatus(file.Status, t.view.colorize)) + t.Diagnostics(nil, file, file.Diagnostics) +} + +func (t *TestHuman) Run(run *moduletest.Run, file *moduletest.File) { + t.view.streams.Printf(" run %q... 
%s\n", run.Name, colorizeTestStatus(run.Status, t.view.colorize)) + + if run.Verbose != nil { + // We're going to be more verbose about what we print, here's the plan + // or the state depending on the type of run we did. + + schemas := &tofu.Schemas{ + Providers: run.Verbose.Providers, + Provisioners: run.Verbose.Provisioners, + } + + renderer := jsonformat.Renderer{ + Streams: t.view.streams, + Colorize: t.view.colorize, + RunningInAutomation: t.view.runningInAutomation, + } + + if run.Config.Command == configs.ApplyTestCommand { + // Then we'll print the state. + root, outputs, err := jsonstate.MarshalForRenderer(statefile.New(run.Verbose.State, file.Name, uint64(run.Index)), schemas) + if err != nil { + run.Diagnostics = run.Diagnostics.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to render test state", + fmt.Sprintf("OpenTofu could not marshal the state for display: %v", err))) + } else { + state := jsonformat.State{ + StateFormatVersion: jsonstate.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + RootModule: root, + RootModuleOutputs: outputs, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + } + + renderer.RenderHumanState(state) + } + } else { + // We'll print the plan. 
+ outputs, changed, drift, attrs, err := jsonplan.MarshalForRenderer(run.Verbose.Plan, schemas) + if err != nil { + run.Diagnostics = run.Diagnostics.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to render test plan", + fmt.Sprintf("OpenTofu could not marshal the plan for display: %v", err))) + } else { + plan := jsonformat.Plan{ + PlanFormatVersion: jsonplan.FormatVersion, + ProviderFormatVersion: jsonprovider.FormatVersion, + OutputChanges: outputs, + ResourceChanges: changed, + ResourceDrift: drift, + ProviderSchemas: jsonprovider.MarshalForRenderer(schemas), + RelevantAttributes: attrs, + } + + var opts []plans.Quality + if !run.Verbose.Plan.CanApply() { + opts = append(opts, plans.NoChanges) + } + if run.Verbose.Plan.Errored { + opts = append(opts, plans.Errored) + } + + renderer.RenderHumanPlan(plan, run.Verbose.Plan.UIMode, opts...) + } + } + } + + // Finally we'll print out a summary of the diagnostics from the run. + t.Diagnostics(run, file, run.Diagnostics) +} + +func (t *TestHuman) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) { + identifier := file.Name + if run != nil { + identifier = fmt.Sprintf("%s/%s", identifier, run.Name) + } + + if diags.HasErrors() { + t.view.streams.Eprint(format.WordWrap(fmt.Sprintf("OpenTofu encountered an error destroying resources created while executing %s.\n", identifier), t.view.errorColumns())) + } + t.Diagnostics(run, file, diags) + + if state.HasManagedResourceInstanceObjects() { + t.view.streams.Eprint(format.WordWrap(fmt.Sprintf("\nOpenTofu left the following resources in state after executing %s, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:\n", identifier), t.view.errorColumns())) + for _, resource := range state.AllResourceInstanceObjectAddrs() { + if resource.DeposedKey != states.NotDeposed { + t.view.streams.Eprintf(" - %s (%s)\n", 
resource.Instance, resource.DeposedKey) + continue + } + t.view.streams.Eprintf(" - %s\n", resource.Instance) + } + } +} + +func (t *TestHuman) Diagnostics(_ *moduletest.Run, _ *moduletest.File, diags tfdiags.Diagnostics) { + t.view.Diagnostics(diags) +} + +func (t *TestHuman) Interrupted() { + t.view.streams.Eprintln(format.WordWrap(interrupted, t.view.errorColumns())) +} + +func (t *TestHuman) FatalInterrupt() { + t.view.streams.Eprintln(format.WordWrap(fatalInterrupt, t.view.errorColumns())) +} + +func (t *TestHuman) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, existingStates map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) { + t.view.streams.Eprint(format.WordWrap(fmt.Sprintf("\nOpenTofu was interrupted while executing %s, and may not have performed the expected cleanup operations.\n", file.Name), t.view.errorColumns())) + + // Print out the main state first, this is the state that isn't associated + // with a run block. + if state, exists := existingStates[nil]; exists && !state.Empty() { + t.view.streams.Eprint(format.WordWrap("\nOpenTofu has already created the following resources from the module under test:\n", t.view.errorColumns())) + for _, resource := range state.AllResourceInstanceObjectAddrs() { + if resource.DeposedKey != states.NotDeposed { + t.view.streams.Eprintf(" - %s (%s)\n", resource.Instance, resource.DeposedKey) + continue + } + t.view.streams.Eprintf(" - %s\n", resource.Instance) + } + } + + // Then print out the other states in order. 
+ for _, run := range file.Runs { + state, exists := existingStates[run] + if !exists || state.Empty() { + continue + } + + t.view.streams.Eprint(format.WordWrap(fmt.Sprintf("\nOpenTofu has already created the following resources for %q from %q:\n", run.Name, run.Config.Module.Source), t.view.errorColumns())) + for _, resource := range state.AllResourceInstanceObjectAddrs() { + if resource.DeposedKey != states.NotDeposed { + t.view.streams.Eprintf(" - %s (%s)\n", resource.Instance, resource.DeposedKey) + continue + } + t.view.streams.Eprintf(" - %s\n", resource.Instance) + } + } + + if len(created) == 0 { + // No planned changes, so we won't print anything. + return + } + + var resources []string + for _, change := range created { + resources = append(resources, change.Addr.String()) + } + + if len(resources) > 0 { + module := "the module under test" + if run.Config.ConfigUnderTest != nil { + module = fmt.Sprintf("%q", run.Config.Module.Source.String()) + } + + t.view.streams.Eprint(format.WordWrap(fmt.Sprintf("\nOpenTofu was in the process of creating the following resources for %q from %s, and they may not have been destroyed:\n", run.Name, module), t.view.errorColumns())) + for _, resource := range resources { + t.view.streams.Eprintf(" - %s\n", resource) + } + } +} + +type TestJSON struct { + view *JSONView +} + +var _ Test = (*TestJSON)(nil) + +func (t *TestJSON) Abstract(suite *moduletest.Suite) { + var fileCount, runCount int + + abstract := json.TestSuiteAbstract{} + for name, file := range suite.Files { + fileCount++ + var runs []string + for _, run := range file.Runs { + runCount++ + runs = append(runs, run.Name) + } + abstract[name] = runs + } + + files := "files" + runs := "run blocks" + + if fileCount == 1 { + files = "file" + } + + if runCount == 1 { + runs = "run block" + } + + t.view.log.Info( + fmt.Sprintf("Found %d %s and %d %s", fileCount, files, runCount, runs), + "type", json.MessageTestAbstract, + json.MessageTestAbstract, abstract) +} + +func 
(t *TestJSON) Conclusion(suite *moduletest.Suite) { + summary := json.TestSuiteSummary{ + Status: json.ToTestStatus(suite.Status), + } + for _, file := range suite.Files { + for _, run := range file.Runs { + switch run.Status { + case moduletest.Skip: + summary.Skipped++ + case moduletest.Pass: + summary.Passed++ + case moduletest.Error: + summary.Errored++ + case moduletest.Fail: + summary.Failed++ + } + } + } + + var message bytes.Buffer + if suite.Status <= moduletest.Skip { + // Then no tests. + message.WriteString("Executed 0 tests") + if summary.Skipped > 0 { + message.WriteString(fmt.Sprintf(", %d skipped.", summary.Skipped)) + } else { + message.WriteString(".") + } + } else { + if suite.Status == moduletest.Pass { + message.WriteString("Success!") + } else { + message.WriteString("Failure!") + } + + message.WriteString(fmt.Sprintf(" %d passed, %d failed", summary.Passed, summary.Failed+summary.Errored)) + if summary.Skipped > 0 { + message.WriteString(fmt.Sprintf(", %d skipped.", summary.Skipped)) + } else { + message.WriteString(".") + } + } + + t.view.log.Info( + message.String(), + "type", json.MessageTestSummary, + json.MessageTestSummary, summary) +} + +func (t *TestJSON) File(file *moduletest.File) { + t.view.log.Info( + fmt.Sprintf("%s... %s", file.Name, testStatus(file.Status)), + "type", json.MessageTestFile, + json.MessageTestFile, json.TestFileStatus{file.Name, json.ToTestStatus(file.Status)}, + "@testfile", file.Name) + t.Diagnostics(nil, file, file.Diagnostics) +} + +func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) { + t.view.log.Info( + fmt.Sprintf(" %q... 
%s", run.Name, testStatus(run.Status)), + "type", json.MessageTestRun, + json.MessageTestRun, json.TestRunStatus{file.Name, run.Name, json.ToTestStatus(run.Status)}, + "@testfile", file.Name, + "@testrun", run.Name) + + if run.Verbose != nil { + + schemas := &tofu.Schemas{ + Providers: run.Verbose.Providers, + Provisioners: run.Verbose.Provisioners, + } + + if run.Config.Command == configs.ApplyTestCommand { + state, err := jsonstate.MarshalForLog(statefile.New(run.Verbose.State, file.Name, uint64(run.Index)), schemas) + if err != nil { + run.Diagnostics = run.Diagnostics.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to render test state", + fmt.Sprintf("OpenTofu could not marshal the state for display: %v", err))) + } else { + t.view.log.Info( + "-verbose flag enabled, printing state", + "type", json.MessageTestState, + json.MessageTestState, state, + "@testfile", file.Name, + "@testrun", run.Name) + } + } else { + plan, err := jsonplan.MarshalForLog(run.Verbose.Config, run.Verbose.Plan, nil, schemas) + if err != nil { + run.Diagnostics = run.Diagnostics.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to render test plan", + fmt.Sprintf("OpenTofu could not marshal the plan for display: %v", err))) + } else { + t.view.log.Info( + "-verbose flag enabled, printing plan", + "type", json.MessageTestPlan, + json.MessageTestPlan, plan, + "@testfile", file.Name, + "@testrun", run.Name) + } + } + } + + t.Diagnostics(run, file, run.Diagnostics) +} + +func (t *TestJSON) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) { + if state.HasManagedResourceInstanceObjects() { + cleanup := json.TestFileCleanup{} + for _, resource := range state.AllResourceInstanceObjectAddrs() { + cleanup.FailedResources = append(cleanup.FailedResources, json.TestFailedResource{ + Instance: resource.Instance.String(), + DeposedKey: resource.DeposedKey.String(), + }) + } + + if run != nil { + t.view.log.Error( + 
fmt.Sprintf("OpenTofu left some resources in state after executing %s/%s, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", file.Name, run.Name), + "type", json.MessageTestCleanup, + json.MessageTestCleanup, cleanup, + "@testfile", file.Name, + "@testrun", run.Name) + } else { + t.view.log.Error( + fmt.Sprintf("OpenTofu left some resources in state after executing %s, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", file.Name), + "type", json.MessageTestCleanup, + json.MessageTestCleanup, cleanup, + "@testfile", file.Name) + } + } + t.Diagnostics(run, file, diags) +} + +func (t *TestJSON) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) { + var metadata []interface{} + if file != nil { + metadata = append(metadata, "@testfile", file.Name) + } + if run != nil { + metadata = append(metadata, "@testrun", run.Name) + } + t.view.Diagnostics(diags, metadata...) 
+}
+
+// Interrupted logs a generic notice that the test operation was interrupted.
+func (t *TestJSON) Interrupted() {
+	t.view.Log(interrupted)
+}
+
+// FatalInterrupt logs a notice that a fatal (hard) interrupt was received and
+// cleanup may be incomplete.
+func (t *TestJSON) FatalInterrupt() {
+	t.view.Log(fatalInterrupt)
+}
+
+// FatalInterruptSummary emits a structured summary of the resources that were
+// already recorded in state when the interrupt arrived (keyed by run block
+// name, with a nil run key meaning the main state of the module under test)
+// together with any resources whose creation was still in progress, so the
+// user knows what may need manual cleanup.
+func (t *TestJSON) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, existingStates map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) {
+
+	message := json.TestFatalInterrupt{
+		States: make(map[string][]json.TestFailedResource),
+	}
+
+	// NOTE(review): this loop variable shadows the run parameter, which is
+	// otherwise unused by this method.
+	for run, state := range existingStates {
+		if state.Empty() {
+			continue
+		}
+
+		var resources []json.TestFailedResource
+		for _, resource := range state.AllResourceInstanceObjectAddrs() {
+			resources = append(resources, json.TestFailedResource{
+				Instance:   resource.Instance.String(),
+				DeposedKey: resource.DeposedKey.String(),
+			})
+		}
+
+		// A nil run means these resources belong to the main state rather
+		// than to any particular run block.
+		if run == nil {
+			message.State = resources
+		} else {
+			message.States[run.Name] = resources
+		}
+	}
+
+	if len(created) > 0 {
+		for _, change := range created {
+			message.Planned = append(message.Planned, change.Addr.String())
+		}
+	}
+
+	if len(message.States) == 0 && len(message.State) == 0 && len(message.Planned) == 0 {
+		// Then we don't have any information to share with the user.
+		return
+	}
+
+	t.view.log.Error(
+		"OpenTofu was interrupted during test execution, and may not have performed the expected cleanup operations.",
+		"type", json.MessageTestInterrupt,
+		json.MessageTestInterrupt, message,
+		"@testfile", file.Name)
+}
+
+// colorizeTestStatus returns a terminal-colorized label for the given status:
+// red "fail" for Error or Fail, green "pass", and light gray for "skip" and
+// "pending". It panics on an unrecognized status value.
+func colorizeTestStatus(status moduletest.Status, color *colorstring.Colorize) string {
+	switch status {
+	case moduletest.Error, moduletest.Fail:
+		return color.Color("[red]fail[reset]")
+	case moduletest.Pass:
+		return color.Color("[green]pass[reset]")
+	case moduletest.Skip:
+		return color.Color("[light_gray]skip[reset]")
+	case moduletest.Pending:
+		return color.Color("[light_gray]pending[reset]")
+	default:
+		panic("unrecognized status: " + status.String())
+	}
+}
+
+// testStatus is the plain-text counterpart of colorizeTestStatus; note that
+// both Error and Fail are reported as "fail". It panics on an unrecognized
+// status value.
+func testStatus(status moduletest.Status) string {
+	switch status {
+	case moduletest.Error, moduletest.Fail:
+		return "fail"
+	case moduletest.Pass:
+		return "pass"
+	case moduletest.Skip:
+		return "skip"
+	case moduletest.Pending:
+		return "pending"
+	default:
+		panic("unrecognized status: " + status.String())
+	}
+}
+
+// SaveErroredTestStateFile is a helper function to be invoked in DestroySummary
+// to store the state in errored_test.tfstate and to handle the diagnostics and
+// errors associated with that operation.
+func SaveErroredTestStateFile(state *states.State, run *moduletest.Run, file *moduletest.File, view Test) {
+	var diags tfdiags.Diagnostics
+	localFileSystem := statemgr.NewFilesystem("errored_test.tfstate", encryption.StateEncryptionDisabled())
+	stateFile := statemgr.NewStateFile()
+	stateFile.State = state
+
+	// Create an operation so that EmergencyDumpState() can be invoked as a
+	// fallback if writing the state file fails below; also announce the write
+	// in the style appropriate to the concrete view.
+	var op Operation
+	switch v := view.(type) {
+	case *TestHuman:
+		op = NewOperation(arguments.ViewHuman, false, v.view)
+		v.view.streams.Eprint(format.WordWrap("\nWriting state to file: errored_test.tfstate\n", v.view.errorColumns()))
+	case *TestJSON:
+		op = &OperationJSON{
+			view: v.view,
+		}
+		v.view.log.Info("Writing state to file: errored_test.tfstate")
+	default:
+	}
+
+ writeErr := localFileSystem.WriteStateForMigration(stateFile, true) + if writeErr != nil { + // if the write operation to errored_test.tfstate executed by WriteStateForMigration fails, as a final attempt to + // prevent leaving the user with no state file at all, the JSON state is printed onto the terminal by EmergencyDumpState() + + if dumpErr := op.EmergencyDumpState(stateFile, encryption.StateEncryptionDisabled()); dumpErr != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr), + )) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to persist state", + stateWriteConsoleFallbackError, + )) + } + view.Diagnostics(run, file, diags) +} + +const stateWriteFatalErrorFmt = `Failed to save state after an errored test run. + +Error serializing state: %s + +A catastrophic error has prevented OpenTofu from persisting the state during an errored test run. + +This is a serious bug in OpenTofu and should be reported. +` + +const stateWriteConsoleFallbackError = `The errors shown above prevented OpenTofu from writing the state to +the errored_test.tfstate. As a fallback, the raw state data is printed above as a JSON object. + +To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file named "errored_test.tfstate". +` diff --git a/pkg/command/views/test_test.go b/pkg/command/views/test_test.go new file mode 100644 index 00000000000..7e24a318a8e --- /dev/null +++ b/pkg/command/views/test_test.go @@ -0,0 +1,3584 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/moduletest" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestTestHuman_Conclusion(t *testing.T) { + tcs := map[string]struct { + Suite *moduletest.Suite + Expected string + }{ + "no tests": { + Suite: &moduletest.Suite{}, + Expected: "\nExecuted 0 tests.\n", + }, + + "only skipped tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Skip, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Skip, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Skip, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + Expected: "\nExecuted 0 tests, 6 skipped.\n", + }, + + "only passed tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Pass, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: 
"test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + Expected: "\nSuccess! 6 passed, 0 failed.\n", + }, + + "passed and skipped tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Pass, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + Expected: "\nSuccess! 
4 passed, 0 failed, 2 skipped.\n", + }, + + "only failed tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + }, + }, + Expected: "\nFailure! 0 passed, 6 failed.\n", + }, + + "failed and skipped tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + Expected: "\nFailure! 
0 passed, 4 failed, 2 skipped.\n", + }, + + "failed, passed and skipped tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + Expected: "\nFailure! 2 passed, 2 failed, 2 skipped.\n", + }, + + "failed and errored tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Error, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Error, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Error, + }, + { + Name: "test_three", + Status: moduletest.Error, + }, + }, + }, + }, + }, + Expected: "\nFailure! 
0 passed, 6 failed.\n", + }, + + "failed, errored, passed, and skipped tests": { + Suite: &moduletest.Suite{ + Status: moduletest.Error, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Error, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + Expected: "\nFailure! 2 passed, 2 failed, 2 skipped.\n", + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewHuman, NewView(streams)) + + view.Conclusion(tc.Suite) + + actual := done(t).Stdout() + expected := tc.Expected + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Fatalf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + }) + } +} + +func TestTestHuman_File(t *testing.T) { + tcs := map[string]struct { + File *moduletest.File + Expected string + }{ + "pass": { + File: &moduletest.File{Name: "main.tf", Status: moduletest.Pass}, + Expected: "main.tf... pass\n", + }, + + "pending": { + File: &moduletest.File{Name: "main.tf", Status: moduletest.Pending}, + Expected: "main.tf... pending\n", + }, + + "skip": { + File: &moduletest.File{Name: "main.tf", Status: moduletest.Skip}, + Expected: "main.tf... skip\n", + }, + + "fail": { + File: &moduletest.File{Name: "main.tf", Status: moduletest.Fail}, + Expected: "main.tf... 
fail\n", + }, + + "error": { + File: &moduletest.File{Name: "main.tf", Status: moduletest.Error}, + Expected: "main.tf... fail\n", + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewHuman, NewView(streams)) + + view.File(tc.File) + + actual := done(t).Stdout() + expected := tc.Expected + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Fatalf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + }) + } +} + +func TestTestHuman_Run(t *testing.T) { + tcs := map[string]struct { + Run *moduletest.Run + StdOut string + StdErr string + }{ + "pass": { + Run: &moduletest.Run{Name: "run_block", Status: moduletest.Pass}, + StdOut: " run \"run_block\"... pass\n", + }, + + "pass_with_diags": { + Run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Warning, "a warning occurred", "some warning happened during this test")}, + }, + StdOut: ` run "run_block"... pass + +Warning: a warning occurred + +some warning happened during this test +`, + }, + + "pending": { + Run: &moduletest.Run{Name: "run_block", Status: moduletest.Pending}, + StdOut: " run \"run_block\"... pending\n", + }, + + "skip": { + Run: &moduletest.Run{Name: "run_block", Status: moduletest.Skip}, + StdOut: " run \"run_block\"... skip\n", + }, + + "fail": { + Run: &moduletest.Run{Name: "run_block", Status: moduletest.Fail}, + StdOut: " run \"run_block\"... fail\n", + }, + + "fail_with_diags": { + Run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Fail, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Error, "a comparison failed", "details details details"), + tfdiags.Sourceless(tfdiags.Error, "a second comparison failed", "other details"), + }, + }, + StdOut: " run \"run_block\"... 
fail\n", + StdErr: ` +Error: a comparison failed + +details details details + +Error: a second comparison failed + +other details +`, + }, + + "error": { + Run: &moduletest.Run{Name: "run_block", Status: moduletest.Error}, + StdOut: " run \"run_block\"... fail\n", + }, + + "error_with_diags": { + Run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Error, + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "an error occurred", "something bad happened during this test")}, + }, + StdOut: " run \"run_block\"... fail\n", + StdErr: ` +Error: an error occurred + +something bad happened during this test +`, + }, + "verbose_plan": { + Run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Config: &configs.TestRun{ + Command: configs.PlanTestCommand, + }, + Verbose: &moduletest.Verbose{ + Plan: &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + PrevRunAddr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + After: dynamicValue( + t, + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + }), + cty.Object(map[string]cty.Type{ + "value": cty.String, + })), + }, + }, + }, + }, + }, + State: states.NewState(), // empty state + Config: &configs.Config{}, + Providers: map[addrs.Provider]providers.ProviderSchema{ + 
addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }: { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + }, + }, + }, + }, + }, + }, + }, + }, + }, + StdOut: ` run "run_block"... pass + +OpenTofu used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + + create + +OpenTofu will perform the following actions: + + # test_resource.creating will be created + + resource "test_resource" "creating" { + + value = "Hello, world!" + } + +Plan: 1 to add, 0 to change, 0 to destroy. +`, + }, + "verbose_apply": { + Run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Config: &configs.TestRun{ + Command: configs.ApplyTestCommand, + }, + Verbose: &moduletest.Verbose{ + Plan: &plans.Plan{}, // empty plan + State: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"foobar"}`), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }, + }) + }), + Config: &configs.Config{}, + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }: { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + }, + }, + }, + }, + }, + }, + }, + }, + }, + 
StdOut: ` run "run_block"... pass +# test_resource.creating: +resource "test_resource" "creating" { + value = "foobar" +} +`, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + file := &moduletest.File{ + Name: "main.tftest.hcl", + } + + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewHuman, NewView(streams)) + + view.Run(tc.Run, file) + + output := done(t) + actual, expected := output.Stdout(), tc.StdOut + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + + actual, expected = output.Stderr(), tc.StdErr + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + }) + } +} + +func TestTestHuman_DestroySummary(t *testing.T) { + tcs := map[string]struct { + diags tfdiags.Diagnostics + run *moduletest.Run + file *moduletest.File + state *states.State + stdout string + stderr string + }{ + "empty": { + diags: nil, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.NewState(), + }, + "empty_state_only_warnings": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "some thing not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "some thing not very bad happened again"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.NewState(), + stdout: ` +Warning: first warning + +some thing not very bad happened + +Warning: second warning + +some thing not very bad happened again +`, + }, + "empty_state_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "some thing not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "some thing not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + file: &moduletest.File{Name: 
"main.tftest.hcl"}, + state: states.NewState(), + stdout: ` +Warning: first warning + +some thing not very bad happened + +Warning: second warning + +some thing not very bad happened again +`, + stderr: `OpenTofu encountered an error destroying resources created while executing +main.tftest.hcl. + +Error: first error + +this time it is very bad +`, + }, + "error_from_run": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + run: &moduletest.Run{Name: "run_block"}, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.NewState(), + stderr: `OpenTofu encountered an error destroying resources created while executing +main.tftest.hcl/run_block. + +Error: first error + +this time it is very bad +`, + }, + "state_only_warnings": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "some thing not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "some thing not very bad happened again"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + stdout: ` +Warning: first warning + +some thing not very bad happened + +Warning: second warning + +some thing not very bad happened again +`, + stderr: ` +OpenTofu left the following resources in state after executing +main.tftest.hcl, these left-over resources can be viewed by reading the +statefile written to disk(errored_test.tfstate) and they need to be cleaned +up manually: + - test.bar + - test.bar (0fcb640a) + - test.foo +`, + }, + "state_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "some thing not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "some thing not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + stdout: ` +Warning: first warning + +some thing not very bad happened + +Warning: second warning + +some thing not very bad happened again +`, + stderr: `OpenTofu encountered an error destroying resources created while executing +main.tftest.hcl. + +Error: first error + +this time it is very bad + +OpenTofu left the following resources in state after executing +main.tftest.hcl, these left-over resources can be viewed by reading the +statefile written to disk(errored_test.tfstate) and they need to be cleaned +up manually: + - test.bar + - test.bar (0fcb640a) + - test.foo +`, + }, + "state_null_resource_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "some thing not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "some thing not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + 
Module: []string{}, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }, + }, + }, + CreateBeforeDestroy: false, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + }), + stdout: ` +Warning: first warning + +some thing not very bad happened + +Warning: second warning + +some thing not very bad happened again +`, + stderr: `OpenTofu encountered an error destroying resources created while executing +main.tftest.hcl. + +Error: first error + +this time it is very bad + +OpenTofu left the following resources in state after executing +main.tftest.hcl, these left-over resources can be viewed by reading the +statefile written to disk(errored_test.tfstate) and they need to be cleaned +up manually: + - null_resource.failing + - null_resource.failing_will_depend_on_me +`, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewHuman, NewView(streams)) + + view.DestroySummary(tc.diags, tc.run, tc.file, tc.state) + + output := done(t) + actual, expected := output.Stdout(), tc.stdout + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + + actual, expected = output.Stderr(), tc.stderr + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + }) + } +} + +func TestTestHuman_FatalInterruptSummary(t *testing.T) { + tcs := map[string]struct { + states map[*moduletest.Run]*states.State + run *moduletest.Run + created []*plans.ResourceInstanceChangeSrc + want string + }{ + "no_state_only_plan": { + states: make(map[*moduletest.Run]*states.State), + run: &moduletest.Run{ + Config: &configs.TestRun{}, + Name: "run_block", + }, + created: []*plans.ResourceInstanceChangeSrc{ + { + Addr: 
addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + want: ` +OpenTofu was interrupted while executing main.tftest.hcl, and may not have +performed the expected cleanup operations. + +OpenTofu was in the process of creating the following resources for +"run_block" from the module under test, and they may not have been destroyed: + - test_instance.one + - test_instance.two +`, + }, + "file_state_no_plan": { + states: map[*moduletest.Run]*states.State{ + nil: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + created: nil, + want: ` +OpenTofu was interrupted while executing main.tftest.hcl, and may not have +performed the expected cleanup operations. 
+ +OpenTofu has already created the following resources from the module under +test: + - test_instance.one + - test_instance.two +`, + }, + "run_states_no_plan": { + states: map[*moduletest.Run]*states.State{ + &moduletest.Run{ + Name: "setup_block", + Config: &configs.TestRun{ + Module: &configs.TestRunModuleCall{ + Source: addrs.ModuleSourceLocal("../setup"), + }, + }, + }: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + created: nil, + want: ` +OpenTofu was interrupted while executing main.tftest.hcl, and may not have +performed the expected cleanup operations. 
+ +OpenTofu has already created the following resources for "setup_block" from +"../setup": + - test_instance.one + - test_instance.two +`, + }, + "all_states_with_plan": { + states: map[*moduletest.Run]*states.State{ + &moduletest.Run{ + Name: "setup_block", + Config: &configs.TestRun{ + Module: &configs.TestRunModuleCall{ + Source: addrs.ModuleSourceLocal("../setup"), + }, + }, + }: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "setup_one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "setup_two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + nil: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + created: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: 
addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "new_one", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "new_two", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + run: &moduletest.Run{ + Config: &configs.TestRun{}, + Name: "run_block", + }, + want: ` +OpenTofu was interrupted while executing main.tftest.hcl, and may not have +performed the expected cleanup operations. + +OpenTofu has already created the following resources from the module under +test: + - test_instance.one + - test_instance.two + +OpenTofu has already created the following resources for "setup_block" from +"../setup": + - test_instance.setup_one + - test_instance.setup_two + +OpenTofu was in the process of creating the following resources for +"run_block" from the module under test, and they may not have been destroyed: + - test_instance.new_one + - test_instance.new_two +`, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewHuman, NewView(streams)) + + file := &moduletest.File{ + Name: "main.tftest.hcl", + Runs: func() []*moduletest.Run { + var runs []*moduletest.Run + for run := range tc.states { + if run != nil { + runs = append(runs, run) + } + } + return runs + }(), + } + + view.FatalInterruptSummary(tc.run, file, tc.states, tc.created) + actual, expected := done(t).Stderr(), tc.want + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + }) + } +} + +func TestTestJSON_Abstract(t *testing.T) { + tcs := map[string]struct { + suite *moduletest.Suite + want []map[string]interface{} + }{ + "single": { + 
suite: &moduletest.Suite{ + Files: map[string]*moduletest.File{ + "main.tftest.hcl": { + Runs: []*moduletest.Run{ + { + Name: "setup", + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Found 1 file and 1 run block", + "@module": "tofu.ui", + "test_abstract": map[string]interface{}{ + "main.tftest.hcl": []interface{}{ + "setup", + }, + }, + "type": "test_abstract", + }, + }, + }, + "plural": { + suite: &moduletest.Suite{ + Files: map[string]*moduletest.File{ + "main.tftest.hcl": { + Runs: []*moduletest.Run{ + { + Name: "setup", + }, + { + Name: "test", + }, + }, + }, + "other.tftest.hcl": { + Runs: []*moduletest.Run{ + { + Name: "test", + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Found 2 files and 3 run blocks", + "@module": "tofu.ui", + "test_abstract": map[string]interface{}{ + "main.tftest.hcl": []interface{}{ + "setup", + "test", + }, + "other.tftest.hcl": []interface{}{ + "test", + }, + }, + "type": "test_abstract", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + view.Abstract(tc.suite) + testJSONViewOutputEquals(t, done(t).All(), tc.want) + }) + } +} + +func TestTestJSON_Conclusion(t *testing.T) { + tcs := map[string]struct { + suite *moduletest.Suite + want []map[string]interface{} + }{ + "no tests": { + suite: &moduletest.Suite{}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Executed 0 tests.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "pending", + "errored": 0.0, + "failed": 0.0, + "passed": 0.0, + "skipped": 0.0, + }, + "type": "test_summary", + }, + }, + }, + + "only skipped tests": { + suite: &moduletest.Suite{ + Status: moduletest.Skip, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: 
"descriptive_test_name.tftest.hcl", + Status: moduletest.Skip, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Skip, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Executed 0 tests, 6 skipped.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "skip", + "errored": 0.0, + "failed": 0.0, + "passed": 0.0, + "skipped": 6.0, + }, + "type": "test_summary", + }, + }, + }, + + "only passed tests": { + suite: &moduletest.Suite{ + Status: moduletest.Pass, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Success! 
6 passed, 0 failed.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "pass", + "errored": 0.0, + "failed": 0.0, + "passed": 6.0, + "skipped": 0.0, + }, + "type": "test_summary", + }, + }, + }, + + "passed and skipped tests": { + suite: &moduletest.Suite{ + Status: moduletest.Pass, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Pass, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Success! 
4 passed, 0 failed, 2 skipped.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "pass", + "errored": 0.0, + "failed": 0.0, + "passed": 4.0, + "skipped": 2.0, + }, + "type": "test_summary", + }, + }, + }, + + "only failed tests": { + suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Failure! 
0 passed, 6 failed.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "fail", + "errored": 0.0, + "failed": 6.0, + "passed": 0.0, + "skipped": 0.0, + }, + "type": "test_summary", + }, + }, + }, + + "failed and skipped tests": { + suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Failure! 
0 passed, 4 failed, 2 skipped.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "fail", + "errored": 0.0, + "failed": 4.0, + "passed": 0.0, + "skipped": 2.0, + }, + "type": "test_summary", + }, + }, + }, + + "failed, passed and skipped tests": { + suite: &moduletest.Suite{ + Status: moduletest.Fail, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Skip, + }, + { + Name: "test_two", + Status: moduletest.Fail, + }, + { + Name: "test_three", + Status: moduletest.Pass, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Failure! 
2 passed, 2 failed, 2 skipped.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "fail", + "errored": 0.0, + "failed": 2.0, + "passed": 2.0, + "skipped": 2.0, + }, + "type": "test_summary", + }, + }, + }, + + "failed and errored tests": { + suite: &moduletest.Suite{ + Status: moduletest.Error, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Error, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Fail, + }, + { + Name: "test_two", + Status: moduletest.Error, + }, + { + Name: "test_three", + Status: moduletest.Error, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Failure! 
0 passed, 6 failed.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "error", + "errored": 3.0, + "failed": 3.0, + "passed": 0.0, + "skipped": 0.0, + }, + "type": "test_summary", + }, + }, + }, + + "failed, errored, passed, and skipped tests": { + suite: &moduletest.Suite{ + Status: moduletest.Error, + Files: map[string]*moduletest.File{ + "descriptive_test_name.tftest.hcl": { + Name: "descriptive_test_name.tftest.hcl", + Status: moduletest.Fail, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Pass, + }, + { + Name: "test_two", + Status: moduletest.Pass, + }, + { + Name: "test_three", + Status: moduletest.Fail, + }, + }, + }, + "other_descriptive_test_name.tftest.hcl": { + Name: "other_descriptive_test_name.tftest.hcl", + Status: moduletest.Error, + Runs: []*moduletest.Run{ + { + Name: "test_one", + Status: moduletest.Error, + }, + { + Name: "test_two", + Status: moduletest.Skip, + }, + { + Name: "test_three", + Status: moduletest.Skip, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Failure! 
2 passed, 2 failed, 2 skipped.", + "@module": "tofu.ui", + "test_summary": map[string]interface{}{ + "status": "error", + "errored": 1.0, + "failed": 1.0, + "passed": 2.0, + "skipped": 2.0, + }, + "type": "test_summary", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + view.Conclusion(tc.suite) + testJSONViewOutputEquals(t, done(t).All(), tc.want) + }) + } +} + +func TestTestJSON_DestroySummary(t *testing.T) { + tcs := map[string]struct { + file *moduletest.File + run *moduletest.Run + state *states.State + diags tfdiags.Diagnostics + want []map[string]interface{} + }{ + "empty_state_only_warnings": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "something not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "something not very bad happened again"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.NewState(), + want: []map[string]interface{}{ + { + "@level": "warn", + "@message": "Warning: first warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened", + "severity": "warning", + "summary": "first warning", + }, + "type": "diagnostic", + }, + { + "@level": "warn", + "@message": "Warning: second warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened again", + "severity": "warning", + "summary": "second warning", + }, + "type": "diagnostic", + }, + }, + }, + "empty_state_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "something not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "something not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first 
error", "this time it is very bad"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.NewState(), + want: []map[string]interface{}{ + { + "@level": "warn", + "@message": "Warning: first warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened", + "severity": "warning", + "summary": "first warning", + }, + "type": "diagnostic", + }, + { + "@level": "warn", + "@message": "Warning: second warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened again", + "severity": "warning", + "summary": "second warning", + }, + "type": "diagnostic", + }, + { + "@level": "error", + "@message": "Error: first error", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "this time it is very bad", + "severity": "error", + "summary": "first error", + }, + "type": "diagnostic", + }, + }, + }, + "state_from_run": { + file: &moduletest.File{Name: "main.tftest.hcl"}, + run: &moduletest.Run{Name: "run_block"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu left some resources in state after executing main.tftest.hcl/run_block, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_cleanup": 
map[string]interface{}{ + "failed_resources": []interface{}{ + map[string]interface{}{ + "instance": "test.foo", + }, + }, + }, + "type": "test_cleanup", + }, + }, + }, + "state_only_warnings": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "something not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "something not very bad happened again"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu left some resources in state after executing main.tftest.hcl, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_cleanup": 
map[string]interface{}{ + "failed_resources": []interface{}{ + map[string]interface{}{ + "instance": "test.bar", + }, + map[string]interface{}{ + "instance": "test.bar", + "deposed_key": "0fcb640a", + }, + map[string]interface{}{ + "instance": "test.foo", + }, + }, + }, + "type": "test_cleanup", + }, + { + "@level": "warn", + "@message": "Warning: first warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened", + "severity": "warning", + "summary": "first warning", + }, + "type": "diagnostic", + }, + { + "@level": "warn", + "@message": "Warning: second warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened again", + "severity": "warning", + "summary": "second warning", + }, + "type": "diagnostic", + }, + }, + }, + "state_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "something not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "something not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + 
Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu left some resources in state after executing main.tftest.hcl, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_cleanup": map[string]interface{}{ + "failed_resources": []interface{}{ + map[string]interface{}{ + "instance": "test.bar", + }, + map[string]interface{}{ + "instance": "test.bar", + "deposed_key": "0fcb640a", + }, + map[string]interface{}{ + "instance": "test.foo", + }, + }, + }, + "type": "test_cleanup", + }, + { + "@level": "warn", + "@message": "Warning: first warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened", + "severity": "warning", + "summary": "first warning", + }, + "type": "diagnostic", + }, + { + "@level": "warn", + "@message": "Warning: second warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened again", + "severity": "warning", + "summary": "second warning", + }, + "type": "diagnostic", + }, + { + "@level": "error", + "@message": "Error: first error", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "this time it is very bad", + "severity": "error", + "summary": "first error", + }, + "type": 
"diagnostic", + }, + }, + }, + "state_null_resource_with_errors": { + diags: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Warning, "first warning", "something not very bad happened"), + tfdiags.Sourceless(tfdiags.Warning, "second warning", "something not very bad happened again"), + tfdiags.Sourceless(tfdiags.Error, "first error", "this time it is very bad"), + }, + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + Module: []string{}, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }, + }, + }, + CreateBeforeDestroy: false, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + }), want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu left some resources in state after executing main.tftest.hcl, these left-over resources can be viewed by reading the statefile written to disk(errored_test.tfstate) and they need to be cleaned up manually:", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_cleanup": map[string]interface{}{ + "failed_resources": []interface{}{ + map[string]interface{}{ + "instance": "null_resource.failing", + }, + 
map[string]interface{}{ + "instance": "null_resource.failing_will_depend_on_me", + }, + }, + }, + "type": "test_cleanup", + }, + { + "@level": "warn", + "@message": "Warning: first warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened", + "severity": "warning", + "summary": "first warning", + }, + "type": "diagnostic", + }, + { + "@level": "warn", + "@message": "Warning: second warning", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "something not very bad happened again", + "severity": "warning", + "summary": "second warning", + }, + "type": "diagnostic", + }, + { + "@level": "error", + "@message": "Error: first error", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "diagnostic": map[string]interface{}{ + "detail": "this time it is very bad", + "severity": "error", + "summary": "first error", + }, + "type": "diagnostic", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + view.DestroySummary(tc.diags, tc.run, tc.file, tc.state) + testJSONViewOutputEquals(t, done(t).All(), tc.want) + }) + } +} + +func TestTestJSON_File(t *testing.T) { + tcs := map[string]struct { + file *moduletest.File + want []map[string]interface{} + }{ + "pass": { + file: &moduletest.File{Name: "main.tf", Status: moduletest.Pass}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "main.tf... pass", + "@module": "tofu.ui", + "@testfile": "main.tf", + "test_file": map[string]interface{}{ + "path": "main.tf", + "status": "pass", + }, + "type": "test_file", + }, + }, + }, + + "pending": { + file: &moduletest.File{Name: "main.tf", Status: moduletest.Pending}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "main.tf... 
pending", + "@module": "tofu.ui", + "@testfile": "main.tf", + "test_file": map[string]interface{}{ + "path": "main.tf", + "status": "pending", + }, + "type": "test_file", + }, + }, + }, + + "skip": { + file: &moduletest.File{Name: "main.tf", Status: moduletest.Skip}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "main.tf... skip", + "@module": "tofu.ui", + "@testfile": "main.tf", + "test_file": map[string]interface{}{ + "path": "main.tf", + "status": "skip", + }, + "type": "test_file", + }, + }, + }, + + "fail": { + file: &moduletest.File{Name: "main.tf", Status: moduletest.Fail}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "main.tf... fail", + "@module": "tofu.ui", + "@testfile": "main.tf", + "test_file": map[string]interface{}{ + "path": "main.tf", + "status": "fail", + }, + "type": "test_file", + }, + }, + }, + + "error": { + file: &moduletest.File{Name: "main.tf", Status: moduletest.Error}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "main.tf... fail", + "@module": "tofu.ui", + "@testfile": "main.tf", + "test_file": map[string]interface{}{ + "path": "main.tf", + "status": "error", + }, + "type": "test_file", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + view.File(tc.file) + testJSONViewOutputEquals(t, done(t).All(), tc.want) + }) + } +} + +func TestTestJSON_Run(t *testing.T) { + tcs := map[string]struct { + run *moduletest.Run + want []map[string]interface{} + }{ + "pass": { + run: &moduletest.Run{Name: "run_block", Status: moduletest.Pass}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... 
pass", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "pass", + }, + "type": "test_run", + }, + }, + }, + + "pass_with_diags": { + run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Warning, "a warning occurred", "some warning happened during this test")}, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... pass", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "pass", + }, + "type": "test_run", + }, + { + "@level": "warn", + "@message": "Warning: a warning occurred", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "diagnostic": map[string]interface{}{ + "detail": "some warning happened during this test", + "severity": "warning", + "summary": "a warning occurred", + }, + "type": "diagnostic", + }, + }, + }, + + "pending": { + run: &moduletest.Run{Name: "run_block", Status: moduletest.Pending}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... pending", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "pending", + }, + "type": "test_run", + }, + }, + }, + + "skip": { + run: &moduletest.Run{Name: "run_block", Status: moduletest.Skip}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... 
skip", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "skip", + }, + "type": "test_run", + }, + }, + }, + + "fail": { + run: &moduletest.Run{Name: "run_block", Status: moduletest.Fail}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... fail", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "fail", + }, + "type": "test_run", + }, + }, + }, + + "fail_with_diags": { + run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Fail, + Diagnostics: tfdiags.Diagnostics{ + tfdiags.Sourceless(tfdiags.Error, "a comparison failed", "details details details"), + tfdiags.Sourceless(tfdiags.Error, "a second comparison failed", "other details"), + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... 
fail", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "fail", + }, + "type": "test_run", + }, + { + "@level": "error", + "@message": "Error: a comparison failed", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "diagnostic": map[string]interface{}{ + "detail": "details details details", + "severity": "error", + "summary": "a comparison failed", + }, + "type": "diagnostic", + }, + { + "@level": "error", + "@message": "Error: a second comparison failed", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "diagnostic": map[string]interface{}{ + "detail": "other details", + "severity": "error", + "summary": "a second comparison failed", + }, + "type": "diagnostic", + }, + }, + }, + + "error": { + run: &moduletest.Run{Name: "run_block", Status: moduletest.Error}, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... fail", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "error", + }, + "type": "test_run", + }, + }, + }, + + "error_with_diags": { + run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Error, + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "an error occurred", "something bad happened during this test")}, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... 
fail", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "error", + }, + "type": "test_run", + }, + { + "@level": "error", + "@message": "Error: an error occurred", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "diagnostic": map[string]interface{}{ + "detail": "something bad happened during this test", + "severity": "error", + "summary": "an error occurred", + }, + "type": "diagnostic", + }, + }, + }, + + "verbose_plan": { + run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Config: &configs.TestRun{ + Command: configs.PlanTestCommand, + }, + Verbose: &moduletest.Verbose{ + Plan: &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + PrevRunAddr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + After: dynamicValue( + t, + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("foobar"), + }), + cty.Object(map[string]cty.Type{ + "value": cty.String, + })), + }, + }, + }, + }, + }, + State: states.NewState(), // empty state + Config: &configs.Config{ + Module: &configs.Module{ + ProviderRequirements: &configs.RequiredProviders{}, + }, + }, + Providers: 
map[addrs.Provider]providers.ProviderSchema{ + addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }: { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... pass", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "pass", + }, + "type": "test_run", + }, + { + "@level": "info", + "@message": "-verbose flag enabled, printing plan", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_plan": map[string]interface{}{ + "configuration": map[string]interface{}{ + "root_module": map[string]interface{}{}, + }, + "errored": false, + "planned_values": map[string]interface{}{ + "root_module": map[string]interface{}{ + "resources": []interface{}{ + map[string]interface{}{ + "address": "test_resource.creating", + "mode": "managed", + "name": "creating", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0.0, + "sensitive_values": map[string]interface{}{}, + "type": "test_resource", + "values": map[string]interface{}{ + "value": "foobar", + }, + }, + }, + }, + }, + "resource_changes": []interface{}{ + map[string]interface{}{ + "address": "test_resource.creating", + "change": map[string]interface{}{ + "actions": []interface{}{"create"}, + "after": map[string]interface{}{ + "value": "foobar", + }, + "after_sensitive": map[string]interface{}{}, + "after_unknown": map[string]interface{}{}, + "before": nil, + "before_sensitive": false, + }, + "mode": "managed", + "name": "creating", + "provider_name": "registry.opentofu.org/hashicorp/test", + "type": "test_resource", + }, 
+ }, + }, + "type": "test_plan", + }, + }, + }, + "verbose_apply": { + run: &moduletest.Run{ + Name: "run_block", + Status: moduletest.Pass, + Config: &configs.TestRun{ + Command: configs.ApplyTestCommand, + }, + Verbose: &moduletest.Verbose{ + Plan: &plans.Plan{}, // empty plan + State: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "creating", + }, + }, + }, + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"foobar"}`), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }, + }) + }), + Config: &configs.Config{ + Module: &configs.Module{}, + }, + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }: { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "info", + "@message": " \"run_block\"... 
pass", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_run": map[string]interface{}{ + "path": "main.tftest.hcl", + "run": "run_block", + "status": "pass", + }, + "type": "test_run", + }, + { + "@level": "info", + "@message": "-verbose flag enabled, printing state", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "@testrun": "run_block", + "test_state": map[string]interface{}{ + "values": map[string]interface{}{ + "root_module": map[string]interface{}{ + "resources": []interface{}{ + map[string]interface{}{ + "address": "test_resource.creating", + "mode": "managed", + "name": "creating", + "provider_name": "registry.opentofu.org/hashicorp/test", + "schema_version": 0.0, + "sensitive_values": map[string]interface{}{}, + "type": "test_resource", + "values": map[string]interface{}{ + "value": "foobar", + }, + }, + }, + }, + }, + }, + "type": "test_state", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + file := &moduletest.File{Name: "main.tftest.hcl"} + + view.Run(tc.run, file) + testJSONViewOutputEquals(t, done(t).All(), tc.want, cmp.FilterPath(func(path cmp.Path) bool { + return strings.Contains(path.Last().String(), "version") || strings.Contains(path.Last().String(), "timestamp") + }, cmp.Ignore())) + }) + } +} + +func TestTestJSON_FatalInterruptSummary(t *testing.T) { + tcs := map[string]struct { + states map[*moduletest.Run]*states.State + changes []*plans.ResourceInstanceChangeSrc + want []map[string]interface{} + }{ + "no_state_only_plan": { + states: make(map[*moduletest.Run]*states.State), + changes: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, 
+ ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu was interrupted during test execution, and may not have performed the expected cleanup operations.", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_interrupt": map[string]interface{}{ + "planned": []interface{}{ + "test_instance.one", + "test_instance.two", + }, + }, + "type": "test_interrupt", + }, + }, + }, + "file_state_no_plan": { + states: map[*moduletest.Run]*states.State{ + nil: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + changes: nil, + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu was interrupted during test execution, and may not have performed the expected cleanup operations.", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_interrupt": map[string]interface{}{ + "state": []interface{}{ + map[string]interface{}{ + "instance": "test_instance.one", + }, + map[string]interface{}{ + "instance": 
"test_instance.two", + }, + }, + }, + "type": "test_interrupt", + }, + }, + }, + "run_states_no_plan": { + states: map[*moduletest.Run]*states.State{ + &moduletest.Run{Name: "setup_block"}: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + changes: nil, + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu was interrupted during test execution, and may not have performed the expected cleanup operations.", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_interrupt": map[string]interface{}{ + "states": map[string]interface{}{ + "setup_block": []interface{}{ + map[string]interface{}{ + "instance": "test_instance.one", + }, + map[string]interface{}{ + "instance": "test_instance.two", + }, + }, + }, + }, + "type": "test_interrupt", + }, + }, + }, + "all_states_with_plan": { + states: map[*moduletest.Run]*states.State{ + &moduletest.Run{Name: "setup_block"}: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "setup_one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + 
addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "setup_two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + nil: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "one", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + + state.SetResourceInstanceCurrent( + addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "two", + }, + }, + }, + &states.ResourceInstanceObjectSrc{}, + addrs.AbsProviderConfig{}) + }), + }, + changes: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "new_one", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "new_two", + }, + }, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + want: []map[string]interface{}{ + { + "@level": "error", + "@message": "OpenTofu was interrupted during test execution, and may not have performed the expected cleanup operations.", + "@module": "tofu.ui", + "@testfile": "main.tftest.hcl", + "test_interrupt": map[string]interface{}{ + "state": []interface{}{ + map[string]interface{}{ + 
"instance": "test_instance.one", + }, + map[string]interface{}{ + "instance": "test_instance.two", + }, + }, + "states": map[string]interface{}{ + "setup_block": []interface{}{ + map[string]interface{}{ + "instance": "test_instance.setup_one", + }, + map[string]interface{}{ + "instance": "test_instance.setup_two", + }, + }, + }, + "planned": []interface{}{ + "test_instance.new_one", + "test_instance.new_two", + }, + }, + "type": "test_interrupt", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewTest(arguments.ViewJSON, NewView(streams)) + + file := &moduletest.File{Name: "main.tftest.hcl"} + run := &moduletest.Run{Name: "run_block"} + + view.FatalInterruptSummary(run, file, tc.states, tc.changes) + testJSONViewOutputEquals(t, done(t).All(), tc.want) + }) + } +} + +func TestSaveErroredStateFile(t *testing.T) { + tcsHuman := map[string]struct { + state *states.State + run *moduletest.Run + file *moduletest.File + stderr string + want interface{} + }{ + "state_foo_bar_human": { + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: 
addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + stderr: ` +Writing state to file: errored_test.tfstate +`, + want: nil, + }, + "state_null_resource_human": { + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + Module: []string{}, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }, + }, + }, + CreateBeforeDestroy: false, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + }), + stderr: ` +Writing state to file: errored_test.tfstate +`, + want: nil, + }, + } + + tcsJson := map[string]struct { + state *states.State + run *moduletest.Run + file *moduletest.File + stderr string + want interface{} + }{ + "state_with_run_json": { + file: &moduletest.File{Name: "main.tftest.hcl"}, + run: &moduletest.Run{Name: "run_block"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + 
addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + stderr: "", + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Writing state to file: errored_test.tfstate", + "@module": string("tofu.ui"), + }, + }, + }, + "state_foo_bar_json": { + file: &moduletest.File{Name: "main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + state.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + "0fcb640a", + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + stderr: "", + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Writing state to file: errored_test.tfstate", + "@module": "tofu.ui", + }, + }, + }, + "state_null_resource_with_errors": { + file: &moduletest.File{Name: 
"main.tftest.hcl"}, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + Module: []string{}, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "failing_will_depend_on_me", + }, + }, + }, + CreateBeforeDestroy: false, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }) + }), + stderr: "", + want: []map[string]interface{}{ + { + "@level": "info", + "@message": "Writing state to file: errored_test.tfstate", + "@module": "tofu.ui", + }, + }, + }, + } + // Run tests for Human view + runTestSaveErroredStateFile(t, tcsHuman, arguments.ViewHuman) + + // Run tests for JSON view + runTestSaveErroredStateFile(t, tcsJson, arguments.ViewJSON) +} + +func runTestSaveErroredStateFile(t *testing.T, tc map[string]struct { + state *states.State + run *moduletest.Run + file *moduletest.File + stderr string + want interface{} +}, viewType arguments.ViewType) { + for name, data := range tc { + t.Run(name, func(t *testing.T) { + // Create a temporary directory + tempDir := t.TempDir() + + // Modify the state file path to use the temporary directory + tempStateFilePath := filepath.Clean(filepath.Join(tempDir, "errored_test.tfstate")) + + // Get the current working directory + originalDir, err 
:= os.Getwd() + if err != nil { + t.Fatalf("Error getting current working directory: %v", err) + } + + // Change the working directory to the temporary directory + if err := os.Chdir(tempDir); err != nil { + t.Fatalf("Error changing working directory: %v", err) + } + defer func() { + // Change the working directory back to the original directory after the test + if err := os.Chdir(originalDir); err != nil { + t.Fatalf("Error changing working directory back: %v", err) + } + }() + + streams, done := terminal.StreamsForTesting(t) + + if viewType == arguments.ViewHuman { + view := NewTest(arguments.ViewHuman, NewView(streams)) + SaveErroredTestStateFile(data.state, data.run, data.file, view) + output := done(t) + + actual, expected := output.Stderr(), data.stderr + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff) + } + } else if viewType == arguments.ViewJSON { + view := NewTest(arguments.ViewJSON, NewView(streams)) + SaveErroredTestStateFile(data.state, data.run, data.file, view) + want, ok := data.want.([]map[string]interface{}) + if !ok { + t.Fatalf("Failed to assert want as []map[string]interface{}") + } + testJSONViewOutputEquals(t, done(t).All(), want) + } else { + t.Fatalf("Unsupported view type: %v", viewType) + } + + // Check if the state file exists + if _, err := os.Stat(tempStateFilePath); os.IsNotExist(err) { + // File does not exist + t.Errorf("Expected state file 'errored_test.tfstate' to exist in: %s, but it does not.", tempDir) + } + // Trigger garbage collection to ensure that all open file handles are closed. + // This prevents TempDir RemoveAll cleanup errors on Windows. 
+ if runtime.GOOS == "windows" { + runtime.GC() + } + }) + } +} + +func dynamicValue(t *testing.T, value cty.Value, typ cty.Type) plans.DynamicValue { + d, err := plans.NewDynamicValue(value, typ) + if err != nil { + t.Fatalf("failed to create dynamic value: %s", err) + } + return d +} diff --git a/pkg/command/views/testdata/plans/redacted-plan.json b/pkg/command/views/testdata/plans/redacted-plan.json new file mode 100644 index 00000000000..3e3b067798f --- /dev/null +++ b/pkg/command/views/testdata/plans/redacted-plan.json @@ -0,0 +1,116 @@ +{ + "plan_format_version": "1.1", + "resource_drift": [], + "resource_changes": [ + { + "address": "null_resource.foo", + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider_name": "registry.opentofu.org/hashicorp/null", + "change": { + "actions": [ + "create" + ], + "before": null, + "after": { + "triggers": null + }, + "after_unknown": { + "id": true + }, + "before_sensitive": false, + "after_sensitive": {} + } + } + ], + "relevant_attributes": [], + "output_changes": {}, + "provider_schemas": { + "registry.opentofu.org/hashicorp/null": { + "provider": { + "version": 0, + "block": { + "description_kind": "plain" + } + }, + "resource_schemas": { + "null_resource": { + "version": 0, + "block": { + "attributes": { + "id": { + "type": "string", + "description": "This is set to a random value at create time.", + "description_kind": "plain", + "computed": true + }, + "triggers": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that, when changed, will force the null resource to be replaced, re-running any associated provisioners.", + "description_kind": "plain", + "optional": true + } + }, + "description": "The `null_resource` resource implements the standard resource lifecycle but takes no further action.\n\nThe `triggers` argument allows specifying an arbitrary set of values that, when changed, will cause the resource to be replaced.", + "description_kind": "plain" + } + 
} + }, + "data_source_schemas": { + "null_data_source": { + "version": 0, + "block": { + "attributes": { + "has_computed_default": { + "type": "string", + "description": "If set, its literal value will be stored and returned. If not, its value defaults to `\"default\"`. This argument exists primarily for testing and has little practical use.", + "description_kind": "plain", + "optional": true, + "computed": true + }, + "id": { + "type": "string", + "description": "This attribute is only present for some legacy compatibility issues and should not be used. It will be removed in a future version.", + "description_kind": "plain", + "deprecated": true, + "computed": true + }, + "inputs": { + "type": [ + "map", + "string" + ], + "description": "A map of arbitrary strings that is copied into the `outputs` attribute, and accessible directly for interpolation.", + "description_kind": "plain", + "optional": true + }, + "outputs": { + "type": [ + "map", + "string" + ], + "description": "After the data source is \"read\", a copy of the `inputs` map.", + "description_kind": "plain", + "computed": true + }, + "random": { + "type": "string", + "description": "A random value. This is primarily for testing and has little practical use; prefer the [hashicorp/random provider](https://registry.terraform.io/providers/hashicorp/random) for more practical random number use-cases.", + "description_kind": "plain", + "computed": true + } + }, + "description": "The `null_data_source` data source implements the standard data source lifecycle but does not\ninteract with any external APIs.\n\nHistorically, the `null_data_source` was typically used to construct intermediate values to re-use elsewhere in configuration. 
The\nsame can now be achieved using [locals](https://www.terraform.io/docs/language/values/locals.html).\n", + "description_kind": "plain", + "deprecated": true + } + } + } + } + }, + "provider_format_version": "1.0" +} diff --git a/pkg/command/views/testdata/show/main.tf b/pkg/command/views/testdata/show/main.tf new file mode 100644 index 00000000000..e1cca23dd69 --- /dev/null +++ b/pkg/command/views/testdata/show/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + foo = "value" +} diff --git a/pkg/command/views/validate.go b/pkg/command/views/validate.go new file mode 100644 index 00000000000..374185011e0 --- /dev/null +++ b/pkg/command/views/validate.go @@ -0,0 +1,143 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + viewsjson "github.com/kubegems/opentofu/pkg/command/views/json" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// The Validate is used for the validate command. +type Validate interface { + // Results renders the diagnostics returned from a validation walk, and + // returns a CLI exit code: 0 if there are no errors, 1 otherwise + Results(diags tfdiags.Diagnostics) int + + // Diagnostics renders early diagnostics, resulting from argument parsing. + Diagnostics(diags tfdiags.Diagnostics) +} + +// NewValidate returns an initialized Validate implementation for the given ViewType. 
+func NewValidate(vt arguments.ViewType, view *View) Validate { + switch vt { + case arguments.ViewJSON: + return &ValidateJSON{view: view} + case arguments.ViewHuman: + return &ValidateHuman{view: view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The ValidateHuman implementation renders diagnostics in a human-readable form, +// along with a success/failure message if OpenTofu is able to execute the +// validation walk. +type ValidateHuman struct { + view *View +} + +var _ Validate = (*ValidateHuman)(nil) + +func (v *ValidateHuman) Results(diags tfdiags.Diagnostics) int { + columns := v.view.outputColumns() + + if len(diags) == 0 { + v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateSuccess), columns)) + } else { + v.Diagnostics(diags) + + if !diags.HasErrors() { + v.view.streams.Println(format.WordWrap(v.view.colorize.Color(validateWarnings), columns)) + } + } + + if diags.HasErrors() { + return 1 + } + return 0 +} + +const validateSuccess = "[green][bold]Success![reset] The configuration is valid." + +const validateWarnings = "[green][bold]Success![reset] The configuration is valid, but there were some validation warnings as shown above." + +func (v *ValidateHuman) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} + +// The ValidateJSON implementation renders validation results as a JSON object. +// This object includes top-level fields summarizing the result, and an array +// of JSON diagnostic objects. +type ValidateJSON struct { + view *View +} + +var _ Validate = (*ValidateJSON)(nil) + +func (v *ValidateJSON) Results(diags tfdiags.Diagnostics) int { + // FormatVersion represents the version of the json format and will be + // incremented for any change to this format that requires changes to a + // consuming parser. 
+ const FormatVersion = "1.0" + + type Output struct { + FormatVersion string `json:"format_version"` + + // We include some summary information that is actually redundant + // with the detailed diagnostics, but avoids the need for callers + // to re-implement our logic for deciding these. + Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []*viewsjson.Diagnostic `json:"diagnostics"` + } + + output := Output{ + FormatVersion: FormatVersion, + Valid: true, // until proven otherwise + } + configSources := v.view.configSources() + for _, diag := range diags { + output.Diagnostics = append(output.Diagnostics, viewsjson.NewDiagnostic(diag, configSources)) + + switch diag.Severity() { + case tfdiags.Error: + output.ErrorCount++ + output.Valid = false + case tfdiags.Warning: + output.WarningCount++ + } + } + if output.Diagnostics == nil { + // Make sure this always appears as an array in our output, since + // this is easier to consume for dynamically-typed languages. + output.Diagnostics = []*viewsjson.Diagnostic{} + } + + j, err := json.MarshalIndent(&output, "", " ") + if err != nil { + // Should never happen because we fully-control the input here + panic(err) + } + v.view.streams.Println(string(j)) + + if diags.HasErrors() { + return 1 + } + return 0 +} + +// Diagnostics should only be called if the validation walk cannot be executed. +// In this case, we choose to render human-readable diagnostic output, +// primarily for backwards compatibility. +func (v *ValidateJSON) Diagnostics(diags tfdiags.Diagnostics) { + v.view.Diagnostics(diags) +} diff --git a/pkg/command/views/validate_test.go b/pkg/command/views/validate_test.go new file mode 100644 index 00000000000..9354f9dc361 --- /dev/null +++ b/pkg/command/views/validate_test.go @@ -0,0 +1,138 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestValidateHuman(t *testing.T) { + testCases := map[string]struct { + diag tfdiags.Diagnostic + wantSuccess bool + wantSubstring string + }{ + "success": { + nil, + true, + "The configuration is valid.", + }, + "warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Your shoelaces are untied", + "Watch out, or you'll trip!", + ), + true, + "The configuration is valid, but there were some validation warnings", + }, + "error": { + tfdiags.Sourceless( + tfdiags.Error, + "Configuration is missing random_pet", + "Every configuration should have a random_pet.", + ), + false, + "Error: Configuration is missing random_pet", + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewValidate(arguments.ViewHuman, view) + + var diags tfdiags.Diagnostics + + if tc.diag != nil { + diags = diags.Append(tc.diag) + } + + ret := v.Results(diags) + + if tc.wantSuccess && ret != 0 { + t.Errorf("expected 0 return code, got %d", ret) + } else if !tc.wantSuccess && ret != 1 { + t.Errorf("expected 1 return code, got %d", ret) + } + + got := done(t).All() + if strings.Contains(got, "Success!") != tc.wantSuccess { + t.Errorf("unexpected output:\n%s", got) + } + if !strings.Contains(got, tc.wantSubstring) { + t.Errorf("expected output to include %q, but was:\n%s", tc.wantSubstring, got) + } + }) + } +} + +func TestValidateJSON(t *testing.T) { + testCases := map[string]struct { + diag tfdiags.Diagnostic + wantSuccess bool + }{ + "success": { + nil, + true, + }, + "warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "Your shoelaces are untied", + "Watch out, or you'll 
trip!", + ), + true, + }, + "error": { + tfdiags.Sourceless( + tfdiags.Error, + "Configuration is missing random_pet", + "Every configuration should have a random_pet.", + ), + false, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + view.Configure(&arguments.View{NoColor: true}) + v := NewValidate(arguments.ViewJSON, view) + + var diags tfdiags.Diagnostics + + if tc.diag != nil { + diags = diags.Append(tc.diag) + } + + ret := v.Results(diags) + + if tc.wantSuccess && ret != 0 { + t.Errorf("expected 0 return code, got %d", ret) + } else if !tc.wantSuccess && ret != 1 { + t.Errorf("expected 1 return code, got %d", ret) + } + + got := done(t).All() + + // Make sure the result looks like JSON; we comprehensively test + // the structure of this output in the command package tests. + var result map[string]interface{} + + if err := json.Unmarshal([]byte(got), &result); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/pkg/command/views/view.go b/pkg/command/views/view.go new file mode 100644 index 00000000000..082babbac2f --- /dev/null +++ b/pkg/command/views/view.go @@ -0,0 +1,180 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package views + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/format" + "github.com/kubegems/opentofu/pkg/terminal" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/mitchellh/colorstring" +) + +// View is the base layer for command views, encapsulating a set of I/O +// streams, a colorize implementation, and implementing a human friendly view +// for diagnostics. 
+type View struct { + streams *terminal.Streams + colorize *colorstring.Colorize + + compactWarnings bool + + // When this is true it's a hint that OpenTofu is being run indirectly + // via a wrapper script or other automation and so we may wish to replace + // direct examples of commands to run with more conceptual directions. + // However, we only do this on a best-effort basis, typically prioritizing + // the messages that users are most likely to see. + runningInAutomation bool + + // Concise is used to reduce the level of noise in the output and display + // only the important details. + concise bool + + // showSensitive is used to display the value of variables marked as sensitive. + showSensitive bool + + // This unfortunate wart is required to enable rendering of diagnostics which + // have associated source code in the configuration. This function pointer + // will be dereferenced as late as possible when rendering diagnostics in + // order to access the config loader cache. + configSources func() map[string]*hcl.File +} + +// Initialize a View with the given streams, a disabled colorize object, and a +// no-op configSources callback. +func NewView(streams *terminal.Streams) *View { + return &View{ + streams: streams, + colorize: &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: true, + }, + configSources: func() map[string]*hcl.File { return nil }, + } +} + +// SetRunningInAutomation modifies the view's "running in automation" flag, +// which causes some slight adjustments to certain messages that would normally +// suggest specific OpenTofu commands to run, to make more conceptual gestures +// instead for situations where the user isn't running OpenTofu directly. +// +// For convenient use during initialization (in conjunction with NewView), +// SetRunningInAutomation returns the reciever after modifying it. 
+func (v *View) SetRunningInAutomation(new bool) *View { + v.runningInAutomation = new + return v +} + +func (v *View) RunningInAutomation() bool { + return v.runningInAutomation +} + +// Configure applies the global view configuration flags. +func (v *View) Configure(view *arguments.View) { + v.colorize.Disable = view.NoColor + v.compactWarnings = view.CompactWarnings + v.concise = view.Concise +} + +// SetConfigSources overrides the default no-op callback with a new function +// pointer, and should be called when the config loader is initialized. +func (v *View) SetConfigSources(cb func() map[string]*hcl.File) { + v.configSources = cb +} + +// Diagnostics renders a set of warnings and errors in human-readable form. +// Warnings are printed to stdout, and errors to stderr. +func (v *View) Diagnostics(diags tfdiags.Diagnostics) { + diags.Sort() + + if len(diags) == 0 { + return + } + + diags = diags.ConsolidateWarnings(1) + + // Since warning messages are generally competing + if v.compactWarnings { + // If the user selected compact warnings and all of the diagnostics are + // warnings then we'll use a more compact representation of the warnings + // that only includes their summaries. + // We show full warnings if there are also errors, because a warning + // can sometimes serve as good context for a subsequent error. 
+ useCompact := true + for _, diag := range diags { + if diag.Severity() != tfdiags.Warning { + useCompact = false + break + } + } + if useCompact { + msg := format.DiagnosticWarningsCompact(diags, v.colorize) + msg = "\n" + msg + "\nTo see the full warning notes, run OpenTofu without -compact-warnings.\n" + v.streams.Print(msg) + return + } + } + + for _, diag := range diags { + var msg string + if v.colorize.Disable { + msg = format.DiagnosticPlain(diag, v.configSources(), v.streams.Stderr.Columns()) + } else { + msg = format.Diagnostic(diag, v.configSources(), v.colorize, v.streams.Stderr.Columns()) + } + + if diag.Severity() == tfdiags.Error { + v.streams.Eprint(msg) + } else { + v.streams.Print(msg) + } + } +} + +// HelpPrompt is intended to be called from commands which fail to parse all +// of their CLI arguments successfully. It refers users to the full help output +// rather than rendering it directly, which can be overwhelming and confusing. +func (v *View) HelpPrompt(command string) { + v.streams.Eprintf(helpPrompt, command) +} + +const helpPrompt = ` +For more help on using this command, run: + tofu %s -help +` + +// outputColumns returns the number of text character cells any non-error +// output should be wrapped to. +// +// This is the number of columns to use if you are calling v.streams.Print or +// related functions. +func (v *View) outputColumns() int { + return v.streams.Stdout.Columns() +} + +// errorColumns returns the number of text character cells any error +// output should be wrapped to. +// +// This is the number of columns to use if you are calling v.streams.Eprint +// or related functions. +func (v *View) errorColumns() int { + return v.streams.Stderr.Columns() +} + +// outputHorizRule will call v.streams.Println with enough horizontal line +// characters to fill an entire row of output. +// +// If UI color is enabled, the rule will get a dark grey coloring to try to +// visually de-emphasize it. 
+func (v *View) outputHorizRule() { + v.streams.Println(format.HorizontalRule(v.colorize, v.outputColumns())) +} + +func (v *View) SetShowSensitive(showSensitive bool) { + v.showSensitive = showSensitive +} diff --git a/pkg/command/webbrowser/mock.go b/pkg/command/webbrowser/mock.go new file mode 100644 index 00000000000..c3b3e4795ec --- /dev/null +++ b/pkg/command/webbrowser/mock.go @@ -0,0 +1,155 @@ +package webbrowser + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "sync" + + "github.com/kubegems/opentofu/pkg/httpclient" +) + +// NewMockLauncher creates and returns a mock implementation of Launcher, +// with some special behavior designed for use in unit tests. +// +// See the documentation of MockLauncher itself for more information. +func NewMockLauncher(ctx context.Context) *MockLauncher { + client := httpclient.New() + return &MockLauncher{ + Client: client, + Context: ctx, + } +} + +// MockLauncher is a mock implementation of Launcher that has some special +// behavior designed for use in unit tests. +// +// When OpenURL is called, MockLauncher will make an HTTP request to the given +// URL rather than interacting with a "real" browser. +// +// In normal situations it will then return with no further action, but if +// the response to the given URL is either a standard HTTP redirect response +// or includes the custom HTTP header X-Redirect-To then MockLauncher will +// send a follow-up request to that target URL, and continue in this manner +// until it reaches a URL that is not a redirect. (The X-Redirect-To header +// is there so that a server can potentially offer a normal HTML page to +// an actual browser while also giving a next-hop hint for MockLauncher.) 
+// +// Since MockLauncher is not a full programmable user-agent implementation +// it can't be used for testing of real-world web applications, but it can +// be used for testing against specialized test servers that are written +// with MockLauncher in mind and know how to drive the request flow through +// whatever steps are required to complete the desired test. +// +// All of the actions taken by MockLauncher happen asynchronously in the +// background, to simulate the concurrency of a separate web browser. +// Test code using MockLauncher should provide a context which is cancelled +// when the test completes, to help avoid leaking MockLaunchers. +type MockLauncher struct { + // Client is the HTTP client that MockLauncher will use to make requests. + // By default (if you use NewMockLauncher) this is a new client created + // via httpclient.New, but callers may override it if they need customized + // behavior for a particular test. + // + // Do not use a client that is shared with any other subsystem, because + // MockLauncher will customize the settings of the given client. + Client *http.Client + + // Context can be cancelled in order to abort an OpenURL call before it + // would naturally complete. + Context context.Context + + // Responses is a log of all of the responses recieved from the launcher's + // requests, in the order requested. + Responses []*http.Response + + // done is a waitgroup used internally to signal when the async work is + // complete, in order to make this mock more convenient to use in tests. + done sync.WaitGroup +} + +var _ Launcher = (*MockLauncher)(nil) + +// OpenURL is the mock implementation of Launcher, which has the special +// behavior described for type MockLauncher. +func (l *MockLauncher) OpenURL(u string) error { + // We run our operation in the background because it's supposed to be + // behaving like a web browser running in a separate process. 
+ log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) starting in the background", u) + l.done.Add(1) + go func() { + err := l.openURL(u) + if err != nil { + // Can't really do anything with this asynchronously, so we'll + // just log it so that someone debugging will be able to see it. + log.Printf("[ERROR] webbrowser.MockLauncher: OpenURL(%q): %s", u, err) + } else { + log.Printf("[TRACE] webbrowser.MockLauncher: OpenURL(%q) has concluded", u) + } + l.done.Done() + }() + return nil +} + +func (l *MockLauncher) openURL(u string) error { + // We need to disable automatic redirect following so that we can implement + // it ourselves below, and thus be able to see the redirects in our + // responses log. + l.Client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + // We'll keep looping as long as the server keeps giving us new URLs to + // request. + for u != "" { + log.Printf("[DEBUG] webbrowser.MockLauncher: requesting %s", u) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return fmt.Errorf("failed to construct HTTP request for %s: %w", u, err) + } + resp, err := l.Client.Do(req) + if err != nil { + log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", err) + return fmt.Errorf("error requesting %s: %w", u, err) + } + l.Responses = append(l.Responses, resp) + if resp.StatusCode >= 400 { + log.Printf("[DEBUG] webbrowser.MockLauncher: request failed: %s", resp.Status) + return fmt.Errorf("error requesting %s: %s", u, resp.Status) + } + log.Printf("[DEBUG] webbrowser.MockLauncher: request succeeded: %s", resp.Status) + + u = "" // unless it's a redirect, we'll stop after this + if location := resp.Header.Get("Location"); location != "" { + u = location + } else if redirectTo := resp.Header.Get("X-Redirect-To"); redirectTo != "" { + u = redirectTo + } + + if u != "" { + // HTTP technically doesn't permit relative URLs in Location, but + // browsers tolerate it and so 
real-world servers do it, and thus + // we'll allow it here too. + oldURL := resp.Request.URL + givenURL, err := url.Parse(u) + if err != nil { + return fmt.Errorf("invalid redirect URL %s: %w", u, err) + } + u = oldURL.ResolveReference(givenURL).String() + log.Printf("[DEBUG] webbrowser.MockLauncher: redirected to %s", u) + } + } + + log.Printf("[DEBUG] webbrowser.MockLauncher: all done") + return nil +} + +// Wait blocks until the MockLauncher has finished its asynchronous work of +// making HTTP requests and following redirects, at which point it will have +// reached a request that didn't redirect anywhere and stopped iterating. +func (l *MockLauncher) Wait() { + log.Printf("[TRACE] webbrowser.MockLauncher: Wait() for current work to complete") + l.done.Wait() +} diff --git a/pkg/command/webbrowser/mock_test.go b/pkg/command/webbrowser/mock_test.go new file mode 100644 index 00000000000..610f83d8772 --- /dev/null +++ b/pkg/command/webbrowser/mock_test.go @@ -0,0 +1,95 @@ +package webbrowser + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" +) + +func TestMockLauncher(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + resp.Header().Set("Content-Length", "0") + switch req.URL.Path { + case "/standard-redirect-source": + resp.Header().Set("Location", "/standard-redirect-target") + resp.WriteHeader(302) + case "/custom-redirect-source": + resp.Header().Set("X-Redirect-To", "/custom-redirect-target") + resp.WriteHeader(200) + case "/error": + resp.WriteHeader(500) + default: + resp.WriteHeader(200) + } + })) + defer s.Close() + + t.Run("no redirects", func(t *testing.T) { + l := NewMockLauncher(context.Background()) + err := l.OpenURL(s.URL) + if err != nil { + t.Fatal(err) + } + l.Wait() // Let the async work complete + if got, want := len(l.Responses), 1; got != want { + t.Fatalf("wrong number of responses %d; want %d", got, want) + } + if got, want := 
l.Responses[0].Request.URL.Path, ""; got != want { + t.Fatalf("wrong request URL %q; want %q", got, want) + } + }) + t.Run("error", func(t *testing.T) { + l := NewMockLauncher(context.Background()) + err := l.OpenURL(s.URL + "/error") + if err != nil { + // Th is kind of error is supposed to happen asynchronously, so we + // should not see it here. + t.Fatal(err) + } + l.Wait() // Let the async work complete + if got, want := len(l.Responses), 1; got != want { + t.Fatalf("wrong number of responses %d; want %d", got, want) + } + if got, want := l.Responses[0].Request.URL.Path, "/error"; got != want { + t.Fatalf("wrong request URL %q; want %q", got, want) + } + if got, want := l.Responses[0].StatusCode, 500; got != want { + t.Fatalf("wrong response status %d; want %d", got, want) + } + }) + t.Run("standard redirect", func(t *testing.T) { + l := NewMockLauncher(context.Background()) + err := l.OpenURL(s.URL + "/standard-redirect-source") + if err != nil { + t.Fatal(err) + } + l.Wait() // Let the async work complete + if got, want := len(l.Responses), 2; got != want { + t.Fatalf("wrong number of responses %d; want %d", got, want) + } + if got, want := l.Responses[0].Request.URL.Path, "/standard-redirect-source"; got != want { + t.Fatalf("wrong request 0 URL %q; want %q", got, want) + } + if got, want := l.Responses[1].Request.URL.Path, "/standard-redirect-target"; got != want { + t.Fatalf("wrong request 1 URL %q; want %q", got, want) + } + }) + t.Run("custom redirect", func(t *testing.T) { + l := NewMockLauncher(context.Background()) + err := l.OpenURL(s.URL + "/custom-redirect-source") + if err != nil { + t.Fatal(err) + } + l.Wait() // Let the async work complete + if got, want := len(l.Responses), 2; got != want { + t.Fatalf("wrong number of responses %d; want %d", got, want) + } + if got, want := l.Responses[0].Request.URL.Path, "/custom-redirect-source"; got != want { + t.Fatalf("wrong request 0 URL %q; want %q", got, want) + } + if got, want := 
l.Responses[1].Request.URL.Path, "/custom-redirect-target"; got != want { + t.Fatalf("wrong request 1 URL %q; want %q", got, want) + } + }) +} diff --git a/pkg/command/webbrowser/native.go b/pkg/command/webbrowser/native.go new file mode 100644 index 00000000000..4424cca11eb --- /dev/null +++ b/pkg/command/webbrowser/native.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webbrowser + +import ( + "github.com/cli/browser" +) + +// NewNativeLauncher creates and returns a Launcher that will attempt to interact +// with the browser-launching mechanisms of the operating system where the +// program is currently running. +func NewNativeLauncher() Launcher { + return nativeLauncher{} +} + +type nativeLauncher struct{} + +func (l nativeLauncher) OpenURL(url string) error { + return browser.OpenURL(url) +} diff --git a/pkg/command/webbrowser/webbrowser.go b/pkg/command/webbrowser/webbrowser.go new file mode 100644 index 00000000000..f095d09c98b --- /dev/null +++ b/pkg/command/webbrowser/webbrowser.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package webbrowser + +// Launcher is an object that knows how to open a given URL in a new tab in +// some suitable browser on the current system. +// +// Launching of browsers is a very target-platform-sensitive activity, so +// this interface serves as an abstraction over many possible implementations +// which can be selected based on what is appropriate for a specific situation. +type Launcher interface { + // OpenURL opens the given URL in a web browser. + // + // Depending on the circumstances and on the target platform, this may or + // may not cause the browser to take input focus. 
Because of this + // uncertainty, any caller of this method must be sure to include some + // language in its UI output to let the user know that a browser tab has + // opened somewhere, so that they can go and find it if the focus didn't + // switch automatically. + OpenURL(url string) error +} diff --git a/pkg/command/workdir/dir.go b/pkg/command/workdir/dir.go new file mode 100644 index 00000000000..d57e4d13ccd --- /dev/null +++ b/pkg/command/workdir/dir.go @@ -0,0 +1,154 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workdir + +import ( + "fmt" + "os" + "path/filepath" +) + +// Dir represents a single OpenTofu working directory. +// +// "Working directory" is unfortunately a slight misnomer, because non-default +// options can potentially stretch the definition such that multiple working +// directories end up appearing to share a data directory, or other similar +// anomolies, but we continue to use this terminology both for historical +// reasons and because it reflects the common case without any special +// overrides. +// +// The naming convention for methods on this type is that methods whose names +// begin with "Override" affect only characteristics of the particular object +// they're called on, changing where it looks for data, while methods whose +// names begin with "Set" will write settings to disk such that other instances +// referring to the same directories will also see them. Given that, the +// "Override" methods should be used only during the initialization steps +// for a Dir object, typically only inside "package main", so that all +// subsequent work elsewhere will access consistent locations on disk. 
+// +// We're gradually transitioning to using this type to manage working directory +// settings, and so not everything in the working directory "data dir" is +// encapsulated here yet, but hopefully we'll gradually migrate all of those +// settings here over time. The working directory state not yet managed in here +// is typically managed directly in the "command" package, either directly +// inside commands or in methods of the giant command.Meta type. +type Dir struct { + // mainDir is the path to the directory that we present as the + // "working directory" in the user model, which is typically the + // current working directory when running OpenTofu CLI, or the + // directory explicitly chosen by the user using the -chdir=... + // global option. + mainDir string + + // originalDir is the path to the working directory that was + // selected when creating the OpenTofu CLI process, regardless of + // -chdir=... being set. This is only for very limited purposes + // related to backward compatibility; most functionality should + // use mainDir instead. + originalDir string + + // dataDir is the path to the directory where we will store our + // working directory settings and artifacts. This is typically a + // directory named ".terraform" within mainDir, but users may + // override it. + dataDir string +} + +// NewDir constructs a new working directory, anchored at the given path. +// +// In normal use, mainPath should be "." to reflect the current working +// directory, with "package main" having switched the process's current +// working directory if necessary prior to calling this function. However, +// unusual situations in tests may set mainPath to a temporary directory, or +// similar. 
+// +// WARNING: Although the logic in this package is intended to work regardless +// of whether mainPath is actually the current working directory, we're +// currently in a transitional state where this package shares responsibility +// for the working directory with various command.Meta methods, and those +// often assume that the main path of the working directory will always be +// ".". If you're writing test code that spans across both areas of +// responsibility then you must ensure that the test temporarily changes the +// test process's working directory to the directory returned by RootModuleDir +// before using the result inside a command.Meta. +func NewDir(mainPath string) *Dir { + mainPath = filepath.Clean(mainPath) + return &Dir{ + mainDir: mainPath, + originalDir: mainPath, + dataDir: filepath.Join(mainPath, ".terraform"), + } +} + +// OverrideOriginalWorkingDir records a different path as the +// "original working directory" for the reciever. +// +// Use this only to record the original working directory when OpenTofu is run +// with the -chdir=... global option. In that case, the directory given in +// -chdir=... is the "main path" to pass in to NewDir, while the original +// working directory should be sent to this method. +func (d *Dir) OverrideOriginalWorkingDir(originalPath string) { + d.originalDir = filepath.Clean(originalPath) +} + +// OverrideDataDir chooses a specific alternative directory to read and write +// the persistent working directory settings. +// +// "package main" can call this if it detects that the user has overridden +// the default location by setting the relevant environment variable. Don't +// call this when that environment variable isn't set, in order to preserve +// the default setting of a dot-prefixed directory directly inside the main +// working directory. 
+func (d *Dir) OverrideDataDir(dataDir string) { + d.dataDir = filepath.Clean(dataDir) +} + +// RootModuleDir returns the directory where we expect to find the root module +// configuration for this working directory. +func (d *Dir) RootModuleDir() string { + // The root module configuration is just directly inside the main directory. + return d.mainDir +} + +// OriginalWorkingDir returns the true, operating-system-originated working +// directory that the current OpenTofu process was launched from. +// +// This is usually the same as the main working directory, but differs in the +// special case where the user ran OpenTofu with the global -chdir=... +// option. This is here only for a few backward compatibility affordances +// from before we had the -chdir=... option, so should typically not be used +// for anything new. +func (d *Dir) OriginalWorkingDir() string { + return d.originalDir +} + +// DataDir returns the base path where the reciever keeps all of the settings +// and artifacts that must persist between consecutive commands in a single +// session. +// +// This is exported only to allow the legacy behaviors in command.Meta to +// continue accessing this directory directly. Over time we should replace +// all of those direct accesses with methods on this type, and then remove +// this method. Avoid using this method for new use-cases. +func (d *Dir) DataDir() string { + return d.dataDir +} + +// ensureDataDir creates the data directory and all of the necessary parent +// directories that lead to it, if they don't already exist. +// +// For directories that already exist ensureDataDir will preserve their +// permissions, while it'll create any new directories to be owned by the user +// running OpenTofu, readable and writable by that user, and readable by +// all other users, or some approximation of that on non-Unix platforms which +// have a different permissions model. 
+func (d *Dir) ensureDataDir() error { + err := os.MkdirAll(d.dataDir, 0755) + if err != nil { + return fmt.Errorf("failed to prepare working directory: %w", err) + } + return nil +} diff --git a/pkg/command/workdir/doc.go b/pkg/command/workdir/doc.go new file mode 100644 index 00000000000..cbf68478448 --- /dev/null +++ b/pkg/command/workdir/doc.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package workdir models the various local artifacts and state we keep inside +// a OpenTofu "working directory". +// +// The working directory artifacts and settings are typically initialized or +// modified by "tofu init", after which they persist for use by other +// commands in the same directory, but are not visible to commands run in +// other working directories or on other computers. +// +// Although "tofu init" is the main command which modifies a workdir, +// other commands do sometimes make more focused modifications for settings +// which can typically change multiple times during a session, such as the +// currently-selected workspace name. Any command which modifies the working +// directory settings must discard and reload any objects which derived from +// those settings, because otherwise the existing objects will often continue +// to follow the settings that were present when they were created. +package workdir diff --git a/pkg/command/workdir/normalize_path.go b/pkg/command/workdir/normalize_path.go new file mode 100644 index 00000000000..9ab88259318 --- /dev/null +++ b/pkg/command/workdir/normalize_path.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workdir + +import ( + "path/filepath" +) + +// NormalizePath attempts to transform the given path so that it's relative +// to the working directory, which is our preferred way to present and store +// paths to files and directories within a configuration so that they can +// be portable to operations in other working directories. +// +// It isn't always possible to produce a relative path. For example, on Windows +// the given path might be on a different volume (e.g. drive letter or network +// share) than the working directory. +// +// Note that the result will be relative to the main directory of the receiver, +// which should always be the actual process working directory in normal code, +// but might be some other temporary working directory when in test code. +// If you need to access the file or directory that the result refers to with +// functions that aren't aware of our base directory, you can use something +// like the following, which again should be needed only in test code which +// might need to inspect the filesystem in order to make assertions: +// +// filepath.Join(d.RootModuleDir(), normalizePathResult) +// +// The above is suitable only for situations where the given path is known +// to be beneath the working directory, which is the typical situation for +// temporary working directories created for automated tests. +func (d *Dir) NormalizePath(given string) string { + // We need an absolute version of d.mainDir in order for our "Rel" + // result to be reliable. + absMain, err := filepath.Abs(d.mainDir) + if err != nil { + // Weird, but okay... + return filepath.Clean(given) + } + + if !filepath.IsAbs(given) { + given = filepath.Join(absMain, given) + } + + ret, err := filepath.Rel(absMain, given) + if err != nil { + // It's not always possible to find a relative path. For example, + // the given path might be on an entirely separate volume + // (e.g. 
drive letter or network share) on a Windows system, which + // always requires an absolute path. + return filepath.Clean(given) + } + + return ret +} diff --git a/pkg/command/workdir/plugin_dirs.go b/pkg/command/workdir/plugin_dirs.go new file mode 100644 index 00000000000..13db2d9f029 --- /dev/null +++ b/pkg/command/workdir/plugin_dirs.go @@ -0,0 +1,87 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workdir + +import ( + "encoding/json" + "os" + "path/filepath" +) + +const PluginPathFilename = "plugin_path" + +// ProviderLocalCacheDir returns the directory we'll use as the +// working-directory-specific local cache of providers. +// +// The provider installer's job is to make sure that all providers needed for +// a particular working directory are available in this cache directory. No +// other component may write here, and in particular a Dir object itself +// never reads or writes into this directory, instead just delegating all of +// that responsibility to other components. +// +// Typically, the caller will ultimately pass the result of this method either +// directly or indirectly into providercache.NewDir, to get an object +// responsible for managing the contents. +func (d *Dir) ProviderLocalCacheDir() string { + return filepath.Join(d.dataDir, "providers") +} + +// ForcedPluginDirs returns a list of directories to use to find plugins, +// instead of the default locations. +// +// Returns an zero-length list and no error in the normal case where there +// are no overridden search directories. If ForcedPluginDirs returns a +// non-empty list with no errors then the result totally replaces the default +// search directories. 
+func (d *Dir) ForcedPluginDirs() ([]string, error) { + raw, err := os.ReadFile(filepath.Join(d.dataDir, PluginPathFilename)) + if os.IsNotExist(err) { + return nil, nil + } + + if err != nil { + return nil, err + } + + var pluginPath []string + if err := json.Unmarshal(raw, &pluginPath); err != nil { + return nil, err + } + return pluginPath, nil +} + +// SetForcedPluginDirs records an overridden list of directories to search +// to find plugins, instead of the default locations. See ForcePluginDirs +// for more information. +// +// Pass a zero-length list to deactivate forced plugin directories altogether, +// thus allowing the working directory to return to using the default +// search directories. +func (d *Dir) SetForcedPluginDirs(dirs []string) error { + + filePath := filepath.Join(d.dataDir, PluginPathFilename) + switch { + case len(dirs) == 0: + err := os.Remove(filePath) + if !os.IsNotExist(err) { + return err + } + return nil + default: + // We'll ignore errors from this one, because if we fail to create + // the directory then we'll fail to create the file below too, + // and that subsequent error will more directly reflect what we + // are trying to do here. + d.ensureDataDir() + + raw, err := json.MarshalIndent(dirs, "", " ") + if err != nil { + return err + } + + return os.WriteFile(filePath, raw, 0644) + } +} diff --git a/pkg/command/workdir/plugin_dirs_test.go b/pkg/command/workdir/plugin_dirs_test.go new file mode 100644 index 00000000000..0e02919269e --- /dev/null +++ b/pkg/command/workdir/plugin_dirs_test.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workdir + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestDirForcedPluginDirs(t *testing.T) { + tmpDir := t.TempDir() + + dir := NewDir(tmpDir) + // We'll use the default convention of a data dir nested inside the + // working directory, so we don't need to override anything on "dir". + + want := []string(nil) + got, err := dir.ForcedPluginDirs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong initial settings\n%s", diff) + } + + fakeDir1 := filepath.Join(tmpDir, "boop1") + fakeDir2 := filepath.Join(tmpDir, "boop2") + err = dir.SetForcedPluginDirs([]string{fakeDir1, fakeDir2}) + if err != nil { + t.Fatal(err) + } + + want = []string{fakeDir1, fakeDir2} + got, err = dir.ForcedPluginDirs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong updated settings\n%s", diff) + } + + err = dir.SetForcedPluginDirs(nil) + if err != nil { + t.Fatal(err) + } + + want = nil + got, err = dir.ForcedPluginDirs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong final settings, after reverting back to defaults\n%s", diff) + } +} diff --git a/pkg/command/workspace_command.go b/pkg/command/workspace_command.go new file mode 100644 index 00000000000..2e828234e81 --- /dev/null +++ b/pkg/command/workspace_command.go @@ -0,0 +1,130 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "net/url" + "strings" + + "github.com/mitchellh/cli" +) + +// WorkspaceCommand is a Command Implementation that manipulates workspaces, +// which allow multiple distinct states and variables from a single config. 
+type WorkspaceCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceCommand) Run(args []string) int { + c.Meta.process(args) + envCommandShowWarning(c.Ui, c.LegacyName) + + cmdFlags := c.Meta.extendedFlagSet("workspace") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + + return cli.RunResultHelp +} + +func (c *WorkspaceCommand) Help() string { + helpText := ` +Usage: tofu [global options] workspace + + new, list, show, select and delete OpenTofu workspaces. + +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceCommand) Synopsis() string { + return "Workspace management" +} + +// validWorkspaceName returns true is this name is valid to use as a workspace name. +// Since most named states are accessed via a filesystem path or URL, check if +// escaping the name would be required. +func validWorkspaceName(name string) bool { + return name == url.PathEscape(name) +} + +func envCommandShowWarning(ui cli.Ui, show bool) { + if !show { + return + } + + ui.Warn(`Warning: the "tofu env" family of commands is deprecated. + +"Workspace" is now the preferred term for what earlier OpenTofu versions +called "environment", to reduce ambiguity caused by the latter term colliding +with other concepts. + +The "tofu workspace" commands should be used instead. "tofu env" +will be removed in a future OpenTofu version. +`) +} + +const ( + envExists = `Workspace %q already exists` + + envDoesNotExist = ` +Workspace %q doesn't exist. + +You can create this workspace with the "new" subcommand +or include the "-or-create" flag with the "select" subcommand.` + + envChanged = `[reset][green]Switched to workspace %q.` + + envCreated = ` +[reset][green][bold]Created and switched to workspace %q![reset][green] + +You're now on a new, empty workspace. Workspaces isolate their state, +so if you run "tofu plan" OpenTofu will not see any existing state +for this configuration. 
+` + + envDeleted = `[reset][green]Deleted workspace %q!` + + envWarnNotEmpty = `[reset][yellow]WARNING: %q was non-empty. +The resources managed by the deleted workspace may still exist, +but are no longer manageable by OpenTofu since the state has +been deleted. +` + + envDelCurrent = ` +Workspace %[1]q is your active workspace. + +You cannot delete the currently active workspace. Please switch +to another workspace and try again. +` + + envInvalidName = ` +The workspace name %q is not allowed. The name must contain only URL safe +characters, and no path separators. +` + + envIsOverriddenNote = ` + +The active workspace is being overridden using the TF_WORKSPACE environment +variable. +` + + envIsOverriddenSelectError = ` +The selected workspace is currently overridden using the TF_WORKSPACE +environment variable. + +To select a new workspace, either update this environment variable or unset +it and then run this command again. +` + + envIsOverriddenNewError = ` +The workspace is currently overridden using the TF_WORKSPACE environment +variable. You cannot create a new workspace when using this setting. + +To create a new workspace, either unset this environment variable or update it +to match the workspace name you are trying to create, and then run this command +again. +` +) diff --git a/pkg/command/workspace_command_test.go b/pkg/command/workspace_command_test.go new file mode 100644 index 00000000000..40e5377fc5d --- /dev/null +++ b/pkg/command/workspace_command_test.go @@ -0,0 +1,470 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/backend" + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/backend/remote-state/inmem" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/mitchellh/cli" + + legacy "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestWorkspace_createAndChange(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + newCmd := &WorkspaceNewCommand{} + + current, _ := newCmd.Workspace() + if current != backend.DefaultStateName { + t.Fatal("current workspace should be 'default'") + } + + args := []string{"test"} + ui := new(cli.MockUi) + view, _ := testView(t) + newCmd.Meta = Meta{Ui: ui, View: view} + if code := newCmd.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + current, _ = newCmd.Workspace() + if current != "test" { + t.Fatalf("current workspace should be 'test', got %q", current) + } + + selCmd := &WorkspaceSelectCommand{} + args = []string{backend.DefaultStateName} + ui = new(cli.MockUi) + selCmd.Meta = Meta{Ui: ui, View: view} + if code := selCmd.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + current, _ = newCmd.Workspace() + if current != backend.DefaultStateName { + t.Fatal("current workspace should be 'default'") + } + +} + +// Create some workspaces and test the list output. 
+// This also ensures we switch to the correct env after each call +func TestWorkspace_createAndList(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // make sure a vars file doesn't interfere + err := os.WriteFile( + DefaultVarsFilename, + []byte(`foo = "bar"`), + 0644, + ) + if err != nil { + t.Fatal(err) + } + + envs := []string{"test_a", "test_b", "test_c"} + + // create multiple workspaces + for _, env := range envs { + ui := new(cli.MockUi) + view, _ := testView(t) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := newCmd.Run([]string{env}); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + } + + listCmd := &WorkspaceListCommand{} + ui := new(cli.MockUi) + view, _ := testView(t) + listCmd.Meta = Meta{Ui: ui, View: view} + + if code := listCmd.Run(nil); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "default\n test_a\n test_b\n* test_c" + + if actual != expected { + t.Fatalf("\nexpected: %q\nactual: %q", expected, actual) + } +} + +// Create some workspaces and test the show output. 
+func TestWorkspace_createAndShow(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // make sure a vars file doesn't interfere + err := os.WriteFile( + DefaultVarsFilename, + []byte(`foo = "bar"`), + 0644, + ) + if err != nil { + t.Fatal(err) + } + + // make sure current workspace show outputs "default" + showCmd := &WorkspaceShowCommand{} + ui := new(cli.MockUi) + view, _ := testView(t) + showCmd.Meta = Meta{Ui: ui, View: view} + + if code := showCmd.Run(nil); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "default" + + if actual != expected { + t.Fatalf("\nexpected: %q\nactual: %q", expected, actual) + } + + newCmd := &WorkspaceNewCommand{} + + env := []string{"test_a"} + + // create test_a workspace + ui = new(cli.MockUi) + newCmd.Meta = Meta{Ui: ui, View: view} + if code := newCmd.Run(env); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + selCmd := &WorkspaceSelectCommand{} + ui = new(cli.MockUi) + selCmd.Meta = Meta{Ui: ui, View: view} + if code := selCmd.Run(env); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + showCmd = &WorkspaceShowCommand{} + ui = new(cli.MockUi) + showCmd.Meta = Meta{Ui: ui, View: view} + + if code := showCmd.Run(nil); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + actual = strings.TrimSpace(ui.OutputWriter.String()) + expected = "test_a" + + if actual != expected { + t.Fatalf("\nexpected: %q\nactual: %q", expected, actual) + } +} + +// Don't allow names that aren't URL safe +func TestWorkspace_createInvalid(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + envs := []string{"test_a*", "test_b/foo", "../../../test_c", "好_d"} + + // create multiple workspaces + for _, env := range envs { + ui := 
new(cli.MockUi) + view, _ := testView(t) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := newCmd.Run([]string{env}); code == 0 { + t.Fatalf("expected failure: \n%s", ui.OutputWriter) + } + } + + // list workspaces to make sure none were created + listCmd := &WorkspaceListCommand{} + ui := new(cli.MockUi) + view, _ := testView(t) + listCmd.Meta = Meta{Ui: ui, View: view} + + if code := listCmd.Run(nil); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "* default" + + if actual != expected { + t.Fatalf("\nexpected: %q\nactual: %q", expected, actual) + } +} + +func TestWorkspace_createWithState(t *testing.T) { + td := t.TempDir() + testCopyDir(t, testFixturePath("inmem-backend"), td) + defer testChdir(t, td)() + defer inmem.Reset() + + // init the backend + ui := new(cli.MockUi) + view, _ := testView(t) + initCmd := &InitCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := initCmd.Run([]string{}); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + err := statemgr.WriteAndPersist(statemgr.NewFilesystem("test.tfstate", encryption.StateEncryptionDisabled()), originalState, nil) + if err != nil { + t.Fatal(err) + } + + workspace := "test_workspace" + + args := []string{"-state", "test.tfstate", workspace} + ui = new(cli.MockUi) + newCmd := &WorkspaceNewCommand{ + Meta: Meta{Ui: ui, View: view}, + } + if code := newCmd.Run(args); code != 0 { + 
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + newPath := filepath.Join(local.DefaultWorkspaceDir, "test", DefaultStateFilename) + envState := statemgr.NewFilesystem(newPath, encryption.StateEncryptionDisabled()) + err = envState.RefreshState() + if err != nil { + t.Fatal(err) + } + + b := backend.TestBackendConfig(t, inmem.New(encryption.StateEncryptionDisabled()), nil) + sMgr, err := b.StateMgr(workspace) + if err != nil { + t.Fatal(err) + } + + newState := sMgr.State() + + if got, want := newState.String(), originalState.String(); got != want { + t.Fatalf("states not equal\ngot: %s\nwant: %s", got, want) + } +} + +func TestWorkspace_delete(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // create the workspace directories + if err := os.MkdirAll(filepath.Join(local.DefaultWorkspaceDir, "test"), 0755); err != nil { + t.Fatal(err) + } + + // create the workspace file + if err := os.MkdirAll(DefaultDataDir, 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(DefaultDataDir, local.DefaultWorkspaceFile), []byte("test"), 0644); err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + delCmd := &WorkspaceDeleteCommand{ + Meta: Meta{Ui: ui, View: view}, + } + + current, _ := delCmd.Workspace() + if current != "test" { + t.Fatal("wrong workspace:", current) + } + + // we can't delete our current workspace + args := []string{"test"} + if code := delCmd.Run(args); code == 0 { + t.Fatal("expected error deleting current workspace") + } + + // change back to default + if err := delCmd.SetWorkspace(backend.DefaultStateName); err != nil { + t.Fatal(err) + } + + // try the delete again + ui = new(cli.MockUi) + delCmd.Meta.Ui = ui + if code := delCmd.Run(args); code != 0 { + t.Fatalf("error deleting workspace: %s", ui.ErrorWriter) + } + + current, _ = delCmd.Workspace() + if current != backend.DefaultStateName { + t.Fatalf("wrong workspace: %q", current) + } +} + +func 
TestWorkspace_deleteInvalid(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // choose an invalid workspace name + workspace := "test workspace" + path := filepath.Join(local.DefaultWorkspaceDir, workspace) + + // create the workspace directories + if err := os.MkdirAll(path, 0755); err != nil { + t.Fatal(err) + } + + ui := new(cli.MockUi) + view, _ := testView(t) + delCmd := &WorkspaceDeleteCommand{ + Meta: Meta{Ui: ui, View: view}, + } + + // delete the workspace + if code := delCmd.Run([]string{workspace}); code != 0 { + t.Fatalf("error deleting workspace: %s", ui.ErrorWriter) + } + + if _, err := os.Stat(path); err == nil { + t.Fatalf("should have deleted workspace, but %s still exists", path) + } else if !os.IsNotExist(err) { + t.Fatalf("unexpected error for workspace path: %s", err) + } +} + +func TestWorkspace_deleteWithState(t *testing.T) { + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + // create the workspace directories + if err := os.MkdirAll(filepath.Join(local.DefaultWorkspaceDir, "test"), 0755); err != nil { + t.Fatal(err) + } + + // create a non-empty state + originalState := &legacy.State{ + Modules: []*legacy.ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*legacy.ResourceState{ + "test_instance.foo": { + Type: "test_instance", + Primary: &legacy.InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + } + + f, err := os.Create(filepath.Join(local.DefaultWorkspaceDir, "test", "terraform.tfstate")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if err := legacy.WriteState(originalState, f); err != nil { + t.Fatal(err) + } + + ui := cli.NewMockUi() + view, _ := testView(t) + delCmd := &WorkspaceDeleteCommand{ + Meta: Meta{Ui: ui, View: view}, + } + args := []string{"test"} + if code := delCmd.Run(args); code == 0 { + t.Fatalf("expected failure without -force.\noutput: %s", ui.OutputWriter) + } + gotStderr := ui.ErrorWriter.String() + if want, got := 
`Workspace "test" is currently tracking the following resource instances`, gotStderr; !strings.Contains(got, want) { + t.Errorf("missing expected error message\nwant substring: %s\ngot:\n%s", want, got) + } + if want, got := `- test_instance.foo`, gotStderr; !strings.Contains(got, want) { + t.Errorf("error message doesn't mention the remaining instance\nwant substring: %s\ngot:\n%s", want, got) + } + + ui = new(cli.MockUi) + delCmd.Meta.Ui = ui + + args = []string{"-force", "test"} + if code := delCmd.Run(args); code != 0 { + t.Fatalf("failure: %s", ui.ErrorWriter) + } + + if _, err := os.Stat(filepath.Join(local.DefaultWorkspaceDir, "test")); !os.IsNotExist(err) { + t.Fatal("env 'test' still exists!") + } +} + +func TestWorkspace_selectWithOrCreate(t *testing.T) { + // Create a temporary working directory that is empty + td := t.TempDir() + os.MkdirAll(td, 0755) + defer testChdir(t, td)() + + selectCmd := &WorkspaceSelectCommand{} + + current, _ := selectCmd.Workspace() + if current != backend.DefaultStateName { + t.Fatal("current workspace should be 'default'") + } + + args := []string{"-or-create", "test"} + ui := new(cli.MockUi) + view, _ := testView(t) + selectCmd.Meta = Meta{Ui: ui, View: view} + if code := selectCmd.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) + } + + current, _ = selectCmd.Workspace() + if current != "test" { + t.Fatalf("current workspace should be 'test', got %q", current) + } + +} diff --git a/pkg/command/workspace_delete.go b/pkg/command/workspace_delete.go new file mode 100644 index 00000000000..19575179be1 --- /dev/null +++ b/pkg/command/workspace_delete.go @@ -0,0 +1,253 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/mitchellh/cli" + "github.com/posener/complete" + + "github.com/kubegems/opentofu/pkg/command/arguments" + "github.com/kubegems/opentofu/pkg/command/clistate" + "github.com/kubegems/opentofu/pkg/command/views" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type WorkspaceDeleteCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceDeleteCommand) Run(args []string) int { + args = c.Meta.process(args) + envCommandShowWarning(c.Ui, c.LegacyName) + + var force bool + var stateLock bool + var stateLockTimeout time.Duration + cmdFlags := c.Meta.defaultFlagSet("workspace delete") + c.Meta.varFlagSet(cmdFlags) + cmdFlags.BoolVar(&force, "force", false, "force removal of a non-empty workspace") + cmdFlags.BoolVar(&stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error("Expected a single argument: NAME.\n") + return cli.RunResultHelp + } + + configPath, err := modulePath(args[1:]) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the encryption configuration + enc, encDiags := c.EncryptionFromPath(configPath) + diags = diags.Append(encDiags) + if encDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }, enc.State()) + diags = diags.Append(backendDiags) + if 
backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // This command will not write state + c.ignoreRemoteVersionConflict(b) + + workspaces, err := b.Workspaces() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + workspace := args[0] + exists := false + for _, ws := range workspaces { + if workspace == ws { + exists = true + break + } + } + + if !exists { + c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDoesNotExist), workspace)) + return 1 + } + + currentWorkspace, err := c.Workspace() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) + return 1 + } + if workspace == currentWorkspace { + c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDelCurrent), workspace)) + return 1 + } + + // we need the actual state to see if it's empty + stateMgr, err := b.StateMgr(workspace) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var stateLocker clistate.Locker + if stateLock { + stateLocker = clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) + if diags := stateLocker.Lock(stateMgr, "state-replace-provider"); diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } else { + stateLocker = clistate.NewNoopLocker() + } + + if err := stateMgr.RefreshState(); err != nil { + // We need to release the lock before exit + stateLocker.Unlock() + c.Ui.Error(err.Error()) + return 1 + } + + hasResources := stateMgr.State().HasManagedResourceInstanceObjects() + + if hasResources && !force { + // We'll collect a list of what's being managed here as extra context + // for the message. 
+ var buf strings.Builder + for _, obj := range stateMgr.State().AllResourceInstanceObjectAddrs() { + if obj.DeposedKey == states.NotDeposed { + fmt.Fprintf(&buf, "\n - %s", obj.Instance.String()) + } else { + fmt.Fprintf(&buf, "\n - %s (deposed object %s)", obj.Instance.String(), obj.DeposedKey) + } + } + + // We need to release the lock before exit + stateLocker.Unlock() + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Workspace is not empty", + fmt.Sprintf( + "Workspace %q is currently tracking the following resource instances:%s\n\nDeleting this workspace would cause OpenTofu to lose track of any associated remote objects, which would then require you to delete them manually outside of OpenTofu. You should destroy these objects with OpenTofu before deleting the workspace.\n\nIf you want to delete this workspace anyway, and have OpenTofu forget about these managed objects, use the -force option to disable this safety check.", + workspace, buf.String(), + ), + )) + c.showDiagnostics(diags) + return 1 + } + + // We need to release the lock just before deleting the state, in case + // the backend can't remove the resource while holding the lock. This + // is currently true for Windows local files. + // + // TODO: While there is little safety in locking while deleting the + // state, it might be nice to be able to coordinate processes around + // state deletion, i.e. in a CI environment. Adding Delete() as a + // required method of States would allow the removal of the resource to + // be delegated from the Backend to the State itself. 
+ stateLocker.Unlock() + + err = b.DeleteWorkspace(workspace, force) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + c.Ui.Output( + c.Colorize().Color( + fmt.Sprintf(envDeleted, workspace), + ), + ) + + if hasResources { + c.Ui.Output( + c.Colorize().Color( + fmt.Sprintf(envWarnNotEmpty, workspace), + ), + ) + } + + return 0 +} + +func (c *WorkspaceDeleteCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + c.completePredictWorkspaceName(), + complete.PredictDirs(""), + } +} + +func (c *WorkspaceDeleteCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-force": complete.PredictNothing, + } +} + +func (c *WorkspaceDeleteCommand) Help() string { + helpText := ` +Usage: tofu [global options] workspace delete [options] NAME + + Delete a OpenTofu workspace + + +Options: + + -force Remove a workspace even if it is managing resources. + OpenTofu can no longer track or manage the workspace's + infrastructure. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -var 'foo=bar' Set a value for one of the input variables in the root + module of the configuration. Use this option more than + once to set more than one variable. + + -var-file=filename Load variable values from the given file, in addition + to the default files terraform.tfvars and *.auto.tfvars. + Use this option more than once to include more than one + variables file. 
+
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WorkspaceDeleteCommand) Synopsis() string {
+	return "Delete a workspace"
+}
diff --git a/pkg/command/workspace_list.go b/pkg/command/workspace_list.go
new file mode 100644
index 00000000000..5f0c8cb2db6
--- /dev/null
+++ b/pkg/command/workspace_list.go
@@ -0,0 +1,128 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"github.com/posener/complete"
+
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// WorkspaceListCommand lists the workspaces known to the configured backend,
+// marking the currently selected one with "*".
+type WorkspaceListCommand struct {
+	Meta
+	LegacyName bool
+}
+
+func (c *WorkspaceListCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	envCommandShowWarning(c.Ui, c.LegacyName)
+
+	cmdFlags := c.Meta.defaultFlagSet("workspace list")
+	c.Meta.varFlagSet(cmdFlags)
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
+		return 1
+	}
+
+	args = cmdFlags.Args()
+	configPath, err := modulePath(args)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	// Load the encryption configuration
+	enc, encDiags := c.EncryptionFromPath(configPath)
+	if encDiags.HasErrors() {
+		c.showDiagnostics(encDiags)
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	backendConfig, backendDiags := c.loadBackendConfig(configPath)
+	diags = diags.Append(backendDiags)
+	if diags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Load the backend
+	b, backendDiags := c.Backend(&BackendOpts{
+		Config: backendConfig,
+	}, enc.State())
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// This command will not write state
+	c.ignoreRemoteVersionConflict(b)
+
+	states, err := b.Workspaces()
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	env, isOverridden := c.WorkspaceOverridden()
+
+	var out bytes.Buffer
+	for _, s := range states {
+		if s == env {
+			out.WriteString("* ")
+		} else {
+			out.WriteString("  ")
+		}
+		out.WriteString(s + "\n")
+	}
+
+	c.Ui.Output(out.String())
+
+	if isOverridden {
+		c.Ui.Output(envIsOverriddenNote)
+	}
+
+	return 0
+}
+
+func (c *WorkspaceListCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictDirs("")
+}
+
+func (c *WorkspaceListCommand) AutocompleteFlags() complete.Flags {
+	return nil
+}
+
+func (c *WorkspaceListCommand) Help() string {
+	helpText := `
+Usage: tofu [global options] workspace list [options]
+
+  List OpenTofu workspaces.
+
+Options:
+
+  -var 'foo=bar'      Set a value for one of the input variables in the root
+                      module of the configuration. Use this option more than
+                      once to set more than one variable.
+
+  -var-file=filename  Load variable values from the given file, in addition
+                      to the default files terraform.tfvars and *.auto.tfvars.
+                      Use this option more than once to include more than one
+                      variables file.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WorkspaceListCommand) Synopsis() string {
+	return "List Workspaces"
+}
diff --git a/pkg/command/workspace_new.go b/pkg/command/workspace_new.go
new file mode 100644
index 00000000000..ddff94a382c
--- /dev/null
+++ b/pkg/command/workspace_new.go
@@ -0,0 +1,228 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+
+	"github.com/kubegems/opentofu/pkg/command/arguments"
+	"github.com/kubegems/opentofu/pkg/command/clistate"
+	"github.com/kubegems/opentofu/pkg/command/views"
+	"github.com/kubegems/opentofu/pkg/encryption"
+	"github.com/kubegems/opentofu/pkg/states/statefile"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// WorkspaceNewCommand creates a new workspace, selects it, and optionally
+// seeds it from an existing local state file (-state).
+type WorkspaceNewCommand struct {
+	Meta
+	LegacyName bool
+}
+
+func (c *WorkspaceNewCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	envCommandShowWarning(c.Ui, c.LegacyName)
+
+	var stateLock bool
+	var stateLockTimeout time.Duration
+	var statePath string
+	cmdFlags := c.Meta.defaultFlagSet("workspace new")
+	c.Meta.varFlagSet(cmdFlags)
+	cmdFlags.BoolVar(&stateLock, "lock", true, "lock state")
+	cmdFlags.DurationVar(&stateLockTimeout, "lock-timeout", 0, "lock timeout")
+	cmdFlags.StringVar(&statePath, "state", "", "tofu state file")
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
+		return 1
+	}
+
+	args = cmdFlags.Args()
+	if len(args) != 1 {
+		c.Ui.Error("Expected a single argument: NAME.\n")
+		return cli.RunResultHelp
+	}
+
+	workspace := args[0]
+
+	if !validWorkspaceName(workspace) {
+		c.Ui.Error(fmt.Sprintf(envInvalidName, workspace))
+		return 1
+	}
+
+	// You can't ask to create a workspace when you're overriding the
+	// workspace name to be something different.
+	if current, isOverridden := c.WorkspaceOverridden(); current != workspace && isOverridden {
+		c.Ui.Error(envIsOverriddenNewError)
+		return 1
+	}
+
+	configPath, err := modulePath(args[1:])
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	backendConfig, backendDiags := c.loadBackendConfig(configPath)
+	diags = diags.Append(backendDiags)
+	if diags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Load the encryption configuration
+	enc, encDiags := c.EncryptionFromPath(configPath)
+	diags = diags.Append(encDiags)
+	if encDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Load the backend
+	b, backendDiags := c.Backend(&BackendOpts{
+		Config: backendConfig,
+	}, enc.State())
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// This command will not write state
+	c.ignoreRemoteVersionConflict(b)
+
+	workspaces, err := b.Workspaces()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Failed to get configured named states: %s", err))
+		return 1
+	}
+	for _, ws := range workspaces {
+		if workspace == ws {
+			c.Ui.Error(fmt.Sprintf(envExists, workspace))
+			return 1
+		}
+	}
+
+	_, err = b.StateMgr(workspace)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	// now set the current workspace locally
+	if err := c.SetWorkspace(workspace); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error selecting new workspace: %s", err))
+		return 1
+	}
+
+	c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
+		strings.TrimSpace(envCreated), workspace)))
+
+	if statePath == "" {
+		// if we're not loading a state, then we're done
+		return 0
+	}
+
+	// load the new Backend state
+	stateMgr, err := b.StateMgr(workspace)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	if stateLock {
+		stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View))
+		if diags := stateLocker.Lock(stateMgr, "workspace-new"); diags.HasErrors() {
+			c.showDiagnostics(diags)
+			return 1
+		}
+		defer func() {
+			if diags := stateLocker.Unlock(); diags.HasErrors() {
+				c.showDiagnostics(diags)
+			}
+		}()
+	}
+
+	// read the existing state file
+	// NOTE(review): f is never closed on any path; consider defer f.Close().
+	f, err := os.Open(statePath)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	stateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) // Assume given statefile is not encrypted
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	// save the existing state in the new Backend.
+	err = stateMgr.WriteState(stateFile.State)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+	err = stateMgr.PersistState(nil)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	return 0
+}
+
+func (c *WorkspaceNewCommand) AutocompleteArgs() complete.Predictor {
+	return completePredictSequence{
+		complete.PredictAnything,
+		complete.PredictDirs(""),
+	}
+}
+
+func (c *WorkspaceNewCommand) AutocompleteFlags() complete.Flags {
+	return complete.Flags{
+		"-state": complete.PredictFiles("*.tfstate"),
+	}
+}
+
+func (c *WorkspaceNewCommand) Help() string {
+	helpText := `
+Usage: tofu [global options] workspace new [OPTIONS] NAME
+
+  Create a new OpenTofu workspace.
+
+Options:
+
+  -lock=false         Don't hold a state lock during the operation. This is
+                      dangerous if others might concurrently run commands
+                      against the same workspace.
+
+  -lock-timeout=0s    Duration to retry a state lock.
+
+  -state=path         Copy an existing state file into the new workspace.
+
+
+  -var 'foo=bar'      Set a value for one of the input variables in the root
+                      module of the configuration. Use this option more than
+                      once to set more than one variable.
+
+  -var-file=filename  Load variable values from the given file, in addition
+                      to the default files terraform.tfvars and *.auto.tfvars.
+                      Use this option more than once to include more than one
+                      variables file.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WorkspaceNewCommand) Synopsis() string {
+	return "Create a new workspace"
+}
diff --git a/pkg/command/workspace_select.go b/pkg/command/workspace_select.go
new file mode 100644
index 00000000000..f24ffefed4d
--- /dev/null
+++ b/pkg/command/workspace_select.go
@@ -0,0 +1,181 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// WorkspaceSelectCommand switches the currently selected workspace,
+// optionally creating it first (-or-create).
+type WorkspaceSelectCommand struct {
+	Meta
+	LegacyName bool
+}
+
+func (c *WorkspaceSelectCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	envCommandShowWarning(c.Ui, c.LegacyName)
+
+	var orCreate bool
+	cmdFlags := c.Meta.defaultFlagSet("workspace select")
+	c.Meta.varFlagSet(cmdFlags)
+	cmdFlags.BoolVar(&orCreate, "or-create", false, "create workspace if it does not exist")
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
+		return 1
+	}
+
+	args = cmdFlags.Args()
+	if len(args) != 1 {
+		c.Ui.Error("Expected a single argument: NAME.\n")
+		return cli.RunResultHelp
+	}
+
+	configPath, err := modulePath(args[1:])
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	backendConfig, backendDiags := c.loadBackendConfig(configPath)
+	diags = diags.Append(backendDiags)
+	if diags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	current, isOverridden := c.WorkspaceOverridden()
+	if isOverridden {
+		c.Ui.Error(envIsOverriddenSelectError)
+		return 1
+	}
+
+	// Load the encryption configuration
+	enc, encDiags := c.EncryptionFromPath(configPath)
+	diags = diags.Append(encDiags)
+	if encDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Load the backend
+	b, backendDiags := c.Backend(&BackendOpts{
+		Config: backendConfig,
+	}, enc.State())
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// This command will not write state
+	c.ignoreRemoteVersionConflict(b)
+
+	name := args[0]
+	if !validWorkspaceName(name) {
+		c.Ui.Error(fmt.Sprintf(envInvalidName, name))
+		return 1
+	}
+
+	states, err := b.Workspaces()
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	if name == current {
+		// already using this workspace
+		return 0
+	}
+
+	found := false
+	for _, s := range states {
+		if name == s {
+			found = true
+			break
+		}
+	}
+
+	var newState bool
+
+	if !found {
+		if orCreate {
+			_, err = b.StateMgr(name)
+			if err != nil {
+				c.Ui.Error(err.Error())
+				return 1
+			}
+			newState = true
+		} else {
+			c.Ui.Error(fmt.Sprintf(envDoesNotExist, name))
+			return 1
+		}
+	}
+
+	err = c.SetWorkspace(name)
+	if err != nil {
+		c.Ui.Error(err.Error())
+		return 1
+	}
+
+	if newState {
+		c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
+			strings.TrimSpace(envCreated), name)))
+	} else {
+		c.Ui.Output(
+			c.Colorize().Color(
+				fmt.Sprintf(envChanged, name),
+			),
+		)
+	}
+
+	return 0
+}
+
+func (c *WorkspaceSelectCommand) AutocompleteArgs() complete.Predictor {
+	return completePredictSequence{
+		c.completePredictWorkspaceName(),
+		complete.PredictDirs(""),
+	}
+}
+
+func (c *WorkspaceSelectCommand) AutocompleteFlags() complete.Flags {
+	return nil
+}
+
+func (c *WorkspaceSelectCommand) Help() string {
+	helpText := `
+Usage: tofu [global options] workspace select [options] NAME
+
+  Select a different OpenTofu workspace.
+
+Options:
+
+  -or-create=false    Create the OpenTofu workspace if it doesn't exist.
+
+  -var 'foo=bar'      Set a value for one of the input variables in the root
+                      module of the configuration. Use this option more than
+                      once to set more than one variable.
+
+  -var-file=filename  Load variable values from the given file, in addition
+                      to the default files terraform.tfvars and *.auto.tfvars.
+                      Use this option more than once to include more than one
+                      variables file.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WorkspaceSelectCommand) Synopsis() string {
+	return "Select a workspace"
+}
diff --git a/pkg/command/workspace_show.go b/pkg/command/workspace_show.go
new file mode 100644
index 00000000000..a93e426d9df
--- /dev/null
+++ b/pkg/command/workspace_show.go
@@ -0,0 +1,57 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/posener/complete"
+)
+
+// WorkspaceShowCommand prints the name of the currently selected workspace.
+type WorkspaceShowCommand struct {
+	Meta
+}
+
+func (c *WorkspaceShowCommand) Run(args []string) int {
+	args = c.Meta.process(args)
+	cmdFlags := c.Meta.extendedFlagSet("workspace show")
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
+		return 1
+	}
+
+	workspace, err := c.Workspace()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err))
+		return 1
+	}
+	c.Ui.Output(workspace)
+
+	return 0
+}
+
+func (c *WorkspaceShowCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictNothing
+}
+
+func (c *WorkspaceShowCommand) AutocompleteFlags() complete.Flags {
+	return nil
+}
+
+func (c *WorkspaceShowCommand) Help() string {
+	helpText := `
+Usage: tofu [global options] workspace show
+
+  Show the name of the current workspace.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WorkspaceShowCommand) Synopsis() string {
+	return "Show the name of the current workspace"
+}
diff --git a/pkg/communicator/communicator.go b/pkg/communicator/communicator.go
new file mode 100644
index 00000000000..8792e016032
--- /dev/null
+++ b/pkg/communicator/communicator.go
@@ -0,0 +1,175 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package communicator
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"log"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/kubegems/opentofu/pkg/communicator/remote"
+	"github.com/kubegems/opentofu/pkg/communicator/shared"
+	"github.com/kubegems/opentofu/pkg/communicator/ssh"
+	"github.com/kubegems/opentofu/pkg/communicator/winrm"
+	"github.com/kubegems/opentofu/pkg/provisioners"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Communicator is an interface that must be implemented by all communicators
+// used for any of the provisioners
+type Communicator interface {
+	// Connect is used to set up the connection
+	Connect(provisioners.UIOutput) error
+
+	// Disconnect is used to terminate the connection
+	Disconnect() error
+
+	// Timeout returns the configured connection timeout
+	Timeout() time.Duration
+
+	// ScriptPath returns the configured script path
+	ScriptPath() string
+
+	// Start executes a remote command in a new session
+	Start(*remote.Cmd) error
+
+	// Upload is used to upload a single file
+	Upload(string, io.Reader) error
+
+	// UploadScript is used to upload a file as an executable script
+	UploadScript(string, io.Reader) error
+
+	// UploadDir is used to upload a directory
+	UploadDir(string, string) error
+}
+
+// New returns a configured Communicator or an error if the connection type is not supported
+func New(v cty.Value) (Communicator, error) {
+	v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v)
+	if err != nil {
+		return nil, err
+	}
+
+	typeVal := v.GetAttr("type")
+	connType := ""
+	if !typeVal.IsNull() {
+		connType = typeVal.AsString()
+	}
+
+	switch connType {
+	case "ssh", "": // The default connection type is ssh, so if connType is empty use ssh
+		return ssh.New(v)
+	case "winrm":
+		return winrm.New(v)
+	default:
+		return nil, fmt.Errorf("connection type '%s' not supported", connType)
+	}
+}
+
+// maxBackoffDelay is the maximum delay between retry attempts
+var maxBackoffDelay = 20 * time.Second
+var initialBackoffDelay = time.Second
+
+// in practice we want to abort the retry asap, but for tests we need to
+// synchronize the return.
+var retryTestWg *sync.WaitGroup
+
+// Fatal is an interface that error values can return to halt Retry
+type Fatal interface {
+	FatalError() error
+}
+
+// Retry retries the function f until it returns a nil error, a Fatal error, or
+// the context expires.
+func Retry(ctx context.Context, f func() error) error {
+	// container for atomic error value
+	type errWrap struct {
+		E error
+	}
+
+	// Try the function in a goroutine
+	var errVal atomic.Value
+	doneCh := make(chan struct{})
+	go func() {
+		if retryTestWg != nil {
+			defer retryTestWg.Done()
+		}
+
+		defer close(doneCh)
+
+		delay := time.Duration(0)
+		for {
+			// If our context ended, we want to exit right away.
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(delay):
+			}
+
+			// Try the function call
+			err := f()
+
+			// return if we have no error, or a FatalError
+			done := false
+			switch e := err.(type) {
+			case nil:
+				done = true
+			case Fatal:
+				err = e.FatalError()
+				done = true
+			}
+
+			errVal.Store(errWrap{err})
+
+			if done {
+				return
+			}
+
+			log.Printf("[WARN] retryable error: %v", err)
+
+			// Exponential backoff: doubling 0 keeps it 0, so the zero check
+			// below seeds the first delay with initialBackoffDelay.
+			delay *= 2
+
+			if delay == 0 {
+				delay = initialBackoffDelay
+			}
+
+			if delay > maxBackoffDelay {
+				delay = maxBackoffDelay
+			}
+
+			log.Printf("[INFO] sleeping for %s", delay)
+		}
+	}()
+
+	// Wait for completion
+	select {
+	case <-ctx.Done():
+	case <-doneCh:
+	}
+
+	var lastErr error
+	// Check if we got an error executing
+	if ev, ok := errVal.Load().(errWrap); ok {
+		lastErr = ev.E
+	}
+
+	// Check if we have a context error to check if we're interrupted or timeout
+	switch ctx.Err() {
+	case context.Canceled:
+		return fmt.Errorf("interrupted - last error: %w", lastErr)
+	case context.DeadlineExceeded:
+		return fmt.Errorf("timeout - last error: %w", lastErr)
+	}
+
+	if lastErr != nil {
+		return lastErr
+	}
+	return nil
+}
diff --git a/pkg/communicator/communicator_mock.go b/pkg/communicator/communicator_mock.go
new file mode 100644
index 00000000000..76163a274fb
--- /dev/null
+++ b/pkg/communicator/communicator_mock.go
@@ -0,0 +1,111 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package communicator
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/kubegems/opentofu/pkg/communicator/remote"
+	"github.com/kubegems/opentofu/pkg/provisioners"
+)
+
+// MockCommunicator is an implementation of Communicator that can be used for tests.
+type MockCommunicator struct {
+	RemoteScriptPath string
+	Commands         map[string]bool
+	Uploads          map[string]string
+	UploadScripts    map[string]string
+	UploadDirs       map[string]string
+	CommandFunc      func(*remote.Cmd) error
+	DisconnectFunc   func() error
+	ConnTimeout      time.Duration
+}
+
+// Connect implementation of communicator.Communicator interface
+func (c *MockCommunicator) Connect(o provisioners.UIOutput) error {
+	return nil
+}
+
+// Disconnect implementation of communicator.Communicator interface
+func (c *MockCommunicator) Disconnect() error {
+	if c.DisconnectFunc != nil {
+		return c.DisconnectFunc()
+	}
+	return nil
+}
+
+// Timeout implementation of communicator.Communicator interface
+func (c *MockCommunicator) Timeout() time.Duration {
+	if c.ConnTimeout != 0 {
+		return c.ConnTimeout
+	}
+	return time.Duration(5 * time.Second)
+}
+
+// ScriptPath implementation of communicator.Communicator interface
+func (c *MockCommunicator) ScriptPath() string {
+	return c.RemoteScriptPath
+}
+
+// Start implementation of communicator.Communicator interface
+func (c *MockCommunicator) Start(r *remote.Cmd) error {
+	r.Init()
+
+	if c.CommandFunc != nil {
+		return c.CommandFunc(r)
+	}
+
+	if !c.Commands[r.Command] {
+		return fmt.Errorf("Command not found!")
+	}
+
+	r.SetExitStatus(0, nil)
+
+	return nil
+}
+
+// Upload implementation of communicator.Communicator interface
+func (c *MockCommunicator) Upload(path string, input io.Reader) error {
+	f, ok := c.Uploads[path]
+	if !ok {
+		return fmt.Errorf("Path %q not found!", path)
+	}
+
+	var buf bytes.Buffer
+	// NOTE(review): the error from ReadFrom is ignored; acceptable for a test
+	// mock, but a short read would surface as a confusing content mismatch.
+	buf.ReadFrom(input)
+	content := strings.TrimSpace(buf.String())
+
+	f = strings.TrimSpace(f)
+	if f != content {
+		return fmt.Errorf("expected: %q\n\ngot: %q\n", f, content)
+	}
+
+	return nil
+}
+
+// UploadScript implementation of communicator.Communicator interface
+// (note: this permanently replaces c.Uploads with c.UploadScripts).
+func (c *MockCommunicator) UploadScript(path string, input io.Reader) error {
+	c.Uploads = c.UploadScripts
+	return c.Upload(path, input)
+}
+
+// UploadDir implementation of communicator.Communicator interface
+func (c *MockCommunicator) UploadDir(dst string, src string) error {
+	v, ok := c.UploadDirs[src]
+	if !ok {
+		return fmt.Errorf("Directory not found!")
+	}
+
+	if v != dst {
+		return fmt.Errorf("expected: %q\n\ngot: %q\n", v, dst)
+	}
+
+	return nil
+}
diff --git a/pkg/communicator/communicator_test.go b/pkg/communicator/communicator_test.go
new file mode 100644
index 00000000000..50c832898f8
--- /dev/null
+++ b/pkg/communicator/communicator_test.go
@@ -0,0 +1,108 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package communicator
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestCommunicator_new(t *testing.T) {
+	cfg := map[string]cty.Value{
+		"type": cty.StringVal("telnet"),
+		"host": cty.StringVal("127.0.0.1"),
+	}
+
+	if _, err := New(cty.ObjectVal(cfg)); err == nil {
+		t.Fatalf("expected error with telnet")
+	}
+
+	cfg["type"] = cty.StringVal("ssh")
+	if _, err := New(cty.ObjectVal(cfg)); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	cfg["type"] = cty.StringVal("winrm")
+	if _, err := New(cty.ObjectVal(cfg)); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+func TestRetryFunc(t *testing.T) {
+	origMax := maxBackoffDelay
+	maxBackoffDelay = time.Second
+	origStart := initialBackoffDelay
+	initialBackoffDelay = 10 * time.Millisecond
+
+	defer func() {
+		maxBackoffDelay = origMax
+		initialBackoffDelay = origStart
+	}()
+
+	// succeed on the third try
+	errs := []error{io.EOF, &net.OpError{Err: errors.New("ERROR")}, nil}
+	count := 0
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	err := Retry(ctx, func() error {
+		if count >= len(errs) {
+			return errors.New("failed to stop after nil error")
+		}
+
+		err := errs[count]
+		count++
+
+		return err
+	})
+
+	if count != 3 {
+		t.Fatal("retry func should have been called 3 times")
+	}
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestRetryFuncBackoff(t *testing.T) {
+	origMax := maxBackoffDelay
+	maxBackoffDelay = time.Second
+	origStart := initialBackoffDelay
+	initialBackoffDelay = 100 * time.Millisecond
+
+	retryTestWg = &sync.WaitGroup{}
+	retryTestWg.Add(1)
+
+	defer func() {
+		maxBackoffDelay = origMax
+		initialBackoffDelay = origStart
+		retryTestWg = nil
+	}()
+
+	count := 0
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	Retry(ctx, func() error {
+		count++
+		return io.EOF
+	})
+	cancel()
+	retryTestWg.Wait()
+
+	if count > 4 {
+		t.Fatalf("retry func failed to backoff. called %d times", count)
+	}
+}
diff --git a/pkg/communicator/remote/command.go b/pkg/communicator/remote/command.go
new file mode 100644
index 00000000000..0ceb50f19e4
--- /dev/null
+++ b/pkg/communicator/remote/command.go
@@ -0,0 +1,101 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package remote
+
+import (
+	"fmt"
+	"io"
+	"sync"
+)
+
+// Cmd represents a remote command being prepared or run.
+type Cmd struct {
+	// Command is the command to run remotely. This is executed as if
+	// it were a shell command, so you are expected to do any shell escaping
+	// necessary.
+	Command string
+
+	// Stdin specifies the process's standard input. If Stdin is
+	// nil, the process reads from an empty bytes.Buffer.
+	Stdin io.Reader
+
+	// Stdout and Stderr represent the process's standard output and
+	// error.
+	//
+	// If either is nil, it will be set to ioutil.Discard.
+	Stdout io.Writer
+	Stderr io.Writer
+
+	// Once Wait returns, this will contain the exit code of the process.
+	exitStatus int
+
+	// Internal fields
+	exitCh chan struct{}
+
+	// err is used to store any error reported by the Communicator during
+	// execution.
+	err error
+
+	// This thing is a mutex, lock when making modifications concurrently
+	sync.Mutex
+}
+
+// Init must be called by the Communicator before executing the command.
+func (c *Cmd) Init() {
+	c.Lock()
+	defer c.Unlock()
+
+	c.exitCh = make(chan struct{})
+}
+
+// SetExitStatus stores the exit status of the remote command as well as any
+// communicator related error. SetExitStatus then unblocks any pending calls
+// to Wait.
+// This should only be called by communicators executing the remote.Cmd.
+func (c *Cmd) SetExitStatus(status int, err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.exitStatus = status
+	c.err = err
+
+	close(c.exitCh)
+}
+
+// Wait waits for the remote command to complete.
+// Wait may return an error from the communicator, or an ExitError if the
+// process exits with a non-zero exit status.
+func (c *Cmd) Wait() error {
+	<-c.exitCh
+
+	c.Lock()
+	defer c.Unlock()
+
+	if c.err != nil || c.exitStatus != 0 {
+		return &ExitError{
+			Command:    c.Command,
+			ExitStatus: c.exitStatus,
+			Err:        c.err,
+		}
+	}
+
+	return nil
+}
+
+// ExitError is returned by Wait to indicate an error executing the remote
+// command, or a non-zero exit status.
+type ExitError struct {
+	Command    string
+	ExitStatus int
+	Err        error
+}
+
+func (e *ExitError) Error() string {
+	if e.Err != nil {
+		return fmt.Sprintf("error executing %q: %v", e.Command, e.Err)
+	}
+	return fmt.Sprintf("%q exit status: %d", e.Command, e.ExitStatus)
+}
diff --git a/pkg/communicator/remote/command_test.go b/pkg/communicator/remote/command_test.go
new file mode 100644
index 00000000000..c90b07f1083
--- /dev/null
+++ b/pkg/communicator/remote/command_test.go
@@ -0,0 +1,6 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package remote
diff --git a/pkg/communicator/shared/shared.go b/pkg/communicator/shared/shared.go
new file mode 100644
index 00000000000..f9a80e64314
--- /dev/null
+++ b/pkg/communicator/shared/shared.go
@@ -0,0 +1,158 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package shared
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/kubegems/opentofu/pkg/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ConnectionBlockSupersetSchema is a schema representing the superset of all
+// possible arguments for "connection" blocks across all supported connection
+// types.
+//
+// This currently lives here because we've not yet updated our communicator
+// subsystem to be aware of schema itself. Once that is done, we can remove
+// this and use a type-specific schema from the communicator to validate
+// exactly what is expected for a given connection type.
+var ConnectionBlockSupersetSchema = &configschema.Block{
+	Attributes: map[string]*configschema.Attribute{
+		// Common attributes for both connection types
+		"host": {
+			Type:     cty.String,
+			Required: true,
+		},
+		"type": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"user": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"password": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"port": {
+			Type:     cty.Number,
+			Optional: true,
+		},
+		"timeout": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"script_path": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		// For type=ssh only (enforced in ssh communicator)
+		"target_platform": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"private_key": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"certificate": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"host_key": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"agent": {
+			Type:     cty.Bool,
+			Optional: true,
+		},
+		"agent_identity": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"proxy_scheme": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"proxy_host": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"proxy_port": {
+			Type:     cty.Number,
+			Optional: true,
+		},
+		"proxy_user_name": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"proxy_user_password": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_host": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_host_key": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_port": {
+			Type:     cty.Number,
+			Optional: true,
+		},
+		"bastion_user": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_password": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_private_key": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"bastion_certificate": {
+			Type:     cty.String,
+			Optional: true,
+		},
+
+		// For type=winrm only (enforced in winrm communicator)
+		"https": {
+			Type:     cty.Bool,
+			Optional: true,
+		},
+		"insecure": {
+			Type:     cty.Bool,
+			Optional: true,
+		},
+		"cacert": {
+			Type:     cty.String,
+			Optional: true,
+		},
+		"use_ntlm": {
+			Type:     cty.Bool,
+			Optional: true,
+		},
+	},
+}
+
+// IpFormat formats the IP correctly, so we don't provide IPv6 address in an
+// IPv4 format during node communication. We return the ip parameter as is if
+// it's an IPv4 address or a hostname.
+func IpFormat(ip string) string {
+	ipObj := net.ParseIP(ip)
+	// Return the ip/host as is if it's either a hostname or an IPv4 address.
+	if ipObj == nil || ipObj.To4() != nil {
+		return ip
+	}
+
+	// IPv6 addresses are bracketed so they can be combined with a :port suffix.
+	return fmt.Sprintf("[%s]", ip)
+}
diff --git a/pkg/communicator/shared/shared_test.go b/pkg/communicator/shared/shared_test.go
new file mode 100644
index 00000000000..67d0c0b53b3
--- /dev/null
+++ b/pkg/communicator/shared/shared_test.go
@@ -0,0 +1,31 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package shared
+
+import (
+	"testing"
+)
+
+func TestIpFormatting_Ipv4(t *testing.T) {
+	formatted := IpFormat("127.0.0.1")
+	if formatted != "127.0.0.1" {
+		t.Fatal("expected", "127.0.0.1", "got", formatted)
+	}
+}
+
+func TestIpFormatting_Hostname(t *testing.T) {
+	formatted := IpFormat("example.com")
+	if formatted != "example.com" {
+		t.Fatal("expected", "example.com", "got", formatted)
+	}
+}
+
+func TestIpFormatting_Ipv6(t *testing.T) {
+	formatted := IpFormat("::1")
+	if formatted != "[::1]" {
+		t.Fatal("expected", "[::1]", "got", formatted)
+	}
+}
diff --git a/pkg/communicator/ssh/communicator.go b/pkg/communicator/ssh/communicator.go
new file mode 100644
index 00000000000..208cb8bbaec
--- /dev/null
+++ b/pkg/communicator/ssh/communicator.go
@@ -0,0 +1,900 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/apparentlymart/go-shquot/shquot" + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +const ( + // DefaultShebang is added at the top of a SSH script file + DefaultShebang = "#!/bin/sh\n" +) + +var ( + // randShared is a global random generator object that is shared. This must be + // shared since it is seeded by the current time and creating multiple can + // result in the same values. By using a shared RNG we assure different numbers + // per call. + randLock sync.Mutex + randShared *rand.Rand + + // enable ssh keepalive probes by default + keepAliveInterval = 2 * time.Second + + // max time to wait for a KeepAlive response before considering the + // connection to be dead. + maxKeepAliveDelay = 120 * time.Second +) + +// Communicator represents the SSH communicator +type Communicator struct { + connInfo *connectionInfo + client *ssh.Client + config *sshConfig + conn net.Conn + cancelKeepAlive context.CancelFunc + + lock sync.Mutex +} + +type sshConfig struct { + // The configuration of the Go SSH connection + config *ssh.ClientConfig + + // connection returns a new connection. The current connection + // in use will be closed as part of the Close method, or in the + // case an error occurs. + connection func() (net.Conn, error) + + // noPty, if true, will not request a pty from the remote end. + noPty bool + + // sshAgent is a struct surrounding the agent.Agent client and the net.Conn + // to the SSH Agent.
It is nil if no SSH agent is configured + sshAgent *sshAgent +} + +type fatalError struct { + error +} + +func (e fatalError) FatalError() error { + return e.error +} + +// New creates a new communicator implementation over SSH. +func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) + if err != nil { + return nil, err + } + + config, err := prepareSSHConfig(connInfo) + if err != nil { + return nil, err + } + + // Set up the random number generator once. The seed value is the + // time multiplied by the PID. This can overflow the int64 but that + // is okay. We multiply by the PID in case we have multiple processes + // grabbing this at the same time. This is possible with OpenTofu and + // if we communicate to the same host at the same instance, we could + // overwrite the same files. Multiplying by the PID prevents this. + randLock.Lock() + defer randLock.Unlock() + if randShared == nil { + randShared = rand.New(rand.NewSource( + time.Now().UnixNano() * int64(os.Getpid()))) + } + + comm := &Communicator{ + connInfo: connInfo, + config: config, + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o provisioners.UIOutput) (err error) { + // Grab a lock so we can modify our internal attributes + c.lock.Lock() + defer c.lock.Unlock() + + if c.conn != nil { + c.conn.Close() + } + + // Set the conn and client to nil since we'll recreate it + c.conn = nil + c.client = nil + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via SSH...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " Certificate: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: %t\n"+ + " Target Platform: %s\n", + c.connInfo.Host, c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.PrivateKey != "", + c.connInfo.Certificate != "", + c.connInfo.Agent, + c.connInfo.HostKey != "", + c.connInfo.TargetPlatform, + )) + + if 
c.connInfo.BastionHost != "" { + o.Output(fmt.Sprintf( + "Using configured bastion host...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " Certificate: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: %t", + c.connInfo.BastionHost, c.connInfo.BastionUser, + c.connInfo.BastionPassword != "", + c.connInfo.BastionPrivateKey != "", + c.connInfo.BastionCertificate != "", + c.connInfo.Agent, + c.connInfo.BastionHostKey != "", + )) + } + + if c.connInfo.ProxyHost != "" { + o.Output(fmt.Sprintf( + "Using configured proxy host...\n"+ + " ProxyHost: %s\n"+ + " ProxyPort: %d\n"+ + " ProxyUserName: %s\n"+ + " ProxyUserPassword: %t", + c.connInfo.ProxyHost, + c.connInfo.ProxyPort, + c.connInfo.ProxyUserName, + c.connInfo.ProxyUserPassword != "", + )) + } + } + + hostAndPort := fmt.Sprintf("%s:%d", c.connInfo.Host, c.connInfo.Port) + log.Printf("[DEBUG] Connecting to %s for SSH", hostAndPort) + c.conn, err = c.config.connection() + if err != nil { + // Explicitly set this to the REAL nil. Connection() can return + // a nil implementation of net.Conn which will make the + // "if c.conn == nil" check fail above. Read here for more information + // on this psychotic language feature: + // + // http://golang.org/doc/faq#nil_error + c.conn = nil + + log.Printf("[ERROR] connection error: %s", err) + return err + } + + log.Printf("[DEBUG] Connection established. Handshaking for user %v", c.connInfo.User) + sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, hostAndPort, c.config.config) + if err != nil { + err = fmt.Errorf("SSH authentication failed (%s@%s): %w", c.connInfo.User, hostAndPort, err) + + // While in theory this should be a fatal error, some hosts may start + // the ssh service before it is properly configured, or before user + // authentication data is available. + // Log the error, and allow the provisioner to retry. 
+ log.Printf("[WARN] %s", err) + return err + } + + c.client = ssh.NewClient(sshConn, sshChan, req) + + if c.config.sshAgent != nil { + log.Printf("[DEBUG] Telling SSH config to forward to agent") + if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil { + return fatalError{err} + } + + log.Printf("[DEBUG] Setting up a session to request agent forwarding") + session, err := c.client.NewSession() + if err != nil { + return err + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + + if err == nil { + log.Printf("[INFO] agent forwarding enabled") + } else { + log.Printf("[WARN] error forwarding agent: %s", err) + } + } + + if err != nil { + return err + } + + if o != nil { + o.Output("Connected!") + } + + ctx, cancelKeepAlive := context.WithCancel(context.TODO()) + c.cancelKeepAlive = cancelKeepAlive + + // Start a keepalive goroutine to help maintain the connection for + // long-running commands. + log.Printf("[DEBUG] starting ssh KeepAlives") + + // We want a local copy of the ssh client pointer, so that a reconnect + // doesn't race with the running keep-alive loop. + sshClient := c.client + go func() { + defer cancelKeepAlive() + // Along with the KeepAlives generating packets to keep the tcp + // connection open, we will use the replies to verify liveness of the + // connection. This will prevent dead connections from blocking the + // provisioner indefinitely. 
+ respCh := make(chan error, 1) + + go func() { + t := time.NewTicker(keepAliveInterval) + defer t.Stop() + for { + select { + case <-t.C: + _, _, err := sshClient.SendRequest("keepalive@terraform.io", true, nil) + respCh <- err + case <-ctx.Done(): + return + } + } + }() + + after := time.NewTimer(maxKeepAliveDelay) + defer after.Stop() + + for { + select { + case err := <-respCh: + if err != nil { + log.Printf("[ERROR] ssh keepalive: %s", err) + sshConn.Close() + return + } + case <-after.C: + // abort after too many missed keepalives + log.Println("[ERROR] no reply from ssh server") + sshConn.Close() + return + case <-ctx.Done(): + return + } + if !after.Stop() { + <-after.C + } + after.Reset(maxKeepAliveDelay) + } + }() + + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func (c *Communicator) Disconnect() error { + c.lock.Lock() + defer c.lock.Unlock() + + if c.cancelKeepAlive != nil { + c.cancelKeepAlive() + } + + if c.config.sshAgent != nil { + if err := c.config.sshAgent.Close(); err != nil { + return err + } + } + + if c.conn != nil { + conn := c.conn + c.conn = nil + return conn.Close() + } + + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + randLock.Lock() + defer randLock.Unlock() + + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(randShared.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(cmd *remote.Cmd) error { + cmd.Init() + + session, err := c.newSession() + if err != nil { + return err + } + + // Set up our session + session.Stdin = cmd.Stdin + session.Stdout = cmd.Stdout + session.Stderr = cmd.Stderr + + if !c.config.noPty && c.connInfo.TargetPlatform != 
TargetPlatformWindows { + // Request a PTY + termModes := ssh.TerminalModes{ + ssh.ECHO: 0, // do not echo + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + + if err := session.RequestPty("xterm", 80, 40, termModes); err != nil { + return err + } + } + + log.Printf("[DEBUG] starting remote command: %s", cmd.Command) + err = session.Start(strings.TrimSpace(cmd.Command) + "\n") + if err != nil { + return err + } + + // Start a goroutine to wait for the session to end and set the + // exit boolean and status. + go func() { + defer session.Close() + + err := session.Wait() + exitStatus := 0 + if err != nil { + exitErr, ok := err.(*ssh.ExitError) + if ok { + exitStatus = exitErr.ExitStatus() + } + } + + cmd.SetExitStatus(exitStatus, err) + log.Printf("[DEBUG] remote command exited with '%d': %s", exitStatus, cmd.Command) + }() + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + // The target directory and file for talking the SCP protocol + targetDir := filepath.Dir(path) + targetFile := filepath.Base(path) + + // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). + // This does not work when the target host is unix. 
Switch to forward slash + // which works for unix and windows + targetDir = filepath.ToSlash(targetDir) + + // Skip copying if we can get the file size directly from common io.Readers + size := int64(0) + + switch src := input.(type) { + case *os.File: + fi, err := src.Stat() + if err == nil { + size = fi.Size() + } + case *bytes.Buffer: + size = int64(src.Len()) + case *bytes.Reader: + size = int64(src.Len()) + case *strings.Reader: + size = int64(src.Len()) + } + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + return scpUploadFile(targetFile, input, w, stdoutR, size) + } + + cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) + if err != nil { + return err + } + return c.scpSession(cmd, scpFunc) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + reader := bufio.NewReader(input) + prefix, err := reader.Peek(2) + if err != nil { + return fmt.Errorf("Error reading script: %w", err) + } + var script bytes.Buffer + + if string(prefix) != "#!" 
&& c.connInfo.TargetPlatform != TargetPlatformWindows { + script.WriteString(DefaultShebang) + } + script.ReadFrom(reader) + + if err := c.Upload(path, &script); err != nil { + return err + } + if c.connInfo.TargetPlatform != TargetPlatformWindows { + var stdout, stderr bytes.Buffer + cmd := &remote.Cmd{ + Command: fmt.Sprintf("chmod 0777 %s", path), + Stdout: &stdout, + Stderr: &stderr, + } + if err := c.Start(cmd); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine: %s", err) + } + + if err := cmd.Wait(); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine %v: %s %s", err, stdout.String(), stderr.String()) + } + } + return nil +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) + scpFunc := func(w io.Writer, r *bufio.Reader) error { + uploadEntries := func() error { + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(src, entries, w, r) + } + + if src[len(src)-1] != '/' { + log.Printf("[DEBUG] No trailing slash, creating the source directory name") + return scpUploadDirProtocol(filepath.Base(src), w, r, uploadEntries) + } + // Trailing slash, so only upload the contents + return uploadEntries() + } + + cmd, err := quoteShell([]string{"scp", "-rvt", dst}, c.connInfo.TargetPlatform) + if err != nil { + return err + } + return c.scpSession(cmd, scpFunc) +} + +func (c *Communicator) newSession() (session *ssh.Session, err error) { + log.Println("[DEBUG] opening new ssh session") + if c.client == nil { + err = errors.New("ssh client is not connected") + } else { + session, err = c.client.NewSession() + } + + if err != nil { + log.Printf("[WARN] ssh session open error: '%s', attempting reconnect", err) + if err := 
c.Connect(nil); err != nil { + return nil, err + } + + return c.client.NewSession() + } + + return session, nil +} + +func (c *Communicator) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { + session, err := c.newSession() + if err != nil { + return err + } + defer session.Close() + + // Get a pipe to stdin so that we can send data down + stdinW, err := session.StdinPipe() + if err != nil { + return err + } + + // We only want to close once, so we nil w after we close it, + // and only close in the defer if it hasn't been closed already. + defer func() { + if stdinW != nil { + stdinW.Close() + } + }() + + // Get a pipe to stdout so that we can get responses back + stdoutPipe, err := session.StdoutPipe() + if err != nil { + return err + } + stdoutR := bufio.NewReader(stdoutPipe) + + // Set stderr to a bytes buffer + stderr := new(bytes.Buffer) + session.Stderr = stderr + + // Start the sink mode on the other side + // TODO(mitchellh): There are probably issues with shell escaping the path + log.Println("[DEBUG] Starting remote scp process: ", scpCommand) + if err := session.Start(scpCommand); err != nil { + return err + } + + // Call our callback that executes in the context of SCP. We ignore + // EOF errors if they occur because it usually means that SCP prematurely + // ended on the other side. + log.Println("[DEBUG] Started SCP session, beginning transfers...") + if err := f(stdinW, stdoutR); err != nil && err != io.EOF { + return err + } + + // Close the stdin, which sends an EOF, and then set w to nil so that + // our defer func doesn't close it again since that is unsafe with + // the Go SSH package. + log.Println("[DEBUG] SCP session complete, closing stdin pipe.") + stdinW.Close() + stdinW = nil + + // Wait for the SCP connection to close, meaning it has consumed all + // our data and has completed. Or has errored. 
+ log.Println("[DEBUG] Waiting for SSH session to complete.") + err = session.Wait() + + // log any stderr before exiting on an error + scpErr := stderr.String() + if len(scpErr) > 0 { + log.Printf("[ERROR] scp stderr: %q", stderr) + } + + if err != nil { + if exitErr, ok := err.(*ssh.ExitError); ok { + // Otherwise, we have an ExitError, meaning we can just read + // the exit status + log.Printf("[ERROR] %s", exitErr) + + // If we exited with status 127, it means SCP isn't available. + // Return a more descriptive error for that. + if exitErr.ExitStatus() == 127 { + return errors.New( + "SCP failed to start. This usually means that SCP is not\n" + + "properly installed on the remote system.") + } + } + + return err + } + + return nil +} + +// checkSCPStatus checks that a prior command sent to SCP completed +// successfully. If it did not complete successfully, an error will +// be returned. +func checkSCPStatus(r *bufio.Reader) error { + code, err := r.ReadByte() + if err != nil { + return err + } + + if code != 0 { + // Treat any non-zero (really 1 and 2) as fatal errors + message, _, err := r.ReadLine() + if err != nil { + return fmt.Errorf("Error reading error message: %w", err) + } + + return errors.New(string(message)) + } + + return nil +} + +var testUploadSizeHook func(size int64) + +func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, size int64) error { + if testUploadSizeHook != nil { + testUploadSizeHook(size) + } + + if size == 0 { + // Create a temporary file where we can copy the contents of the src + // so that we can determine the length, since SCP is length-prefixed.
+ tf, err := os.CreateTemp("", "terraform-upload") + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %w", err) + } + defer os.Remove(tf.Name()) + defer tf.Close() + + log.Println("[DEBUG] Copying input data into temporary file so we can read the length") + if _, err := io.Copy(tf, src); err != nil { + return err + } + + // Sync the file so that the contents are definitely on disk, then + // read the length of it. + if err := tf.Sync(); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %w", err) + } + + // Seek the file to the beginning so we can re-read all of it + if _, err := tf.Seek(0, 0); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %w", err) + } + + fi, err := tf.Stat() + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %w", err) + } + + src = tf + size = fi.Size() + } + + // Start the protocol + log.Println("[DEBUG] Beginning file upload...") + fmt.Fprintln(w, "C0644", size, dst) + if err := checkSCPStatus(r); err != nil { + return err + } + + if _, err := io.Copy(w, src); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + if err := checkSCPStatus(r); err != nil { + return err + } + + return nil +} + +func scpUploadDirProtocol(name string, w io.Writer, r *bufio.Reader, f func() error) error { + log.Printf("[DEBUG] SCP: starting directory upload: %s", name) + fmt.Fprintln(w, "D0755 0", name) + err := checkSCPStatus(r) + if err != nil { + return err + } + + if err := f(); err != nil { + return err + } + + _, err = fmt.Fprintln(w, "E") + if err != nil { + return err + } + + return nil +} + +func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) error { + for _, fi := range fs { + realPath := filepath.Join(root, fi.Name()) + + // Track if this is actually a symlink to a directory. If it is + // a symlink to a file we don't do any special behavior because uploading + // a file just works. 
If it is a directory, we need to know so we + // treat it as such. + isSymlinkToDir := false + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + symPath, err := filepath.EvalSymlinks(realPath) + if err != nil { + return err + } + + symFi, err := os.Lstat(symPath) + if err != nil { + return err + } + + isSymlinkToDir = symFi.IsDir() + } + + if !fi.IsDir() && !isSymlinkToDir { + // It is a regular file (or symlink to a file), just upload it + f, err := os.Open(realPath) + if err != nil { + return err + } + + err = func() error { + defer f.Close() + return scpUploadFile(fi.Name(), f, w, r, fi.Size()) + }() + + if err != nil { + return err + } + + continue + } + + // It is a directory, recursively upload + err := scpUploadDirProtocol(fi.Name(), w, r, func() error { + f, err := os.Open(realPath) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(realPath, entries, w, r) + }) + if err != nil { + return err + } + } + + return nil +} + +// ConnectFunc is a convenience method for returning a function +// that just uses net.Dial to communicate with the remote end that +// is suitable for use with the SSH communicator configuration. +func ConnectFunc(network, addr string, p *proxyInfo) func() (net.Conn, error) { + return func() (net.Conn, error) { + var c net.Conn + var err error + + // Wrap connection to host if proxy server is configured + if p != nil { + RegisterDialerType() + c, err = newHttpProxyConn(p, addr) + } else { + c, err = net.DialTimeout(network, addr, 15*time.Second) + } + + if err != nil { + return nil, err + } + + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + } + + return c, nil + } +} + +// BastionConnectFunc is a convenience method for returning a function +// that connects to a host over a bastion connection. 
+func BastionConnectFunc( + bProto string, + bAddr string, + bConf *ssh.ClientConfig, + proto string, + addr string, + p *proxyInfo) func() (net.Conn, error) { + return func() (net.Conn, error) { + log.Printf("[DEBUG] Connecting to bastion: %s", bAddr) + var bastion *ssh.Client + var err error + + // Wrap connection to bastion server if proxy server is configured + if p != nil { + var pConn net.Conn + var bConn ssh.Conn + var bChans <-chan ssh.NewChannel + var bReq <-chan *ssh.Request + + RegisterDialerType() + pConn, err = newHttpProxyConn(p, bAddr) + + if err != nil { + return nil, fmt.Errorf("Error connecting to proxy: %w", err) + } + + bConn, bChans, bReq, err = ssh.NewClientConn(pConn, bAddr, bConf) + + if err != nil { + return nil, fmt.Errorf("Error creating new client connection via proxy: %w", err) + } + + bastion = ssh.NewClient(bConn, bChans, bReq) + } else { + bastion, err = ssh.Dial(bProto, bAddr, bConf) + } + + if err != nil { + return nil, fmt.Errorf("Error connecting to bastion: %w", err) + } + + log.Printf("[DEBUG] Connecting via bastion (%s) to host: %s", bAddr, addr) + conn, err := bastion.Dial(proto, addr) + if err != nil { + bastion.Close() + return nil, err + } + + // Wrap it up so we close both things properly + return &bastionConn{ + Conn: conn, + Bastion: bastion, + }, nil + } +} + +type bastionConn struct { + net.Conn + Bastion *ssh.Client +} + +func (c *bastionConn) Close() error { + c.Conn.Close() + return c.Bastion.Close() +} + +func quoteShell(args []string, targetPlatform string) (string, error) { + if targetPlatform == TargetPlatformUnix { + return shquot.POSIXShell(args), nil + } + if targetPlatform == TargetPlatformWindows { + return shquot.WindowsArgv(args), nil + } + + return "", fmt.Errorf("Cannot quote shell command, target platform unknown: %s", targetPlatform) + +} diff --git a/pkg/communicator/ssh/communicator_test.go b/pkg/communicator/ssh/communicator_test.go new file mode 100644 index 00000000000..35971c2fca9 --- /dev/null 
+++ b/pkg/communicator/ssh/communicator_test.go @@ -0,0 +1,763 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !race +// +build !race + +package ssh + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math/rand" + "net" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/ssh" +) + +// private key for mock server +const testServerPrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA19lGVsTqIT5iiNYRgnoY1CwkbETW5cq+Rzk5v/kTlf31XpSU +70HVWkbTERECjaYdXM2gGcbb+sxpq6GtXf1M3kVomycqhxwhPv4Cr6Xp4WT/jkFx +9z+FFzpeodGJWjOH6L2H5uX1Cvr9EDdQp9t9/J32/qBFntY8GwoUI/y/1MSTmMiF +tupdMODN064vd3gyMKTwrlQ8tZM6aYuyOPsutLlUY7M5x5FwMDYvnPDSeyT/Iw0z +s3B+NCyqeeMd2T7YzQFnRATj0M7rM5LoSs7DVqVriOEABssFyLj31PboaoLhOKgc +qoM9khkNzr7FHVvi+DhYM2jD0DwvqZLN6NmnLwIDAQABAoIBAQCGVj+kuSFOV1lT ++IclQYA6bM6uY5mroqcSBNegVxCNhWU03BxlW//BE9tA/+kq53vWylMeN9mpGZea +riEMIh25KFGWXqXlOOioH8bkMsqA8S7sBmc7jljyv+0toQ9vCCtJ+sueNPhxQQxH +D2YvUjfzBQ04I9+wn30BByDJ1QA/FoPsunxIOUCcRBE/7jxuLYcpR+JvEF68yYIh +atXRld4W4in7T65YDR8jK1Uj9XAcNeDYNpT/M6oFLx1aPIlkG86aCWRO19S1jLPT +b1ZAKHHxPMCVkSYW0RqvIgLXQOR62D0Zne6/2wtzJkk5UCjkSQ2z7ZzJpMkWgDgN +ifCULFPBAoGBAPoMZ5q1w+zB+knXUD33n1J+niN6TZHJulpf2w5zsW+m2K6Zn62M +MXndXlVAHtk6p02q9kxHdgov34Uo8VpuNjbS1+abGFTI8NZgFo+bsDxJdItemwC4 +KJ7L1iz39hRN/ZylMRLz5uTYRGddCkeIHhiG2h7zohH/MaYzUacXEEy3AoGBANz8 +e/msleB+iXC0cXKwds26N4hyMdAFE5qAqJXvV3S2W8JZnmU+sS7vPAWMYPlERPk1 +D8Q2eXqdPIkAWBhrx4RxD7rNc5qFNcQWEhCIxC9fccluH1y5g2M+4jpMX2CT8Uv+ +3z+NoJ5uDTXZTnLCfoZzgZ4nCZVZ+6iU5U1+YXFJAoGBANLPpIV920n/nJmmquMj +orI1R/QXR9Cy56cMC65agezlGOfTYxk5Cfl5Ve+/2IJCfgzwJyjWUsFx7RviEeGw +64o7JoUom1HX+5xxdHPsyZ96OoTJ5RqtKKoApnhRMamau0fWydH1yeOEJd+TRHhc +XStGfhz8QNa1dVFvENczja1vAoGABGWhsd4VPVpHMc7lUvrf4kgKQtTC2PjA4xoc 
+QJ96hf/642sVE76jl+N6tkGMzGjnVm4P2j+bOy1VvwQavKGoXqJBRd5Apppv727g +/SM7hBXKFc/zH80xKBBgP/i1DR7kdjakCoeu4ngeGywvu2jTS6mQsqzkK+yWbUxJ +I7mYBsECgYB/KNXlTEpXtz/kwWCHFSYA8U74l7zZbVD8ul0e56JDK+lLcJ0tJffk +gqnBycHj6AhEycjda75cs+0zybZvN4x65KZHOGW/O/7OAWEcZP5TPb3zf9ned3Hl +NsZoFj52ponUM6+99A2CmezFCN16c4mbA//luWF+k3VVqR6BpkrhKw== +-----END RSA PRIVATE KEY-----` + +// this cert was signed by the key from testCAPublicKey +const testServerHostCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgvQ3Bs1ex7277b9q6I0fNaWsVEC16f+LcT8RLPSVMEVMAAAADAQABAAABAQDX2UZWxOohPmKI1hGCehjULCRsRNblyr5HOTm/+ROV/fVelJTvQdVaRtMREQKNph1czaAZxtv6zGmroa1d/UzeRWibJyqHHCE+/gKvpenhZP+OQXH3P4UXOl6h0YlaM4fovYfm5fUK+v0QN1Cn2338nfb+oEWe1jwbChQj/L/UxJOYyIW26l0w4M3Tri93eDIwpPCuVDy1kzppi7I4+y60uVRjsznHkXAwNi+c8NJ7JP8jDTOzcH40LKp54x3ZPtjNAWdEBOPQzuszkuhKzsNWpWuI4QAGywXIuPfU9uhqguE4qByqgz2SGQ3OvsUdW+L4OFgzaMPQPC+pks3o2acvAAAAAAAAAAAAAAACAAAAB2NhLXRlc3QAAAANAAAACTEyNy4wLjAuMQAAAABag0jkAAAAAHDcHtAAAAAAAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81AAABDwAAAAdzc2gtcnNhAAABAEyoiVkZ5z79nh3WSU5mU2U7e2BItnnEqsJIm9EN+35uG0yORSXmQoaa9mtli7G3r79tyqEJd/C95EdNvU/9TjaoDcbH8OHP+Ue9XSfUzBuQ6bGSXe6mlZlO7QJ1cIyWphFP3MkrweDSiJ+SpeXzLzZkiJ7zKv5czhBEyG/MujFgvikotL+eUNG42y2cgsesXSjENSBS3l11q55a+RM2QKt3W32im8CsSxrH6Mz6p4JXQNgsVvZRknLxNlWXULFB2HLTunPKzJNMTf6xZf66oivSBAXVIdNKhlVpAQ3dT/dW5K6J4aQF/hjWByyLprFwZ16cPDqvtalnTCpbRYelNbw=` + +const testCAPublicKey = `ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCrozyZIhdEvalCn+eSzHH94cO9ykiywA13ntWI7mJcHBwYTeCYWG8E9zGXyp2iDOjCGudM0Tdt8o0OofKChk9Z/qiUN0G8y1kmaXBlBM3qA5R9NPpvMYMNkYLfX6ivtZCnqrsbzaoqN2Oc/7H2StHzJWh/XCGu9otQZA6vdv1oSmAsZOjw/xIGaGQqDUaLq21J280PP1qSbdJHf76iSHE+TWe3YpqV946JWM5tCh0DykZ10VznvxYpUjzhr07IN3tVKxOXbPnnU7lX6IaLIWgfzLqwSyheeux05c3JLF9iF4sFu8ou4hwQz1iuUTU1jxgwZP0w/bkXgFFs0949lW81` + +func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string { + serverConfig := &ssh.ServerConfig{ + PasswordCallback: acceptUserPass("user", "pass"), + PublicKeyCallback: acceptPublicKey(pubKey), + } + + var err error + if signer == nil { + signer, err = ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatalf("unable to parse private key: %s", err) + } + } + serverConfig.AddHostKey(signer) + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Unable to listen for connection: %s", err) + } + + go func() { + defer l.Close() + c, err := l.Accept() + if err != nil { + t.Errorf("Unable to accept incoming connection: %s", err) + } + defer c.Close() + conn, chans, _, err := ssh.NewServerConn(c, serverConfig) + if err != nil { + t.Logf("Handshaking error: %v", err) + } + t.Log("Accepted SSH connection") + + for newChannel := range chans { + channel, requests, err := newChannel.Accept() + if err != nil { + t.Errorf("Unable to accept channel.") + } + t.Log("Accepted channel") + + go func(in <-chan *ssh.Request) { + defer channel.Close() + for req := range in { + // since this channel's requests are serviced serially, + // this will block keepalive probes, and can simulate a + // hung connection. 
+ if bytes.Contains(req.Payload, []byte("sleep")) { + time.Sleep(time.Second) + } + + if req.WantReply { + req.Reply(true, nil) + } + } + }(requests) + } + conn.Close() + }() + + return l.Addr().String() +} + +func TestNew_Invalid(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err == nil { + t.Fatal("should have had an error connecting") + } +} + +func TestNew_InvalidHost(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + _, err := New(v) + if err == nil { + t.Fatal("should have had an error creating communicator") + } +} + +func TestStart(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } +} + +// TestKeepAlives verifies that the keepalive messages don't interfere with +// normal operation of the client. 
+func TestKeepAlives(t *testing.T) {
+	ivl := keepAliveInterval
+	keepAliveInterval = 250 * time.Millisecond
+	defer func() { keepAliveInterval = ivl }()
+
+	address := newMockLineServer(t, nil, testClientPublicKey)
+	parts := strings.Split(address, ":")
+
+	v := cty.ObjectVal(map[string]cty.Value{
+		"type":     cty.StringVal("ssh"),
+		"user":     cty.StringVal("user"),
+		"password": cty.StringVal("pass"),
+		"host":     cty.StringVal(parts[0]),
+		"port":     cty.StringVal(parts[1]),
+	})
+
+	c, err := New(v)
+	if err != nil {
+		t.Fatalf("error creating communicator: %s", err)
+	}
+
+	if err := c.Connect(nil); err != nil {
+		t.Fatal(err)
+	}
+
+	var cmd remote.Cmd
+	stdout := new(bytes.Buffer)
+	cmd.Command = "sleep"
+	cmd.Stdout = stdout
+
+	// wait a bit before executing the command, so that at least 1 keepalive is sent
+	time.Sleep(500 * time.Millisecond)
+
+	err = c.Start(&cmd)
+	if err != nil {
+		t.Fatalf("error executing remote command: %s", err)
+	}
+}
+
+// TestFailedKeepAlives verifies that failed keepalive messages will eventually
+// kill the connection.
+func TestFailedKeepAlives(t *testing.T) { + ivl := keepAliveInterval + del := maxKeepAliveDelay + maxKeepAliveDelay = 500 * time.Millisecond + keepAliveInterval = 250 * time.Millisecond + defer func() { + keepAliveInterval = ivl + maxKeepAliveDelay = del + }() + + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + if err := c.Connect(nil); err != nil { + t.Fatal(err) + } + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "sleep" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err == nil { + t.Fatal("expected connection error") + } +} + +func TestLostConnection(t *testing.T) { + address := newMockLineServer(t, nil, testClientPublicKey) + parts := strings.Split(address, ":") + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } + + // The test server can't execute anything, so Wait will block, unless + // there's an error. Disconnect the communicator transport, to cause the + // command to fail. 
+ go func() { + time.Sleep(100 * time.Millisecond) + c.Disconnect() + }() + + err = cmd.Wait() + if err == nil { + t.Fatal("expected communicator error") + } +} + +func TestHostKey(t *testing.T) { + // get the server's public key + signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatalf("unable to parse private key: %v", err) + } + pubKey := fmt.Sprintf("ssh-rsa %s", base64.StdEncoding.EncodeToString(signer.PublicKey().Marshal())) + + address := newMockLineServer(t, nil, testClientPublicKey) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Password: "pass", + Host: host, + HostKey: pubKey, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } + + // now check with the wrong HostKey + address = newMockLineServer(t, nil, testClientPublicKey) + _, p, _ = net.SplitHostPort(address) + port, _ = strconv.Atoi(p) + + connInfo.HostKey = testClientPublicKey + connInfo.Port = uint16(port) + + cfg, err = prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c = &Communicator{ + connInfo: connInfo, + config: cfg, + } + + err = c.Start(&cmd) + if err == nil || !strings.Contains(err.Error(), "mismatch") { + t.Fatalf("expected host key mismatch, got error:%v", err) + } +} + +func TestHostCert(t *testing.T) { + pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testServerHostCert)) + if err != nil { + t.Fatal(err) + } + + signer, err := ssh.ParsePrivateKey([]byte(testServerPrivateKey)) + if err != nil { + t.Fatal(err) + } + + signer, err = ssh.NewCertSigner(pk.(*ssh.Certificate), signer) + if err != nil { + 
t.Fatal(err) + } + + address := newMockLineServer(t, signer, testClientPublicKey) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Password: "pass", + Host: host, + HostKey: testCAPublicKey, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } + + // now check with the wrong HostKey + address = newMockLineServer(t, signer, testClientPublicKey) + _, p, _ = net.SplitHostPort(address) + port, _ = strconv.Atoi(p) + + connInfo.HostKey = testClientPublicKey + connInfo.Port = uint16(port) + + cfg, err = prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c = &Communicator{ + connInfo: connInfo, + config: cfg, + } + + err = c.Start(&cmd) + if err == nil || !strings.Contains(err.Error(), "authorities") { + t.Fatalf("expected host key mismatch, got error:%v", err) + } +} + +const SERVER_PEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKR +Uji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iX +wvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6 +jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOh +sssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj +13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQIDAQABAoIBAQCmvQMXNmvCDqk7 +30zsVDvw4fHGH+azK3Od1aqTqcEMHISOUbCtckFPxLzIsoSltRQqB1kuRVG07skm +Stsu+xny4lLcSwBVuLRuykEK2EyYIc/5Owo6y9pkhkaSf5ZfFes4bnD6+B/BhRpp +PRMMq0E+xCkX/G6iIi9mhgdlqm0x/vKtjzQeeshw9+gRcRLUpX+UeKFKXMXcDayx +qekr1bAaQKNBhTK+CbZjcqzG4f+BXVGRTZ9nsPAV+yTnWUCU0TghwPmtthHbebqa 
+9hlkum7qik/bQj/tjJ8/b0vTfHQSVxhtPG/ZV2Tn9ZuL/vrkYqeyMU8XkJ/uaEvH +WPyOcB4BAoGBAP5o5JSEtPog+U3JFrLNSRjz5ofZNVkJzice+0XyqlzJDHhX5tF8 +mriYQZLLXYhckBm4IdkhTn/dVbXNQTzyy2WVuO5nU8bkCMvGL9CGpW4YGqwGf7NX +e4H3emtRjLv8VZpUHe/RUUDhmYvMSt1qmXuskfpROuGfLhQBUd6A4J+BAoGBAPGp +UcMKjrxZ5qjYU6DLgS+xeca4Eu70HgdbSQbRo45WubXjyXvTRFij36DrpxJWf1D7 +lIsyBifoTra/lAuC1NQXGYWjTCdk2ey8Ll5qOgiXvE6lINHABr+U/Z90/g6LuML2 +VzaZbq/QLcT3yVsdyTogKckzCaKsCpusyHE1CXAVAoGAd6kMglKc8N0bhZukgnsN ++5+UeacPcY6sGTh4RWErAjNKGzx1A2lROKvcg9gFaULoQECcIw2IZ5nKW5VsLueg +BWrTrcaJ4A2XmYjhKnp6SvspaGoyHD90hx/Iw7t6r1yzQsB3yDmytwqldtyjBdvC +zynPC2azhDWjraMlR7tka4ECgYAxwvLiHa9sm3qCtCDsUFtmrb3srITBjaUNUL/F +1q8+JR+Sk7gudj9xnTT0VvINNaB71YIt83wPBagHu4VJpYQbtDH+MbUBu6OgOtO1 +f1w53rzY2OncJxV8p7pd9mJGLoE6LC2jQY7oRw7Vq0xcJdME1BCmrIrEY3a/vaF8 +pjYuTQKBgQCIOH23Xita8KmhH0NdlWxZfcQt1j3AnOcKe6UyN4BsF8hqS7eTA52s +WjG5X2IBl7gs1eMM1qkqR8npS9nwfO/pBmZPwjiZoilypXxWj+c+P3vwre2yija4 +bXgFVj4KFBwhr1+8KcobxC0SAPEouMvSkxzjjw+gnebozUtPlud9jA== +-----END RSA PRIVATE KEY----- +` +const CLIENT_CERT_SIGNED_BY_SERVER = `ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgbMDNUn4M2TtzrSH7MOT2QsvLzZWjehJ5TYrBOp9p+lwAAAADAQABAAABAQCyu57E7zIWRyEWuaiOiikOSZKFjbwLkpE9fboFfLLsNUJj4zw+5bZUJtzWK8roPjgL8s1oPncro5wuTtI2Nu4fkpeFK0Hb33o6Eyksuj4Om4+6Uemn1QEcb0bZqK8Zyg9Dg9deP7LeE0v78b5/jZafFgwxv+/sMhM0PRD34NCDYcYmkkHlvQtQWFAdbPXCgghObedZyYdoqZVuhTsiPMWtQS/cc9M4tv6mPOuQlhZt3R/Oh/kwUyu45oGRb5bhO4JicozFS3oeClpU+UMbgslkzApJqxZBWN7+PDFSZhKk2GslyeyP4sH3E30Z00yVi/lQYgmQsB+Hg6ClemNQMNu/AAAAAAAAAAAAAAACAAAABHVzZXIAAAAIAAAABHVzZXIAAAAAWzBjXAAAAAB/POfPAAAAAAAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA8CkDr7uxCFt6lQUVwS8NyPO+fQNxORoGnMnN/XhVJZvpqyKRUji9R0d8D66bYxUUsabXjP2y4HTVzbZtnvXFZZshk0cOtJjjekpYJaLK2esPR/iXwvSltNkrDQDPN/RmgEEMIevW8AgrPsqrnybFHxTpd7rEUHXBOe4nMNRIg3XHykB6jZk8q5bBPUe3I/f0DK5TJEBpTc6dO3P/j93u55VUqr39/SPRHnld2mCw+c8v6UOhsssO/DIZFPScD3DYqsk2N+/nz9zXfcOTdWGhawgxuIo1DTokrNQbG3pDrLqcWgqj13vqJFCmRA0O2CQIwJePd6+Np/XO3Uh/KL6FlQAAAQ8AAAAHc3NoLXJzYQAAAQC6sKEQHyl954BQn2BXuTgOB3NkENBxN7SD8ZaS8PNkDESytLjSIqrzoE6m7xuzprA+G23XRrCY/um3UvM7+7+zbwig2NIBbGbp3QFliQHegQKW6hTZP09jAQZk5jRrrEr/QT/s+gtHPmjxJK7XOQYxhInDKj+aJg62ExcwpQlP/0ATKNOIkdzTzzq916p0UOnnVaaPMKibh5Lv69GafIhKJRZSuuLN9fvs1G1RuUbxn/BNSeoRCr54L++Ztg09fJxunoyELs8mwgzCgB3pdZoUR2Z6ak05W4mvH3lkSz2BKUrlwxI6mterxhJy1GuN1K/zBG0gEMl2UTLajGK3qKM8 itbitloaner@MacBook-Pro-4.fios-router.home` +const CLIENT_PEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsruexO8yFkchFrmojoopDkmShY28C5KRPX26BXyy7DVCY+M8 +PuW2VCbc1ivK6D44C/LNaD53K6OcLk7SNjbuH5KXhStB2996OhMpLLo+DpuPulHp +p9UBHG9G2aivGcoPQ4PXXj+y3hNL+/G+f42WnxYMMb/v7DITND0Q9+DQg2HGJpJB +5b0LUFhQHWz1woIITm3nWcmHaKmVboU7IjzFrUEv3HPTOLb+pjzrkJYWbd0fzof5 +MFMruOaBkW+W4TuCYnKMxUt6HgpaVPlDG4LJZMwKSasWQVje/jwxUmYSpNhrJcns +j+LB9xN9GdNMlYv5UGIJkLAfh4OgpXpjUDDbvwIDAQABAoIBAEu2ctFVyk/pnbi0 +uRR4rl+hBvKQUeJNGj2ELvL4Ggs5nIAX2IOEZ7JKLC6FqpSrFq7pEd5g57aSvixX +s3DH4CN7w7fj1ShBCNPlHgIWewdRGpeA74vrDWdwNAEsFdDE6aZeCTOhpDGy1vNJ +OrtpzS5i9pN0jTvvEneEjtWSZIHiiVlN+0hsFaiwZ6KXON+sDccZPmnP6Fzwj5Rc 
+WS0dKSwnxnx0otWgwWFs8nr306nSeMsNmQkHsS9lz4DEVpp9owdzrX1JmbQvNYAV +ohmB3ET4JYFgerqPXJfed9poueGuWCP6MYhsjNeHN35QhofxdO5/0i3JlZfqwZei +tNq/0oECgYEA6SqjRqDiIp3ajwyB7Wf0cIQG/P6JZDyN1jl//htgniliIH5UP1Tm +uAMG5MincV6X9lOyXyh6Yofu5+NR0yt9SqbDZVJ3ZCxKTun7pxJvQFd7wl5bMkiJ +qVfS08k6gQHHDoO+eel+DtpIfWc+e3tvX0aihSU0GZEMqDXYkkphLGECgYEAxDxb ++JwJ3N5UEjjkuvFBpuJnmjIaN9HvQkTv3inlx1gLE4iWBZXXsu4aWF8MCUeAAZyP +42hQDSkCYX/A22tYCEn/jfrU6A+6rkWBTjdUlYLvlSkhosSnO+117WEItb5cUE95 +hF4UY7LNs1AsDkV4WE87f/EjpxSwUAjB2Lfd/B8CgYAJ/JiHsuZcozQ0Qk3iVDyF +ATKnbWOHFozgqw/PW27U92LLj32eRM2o/gAylmGNmoaZt1YBe2NaiwXxiqv7hnZU +VzYxRcn1UWxRWvY7Xq/DKrwTRCVVzwOObEOMbKcD1YaoGX50DEso6bKHJH/pnAzW +INlfKIvFuI+5OK0w/tyQoQKBgQCf/jpaOxaLfrV62eobRQJrByLDBGB97GsvU7di +IjTWz8DQH0d5rE7d8uWF8ZCFrEcAiV6DYZQK9smbJqbd/uoacAKtBro5rkFdPwwK +8m/DKqsdqRhkdgOHh7bjYH7Sdy8ax4Fi27WyB6FQtmgFBrz0+zyetsODwQlzZ4Bs +qpSRrwKBgQC0vWHrY5aGIdF+b8EpP0/SSLLALpMySHyWhDyxYcPqdhszYbjDcavv +xrrLXNUD2duBHKPVYE+7uVoDkpZXLUQ4x8argo/IwQM6Kh2ma1y83TYMT6XhL1+B +5UPcl6RXZBCkiU7nFIG6/0XKFqVWc3fU8e09X+iJwXIJ5Jatywtg+g== +-----END RSA PRIVATE KEY----- +` + +func TestCertificateBasedAuth(t *testing.T) { + signer, err := ssh.ParsePrivateKey([]byte(SERVER_PEM)) + if err != nil { + t.Fatalf("unable to parse private key: %v", err) + } + address := newMockLineServer(t, signer, CLIENT_CERT_SIGNED_BY_SERVER) + host, p, _ := net.SplitHostPort(address) + port, _ := strconv.Atoi(p) + + connInfo := &connectionInfo{ + User: "user", + Host: host, + PrivateKey: CLIENT_PEM, + Certificate: CLIENT_CERT_SIGNED_BY_SERVER, + Port: uint16(port), + Timeout: "30s", + } + + cfg, err := prepareSSHConfig(connInfo) + if err != nil { + t.Fatal(err) + } + + c := &Communicator{ + connInfo: connInfo, + config: cfg, + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + if err := c.Start(&cmd); err != nil { + t.Fatal(err) + } + if err := c.Disconnect(); err != nil { + t.Fatal(err) + } +} + +func TestAccUploadFile(t *testing.T) { + 
// use the local ssh server and scp binary to check uploads + if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { + t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") + t.Skip() + } + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal(os.Getenv("USER")), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + tmpDir := t.TempDir() + source, err := os.CreateTemp(tmpDir, "tempfile.in") + if err != nil { + t.Fatal(err) + } + + content := "this is the file content" + if _, err := source.WriteString(content); err != nil { + t.Fatal(err) + } + source.Seek(0, io.SeekStart) + + tmpFile := filepath.Join(tmpDir, "tempFile.out") + + testUploadSizeHook = func(size int64) { + if size != int64(len(content)) { + t.Errorf("expected %d bytes, got %d\n", len(content), size) + } + } + defer func() { + testUploadSizeHook = nil + }() + + err = c.Upload(tmpFile, source) + if err != nil { + t.Fatalf("error uploading file: %s", err) + } + + data, err := os.ReadFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if string(data) != content { + t.Fatalf("bad: %s", data) + } +} + +func TestAccHugeUploadFile(t *testing.T) { + // use the local ssh server and scp binary to check uploads + if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" { + t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set") + t.Skip() + } + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "user": cty.StringVal(os.Getenv("USER")), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + // copy 4GB of data, random to prevent compression. 
+ size := int64(1 << 32) + source := io.LimitReader(rand.New(rand.NewSource(0)), size) + + dest, err := os.CreateTemp("", "communicator") + if err != nil { + t.Fatal(err) + } + destName := dest.Name() + dest.Close() + defer os.Remove(destName) + + t.Log("Uploading to", destName) + + // bypass the Upload method so we can directly supply the file size + // preventing the extra copy of the huge file. + targetDir := filepath.Dir(destName) + targetFile := filepath.Base(destName) + + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + return scpUploadFile(targetFile, source, w, stdoutR, size) + } + + cmd, err := quoteShell([]string{"scp", "-vt", targetDir}, c.connInfo.TargetPlatform) + if err != nil { + t.Fatal(err) + } + err = c.scpSession(cmd, scpFunc) + if err != nil { + t.Fatal(err) + } + + // check the final file size + fs, err := os.Stat(destName) + if err != nil { + t.Fatal(err) + } + + if fs.Size() != size { + t.Fatalf("expected file size of %d, got %d", size, fs.Size()) + } +} + +func TestScriptPath(t *testing.T) { + cases := []struct { + Input string + Pattern string + }{ + { + "/tmp/script.sh", + `^/tmp/script\.sh$`, + }, + { + "/tmp/script_%RAND%.sh", + `^/tmp/script_(\d+)\.sh$`, + }, + } + + for _, tc := range cases { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) + if err != nil { + t.Fatalf("err: %s", err) + } + output := comm.ScriptPath() + + match, err := regexp.Match(tc.Pattern, []byte(output)) + if err != nil { + t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) + } + if !match { + t.Fatalf("bad: %s\n\n%s", tc.Input, output) + } + } +} + +func TestScriptPath_randSeed(t *testing.T) { + // Pre GH-4186 fix, this value was the deterministic start the pseudorandom + // chain of unseeded math/rand values for Int31(). 
+ staticSeedPath := "/tmp/terraform_1298498081.sh" + c, err := New(cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + })) + if err != nil { + t.Fatalf("err: %s", err) + } + path := c.ScriptPath() + if path == staticSeedPath { + t.Fatalf("rand not seeded! got: %s", path) + } +} + +var testClientPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE6A1c4n+OtEPEFlNKTZf2i03L3NylSYmvmJ8OLmzLuPZmJBJt4G3VZ/60s1aKzwLKrTq20S+ONG4zvnK5zIPoauoNNdUJKbg944hB4OE+HDbrBhk7SH+YWCsCILBoSXwAVdUEic6FWf/SeqBSmTBySHvpuNOw16J+SK6Ardx8k64F2tRkZuC6AmOZijgKa/sQKjWAIVPk34ECM6OLfPc3kKUEfkdpYLvuMfuRMfSTlxn5lFC0b0SovK9aWfNMBH9iXLQkieQ5rXoyzUC7mwgnASgl8cqw1UrToiUuhvneduXBhbQfmC/Upv+tL6dSSk+0DlgVKEHuJmc8s8+/qpdL` + +func acceptUserPass(goodUser, goodPass string) func(ssh.ConnMetadata, []byte) (*ssh.Permissions, error) { + return func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + if c.User() == goodUser && string(pass) == goodPass { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + } +} + +func acceptPublicKey(keystr string) func(ssh.ConnMetadata, ssh.PublicKey) (*ssh.Permissions, error) { + return func(_ ssh.ConnMetadata, inkey ssh.PublicKey) (*ssh.Permissions, error) { + goodkey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(keystr)) + if err != nil { + return nil, fmt.Errorf("error parsing key: %w", err) + } + + if bytes.Equal(inkey.Marshal(), goodkey.Marshal()) { + return nil, nil + } + + return nil, fmt.Errorf("public key rejected") + } +} diff --git a/pkg/communicator/ssh/http_proxy.go b/pkg/communicator/ssh/http_proxy.go new file mode 100644 index 00000000000..7c52ab34750 --- /dev/null +++ b/pkg/communicator/ssh/http_proxy.go @@ -0,0 +1,157 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "bufio" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "golang.org/x/net/proxy" +) + +// Dialer implements for SSH over HTTP Proxy. +type proxyDialer struct { + proxy proxyInfo + // forwarding Dialer + forward proxy.Dialer +} + +type proxyInfo struct { + // HTTP Proxy host or host:port + host string + // HTTP Proxy scheme + scheme string + // An immutable encapsulation of username and password details for a URL + userInfo *url.Userinfo +} + +func newProxyInfo(host, scheme, username, password string) *proxyInfo { + p := &proxyInfo{ + host: host, + scheme: scheme, + } + + p.userInfo = url.UserPassword(username, password) + + if p.scheme == "" { + p.scheme = "http" + } + + return p +} + +func (p *proxyInfo) url() *url.URL { + return &url.URL{ + Scheme: p.scheme, + User: p.userInfo, + Host: p.host, + } +} + +func (p *proxyDialer) Dial(network, addr string) (net.Conn, error) { + // Dial the proxy host + c, err := p.forward.Dial(network, p.proxy.host) + + if err != nil { + return nil, err + } + + err = c.SetDeadline(time.Now().Add(15 * time.Second)) + if err != nil { + return nil, err + } + + // Generate request URL to host accessed through the proxy + reqUrl := &url.URL{ + Scheme: "", + Host: addr, + } + + // Create a request object using the CONNECT method to instruct the proxy server to tunnel a protocol other than HTTP. + req, err := http.NewRequest("CONNECT", reqUrl.String(), nil) + if err != nil { + c.Close() + return nil, err + } + + // If http proxy requires authentication, configure settings for basic authentication. + if p.proxy.userInfo.String() != "" { + username := p.proxy.userInfo.Username() + password, _ := p.proxy.userInfo.Password() + req.SetBasicAuth(username, password) + req.Header.Add("Proxy-Authorization", req.Header.Get("Authorization")) + } + + // Do not close the connection after sending this request and reading its response. 
+ req.Close = false + + // Writes the request in the form expected by an HTTP proxy. + err = req.Write(c) + if err != nil { + c.Close() + return nil, err + } + + res, err := http.ReadResponse(bufio.NewReader(c), req) + + if err != nil { + res.Body.Close() + c.Close() + return nil, err + } + + res.Body.Close() + + if res.StatusCode != http.StatusOK { + c.Close() + return nil, fmt.Errorf("Connection Error: StatusCode: %d", res.StatusCode) + } + + return c, nil +} + +// NewHttpProxyDialer generate Http Proxy Dialer +func newHttpProxyDialer(u *url.URL, forward proxy.Dialer) (proxy.Dialer, error) { + var proxyUserName, proxyPassword string + if u.User != nil { + proxyUserName = u.User.Username() + proxyPassword, _ = u.User.Password() + } + + pd := &proxyDialer{ + proxy: *newProxyInfo(u.Host, u.Scheme, proxyUserName, proxyPassword), + forward: forward, + } + + return pd, nil +} + +// RegisterDialerType register schemes used by `proxy.FromURL` +func RegisterDialerType() { + proxy.RegisterDialerType("http", newHttpProxyDialer) + proxy.RegisterDialerType("https", newHttpProxyDialer) +} + +// NewHttpProxyConn create a connection to connect through the proxy server. +func newHttpProxyConn(p *proxyInfo, targetAddr string) (net.Conn, error) { + pd, err := proxy.FromURL(p.url(), proxy.Direct) + + if err != nil { + return nil, err + } + + proxyConn, err := pd.Dial("tcp", targetAddr) + + if err != nil { + return nil, err + } + + return proxyConn, err +} diff --git a/pkg/communicator/ssh/password.go b/pkg/communicator/ssh/password.go new file mode 100644 index 00000000000..f285da37b32 --- /dev/null +++ b/pkg/communicator/ssh/password.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "log" + + "golang.org/x/crypto/ssh" +) + +// An implementation of ssh.KeyboardInteractiveChallenge that simply sends +// back the password for all questions. The questions are logged. +func PasswordKeyboardInteractive(password string) ssh.KeyboardInteractiveChallenge { + return func(user, instruction string, questions []string, echos []bool) ([]string, error) { + log.Printf("Keyboard interactive challenge: ") + log.Printf("-- User: %s", user) + log.Printf("-- Instructions: %s", instruction) + for i, question := range questions { + log.Printf("-- Question %d: %s", i+1, question) + } + + // Just send the password back for all questions + answers := make([]string, len(questions)) + for i := range answers { + answers[i] = string(password) + } + + return answers, nil + } +} diff --git a/pkg/communicator/ssh/password_test.go b/pkg/communicator/ssh/password_test.go new file mode 100644 index 00000000000..0ba61f7eddf --- /dev/null +++ b/pkg/communicator/ssh/password_test.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "reflect" + "testing" +) + +func TestPasswordKeybardInteractive_Challenge(t *testing.T) { + p := PasswordKeyboardInteractive("foo") + result, err := p("foo", "bar", []string{"one", "two"}, nil) + if err != nil { + t.Fatalf("err not nil: %s", err) + } + + if !reflect.DeepEqual(result, []string{"foo", "foo"}) { + t.Fatalf("invalid password: %#v", result) + } +} diff --git a/pkg/communicator/ssh/provisioner.go b/pkg/communicator/ssh/provisioner.go new file mode 100644 index 00000000000..c437745fa9c --- /dev/null +++ b/pkg/communicator/ssh/provisioner.go @@ -0,0 +1,597 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "bytes" + "encoding/pem" + "errors" + "fmt" + "log" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/kubegems/opentofu/pkg/communicator/shared" + sshagent "github.com/xanzy/ssh-agent" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "root" + + // DefaultPort is used if there is no port given + DefaultPort = 22 + + // DefaultUnixScriptPath is used as the path to copy the file to + // for remote execution on unix if not provided otherwise. + DefaultUnixScriptPath = "/tmp/terraform_%RAND%.sh" + // DefaultWindowsScriptPath is used as the path to copy the file to + // for remote execution on windows if not provided otherwise. + DefaultWindowsScriptPath = "C:/windows/temp/terraform_%RAND%.cmd" + + // DefaultTimeout is used if there is no timeout given + DefaultTimeout = 5 * time.Minute + + // TargetPlatformUnix used for cleaner code, and is used if no target platform has been specified + TargetPlatformUnix = "unix" + //TargetPlatformWindows used for cleaner code + TargetPlatformWindows = "windows" +) + +// connectionInfo is decoded from the ConnInfo of the resource. These are the +// only keys we look at. If a PrivateKey is given, that is used instead +// of a password. 
+type connectionInfo struct {
+	User           string
+	Password       string
+	PrivateKey     string
+	Certificate    string
+	Host           string
+	HostKey        string
+	Port           uint16
+	Agent          bool
+	ScriptPath     string
+	TargetPlatform string
+	Timeout        string
+	TimeoutVal     time.Duration
+
+	ProxyScheme       string
+	ProxyHost         string
+	ProxyPort         uint16
+	ProxyUserName     string
+	ProxyUserPassword string
+
+	BastionUser        string
+	BastionPassword    string
+	BastionPrivateKey  string
+	BastionCertificate string
+	BastionHost        string
+	BastionHostKey     string
+	BastionPort        uint16
+
+	AgentIdentity string
+}
+
+// decodeConnInfo decodes the given cty.Value using the same behavior as the
+// legacy mapstructure decoder in order to preserve as much of the existing
+// logic as possible for compatibility.
+func decodeConnInfo(v cty.Value) (*connectionInfo, error) {
+	connInfo := &connectionInfo{}
+	if v.IsNull() {
+		return connInfo, nil
+	}
+
+	for k, v := range v.AsValueMap() {
+		if v.IsNull() {
+			continue
+		}
+
+		switch k {
+		case "user":
+			connInfo.User = v.AsString()
+		case "password":
+			connInfo.Password = v.AsString()
+		case "private_key":
+			connInfo.PrivateKey = v.AsString()
+		case "certificate":
+			connInfo.Certificate = v.AsString()
+		case "host":
+			connInfo.Host = v.AsString()
+		case "host_key":
+			connInfo.HostKey = v.AsString()
+		case "port":
+			if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil {
+				return nil, err
+			}
+		case "agent":
+			connInfo.Agent = v.True()
+		case "script_path":
+			connInfo.ScriptPath = v.AsString()
+		case "target_platform":
+			connInfo.TargetPlatform = v.AsString()
+		case "timeout":
+			connInfo.Timeout = v.AsString()
+		case "proxy_scheme":
+			connInfo.ProxyScheme = v.AsString()
+		case "proxy_host":
+			connInfo.ProxyHost = v.AsString()
+		case "proxy_port":
+			if err := gocty.FromCtyValue(v, &connInfo.ProxyPort); err != nil {
+				return nil, err
+			}
+		case "proxy_user_name":
+			connInfo.ProxyUserName = v.AsString()
+		case "proxy_user_password":
+			connInfo.ProxyUserPassword = v.AsString()
+		case 
"bastion_user": + connInfo.BastionUser = v.AsString() + case "bastion_password": + connInfo.BastionPassword = v.AsString() + case "bastion_private_key": + connInfo.BastionPrivateKey = v.AsString() + case "bastion_certificate": + connInfo.BastionCertificate = v.AsString() + case "bastion_host": + connInfo.BastionHost = v.AsString() + case "bastion_host_key": + connInfo.BastionHostKey = v.AsString() + case "bastion_port": + if err := gocty.FromCtyValue(v, &connInfo.BastionPort); err != nil { + return nil, err + } + case "agent_identity": + connInfo.AgentIdentity = v.AsString() + } + } + return connInfo, nil +} + +// parseConnectionInfo is used to convert the raw configuration into the +// *connectionInfo struct. +func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + connInfo, err := decodeConnInfo(v) + if err != nil { + return nil, err + } + + // To default Agent to true, we need to check the raw string, since the + // decoded boolean can't represent "absence of config". + // + // And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we + // shouldn't try. + agent := v.GetAttr("agent") + if agent.IsNull() && os.Getenv("SSH_AUTH_SOCK") != "" { + connInfo.Agent = true + } + + if connInfo.User == "" { + connInfo.User = DefaultUser + } + + // Check if host is empty. + // Otherwise return error. + if connInfo.Host == "" { + return nil, fmt.Errorf("host for provisioner cannot be empty") + } + + // Format the host if needed. + // Needed for IPv6 support. 
+ connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + connInfo.Port = DefaultPort + } + // Set default targetPlatform to unix if it's empty + if connInfo.TargetPlatform == "" { + connInfo.TargetPlatform = TargetPlatformUnix + } else if connInfo.TargetPlatform != TargetPlatformUnix && connInfo.TargetPlatform != TargetPlatformWindows { + return nil, fmt.Errorf("target_platform for provisioner has to be either %s or %s", TargetPlatformUnix, TargetPlatformWindows) + } + // Choose an appropriate default script path based on the target platform. There is no single + // suitable default script path which works on both UNIX and Windows targets. + if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformUnix { + connInfo.ScriptPath = DefaultUnixScriptPath + } + if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformWindows { + connInfo.ScriptPath = DefaultWindowsScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + // Default all bastion config attrs to their non-bastion counterparts + if connInfo.BastionHost != "" { + // Format the bastion host if needed. + // Needed for IPv6 support. 
+ connInfo.BastionHost = shared.IpFormat(connInfo.BastionHost) + + if connInfo.BastionUser == "" { + connInfo.BastionUser = connInfo.User + } + if connInfo.BastionPassword == "" { + connInfo.BastionPassword = connInfo.Password + } + if connInfo.BastionPrivateKey == "" { + connInfo.BastionPrivateKey = connInfo.PrivateKey + } + if connInfo.BastionCertificate == "" { + connInfo.BastionCertificate = connInfo.Certificate + } + if connInfo.BastionPort == 0 { + connInfo.BastionPort = connInfo.Port + } + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +// prepareSSHConfig is used to turn the *ConnectionInfo provided into a +// usable *SSHConfig for client initialization. +func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) { + sshAgent, err := connectToAgent(connInfo) + if err != nil { + return nil, err + } + + host := fmt.Sprintf("%s:%d", connInfo.Host, connInfo.Port) + + sshConf, err := buildSSHClientConfig(sshClientConfigOpts{ + user: connInfo.User, + host: host, + privateKey: connInfo.PrivateKey, + password: connInfo.Password, + hostKey: connInfo.HostKey, + certificate: connInfo.Certificate, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + var p *proxyInfo + + if connInfo.ProxyHost != "" { + p = newProxyInfo( + fmt.Sprintf("%s:%d", connInfo.ProxyHost, connInfo.ProxyPort), + connInfo.ProxyScheme, + connInfo.ProxyUserName, + connInfo.ProxyUserPassword, + ) + } + + connectFunc := ConnectFunc("tcp", host, p) + + var bastionConf *ssh.ClientConfig + if connInfo.BastionHost != "" { + bastionHost := fmt.Sprintf("%s:%d", connInfo.BastionHost, connInfo.BastionPort) + + bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{ + user: 
connInfo.BastionUser, + host: bastionHost, + privateKey: connInfo.BastionPrivateKey, + password: connInfo.BastionPassword, + hostKey: connInfo.HostKey, + certificate: connInfo.BastionCertificate, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + connectFunc = BastionConnectFunc("tcp", bastionHost, bastionConf, "tcp", host, p) + } + + config := &sshConfig{ + config: sshConf, + connection: connectFunc, + sshAgent: sshAgent, + } + return config, nil +} + +type sshClientConfigOpts struct { + privateKey string + password string + sshAgent *sshAgent + certificate string + user string + host string + hostKey string +} + +func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) { + hkCallback := ssh.InsecureIgnoreHostKey() + + if opts.hostKey != "" { + // The knownhosts package only takes paths to files, but tofu + // generally wants to handle config data in-memory. Rather than making + // the known_hosts file an exception, write out the data to a temporary + // file to create the HostKeyCallback. + tf, err := os.CreateTemp("", "tf-known_hosts") + if err != nil { + return nil, fmt.Errorf("failed to create temp known_hosts file: %w", err) + } + defer tf.Close() + defer os.RemoveAll(tf.Name()) + + // we mark this as a CA as well, but the host key fallback will still + // use it as a direct match if the remote host doesn't return a + // certificate. 
+ if _, err := tf.WriteString(fmt.Sprintf("@cert-authority %s %s\n", opts.host, opts.hostKey)); err != nil { + return nil, fmt.Errorf("failed to write temp known_hosts file: %w", err) + } + tf.Sync() + + hkCallback, err = knownhosts.New(tf.Name()) + if err != nil { + return nil, err + } + } + + conf := &ssh.ClientConfig{ + HostKeyCallback: hkCallback, + User: opts.user, + } + + if opts.privateKey != "" { + if opts.certificate != "" { + log.Println("using client certificate for authentication") + + certSigner, err := signCertWithPrivateKey(opts.privateKey, opts.certificate) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, certSigner) + } else { + log.Println("using private key for authentication") + + pubKeyAuth, err := readPrivateKey(opts.privateKey) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, pubKeyAuth) + } + } + + if opts.password != "" { + conf.Auth = append(conf.Auth, ssh.Password(opts.password)) + conf.Auth = append(conf.Auth, ssh.KeyboardInteractive( + PasswordKeyboardInteractive(opts.password))) + } + + if opts.sshAgent != nil { + conf.Auth = append(conf.Auth, opts.sshAgent.Auth()) + } + + return conf, nil +} + +// Create a Cert Signer and return ssh.AuthMethod +func signCertWithPrivateKey(pk string, certificate string) (ssh.AuthMethod, error) { + rawPk, err := ssh.ParseRawPrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("failed to parse private key %q: %w", pk, err) + } + + pcert, _, _, _, err := ssh.ParseAuthorizedKey([]byte(certificate)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate %q: %w", certificate, err) + } + + usigner, err := ssh.NewSignerFromKey(rawPk) + if err != nil { + return nil, fmt.Errorf("failed to create signer from raw private key %q: %w", rawPk, err) + } + + ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner) + if err != nil { + return nil, fmt.Errorf("failed to create cert signer %q: %w", usigner, err) + } + + return 
ssh.PublicKeys(ucertSigner), nil +} + +func readPrivateKey(pk string) (ssh.AuthMethod, error) { + // We parse the private key on our own first so that we can + // show a nicer error if the private key has a password. + block, _ := pem.Decode([]byte(pk)) + if block == nil { + return nil, errors.New("Failed to read ssh private key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, errors.New( + "Failed to read ssh private key: password protected keys are\n" + + "not supported. Please decrypt the key prior to use.") + } + + signer, err := ssh.ParsePrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("Failed to parse ssh private key: %w", err) + } + + return ssh.PublicKeys(signer), nil +} + +func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) { + if !connInfo.Agent { + // No agent configured + return nil, nil + } + + agent, conn, err := sshagent.New() + if err != nil { + return nil, err + } + + // connection close is handled over in Communicator + return &sshAgent{ + agent: agent, + conn: conn, + id: connInfo.AgentIdentity, + }, nil + +} + +// A tiny wrapper around an agent.Agent to expose the ability to close its +// associated connection on request. +type sshAgent struct { + agent agent.Agent + conn net.Conn + id string +} + +func (a *sshAgent) Close() error { + if a.conn == nil { + return nil + } + + return a.conn.Close() +} + +// make an attempt to either read the identity file or find a corresponding +// public key file using the typical openssh naming convention. +// This returns the public key in wire format, or nil when a key is not found. 
+func findIDPublicKey(id string) []byte { + for _, d := range idKeyData(id) { + signer, err := ssh.ParsePrivateKey(d) + if err == nil { + log.Println("[DEBUG] parsed id private key") + pk := signer.PublicKey() + return pk.Marshal() + } + + // try it as a publicKey + pk, err := ssh.ParsePublicKey(d) + if err == nil { + log.Println("[DEBUG] parsed id public key") + return pk.Marshal() + } + + // finally try it as an authorized key + pk, _, _, _, err = ssh.ParseAuthorizedKey(d) + if err == nil { + log.Println("[DEBUG] parsed id authorized key") + return pk.Marshal() + } + } + + return nil +} + +// Try to read an id file using the id as the file path. Also read the .pub +// file if it exists, as the id file may be encrypted. Return only the file +// data read. We don't need to know what data came from which path, as we will +// try parsing each as a private key, a public key and an authorized key +// regardless. +func idKeyData(id string) [][]byte { + idPath, err := filepath.Abs(id) + if err != nil { + return nil + } + + var fileData [][]byte + + paths := []string{idPath} + + if !strings.HasSuffix(idPath, ".pub") { + paths = append(paths, idPath+".pub") + } + + for _, p := range paths { + d, err := os.ReadFile(p) + if err != nil { + log.Printf("[DEBUG] error reading %q: %s", p, err) + continue + } + log.Printf("[DEBUG] found identity data at %q", p) + fileData = append(fileData, d) + } + + return fileData +} + +// sortSigners moves a signer with an agent comment field matching the +// agent_identity to the head of the list when attempting authentication. This +// helps when there are more keys loaded in an agent than the host will allow +// attempts. 
+func (s *sshAgent) sortSigners(signers []ssh.Signer) { + if s.id == "" || len(signers) < 2 { + return + } + + // if we can locate the public key, either by extracting it from the id or + // locating the .pub file, then we can more easily determine an exact match + idPk := findIDPublicKey(s.id) + + // if we have a signer with a connect field that matches the id, send that + // first, otherwise put close matches at the front of the list. + head := 0 + for i := range signers { + pk := signers[i].PublicKey() + k, ok := pk.(*agent.Key) + if !ok { + continue + } + + // check for an exact match first + if bytes.Equal(pk.Marshal(), idPk) || s.id == k.Comment { + signers[0], signers[i] = signers[i], signers[0] + break + } + + // no exact match yet, move it to the front if it's close. The agent + // may have loaded as a full filepath, while the config refers to it by + // filename only. + if strings.HasSuffix(k.Comment, s.id) { + signers[head], signers[i] = signers[i], signers[head] + head++ + continue + } + } +} + +func (s *sshAgent) Signers() ([]ssh.Signer, error) { + signers, err := s.agent.Signers() + if err != nil { + return nil, err + } + + s.sortSigners(signers) + return signers, nil +} + +func (a *sshAgent) Auth() ssh.AuthMethod { + return ssh.PublicKeysCallback(a.Signers) +} + +func (a *sshAgent) ForwardToAgent(client *ssh.Client) error { + return agent.ForwardToAgent(client, a.agent) +} diff --git a/pkg/communicator/ssh/provisioner_test.go b/pkg/communicator/ssh/provisioner_test.go new file mode 100644 index 00000000000..826bf607c74 --- /dev/null +++ b/pkg/communicator/ssh/provisioner_test.go @@ -0,0 +1,231 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestProvisioner_connInfo(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "certificate": cty.StringVal("somecertificate"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("127.0.1.1"), + "bastion_port": cty.NumberIntVal(20022), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.User != "root" { + t.Fatalf("bad: %v", conf) + } + if conf.Password != "supersecret" { + t.Fatalf("bad: %v", conf) + } + if conf.PrivateKey != "someprivatekeycontents" { + t.Fatalf("bad: %v", conf) + } + if conf.Certificate != "somecertificate" { + t.Fatalf("bad: %v", conf) + } + if conf.Host != "127.0.0.1" { + t.Fatalf("bad: %v", conf) + } + if conf.Port != 22 { + t.Fatalf("bad: %v", conf) + } + if conf.Timeout != "30s" { + t.Fatalf("bad: %v", conf) + } + if conf.ScriptPath != DefaultUnixScriptPath { + t.Fatalf("bad: %v", conf) + } + if conf.TargetPlatform != TargetPlatformUnix { + t.Fatalf("bad: %v", conf) + } + if conf.BastionHost != "127.0.1.1" { + t.Fatalf("bad: %v", conf) + } + if conf.BastionPort != 20022 { + t.Fatalf("bad: %v", conf) + } + if conf.BastionUser != "root" { + t.Fatalf("bad: %v", conf) + } + if conf.BastionPassword != "supersecret" { + t.Fatalf("bad: %v", conf) + } + if conf.BastionPrivateKey != "someprivatekeycontents" { + t.Fatalf("bad: %v", conf) + } +} + +func TestProvisioner_connInfoIpv6(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": 
cty.StringVal("::1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("::1"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.Host != "[::1]" { + t.Fatalf("bad: %v", conf) + } + + if conf.BastionHost != "[::1]" { + t.Fatalf("bad %v", conf) + } +} + +func TestProvisioner_connInfoHostname(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("example.com"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.Host != "example.com" { + t.Fatalf("bad: %v", conf) + } + + if conf.BastionHost != "example.com" { + t.Fatalf("bad %v", conf) + } +} + +func TestProvisioner_connInfoEmptyHostname(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) + + _, err := parseConnectionInfo(v) + if err == nil { + t.Fatalf("bad: should not allow empty host") + } +} + +func TestProvisioner_connInfoProxy(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "proxy_scheme": cty.StringVal("http"), + "proxy_host": cty.StringVal("proxy.example.com"), + "proxy_port": cty.StringVal("80"), + "proxy_user_name": 
cty.StringVal("proxyuser"), + "proxy_user_password": cty.StringVal("proxyuser_password"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.Host != "example.com" { + t.Fatalf("bad: %v", conf) + } + + if conf.ProxyScheme != "http" { + t.Fatalf("bad: %v", conf) + } + + if conf.ProxyHost != "proxy.example.com" { + t.Fatalf("bad: %v", conf) + } + + if conf.ProxyPort != 80 { + t.Fatalf("bad: %v", conf) + } + + if conf.ProxyUserName != "proxyuser" { + t.Fatalf("bad: %v", conf) + } + + if conf.ProxyUserPassword != "proxyuser_password" { + t.Fatalf("bad: %v", conf) + } +} + +func TestProvisioner_stringBastionPort(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("example.com"), + "bastion_port": cty.StringVal("12345"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.BastionPort != 12345 { + t.Fatalf("bad %v", conf) + } +} + +func TestProvisioner_invalidPortNumber(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("example.com"), + "port": cty.NumberIntVal(123456789), + }) + + _, err := parseConnectionInfo(v) + if err == nil { + t.Fatalf("bad: should not allow invalid port number") + } + if got, want := err.Error(), "value must be a whole number, between 0 and 65535 inclusive"; got != want { + t.Errorf("unexpected error\n got: %s\nwant: %s", got, want) + } +} diff --git a/pkg/communicator/ssh/ssh_test.go b/pkg/communicator/ssh/ssh_test.go new file mode 100644 
index 00000000000..9da4127bbd9 --- /dev/null +++ b/pkg/communicator/ssh/ssh_test.go @@ -0,0 +1,105 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "os" + "path/filepath" + "testing" + + "golang.org/x/crypto/ssh" +) + +// verify that we can locate public key data +func TestFindKeyData(t *testing.T) { + // set up a test directory + td := t.TempDir() + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(td); err != nil { + t.Fatal(err) + } + defer os.Chdir(cwd) + + id := "provisioner_id" + + pub := generateSSHKey(t, id) + pubData := pub.Marshal() + + // backup the pub file, and replace it with a broken file to ensure we + // extract the public key from the private key. + if err := os.Rename(id+".pub", "saved.pub"); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(id+".pub", []byte("not a public key"), 0600); err != nil { + t.Fatal(err) + } + + foundData := findIDPublicKey(id) + if !bytes.Equal(foundData, pubData) { + t.Fatalf("public key %q does not match", foundData) + } + + // move the pub file back, and break the private key file to simulate an + // encrypted private key + if err := os.Rename("saved.pub", id+".pub"); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(id, []byte("encrypted private key"), 0600); err != nil { + t.Fatal(err) + } + + foundData = findIDPublicKey(id) + if !bytes.Equal(foundData, pubData) { + t.Fatalf("public key %q does not match", foundData) + } + + // check the file by path too + foundData = findIDPublicKey(filepath.Join(".", id)) + if !bytes.Equal(foundData, pubData) { + t.Fatalf("public key %q does not match", foundData) + } +} + +func generateSSHKey(t *testing.T, idFile string) ssh.PublicKey { + t.Helper() + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + 
t.Fatal(err) + } + + privFile, err := os.OpenFile(idFile, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + t.Fatal(err) + } + defer privFile.Close() + privPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)} + if err := pem.Encode(privFile, privPEM); err != nil { + t.Fatal(err) + } + + // generate and write public key + pub, err := ssh.NewPublicKey(&priv.PublicKey) + if err != nil { + t.Fatal(err) + } + + err = os.WriteFile(idFile+".pub", ssh.MarshalAuthorizedKey(pub), 0600) + if err != nil { + t.Fatal(err) + } + + return pub +} diff --git a/pkg/communicator/winrm/communicator.go b/pkg/communicator/winrm/communicator.go new file mode 100644 index 00000000000..e301a614740 --- /dev/null +++ b/pkg/communicator/winrm/communicator.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package winrm + +import ( + "fmt" + "io" + "log" + "math/rand" + "strconv" + "strings" + "time" + + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/masterzen/winrm" + "github.com/packer-community/winrmcp/winrmcp" + "github.com/zclconf/go-cty/cty" +) + +// Communicator represents the WinRM communicator +type Communicator struct { + connInfo *connectionInfo + client *winrm.Client + endpoint *winrm.Endpoint + rand *rand.Rand +} + +// New creates a new communicator implementation over WinRM. 
+func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) + if err != nil { + return nil, err + } + + endpoint := &winrm.Endpoint{ + Host: connInfo.Host, + Port: int(connInfo.Port), + HTTPS: connInfo.HTTPS, + Insecure: connInfo.Insecure, + Timeout: connInfo.TimeoutVal, + } + if len(connInfo.CACert) > 0 { + endpoint.CACert = []byte(connInfo.CACert) + } + + comm := &Communicator{ + connInfo: connInfo, + endpoint: endpoint, + // Seed our own rand source so that script paths are not deterministic + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o provisioners.UIOutput) error { + // Set the client to nil since we'll (re)create it + c.client = nil + + params := winrm.DefaultParameters + params.Timeout = formatDuration(c.Timeout()) + if c.connInfo.NTLM { + params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + client, err := winrm.NewClientWithParameters( + c.endpoint, c.connInfo.User, c.connInfo.Password, params) + if err != nil { + return err + } + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via WinRM...\n"+ + " Host: %s\n"+ + " Port: %d\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " HTTPS: %t\n"+ + " Insecure: %t\n"+ + " NTLM: %t\n"+ + " CACert: %t", + c.connInfo.Host, + c.connInfo.Port, + c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.HTTPS, + c.connInfo.Insecure, + c.connInfo.NTLM, + c.connInfo.CACert != "", + )) + } + + log.Printf("[DEBUG] connecting to remote shell using WinRM") + shell, err := client.CreateShell() + if err != nil { + log.Printf("[ERROR] error creating shell: %s", err) + return err + } + + err = shell.Close() + if err != nil { + log.Printf("[ERROR] error closing shell: %s", err) + return err + } + + if o != nil { + o.Output("Connected!") + } + + c.client = client + + return nil +} + +// Disconnect implementation 
of communicator.Communicator interface +func (c *Communicator) Disconnect() error { + c.client = nil + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(c.rand.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(rc *remote.Cmd) error { + rc.Init() + log.Printf("[DEBUG] starting remote command: %s", rc.Command) + + // TODO: make sure communicators always connect first, so we can get output + // from the connection. + if c.client == nil { + log.Println("[WARN] winrm client not connected, attempting to connect") + if err := c.Connect(nil); err != nil { + return err + } + } + + status, err := c.client.Run(rc.Command, rc.Stdout, rc.Stderr) + rc.SetExitStatus(status, err) + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + wcp, err := c.newCopyClient() + if err != nil { + return err + } + log.Printf("[DEBUG] Uploading file to '%s'", path) + return wcp.Write(path, input) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + return c.Upload(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) + wcp, err := c.newCopyClient() + if err != nil { + return err + } + return wcp.Copy(src, dst) +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + + config := 
winrmcp.Config{ + Auth: winrmcp.Auth{ + User: c.connInfo.User, + Password: c.connInfo.Password, + }, + Https: c.connInfo.HTTPS, + Insecure: c.connInfo.Insecure, + OperationTimeout: c.Timeout(), + MaxOperationsPerShell: 15, // lowest common denominator + } + + if c.connInfo.NTLM { + config.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + if c.connInfo.CACert != "" { + config.CACertBytes = []byte(c.connInfo.CACert) + } + + return winrmcp.New(addr, &config) +} diff --git a/pkg/communicator/winrm/communicator_test.go b/pkg/communicator/winrm/communicator_test.go new file mode 100644 index 00000000000..ae3b79c86da --- /dev/null +++ b/pkg/communicator/winrm/communicator_test.go @@ -0,0 +1,223 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package winrm + +import ( + "bytes" + "io" + "regexp" + "strconv" + "testing" + + "github.com/dylanmei/winrmtest" + "github.com/kubegems/opentofu/pkg/communicator/remote" + "github.com/kubegems/opentofu/pkg/communicator/shared" + "github.com/zclconf/go-cty/cty" +) + +func newMockWinRMServer(t *testing.T) *winrmtest.Remote { + wrm := winrmtest.NewRemote() + + wrm.CommandFunc( + winrmtest.MatchText("echo foo"), + func(out, err io.Writer) int { + out.Write([]byte("foo")) + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchPattern(`^echo c29tZXRoaW5n >> ".*"$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchPattern(`^powershell.exe -EncodedCommand .*$`), + func(out, err io.Writer) int { + return 0 + }) + + wrm.CommandFunc( + winrmtest.MatchText("powershell"), + func(out, err io.Writer) int { + return 0 + }) + + return wrm +} + +func TestStart(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), 
+ "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + var cmd remote.Cmd + stdout := new(bytes.Buffer) + cmd.Command = "echo foo" + cmd.Stdout = stdout + + err = c.Start(&cmd) + if err != nil { + t.Fatalf("error executing remote command: %s", err) + } + cmd.Wait() + + if stdout.String() != "foo" { + t.Fatalf("bad command response: expected %q, got %q", "foo", stdout.String()) + } +} + +func TestUpload(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + err = c.Upload("C:/Temp/terraform.cmd", bytes.NewReader([]byte("something"))) + if err != nil { + t.Fatalf("error uploading file: %s", err) + } +} + +func TestScriptPath(t *testing.T) { + cases := []struct { + Input string + Pattern string + }{ + { + "/tmp/script.sh", + `^/tmp/script\.sh$`, + }, + { + "/tmp/script_%RAND%.sh", + `^/tmp/script_(\d+)\.sh$`, + }, + } + + for _, tc := range cases { + v := cty.ObjectVal(map[string]cty.Value{ + "host": cty.StringVal(""), + "type": cty.StringVal("winrm"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) + if err != nil { + t.Fatalf("err: %s", err) + } + output := comm.ScriptPath() + + match, err := regexp.Match(tc.Pattern, []byte(output)) + if err != nil { + t.Fatalf("bad: %s\n\nerr: %s", tc.Input, err) + } + if !match { + t.Fatalf("bad: %s\n\n%s", tc.Input, output) + } + } +} + +func 
TestNoTransportDecorator(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + if c.client.TransportDecorator != nil { + t.Fatal("bad TransportDecorator: expected nil, got non-nil") + } +} + +func TestTransportDecorator(t *testing.T) { + wrm := newMockWinRMServer(t) + defer wrm.Close() + + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "use_ntlm": cty.StringVal("true"), + "timeout": cty.StringVal("30s"), + }) + + c, err := New(v) + if err != nil { + t.Fatalf("error creating communicator: %s", err) + } + + err = c.Connect(nil) + if err != nil { + t.Fatalf("error connecting communicator: %s", err) + } + defer c.Disconnect() + + if c.client.TransportDecorator == nil { + t.Fatal("bad TransportDecorator: expected non-nil, got nil") + } +} + +func TestScriptPath_randSeed(t *testing.T) { + // Pre GH-4186 fix, this value was the deterministic start the pseudorandom + // chain of unseeded math/rand values for Int31(). + staticSeedPath := "C:/Temp/terraform_1298498081.cmd" + c, err := New(cty.NullVal(shared.ConnectionBlockSupersetSchema.ImpliedType())) + if err != nil { + t.Fatalf("err: %s", err) + } + path := c.ScriptPath() + if path == staticSeedPath { + t.Fatalf("rand not seeded! 
got: %s", path) + } +} diff --git a/pkg/communicator/winrm/provisioner.go b/pkg/communicator/winrm/provisioner.go new file mode 100644 index 00000000000..a43121f5eb1 --- /dev/null +++ b/pkg/communicator/winrm/provisioner.go @@ -0,0 +1,174 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package winrm + +import ( + "fmt" + "log" + "path/filepath" + "strings" + "time" + + "github.com/kubegems/opentofu/pkg/communicator/shared" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "Administrator" + + // DefaultPort is used if there is no port given + DefaultPort = 5985 + + // DefaultHTTPSPort is used if there is no port given and HTTPS is true + DefaultHTTPSPort = 5986 + + // DefaultScriptPath is used as the path to copy the file to + // for remote execution if not provided otherwise. + DefaultScriptPath = "C:/Temp/terraform_%RAND%.cmd" + + // DefaultTimeout is used if there is no timeout given + DefaultTimeout = 5 * time.Minute +) + +// connectionInfo is decoded from the ConnInfo of the resource. These are the +// only keys we look at. If a KeyFile is given, that is used instead +// of a password. +type connectionInfo struct { + User string + Password string + Host string + Port uint16 + HTTPS bool + Insecure bool + NTLM bool `mapstructure:"use_ntlm"` + CACert string `mapstructure:"cacert"` + Timeout string + ScriptPath string `mapstructure:"script_path"` + TimeoutVal time.Duration `mapstructure:"-"` +} + +// decodeConnInfo decodes the given cty.Value using the same behavior as the +// lgeacy mapstructure decoder in order to preserve as much of the existing +// logic as possible for compatibility. 
+func decodeConnInfo(v cty.Value) (*connectionInfo, error) { + connInfo := &connectionInfo{} + if v.IsNull() { + return connInfo, nil + } + + for k, v := range v.AsValueMap() { + if v.IsNull() { + continue + } + + switch k { + case "user": + connInfo.User = v.AsString() + case "password": + connInfo.Password = v.AsString() + case "host": + connInfo.Host = v.AsString() + case "port": + if err := gocty.FromCtyValue(v, &connInfo.Port); err != nil { + return nil, err + } + case "https": + connInfo.HTTPS = v.True() + case "insecure": + connInfo.Insecure = v.True() + case "use_ntlm": + connInfo.NTLM = v.True() + case "cacert": + connInfo.CACert = v.AsString() + case "script_path": + connInfo.ScriptPath = v.AsString() + case "timeout": + connInfo.Timeout = v.AsString() + } + } + return connInfo, nil +} + +// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into +// a ConnectionInfo struct +func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + connInfo, err := decodeConnInfo(v) + if err != nil { + return nil, err + } + // Check on script paths which point to the default Windows TEMP folder because files + // which are put in there very early in the boot process could get cleaned/deleted + // before you had the chance to execute them. + // + // TODO (SvH) Needs some more debugging to fully understand the exact sequence of events + // causing this... + if strings.HasPrefix(filepath.ToSlash(connInfo.ScriptPath), "C:/Windows/Temp") { + return nil, fmt.Errorf( + `Using the C:\Windows\Temp folder is not supported. Please use a different 'script_path'.`) + } + + if connInfo.User == "" { + connInfo.User = DefaultUser + } + + // Format the host if needed. + // Needed for IPv6 support. 
+ connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + if connInfo.HTTPS { + connInfo.Port = DefaultHTTPSPort + } else { + connInfo.Port = DefaultPort + } + } + if connInfo.ScriptPath == "" { + connInfo.ScriptPath = DefaultScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +func formatDuration(duration time.Duration) string { + h := int(duration.Hours()) + m := int(duration.Minutes()) - h*60 + s := int(duration.Seconds()) - (h*3600 + m*60) + + res := "PT" + if h > 0 { + res = fmt.Sprintf("%s%dH", res, h) + } + if m > 0 { + res = fmt.Sprintf("%s%dM", res, m) + } + if s > 0 { + res = fmt.Sprintf("%s%dS", res, s) + } + + return res +} diff --git a/pkg/communicator/winrm/provisioner_test.go b/pkg/communicator/winrm/provisioner_test.go new file mode 100644 index 00000000000..39e5a995f11 --- /dev/null +++ b/pkg/communicator/winrm/provisioner_test.go @@ -0,0 +1,262 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package winrm + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestProvisioner_defaultHTTPSPort(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "https": cty.True, + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + if conf.Port != 5986 { + t.Fatalf("expected: %v: got: %v", 5986, conf) + } + if conf.HTTPS != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } +} + +func TestProvisioner_connInfo(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("5985"), + "https": cty.True, + "use_ntlm": cty.True, + "timeout": cty.StringVal("30s"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.User != "Administrator" { + t.Fatalf("expected: %v: got: %v", "Administrator", conf) + } + if conf.Password != "supersecret" { + t.Fatalf("expected: %v: got: %v", "supersecret", conf) + } + if conf.Host != "127.0.0.1" { + t.Fatalf("expected: %v: got: %v", "127.0.0.1", conf) + } + if conf.Port != 5985 { + t.Fatalf("expected: %v: got: %v", 5985, conf) + } + if conf.HTTPS != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } + if conf.NTLM != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } + if conf.Timeout != "30s" { + t.Fatalf("expected: %v: got: %v", "30s", conf) + } + if conf.ScriptPath != DefaultScriptPath { + t.Fatalf("expected: %v: got: %v", DefaultScriptPath, conf) + } +} + +func TestProvisioner_connInfoCACert(t *testing.T) { + caCert := ` +-----BEGIN CERTIFICATE----- +MIIDBjCCAe4CCQCGWwBmOiHQdTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB 
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMB4XDTE2MDYyMTE2MzM0MVoXDTE3MDYyMTE2MzM0MVowRTELMAkG +A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0 +IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AL+LFlsCJG5txZp4yuu+lQnuUrgBXRG+irQqcTXlV91Bp5hpmRIyhnGCtWxxDBUL +xrh4WN3VV/0jDzKT976oLgOy3hj56Cdqf+JlZ1qgMN5bHB3mm3aVWnrnsLbBsfwZ +SEbk3Kht/cE1nK2toNVW+rznS3m+eoV3Zn/DUNwGlZr42hGNs6ETn2jURY78ETqR +mW47xvjf86eIo7vULHJaY6xyarPqkL8DZazOmvY06hUGvGwGBny7gugfXqDG+I8n +cPBsGJGSAmHmVV8o0RCB9UjY+TvSMQRpEDoVlvyrGuglsD8to/4+7UcsuDGlRYN6 +jmIOC37mOi/jwRfWL1YUa4MCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAPDxTH0oQ +JjKXoJgkmQxurB81RfnK/NrswJVzWbOv6ejcbhwh+/ZgJTMc15BrYcxU6vUW1V/i +Z7APU0qJ0icECACML+a2fRI7YdLCTiPIOmY66HY8MZHAn3dGjU5TeiUflC0n0zkP +mxKJe43kcYLNDItbfvUDo/GoxTXrC3EFVZyU0RhFzoVJdODlTHXMVFCzcbQEBrBJ +xKdShCEc8nFMneZcGFeEU488ntZoWzzms8/QpYrKa5S0Sd7umEU2Kwu4HTkvUFg/ +CqDUFjhydXxYRsxXBBrEiLOE5BdtJR1sH/QHxIJe23C9iHI2nS1NbLziNEApLwC4 +GnSud83VUo9G9w== +-----END CERTIFICATE----- +` + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("5985"), + "https": cty.True, + "timeout": cty.StringVal("30s"), + "cacert": cty.StringVal(caCert), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.User != "Administrator" { + t.Fatalf("expected: %v: got: %v", "Administrator", conf) + } + if conf.Password != "supersecret" { + t.Fatalf("expected: %v: got: %v", "supersecret", conf) + } + if conf.Host != "127.0.0.1" { + t.Fatalf("expected: %v: got: %v", "127.0.0.1", conf) + } + if conf.Port != 5985 { + t.Fatalf("expected: %v: got: %v", 5985, conf) + } + if conf.HTTPS != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } + if conf.Timeout != "30s" { + t.Fatalf("expected: %v: got: %v", "30s", conf) + } + if 
conf.ScriptPath != DefaultScriptPath { + t.Fatalf("expected: %v: got: %v", DefaultScriptPath, conf) + } + if conf.CACert != caCert { + t.Fatalf("expected: %v: got: %v", caCert, conf.CACert) + } +} + +func TestProvisioner_connInfoIpv6(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("::1"), + "port": cty.StringVal("5985"), + "https": cty.True, + "timeout": cty.StringVal("30s"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.User != "Administrator" { + t.Fatalf("expected: %v: got: %v", "Administrator", conf) + } + if conf.Password != "supersecret" { + t.Fatalf("expected: %v: got: %v", "supersecret", conf) + } + if conf.Host != "[::1]" { + t.Fatalf("expected: %v: got: %v", "[::1]", conf) + } + if conf.Port != 5985 { + t.Fatalf("expected: %v: got: %v", 5985, conf) + } + if conf.HTTPS != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } + if conf.Timeout != "30s" { + t.Fatalf("expected: %v: got: %v", "30s", conf) + } + if conf.ScriptPath != DefaultScriptPath { + t.Fatalf("expected: %v: got: %v", DefaultScriptPath, conf) + } +} + +func TestProvisioner_connInfoHostname(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("5985"), + "https": cty.True, + "timeout": cty.StringVal("30s"), + }) + + conf, err := parseConnectionInfo(v) + if err != nil { + t.Fatalf("err: %v", err) + } + + if conf.User != "Administrator" { + t.Fatalf("expected: %v: got: %v", "Administrator", conf) + } + if conf.Password != "supersecret" { + t.Fatalf("expected: %v: got: %v", "supersecret", conf) + } + if conf.Host != "example.com" { + t.Fatalf("expected: %v: got: %v", "example.com", conf) + } + 
if conf.Port != 5985 { + t.Fatalf("expected: %v: got: %v", 5985, conf) + } + if conf.HTTPS != true { + t.Fatalf("expected: %v: got: %v", true, conf) + } + if conf.Timeout != "30s" { + t.Fatalf("expected: %v: got: %v", "30s", conf) + } + if conf.ScriptPath != DefaultScriptPath { + t.Fatalf("expected: %v: got: %v", DefaultScriptPath, conf) + } +} + +func TestProvisioner_formatDuration(t *testing.T) { + cases := map[string]struct { + Config map[string]cty.Value + Result string + }{ + "testSeconds": { + Config: map[string]cty.Value{ + "timeout": cty.StringVal("90s"), + }, + + Result: "PT1M30S", + }, + "testMinutes": { + Config: map[string]cty.Value{ + "timeout": cty.StringVal("5m"), + }, + + Result: "PT5M", + }, + "testHours": { + Config: map[string]cty.Value{ + "timeout": cty.StringVal("1h"), + }, + + Result: "PT1H", + }, + } + + for name, tc := range cases { + // host is required in the schema + tc.Config["host"] = cty.StringVal("") + + conf, err := parseConnectionInfo(cty.ObjectVal(tc.Config)) + if err != nil { + t.Fatalf("err: %v", err) + } + + result := formatDuration(conf.TimeoutVal) + if result != tc.Result { + t.Fatalf("%s: expected: %s got: %s", name, tc.Result, result) + } + } +} diff --git a/pkg/configs/backend.go b/pkg/configs/backend.go new file mode 100644 index 00000000000..c5e90631707 --- /dev/null +++ b/pkg/configs/backend.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/zclconf/go-cty/cty" +) + +// Backend represents a "backend" block inside a "terraform" block in a module +// or file. 
+type Backend struct { + Type string + Config hcl.Body + Eval *StaticEvaluator + + TypeRange hcl.Range + DeclRange hcl.Range +} + +func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { + return &Backend{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + Config: block.Body, + DeclRange: block.DefRange, + }, nil +} + +// Hash produces a hash value for the reciever that covers the type and the +// portions of the config that conform to the given schema. +// +// If the config does not conform to the schema then the result is not +// meaningful for comparison since it will be based on an incomplete result. +// +// As an exception, required attributes in the schema are treated as optional +// for the purpose of hashing, so that an incomplete configuration can still +// be hashed. Other errors, such as extraneous attributes, have no such special +// case. +func (b *Backend) Hash(schema *configschema.Block) (int, hcl.Diagnostics) { + // Don't fail if required attributes are not set. Instead, we'll just + // hash them as nulls. + schema = schema.NoneRequired() + + // This is a bit of an odd workaround, but the decode below intentionally ignores + // errors. I don't want to try to change that at this point, but it may be worth doing + // at some point. For now, I'm just looking to see if there are any references that are + // not valid that the user should look at, instead of just producing an invalid backend object. 
+ diags := b.referenceDiagnostics(schema) + + val, _ := b.Decode(schema) + if val == cty.NilVal { + val = cty.UnknownVal(schema.ImpliedType()) + } + + toHash := cty.TupleVal([]cty.Value{ + cty.StringVal(b.Type), + val, + }) + + return toHash.Hash(), diags +} + +func (b *Backend) Decode(schema *configschema.Block) (cty.Value, hcl.Diagnostics) { + return b.Eval.DecodeBlock(b.Config, schema.DecoderSpec(), StaticIdentifier{ + Module: addrs.RootModule, + Subject: fmt.Sprintf("backend.%s", b.Type), + DeclRange: b.DeclRange, + }) +} + +// This is a hack that may not be needed, but preserves the idea that invalid backends will show a cryptic error about running init duing plan/apply startup. +func (b *Backend) referenceDiagnostics(schema *configschema.Block) hcl.Diagnostics { + var diags hcl.Diagnostics + + refs, refsDiags := lang.References(addrs.ParseRef, hcldec.Variables(b.Config, schema.DecoderSpec())) + diags = append(diags, refsDiags.ToHCL()...) + if diags.HasErrors() { + return diags + } + + _, ctxDiags := b.Eval.scope(StaticIdentifier{ + Module: addrs.RootModule, + Subject: fmt.Sprintf("backend.%s", b.Type), + DeclRange: b.DeclRange, + }).EvalContext(refs) + diags = append(diags, ctxDiags.ToHCL()...) + + return diags +} diff --git a/pkg/configs/checks.go b/pkg/configs/checks.go new file mode 100644 index 00000000000..21c73fd4f9f --- /dev/null +++ b/pkg/configs/checks.go @@ -0,0 +1,262 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang" +) + +// CheckRule represents a configuration-defined validation rule, precondition, +// or postcondition. 
Blocks of this sort can appear in a few different places +// in configuration, including "validation" blocks for variables, +// and "precondition" and "postcondition" blocks for resources. +type CheckRule struct { + // Condition is an expression that must evaluate to true if the condition + // holds or false if it does not. If the expression produces an error then + // that's considered to be a bug in the module defining the check. + // + // The available variables in a condition expression vary depending on what + // a check is attached to. For example, validation rules attached to + // input variables can only refer to the variable that is being validated. + Condition hcl.Expression + + // ErrorMessage should be one or more full sentences, which should be in + // English for consistency with the rest of the error message output but + // can in practice be in any language. The message should describe what is + // required for the condition to return true in a way that would make sense + // to a caller of the module. + // + // The error message expression has the same variables available for + // interpolation as the corresponding condition. + ErrorMessage hcl.Expression + + DeclRange hcl.Range +} + +// validateSelfReferences looks for references in the check rule matching the +// specified resource address, returning error diagnostics if such a reference +// is found. 
+func (cr *CheckRule) validateSelfReferences(checkType string, addr addrs.Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + exprs := []hcl.Expression{ + cr.Condition, + cr.ErrorMessage, + } + for _, expr := range exprs { + if expr == nil { + continue + } + refs, _ := lang.References(addrs.ParseRef, expr.Variables()) + for _, ref := range refs { + var refAddr addrs.Resource + + switch rs := ref.Subject.(type) { + case addrs.Resource: + refAddr = rs + case addrs.ResourceInstance: + refAddr = rs.Resource + default: + continue + } + + if refAddr.Equal(addr) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid reference in %s", checkType), + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addr.String()), + Subject: expr.Range().Ptr(), + }) + break + } + } + } + return diags +} + +// decodeCheckRuleBlock decodes the contents of the given block as a check rule. +// +// Unlike most of our "decode..." functions, this one can be applied to blocks +// of various types as long as their body structures are "check-shaped". The +// function takes the containing block only because some error messages will +// refer to its location, and the returned object's DeclRange will be the +// block's header. +func decodeCheckRuleBlock(block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + var diags hcl.Diagnostics + cr := &CheckRule{ + DeclRange: block.DefRange, + } + + if override { + // For now we'll just forbid overriding check blocks, to simplify + // the initial design. If we can find a clear use-case for overriding + // checks in override files and there's a way to define it that + // isn't confusing then we could relax this. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Can't override %s blocks", block.Type), + Detail: fmt.Sprintf("Override files cannot override %q blocks.", block.Type), + Subject: cr.DeclRange.Ptr(), + }) + return cr, diags + } + + content, moreDiags := block.Body.Content(checkRuleBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["condition"]; exists { + cr.Condition = attr.Expr + + if len(cr.Condition.Variables()) == 0 { + // A condition expression that doesn't refer to any variable is + // pointless, because its result would always be a constant. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid %s expression", block.Type), + Detail: "The condition expression must refer to at least one object from elsewhere in the configuration, or else its result would not be checking anything.", + Subject: cr.Condition.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["error_message"]; exists { + cr.ErrorMessage = attr.Expr + } + + return cr, diags +} + +var checkRuleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "condition", + Required: true, + }, + { + Name: "error_message", + Required: true, + }, + }, +} + +// Check represents a configuration defined check block. +// +// A check block contains 0-1 data blocks, and 0-n assert blocks. The check +// block will load the data block, and execute the assert blocks as check rules +// during the plan and apply OpenTofu operations. 
+type Check struct {
+	// Name is the label of the check block, unique within its module.
+	Name string
+
+	// DataResource is the optional scoped data resource declared inside
+	// this check block. decodeCheckBlock enforces that at most one is
+	// present.
+	DataResource *Resource
+	// Asserts holds the assert blocks that are executed as check rules.
+	// decodeCheckBlock enforces that at least one is present.
+	Asserts []*CheckRule
+
+	// DeclRange is the source range of the check block's header.
+	DeclRange hcl.Range
+}
+
+// Addr returns the module-relative address of this check block.
+func (c Check) Addr() addrs.Check {
+	return addrs.Check{
+		Name: c.Name,
+	}
+}
+
+// Accessible returns true if the given referenceable address refers to this
+// check block.
+func (c Check) Accessible(addr addrs.Referenceable) bool {
+	if check, ok := addr.(addrs.Check); ok {
+		return check.Equal(c.Addr())
+	}
+	return false
+}
+
+// decodeCheckBlock decodes a single "check" block, validating its name and
+// its nested blocks: at most one "data" block and at least one "assert"
+// block. Check blocks may not appear in override files, so override=true
+// always yields an error diagnostic.
+func decodeCheckBlock(block *hcl.Block, override bool) (*Check, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	check := &Check{
+		Name:      block.Labels[0],
+		DeclRange: block.DefRange,
+	}
+
+	if override {
+		// For now we'll just forbid overriding check blocks, to simplify
+		// the initial design. If we can find a clear use-case for overriding
+		// checks in override files and there's a way to define it that
+		// isn't confusing then we could relax this.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Can't override check blocks",
+			Detail:   "Override files cannot override check blocks.",
+			Subject:  check.DeclRange.Ptr(),
+		})
+		return check, diags
+	}
+
+	content, moreDiags := block.Body.Content(checkBlockSchema)
+	diags = append(diags, moreDiags...)
+
+	if !hclsyntax.ValidIdentifier(check.Name) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid check block name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[0],
+		})
+	}
+
+	for _, block := range content.Blocks {
+		switch block.Type {
+		case "data":
+
+			// Only a single scoped data resource is allowed per check block.
+			if check.DataResource != nil {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Multiple data resource blocks",
+					Detail:   fmt.Sprintf("This check block already has a data resource defined at %s.", check.DataResource.DeclRange.Ptr()),
+					Subject:  block.DefRange.Ptr(),
+				})
+				continue
+			}
+
+			data, moreDiags := decodeDataBlock(block, override, true)
+			diags = append(diags, moreDiags...)
+			if !moreDiags.HasErrors() {
+				// Connect this data block back up to this check block.
+				data.Container = check
+
+				// Finally, save the data block.
+				check.DataResource = data
+			}
+		case "assert":
+			assert, moreDiags := decodeCheckRuleBlock(block, override)
+			diags = append(diags, moreDiags...)
+			if !moreDiags.HasErrors() {
+				check.Asserts = append(check.Asserts, assert)
+			}
+		default:
+			// checkBlockSchema only admits "data" and "assert", so any other
+			// type reaching here indicates a programming error.
+			panic(fmt.Sprintf("unhandled check nested block %q", block.Type))
+		}
+	}
+
+	if len(check.Asserts) == 0 {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Zero assert blocks",
+			Detail:   "Check blocks must have at least one assert block.",
+			Subject:  check.DeclRange.Ptr(),
+		})
+	}
+
+	return check, diags
+}
+
+// checkBlockSchema describes the blocks permitted inside a "check" block.
+var checkBlockSchema = &hcl.BodySchema{
+	Blocks: []hcl.BlockHeaderSchema{
+		{Type: "data", LabelNames: []string{"type", "name"}},
+		{Type: "assert"},
+	},
+}
diff --git a/pkg/configs/cloud.go b/pkg/configs/cloud.go
new file mode 100644
index 00000000000..18ab8091d4c
--- /dev/null
+++ b/pkg/configs/cloud.go
@@ -0,0 +1,34 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configs
+
+import (
+	"github.com/hashicorp/hcl/v2"
+)
+
+// CloudConfig represents a "cloud" block inside a "terraform" block in a
+// module or file.
+type CloudConfig struct {
+	// Config is the undecoded body of the cloud block.
+	Config hcl.Body
+	eval   *StaticEvaluator
+
+	// DeclRange is the source range of the cloud block's header.
+	DeclRange hcl.Range
+}
+
+// decodeCloudBlock captures the given cloud block's body and source range
+// without decoding its contents.
+func decodeCloudBlock(block *hcl.Block) (*CloudConfig, hcl.Diagnostics) {
+	return &CloudConfig{
+		Config:    block.Body,
+		DeclRange: block.DefRange,
+	}, nil
+}
+
+// ToBackendConfig adapts this cloud block to the equivalent Backend
+// representation, using the synthetic backend type "cloud".
+func (c *CloudConfig) ToBackendConfig() Backend {
+	return Backend{
+		Type:   "cloud",
+		Config: c.Config,
+		Eval:   c.eval,
+	}
+}
diff --git a/pkg/configs/compat_shim.go b/pkg/configs/compat_shim.go
new file mode 100644
index 00000000000..5f929ab57bb
--- /dev/null
+++ b/pkg/configs/compat_shim.go
@@ -0,0 +1,114 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// ------------------------------------------------------------------------- +// Functions in this file are compatibility shims intended to ease conversion +// from the old configuration loader. Any use of these functions that makes +// a change should generate a deprecation warning explaining to the user how +// to update their code for new patterns. +// +// Shims are particularly important for any patterns that have been widely +// documented in books, tutorials, etc. Users will still be starting from +// these examples and we want to help them adopt the latest patterns rather +// than leave them stranded. +// ------------------------------------------------------------------------- + +// shimTraversalInString takes any arbitrary expression and checks if it is +// a quoted string in the native syntax. If it _is_, then it is parsed as a +// traversal and re-wrapped into a synthetic traversal expression and a +// warning is generated. Otherwise, the given expression is just returned +// verbatim. +// +// This function has no effect on expressions from the JSON syntax, since +// traversals in strings are the required pattern in that syntax. +// +// If wantKeyword is set, the generated warning diagnostic will talk about +// keywords rather than references. The behavior is otherwise unchanged, and +// the caller remains responsible for checking that the result is indeed +// a keyword, e.g. using hcl.ExprAsKeyword. +func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { + // ObjectConsKeyExpr is a special wrapper type used for keys on object + // constructors to deal with the fact that naked identifiers are normally + // handled as "bareword" strings rather than as variable references. 
Since + // we know we're interpreting as a traversal anyway (and thus it won't + // matter whether it's a string or an identifier) we can safely just unwrap + // here and then process whatever we find inside as normal. + if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { + expr = ocke.Wrapped + } + + if !exprIsNativeQuotedString(expr) { + return expr, nil + } + + strVal, diags := expr.Value(nil) + if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { + // Since we're not even able to attempt a shim here, we'll discard + // the diagnostics we saw so far and let the caller's own error + // handling take care of reporting the invalid expression. + return expr, nil + } + + // The position handling here isn't _quite_ right because it won't + // take into account any escape sequences in the literal string, but + // it should be close enough for any error reporting to make sense. + srcRange := expr.Range() + startPos := srcRange.Start // copy + startPos.Column++ // skip initial quote + startPos.Byte++ // skip initial quote + + traversal, tDiags := hclsyntax.ParseTraversalAbs( + []byte(strVal.AsString()), + srcRange.Filename, + startPos, + ) + diags = append(diags, tDiags...) + + if wantKeyword { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted keywords are deprecated", + Detail: "In this context, keywords are expected literally rather than in quotes. OpenTofu 0.11 and earlier required quotes, but quoted keywords are now deprecated and will be removed in a future version of OpenTofu. Remove the quotes surrounding this keyword to silence this warning.", + Subject: &srcRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted references are deprecated", + Detail: "In this context, references are expected literally rather than in quotes. 
OpenTofu 0.11 and earlier required quotes, but quoted references are now deprecated and will be removed in a future version of OpenTofu. Remove the quotes surrounding this reference to silence this warning.", + Subject: &srcRange, + }) + } + + return &hclsyntax.ScopeTraversalExpr{ + Traversal: traversal, + SrcRange: srcRange, + }, diags +} + +// shimIsIgnoreChangesStar returns true if the given expression seems to be +// a string literal whose value is "*". This is used to support a legacy +// form of ignore_changes = all . +// +// This function does not itself emit any diagnostics, so it's the caller's +// responsibility to emit a warning diagnostic when this function returns true. +func shimIsIgnoreChangesStar(expr hcl.Expression) bool { + val, valDiags := expr.Value(nil) + if valDiags.HasErrors() { + return false + } + if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { + return false + } + return val.AsString() == "*" +} diff --git a/pkg/configs/config.go b/pkg/configs/config.go new file mode 100644 index 00000000000..299ccb45f86 --- /dev/null +++ b/pkg/configs/config.go @@ -0,0 +1,1190 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "log" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. 
During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. SourceAddrRaw is the same + // information, but as the raw string the user originally entered. + // + // These fields are meaningless for the root module, where their contents are undefined. 
+ SourceAddr addrs.ModuleSource + SourceAddrRaw string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// ModuleRequirements represents the provider requirements for an individual +// module, along with references to any child modules. This is used to +// determine which modules require which providers. +type ModuleRequirements struct { + Name string + SourceAddr addrs.ModuleSource + SourceDir string + Requirements getproviders.Requirements + Children map[string]*ModuleRequirements + Tests map[string]*TestFileModuleRequirements +} + +// TestFileModuleRequirements maps the runs for a given test file to the module +// requirements for that run block. +type TestFileModuleRequirements struct { + Requirements getproviders.Requirements + Runs map[string]*ModuleRequirements +} + +// NewEmptyConfig constructs a single-node configuration tree with an empty +// root module. This is generally a pretty useless thing to do, so most callers +// should instead use BuildConfig. +func NewEmptyConfig() *Config { + ret := &Config{} + ret.Root = ret + ret.Children = make(map[string]*Config) + ret.Module = &Module{} + return ret +} + +// Depth returns the number of "hops" the receiver is from the root of its +// module tree, with the root module having a depth of zero. 
+func (c *Config) Depth() int { + ret := 0 + this := c + for this.Parent != nil { + ret++ + this = this.Parent + } + return ret +} + +// DeepEach calls the given function once for each module in the tree, starting +// with the receiver. +// +// A parent is always called before its children and children of a particular +// node are visited in lexicographic order by their names. +func (c *Config) DeepEach(cb func(c *Config)) { + cb(c) + + names := make([]string, 0, len(c.Children)) + for name := range c.Children { + names = append(names, name) + } + + for _, name := range names { + c.Children[name].DeepEach(cb) + } +} + +// AllModules returns a slice of all the receiver and all of its descendent +// nodes in the module tree, in the same order they would be visited by +// DeepEach. +func (c *Config) AllModules() []*Config { + var ret []*Config + c.DeepEach(func(c *Config) { + ret = append(ret, c) + }) + return ret +} + +// Descendent returns the descendent config that has the given path beneath +// the receiver, or nil if there is no such module. +// +// The path traverses the static module tree, prior to any expansion to handle +// count and for_each arguments. +// +// An empty path will just return the receiver, and is therefore pointless. +func (c *Config) Descendent(path addrs.Module) *Config { + current := c + for _, name := range path { + current = current.Children[name] + if current == nil { + return nil + } + } + return current +} + +// DescendentForInstance is like Descendent except that it accepts a path +// to a particular module instance in the dynamic module graph, returning +// the node from the static module graph that corresponds to it. +// +// All instances created by a particular module call share the same +// configuration, so the keys within the given path are disregarded. 
+func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { + current := c + for _, step := range path { + current = current.Children[step.Name] + if current == nil { + return nil + } + } + return current +} + +// EntersNewPackage returns true if this call is to an external module, either +// directly via a remote source address or indirectly via a registry source +// address. +// +// Other behaviors in OpenTofu may treat package crossings as a special +// situation, because that indicates that the caller and callee can change +// independently of one another and thus we should disallow using any features +// where the caller assumes anything about the callee other than its input +// variables, required provider configurations, and output values. +// +// It's not meaningful to ask if the Config representing the root module enters +// a new package because the root module is always outside of all module +// packages, and so this function will arbitrarily return false in that case. +func (c *Config) EntersNewPackage() bool { + return moduleSourceAddrEntersNewPackage(c.SourceAddr) +} + +// VerifyDependencySelections checks whether the given locked dependencies +// are acceptable for all of the version constraints reported in the +// configuration tree represented by the reciever. +// +// This function will errors only if any of the locked dependencies are out of +// range for corresponding constraints in the configuration. If there are +// multiple inconsistencies then it will attempt to describe as many of them +// as possible, rather than stopping at the first problem. +// +// It's typically the responsibility of "tofu init" to change the locked +// dependencies to conform with the configuration, and so +// VerifyDependencySelections is intended for other commands to check whether +// it did so correctly and to catch if anything has changed in configuration +// since the last "tofu init" which requires re-initialization. 
However, +// it's up to the caller to decide how to advise users recover from these +// errors, because the advise can vary depending on what operation the user +// is attempting. +func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error { + var errs []error + + reqs, diags := c.ProviderRequirements() + if diags.HasErrors() { + // It should be very unusual to get here, but unfortunately we can + // end up here in some edge cases where the config loader doesn't + // process version constraint strings in exactly the same way as + // the requirements resolver. (See the addProviderRequirements method + // for more information.) + errs = append(errs, fmt.Errorf("failed to determine the configuration's provider requirements: %s", diags.Error())) + } + + for providerAddr, constraints := range reqs { + if !depsfile.ProviderIsLockable(providerAddr) { + continue // disregard builtin providers, and such + } + if depLocks != nil && depLocks.ProviderIsOverridden(providerAddr) { + // The "overridden" case is for unusual special situations like + // dev overrides, so we'll explicitly note it in the logs just in + // case we see bug reports with these active and it helps us + // understand why we ended up using the "wrong" plugin. 
+			log.Printf("[DEBUG] Config.VerifyDependencySelections: skipping %s because it's overridden by a special configuration setting", providerAddr)
+			continue
+		}
+
+		var lock *depsfile.ProviderLock
+		if depLocks != nil { // Should always be true in main code, but unfortunately sometimes not true in old tests that don't fill out arguments completely
+			lock = depLocks.Provider(providerAddr)
+		}
+		if lock == nil {
+			log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has no lock file entry to satisfy %q", providerAddr, getproviders.VersionConstraintsString(constraints))
+			errs = append(errs, fmt.Errorf("provider %s: required by this configuration but no version is selected", providerAddr))
+			continue
+		}
+
+		selectedVersion := lock.Version()
+		allowedVersions := getproviders.MeetingConstraints(constraints)
+		log.Printf("[TRACE] Config.VerifyDependencySelections: provider %s has %s to satisfy %q", providerAddr, selectedVersion.String(), getproviders.VersionConstraintsString(constraints))
+		if !allowedVersions.Has(selectedVersion) {
+			// The most likely cause of this is that the author of a module
+			// has changed its constraints, but this could also happen in
+			// some other unusual situations, such as the user directly
+			// editing the lock file to record something invalid. We'll
+			// distinguish those cases here in order to avoid the more
+			// specific error message potentially being a red herring in
+			// the edge-cases.
+			currentConstraints := getproviders.VersionConstraintsString(constraints)
+			lockedConstraints := getproviders.VersionConstraintsString(lock.VersionConstraints())
+			switch {
+			case currentConstraints != lockedConstraints:
+				// The recorded constraints differ from the current ones, so
+				// the configuration has changed since the lock was written.
+				errs = append(errs, fmt.Errorf("provider %s: locked version selection %s doesn't match the updated version constraints %q", providerAddr, selectedVersion.String(), currentConstraints))
+			default:
+				// Constraints match but the locked version still isn't in
+				// range, so the lock entry itself is inconsistent.
+				errs = append(errs, fmt.Errorf("provider %s: version constraints %q don't match the locked version selection %s", providerAddr, currentConstraints, selectedVersion.String()))
+			}
+		}
+	}
+
+	// Return multiple errors in an arbitrary-but-deterministic order.
+	// (Map iteration above is randomized, so we sort by message text.)
+	sort.Slice(errs, func(i, j int) bool {
+		return errs[i].Error() < errs[j].Error()
+	})
+
+	return errs
+}
+
+// ProviderRequirements searches the full tree of modules under the receiver
+// for both explicit and implicit dependencies on providers.
+//
+// The result is a full manifest of all of the providers that must be available
+// in order to work with the receiving configuration.
+//
+// If the returned diagnostics includes errors then the resulting Requirements
+// may be incomplete.
+func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnostics) {
+	reqs := make(getproviders.Requirements)
+	diags := c.addProviderRequirements(reqs, true, true)
+
+	return reqs, diags
+}
+
+// ProviderRequirementsShallow searches only the direct receiver for explicit
+// and implicit dependencies on providers. Descendant modules are ignored.
+//
+// If the returned diagnostics includes errors then the resulting Requirements
+// may be incomplete.
+func (c *Config) ProviderRequirementsShallow() (getproviders.Requirements, hcl.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs, false, true) + + return reqs, diags +} + +// ProviderRequirementsByModule searches the full tree of modules under the +// receiver for both explicit and implicit dependencies on providers, +// constructing a tree where the requirements are broken out by module. +// +// If the returned diagnostics includes errors then the resulting Requirements +// may be incomplete. +func (c *Config) ProviderRequirementsByModule() (*ModuleRequirements, hcl.Diagnostics) { + reqs := make(getproviders.Requirements) + diags := c.addProviderRequirements(reqs, false, false) + + children := make(map[string]*ModuleRequirements) + for name, child := range c.Children { + childReqs, childDiags := child.ProviderRequirementsByModule() + childReqs.Name = name + children[name] = childReqs + diags = append(diags, childDiags...) + } + + tests := make(map[string]*TestFileModuleRequirements) + for name, test := range c.Module.Tests { + testReqs := &TestFileModuleRequirements{ + Requirements: make(getproviders.Requirements), + Runs: make(map[string]*ModuleRequirements), + } + + for _, provider := range test.Providers { + diags = append(diags, c.addProviderRequirementsFromProviderBlock(testReqs.Requirements, provider)...) + } + + for _, run := range test.Runs { + if run.ConfigUnderTest == nil { + continue + } + + runReqs, runDiags := run.ConfigUnderTest.ProviderRequirementsByModule() + runReqs.Name = run.Name + testReqs.Runs[run.Name] = runReqs + diags = append(diags, runDiags...) 
+ } + + tests[name] = testReqs + } + + ret := &ModuleRequirements{ + SourceAddr: c.SourceAddr, + SourceDir: c.Module.SourceDir, + Requirements: reqs, + Children: children, + Tests: tests, + } + + return ret, diags +} + +// addProviderRequirements is the main part of the ProviderRequirements +// implementation, gradually mutating a shared requirements object to +// eventually return. If the recurse argument is true, the requirements will +// include all descendant modules; otherwise, only the specified module. +func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse, tests bool) hcl.Diagnostics { + var diags hcl.Diagnostics + + // First we'll deal with the requirements directly in _our_ module... + if c.Module.ProviderRequirements != nil { + for _, providerReqs := range c.Module.ProviderRequirements.RequiredProviders { + fqn := providerReqs.Type + if _, ok := reqs[fqn]; !ok { + // We'll at least have an unconstrained dependency then, but might + // add to this in the loop below. + reqs[fqn] = nil + } + // The model of version constraints in this package is still the + // old one using a different upstream module to represent versions, + // so we'll need to shim that out here for now. The two parsers + // don't exactly agree in practice 🙄 so this might produce new errors. + // TODO: Use the new parser throughout this package so we can get the + // better error messages it produces in more situations. + constraints, err := getproviders.ParseVersionConstraints(providerReqs.Requirement.Required.String()) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + // The errors returned by ParseVersionConstraint already include + // the section of input that was incorrect, so we don't need to + // include that here. 
+ Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), + Subject: providerReqs.Requirement.DeclRange.Ptr(), + }) + } + reqs[fqn] = append(reqs[fqn], constraints...) + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. + for _, rc := range c.Module.ManagedResources { + fqn := rc.Provider + if _, exists := reqs[fqn]; exists { + // Explicit dependency already present + continue + } + reqs[fqn] = nil + } + for _, rc := range c.Module.DataResources { + fqn := rc.Provider + if _, exists := reqs[fqn]; exists { + // Explicit dependency already present + continue + } + reqs[fqn] = nil + } + for _, i := range c.Module.Import { + implied, err := addrs.ParseProviderPart(i.StaticTo.Resource.ImpliedProvider()) + if err == nil { + provider := c.Module.ImpliedProviderForUnqualifiedType(implied) + if _, exists := reqs[provider]; exists { + // Explicit dependency already present + continue + } + reqs[provider] = nil + } + // We don't return a diagnostic here, because the invalid address will + // have been caught elsewhere. + } + + // Import blocks that are generating config may also have a custom provider + // meta argument. Like the provider meta argument used in resource blocks, + // we use this opportunity to load any implicit providers. + // + // We'll also use this to validate that import blocks and targeted resource + // blocks agree on which provider they should be using. If they don't agree, + // this will be because the user has written explicit provider arguments + // that don't agree and we'll get them to fix it. + for _, i := range c.Module.Import { + if len(i.StaticTo.Module) > 0 { + // All provider information for imports into modules should come + // from the module block, so we don't need to load anything for + // import targets within modules. 
+ continue + } + + if target, exists := c.Module.ManagedResources[i.StaticTo.String()]; exists { + // This means the information about the provider for this import + // should come from the resource block itself and not the import + // block. + // + // In general, we say that you shouldn't set the provider attribute + // on import blocks in this case. But to make config generation + // easier, we will say that if it is set in both places and it's the + // same then that is okay. + + if i.ProviderConfigRef != nil { + if target.ProviderConfigRef == nil { + // This means we have a provider specified in the import + // block and not in the resource block. This isn't the right + // way round so let's consider this a failure. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import provider argument", + Detail: "The provider argument can only be specified in import blocks that will generate configuration.\n\nUse the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.", + Subject: i.ProviderDeclRange.Ptr(), + }) + continue + } + + if i.ProviderConfigRef.Name != target.ProviderConfigRef.Name || i.ProviderConfigRef.Alias != target.ProviderConfigRef.Alias { + // This means we have a provider specified in both the + // import block and the resource block, and they disagree. + // This is bad as OpenTofu now has different instructions + // about which provider to use. + // + // The general guidance is that only the resource should be + // specifying the provider as the import block provider + // attribute is just for generating config. So, let's just + // tell the user to only set the provider argument in the + // resource. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import provider argument", + Detail: "The provider argument can only be specified in import blocks that will generate configuration.\n\nUse the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.", + Subject: i.ProviderDeclRange.Ptr(), + }) + continue + } + } + + // All the provider information should come from the target resource + // which has already been processed, so skip the rest of this + // processing. + continue + } + + // Otherwise we are generating config for the resource being imported, + // so all the provider information must come from this import block. + fqn := i.Provider + if _, exists := reqs[fqn]; exists { + // Explicit dependency already present + continue + } + reqs[fqn] = nil + } + + // "provider" block can also contain version constraints + for _, provider := range c.Module.ProviderConfigs { + moreDiags := c.addProviderRequirementsFromProviderBlock(reqs, provider) + diags = append(diags, moreDiags...) + } + + // We may have provider blocks and required_providers set in some testing + // files. + if tests { + for _, file := range c.Module.Tests { + for _, provider := range file.Providers { + moreDiags := c.addProviderRequirementsFromProviderBlock(reqs, provider) + diags = append(diags, moreDiags...) + } + + if recurse { + // Then we'll also look for requirements in testing modules. + for _, run := range file.Runs { + if run.ConfigUnderTest != nil { + moreDiags := run.ConfigUnderTest.addProviderRequirements(reqs, true, false) + diags = append(diags, moreDiags...) + } + } + } + } + } + + if recurse { + for _, childConfig := range c.Children { + moreDiags := childConfig.addProviderRequirements(reqs, true, false) + diags = append(diags, moreDiags...) 
+ } + } + + return diags +} + +func (c *Config) addProviderRequirementsFromProviderBlock(reqs getproviders.Requirements, provider *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + fqn := c.Module.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: provider.Name}) + if _, ok := reqs[fqn]; !ok { + // We'll at least have an unconstrained dependency then, but might + // add to this in the loop below. + reqs[fqn] = nil + } + if provider.Version.Required != nil { + // The model of version constraints in this package is still the + // old one using a different upstream module to represent versions, + // so we'll need to shim that out here for now. The two parsers + // don't exactly agree in practice 🙄 so this might produce new errors. + // TODO: Use the new parser throughout this package so we can get the + // better error messages it produces in more situations. + constraints, err := getproviders.ParseVersionConstraints(provider.Version.Required.String()) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + // The errors returned by ParseVersionConstraint already include + // the section of input that was incorrect, so we don't need to + // include that here. + Detail: fmt.Sprintf("Incorrect version constraint syntax: %s.", err.Error()), + Subject: provider.Version.DeclRange.Ptr(), + }) + } + reqs[fqn] = append(reqs[fqn], constraints...) + } + + return diags +} + +// resolveProviderTypes walks through the providers in the module and ensures +// the true types are assigned based on the provider requirements for the +// module. 
+func (c *Config) resolveProviderTypes() map[string]addrs.Provider { + for _, child := range c.Children { + child.resolveProviderTypes() + } + + // collect the required_providers, and then add any missing default providers + providers := map[string]addrs.Provider{} + for name, p := range c.Module.ProviderRequirements.RequiredProviders { + providers[name] = p.Type + } + + // ensure all provider configs know their correct type + for _, p := range c.Module.ProviderConfigs { + addr, required := providers[p.Name] + if required { + p.providerType = addr + } else { + addr := addrs.NewDefaultProvider(p.Name) + p.providerType = addr + providers[p.Name] = addr + } + } + + // connect module call providers to the correct type + for _, mod := range c.Module.ModuleCalls { + for _, p := range mod.Providers { + if addr, known := providers[p.InParent.Name]; known { + p.InParent.providerType = addr + } + } + } + + // fill in parent module calls too + if c.Parent != nil { + for _, mod := range c.Parent.Module.ModuleCalls { + for _, p := range mod.Providers { + if addr, known := providers[p.InChild.Name]; known { + p.InChild.providerType = addr + } + } + } + } + + return providers +} + +// resolveProviderTypesForTests matches resolveProviderTypes except it uses +// the information from resolveProviderTypes to resolve the provider types for +// providers defined within the configs test files. +func (c *Config) resolveProviderTypesForTests(providers map[string]addrs.Provider) { + + for _, test := range c.Module.Tests { + + // testProviders contains the configuration blocks for all the providers + // defined by this test file. It is keyed by the name of the provider + // and the values are a slice of provider configurations which contains + // all the definitions of a named provider of which there can be + // multiple because of aliases. 
+ testProviders := make(map[string][]*Provider) + for _, provider := range test.Providers { + testProviders[provider.Name] = append(testProviders[provider.Name], provider) + } + + // matchedProviders maps the names of providers from testProviders to + // the provider type we have identified for them so far. If during the + // course of resolving the types we find a run block is attempting to + // reuse a provider that has already been assigned a different type, + // then this is an error that we can raise now. + matchedProviders := make(map[string]addrs.Provider) + + // First, we primarily draw our provider types from the main + // configuration under test. The providers for the main configuration + // are provided to us in the argument. + + // We've now set provider types for all the providers required by the + // main configuration. But we can have modules with their own required + // providers referenced by the run blocks. We also have passed provider + // configs that can affect the types of providers when the names don't + // match, so we'll do that here. + + for _, run := range test.Runs { + + // If this run block is executing against our main configuration, we + // want to use the external providers passed in. If we are executing + // against a different module then we need to resolve the provider + // types for that first, and then use those providers. + providers := providers + if run.ConfigUnderTest != nil { + providers = run.ConfigUnderTest.resolveProviderTypes() + } + + // We now check to see what providers this run block is actually + // using, and we can then assign types back to the + + if len(run.Providers) > 0 { + // This provider is only using the subset of providers specified + // within the provider block. + + for _, p := range run.Providers { + addr, exists := providers[p.InChild.Name] + if !exists { + // If this provider wasn't explicitly defined in the + // target module, then we'll set it to the default. 
+ addr = addrs.NewDefaultProvider(p.InChild.Name) + } + + // The child type is always just derived from the providers + // within the config this run block is using. + p.InChild.providerType = addr + + // If we have previously assigned a type to the provider + // for the parent reference, then we use that for the + // parent type. + if addr, exists := matchedProviders[p.InParent.Name]; exists { + p.InParent.providerType = addr + continue + } + + // Otherwise, we'll define the parent type based on the + // child and reference that backwards. + p.InParent.providerType = p.InChild.providerType + + if aliases, exists := testProviders[p.InParent.Name]; exists { + matchedProviders[p.InParent.Name] = p.InParent.providerType + for _, alias := range aliases { + alias.providerType = p.InParent.providerType + } + } + } + + } else { + // This provider is going to load all the providers it can using + // simple name matching. + + for name, addr := range providers { + + if _, exists := matchedProviders[name]; exists { + // Then we've already handled providers of this type + // previously. + continue + } + + if aliases, exists := testProviders[name]; exists { + // Then this provider has been defined within our test + // config. Let's give it the appropriate type. + matchedProviders[name] = addr + for _, alias := range aliases { + alias.providerType = addr + } + + continue + } + + // If we get here then it means we don't actually have a + // provider block for this provider name within our test + // file. This is fine, it just means we don't have to do + // anything and the test will use the default provider for + // that name. + + } + } + + } + + // Now, we've analysed all the test runs for this file. If any providers + // have not been claimed then we'll just give them the default provider + // for their name. + for name, aliases := range testProviders { + if _, exists := matchedProviders[name]; exists { + // Then this provider has a type already. 
+ continue + } + + addr := addrs.NewDefaultProvider(name) + matchedProviders[name] = addr + + for _, alias := range aliases { + alias.providerType = addr + } + } + + } + +} + +// ProviderTypes returns the FQNs of each distinct provider type referenced +// in the receiving configuration. +// +// This is a helper for easily determining which provider types are required +// to fully interpret the configuration, though it does not include version +// information and so callers are expected to have already dealt with +// provider version selection in an earlier step and have identified suitable +// versions for each provider. +func (c *Config) ProviderTypes() []addrs.Provider { + // Ignore diagnostics here because they relate to version constraints + reqs, _ := c.ProviderRequirements() + + ret := make([]addrs.Provider, 0, len(reqs)) + for k := range reqs { + ret = append(ret, k) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].String() < ret[j].String() + }) + return ret +} + +// ResolveAbsProviderAddr returns the AbsProviderConfig represented by the given +// ProviderConfig address, which must not be nil or this method will panic. +// +// If the given address is already an AbsProviderConfig then this method returns +// it verbatim, and will always succeed. If it's a LocalProviderConfig then +// it will consult the local-to-FQN mapping table for the given module +// to find the absolute address corresponding to the given local one. +// +// The module address to resolve local addresses in must be given in the second +// argument, and must refer to a module that exists under the receiver or +// else this method will panic. +func (c *Config) ResolveAbsProviderAddr(addr addrs.ProviderConfig, inModule addrs.Module) addrs.AbsProviderConfig { + switch addr := addr.(type) { + + case addrs.AbsProviderConfig: + return addr + + case addrs.LocalProviderConfig: + // Find the descendent Config that contains the module that this + // local config belongs to. 
+ mc := c.Descendent(inModule) + if mc == nil { + panic(fmt.Sprintf("ResolveAbsProviderAddr with non-existent module %s", inModule.String())) + } + + var provider addrs.Provider + if providerReq, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { + provider = providerReq.Type + } else { + provider = addrs.ImpliedProviderForUnqualifiedType(addr.LocalName) + } + + return addrs.AbsProviderConfig{ + Module: inModule, + Provider: provider, + Alias: addr.Alias, + } + + default: + panic(fmt.Sprintf("cannot ResolveAbsProviderAddr(%v, ...)", addr)) + } + +} + +// ProviderForConfigAddr returns the FQN for a given addrs.ProviderConfig, first +// by checking for the provider in module.ProviderRequirements and falling +// back to addrs.NewDefaultProvider if it is not found. +func (c *Config) ProviderForConfigAddr(addr addrs.LocalProviderConfig) addrs.Provider { + if provider, exists := c.Module.ProviderRequirements.RequiredProviders[addr.LocalName]; exists { + return provider.Type + } + return c.ResolveAbsProviderAddr(addr, addrs.RootModule).Provider +} + +func (c *Config) CheckCoreVersionRequirements() hcl.Diagnostics { + var diags hcl.Diagnostics + + diags = diags.Extend(c.Module.CheckCoreVersionRequirements(c.Path, c.SourceAddr)) + + for _, c := range c.Children { + childDiags := c.CheckCoreVersionRequirements() + diags = diags.Extend(childDiags) + } + + return diags +} + +// TransformForTest prepares the config to execute the given test. +// +// This function directly edits the config that is to be tested, and returns a +// function that will reset the config back to its original state. +// +// Tests will call this before they execute, and then call the deferred function +// to reset the config before the next test. 
+func (c *Config) TransformForTest(run *TestRun, file *TestFile) (func(), hcl.Diagnostics) { + var diags hcl.Diagnostics + + // These transformation functions must be in sync of what is being transformed, + // currently all the functions operate on different fields of configuration. + transformFuncs := []func(*TestRun, *TestFile) (func(), hcl.Diagnostics){ + c.transformProviderConfigsForTest, + c.transformOverriddenResourcesForTest, + c.transformOverriddenModulesForTest, + } + + var resetFuncs []func() + + // We call each function to transform the configuration + // and gather transformation diags as well as reset functions. + for _, f := range transformFuncs { + resetFunc, moreDiags := f(run, file) + diags = append(diags, moreDiags...) + resetFuncs = append(resetFuncs, resetFunc) + } + + // Order of calls doesn't matter as long as transformation functions + // don't operate on the same set of fields. + return func() { + for _, f := range resetFuncs { + f() + } + }, diags +} + +func (c *Config) transformProviderConfigsForTest(run *TestRun, file *TestFile) (func(), hcl.Diagnostics) { + var diags hcl.Diagnostics + + // We need to override the provider settings. + // + // We can have a set of providers defined within the config, we can also + // have a set of providers defined within the test file. Then the run can + // also specify a set of overrides that tell OpenTofu exactly which + // providers from the test file to apply into the config. + // + // The process here is as follows: + // 1. Take all the providers in the original config keyed by name.alias, + // we call this `previous` + // 2. Copy them all into a new map, we call this `next`. + // 3a. If the run has configuration specifying provider overrides, we copy + // only the specified providers from the test file into `next`. While + // doing this we ensure to preserve the name and alias from the + // original config. + // 3b. 
If the run has no override configuration, we copy all the providers + // (including mocks) from the test file into `next`, overriding all providers + // with name collisions from the original config. + // 4. We then modify the original configuration so that the providers it + // holds are the combination specified by the original config, the test + // file and the run file. + // 5. We then return a function that resets the original config back to + // its original state. This can be called by the surrounding test once + // completed so future run blocks can safely execute. + + // First, initialise `previous` and `next`. `previous` contains a backup of + // the providers from the original config. `next` contains the set of + // providers that will be used by the test. `next` starts with the set of + // providers from the original config. + previous := c.Module.ProviderConfigs + next := make(map[string]*Provider) + for key, value := range previous { + next[key] = value + } + + if run != nil && len(run.Providers) > 0 { + // Then we'll only copy over and overwrite the specific providers asked + // for by this run block. + + for _, ref := range run.Providers { + + testProvider, ok := file.getTestProviderOrMock(ref.InParent.String()) + if !ok { + // Then this reference was invalid as we didn't have the + // specified provider in the parent. This should have been + // caught earlier in validation anyway so is unlikely to happen. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing provider definition for %s", ref.InParent.String()), + Detail: "This provider block references a provider definition that does not exist.", + Subject: ref.InParent.NameRange.Ptr(), + }) + continue + } + + next[ref.InChild.String()] = &Provider{ + Name: ref.InChild.Name, + NameRange: ref.InChild.NameRange, + Alias: ref.InChild.Alias, + AliasRange: ref.InChild.AliasRange, + Version: testProvider.Version, + Config: testProvider.Config, + DeclRange: testProvider.DeclRange, + IsMocked: testProvider.IsMocked, + MockResources: testProvider.MockResources, + } + + } + } else { + // Otherwise, let's copy over and overwrite all providers specified by + // the test file itself. + for key, provider := range file.Providers { + next[key] = provider + } + for _, mp := range file.MockProviders { + next[mp.moduleUniqueKey()] = &Provider{ + Name: mp.Name, + NameRange: mp.NameRange, + Alias: mp.Alias, + AliasRange: mp.AliasRange, + DeclRange: mp.DeclRange, + IsMocked: true, + MockResources: mp.MockResources, + } + } + } + + c.Module.ProviderConfigs = next + + return func() { + // Reset the original config within the returned function. + c.Module.ProviderConfigs = previous + }, diags +} + +func (c *Config) transformOverriddenResourcesForTest(run *TestRun, file *TestFile) (func(), hcl.Diagnostics) { + resources, diags := mergeOverriddenResources(run.OverrideResources, file.OverrideResources) + + // We want to pass override values to resources being overridden. + for _, overrideRes := range resources { + targetConfig := c.Root.Descendent(overrideRes.TargetParsed.Module) + if targetConfig == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Module not found: %v", overrideRes.TargetParsed.Module), + Detail: "Target points to resource in undefined module. 
Please, ensure module exists.", + Subject: overrideRes.Target.SourceRange().Ptr(), + }) + continue + } + + res := targetConfig.Module.ResourceByAddr(overrideRes.TargetParsed.Resource) + if res == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Resource not found: %v", overrideRes.TargetParsed), + Detail: "Target points to undefined resource. Please, ensure resource exists.", + Subject: overrideRes.Target.SourceRange().Ptr(), + }) + continue + } + + if res.Mode != overrideRes.Mode { + blockName, targetMode := blockNameOverrideResource, "data" + if overrideRes.Mode == addrs.DataResourceMode { + blockName, targetMode = blockNameOverrideData, "resource" + } + // It could be a warning, but for the sake of consistent UX let's make it an error + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unsupported `%v` target in `%v` block", targetMode, blockName), + Detail: fmt.Sprintf("Target `%v` is `%v` block itself and cannot be overridden with `%v`.", + overrideRes.TargetParsed, targetMode, blockName), + Subject: overrideRes.Target.SourceRange().Ptr(), + }) + } + + res.IsOverridden = true + res.OverrideValues = overrideRes.Values + } + + return func() { + // Reset all the overridden resources. 
+ for _, o := range run.OverrideResources { + m := c.Root.Descendent(o.TargetParsed.Module) + if m == nil { + continue + } + + res := m.Module.ResourceByAddr(o.TargetParsed.Resource) + if res == nil { + continue + } + + res.IsOverridden = false + res.OverrideValues = nil + } + }, diags +} + +func (c *Config) transformOverriddenModulesForTest(run *TestRun, file *TestFile) (func(), hcl.Diagnostics) { + modules, diags := mergeOverriddenModules(run.OverrideModules, file.OverrideModules) + + for _, overrideMod := range modules { + targetConfig := c.Root.Descendent(overrideMod.TargetParsed) + if targetConfig == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Module not found: %v", overrideMod.TargetParsed), + Detail: "Target points to an undefined module. Please, ensure module exists.", + Subject: overrideMod.Target.SourceRange().Ptr(), + }) + continue + } + + for overrideKey := range overrideMod.Outputs { + if _, ok := targetConfig.Module.Outputs[overrideKey]; !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: fmt.Sprintf("Output not found: %v", overrideKey), + Detail: "Specified output to override is not present in the module and will be ignored.", + Subject: overrideMod.Target.SourceRange().Ptr(), + }) + } + } + + targetConfig.Module.IsOverridden = true + + for key, output := range targetConfig.Module.Outputs { + output.IsOverridden = true + + // Override outputs are optional so it's okay to set IsOverridden with no OverrideValue. 
+ if v, ok := overrideMod.Outputs[key]; ok { + output.OverrideValue = &v + } + } + } + + return func() { + for _, overrideMod := range run.OverrideModules { + targetConfig := c.Root.Descendent(overrideMod.TargetParsed) + if targetConfig == nil { + continue + } + + targetConfig.Module.IsOverridden = false + + for _, output := range targetConfig.Module.Outputs { + output.IsOverridden = false + output.OverrideValue = nil + } + } + }, diags +} + +func mergeOverriddenResources(runResources, fileResources []*OverrideResource) ([]*OverrideResource, hcl.Diagnostics) { + // resAddrsInRun is a unique set of resource addresses in run block. + // It's already validated for duplicates previously. + resAddrsInRun := make(map[string]struct{}) + for _, r := range runResources { + resAddrsInRun[r.TargetParsed.String()] = struct{}{} + } + + var diags hcl.Diagnostics + + resources := runResources + for _, r := range fileResources { + addr := r.TargetParsed.String() + + // Run and file override resources could have overlap + // so we warn user and proceed with the definition from the smaller scope. + if _, ok := resAddrsInRun[addr]; ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: fmt.Sprintf("Multiple `%v` blocks for the same address", r.getBlockName()), + Detail: fmt.Sprintf("`%v` is overridden in both global file and local run blocks. The declaration in global file block will be ignored.", addr), + Subject: r.Target.SourceRange().Ptr(), + }) + continue + } + + resources = append(resources, r) + } + + return resources, diags +} + +func mergeOverriddenModules(runModules, fileModules []*OverrideModule) ([]*OverrideModule, hcl.Diagnostics) { + // modAddrsInRun is a unique set of module addresses in run block. + // It's already validated for duplicates previously. 
+ modAddrsInRun := make(map[string]struct{}) + for _, m := range runModules { + modAddrsInRun[m.TargetParsed.String()] = struct{}{} + } + + var diags hcl.Diagnostics + + modules := runModules + for _, m := range fileModules { + addr := m.TargetParsed.String() + + // Run and file override modules could have overlap + // so we warn user and proceed with the definition from the smaller scope. + if _, ok := modAddrsInRun[addr]; ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Multiple `override_module` blocks for the same address", + Detail: fmt.Sprintf("Module `%v` is overridden in both global file and local run blocks. The declaration in global file block will be ignored.", addr), + Subject: m.Target.SourceRange().Ptr(), + }) + continue + } + + modules = append(modules, m) + } + + return modules, diags +} diff --git a/pkg/configs/config_build.go b/pkg/configs/config_build.go new file mode 100644 index 00000000000..fee6b41d25c --- /dev/null +++ b/pkg/configs/config_build.go @@ -0,0 +1,329 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. +func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. 
+	cfg.Children, diags = buildChildModules(cfg, walker)
+	diags = append(diags, buildTestModules(cfg, walker)...)
+
+	// Skip provider resolution if there are any errors, since the provider
+	// configurations themselves may not be valid.
+	if !diags.HasErrors() {
+		// Now that the config is built, we can connect the provider names to all
+		// the known types for validation.
+		providers := cfg.resolveProviderTypes()
+		cfg.resolveProviderTypesForTests(providers)
+	}
+
+	diags = append(diags, validateProviderConfigs(nil, cfg, nil)...)
+	diags = append(diags, validateProviderConfigsForTests(cfg)...)
+
+	return cfg, diags
+}
+
+func buildTestModules(root *Config, walker ModuleWalker) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	for name, file := range root.Module.Tests {
+		for _, run := range file.Runs {
+			if run.Module == nil {
+				continue
+			}
+
+			// We want to make sure the paths for the testing modules are unique
+			// so we create a dedicated path for them.
+			//
+			// Some examples:
+			//   - file: main.tftest.hcl, run: setup - test.main.setup
+			//   - file: tests/main.tftest.hcl, run: setup - test.tests.main.setup
+
+			dir := filepath.Dir(name)
+			base := filepath.Base(name)
+
+			path := addrs.Module{}
+			path = append(path, "test")
+			if dir != "." {
+				path = append(path, strings.Split(filepath.ToSlash(dir), "/")...) // normalize OS separators before splitting
+			}
+			path = append(path, strings.TrimSuffix(base, ".tftest.hcl"), run.Name)
+			req := ModuleRequest{
+				Name:              run.Name,
+				Path:              path,
+				SourceAddr:        run.Module.Source,
+				SourceAddrRange:   run.Module.SourceDeclRange,
+				VersionConstraint: run.Module.Version,
+				Parent:            root,
+				CallRange:         run.Module.DeclRange,
+			}
+
+			cfg, modDiags := loadModule(root, &req, walker)
+			diags = append(diags, modDiags...)
+
+			if cfg != nil {
+				// To get the loader to work, we need to set a bunch of values
+				// (like the name, path, and parent) as if the module was being
+				// loaded as a child of the root config.
+				//
+				// In actuality, when this is executed it will be as if the
+				// module was the root. So, we'll post-process some things to
+				// get it to behave as expected later.
+
+				// First, update the main module for this test run to behave as
+				// if it is the root module.
+				cfg.Parent = nil
+
+				// Then we need to update the paths for this config and all
+				// children, so they think they are all relative to the root
+				// module we just created.
+				rebaseChildModule(cfg, cfg)
+
+				// Finally, link the new config back into our test run so
+				// it can be retrieved later.
+				run.ConfigUnderTest = cfg
+			}
+		}
+	}
+
+	return diags
+}
+
+func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	ret := map[string]*Config{}
+
+	calls := parent.Module.ModuleCalls
+
+	// We'll sort the calls by their local names so that they'll appear in a
+	// predictable order in any logging that's produced during the walk.
+	callNames := make([]string, 0, len(calls))
+	for k := range calls {
+		callNames = append(callNames, k)
+	}
+	sort.Strings(callNames)
+
+	for _, callName := range callNames {
+		call := calls[callName]
+		path := make([]string, len(parent.Path)+1)
+		copy(path, parent.Path)
+		path[len(path)-1] = call.Name
+
+		req := ModuleRequest{
+			Name:              call.Name,
+			Path:              path,
+			SourceAddr:        call.SourceAddr,
+			VersionConstraint: call.Version,
+			Parent:            parent,
+			CallRange:         call.DeclRange,
+			Call:              NewStaticModuleCall(path, call.Variables, parent.Root.Module.SourceDir, call.Workspace),
+		}
+		if call.Source != nil {
+			// Invalid modules sometimes have a nil source field which is handled through loadModule below
+			req.SourceAddrRange = call.Source.Range()
+		}
+		child, modDiags := loadModule(parent.Root, &req, walker)
+		diags = append(diags, modDiags...)
+		if child == nil {
+			// This means an error occurred, there should be diagnostics within
+			// modDiags for this.
+ continue + } + + ret[call.Name] = child + } + + return ret, diags +} + +func loadModule(root *Config, req *ModuleRequest, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + + mod, ver, modDiags := walker.LoadModule(req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. + return nil, diags + } + + cfg := &Config{ + Parent: req.Parent, + Root: root, + Path: req.Path, + Module: mod, + CallRange: req.CallRange, + SourceAddr: req.SourceAddr, + SourceAddrRange: req.SourceAddrRange, + Version: ver, + } + + cfg.Children, modDiags = buildChildModules(cfg, walker) + diags = append(diags, modDiags...) + + if mod.Backend != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Backend configuration ignored", + Detail: "Any selected backend applies to the entire configuration, so OpenTofu expects provider configurations only in the root module.\n\nThis is a warning rather than an error because it's sometimes convenient to temporarily call a root module as a child module for testing purposes, but this backend configuration block will have no effect.", + Subject: mod.Backend.DeclRange.Ptr(), + }) + } + + if len(mod.Import) > 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import configuration", + Detail: fmt.Sprintf("An import block was detected in %q. Import blocks are only allowed in the root module.", cfg.Path), + Subject: mod.Import[0].DeclRange.Ptr(), + }) + } + + return cfg, diags +} + +// rebaseChildModule updates cfg to make it act as if root is the base of the +// module tree. +// +// This is used for modules loaded directly from test files. 
In order to load +// them properly, and reuse the code for loading modules from normal +// configuration files, we pretend they are children of the main configuration +// object. Later, when it comes time for them to execute they will act as if +// they are the root module directly. +// +// This function updates cfg so that it treats the provided root as the actual +// root of this module tree. It then recurses into all the child modules and +// does the same for them. +func rebaseChildModule(cfg *Config, root *Config) { + for _, child := range cfg.Children { + rebaseChildModule(child, root) + } + + cfg.Path = cfg.Path[len(root.Path):] + cfg.Root = root +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. + LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. 
+func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr addrs.ModuleSource + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. 
Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source range for the header of the "module" block + // in configuration that prompted this request. This can be used as the + // subject of an error diagnostic that relates to the module call itself, + // rather than to either its source address or its version number. + CallRange hcl.Range + + // This is where variables and other information from the calling module + // are propogated to the child module for use in the static evaluator + Call StaticModuleCall +} + +// DisabledModuleWalker is a ModuleWalker that doesn't support +// child modules at all, and so will return an error if asked to load one. +// +// This is provided primarily for testing. There is no good reason to use this +// in the main application. +var DisabledModuleWalker ModuleWalker + +func init() { + DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Child modules are not supported", + Detail: "Child module calls are not allowed in this context.", + Subject: &req.CallRange, + }, + } + }) +} diff --git a/pkg/configs/config_build_test.go b/pkg/configs/config_build_test.go new file mode 100644 index 00000000000..256246bfce3 --- /dev/null +++ b/pkg/configs/config_build_test.go @@ -0,0 +1,431 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" +) + +func TestBuildConfig(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/config-build", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + versionI := 0 + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to our fixture directory. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes OpenTofu supports. + sourcePath := filepath.Join("testdata/config-build", req.SourceAddr.String()) + + mod, modDiags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI)) + versionI++ + return mod, version, modDiags + }, + )) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatal("got nil config; want non-nil") + } + + var got []string + cfg.DeepEach(func(c *Config) { + got = append(got, fmt.Sprintf("%s %s", strings.Join(c.Path, "."), c.Version)) + }) + sort.Strings(got) + want := []string{ + " ", + "child_a 1.0.0", + "child_a.child_c 1.0.1", + "child_b 1.0.2", + "child_b.child_c 1.0.3", + } + + if !reflect.DeepEqual(got, want) { + t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } + + if _, exists := cfg.Children["child_a"].Children["child_c"].Module.Outputs["hello"]; !exists { + t.Fatalf("missing output 'hello' in child_a.child_c") + } + if _, exists := cfg.Children["child_b"].Children["child_c"].Module.Outputs["hello"]; !exists { + t.Fatalf("missing output 'hello' in 
child_b.child_c") + } + if cfg.Children["child_a"].Children["child_c"].Module == cfg.Children["child_b"].Children["child_c"].Module { + t.Fatalf("child_a.child_c is same object as child_b.child_c; should not be") + } +} + +func TestBuildConfigDiags(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/nested-errors", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + versionI := 0 + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to our fixture directory. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes OpenTofu supports. + sourcePath := filepath.Join("testdata/nested-errors", req.SourceAddr.String()) + + mod, modDiags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI)) + versionI++ + return mod, version, modDiags + }, + )) + + wantDiag := filepath.FromSlash(`testdata/nested-errors/child_c/child_c.tf:5,1-8: `) + + `Unsupported block type; Blocks of type "invalid" are not expected here.` + assertExactDiagnostics(t, diags, []string{wantDiag}) + + // we should still have module structure loaded + var got []string + cfg.DeepEach(func(c *Config) { + got = append(got, fmt.Sprintf("%s %s", strings.Join(c.Path, "."), c.Version)) + }) + sort.Strings(got) + want := []string{ + " ", + "child_a 1.0.0", + "child_a.child_c 1.0.1", + } + + if !reflect.DeepEqual(got, want) { + t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} + +func TestBuildConfigChildModuleBackend(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/nested-backend-warning", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if 
mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to our fixture directory. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes OpenTofu supports. + sourcePath := filepath.Join("testdata/nested-backend-warning", req.SourceAddr.String()) + + mod, modDiags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion("1.0.0") + return mod, version, modDiags + }, + )) + + assertDiagnosticSummary(t, diags, "Backend configuration ignored") + + // we should still have module structure loaded + var got []string + cfg.DeepEach(func(c *Config) { + got = append(got, fmt.Sprintf("%s %s", strings.Join(c.Path, "."), c.Version)) + }) + sort.Strings(got) + want := []string{ + " ", + "child 1.0.0", + } + + if !reflect.DeepEqual(got, want) { + t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} + +func TestBuildConfigInvalidModules(t *testing.T) { + testDir := "testdata/config-diagnostics" + dirs, err := os.ReadDir(testDir) + if err != nil { + t.Fatal(err) + } + + for _, info := range dirs { + name := info.Name() + t.Run(name, func(t *testing.T) { + parser := NewParser(nil) + path := filepath.Join(testDir, name) + + mod, diags := parser.LoadConfigDirWithTests(path, "tests", RootModuleCallForTesting()) + if diags.HasErrors() { + // these tests should only trigger errors that are caught in + // the config loader. 
+ t.Errorf("error loading config dir") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + readDiags := func(data []byte, _ error) []string { + var expected []string + for _, s := range strings.Split(string(data), "\n") { + msg := strings.TrimSpace(s) + msg = strings.ReplaceAll(msg, `\n`, "\n") + // The filepath preset in testdata with unix-style slash. + // We should from slash to adapt to Linux, Windows and others OS. + msgSplit := strings.SplitN(msg, ":", 2) + if len(msgSplit) == 2 { + msgSplit[0] = filepath.FromSlash(msgSplit[0]) + msg = strings.Join(msgSplit, ":") + } + + if msg != "" { + expected = append(expected, msg) + } + } + return expected + } + + // Load expected errors and warnings. + // Each line in the file is matched as a substring against the + // diagnostic outputs. + // Capturing part of the path and source range in the message lets + // us also ensure the diagnostic is being attributed to the + // expected location in the source, but is not required. + // The literal characters `\n` are replaced with newlines, but + // otherwise the string is unchanged. 
+ expectedErrs := readDiags(os.ReadFile(filepath.Join(testDir, name, "errors"))) + expectedWarnings := readDiags(os.ReadFile(filepath.Join(testDir, name, "warnings"))) + + _, buildDiags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // for simplicity, these tests will treat all source + // addresses as relative to the root module + sourcePath := filepath.Join(path, req.SourceAddr.String()) + mod, diags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion("1.0.0") + return mod, version, diags + }, + )) + + // we can make this less repetitive later if we want + for _, msg := range expectedErrs { + found := false + for _, diag := range buildDiags { + if diag.Severity == hcl.DiagError && strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Expected error diagnostic containing:\n %s", msg) + } + } + + for _, diag := range buildDiags { + if diag.Severity != hcl.DiagError { + continue + } + found := false + for _, msg := range expectedErrs { + if strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Unexpected error:\n %s", diag) + } + } + + for _, msg := range expectedWarnings { + found := false + for _, diag := range buildDiags { + if diag.Severity == hcl.DiagWarning && strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Expected warning diagnostic containing:\n %s", msg) + } + } + + for _, diag := range buildDiags { + if diag.Severity != hcl.DiagWarning { + continue + } + found := false + for _, msg := range expectedWarnings { + if strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Unexpected warning:\n %s", diag) + } + } + + }) + } +} + +func TestBuildConfig_WithNestedTestModules(t *testing.T) { + parser := NewParser(nil) + mod, diags := 
parser.LoadConfigDirWithTests("testdata/valid-modules/with-tests-nested-module", "tests", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + + // Bit of a hack to get the test working, but we know all the source + // addresses in this test are locals, so we can just treat them as + // paths in the filesystem. + + addr := req.SourceAddr.String() + current := req.Parent + for current.SourceAddr != nil { + addr = path.Join(current.SourceAddr.String(), addr) + current = current.Parent + } + sourcePath := filepath.Join("testdata/valid-modules/with-tests-nested-module", addr) + + mod, modDiags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion("1.0.0") + return mod, version, modDiags + }, + )) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatal("got nil config; want non-nil") + } + + // We should have loaded our test case, and one of the test runs should + // have loaded an alternate module. + + if len(cfg.Module.Tests) != 1 { + t.Fatalf("expected exactly one test case but found %d", len(cfg.Module.Tests)) + } + + test := cfg.Module.Tests["main.tftest.hcl"] + if len(test.Runs) != 1 { + t.Fatalf("expected two test runs but found %d", len(test.Runs)) + } + + run := test.Runs[0] + if run.ConfigUnderTest == nil { + t.Fatalf("the first test run should have loaded config but did not") + } + + if run.ConfigUnderTest.Parent != nil { + t.Errorf("config under test should not have a parent") + } + + if run.ConfigUnderTest.Root != run.ConfigUnderTest { + t.Errorf("config under test root should be itself") + } + + if len(run.ConfigUnderTest.Path) > 0 { + t.Errorf("config under test path should be the root module") + } + + // We should also have loaded a single child underneath the config under + // test, and it should have valid paths. 
+ + child := run.ConfigUnderTest.Children["child"] + + if child.Parent != run.ConfigUnderTest { + t.Errorf("child should point back to root") + } + + if len(child.Path) != 1 || child.Path[0] != "child" { + t.Errorf("child should have rebased against virtual root") + } + + if child.Root != run.ConfigUnderTest { + t.Errorf("child root should be main config under test") + } +} + +func TestBuildConfig_WithTestModule(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDirWithTests("testdata/valid-modules/with-tests-module", "tests", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to our fixture directory. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes OpenTofu supports. + sourcePath := filepath.Join("testdata/valid-modules/with-tests-module", req.SourceAddr.String()) + + mod, modDiags := parser.LoadConfigDir(sourcePath, req.Call) + version, _ := version.NewVersion("1.0.0") + return mod, version, modDiags + }, + )) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatal("got nil config; want non-nil") + } + + // We should have loaded our test case, and one of the test runs should + // have loaded an alternate module. 
+ + if len(cfg.Module.Tests) != 1 { + t.Fatalf("expected exactly one test case but found %d", len(cfg.Module.Tests)) + } + + test := cfg.Module.Tests["main.tftest.hcl"] + if len(test.Runs) != 2 { + t.Fatalf("expected two test runs but found %d", len(test.Runs)) + } + + run := test.Runs[0] + if run.ConfigUnderTest == nil { + t.Fatalf("the first test run should have loaded config but did not") + } + + if run.ConfigUnderTest.Parent != nil { + t.Errorf("config under test should not have a parent") + } + + if run.ConfigUnderTest.Root != run.ConfigUnderTest { + t.Errorf("config under test root should be itself") + } + + if len(run.ConfigUnderTest.Path) > 0 { + t.Errorf("config under test path should be the root module") + } +} diff --git a/pkg/configs/config_test.go b/pkg/configs/config_test.go new file mode 100644 index 00000000000..a5f2d05e2f1 --- /dev/null +++ b/pkg/configs/config_test.go @@ -0,0 +1,832 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/zclconf/go-cty/cty" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2/hclsyntax" + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestConfigProviderTypes(t *testing.T) { + // nil cfg should return an empty map + got := NewEmptyConfig().ProviderTypes() + if len(got) != 0 { + t.Fatal("expected empty result from empty config") + } + + cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + got = cfg.ProviderTypes() + want := []addrs.Provider{ + addrs.NewDefaultProvider("aws"), + addrs.NewDefaultProvider("local"), + addrs.NewDefaultProvider("null"), + addrs.NewDefaultProvider("template"), + addrs.NewDefaultProvider("test"), + } + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestConfigProviderTypes_nested(t *testing.T) { + // basic test with a nil config + c := NewEmptyConfig() + got := c.ProviderTypes() + if len(got) != 0 { + t.Fatalf("wrong result!\ngot: %#v\nwant: nil\n", got) + } + + // config with two provider sources, and one implicit (default) provider + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + got = cfg.ProviderTypes() + want := []addrs.Provider{ + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test"), + addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), + addrs.NewDefaultProvider("test"), + } 
+ + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestConfigResolveAbsProviderAddr(t *testing.T) { + cfg, diags := testModuleConfigFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + t.Run("already absolute", func(t *testing.T) { + addr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + if got, want := got.String(), addr.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("local, implied mapping", func(t *testing.T) { + addr := addrs.LocalProviderConfig{ + LocalName: "implied", + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + want := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("implied"), + Alias: "boop", + } + if got, want := got.String(), want.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("local, explicit mapping", func(t *testing.T) { + addr := addrs.LocalProviderConfig{ + LocalName: "foo-test", // this is explicitly set in the config + Alias: "boop", + } + got := cfg.ResolveAbsProviderAddr(addr, addrs.RootModule) + want := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test"), + Alias: "boop", + } + if got, want := got.String(), want.String(); got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestConfigProviderRequirements(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + got, diags := cfg.ProviderRequirements() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + // the nullProvider constraints from the two modules are merged + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + happycloudProvider: nil, + terraformProvider: nil, + grandchildProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsInclTests(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDirWithTests(t, "testdata/provider-reqs-with-tests") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + + got, diags := cfg.ProviderRequirements() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + // the nullProvider constraints from the two modules are merged + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsDuplicate(t *testing.T) { + _, diags := testNestedModuleConfigFromDir(t, "testdata/duplicate-local-name") + assertDiagnosticCount(t, diags, 3) + assertDiagnosticSummary(t, diags, "Duplicate required provider") +} + +func TestConfigProviderRequirementsShallow(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + + got, diags := cfg.ProviderRequirementsShallow() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + // the nullProvider constraint is only from the root module + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsShallowInclTests(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDirWithTests(t, "testdata/provider-reqs-with-tests") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + + got, diags := cfg.ProviderRequirementsShallow() + assertNoDiagnostics(t, diags) + want := getproviders.Requirements{ + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsByModule(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + got, diags := cfg.ProviderRequirementsByModule() + assertNoDiagnostics(t, diags) + want := &ModuleRequirements{ + Name: "", + SourceAddr: nil, + SourceDir: "testdata/provider-reqs", + Requirements: getproviders.Requirements{ + // Only the root module's version is present here + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + impliedProvider: nil, + terraformProvider: nil, + }, + Children: map[string]*ModuleRequirements{ + "kinder": { + Name: "kinder", + SourceAddr: addrs.ModuleSourceLocal("./child"), + SourceDir: filepath.FromSlash("testdata/provider-reqs/child"), + Requirements: getproviders.Requirements{ + nullProvider: getproviders.MustParseVersionConstraints("= 2.0.1"), + happycloudProvider: nil, + }, + Children: map[string]*ModuleRequirements{ + "nested": { + Name: "nested", + SourceAddr: addrs.ModuleSourceLocal("./grandchild"), + SourceDir: filepath.FromSlash("testdata/provider-reqs/child/grandchild"), + Requirements: getproviders.Requirements{ + grandchildProvider: nil, + }, + Children: 
map[string]*ModuleRequirements{}, + Tests: make(map[string]*TestFileModuleRequirements), + }, + }, + Tests: make(map[string]*TestFileModuleRequirements), + }, + }, + Tests: make(map[string]*TestFileModuleRequirements), + } + + ignore := cmpopts.IgnoreUnexported(version.Constraint{}, cty.Value{}, hclsyntax.Body{}) + if diff := cmp.Diff(want, got, ignore); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestConfigProviderRequirementsByModuleInclTests(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDirWithTests(t, "testdata/provider-reqs-with-tests") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. + // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + terraformProvider := addrs.NewBuiltInProvider("terraform") + configuredProvider := addrs.NewDefaultProvider("configured") + + got, diags := cfg.ProviderRequirementsByModule() + assertNoDiagnostics(t, diags) + want := &ModuleRequirements{ + Name: "", + SourceAddr: nil, + SourceDir: "testdata/provider-reqs-with-tests", + Requirements: getproviders.Requirements{ + // Only the root module's version is present here + tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"), + impliedProvider: nil, + terraformProvider: nil, + }, + Children: make(map[string]*ModuleRequirements), + Tests: map[string]*TestFileModuleRequirements{ + "provider-reqs-root.tftest.hcl": { + Requirements: getproviders.Requirements{ + configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"), + }, + 
Runs: map[string]*ModuleRequirements{ + "setup": { + Name: "setup", + SourceAddr: addrs.ModuleSourceLocal("./setup"), + SourceDir: filepath.FromSlash("testdata/provider-reqs-with-tests/setup"), + Requirements: getproviders.Requirements{ + nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"), + randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"), + }, + Children: make(map[string]*ModuleRequirements), + Tests: make(map[string]*TestFileModuleRequirements), + }, + }, + }, + }, + } + + ignore := cmpopts.IgnoreUnexported(version.Constraint{}, cty.Value{}, hclsyntax.Body{}) + if diff := cmp.Diff(want, got, ignore); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} + +func TestVerifyDependencySelections(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/provider-reqs") + // TODO: Version Constraint Deprecation. + // Once we've removed the version argument from provider configuration + // blocks, this can go back to expected 0 diagnostics. 
+ // assertNoDiagnostics(t, diags) + assertDiagnosticCount(t, diags, 1) + assertDiagnosticSummary(t, diags, "Version constraints inside provider configuration blocks are deprecated") + + tlsProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, + "hashicorp", "tls", + ) + happycloudProvider := addrs.NewProvider( + svchost.Hostname("tf.example.com"), + "awesomecorp", "happycloud", + ) + nullProvider := addrs.NewDefaultProvider("null") + randomProvider := addrs.NewDefaultProvider("random") + impliedProvider := addrs.NewDefaultProvider("implied") + configuredProvider := addrs.NewDefaultProvider("configured") + grandchildProvider := addrs.NewDefaultProvider("grandchild") + + tests := map[string]struct { + PrepareLocks func(*depsfile.Locks) + WantErrs []string + }{ + "empty locks": { + func(*depsfile.Locks) { + // Intentionally blank + }, + []string{ + `provider registry.opentofu.org/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/tls: required by this configuration but no version is selected`, + `provider tf.example.com/awesomecorp/happycloud: required by this configuration but no version is selected`, + }, + }, + "suitable locks": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + 
locks.SetProvider(nullProvider, getproviders.MustParseVersion("2.0.1"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + nil, + }, + "null provider constraints changed": { + func(locks *depsfile.Locks) { + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.opentofu.org/hashicorp/null: locked version selection 3.0.0 doesn't match the updated version constraints "~> 2.0.0, 2.0.1"`, + }, + }, + "null provider lock changed": { + func(locks *depsfile.Locks) { + // In this case, we set the lock file version constraints to + // match the configuration, and so our error message changes + // to not assume the configuration changed anymore. 
+ locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"), nil) + + locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil) + locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil) + locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil) + locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil) + locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil) + locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil) + }, + []string{ + `provider registry.opentofu.org/hashicorp/null: version constraints "~> 2.0.0, 2.0.1" don't match the locked version selection 3.0.0`, + }, + }, + "overridden provider": { + func(locks *depsfile.Locks) { + locks.SetProviderOverridden(happycloudProvider) + }, + []string{ + // We still catch all of the other ones, because only happycloud was overridden + `provider registry.opentofu.org/hashicorp/configured: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/grandchild: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/implied: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/random: required by this configuration but no version is selected`, + `provider registry.opentofu.org/hashicorp/tls: required by this configuration but no version is selected`, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + depLocks := depsfile.NewLocks() + test.PrepareLocks(depLocks) + gotErrs := cfg.VerifyDependencySelections(depLocks) + + var gotErrsStr []string + if gotErrs != nil { + gotErrsStr = make([]string, 
len(gotErrs)) + for i, err := range gotErrs { + gotErrsStr[i] = err.Error() + } + } + + if diff := cmp.Diff(test.WantErrs, gotErrsStr); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + }) + } +} + +func TestConfigProviderForConfigAddr(t *testing.T) { + cfg, diags := testModuleConfigFromDir("testdata/valid-modules/providers-fqns") + assertNoDiagnostics(t, diags) + + got := cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("foo-test")) + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if !got.Equals(want) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + + // now check a provider that isn't in the configuration. It should return a DefaultProvider. + got = cfg.ProviderForConfigAddr(addrs.NewDefaultLocalProviderConfig("bar-test")) + want = addrs.NewDefaultProvider("bar-test") + if !got.Equals(want) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } +} + +func TestConfigAddProviderRequirements(t *testing.T) { + cfg, diags := testModuleConfigFromFile("testdata/valid-files/providers-explicit-implied.tf") + assertNoDiagnostics(t, diags) + + reqs := getproviders.Requirements{ + addrs.NewDefaultProvider("null"): nil, + } + diags = cfg.addProviderRequirements(reqs, true, false) + assertNoDiagnostics(t, diags) +} + +func TestConfigImportProviderClashesWithModules(t *testing.T) { + src, err := os.ReadFile("testdata/invalid-import-files/import-and-module-clash.tf") + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + "main.tf": string(src), + }) + + _, diags := parser.LoadConfigFile("main.tf") + assertExactDiagnostics(t, diags, []string{ + `main.tf:9,3-19: Invalid import provider argument; The provider argument can only be specified in import blocks that will generate configuration. 
+ +Use the providers argument within the module block to configure providers for all resources within a module, including imported resources.`, + }) +} + +func TestConfigImportProviderClashesWithResources(t *testing.T) { + cfg, diags := testModuleConfigFromFile("testdata/invalid-import-files/import-and-resource-clash.tf") + assertNoDiagnostics(t, diags) + + diags = cfg.addProviderRequirements(getproviders.Requirements{}, true, false) + assertExactDiagnostics(t, diags, []string{ + `testdata/invalid-import-files/import-and-resource-clash.tf:9,3-19: Invalid import provider argument; The provider argument can only be specified in import blocks that will generate configuration. + +Use the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.`, + }) +} + +func TestConfigImportProviderWithNoResourceProvider(t *testing.T) { + cfg, diags := testModuleConfigFromFile("testdata/invalid-import-files/import-and-no-resource.tf") + assertNoDiagnostics(t, diags) + + diags = cfg.addProviderRequirements(getproviders.Requirements{}, true, false) + assertExactDiagnostics(t, diags, []string{ + `testdata/invalid-import-files/import-and-no-resource.tf:5,3-19: Invalid import provider argument; The provider argument can only be specified in import blocks that will generate configuration. 
+ +Use the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.`, + }) +} + +func TestTransformForTest(t *testing.T) { + + str := func(providers map[string]string) string { + var buffer bytes.Buffer + for key, config := range providers { + buffer.WriteString(fmt.Sprintf("%s: %s\n", key, config)) + } + return buffer.String() + } + + convertToProviders := func(t *testing.T, contents map[string]string) map[string]*Provider { + t.Helper() + + providers := make(map[string]*Provider) + for key, content := range contents { + parser := hclparse.NewParser() + file, diags := parser.ParseHCL([]byte(content), fmt.Sprintf("%s.hcl", key)) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + provider := &Provider{ + Config: file.Body, + } + + parts := strings.Split(key, ".") + provider.Name = parts[0] + if len(parts) > 1 { + provider.Alias = parts[1] + } + + providers[key] = provider + } + return providers + } + + validate := func(t *testing.T, msg string, expected map[string]string, actual map[string]*Provider) { + t.Helper() + + converted := make(map[string]string) + for key, provider := range actual { + content, err := provider.Config.Content(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "source", Required: true}, + }, + }) + if err != nil { + t.Fatal(err) + } + + source, diags := content.Attributes["source"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + converted[key] = fmt.Sprintf("source = %q", source.AsString()) + } + + if diff := cmp.Diff(expected, converted); len(diff) > 0 { + t.Errorf("%s\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", msg, str(expected), str(converted), diff) + } + } + + tcs := map[string]struct { + configProviders map[string]string + fileProviders map[string]string + runProviders []PassedProviderConfig + expectedProviders map[string]string + expectedErrors []string + }{ + "empty": { + configProviders: make(map[string]string), + 
expectedProviders: make(map[string]string), + }, + "only providers in config": { + configProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"config\"", + }, + expectedProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"config\"", + }, + }, + "only providers in test file": { + configProviders: make(map[string]string), + fileProviders: map[string]string{ + "foo": "source = \"testfile\"", + "bar": "source = \"testfile\"", + }, + expectedProviders: map[string]string{ + "foo": "source = \"testfile\"", + "bar": "source = \"testfile\"", + }, + }, + "only providers in run block": { + configProviders: make(map[string]string), + runProviders: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "foo", + }, + InParent: &ProviderConfigRef{ + Name: "bar", + }, + }, + }, + expectedProviders: make(map[string]string), + expectedErrors: []string{ + ":0,0-0: Missing provider definition for bar; This provider block references a provider definition that does not exist.", + }, + }, + "subset of providers in test file": { + configProviders: make(map[string]string), + fileProviders: map[string]string{ + "bar": "source = \"testfile\"", + }, + runProviders: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "foo", + }, + InParent: &ProviderConfigRef{ + Name: "bar", + }, + }, + }, + expectedProviders: map[string]string{ + "foo": "source = \"testfile\"", + }, + }, + "overrides providers in config": { + configProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"config\"", + }, + fileProviders: map[string]string{ + "bar": "source = \"testfile\"", + }, + expectedProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"testfile\"", + }, + }, + "overrides subset of providers in config": { + configProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"config\"", + }, + fileProviders: map[string]string{ + "foo": "source = 
\"testfile\"", + "bar": "source = \"testfile\"", + }, + runProviders: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "bar", + }, + InParent: &ProviderConfigRef{ + Name: "bar", + }, + }, + }, + expectedProviders: map[string]string{ + "foo": "source = \"config\"", + "bar": "source = \"testfile\"", + }, + }, + "handles aliases": { + configProviders: map[string]string{ + "foo.primary": "source = \"config\"", + "foo.secondary": "source = \"config\"", + }, + fileProviders: map[string]string{ + "foo": "source = \"testfile\"", + }, + runProviders: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "foo.secondary", + }, + InParent: &ProviderConfigRef{ + Name: "foo", + }, + }, + }, + expectedProviders: map[string]string{ + "foo.primary": "source = \"config\"", + "foo.secondary": "source = \"testfile\"", + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + config := &Config{ + Module: &Module{ + ProviderConfigs: convertToProviders(t, tc.configProviders), + }, + } + + file := &TestFile{ + Providers: convertToProviders(t, tc.fileProviders), + } + + run := &TestRun{ + Providers: tc.runProviders, + } + + reset, diags := config.TransformForTest(run, file) + + var actualErrs []string + for _, err := range diags.Errs() { + actualErrs = append(actualErrs, err.Error()) + } + if diff := cmp.Diff(actualErrs, tc.expectedErrors, cmpopts.IgnoreUnexported()); len(diff) > 0 { + t.Errorf("unmatched errors\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", strings.Join(tc.expectedErrors, "\n"), strings.Join(actualErrs, "\n"), diff) + } + + validate(t, "after transform mismatch", tc.expectedProviders, config.Module.ProviderConfigs) + reset() + validate(t, "after reset mismatch", tc.configProviders, config.Module.ProviderConfigs) + + }) + } +} diff --git a/pkg/configs/configload/doc.go b/pkg/configs/configload/doc.go new file mode 100644 index 00000000000..8f816de0c7f --- /dev/null +++ b/pkg/configs/configload/doc.go @@ -0,0 +1,9 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package configload knows how to install modules into the .terraform/modules +// directory and to load modules from those installed locations. It is used +// in conjunction with the LoadConfig function in the parent package. +package configload diff --git a/pkg/configs/configload/loader.go b/pkg/configs/configload/loader.go new file mode 100644 index 00000000000..41c599db707 --- /dev/null +++ b/pkg/configs/configload/loader.go @@ -0,0 +1,169 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/spf13/afero" +) + +// A Loader instance is the main entry-point for loading configurations via +// this package. +// +// It extends the general config-loading functionality in the parent package +// "configs" to support installation of modules from remote sources and +// loading full configurations using modules that were previously installed. +type Loader struct { + // parser is used to read configuration + parser *configs.Parser + + // modules is used to install and locate descendent modules that are + // referenced (directly or indirectly) from the root module. + modules moduleMgr +} + +// Config is used with NewLoader to specify configuration arguments for the +// loader. +type Config struct { + // ModulesDir is a path to a directory where descendent modules are + // (or should be) installed. (This is usually the + // .terraform/modules directory, in the common case where this package + // is being loaded from the main OpenTofu CLI package.) 
+ ModulesDir string + + // Services is the service discovery client to use when locating remote + // module registry endpoints. If this is nil then registry sources are + // not supported, which should be true only in specialized circumstances + // such as in tests. + Services *disco.Disco +} + +// NewLoader creates and returns a loader that reads configuration from the +// real OS filesystem. +// +// The loader has some internal state about the modules that are currently +// installed, which is read from disk as part of this function. If that +// manifest cannot be read then an error will be returned. +func NewLoader(config *Config) (*Loader, error) { + fs := afero.NewOsFs() + parser := configs.NewParser(fs) + reg := registry.NewClient(config.Services, nil) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: true, + Dir: config.ModulesDir, + Services: config.Services, + Registry: reg, + }, + } + + err := ret.modules.readModuleManifestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %w", err) + } + + return ret, nil +} + +// ModulesDir returns the path to the directory where the loader will look for +// the local cache of remote module packages. +func (l *Loader) ModulesDir() string { + return l.modules.Dir +} + +// RefreshModules updates the in-memory cache of the module manifest from the +// module manifest file on disk. This is not necessary in normal use because +// module installation and configuration loading are separate steps, but it +// can be useful in tests where module installation is done as a part of +// configuration loading by a helper function. +// +// Call this function after any module installation where an existing loader +// is already alive and may be used again later. +// +// An error is returned if the manifest file cannot be read. +func (l *Loader) RefreshModules() error { + if l == nil { + // Nothing to do, then. 
+ return nil + } + return l.modules.readModuleManifestSnapshot() +} + +// Parser returns the underlying parser for this loader. +// +// This is useful for loading other sorts of files than the module directories +// that a loader deals with, since then they will share the source code cache +// for this loader and can thus be shown as snippets in diagnostic messages. +func (l *Loader) Parser() *configs.Parser { + return l.parser +} + +// Sources returns the source code cache for the underlying parser of this +// loader. This is a shorthand for l.Parser().Sources(). +func (l *Loader) Sources() map[string]*hcl.File { + return l.parser.Sources() +} + +// IsConfigDir returns true if and only if the given directory contains at +// least one OpenTofu configuration file. This is a wrapper around calling +// the same method name on the loader's parser. +func (l *Loader) IsConfigDir(path string) bool { + return l.parser.IsConfigDir(path) +} + +// ImportSources writes into the receiver's source code map the given source +// code buffers. +// +// This is useful in the situation where an ancillary loader is created for +// some reason (e.g. loading config from a plan file) but the cached source +// code from that loader must be imported into the "main" loader in order +// to return source code snapshots in diagnostic messages. +// +// loader.ImportSources(otherLoader.Sources()) +func (l *Loader) ImportSources(sources map[string][]byte) { + p := l.Parser() + for name, src := range sources { + p.ForceFileSource(name, src) + } +} + +// ImportSourcesFromSnapshot writes into the receiver's source code the +// source files from the given snapshot. +// +// This is similar to ImportSources but knows how to unpack and flatten a +// snapshot data structure to get the corresponding flat source file map. 
+func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) { + p := l.Parser() + for _, m := range snap.Modules { + baseDir := m.Dir + for fn, src := range m.Files { + fullPath := filepath.Join(baseDir, fn) + p.ForceFileSource(fullPath, src) + } + } +} + +// AllowLanguageExperiments specifies whether subsequent LoadConfig (and +// similar) calls will allow opting in to experimental language features. +// +// If this method is never called for a particular loader, the default behavior +// is to disallow language experiments. +// +// Main code should set this only for alpha or development builds. Test code +// is responsible for deciding for itself whether and how to call this +// method. +func (l *Loader) AllowLanguageExperiments(allowed bool) { + l.parser.AllowLanguageExperiments(allowed) +} diff --git a/pkg/configs/configload/loader_load.go b/pkg/configs/configload/loader_load.go new file mode 100644 index 00000000000..557211bfe70 --- /dev/null +++ b/pkg/configs/configload/loader_load.go @@ -0,0 +1,128 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/configs" +) + +// LoadConfig reads the OpenTofu module in the given directory and uses it as the +// root module to build the static module tree that represents a configuration, +// assuming that all required descendent modules have already been installed. +// +// If error diagnostics are returned, the returned configuration may be either +// nil or incomplete. In the latter case, cautious static analysis is possible +// in spite of the errors. 
+// +// LoadConfig performs the basic syntax and uniqueness validations that are +// required to process the individual modules +func (l *Loader) LoadConfig(rootDir string, call configs.StaticModuleCall) (*configs.Config, hcl.Diagnostics) { + return l.loadConfig(l.parser.LoadConfigDir(rootDir, call)) +} + +// LoadConfigWithTests matches LoadConfig, except the configs.Config contains +// any relevant .tftest.hcl files. +func (l *Loader) LoadConfigWithTests(rootDir string, testDir string, call configs.StaticModuleCall) (*configs.Config, hcl.Diagnostics) { + return l.loadConfig(l.parser.LoadConfigDirWithTests(rootDir, testDir, call)) +} + +func (l *Loader) loadConfig(rootMod *configs.Module, diags hcl.Diagnostics) (*configs.Config, hcl.Diagnostics) { + if rootMod == nil || diags.HasErrors() { + // Ensure we return any parsed modules here so that required_version + // constraints can be verified even when encountering errors. + cfg := &configs.Config{ + Module: rootMod, + } + + return cfg, diags + } + + cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) + diags = append(diags, cDiags...) + + return cfg, diags +} + +// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that +// are presumed to have already been installed. +func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + // Since we're just loading here, we expect that all referenced modules + // will be already installed and described in our manifest. However, we + // do verify that the manifest and the configuration are in agreement + // so that we can prompt the user to run "tofu init" if not. + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: "This module is not yet installed. 
Run \"tofu init\" to install all modules required by this configuration.", + Subject: &req.CallRange, + }, + } + } + + var diags hcl.Diagnostics + + // Check for inconsistencies between manifest and config. + + // We ignore a nil SourceAddr here, which represents a failure during + // configuration parsing, and will be reported in a diagnostic elsewhere. + if req.SourceAddr != nil && req.SourceAddr.String() != record.SourceAddr { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module source has changed", + Detail: fmt.Sprintf("The source address was changed from %q to %q since this module was installed. Run \"tofu init\" to install all modules required by this configuration.", record.SourceAddr, req.SourceAddr.String()), + Subject: &req.SourceAddrRange, + }) + } + if len(req.VersionConstraint.Required) > 0 && record.Version == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"tofu init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: fmt.Sprintf( + "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"tofu init\" to install all modules required by this configuration.", + record.Version, + ), + Subject: &req.SourceAddrRange, + }) + } + + mod, mDiags := l.parser.LoadConfigDir(record.Dir, req.Call) + diags = append(diags, mDiags...) 
+ if mod == nil { + // nil specifically indicates that the directory does not exist or + // cannot be read, so in this case we'll discard any generic diagnostics + // returned from LoadConfigDir and produce our own context-sensitive + // error message. + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"tofu init\" to install all modules required by this configuration.", record.Dir), + Subject: &req.CallRange, + }, + } + } + + return mod, record.Version, diags +} diff --git a/pkg/configs/configload/loader_load_test.go b/pkg/configs/configload/loader_load_test.go new file mode 100644 index 00000000000..df773a91370 --- /dev/null +++ b/pkg/configs/configload/loader_load_test.go @@ -0,0 +1,212 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs" +) + +func TestLoaderLoadConfig_okay(t *testing.T) { + fixtureDir := filepath.Clean("testdata/already-installed") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + cfg, diags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatalf("config is nil; want non-nil") + } + + var gotPaths []string + cfg.DeepEach(func(c *configs.Config) { + gotPaths = append(gotPaths, strings.Join(c.Path, ".")) + }) + sort.Strings(gotPaths) + wantPaths := []string{ + "", // root module + "child_a", + "child_a.child_c", + "child_b", + "child_b.child_d", + } + + if !reflect.DeepEqual(gotPaths, 
wantPaths) { + t.Fatalf("wrong module paths\ngot: %swant %s", spew.Sdump(gotPaths), spew.Sdump(wantPaths)) + } + + t.Run("child_a.child_c output", func(t *testing.T) { + output := cfg.Children["child_a"].Children["child_c"].Module.Outputs["hello"] + got, diags := output.Expr.Value(nil) + assertNoDiagnostics(t, diags) + assertResultCtyEqual(t, got, cty.StringVal("Hello from child_c")) + }) + t.Run("child_b.child_d output", func(t *testing.T) { + output := cfg.Children["child_b"].Children["child_d"].Module.Outputs["hello"] + got, diags := output.Expr.Value(nil) + assertNoDiagnostics(t, diags) + assertResultCtyEqual(t, got, cty.StringVal("Hello from child_d")) + }) +} + +func TestLoaderLoadConfig_addVersion(t *testing.T) { + // This test is for what happens when there is a version constraint added + // to a module that previously didn't have one. + fixtureDir := filepath.Clean("testdata/add-version-constraint") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + _, diags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Fatalf("success; want error") + } + got := diags.Error() + want := "Module version requirements have changed" + if !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot:\n%s\n\nwant: containing %q", got, want) + } +} + +func TestLoaderLoadConfig_loadDiags(t *testing.T) { + // building a config which didn't load correctly may cause configs to panic + fixtureDir := filepath.Clean("testdata/invalid-names") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + cfg, diags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Fatal("success; want error") + } + + if cfg == nil { + t.Fatal("partial 
config not returned with diagnostics") + } + + if cfg.Module == nil { + t.Fatal("expected config module") + } +} + +func TestLoaderLoadConfig_loadDiagsFromSubmodules(t *testing.T) { + // building a config which didn't load correctly may cause configs to panic + fixtureDir := filepath.Clean("testdata/invalid-names-in-submodules") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + cfg, diags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Fatalf("loading succeeded; want an error") + } + if got, want := diags.Error(), " Invalid provider local name"; !strings.Contains(got, want) { + t.Errorf("missing expected error\nwant substring: %s\ngot: %s", want, got) + } + + if cfg == nil { + t.Fatal("partial config not returned with diagnostics") + } + + if cfg.Module == nil { + t.Fatal("expected config module") + } +} + +func TestLoaderLoadConfig_childProviderGrandchildCount(t *testing.T) { + // This test is focused on the specific situation where: + // - A child module contains a nested provider block, which is no longer + // recommended but supported for backward-compatibility. + // - A child of that child does _not_ contain a nested provider block, + // and is called with "count" (would also apply to "for_each" and + // "depends_on"). + // It isn't valid to use "count" with a module that _itself_ contains + // a provider configuration, but it _is_ valid for a module with a + // provider configuration to call another module with count. 
We previously + // botched this rule and so this is a regression test to cover the + // solution to that mistake: + // https://github.com/hashicorp/terraform/issues/31081 + + // Since this test is based on success rather than failure and it's + // covering a relatively large set of code where only a small part + // contributes to the test, we'll make sure to test both the success and + // failure cases here so that we'll have a better chance of noticing if a + // future change makes this succeed only because we've reorganized the code + // so that the check isn't happening at all anymore. + // + // If the "not okay" subtest fails, you should also be skeptical about + // whether the "okay" subtest is still valid, even if it happens to + // still be passing. + t.Run("okay", func(t *testing.T) { + fixtureDir := filepath.Clean("testdata/child-provider-grandchild-count") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + cfg, diags := loader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if cfg == nil { + t.Fatalf("config is nil; want non-nil") + } + + var gotPaths []string + cfg.DeepEach(func(c *configs.Config) { + gotPaths = append(gotPaths, strings.Join(c.Path, ".")) + }) + sort.Strings(gotPaths) + wantPaths := []string{ + "", // root module + "child", + "child.grandchild", + } + + if !reflect.DeepEqual(gotPaths, wantPaths) { + t.Fatalf("wrong module paths\ngot: %swant %s", spew.Sdump(gotPaths), spew.Sdump(wantPaths)) + } + }) + t.Run("not okay", func(t *testing.T) { + fixtureDir := filepath.Clean("testdata/child-provider-child-count") + loader, err := NewLoader(&Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), + }) + if err != nil { + t.Fatalf("unexpected error from NewLoader: %s", err) + } + + _, diags := loader.LoadConfig(fixtureDir, 
configs.RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Fatalf("loading succeeded; want an error") + } + if got, want := diags.Error(), "Module is incompatible with count, for_each, and depends_on"; !strings.Contains(got, want) { + t.Errorf("missing expected error\nwant substring: %s\ngot: %s", want, got) + } + }) + +} diff --git a/pkg/configs/configload/loader_snapshot.go b/pkg/configs/configload/loader_snapshot.go new file mode 100644 index 00000000000..a34c54c1e94 --- /dev/null +++ b/pkg/configs/configload/loader_snapshot.go @@ -0,0 +1,513 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/modsdir" + "github.com/spf13/afero" +) + +// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously +// creates an in-memory snapshot of the configuration files used, which can +// be later used to create a loader that may read only from this snapshot. +func (l *Loader) LoadConfigWithSnapshot(rootDir string, call configs.StaticModuleCall) (*configs.Config, *Snapshot, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir, call) + if rootMod == nil { + return nil, nil, diags + } + + snap := &Snapshot{ + Modules: map[string]*SnapshotModule{}, + } + walker := l.makeModuleWalkerSnapshot(snap) + cfg, cDiags := configs.BuildConfig(rootMod, walker) + diags = append(diags, cDiags...) + + addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) + diags = append(diags, addDiags...) + + return cfg, snap, diags +} + +// NewLoaderFromSnapshot creates a Loader that reads files only from the +// given snapshot. 
+// +// A snapshot-based loader cannot install modules, so calling InstallModules +// on the return value will cause a panic. +// +// A snapshot-based loader also has access only to configuration files. Its +// underlying parser does not have access to other files in the native +// filesystem, such as values files. For those, either use a normal loader +// (created by NewLoader) or use the configs.Parser API directly. +func NewLoaderFromSnapshot(snap *Snapshot) *Loader { + fs := snapshotFS{snap} + parser := configs.NewParser(fs) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: false, + manifest: snap.moduleManifest(), + }, + } + + return ret +} + +// Snapshot is an in-memory representation of the source files from a +// configuration, which can be used as an alternative configurations source +// for a loader with NewLoaderFromSnapshot. +// +// The primary purpose of a Snapshot is to build the configuration portion +// of a plan file (see ../../plans/planfile) so that it can later be reloaded +// and used to recover the exact configuration that the plan was built from. +type Snapshot struct { + // Modules is a map from opaque module keys (suitable for use as directory + // names on all supported operating systems) to the snapshot information + // about each module. + Modules map[string]*SnapshotModule +} + +// NewEmptySnapshot constructs and returns a snapshot containing only an empty +// root module. This is not useful for anything except placeholders in tests. +func NewEmptySnapshot() *Snapshot { + return &Snapshot{ + Modules: map[string]*SnapshotModule{ + "": &SnapshotModule{ + Files: map[string][]byte{}, + }, + }, + } +} + +// SnapshotModule represents a single module within a Snapshot. +type SnapshotModule struct { + // Dir is the path, relative to the root directory given when the + // snapshot was created, where the module appears in the snapshot's + // virtual filesystem. 
+	Dir string
+
+	// Files is a map from each configuration file filename for the
+	// module to a raw byte representation of the source file contents.
+	Files map[string][]byte
+
+	// SourceAddr is the source address given for this module in configuration.
+	SourceAddr string `json:"Source"`
+
+	// Version is the version of the module that is installed, or nil if
+	// the module is installed from a source that does not support versions.
+	Version *version.Version `json:"-"`
+}
+
+// moduleManifest constructs a module manifest based on the contents of
+// the receiving snapshot.
+func (s *Snapshot) moduleManifest() modsdir.Manifest {
+	ret := make(modsdir.Manifest)
+
+	for k, modSnap := range s.Modules {
+		ret[k] = modsdir.Record{
+			Key:        k,
+			Dir:        modSnap.Dir,
+			SourceAddr: modSnap.SourceAddr,
+			Version:    modSnap.Version,
+		}
+	}
+
+	return ret
+}
+
+// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit
+// the same lookup behaviors as l.moduleWalkerLoad but will additionally write
+// source files from the referenced modules into the given snapshot.
+func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker {
+	return configs.ModuleWalkerFunc(
+		func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
+			mod, v, diags := l.moduleWalkerLoad(req)
+			if diags.HasErrors() {
+				return mod, v, diags
+			}
+
+			key := l.modules.manifest.ModuleKey(req.Path)
+			record, exists := l.modules.manifest[key]
+
+			if !exists {
+				// Should never happen, since otherwise moduleWalkerLoad would've
+				// returned an error and we would've returned already.
+				panic(fmt.Sprintf("module %s is not present in manifest", key))
+			}
+
+			addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version)
+			diags = append(diags, addDiags...)
+ + return mod, v, diags + }, + ) +} + +func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { + var diags hcl.Diagnostics + + primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) + if moreDiags.HasErrors() { + // Any diagnostics we get here should be already present + // in diags, so it's weird if we get here but we'll allow it + // and return a general error message in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read directory for module", + Detail: fmt.Sprintf("The source directory %s could not be read", dir), + }) + return diags + } + + snapMod := &SnapshotModule{ + Dir: dir, + Files: map[string][]byte{}, + SourceAddr: sourceAddr, + Version: v, + } + + files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) + files = append(files, primaryFiles...) + files = append(files, overrideFiles...) + sources := l.Sources() // should be populated with all the files we need by now + for _, filePath := range files { + filename := filepath.Base(filePath) + src, exists := sources[filePath] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing source file for snapshot", + Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), + }) + continue + } + snapMod.Files[filepath.Clean(filename)] = src.Bytes + } + + snap.Modules[key] = snapMod + + return diags +} + +// snapshotFS is an implementation of afero.Fs that reads from a snapshot. +// +// This is not intended as a general-purpose filesystem implementation. Instead, +// it just supports the minimal functionality required to support the +// configuration loader and parser as an implementation detail of creating +// a loader from a snapshot. 
+type snapshotFS struct { + snap *Snapshot +} + +var _ afero.Fs = snapshotFS{} + +func (fs snapshotFS) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("cannot create file inside configuration snapshot") +} + +func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directory inside configuration snapshot") +} + +func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directories inside configuration snapshot") +} + +func (fs snapshotFS) Open(name string) (afero.File, error) { + + // Our "filesystem" is sparsely populated only with the directories + // mentioned by modules in our snapshot, so the high-level process + // for opening a file is: + // - Find the module snapshot corresponding to the containing directory + // - Find the file within that snapshot + // - Wrap the resulting byte slice in a snapshotFile to return + // + // The other possibility handled here is if the given name is for the + // module directory itself, in which case we'll return a snapshotDir + // instead. + // + // This function doesn't try to be incredibly robust in supporting + // different permutations of paths, etc because in practice we only + // need to support the path forms that our own loader and parser will + // generate. + + dir := filepath.Dir(name) + fn := filepath.Base(name) + directDir := filepath.Clean(name) + + // First we'll check to see if this is an exact path for a module directory. + // We need to do this first (rather than as part of the next loop below) + // because a module in a child directory of another module can otherwise + // appear to be a file in that parent directory. 
+ for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == directDir { + // We've matched the module directory itself + filenames := make([]string, 0, len(candidate.Files)) + for n := range candidate.Files { + filenames = append(filenames, n) + } + sort.Strings(filenames) + return &snapshotDir{ + filenames: filenames, + }, nil + } + } + + // If we get here then the given path isn't a module directory exactly, so + // we'll treat it as a file path and try to find a module directory it + // could be located in. + var modSnap *SnapshotModule + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == dir { + modSnap = candidate + break + } + } + if modSnap == nil { + return nil, os.ErrNotExist + } + + src, exists := modSnap.Files[fn] + if !exists { + return nil, os.ErrNotExist + } + + return &snapshotFile{ + src: src, + }, nil +} + +func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return fs.Open(name) +} + +func (fs snapshotFS) Remove(name string) error { + return fmt.Errorf("cannot remove file inside configuration snapshot") +} + +func (fs snapshotFS) RemoveAll(path string) error { + return fmt.Errorf("cannot remove files inside configuration snapshot") +} + +func (fs snapshotFS) Rename(old, new string) error { + return fmt.Errorf("cannot rename file inside configuration snapshot") +} + +func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + _, isDir := f.(*snapshotDir) + return snapshotFileInfo{ + name: filepath.Base(name), + isDir: isDir, + }, nil +} + +func (fs snapshotFS) Name() string { + return "ConfigSnapshotFS" +} + +func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("cannot set file mode inside configuration snapshot") +} + +func (snapshotFS) Chown(name string, uid int, gid int) error { + return fmt.Errorf("cannot set 
file owner inside configuration snapshot") +} + +func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("cannot set file times inside configuration snapshot") +} + +type snapshotFile struct { + snapshotFileStub + src []byte + at int64 +} + +var _ afero.File = (*snapshotFile)(nil) + +func (f *snapshotFile) Read(p []byte) (n int, err error) { + if len(p) > 0 && f.at == int64(len(f.src)) { + return 0, io.EOF + } + if f.at > int64(len(f.src)) { + return 0, io.ErrUnexpectedEOF + } + if int64(len(f.src))-f.at >= int64(len(p)) { + n = len(p) + } else { + n = int(int64(len(f.src)) - f.at) + } + copy(p, f.src[f.at:f.at+int64(n)]) + f.at += int64(n) + return +} + +func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { + f.at = off + return f.Read(p) +} + +func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + f.at = offset + case 1: + f.at += offset + case 2: + f.at = int64(len(f.src)) + offset + } + return f.at, nil +} + +type snapshotDir struct { + snapshotFileStub + filenames []string + at int +} + +var _ afero.File = (*snapshotDir)(nil) + +func (f *snapshotDir) Readdir(count int) ([]os.FileInfo, error) { + names, err := f.Readdirnames(count) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(names)) + for i, name := range names { + ret[i] = snapshotFileInfo{ + name: name, + isDir: false, + } + } + return ret, nil +} + +func (f *snapshotDir) Readdirnames(count int) ([]string, error) { + var outLen int + names := f.filenames[f.at:] + if count > 0 { + if len(names) < count { + outLen = len(names) + } else { + outLen = count + } + if len(names) == 0 { + return nil, io.EOF + } + } else { + outLen = len(names) + } + f.at += outLen + + return names[:outLen], nil +} + +// snapshotFileInfo is a minimal implementation of os.FileInfo to support our +// virtual filesystem from snapshots. 
+type snapshotFileInfo struct { + name string + isDir bool +} + +var _ os.FileInfo = snapshotFileInfo{} + +func (fi snapshotFileInfo) Name() string { + return fi.name +} + +func (fi snapshotFileInfo) Size() int64 { + // In practice, our parser and loader never call Size + return -1 +} + +func (fi snapshotFileInfo) Mode() os.FileMode { + return os.ModePerm +} + +func (fi snapshotFileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi snapshotFileInfo) IsDir() bool { + return fi.isDir +} + +func (fi snapshotFileInfo) Sys() interface{} { + return nil +} + +type snapshotFileStub struct{} + +func (f snapshotFileStub) Close() error { + return nil +} + +func (f snapshotFileStub) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("cannot seek") +} + +func (f snapshotFileStub) Write(p []byte) (n int, err error) { + return f.WriteAt(p, 0) +} + +func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) WriteString(s string) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) Name() string { + // in practice, the loader and parser never use this + return "" +} + +func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Stat() (os.FileInfo, error) { + return nil, fmt.Errorf("cannot stat") +} + +func (f snapshotFileStub) Sync() error { + return nil +} + +func (f snapshotFileStub) Truncate(size int64) error { + return 
fmt.Errorf("cannot write to file in snapshot")
+}
diff --git a/pkg/configs/configload/loader_snapshot_test.go b/pkg/configs/configload/loader_snapshot_test.go
new file mode 100644
index 00000000000..06160624267
--- /dev/null
+++ b/pkg/configs/configload/loader_snapshot_test.go
@@ -0,0 +1,156 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configload
+
+import (
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/go-test/deep"
+	"github.com/kubegems/opentofu/pkg/configs"
+)
+
+func TestLoadConfigWithSnapshot(t *testing.T) {
+	fixtureDir := filepath.Clean("testdata/already-installed")
+	loader, err := NewLoader(&Config{
+		ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"),
+	})
+	if err != nil {
+		t.Fatalf("unexpected error from NewLoader: %s", err)
+	}
+
+	_, got, diags := loader.LoadConfigWithSnapshot(fixtureDir, configs.RootModuleCallForTesting())
+	assertNoDiagnostics(t, diags)
+	if got == nil {
+		t.Fatalf("snapshot is nil; want non-nil")
+	}
+
+	t.Log(spew.Sdump(got))
+
+	{
+		gotModuleDirs := map[string]string{}
+		for k, m := range got.Modules {
+			gotModuleDirs[k] = m.Dir
+		}
+		wantModuleDirs := map[string]string{
+			"":                "testdata/already-installed",
+			"child_a":         "testdata/already-installed/.terraform/modules/child_a",
+			"child_a.child_c": "testdata/already-installed/.terraform/modules/child_a/child_c",
+			"child_b":         "testdata/already-installed/.terraform/modules/child_b",
+			"child_b.child_d": "testdata/already-installed/.terraform/modules/child_b.child_d",
+		}
+
+		for key, module := range wantModuleDirs {
+			wantModuleDirs[key] = filepath.Clean(module)
+		}
+		problems := deep.Equal(wantModuleDirs, gotModuleDirs)
+		for _, problem := range problems {
+			t.Error(problem) // not t.Errorf: problem is not a format string (go vet printf)
+		}
+		if len(problems) > 0 {
+			return
+		}
+	}
+
+	gotRoot := got.Modules[""]
+	wantRoot := &SnapshotModule{
+		Dir: 
filepath.Join("testdata", "already-installed"),
+		Files: map[string][]byte{
+			"root.tf": []byte(`
+module "child_a" {
+  source = "example.com/foo/bar_a/baz"
+  version = ">= 1.0.0"
+}
+
+module "child_b" {
+  source = "example.com/foo/bar_b/baz"
+  version = ">= 1.0.0"
+}
+`),
+		},
+	}
+	// Normalise line endings and file paths for Windows
+	for k, v := range gotRoot.Files {
+		gotRoot.Files[k] = []byte(strings.ReplaceAll(string(v), "\r\n", "\n"))
+	}
+	if !reflect.DeepEqual(gotRoot, wantRoot) {
+		t.Errorf("wrong root module snapshot\ngot: %swant: %s", spew.Sdump(gotRoot), spew.Sdump(wantRoot))
+	}
+
+}
+
+func TestLoadConfigWithSnapshot_invalidSource(t *testing.T) {
+	fixtureDir := filepath.Clean("testdata/already-installed-now-invalid")
+
+	old, _ := os.Getwd()
+	os.Chdir(fixtureDir) // NOTE(review): error ignored; a failed chdir surfaces as the expected load error below — TODO check err explicitly
+	defer os.Chdir(old)
+
+	loader, err := NewLoader(&Config{
+		ModulesDir: ".terraform/modules",
+	})
+	if err != nil {
+		t.Fatalf("unexpected error from NewLoader: %s", err)
+	}
+
+	_, _, diags := loader.LoadConfigWithSnapshot(".", configs.RootModuleCallForTesting())
+	if !diags.HasErrors() {
+		t.Error("LoadConfigWithSnapshot succeeded; want errors")
+	}
+}
+
+func TestSnapshotRoundtrip(t *testing.T) {
+	fixtureDir := filepath.Clean("testdata/already-installed")
+	loader, err := NewLoader(&Config{
+		ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"),
+	})
+	if err != nil {
+		t.Fatalf("unexpected error from NewLoader: %s", err)
+	}
+
+	_, snap, diags := loader.LoadConfigWithSnapshot(fixtureDir, configs.RootModuleCallForTesting())
+	assertNoDiagnostics(t, diags)
+	if snap == nil {
+		t.Fatalf("snapshot is nil; want non-nil")
+	}
+
+	snapLoader := NewLoaderFromSnapshot(snap)
+	if snapLoader == nil { // was checking `loader`, which is already known non-nil here
+		t.Fatalf("snapshot loader is nil; want non-nil")
+	}
+
+	config, diags := snapLoader.LoadConfig(fixtureDir, configs.RootModuleCallForTesting())
+	assertNoDiagnostics(t, diags)
+	if config == nil {
+		t.Fatalf("config is nil; want non-nil")
+	}
+	if config.Module 
== nil { + t.Fatalf("config has no root module") + } + if got, want := config.Module.SourceDir, filepath.Clean("testdata/already-installed"); got != want { + t.Errorf("wrong root module sourcedir %q; want %q", got, want) + } + if got, want := len(config.Module.ModuleCalls), 2; got != want { + t.Errorf("wrong number of module calls in root module %d; want %d", got, want) + } + childA := config.Children["child_a"] + if childA == nil { + t.Fatalf("child_a config is nil; want non-nil") + } + if childA.Module == nil { + t.Fatalf("child_a config has no module") + } + if got, want := childA.Module.SourceDir, filepath.Clean("testdata/already-installed/.terraform/modules/child_a"); got != want { + t.Errorf("wrong child_a sourcedir %q; want %q", got, want) + } + if got, want := len(childA.Module.ModuleCalls), 1; got != want { + t.Errorf("wrong number of module calls in child_a %d; want %d", got, want) + } +} diff --git a/pkg/configs/configload/loader_test.go b/pkg/configs/configload/loader_test.go new file mode 100644 index 00000000000..eb6f5c51902 --- /dev/null +++ b/pkg/configs/configload/loader_test.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool { + t.Helper() + return assertDiagnosticCount(t, diags, 0) +} + +func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool { + t.Helper() + if len(diags) != want { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want) + for _, diag := range diags { + t.Logf("- %s", diag) + } + return true + } + return false +} +func assertResultCtyEqual(t *testing.T, got, want cty.Value) bool { + t.Helper() + if !got.RawEquals(want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want) + return true + } + return false +} diff --git a/pkg/configs/configload/module_mgr.go b/pkg/configs/configload/module_mgr.go new file mode 100644 index 00000000000..8e39e96844c --- /dev/null +++ b/pkg/configs/configload/module_mgr.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/modsdir" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/spf13/afero" +) + +type moduleMgr struct { + FS afero.Afero + + // CanInstall is true for a module manager that can support installation. + // + // This must be set only if FS is an afero.OsFs, because the installer + // (which uses go-getter) is not aware of the virtual filesystem + // abstraction and will always write into the "real" filesystem. + CanInstall bool + + // Dir is the path where descendent modules are (or will be) installed. + Dir string + + // Services is a service discovery client that will be used to find + // remote module registry endpoints. This object may be pre-loaded with + // cached discovery information. 
+ Services *disco.Disco + + // Registry is a client for the module registry protocol, which is used + // when a module is requested from a registry source. + Registry *registry.Client + + // manifest tracks the currently-installed modules for this manager. + // + // The loader may read this. Only the installer may write to it, and + // after a set of updates are completed the installer must call + // writeModuleManifestSnapshot to persist a snapshot of the manifest + // to disk for use on subsequent runs. + manifest modsdir.Manifest +} + +func (m *moduleMgr) manifestSnapshotPath() string { + return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) +} + +// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. +func (m *moduleMgr) readModuleManifestSnapshot() error { + r, err := m.FS.Open(m.manifestSnapshotPath()) + if err != nil { + if os.IsNotExist(err) { + // We'll treat a missing file as an empty manifest + m.manifest = make(modsdir.Manifest) + return nil + } + return err + } + + m.manifest, err = modsdir.ReadManifestSnapshot(r) + return err +} diff --git a/pkg/configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf b/pkg/configs/configload/testdata/add-version-constraint/.terraform/modules/child/empty.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json b/pkg/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json new file mode 100644 index 00000000000..c02f40016b0 --- /dev/null +++ b/pkg/configs/configload/testdata/add-version-constraint/.terraform/modules/modules.json @@ -0,0 +1,14 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "testdata/add-version-constraint" + }, + { + "Key": "child", + "Source": "hashicorp/module-installer-acctest/aws", + "Dir": "testdata/add-version-constraint/.terraform/modules/child" + } + ] +} diff --git 
a/pkg/configs/configload/testdata/add-version-constraint/add-version-constraint.tf b/pkg/configs/configload/testdata/add-version-constraint/add-version-constraint.tf new file mode 100644 index 00000000000..2d407a4d61e --- /dev/null +++ b/pkg/configs/configload/testdata/add-version-constraint/add-version-constraint.tf @@ -0,0 +1,10 @@ +# This fixture depends on a registry module, which indirectly refers to the +# following github repository: +# +# However, the test that uses it is testing for an error, so in practice the +# registry does not need to be accessed when this test is successful. + +module "child" { + source = "hashicorp/module-installer-acctest/aws" + version = "0.0.1" +} diff --git a/pkg/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json b/pkg/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json new file mode 100644 index 00000000000..32a4ace575d --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed-now-invalid/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"foo","Source":"./foo","Dir":"foo"},{"Key":"foo.bar","Source":"./bar","Dir":"foo/bar"}]} \ No newline at end of file diff --git a/pkg/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf b/pkg/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf new file mode 100644 index 00000000000..48b5e2e0673 --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed-now-invalid/foo/bar/main.tf @@ -0,0 +1,3 @@ +output "hello" { + value = "Hello from foo/bar" +} diff --git a/pkg/configs/configload/testdata/already-installed-now-invalid/foo/main.tf b/pkg/configs/configload/testdata/already-installed-now-invalid/foo/main.tf new file mode 100644 index 00000000000..9fba57235c2 --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed-now-invalid/foo/main.tf @@ -0,0 +1,3 @@ +module "bar" { + source = 
"${path.module}/bar" +} diff --git a/pkg/configs/configload/testdata/already-installed-now-invalid/root.tf b/pkg/configs/configload/testdata/already-installed-now-invalid/root.tf new file mode 100644 index 00000000000..020494e84d6 --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed-now-invalid/root.tf @@ -0,0 +1,3 @@ +module "foo" { + source = "./foo" +} diff --git a/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf new file mode 100644 index 00000000000..2f4d0f1a0b8 --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_a.tf @@ -0,0 +1,4 @@ + +module "child_c" { + source = "./child_c" +} diff --git a/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf new file mode 100644 index 00000000000..785d98d98ac --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_a/child_c/child_c.tf @@ -0,0 +1,4 @@ + +output "hello" { + value = "Hello from child_c" +} diff --git a/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf new file mode 100644 index 00000000000..145576a365e --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b.child_d/child_d.tf @@ -0,0 +1,4 @@ + +output "hello" { + value = "Hello from child_d" +} diff --git a/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf new file mode 100644 index 00000000000..4a1b247d39c --- /dev/null +++ 
b/pkg/configs/configload/testdata/already-installed/.terraform/modules/child_b/child_b.tf @@ -0,0 +1,5 @@ + +module "child_d" { + source = "example.com/foo/bar_d/baz" + # Intentionally no version here +} diff --git a/pkg/configs/configload/testdata/already-installed/.terraform/modules/modules.json b/pkg/configs/configload/testdata/already-installed/.terraform/modules/modules.json new file mode 100644 index 00000000000..43439865a9b --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed/.terraform/modules/modules.json @@ -0,0 +1 @@ +{"Modules":[{"Key":"","Source":"","Dir":"testdata/already-installed"},{"Key":"child_a","Source":"example.com/foo/bar_a/baz","Version":"1.0.1","Dir":"testdata/already-installed/.terraform/modules/child_a"},{"Key":"child_b","Source":"example.com/foo/bar_b/baz","Version":"1.0.0","Dir":"testdata/already-installed/.terraform/modules/child_b"},{"Key":"child_a.child_c","Source":"./child_c","Dir":"testdata/already-installed/.terraform/modules/child_a/child_c"},{"Key":"child_b.child_d","Source":"example.com/foo/bar_d/baz","Version":"1.2.0","Dir":"testdata/already-installed/.terraform/modules/child_b.child_d"}]} \ No newline at end of file diff --git a/pkg/configs/configload/testdata/already-installed/root.tf b/pkg/configs/configload/testdata/already-installed/root.tf new file mode 100644 index 00000000000..8a4473942da --- /dev/null +++ b/pkg/configs/configload/testdata/already-installed/root.tf @@ -0,0 +1,10 @@ + +module "child_a" { + source = "example.com/foo/bar_a/baz" + version = ">= 1.0.0" +} + +module "child_b" { + source = "example.com/foo/bar_b/baz" + version = ">= 1.0.0" +} diff --git a/pkg/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json b/pkg/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json new file mode 100644 index 00000000000..3a80b67442b --- /dev/null +++ 
b/pkg/configs/configload/testdata/child-provider-child-count/.terraform/modules/modules.json @@ -0,0 +1,19 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "." + }, + { + "Key": "child", + "Source": "./child", + "Dir": "testdata/child-provider-child-count/child" + }, + { + "Key": "child.grandchild", + "Source": "../grandchild", + "Dir": "testdata/child-provider-child-count/grandchild" + } + ] +} diff --git a/pkg/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf b/pkg/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf new file mode 100644 index 00000000000..5b39941a03c --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-child-count/child-provider-child-count.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + count = 1 +} diff --git a/pkg/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf b/pkg/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf new file mode 100644 index 00000000000..524742c3fcf --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-child-count/child/child-provider-child-count-child.tf @@ -0,0 +1,7 @@ +provider "boop" { + blah = true +} + +module "grandchild" { + source = "../grandchild" +} diff --git a/pkg/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf b/pkg/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf new file mode 100644 index 00000000000..ccd9dcef9ec --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-child-count/grandchild/child-provider-child-count-grandchild.tf @@ -0,0 +1 @@ +# Intentionally blank diff --git a/pkg/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json b/pkg/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json 
new file mode 100644 index 00000000000..a9239e3a8b6 --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-grandchild-count/.terraform/modules/modules.json @@ -0,0 +1,19 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "." + }, + { + "Key": "child", + "Source": "./child", + "Dir": "testdata/child-provider-grandchild-count/child" + }, + { + "Key": "child.grandchild", + "Source": "../grandchild", + "Dir": "testdata/child-provider-grandchild-count/grandchild" + } + ] +} diff --git a/pkg/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf b/pkg/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-grandchild-count/child-provider-grandchild-count.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf b/pkg/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf new file mode 100644 index 00000000000..8d3fe1023d2 --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-grandchild-count/child/child-provider-grandchild-count-child.tf @@ -0,0 +1,12 @@ +provider "boop" { + blah = true +} + +module "grandchild" { + source = "../grandchild" + + # grandchild's caller (this file) has a legacy nested provider block, but + # grandchild itself does not and so it's valid to use "count" here even + # though it wouldn't be valid to call "child" (this file) with "count". 
+ count = 2 +} diff --git a/pkg/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf b/pkg/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf new file mode 100644 index 00000000000..ccd9dcef9ec --- /dev/null +++ b/pkg/configs/configload/testdata/child-provider-grandchild-count/grandchild/child-provider-grandchild-count-grandchild.tf @@ -0,0 +1 @@ +# Intentionally blank diff --git a/pkg/configs/configload/testdata/empty/.gitignore b/pkg/configs/configload/testdata/empty/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/configload/testdata/go-getter-modules/.gitignore b/pkg/configs/configload/testdata/go-getter-modules/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/configs/configload/testdata/go-getter-modules/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/configs/configload/testdata/go-getter-modules/root.tf b/pkg/configs/configload/testdata/go-getter-modules/root.tf new file mode 100644 index 00000000000..9b174a7a501 --- /dev/null +++ b/pkg/configs/configload/testdata/go-getter-modules/root.tf @@ -0,0 +1,21 @@ +# This fixture depends on a github repo at: +# https://github.com/hashicorp/terraform-aws-module-installer-acctest +# ...and expects its v0.0.1 tag to be pointing at the following commit: +# d676ab2559d4e0621d59e3c3c4cbb33958ac4608 + +variable "v" { + description = "in local caller for go-getter-modules" + default = "" +} + +module "acctest_root" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1" +} + +module "acctest_child_a" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_a?ref=v0.0.1" +} + +module "acctest_child_b" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_b?ref=v0.0.1" +} diff --git 
a/pkg/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json b/pkg/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json new file mode 100644 index 00000000000..c55c1cf54ff --- /dev/null +++ b/pkg/configs/configload/testdata/invalid-names-in-submodules/.terraform/modules/modules.json @@ -0,0 +1,14 @@ +{ + "Modules": [ + { + "Key": "test", + "Source": "./sub", + "Dir": "testdata/invalid-names-in-submodules/sub" + }, + { + "Key": "", + "Source": "", + "Dir": "." + } + ] +} \ No newline at end of file diff --git a/pkg/configs/configload/testdata/invalid-names-in-submodules/main.tf b/pkg/configs/configload/testdata/invalid-names-in-submodules/main.tf new file mode 100644 index 00000000000..3fbc8c68cf0 --- /dev/null +++ b/pkg/configs/configload/testdata/invalid-names-in-submodules/main.tf @@ -0,0 +1,3 @@ +module "test" { + source = "./sub" +} diff --git a/pkg/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf b/pkg/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf new file mode 100644 index 00000000000..aacab2c441d --- /dev/null +++ b/pkg/configs/configload/testdata/invalid-names-in-submodules/sub/main.tf @@ -0,0 +1,7 @@ +resource "aws-_foo" "test" { + +} + +data "aws-_bar" "test" { + +} diff --git a/pkg/configs/configload/testdata/invalid-names/main.tf b/pkg/configs/configload/testdata/invalid-names/main.tf new file mode 100644 index 00000000000..d4eee4c3e28 --- /dev/null +++ b/pkg/configs/configload/testdata/invalid-names/main.tf @@ -0,0 +1,3 @@ +provider "42_bad!" 
{ + invalid_provider_name = "yes" +} diff --git a/pkg/configs/configload/testdata/local-modules/child_a/child_a.tf b/pkg/configs/configload/testdata/local-modules/child_a/child_a.tf new file mode 100644 index 00000000000..68ebb8e4048 --- /dev/null +++ b/pkg/configs/configload/testdata/local-modules/child_a/child_a.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_a module" + default = "" +} + +module "child_b" { + source = "./child_b" +} diff --git a/pkg/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf b/pkg/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf new file mode 100644 index 00000000000..e2e2209164c --- /dev/null +++ b/pkg/configs/configload/testdata/local-modules/child_a/child_b/child_b.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_b module" + default = "" +} + +output "hello" { + value = "Hello from child_b!" +} diff --git a/pkg/configs/configload/testdata/local-modules/root.tf b/pkg/configs/configload/testdata/local-modules/root.tf new file mode 100644 index 00000000000..3b4c6416d7d --- /dev/null +++ b/pkg/configs/configload/testdata/local-modules/root.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in root module" + default = "" +} + +module "child_a" { + source = "./child_a" +} diff --git a/pkg/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json b/pkg/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json new file mode 100644 index 00000000000..28f813039ee --- /dev/null +++ b/pkg/configs/configload/testdata/module-depends-on/.terraform/modules/modules.json @@ -0,0 +1,24 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "testdata/expand-modules/nested-provider" + }, + { + "Key": "child", + "Source": "./child", + "Dir": "testdata/expand-modules/nested-provider/child" + }, + { + "Key": "child2", + "Source": "./child2", + "Dir": "testdata/expand-modules/nested-provider/child2" + }, + { + "Key": "child.child2", + 
"Source": "../child2", + "Dir": "testdata/expand-modules/nested-provider/child2" + } + ] +} diff --git a/pkg/configs/configload/testdata/module-depends-on/child/main.tf b/pkg/configs/configload/testdata/module-depends-on/child/main.tf new file mode 100644 index 00000000000..f6b74b7a1cb --- /dev/null +++ b/pkg/configs/configload/testdata/module-depends-on/child/main.tf @@ -0,0 +1,3 @@ +module "child2" { + source = "../child2" +} diff --git a/pkg/configs/configload/testdata/module-depends-on/child2/main.tf b/pkg/configs/configload/testdata/module-depends-on/child2/main.tf new file mode 100644 index 00000000000..bea1f0c4fde --- /dev/null +++ b/pkg/configs/configload/testdata/module-depends-on/child2/main.tf @@ -0,0 +1,6 @@ +provider "aws" { +} + +output "my_output" { + value = "my output" +} diff --git a/pkg/configs/configload/testdata/module-depends-on/root.tf b/pkg/configs/configload/testdata/module-depends-on/root.tf new file mode 100644 index 00000000000..b14d24c45e5 --- /dev/null +++ b/pkg/configs/configload/testdata/module-depends-on/root.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" + depends_on = [test_resource.a] +} + +resource "test_resource" "a" { +} diff --git a/pkg/configs/configload/testdata/registry-modules/.gitignore b/pkg/configs/configload/testdata/registry-modules/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/configs/configload/testdata/registry-modules/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/configs/configload/testdata/registry-modules/root.tf b/pkg/configs/configload/testdata/registry-modules/root.tf new file mode 100644 index 00000000000..4b5ad1f1edd --- /dev/null +++ b/pkg/configs/configload/testdata/registry-modules/root.tf @@ -0,0 +1,33 @@ +# This fixture indirectly depends on a github repo at: +# https://github.com/hashicorp/terraform-aws-module-installer-acctest +# ...and expects its v0.0.1 tag to be pointing at the following commit: +# 
d676ab2559d4e0621d59e3c3c4cbb33958ac4608 +# +# This repository is accessed indirectly via: +# https://registry.terraform.io/modules/hashicorp/module-installer-acctest/aws/0.0.1 +# +# Since the tag's id is included in a downloaded archive, it is expected to +# have the following id: +# 853d03855b3290a3ca491d4c3a7684572dd42237 +# (this particular assumption is encoded in the tests that use this fixture) + + +variable "v" { + description = "in local caller for registry-modules" + default = "" +} + +module "acctest_root" { + source = "hashicorp/module-installer-acctest/aws" + version = "0.0.1" +} + +module "acctest_child_a" { + source = "hashicorp/module-installer-acctest/aws//modules/child_a" + version = "0.0.1" +} + +module "acctest_child_b" { + source = "hashicorp/module-installer-acctest/aws//modules/child_b" + version = "0.0.1" +} diff --git a/pkg/configs/configload/testing.go b/pkg/configs/configload/testing.go new file mode 100644 index 00000000000..81b8324baaf --- /dev/null +++ b/pkg/configs/configload/testing.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configload + +import ( + "os" + "testing" +) + +// NewLoaderForTests is a variant of NewLoader that is intended to be more +// convenient for unit tests. +// +// The loader's modules directory is a separate temporary directory created +// for each call. Along with the created loader, this function returns a +// cleanup function that should be called before the test completes in order +// to remove that temporary directory. +// +// In the case of any errors, t.Fatal (or similar) will be called to halt +// execution of the test, so the calling test does not need to handle errors +// itself. 
+func NewLoaderForTests(t *testing.T) (*Loader, func()) { + t.Helper() + + modulesDir, err := os.MkdirTemp("", "tf-configs") + if err != nil { + t.Fatalf("failed to create temporary modules dir: %s", err) + return nil, func() {} + } + + cleanup := func() { + os.RemoveAll(modulesDir) + } + + loader, err := NewLoader(&Config{ + ModulesDir: modulesDir, + }) + if err != nil { + cleanup() + t.Fatalf("failed to create config loader: %s", err) + return nil, func() {} + } + + return loader, cleanup +} diff --git a/pkg/configs/configschema/coerce_value.go b/pkg/configs/configschema/coerce_value.go new file mode 100644 index 00000000000..470d5351572 --- /dev/null +++ b/pkg/configs/configschema/coerce_value.go @@ -0,0 +1,270 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// CoerceValue attempts to force the given value to conform to the type +// implied by the receiver. +// +// This is useful in situations where a configuration must be derived from +// an already-decoded value. It is always better to decode directly from +// configuration where possible since then source location information is +// still available to produce diagnostics, but in special situations this +// function allows a compatible result to be obtained even if the +// configuration objects are not available. +// +// If the given value cannot be converted to conform to the receiving schema +// then an error is returned describing one of possibly many problems. This +// error may be a cty.PathError indicating a position within the nested +// data structure where the problem applies. 
+func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + convType := b.specType() + impliedType := convType.WithoutOptionalAttributesDeep() + + switch { + case in.IsNull(): + return cty.NullVal(impliedType), nil + case !in.IsKnown(): + return cty.UnknownVal(impliedType), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(impliedType), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(impliedType), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + // Stable sort keys for consistent error reports + attrKeys := make([]string, 0, len(b.Attributes)) + for key := range b.Attributes { + attrKeys = append(attrKeys, key) + } + sort.Strings(attrKeys) + + for _, name := range attrKeys { + attrS := b.Attributes[name] + attrType := impliedType.AttributeType(name) + attrConvType := convType.AttributeType(name) + + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrType) + default: + return cty.UnknownVal(impliedType), path.NewErrorf("attribute %q is required", name) + } + + val, err := convert.Convert(val, attrConvType) + if err != nil { + return cty.UnknownVal(impliedType), append(path, cty.GetAttrStep{Name: name}).NewError(err) + } + attrs[name] = val + } + + // Stable sort keys for consistent error reports + typeKeys := make([]string, 0, len(b.BlockTypes)) + for key := range b.BlockTypes { + typeKeys = append(typeKeys, key) + } + sort.Strings(typeKeys) + + for _, typeName := range typeKeys { + blockS := b.BlockTypes[typeName] + switch blockS.Nesting { + + case NestingSingle, 
NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(impliedType), err + } + default: + attrs[typeName] = blockS.EmptyValue() + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(impliedType), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(impliedType), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + default: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(impliedType), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); 
it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(impliedType), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + default: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(impliedType), path.NewErrorf("must be a map") + } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(impliedType), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(impliedType), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. 
+ useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} diff --git a/pkg/configs/configschema/coerce_value_test.go b/pkg/configs/configschema/coerce_value_test.go new file mode 100644 index 00000000000..d2a8e2dc244 --- /dev/null +++ b/pkg/configs/configschema/coerce_value_test.go @@ -0,0 +1,630 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestCoerceValue(t *testing.T) { + tests := map[string]struct { + Schema *Block + Input cty.Value + WantValue cty.Value + WantErr string + }{ + "empty schema and value": { + &Block{}, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + ``, + }, + "attribute present": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.True, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("true"), + }), + ``, + }, + "single block present": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSingle, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.EmptyObjectVal, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.EmptyObjectVal, + }), + ``, + }, + "single block wrong type": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSingle, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.True, + }), + cty.DynamicVal, + `.foo: an object is required`, + }, + "list block with one item": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingList, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), + }), + ``, + }, + "set block with one item": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSet, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), // can implicitly convert to set + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": 
cty.SetVal([]cty.Value{cty.EmptyObjectVal}), + }), + ``, + }, + "map block with one item": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingMap, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{"foo": cty.EmptyObjectVal}), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{"foo": cty.EmptyObjectVal}), + }), + ``, + }, + "list block with one item having an attribute": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{ + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: NestingList, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + })}), + }), + ``, + }, + "list block with one item having a missing attribute": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{ + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: NestingList, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), + }), + cty.DynamicVal, + `.foo[0]: attribute "bar" is required`, + }, + "list block with one item having an extraneous attribute": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingList, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + })}), + }), + cty.DynamicVal, + `.foo[0]: unexpected attribute "bar"`, + }, + "missing optional attribute": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: 
true, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + ``, + }, + "missing optional single block": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSingle, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.EmptyObject), + }), + ``, + }, + "missing optional list block": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingList, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(cty.EmptyObject), + }), + ``, + }, + "missing optional set block": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSet, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetValEmpty(cty.EmptyObject), + }), + ``, + }, + "missing optional map block": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingMap, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapValEmpty(cty.EmptyObject), + }), + ``, + }, + "missing required attribute": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.EmptyObjectVal, + cty.DynamicVal, + `attribute "foo" is required`, + }, + "missing required single block": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSingle, + MinItems: 1, + MaxItems: 1, + }, + }, + }, + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.EmptyObject), + }), + ``, + }, + "unknown nested list": { + &Block{ + Attributes: map[string]*Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingList, + MinItems: 2, + }, + }, + }, + 
cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.EmptyObject), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.List(cty.EmptyObject)), + }), + "", + }, + "unknowns in nested list": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{ + Attributes: map[string]*Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + }, + Nesting: NestingList, + MinItems: 2, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.UnknownVal(cty.String), + }), + }), + }), + "", + }, + "unknown nested set": { + &Block{ + Attributes: map[string]*Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingSet, + MinItems: 1, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.EmptyObject), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.Set(cty.EmptyObject)), + }), + "", + }, + "unknown nested map": { + &Block{ + Attributes: map[string]*Attribute{ + "attr": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "foo": { + Block: Block{}, + Nesting: NestingMap, + MinItems: 1, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("test"), + "foo": cty.UnknownVal(cty.Map(cty.EmptyObject)), + }), + "", + }, + "extraneous attribute": { + &Block{}, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), 
+ }), + cty.DynamicVal, + `unexpected attribute "foo"`, + }, + "wrong attribute type": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.Number, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.False, + }), + cty.DynamicVal, + `.foo: number required`, + }, + "unset computed value": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{}), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + ``, + }, + "dynamic value attributes": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.DynamicPseudoType, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + "baz": cty.NumberIntVal(8), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.NullVal(cty.DynamicPseudoType), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + "baz": cty.NumberIntVal(8), + }), + }), + }), + ``, + }, + "dynamic attributes in map": { + // Convert a block represented as a map to an object if a + // DynamicPseudoType causes the element types to mismatch. 
+ &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.DynamicPseudoType, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.NullVal(cty.DynamicPseudoType), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + "baz": cty.NullVal(cty.DynamicPseudoType), + }), + }), + }), + ``, + }, + "nested types": { + // handle NestedTypes + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + "baz": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + Optional: true, + }, + "fob": { + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.NullVal(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + "baz": cty.NullVal(cty.Map(cty.String)), + 
}), + }), + "fob": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + }))), + }), + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotValue, gotErrObj := test.Schema.CoerceValue(test.Input) + + if gotErrObj == nil { + if test.WantErr != "" { + t.Fatalf("coersion succeeded; want error: %q", test.WantErr) + } + } else { + gotErr := tfdiags.FormatError(gotErrObj) + if gotErr != test.WantErr { + t.Fatalf("wrong error\ngot: %s\nwant: %s", gotErr, test.WantErr) + } + return + } + + if !gotValue.RawEquals(test.WantValue) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, gotValue, test.WantValue) + } + }) + } +} diff --git a/pkg/configs/configschema/decoder_spec.go b/pkg/configs/configschema/decoder_spec.go new file mode 100644 index 00000000000..3f7a556b02f --- /dev/null +++ b/pkg/configs/configschema/decoder_spec.go @@ -0,0 +1,228 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "runtime" + "sync" + "unsafe" + + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +var mapLabelNames = []string{"key"} + +// specCache is a global cache of all the generated hcldec.Spec values for +// Blocks. This cache is used by the Block.DecoderSpec method to memoize calls +// and prevent unnecessary regeneration of the spec, especially when they are +// large and deeply nested. +// Caching these externally rather than within the struct is required because +// Blocks are used by value and copied when working with NestedBlocks, and the +// copying of the value prevents any safe synchronisation of the struct itself. +// +// While we are using the *Block pointer as the cache key, and the Block +// contents are mutable, once a Block is created it is treated as immutable for +// the duration of its life. 
Because a Block is a representation of a logical +// schema, which cannot change while it's being used, any modifications to the +// schema during execution would be an error. +type specCache struct { + sync.Mutex + specs map[uintptr]hcldec.Spec +} + +var decoderSpecCache = specCache{ + specs: map[uintptr]hcldec.Spec{}, +} + +// get returns the Spec associated with the given Block, or nil if none is +// found. +func (s *specCache) get(b *Block) hcldec.Spec { + s.Lock() + defer s.Unlock() + k := uintptr(unsafe.Pointer(b)) + return s.specs[k] +} + +// set stores the given Spec as being the result of b.DecoderSpec(). +func (s *specCache) set(b *Block, spec hcldec.Spec) { + s.Lock() + defer s.Unlock() + + // the uintptr value gets us a unique identifier for each block, without + // tying this to the block value itself. + k := uintptr(unsafe.Pointer(b)) + if _, ok := s.specs[k]; ok { + return + } + + s.specs[k] = spec + + // This must use a finalizer tied to the Block, otherwise we'll continue to + // build up Spec values as the Blocks are recycled. + runtime.SetFinalizer(b, s.delete) +} + +// delete removes the spec associated with the given Block. +func (s *specCache) delete(b *Block) { + s.Lock() + defer s.Unlock() + + k := uintptr(unsafe.Pointer(b)) + delete(s.specs, k) +} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null values if any of the +// block attributes are defined as optional and/or computed respectively. 
+func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + if spec := decoderSpecCache.get(b); spec != nil { + return spec + } + + for name, attrS := range b.Attributes { + ret[name] = attrS.decoderSpec(name) + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to define + // both an attribute and a block type of the same name. We assume + // that the provider has already used something like + // InternalValidate to validate their schema. + continue + } + + childSpec := blockS.Block.DecoderSpec() + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1, + } + if blockS.Nesting == NestingGroup { + ret[name] = &hcldec.DefaultSpec{ + Primary: ret[name], + Default: &hcldec.LiteralSpec{ + Value: blockS.EmptyValue(), + }, + } + } + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, at the expense of our type then not being predictable. + if blockS.Block.specType().HasDynamicTypes() { + ret[name] = &hcldec.BlockTupleSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + } else { + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + } + case NestingSet: + // We forbid dynamically-typed attributes inside NestingSet in + // InternalValidate, so we don't do anything special to handle that + // here. (There is no set analog to tuple and object types, because + // cty's set implementation depends on knowing the static type in + // order to properly compute its internal hashes.) 
We assume that + // the provider has already used something like InternalValidate to + // validate their schema. + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingMap: + // We prefer to use a map where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use an object + // instead, at the expense of our type then not being predictable. + if blockS.Block.specType().HasDynamicTypes() { + ret[name] = &hcldec.BlockObjectSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } else { + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. We assume that the provider has already used + // something like InternalValidate to validate their schema. + continue + } + } + + decoderSpecCache.set(b, ret) + return ret +} + +func (a *Attribute) decoderSpec(name string) hcldec.Spec { + ret := &hcldec.AttrSpec{Name: name} + if a == nil { + return ret + } + + if a.NestedType != nil { + if a.Type != cty.NilType { + panic("Invalid attribute schema: NestedType and Type cannot both be set. This is a bug in the provider.") + } + + ty := a.NestedType.specType() + ret.Type = ty + ret.Required = a.Required + return ret + } + + ret.Type = a.Type + ret.Required = a.Required + return ret +} + +// listOptionalAttrsFromObject is a helper function which does *not* recurse +// into NestedType Attributes, because the optional types for each of those will +// belong to their own cty.Object definitions. It is used in other functions +// which themselves handle that recursion. +func listOptionalAttrsFromObject(o *Object) []string { + ret := make([]string, 0) + + // This is unlikely to happen outside of tests. 
+ if o == nil { + return ret + } + + for name, attr := range o.Attributes { + if attr.Optional || attr.Computed { + ret = append(ret, name) + } + } + return ret +} diff --git a/pkg/configs/configschema/decoder_spec_test.go b/pkg/configs/configschema/decoder_spec_test.go new file mode 100644 index 00000000000..e49f20e6914 --- /dev/null +++ b/pkg/configs/configschema/decoder_spec_test.go @@ -0,0 +1,949 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "sort" + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/zclconf/go-cty/cty" +) + +func TestBlockDecoderSpec(t *testing.T) { + tests := map[string]struct { + Schema *Block + TestBody hcl.Body + Want cty.Value + DiagCount int + }{ + "empty": { + &Block{}, + hcl.EmptyBody(), + cty.EmptyObjectVal, + 0, + }, + "nil": { + nil, + hcl.EmptyBody(), + cty.EmptyObjectVal, + 0, + }, + "attributes": { + &Block{ + Attributes: map[string]*Attribute{ + "optional": { + Type: cty.Number, + Optional: true, + }, + "required": { + Type: cty.String, + Required: true, + }, + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "optional_computed_overridden": { + Type: cty.Bool, + Optional: true, + Computed: true, + }, + "optional_computed_unknown": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "required": { + Name: "required", + Expr: hcltest.MockExprLiteral(cty.NumberIntVal(5)), + }, + "optional_computed_overridden": { + Name: "optional_computed_overridden", + Expr: hcltest.MockExprLiteral(cty.True), + 
}, + "optional_computed_unknown": { + Name: "optional_computed_overridden", + Expr: hcltest.MockExprLiteral(cty.UnknownVal(cty.String)), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.Number), + "required": cty.StringVal("5"), // converted from number to string + "computed": cty.NullVal(cty.List(cty.Bool)), + "optional_computed": cty.NullVal(cty.Map(cty.Bool)), + "optional_computed_overridden": cty.True, + "optional_computed_unknown": cty.UnknownVal(cty.String), + }), + 0, + }, + "dynamically-typed attribute": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.DynamicPseudoType, // any type is permitted + Required: true, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "foo": { + Name: "foo", + Expr: hcltest.MockExprLiteral(cty.True), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.True, + }), + 0, + }, + "dynamically-typed attribute omitted": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.DynamicPseudoType, // any type is permitted + Optional: true, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{}), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.DynamicPseudoType), + }), + 0, + }, + "required attribute omitted": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.Bool, + Required: true, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{}), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.Bool), + }), + 1, // missing required attribute + }, + "wrong attribute type": { + &Block{ + Attributes: map[string]*Attribute{ + "optional": { + Type: cty.Number, + Optional: true, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "optional": { + Name: "optional", + Expr: hcltest.MockExprLiteral(cty.True), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.UnknownVal(cty.Number), + }), + 1, // incorrect type; number required + }, + 
"blocks": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "single": { + Nesting: NestingSingle, + Block: Block{}, + }, + "list": { + Nesting: NestingList, + Block: Block{}, + }, + "set": { + Nesting: NestingSet, + Block: Block{}, + }, + "map": { + Nesting: NestingMap, + Block: Block{}, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Blocks: hcl.Blocks{ + &hcl.Block{ + Type: "list", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "single", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "list", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "set", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "map", + Labels: []string{"foo"}, + LabelRanges: []hcl.Range{hcl.Range{}}, + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "map", + Labels: []string{"bar"}, + LabelRanges: []hcl.Range{hcl.Range{}}, + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "set", + Body: hcl.EmptyBody(), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.EmptyObjectVal, + "list": cty.ListVal([]cty.Value{ + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }), + "set": cty.SetVal([]cty.Value{ + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }), + "map": cty.MapVal(map[string]cty.Value{ + "foo": cty.EmptyObjectVal, + "bar": cty.EmptyObjectVal, + }), + }), + 0, + }, + "blocks with dynamically-typed attributes": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "single": { + Nesting: NestingSingle, + Block: Block{ + Attributes: map[string]*Attribute{ + "a": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "a": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + "map": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "a": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Blocks: hcl.Blocks{ + &hcl.Block{ + Type: "list", + Body: 
hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "single", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "list", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "map", + Labels: []string{"foo"}, + LabelRanges: []hcl.Range{hcl.Range{}}, + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "map", + Labels: []string{"bar"}, + LabelRanges: []hcl.Range{hcl.Range{}}, + Body: hcl.EmptyBody(), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + "list": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + }), + "map": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + "bar": cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + }), + }), + 0, + }, + "too many list items": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Nesting: NestingList, + Block: Block{}, + MaxItems: 1, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Blocks: hcl.Blocks{ + &hcl.Block{ + Type: "foo", + Body: hcl.EmptyBody(), + }, + &hcl.Block{ + Type: "foo", + Body: unknownBody{hcl.EmptyBody()}, + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.List(cty.EmptyObject)), + }), + 0, // max items cannot be validated during decode + }, + // dynamic blocks may fulfill MinItems, but there is only one block to + // decode. 
+ "required MinItems": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "foo": { + Nesting: NestingList, + Block: Block{}, + MinItems: 2, + }, + }, + }, + hcltest.MockBody(&hcl.BodyContent{ + Blocks: hcl.Blocks{ + &hcl.Block{ + Type: "foo", + Body: unknownBody{hcl.EmptyBody()}, + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.List(cty.EmptyObject)), + }), + 0, + }, + "extraneous attribute": { + &Block{}, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "extra": { + Name: "extra", + Expr: hcltest.MockExprLiteral(cty.StringVal("hello")), + }, + }, + }), + cty.EmptyObjectVal, + 1, // extraneous attribute + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + spec := test.Schema.DecoderSpec() + + got, diags := hcldec.Decode(test.TestBody, spec, nil) + if len(diags) != test.DiagCount { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount) + for _, diag := range diags { + t.Logf("- %s", diag.Error()) + } + } + + if !got.RawEquals(test.Want) { + t.Logf("[INFO] implied schema is %s", spew.Sdump(hcldec.ImpliedSchema(spec))) + t.Errorf("wrong result\ngot: %s\nwant: %s", dump.Value(got), dump.Value(test.Want)) + } + + // Double-check that we're producing consistent results for DecoderSpec + // and ImpliedType. + impliedType := test.Schema.ImpliedType() + if errs := got.Type().TestConformance(impliedType); len(errs) != 0 { + t.Errorf("result does not conform to the schema's implied type") + for _, err := range errs { + t.Logf("- %s", err.Error()) + } + } + }) + } +} + +// this satisfies hcldec.UnknownBody to simulate a dynamic block with an +// unknown number of values. 
+type unknownBody struct { + hcl.Body +} + +func (b unknownBody) Unknown() bool { + return true +} + +func TestAttributeDecoderSpec(t *testing.T) { + tests := map[string]struct { + Schema *Attribute + TestBody hcl.Body + Want cty.Value + DiagCount int + }{ + "optional string (null)": { + &Attribute{ + Type: cty.String, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{}), + cty.NullVal(cty.String), + 0, + }, + "optional string": { + &Attribute{ + Type: cty.String, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.StringVal("bar")), + }, + }, + }), + cty.StringVal("bar"), + 0, + }, + "NestedType with required string": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + })), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + 0, + }, + "NestedType with optional attributes": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "bar": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + })), + }, + }, + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.NullVal(cty.String), + }), + 0, + }, + "NestedType with missing required string": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + 
"foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.EmptyObjectVal), + }, + }, + }), + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + 1, + }, + // NestedModes + "NestedType NestingModeList valid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + }), + })), + }, + }, + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("bar")}), + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("baz")}), + }), + 0, + }, + "NestedType NestingModeList invalid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + // "foo" should be a string, not a list + "foo": cty.ListVal([]cty.Value{cty.StringVal("bar"), cty.StringVal("baz")}), + })})), + }, + }, + }), + cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{"foo": cty.String}))), + 1, + }, + "NestedType NestingModeSet valid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + 
Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + }), + })), + }, + }, + }), + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("bar")}), + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("baz")}), + }), + 0, + }, + "NestedType NestingModeSet invalid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + // "foo" should be a string, not a list + "foo": cty.ListVal([]cty.Value{cty.StringVal("bar"), cty.StringVal("baz")}), + })})), + }, + }, + }), + cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{"foo": cty.String}))), + 1, + }, + "NestedType NestingModeMap valid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + "two": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + }), + })), + }, + }, + }), + cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("bar")}), + "two": cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("baz")}), + }), + 0, + }, + "NestedType NestingModeMap invalid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingMap, + Attributes: 
map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + // "foo" should be a string, not a list + "foo": cty.ListVal([]cty.Value{cty.StringVal("bar"), cty.StringVal("baz")}), + }), + })), + }, + }, + }), + cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{"foo": cty.String}))), + 1, + }, + "deeply NestedType NestingModeList valid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + }, + }, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")}), + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("boz")}), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("biz")}), + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("buz")}), + }), + }), + })), + }, + }, + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")}), + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("boz")}), + })}), + cty.ObjectVal(map[string]cty.Value{"foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("biz")}), + cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("buz")}), + })}), + }), + 0, + }, + 
"deeply NestedType NestingList invalid": { + &Attribute{ + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": { + Type: cty.Number, + Required: true, + }, + }, + }, + Required: true, + }, + }, + }, + Optional: true, + }, + hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "attr": { + Name: "attr", + Expr: hcltest.MockExprLiteral(cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + // bar should be a Number + cty.ObjectVal(map[string]cty.Value{"bar": cty.True}), + cty.ObjectVal(map[string]cty.Value{"bar": cty.False}), + }), + }), + })), + }, + }, + }), + cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object(map[string]cty.Type{"bar": cty.Number})), + }))), + 1, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + spec := test.Schema.decoderSpec("attr") + got, diags := hcldec.Decode(test.TestBody, spec, nil) + if len(diags) != test.DiagCount { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount) + for _, diag := range diags { + t.Logf("- %s", diag.Error()) + } + } + + if !got.RawEquals(test.Want) { + t.Logf("[INFO] implied schema is %s", spew.Sdump(hcldec.ImpliedSchema(spec))) + t.Errorf("wrong result\ngot: %s\nwant: %s", dump.Value(got), dump.Value(test.Want)) + } + }) + } + +} + +// TestAttributeDecodeSpec_panic is a temporary test which verifies that +// decoderSpec panics when an invalid Attribute schema is encountered. It will +// be removed when InternalValidate() is extended to validate Attribute specs +// (and is used). See the #FIXME in decoderSpec. 
+func TestAttributeDecoderSpec_panic(t *testing.T) { + attrS := &Attribute{ + Type: cty.Object(map[string]cty.Type{ + "nested_attribute": cty.String, + }), + NestedType: &Object{}, + Optional: true, + } + + defer func() { recover() }() + attrS.decoderSpec("attr") + t.Errorf("expected panic") +} + +// TestAttributeDecoderSpecDecode_panic is a test which verifies that hcldec.Decode panics. +func TestAttributeDecoderSpecDecode_panic(t *testing.T) { + tests := []struct { + name string + inputSchema *Attribute + }{ + { + name: "empty", + inputSchema: &Attribute{}, + }, + { + name: "nil", + inputSchema: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + spec := tt.inputSchema.decoderSpec("attr") + + defer func() { recover() }() + _, _ = hcldec.Decode(nil, spec, nil) + t.Errorf(`expected panic when execute hcldec.Decode`) + }) + } +} + +func TestListOptionalAttrsFromObject(t *testing.T) { + tests := []struct { + input *Object + want []string + }{ + { + nil, + []string{}, + }, + { + &Object{}, + []string{}, + }, + { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + []string{"optional", "computed", "optional_computed"}, + }, + } + + for _, test := range tests { + got := listOptionalAttrsFromObject(test.input) + + // order is irrelevant + sort.Strings(got) + sort.Strings(test.want) + + if diff := cmp.Diff(got, test.want); diff != "" { + t.Fatalf("wrong result: %s\n", diff) + } + } +} diff --git a/pkg/configs/configschema/doc.go b/pkg/configs/configschema/doc.go new file mode 100644 index 00000000000..ec25574ad9f --- /dev/null +++ b/pkg/configs/configschema/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 
2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into OpenTofu core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/pkg/configs/configschema/empty_value.go b/pkg/configs/configschema/empty_value.go new file mode 100644 index 00000000000..eca4e078ba7 --- /dev/null +++ b/pkg/configs/configschema/empty_value.go @@ -0,0 +1,64 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// EmptyValue returns the "empty value" for the receiving block, which for +// a block type is a non-null object where all of the attribute values are +// the empty values of the block's attributes and nested block types. +// +// In other words, it returns the value that would be returned if an empty +// block were decoded against the receiving schema, assuming that no required +// attribute or block constraints were honored. 
+func (b *Block) EmptyValue() cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range b.Attributes { + vals[name] = attrS.EmptyValue() + } + for name, blockS := range b.BlockTypes { + vals[name] = blockS.EmptyValue() + } + return cty.ObjectVal(vals) +} + +// EmptyValue returns the "empty value" for the receiving attribute, which is +// the value that would be returned if there were no definition of the attribute +// at all, ignoring any required constraint. +func (a *Attribute) EmptyValue() cty.Value { + return cty.NullVal(a.ImpliedType()) +} + +// EmptyValue returns the "empty value" for when there are zero nested blocks +// present of the receiving type. +func (b *NestedBlock) EmptyValue() cty.Value { + switch b.Nesting { + case NestingSingle: + return cty.NullVal(b.Block.ImpliedType()) + case NestingGroup: + return b.Block.EmptyValue() + case NestingList: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyTupleVal + } else { + return cty.ListValEmpty(ty) + } + case NestingMap: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyObjectVal + } else { + return cty.MapValEmpty(ty) + } + case NestingSet: + return cty.SetValEmpty(b.Block.ImpliedType()) + default: + // Should never get here because the above is intended to be exhaustive, + // but we'll be robust and return a result nonetheless. + return cty.NullVal(cty.DynamicPseudoType) + } +} diff --git a/pkg/configs/configschema/empty_value_test.go b/pkg/configs/configschema/empty_value_test.go new file mode 100644 index 00000000000..78bf2ae5e91 --- /dev/null +++ b/pkg/configs/configschema/empty_value_test.go @@ -0,0 +1,262 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "fmt" + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" +) + +func TestBlockEmptyValue(t *testing.T) { + tests := []struct { + Schema *Block + Want cty.Value + }{ + { + &Block{}, + cty.EmptyObjectVal, + }, + { + &Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "str": cty.NullVal(cty.String), + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "single": { + Nesting: NestingSingle, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "single": cty.NullVal(cty.Object(map[string]cty.Type{ + "str": cty.String, + })), + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "group": { + Nesting: NestingGroup, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "group": cty.ObjectVal(map[string]cty.Value{ + "str": cty.NullVal(cty.String), + }), + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "str": cty.String, + })), + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "list_dynamic": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.DynamicPseudoType, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "list_dynamic": cty.EmptyTupleVal, + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "map": { + Nesting: NestingMap, + 
Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "str": cty.String, + })), + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "map_dynamic": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.DynamicPseudoType, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "map_dynamic": cty.EmptyObjectVal, + }), + }, + { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "set": { + Nesting: NestingSet, + Block: Block{ + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "str": cty.String, + })), + }), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Schema), func(t *testing.T) { + got := test.Schema.EmptyValue() + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\nschema: %s\ngot: %s\nwant: %s", spew.Sdump(test.Schema), dump.Value(got), dump.Value(test.Want)) + } + }) + } +} + +// Attribute EmptyValue() is well covered by the Block tests above; these tests +// focus on the behavior with NestedType field inside an Attribute +func TestAttributeEmptyValue(t *testing.T) { + tests := []struct { + Schema *Attribute + Want cty.Value + }{ + { + &Attribute{}, + cty.NilVal, + }, + { + &Attribute{ + Type: cty.String, + }, + cty.NullVal(cty.String), + }, + { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "str": cty.String, + })), + }, + { + &Attribute{ + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, 
+ cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "str": cty.String, + }), + )), + }, + { + &Attribute{ + NestedType: &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + cty.NullVal(cty.Map( + cty.Object(map[string]cty.Type{ + "str": cty.String, + }), + )), + }, + { + &Attribute{ + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "str": {Type: cty.String, Required: true}, + }, + }, + }, + cty.NullVal(cty.Set( + cty.Object(map[string]cty.Type{ + "str": cty.String, + }), + )), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Schema), func(t *testing.T) { + got := test.Schema.EmptyValue() + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\nschema: %s\ngot: %s\nwant: %s", spew.Sdump(test.Schema), dump.Value(got), dump.Value(test.Want)) + } + }) + } +} diff --git a/pkg/configs/configschema/filter.go b/pkg/configs/configschema/filter.go new file mode 100644 index 00000000000..ee640980b4a --- /dev/null +++ b/pkg/configs/configschema/filter.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +type FilterT[T any] func(string, T) bool + +var ( + FilterReadOnlyAttribute = func(name string, attribute *Attribute) bool { + return attribute.Computed && !attribute.Optional + } + + FilterHelperSchemaIdAttribute = func(name string, attribute *Attribute) bool { + if name == "id" && attribute.Computed && attribute.Optional { + return true + } + return false + } + + FilterDeprecatedAttribute = func(name string, attribute *Attribute) bool { + return attribute.Deprecated + } + + FilterDeprecatedBlock = func(name string, block *NestedBlock) bool { + return block.Deprecated + } +) + +func FilterOr[T any](filters ...FilterT[T]) FilterT[T] { + return func(name string, value T) bool { + for _, f := range filters { + if f(name, value) { + return true + } + } + return false + } +} + +func (b *Block) Filter(filterAttribute FilterT[*Attribute], filterBlock FilterT[*NestedBlock]) *Block { + ret := &Block{ + Description: b.Description, + DescriptionKind: b.DescriptionKind, + Deprecated: b.Deprecated, + } + + if b.Attributes != nil { + ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) + } + for name, attrS := range b.Attributes { + if filterAttribute == nil || !filterAttribute(name, attrS) { + ret.Attributes[name] = attrS + } + + if attrS.NestedType != nil { + ret.Attributes[name].NestedType = filterNestedType(attrS.NestedType, filterAttribute) + } + } + + if b.BlockTypes != nil { + ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) + } + for name, blockS := range b.BlockTypes { + if filterBlock == nil || !filterBlock(name, blockS) { + block := blockS.Filter(filterAttribute, filterBlock) + ret.BlockTypes[name] = &NestedBlock{ + Block: *block, + Nesting: blockS.Nesting, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + } + } + + return ret +} + +func filterNestedType(obj *Object, filterAttribute FilterT[*Attribute]) *Object { + if obj == nil { + return nil + } + + ret := 
&Object{ + Attributes: map[string]*Attribute{}, + Nesting: obj.Nesting, + } + + for name, attrS := range obj.Attributes { + if filterAttribute == nil || !filterAttribute(name, attrS) { + ret.Attributes[name] = attrS + if attrS.NestedType != nil { + ret.Attributes[name].NestedType = filterNestedType(attrS.NestedType, filterAttribute) + } + } + } + + return ret +} diff --git a/pkg/configs/configschema/filter_test.go b/pkg/configs/configschema/filter_test.go new file mode 100644 index 00000000000..3a7ede14068 --- /dev/null +++ b/pkg/configs/configschema/filter_test.go @@ -0,0 +1,283 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +func TestFilter(t *testing.T) { + testCases := map[string]struct { + schema *Block + filterAttribute FilterT[*Attribute] + filterBlock FilterT[*NestedBlock] + want *Block + }{ + "empty": { + schema: &Block{}, + filterAttribute: FilterDeprecatedAttribute, + filterBlock: FilterDeprecatedBlock, + want: &Block{}, + }, + "noop": { + schema: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + filterAttribute: nil, + filterBlock: nil, + want: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "filter_deprecated": { + schema: &Block{ + Attributes: 
map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "deprecated_string": { + Type: cty.String, + Deprecated: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + }, + "deprecated_string": { + Type: cty.String, + Deprecated: true, + }, + }, + Nesting: NestingList, + }, + }, + }, + + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + }, + Deprecated: true, + }, + }, + }, + }, + filterAttribute: FilterDeprecatedAttribute, + filterBlock: FilterDeprecatedBlock, + want: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + }, + }, + Nesting: NestingList, + }, + }, + }, + }, + }, + "filter_read_only": { + schema: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "read_only_string": { + Type: cty.String, + Computed: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "read_only_string": { + Type: cty.String, + Computed: true, + }, + "deeply_nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "number": { + Type: cty.Number, + Required: true, + }, + "read_only_number": { + Type: cty.Number, + Computed: true, + }, + }, + Nesting: NestingList, + }, + }, + }, + Nesting: NestingList, + }, + }, + }, + + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "read_only_string": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + filterAttribute: FilterReadOnlyAttribute, + filterBlock: 
nil, + want: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "deeply_nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "number": { + Type: cty.Number, + Required: true, + }, + }, + Nesting: NestingList, + }, + }, + }, + Nesting: NestingList, + }, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "filter_optional_computed_id": { + schema: &Block{ + Attributes: map[string]*Attribute{ + "id": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "string": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + filterAttribute: FilterHelperSchemaIdAttribute, + filterBlock: nil, + want: &Block{ + Attributes: map[string]*Attribute{ + "string": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.schema.Filter(tc.filterAttribute, tc.filterBlock) + if !cmp.Equal(got, tc.want, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty()) { + t.Fatal(cmp.Diff(got, tc.want, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty())) + } + }) + } +} diff --git a/pkg/configs/configschema/implied_type.go b/pkg/configs/configschema/implied_type.go new file mode 100644 index 00000000000..70167c74f74 --- /dev/null +++ b/pkg/configs/configschema/implied_type.go @@ -0,0 +1,138 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// The type returned from Block.ImpliedType differs from the type returned by +// hcldec.ImpliedType in that there will be no objects with optional +// attributes, since this value is not to be used for the decoding of +// configuration. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. +func (b *Block) ImpliedType() cty.Type { + return b.specType().WithoutOptionalAttributesDeep() +} + +// specType returns the cty.Type used for decoding a configuration +// block using the receiving block schema. This is the type used internally by +// hcldec to decode configuration. +func (b *Block) specType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + return hcldec.ImpliedType(b.DecoderSpec()) +} + +// ContainsSensitive returns true if any of the attributes of the receiving +// block or any of its descendent blocks are marked as sensitive. +// +// Blocks themselves cannot be sensitive as a whole -- sensitivity is a +// per-attribute idea -- but sometimes we want to include a whole object +// decoded from a block in some UI output, and that is safe to do only if +// none of the contained attributes are sensitive. 
+func (b *Block) ContainsSensitive() bool { + for _, attrS := range b.Attributes { + if attrS.Sensitive { + return true + } + if attrS.NestedType != nil && attrS.NestedType.ContainsSensitive() { + return true + } + } + for _, blockS := range b.BlockTypes { + if blockS.ContainsSensitive() { + return true + } + } + return false +} + +// ImpliedType returns the cty.Type that would result from decoding a Block's +// ImpliedType and getting the resulting AttributeType. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Object objects should be tested +// using the InternalValidate method to detect any inconsistencies that would +// cause this method to fall back on defaults and assumptions. +func (a *Attribute) ImpliedType() cty.Type { + if a.NestedType != nil { + return a.NestedType.specType().WithoutOptionalAttributesDeep() + } + return a.Type +} + +// ImpliedType returns the cty.Type that would result from decoding a +// NestedType Attribute using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Object objects should be tested +// using the InternalValidate method to detect any inconsistencies that would +// cause this method to fall back on defaults and assumptions. +func (o *Object) ImpliedType() cty.Type { + return o.specType().WithoutOptionalAttributesDeep() +} + +// specType returns the cty.Type used for decoding a NestedType Attribute using +// the receiving block schema. 
+func (o *Object) specType() cty.Type { + if o == nil { + return cty.EmptyObject + } + + attrTys := make(map[string]cty.Type, len(o.Attributes)) + for name, attrS := range o.Attributes { + if attrS.NestedType != nil { + attrTys[name] = attrS.NestedType.specType() + } else { + attrTys[name] = attrS.Type + } + } + optAttrs := listOptionalAttrsFromObject(o) + + var ret cty.Type + if len(optAttrs) > 0 { + ret = cty.ObjectWithOptionalAttrs(attrTys, optAttrs) + } else { + ret = cty.Object(attrTys) + } + switch o.Nesting { + case NestingSingle: + return ret + case NestingList: + return cty.List(ret) + case NestingMap: + return cty.Map(ret) + case NestingSet: + return cty.Set(ret) + default: // Should never happen + return cty.EmptyObject + } +} + +// ContainsSensitive returns true if any of the attributes of the receiving +// Object are marked as sensitive. +func (o *Object) ContainsSensitive() bool { + for _, attrS := range o.Attributes { + if attrS.Sensitive { + return true + } + if attrS.NestedType != nil && attrS.NestedType.ContainsSensitive() { + return true + } + } + return false +} diff --git a/pkg/configs/configschema/implied_type_test.go b/pkg/configs/configschema/implied_type_test.go new file mode 100644 index 00000000000..1bfb7df97fb --- /dev/null +++ b/pkg/configs/configschema/implied_type_test.go @@ -0,0 +1,609 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestBlockImpliedType(t *testing.T) { + tests := map[string]struct { + Schema *Block + Want cty.Type + }{ + "nil": { + nil, + cty.EmptyObject, + }, + "empty": { + &Block{}, + cty.EmptyObject, + }, + "attributes": { + &Block{ + Attributes: map[string]*Attribute{ + "optional": { + Type: cty.String, + Optional: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }), + }, + "blocks": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "single": &NestedBlock{ + Nesting: NestingSingle, + Block: Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + "list": &NestedBlock{ + Nesting: NestingList, + }, + "set": &NestedBlock{ + Nesting: NestingSet, + }, + "map": &NestedBlock{ + Nesting: NestingMap, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "single": cty.Object(map[string]cty.Type{ + "foo": cty.DynamicPseudoType, + }), + "list": cty.List(cty.EmptyObject), + "set": cty.Set(cty.EmptyObject), + "map": cty.Map(cty.EmptyObject), + }), + }, + "deep block nesting": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "single": &NestedBlock{ + Nesting: NestingSingle, + Block: Block{ + BlockTypes: map[string]*NestedBlock{ + "list": &NestedBlock{ + Nesting: NestingList, + Block: Block{ + BlockTypes: map[string]*NestedBlock{ + "set": &NestedBlock{ + Nesting: NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "single": cty.Object(map[string]cty.Type{ + "list": 
cty.List(cty.Object(map[string]cty.Type{ + "set": cty.Set(cty.EmptyObject), + })), + }), + }), + }, + "nested objects with optional attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "map": { + Optional: true, + NestedType: &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + }, + }, + }, + // The ImpliedType from the type-level block should not contain any + // optional attributes. + cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object( + map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, + )), + }), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.ImpliedType() + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBlockContainsSensitive(t *testing.T) { + tests := map[string]struct { + Schema *Block + Want bool + }{ + "object contains sensitive": { + &Block{ + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + true, + }, + "no sensitive attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "insensitive": {}, + }, + }, + false, + }, + "nested object contains sensitive": { + &Block{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + }, + }, + }, + true, + }, + "nested obj, no sensitive attrs": { + &Block{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "public": {}, + }, + }, + }, + }, + }, + false, + }, + } + + for 
name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.ContainsSensitive() + if got != test.Want { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } + +} + +func TestObjectImpliedType(t *testing.T) { + tests := map[string]struct { + Schema *Object + Want cty.Type + }{ + "nil": { + nil, + cty.EmptyObject, + }, + "empty": { + &Object{}, + cty.EmptyObject, + }, + "attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + cty.Object( + map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, + ), + }, + "nested attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + Optional: true, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "nested_type": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }), + }), + }, + "nested object-type attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: 
cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + "object": { + Type: cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + }, []string{"optional"}), + }, + }, + }, + Optional: true, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "nested_type": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + "object": cty.Object(map[string]cty.Type{"optional": cty.String, "required": cty.Number}), + }), + }), + }, + "NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + cty.List(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "NestingMap": { + &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Map(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "NestingSet": { + &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Set(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "deeply nested NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": {Type: cty.String}, + }, + }, + }, + }, + }, + cty.List(cty.Object(map[string]cty.Type{"foo": cty.List(cty.Object(map[string]cty.Type{"bar": cty.String}))})), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.ImpliedType() + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestObjectContainsSensitive(t *testing.T) { + tests := map[string]struct { + Schema *Object + Want bool + }{ + "object contains sensitive": { + &Object{ 
+ Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + true, + }, + "no sensitive attrs": { + &Object{ + Attributes: map[string]*Attribute{ + "insensitive": {}, + }, + }, + false, + }, + "nested object contains sensitive": { + &Object{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + }, + }, + }, + true, + }, + "nested obj, no sensitive attrs": { + &Object{ + Attributes: map[string]*Attribute{ + "nested": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "public": {}, + }, + }, + }, + }, + }, + false, + }, + "several nested objects, one contains sensitive": { + &Object{ + Attributes: map[string]*Attribute{ + "alpha": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nonsensitive": {}, + }, + }, + }, + "beta": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "sensitive": {Sensitive: true}, + }, + }, + }, + "gamma": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nonsensitive": {}, + }, + }, + }, + }, + }, + true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.ContainsSensitive() + if got != test.Want { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } + +} + +// Nested attribute should return optional object attributes for decoding. 
+func TestObjectSpecType(t *testing.T) { + tests := map[string]struct { + Schema *Object + Want cty.Type + }{ + "attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + cty.ObjectWithOptionalAttrs( + map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, + []string{"optional", "computed", "optional_computed"}, + ), + }, + "nested attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "nested_type": cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + }, []string{"optional", "computed", "optional_computed"}), + }, []string{"nested_type"}), + }, + "nested object-type attributes": { + &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "nested_type": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + "required": {Type: cty.Number, Required: true}, + "computed": {Type: cty.List(cty.Bool), Computed: true}, + "optional_computed": {Type: cty.Map(cty.Bool), Optional: true, Computed: true}, + 
"object": { + Type: cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + }, []string{"optional"}), + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "nested_type": cty.ObjectWithOptionalAttrs(map[string]cty.Type{ + "optional": cty.String, + "required": cty.Number, + "computed": cty.List(cty.Bool), + "optional_computed": cty.Map(cty.Bool), + "object": cty.ObjectWithOptionalAttrs(map[string]cty.Type{"optional": cty.String, "required": cty.Number}, []string{"optional"}), + }, []string{"optional", "computed", "optional_computed"}), + }, []string{"nested_type"}), + }, + "NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + cty.List(cty.ObjectWithOptionalAttrs(map[string]cty.Type{"foo": cty.String}, []string{"foo"})), + }, + "NestingMap": { + &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Map(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "NestingSet": { + &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "foo": {Type: cty.String}, + }, + }, + cty.Set(cty.Object(map[string]cty.Type{"foo": cty.String})), + }, + "deeply nested NestingList": { + &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "bar": {Type: cty.String}, + }, + }, + }, + }, + }, + cty.List(cty.Object(map[string]cty.Type{"foo": cty.List(cty.Object(map[string]cty.Type{"bar": cty.String}))})), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.Schema.specType() + if !got.Equals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/configs/configschema/internal_validate.go 
b/pkg/configs/configschema/internal_validate.go new file mode 100644 index 00000000000..85c64caf32b --- /dev/null +++ b/pkg/configs/configschema/internal_validate.go @@ -0,0 +1,164 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "fmt" + "regexp" + + "github.com/zclconf/go-cty/cty" + + multierror "github.com/hashicorp/go-multierror" +) + +var validName = regexp.MustCompile(`^[a-z0-9_]+$`) + +// InternalValidate returns an error if the receiving block and its child schema +// definitions have any inconsistencies with the documented rules for valid +// schema. +// +// This can be used within unit tests to detect when a given schema is invalid, +// and is run when tofu loads provider schemas during NewContext. +func (b *Block) InternalValidate() error { + if b == nil { + return fmt.Errorf("top-level block schema is nil") + } + return b.internalValidate("") +} + +func (b *Block) internalValidate(prefix string) error { + var multiErr *multierror.Error + + for name, attrS := range b.Attributes { + if attrS == nil { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + multiErr = multierror.Append(multiErr, attrS.internalValidate(name, prefix)) + } + + for name, blockS := range b.BlockTypes { + if blockS == nil { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: block schema is nil", prefix, name)) + continue + } + + if _, isAttr := b.Attributes[name]; isAttr { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) + } else if !validName.MatchString(name) { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + + if blockS.MinItems < 0 || blockS.MaxItems < 0 { + multiErr = 
multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) + } + + switch blockS.Nesting { + case NestingSingle: + switch { + case blockS.MinItems != blockS.MaxItems: + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) + case blockS.MinItems < 0 || blockS.MinItems > 1: + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + } + case NestingGroup: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) + } + case NestingList, NestingSet: + if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + } + if blockS.Nesting == NestingSet { + ety := blockS.Block.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + // properly hash them, and so can't support mixed types. + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } + case NestingMap: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + } + default: + multiErr = multierror.Append(multiErr, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + } + + subPrefix := prefix + name + "." 
+ multiErr = multierror.Append(multiErr, blockS.Block.internalValidate(subPrefix)) + } + + return multiErr.ErrorOrNil() +} + +// InternalValidate returns an error if the receiving attribute and its child +// schema definitions have any inconsistencies with the documented rules for +// valid schema. +func (a *Attribute) InternalValidate(name string) error { + if a == nil { + return fmt.Errorf("attribute schema is nil") + } + return a.internalValidate(name, "") +} + +func (a *Attribute) internalValidate(name, prefix string) error { + var err *multierror.Error + + /* FIXME: this validation breaks certain existing providers and cannot be enforced without coordination. + if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + */ + if !a.Optional && !a.Required && !a.Computed { + err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) + } + if a.Optional && a.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) + } + if a.Computed && a.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) + } + + if a.Type == cty.NilType && a.NestedType == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: either Type or NestedType must be defined", prefix, name)) + } + + if a.Type != cty.NilType { + if a.NestedType != nil { + err = multierror.Append(err, fmt.Errorf("%s: Type and NestedType cannot both be set", name)) + } + } + + if a.NestedType != nil { + switch a.NestedType.Nesting { + case NestingSingle, NestingMap: + // no validations to perform + case NestingList, NestingSet: + if a.NestedType.Nesting == NestingSet { + ety := a.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + 
// properly hash them, and so can't support mixed types. + err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, a.NestedType.Nesting)) + } + for name, attrS := range a.NestedType.Attributes { + if attrS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + err = multierror.Append(err, attrS.internalValidate(name, prefix)) + } + } + + return err.ErrorOrNil() +} diff --git a/pkg/configs/configschema/internal_validate_test.go b/pkg/configs/configschema/internal_validate_test.go new file mode 100644 index 00000000000..059f506b895 --- /dev/null +++ b/pkg/configs/configschema/internal_validate_test.go @@ -0,0 +1,334 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + multierror "github.com/hashicorp/go-multierror" +) + +func TestBlockInternalValidate(t *testing.T) { + tests := map[string]struct { + Block *Block + Errs []string + }{ + "empty": { + &Block{}, + []string{}, + }, + "valid": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + "bar": { + Type: cty.String, + Optional: true, + }, + "baz": { + Type: cty.String, + Computed: true, + }, + "baz_maybe": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "single": { + Nesting: NestingSingle, + Block: Block{}, + }, + "single_required": { + Nesting: NestingSingle, + Block: Block{}, + MinItems: 1, + MaxItems: 1, + }, + "list": { + Nesting: NestingList, + Block: Block{}, + }, + "list_required": { + Nesting: NestingList, + Block: Block{}, + MinItems: 1, + }, + "set": { + Nesting: 
NestingSet, + Block: Block{}, + }, + "set_required": { + Nesting: NestingSet, + Block: Block{}, + MinItems: 1, + }, + "map": { + Nesting: NestingMap, + Block: Block{}, + }, + }, + }, + []string{}, + }, + "attribute with no flags set": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + }, + }, + }, + []string{"foo: must set Optional, Required or Computed"}, + }, + "attribute required and optional": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + Optional: true, + }, + }, + }, + []string{"foo: cannot set both Optional and Required"}, + }, + "attribute required and computed": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + Computed: true, + }, + }, + }, + []string{"foo: cannot set both Computed and Required"}, + }, + "attribute optional and computed": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + []string{}, + }, + "attribute with missing type": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Optional: true, + }, + }, + }, + []string{"foo: either Type or NestedType must be defined"}, + }, + "attribute with both type and nestedtype should not suppress other validation messages": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + // These properties are here to make sure other errors are also reported. + Optional: true, + Required: true, + // Here's what we actually want to validate: + Type: cty.String, + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + []string{ + "foo: cannot set both Optional and Required", + "foo: Type and NestedType cannot both be set", + }, + }, + /* FIXME: This caused errors when applied to existing providers (oci) + and cannot be enforced without coordination. 
+ + "attribute with invalid name": {&Block{Attributes: + map[string]*Attribute{"fooBar": {Type: cty.String, Optional: + true, + }, + }, + }, + []string{"fooBar: name may contain only lowercase letters, digits and underscores"}, + }, + */ + "attribute with invalid NestedType attribute": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Required: true, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + []string{"foo: cannot set both Optional and Required"}, + }, + "block type with invalid name": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "fooBar": { + Nesting: NestingSingle, + }, + }, + }, + []string{"fooBar: name may contain only lowercase letters, digits and underscores"}, + }, + "colliding names": { + &Block{ + Attributes: map[string]*Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "foo": { + Nesting: NestingSingle, + }, + }, + }, + []string{"foo: name defined as both attribute and child block type"}, + }, + "nested block with badness": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "bad": { + Nesting: NestingSingle, + Block: Block{ + Attributes: map[string]*Attribute{ + "nested_bad": { + Type: cty.String, + Required: true, + Optional: true, + }, + }, + }, + }, + }, + }, + []string{"bad.nested_bad: cannot set both Optional and Required"}, + }, + "nested list block with dynamically-typed attribute": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "bad": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "nested_bad": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + }, + }, + []string{}, + }, + "nested set block with dynamically-typed attribute": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "bad": { + Nesting: NestingSet, + Block: Block{ + Attributes: map[string]*Attribute{ + 
"nested_bad": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + }, + }, + []string{"bad: NestingSet blocks may not contain attributes of cty.DynamicPseudoType"}, + }, + "nil": { + nil, + []string{"top-level block schema is nil"}, + }, + "nil attr": { + &Block{ + Attributes: map[string]*Attribute{ + "bad": nil, + }, + }, + []string{"bad: attribute schema is nil"}, + }, + "nil block type": { + &Block{ + BlockTypes: map[string]*NestedBlock{ + "bad": nil, + }, + }, + []string{"bad: block schema is nil"}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + errs := multierrorErrors(test.Block.InternalValidate()) + if got, want := len(errs), len(test.Errs); got != want { + t.Errorf("wrong number of errors %d; want %d", got, want) + for _, err := range errs { + t.Logf("- %s", err.Error()) + } + } else { + if len(errs) > 0 { + for i := range errs { + if errs[i].Error() != test.Errs[i] { + t.Errorf("wrong error: got %s, want %s", errs[i].Error(), test.Errs[i]) + } + } + } + } + }) + } +} + +func multierrorErrors(err error) []error { + // A function like this should really be part of the multierror package... + + if err == nil { + return nil + } + + switch terr := err.(type) { + case *multierror.Error: + return terr.Errors + default: + return []error{err} + } +} diff --git a/pkg/configs/configschema/marks.go b/pkg/configs/configschema/marks.go new file mode 100644 index 00000000000..cb281d15099 --- /dev/null +++ b/pkg/configs/configschema/marks.go @@ -0,0 +1,158 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +// copyAndExtendPath returns a copy of a cty.Path with some additional +// `cty.PathStep`s appended to its end, to simplify creating new child paths. 
+func copyAndExtendPath(path cty.Path, nextSteps ...cty.PathStep) cty.Path { + newPath := make(cty.Path, len(path), len(path)+len(nextSteps)) + copy(newPath, path) + newPath = append(newPath, nextSteps...) + return newPath +} + +// ValueMarks returns a set of path value marks for a given value and path, +// based on the sensitive flag for each attribute within the schema. Nested +// blocks are descended (if present in the given value). +func (b *Block) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { + var pvm []cty.PathValueMarks + + // We can mark attributes as sensitive even if the value is null + for name, attrS := range b.Attributes { + if attrS.Sensitive { + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } + } + + // If the value is null, no other marks are possible + if val.IsNull() { + return pvm + } + + // Extract marks for nested attribute type values + for name, attrS := range b.Attributes { + // If the attribute has no nested type, or the nested type doesn't + // contain any sensitive attributes, skip inspecting it + if attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive() { + continue + } + + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) 
+ } + + // Extract marks for nested blocks + for name, blockS := range b.BlockTypes { + // If our block doesn't contain any sensitive attributes, skip inspecting it + if !blockS.Block.ContainsSensitive() { + continue + } + + blockV := val.GetAttr(name) + if blockV.IsNull() || !blockV.IsKnown() { + continue + } + + // Create a copy of the path, with this step added, to add to our PathValueMarks slice + blockPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + pvm = append(pvm, blockS.Block.ValueMarks(blockV, blockPath)...) + case NestingList, NestingMap, NestingSet: + for it := blockV.ElementIterator(); it.Next(); { + idx, blockEV := it.Element() + // Create a copy of the path, with this block instance's index + // step added, to add to our PathValueMarks slice + blockInstancePath := copyAndExtendPath(blockPath, cty.IndexStep{Key: idx}) + morePaths := blockS.Block.ValueMarks(blockEV, blockInstancePath) + pvm = append(pvm, morePaths...) + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + return pvm +} + +// ValueMarks returns a set of path value marks for a given value and path, +// based on the sensitive flag for each attribute within the nested attribute. +// Attributes with nested types are descended (if present in the given value). 
+func (o *Object) ValueMarks(val cty.Value, path cty.Path) []cty.PathValueMarks { + var pvm []cty.PathValueMarks + + if val.IsNull() || !val.IsKnown() { + return pvm + } + + for name, attrS := range o.Attributes { + // Skip attributes which can never produce sensitive path value marks + if !attrS.Sensitive && (attrS.NestedType == nil || !attrS.NestedType.ContainsSensitive()) { + continue + } + + switch o.Nesting { + case NestingSingle, NestingGroup: + // Create a path to this attribute + attrPath := copyAndExtendPath(path, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(val.GetAttr(name), attrPath)...) + } + case NestingList, NestingMap, NestingSet: + // For nested attribute types which have a non-single nesting mode, + // we add path value marks for each element of the collection + for it := val.ElementIterator(); it.Next(); { + idx, attrEV := it.Element() + attrV := attrEV.GetAttr(name) + + // Create a path to this element of the attribute's collection. Note + // that the path is extended in opposite order to the iteration order + // of the loops: index into the collection, then the contained + // attribute name. This is because we have one type + // representing multiple collection elements. + attrPath := copyAndExtendPath(path, cty.IndexStep{Key: idx}, cty.GetAttrStep{Name: name}) + + if attrS.Sensitive { + // If the entire attribute is sensitive, mark it so + pvm = append(pvm, cty.PathValueMarks{ + Path: attrPath, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } else { + // The attribute has a nested type which contains sensitive + // attributes, so recurse + pvm = append(pvm, attrS.NestedType.ValueMarks(attrV, attrPath)...) 
+ } + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", attrS.NestedType.Nesting)) + } + } + return pvm +} diff --git a/pkg/configs/configschema/marks_test.go b/pkg/configs/configschema/marks_test.go new file mode 100644 index 00000000000..347416dd067 --- /dev/null +++ b/pkg/configs/configschema/marks_test.go @@ -0,0 +1,187 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestBlockValueMarks(t *testing.T) { + schema := &Block{ + Attributes: map[string]*Attribute{ + "unsensitive": { + Type: cty.String, + Optional: true, + }, + "sensitive": { + Type: cty.String, + Sensitive: true, + }, + "nested": { + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "boop": { + Type: cty.String, + }, + "honk": { + Type: cty.String, + Sensitive: true, + }, + }, + Nesting: NestingList, + }, + }, + }, + + BlockTypes: map[string]*NestedBlock{ + "list": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "unsensitive": { + Type: cty.String, + Optional: true, + }, + "sensitive": { + Type: cty.String, + Sensitive: true, + }, + }, + }, + }, + }, + } + + testCases := map[string]struct { + given cty.Value + expect cty.Value + }{ + "unknown object": { + cty.UnknownVal(schema.ImpliedType()), + cty.UnknownVal(schema.ImpliedType()), + }, + "null object": { + cty.NullVal(schema.ImpliedType()), + cty.NullVal(schema.ImpliedType()), + }, + "object with unknown attributes and blocks": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.UnknownVal(schema.BlockTypes["list"].ImpliedType()), + }), + }, + "object with block value": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String), + "unsensitive": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String), + "unsensitive": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + "nested": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + "honk": cty.String, + }))), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.NullVal(cty.String).Mark(marks.Sensitive), + "unsensitive": cty.NullVal(cty.String), + }), + }), + }), + }, + "object with known values and nested attribute": { + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo"), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + 
"boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.StringVal("foo").Mark(marks.Sensitive), + "unsensitive": cty.StringVal("bar"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("foo"), + "honk": cty.StringVal("bar").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.NullVal(cty.String), + "honk": cty.NullVal(cty.String).Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.UnknownVal(cty.String), + "honk": cty.UnknownVal(cty.String).Mark(marks.Sensitive), + }), + }), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "sensitive": cty.String, + "unsensitive": cty.String, + }))), + }), + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.given.MarkWithPaths(schema.ValueMarks(tc.given, nil)) + if !got.RawEquals(tc.expect) { + t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expect, got) + } + }) + } +} diff --git a/pkg/configs/configschema/nestingmode_string.go b/pkg/configs/configschema/nestingmode_string.go new file mode 100644 index 00000000000..febe743e11a --- /dev/null +++ b/pkg/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[nestingModeInvalid-0] + _ = x[NestingSingle-1] + _ = x[NestingGroup-2] + _ = x[NestingList-3] + _ = x[NestingSet-4] + _ = x[NestingMap-5] +} + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/pkg/configs/configschema/none_required.go b/pkg/configs/configschema/none_required.go new file mode 100644 index 00000000000..baa54e19576 --- /dev/null +++ b/pkg/configs/configschema/none_required.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +// NoneRequired returns a deep copy of the receiver with any required +// attributes translated to optional. 
+func (b *Block) NoneRequired() *Block { + ret := &Block{} + + if b.Attributes != nil { + ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) + } + for name, attrS := range b.Attributes { + ret.Attributes[name] = attrS.forceOptional() + } + + if b.BlockTypes != nil { + ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) + } + for name, blockS := range b.BlockTypes { + ret.BlockTypes[name] = blockS.noneRequired() + } + + return ret +} + +func (b *NestedBlock) noneRequired() *NestedBlock { + ret := *b + ret.Block = *(ret.Block.NoneRequired()) + ret.MinItems = 0 + ret.MaxItems = 0 + return &ret +} + +func (a *Attribute) forceOptional() *Attribute { + ret := *a + ret.Optional = true + ret.Required = false + return &ret +} diff --git a/pkg/configs/configschema/path.go b/pkg/configs/configschema/path.go new file mode 100644 index 00000000000..5f3adba5d3d --- /dev/null +++ b/pkg/configs/configschema/path.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// AttributeByPath looks up the Attribute schema which corresponds to the given +// cty.Path. A nil value is returned if the given path does not correspond to a +// specific attribute. +func (b *Block) AttributeByPath(path cty.Path) *Attribute { + block := b + for i, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + if attr := block.Attributes[step.Name]; attr != nil { + // If the Attribute is defined with a NestedType and there's + // more to the path, descend into the NestedType + if attr.NestedType != nil && i < len(path)-1 { + return attr.NestedType.AttributeByPath(path[i+1:]) + } else if i < len(path)-1 { // There's more to the path, but not more to this Attribute. 
+ return nil + } + return attr + } + + if nestedBlock := block.BlockTypes[step.Name]; nestedBlock != nil { + block = &nestedBlock.Block + continue + } + + return nil + } + } + return nil +} + +// AttributeByPath recurses through a NestedType to look up the Attribute scheme +// which corresponds to the given cty.Path. A nil value is returned if the given +// path does not correspond to a specific attribute. +func (o *Object) AttributeByPath(path cty.Path) *Attribute { + for i, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + if attr := o.Attributes[step.Name]; attr != nil { + if attr.NestedType != nil && i < len(path)-1 { + return attr.NestedType.AttributeByPath(path[i+1:]) + } else if i < len(path)-1 { // There's more to the path, but not more to this Attribute. + return nil + } + return attr + } + } + } + return nil +} diff --git a/pkg/configs/configschema/path_test.go b/pkg/configs/configschema/path_test.go new file mode 100644 index 00000000000..47933736622 --- /dev/null +++ b/pkg/configs/configschema/path_test.go @@ -0,0 +1,234 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestAttributeByPath(t *testing.T) { + schema := &Block{ + Attributes: map[string]*Attribute{ + "a1": {Description: "a1"}, + "a2": {Description: "a2"}, + "a3": { + Description: "a3", + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "nt1": {Description: "nt1"}, + "nt2": { + Description: "nt2", + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "deeply_nested": {Description: "deeply_nested"}, + }, + }, + }, + }, + }, + }, + }, + BlockTypes: map[string]*NestedBlock{ + "b1": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "a3": {Description: "a3"}, + "a4": {Description: "a4"}, + }, + BlockTypes: map[string]*NestedBlock{ + "b2": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "a5": {Description: "a5"}, + "a6": {Description: "a6"}, + }, + }, + }, + }, + }, + }, + "b3": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "a7": {Description: "a7"}, + "a8": {Description: "a8"}, + }, + BlockTypes: map[string]*NestedBlock{ + "b4": { + Nesting: NestingSet, + Block: Block{ + Attributes: map[string]*Attribute{ + "a9": {Description: "a9"}, + "a10": {Description: "a10"}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range []struct { + path cty.Path + attrDescription string + exists bool + }{ + { + cty.GetAttrPath("a2"), + "a2", + true, + }, + { + cty.GetAttrPath("a3").IndexInt(1).GetAttr("nt2"), + "nt2", + true, + }, + { + cty.GetAttrPath("a3").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("no"), + "missing", + false, + }, + { + cty.GetAttrPath("b1"), + "block", + false, + }, + { + cty.GetAttrPath("b1").IndexInt(1).GetAttr("a3"), + "a3", + true, + }, + { + cty.GetAttrPath("b1").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("a7"), + "missing", + false, + }, + { + 
cty.GetAttrPath("b1").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("a6"), + "a6", + true, + }, + { + cty.GetAttrPath("b3").IndexString("foo").GetAttr("b2").IndexString("foo").GetAttr("a7"), + "missing_block", + false, + }, + { + cty.GetAttrPath("b3").IndexString("foo").GetAttr("a7"), + "a7", + true, + }, + { + // Index steps don't apply to the schema, so the set Index value doesn't matter. + cty.GetAttrPath("b3").IndexString("foo").GetAttr("b4").Index(cty.EmptyObjectVal).GetAttr("a9"), + "a9", + true, + }, + } { + t.Run(tc.attrDescription, func(t *testing.T) { + attr := schema.AttributeByPath(tc.path) + if !tc.exists && attr == nil { + return + } + + if attr == nil { + t.Fatalf("missing attribute from path %#v\n", tc.path) + } + + if attr.Description != tc.attrDescription { + t.Fatalf("expected Attribute for %q, got %#v\n", tc.attrDescription, attr) + } + }) + } +} + +func TestObject_AttributeByPath(t *testing.T) { + obj := &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "a1": {Description: "a1"}, + "a2": { + Description: "a2", + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "n1": {Description: "n1"}, + "n2": { + Description: "n2", + NestedType: &Object{ + Attributes: map[string]*Attribute{ + "dn1": {Description: "dn1"}, + }, + }, + }, + }, + }, + }, + }, + } + + tests := []struct { + path cty.Path + attrDescription string + exists bool + }{ + { + cty.GetAttrPath("a2"), + "a2", + true, + }, + { + cty.GetAttrPath("a3"), + "missing", + false, + }, + { + cty.GetAttrPath("a2").IndexString("foo").GetAttr("n1"), + "n1", + true, + }, + { + cty.GetAttrPath("a2").IndexString("foo").GetAttr("n2").IndexInt(11).GetAttr("dn1"), + "dn1", + true, + }, + { + cty.GetAttrPath("a2").IndexString("foo").GetAttr("n2").IndexInt(11).GetAttr("dn1").IndexString("hello").GetAttr("nope"), + "missing_nested", + false, + }, + } + + for _, tc := range tests { + t.Run(tc.attrDescription, func(t *testing.T) { + attr := 
obj.AttributeByPath(tc.path) + if !tc.exists && attr == nil { + return + } + + if !tc.exists && attr != nil { + t.Fatalf("found Attribute, expected nil from path %#v\n", tc.path) + } + + if attr == nil { + t.Fatalf("missing attribute from path %#v\n", tc.path) + } + + if attr.Description != tc.attrDescription { + t.Fatalf("expected Attribute for %q, got %#v\n", tc.attrDescription, attr) + } + }) + } + +} diff --git a/pkg/configs/configschema/schema.go b/pkg/configs/configschema/schema.go new file mode 100644 index 00000000000..9ef11e2a570 --- /dev/null +++ b/pkg/configs/configschema/schema.go @@ -0,0 +1,169 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +type StringKind int + +const ( + StringPlain StringKind = iota + StringMarkdown +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of OpenTofu's native configuration +// syntax. It may be more a matter of convention in other syntaxes, such as +// JSON. +// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks. +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock + + Description string + DescriptionKind StringKind + + Deprecated bool +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + // It conflicts with NestedType. 
+ Type cty.Type + + // NestedType indicates that the attribute is a NestedBlock-style object. + // This field conflicts with Type. + NestedType *Object + + // Description is an English-language description of the purpose and + // usage of the attribute. A description should be concise and use only + // one or two sentences, leaving full definition to longer-form + // documentation defined elsewhere. + Description string + DescriptionKind StringKind + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help OpenTofu mask sensitive information. (OpenTofu + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool + + Deprecated bool +} + +// Object represents the embedding of a structural object inside an Attribute. +type Object struct { + // Attributes describes the nested attributes which may appear inside the + // Object. + Attributes map[string]*Attribute + + // Nesting provides the nesting mode for this Object, which determines how + // many instances of the Object are allowed, how many labels it expects, and + // how the resulting data will be converted into a data structure. + Nesting NestingMode +} + +// NestedBlock represents the embedding of one block within another. 
+type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +// The String method for NestingMode is generated by the directive below. + +//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additionally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. (Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) 
+ // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/pkg/configs/configschema/validate_traversal.go b/pkg/configs/configschema/validate_traversal.go new file mode 100644 index 00000000000..0903991f1b6 --- /dev/null +++ b/pkg/configs/configschema/validate_traversal.go @@ -0,0 +1,198 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/didyoumean" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// StaticValidateTraversal checks whether the given traversal (which must be +// relative) refers to a construct in the receiving schema, returning error +// diagnostics if any problems are found. 
+// +// This method is "optimistic" in that it will not return errors for possible +// problems that cannot be detected statically. It is possible that a +// traversal which passed static validation will still fail when evaluated. +func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { + if !traversal.IsRelative() { + panic("StaticValidateTraversal on absolute traversal") + } + if len(traversal) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + next := traversal[0] + after := traversal[1:] + + var name string + switch step := next.(type) { + case hcl.TraverseAttr: + name = step.Name + case hcl.TraverseIndex: + // No other traversal step types are allowed directly at a block. + // If it looks like the user was trying to use index syntax to + // access an attribute then we'll produce a specialized message. + key := step.Key + if key.Type() == cty.String && key.IsKnown() && !key.IsNull() { + maybeName := key.AsString() + if hclsyntax.ValidIdentifier(maybeName) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid index operation`, + Detail: fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName), + Subject: &step.SrcRange, + }) + return diags + } + } + // If it looks like some other kind of index then we'll use a generic error. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid index operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: &step.SrcRange, + }) + return diags + default: + // No other traversal types should appear in a normal valid traversal, + // but we'll handle this with a generic error anyway to be robust. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: next.SourceRange().Ptr(), + }) + return diags + } + + if attrS, exists := b.Attributes[name]; exists { + // Check for Deprecated status of this attribute. + // We currently can't provide the user with any useful guidance because + // the deprecation string is not part of the schema, but we can at + // least warn them. + // + // This purposely does not attempt to recurse into nested attribute + // types. Because nested attribute values are often not accessed via a + // direct traversal to the leaf attributes, we cannot reliably detect + // if a nested, deprecated attribute value is actually used from the + // traversal alone. More precise detection of deprecated attributes + // would require adding metadata like marks to the cty value itself, to + // be caught during evaluation. + if attrS.Deprecated { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: `Deprecated attribute`, + Detail: fmt.Sprintf(`The attribute %q is deprecated. Refer to the provider documentation for details.`, name), + Subject: next.SourceRange().Ptr(), + }) + } + + // For attribute validation we will just apply the rest of the + // traversal to an unknown value of the attribute type and pass + // through HCL's own errors, since we don't want to replicate all + // of HCL's type checking rules here. + val := cty.UnknownVal(attrS.ImpliedType()) + _, hclDiags := after.TraverseRel(val) + return diags.Append(hclDiags) + } + + if blockS, exists := b.BlockTypes[name]; exists { + moreDiags := blockS.staticValidateTraversal(name, after) + diags = diags.Append(moreDiags) + return diags + } + + // If we get here then the name isn't valid at all. We'll collect up + // all of the names that _are_ valid to use as suggestions. 
+ var suggestions []string + for name := range b.Attributes { + suggestions = append(suggestions, name) + } + for name := range b.BlockTypes { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unsupported attribute`, + Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), + Subject: next.SourceRange().Ptr(), + }) + + return diags +} + +func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { + if b.Nesting == NestingSingle || b.Nesting == NestingGroup { + // Single blocks are easy: just pass right through. + return b.Block.StaticValidateTraversal(traversal) + } + + if len(traversal) == 0 { + // It's always valid to access a nested block's attribute directly. + return nil + } + + var diags tfdiags.Diagnostics + next := traversal[0] + after := traversal[1:] + + switch b.Nesting { + + case NestingSet: + // Can't traverse into a set at all, since it does not have any keys + // to index with. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Cannot index a set value`, + Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), + Subject: next.SourceRange().Ptr(), + }) + return diags + + case NestingList: + if _, ok := next.(hcl.TraverseIndex); ok { + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), + Subject: next.SourceRange().Ptr(), + }) + } + return diags + + case NestingMap: + // Both attribute and index steps are valid for maps, so we'll just + // pass through here and let normal evaluation catch an + // incorrectly-typed index key later, if present. + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + return diags + + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. (Note that we handled NestingSingle separately + // back at the start of this function.) + return nil + } +} diff --git a/pkg/configs/configschema/validate_traversal_test.go b/pkg/configs/configschema/validate_traversal_test.go new file mode 100644 index 00000000000..581f69e5c09 --- /dev/null +++ b/pkg/configs/configschema/validate_traversal_test.go @@ -0,0 +1,263 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configschema + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +func TestStaticValidateTraversal(t *testing.T) { + attrs := map[string]*Attribute{ + "str": {Type: cty.String, Optional: true}, + "list": {Type: cty.List(cty.String), Optional: true}, + "dyn": {Type: cty.DynamicPseudoType, Optional: true}, + "deprecated": {Type: cty.String, Computed: true, Deprecated: true}, + "nested_single": { + Optional: true, + NestedType: &Object{ + Nesting: NestingSingle, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + }, + }, + }, + "nested_list": { + Optional: true, + NestedType: &Object{ + Nesting: NestingList, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + }, + }, + }, + "nested_set": { + Optional: true, + NestedType: &Object{ + Nesting: NestingSet, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + }, + }, + }, + "nested_map": { + Optional: true, + NestedType: &Object{ + Nesting: NestingMap, + Attributes: map[string]*Attribute{ + "optional": {Type: cty.String, Optional: true}, + }, + }, + }, + } + schema := &Block{ + Attributes: attrs, + BlockTypes: map[string]*NestedBlock{ + "single_block": { + Nesting: NestingSingle, + Block: Block{ + Attributes: attrs, + }, + }, + "list_block": { + Nesting: NestingList, + Block: Block{ + Attributes: attrs, + }, + }, + "set_block": { + Nesting: NestingSet, + Block: Block{ + Attributes: attrs, + }, + }, + "map_block": { + Nesting: NestingMap, + Block: Block{ + Attributes: attrs, + }, + }, + }, + } + + tests := []struct { + Traversal string + WantError string + }{ + { + `obj`, + ``, + }, + { + `obj.str`, + ``, + }, + { + `obj.str.nonexist`, + `Unsupported attribute: Can't access attributes on a primitive-typed value (string).`, + }, + { + `obj.list`, + ``, + }, + { + `obj.list[0]`, + ``, 
+ }, + { + `obj.list.nonexist`, + `Unsupported attribute: This value does not have any attributes.`, + }, + { + `obj.dyn`, + ``, + }, + { + `obj.dyn.anything_goes`, + ``, + }, + { + `obj.dyn[0]`, + ``, + }, + { + `obj.nonexist`, + `Unsupported attribute: This object has no argument, nested block, or exported attribute named "nonexist".`, + }, + { + `obj[1]`, + `Invalid index operation: Only attribute access is allowed here, using the dot operator.`, + }, + { + `obj["str"]`, // we require attribute access for the first step to avoid ambiguity with resource instance indices + `Invalid index operation: Only attribute access is allowed here. Did you mean to access attribute "str" using the dot operator?`, + }, + { + `obj.atr`, + `Unsupported attribute: This object has no argument, nested block, or exported attribute named "atr". Did you mean "str"?`, + }, + { + `obj.single_block`, + ``, + }, + { + `obj.single_block.str`, + ``, + }, + { + `obj.single_block.nonexist`, + `Unsupported attribute: This object has no argument, nested block, or exported attribute named "nonexist".`, + }, + { + `obj.list_block`, + ``, + }, + { + `obj.list_block[0]`, + ``, + }, + { + `obj.list_block[0].str`, + ``, + }, + { + `obj.list_block[0].nonexist`, + `Unsupported attribute: This object has no argument, nested block, or exported attribute named "nonexist".`, + }, + { + `obj.list_block.str`, + `Invalid operation: Block type "list_block" is represented by a list of objects, so it must be indexed using a numeric key, like .list_block[0].`, + }, + { + `obj.set_block`, + ``, + }, + { + `obj.set_block[0]`, + `Cannot index a set value: Block type "set_block" is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, + }, + { + `obj.set_block.str`, + `Cannot index a set value: Block type "set_block" is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, + }, + { + `obj.map_block`, + ``, + }, + { + `obj.map_block.anything`, + ``, + }, + { + `obj.map_block["anything"]`, + ``, + }, + { + `obj.map_block.anything.str`, + ``, + }, + { + `obj.map_block["anything"].str`, + ``, + }, + { + `obj.map_block.anything.nonexist`, + `Unsupported attribute: This object has no argument, nested block, or exported attribute named "nonexist".`, + }, + { + `obj.nested_single.optional`, + ``, + }, + { + `obj.nested_list[0].optional`, + ``, + }, + { + `obj.nested_set[0].optional`, + `Invalid index: Elements of a set are identified only by their value and don't have any separate index or key to select with, so it's only possible to perform operations across all elements of the set.`, + }, + { + `obj.nested_map["key"].optional`, + ``, + }, + { + `obj.deprecated`, + `Deprecated attribute: The attribute "deprecated" is deprecated. Refer to the provider documentation for details.`, + }, + } + + for _, test := range tests { + t.Run(test.Traversal, func(t *testing.T) { + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Traversal), "", hcl.Pos{Line: 1, Column: 1}) + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + + // We trim the "obj." portion from the front since StaticValidateTraversal + // only works with relative traversals. 
+ traversal = traversal[1:] + + diags := schema.StaticValidateTraversal(traversal) + if test.WantError == "" { + if diags.HasErrors() { + t.Errorf("unexpected error: %s", diags.Err().Error()) + } + } else { + err := diags.ErrWithWarnings() + if err != nil { + if got := err.Error(); got != test.WantError { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, test.WantError) + } + } else { + t.Errorf("wrong error\ngot: \nwant: %s", test.WantError) + } + } + }) + } +} diff --git a/pkg/configs/container.go b/pkg/configs/container.go new file mode 100644 index 00000000000..988b8cb9b5d --- /dev/null +++ b/pkg/configs/container.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import "github.com/kubegems/opentofu/pkg/addrs" + +// Container provides an interface for scoped resources. +// +// Any resources contained within a Container should not be accessible from +// outside the container. +type Container interface { + // Accessible should return true if the resource specified by addr can + // reference other items within this Container. + // + // Typically, that means that addr will either be the container itself or + // something within the container. + Accessible(addr addrs.Referenceable) bool +} diff --git a/pkg/configs/depends_on.go b/pkg/configs/depends_on.go new file mode 100644 index 00000000000..3f746c12bff --- /dev/null +++ b/pkg/configs/depends_on.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { + var ret []hcl.Traversal + exprs, diags := hcl.ExprList(attr.Expr) + + for _, expr := range exprs { + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + ret = append(ret, traversal) + } + } + + return ret, diags +} diff --git a/pkg/configs/doc.go b/pkg/configs/doc.go new file mode 100644 index 00000000000..e984594b81f --- /dev/null +++ b/pkg/configs/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package configs contains types that represent OpenTofu configurations and +// the different elements thereof. +// +// The functionality in this package can be used for some static analyses of +// OpenTofu configurations, but this package generally exposes representations +// of the configuration source code rather than the result of evaluating these +// objects. The sibling package "lang" deals with evaluation of structures +// and expressions in the configuration. +// +// Due to its close relationship with HCL, this package makes frequent use +// of types from the HCL API, including raw HCL diagnostic messages. Such +// diagnostics can be converted into OpenTofu-flavored diagnostics, if needed, +// using functions in the sibling package tfdiags. +// +// The Parser type is the main entry-point into this package. The LoadConfigDir +// method can be used to load a single module directory, and then a full +// configuration (including any descendent modules) can be produced using +// the top-level BuildConfig method. 
+package configs diff --git a/pkg/configs/escaping_blocks_test.go b/pkg/configs/escaping_blocks_test.go new file mode 100644 index 00000000000..bbeae896a6c --- /dev/null +++ b/pkg/configs/escaping_blocks_test.go @@ -0,0 +1,313 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// "Escaping Blocks" are a special mechanism we have inside our block types +// that accept a mixture of meta-arguments and externally-defined arguments, +// which allow an author to force particular argument names to be interpreted +// as externally-defined even if they have the same name as a meta-argument. +// +// An escaping block is a block with the special type name "_" (just an +// underscore), and is allowed at the top-level of any resource, data, or +// module block. It intentionally has a rather "odd" look so that it stands +// out as something special and rare. +// +// This is not something we expect to see used a lot, but it's an important +// part of our strategy to evolve the OpenTofu language in future using +// editions, so that later editions can define new meta-arguments without +// blocking access to externally-defined arguments of the same name. +// +// We should still define new meta-arguments with care to avoid squatting on +// commonly-used names, but we can't see all modules and all providers in +// the world and so this is an escape hatch for edge cases. Module migration +// tools for future editions that define new meta-arguments should detect +// collisions and automatically migrate existing arguments into an escaping +// block. + +func TestEscapingBlockResource(t *testing.T) { + // (this also tests escaping blocks in provisioner blocks, because + // they only appear nested inside resource blocks.) 
+ + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/escaping-blocks/resource", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + rc := mod.ManagedResources["foo.bar"] + if rc == nil { + t.Fatal("no managed resource named foo.bar") + } + + t.Run("resource body", func(t *testing.T) { + if got := rc.Count; got == nil { + t.Errorf("count not set; want count = 2") + } else { + got, diags := got.Value(nil) + assertNoDiagnostics(t, diags) + if want := cty.NumberIntVal(2); !want.RawEquals(got) { + t.Errorf("wrong count\ngot: %#v\nwant: %#v", got, want) + } + } + if got, want := rc.ForEach, hcl.Expression(nil); got != want { + // Shouldn't have any count because our test fixture only has + // for_each in the escaping block. + t.Errorf("wrong for_each\ngot: %#v\nwant: %#v", got, want) + } + + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "normal", Required: true}, + {Name: "count", Required: true}, + {Name: "for_each", Required: true}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "normal_block"}, + {Type: "lifecycle"}, + {Type: "_"}, + }, + } + content, diags := rc.Config.Content(schema) + assertNoDiagnostics(t, diags) + + normalVal, diags := content.Attributes["normal"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := normalVal, cty.StringVal("yes"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + + countVal, diags := content.Attributes["count"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := countVal, cty.StringVal("not actually count"); !want.RawEquals(got) { + t.Errorf("wrong value for 'count'\ngot: %#v\nwant: %#v", got, want) + } + + var gotBlockTypes []string + for _, block := range content.Blocks { + gotBlockTypes = append(gotBlockTypes, block.Type) + } + wantBlockTypes := []string{"normal_block", "lifecycle", "_"} + if diff := 
cmp.Diff(gotBlockTypes, wantBlockTypes); diff != "" { + t.Errorf("wrong block types\n%s", diff) + } + }) + t.Run("provisioner body", func(t *testing.T) { + if got, want := len(rc.Managed.Provisioners), 1; got != want { + t.Fatalf("wrong number of provisioners %d; want %d", got, want) + } + pc := rc.Managed.Provisioners[0] + + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when", Required: true}, + {Name: "normal", Required: true}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "normal_block"}, + {Type: "lifecycle"}, + {Type: "_"}, + }, + } + content, diags := pc.Config.Content(schema) + assertNoDiagnostics(t, diags) + + normalVal, diags := content.Attributes["normal"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := normalVal, cty.StringVal("yep"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + whenVal, diags := content.Attributes["when"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := whenVal, cty.StringVal("hell freezes over"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + }) +} + +func TestEscapingBlockData(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/escaping-blocks/data", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + rc := mod.DataResources["data.foo.bar"] + if rc == nil { + t.Fatal("no data resource named data.foo.bar") + } + + if got := rc.Count; got == nil { + t.Errorf("count not set; want count = 2") + } else { + got, diags := got.Value(nil) + assertNoDiagnostics(t, diags) + if want := cty.NumberIntVal(2); !want.RawEquals(got) { + t.Errorf("wrong count\ngot: %#v\nwant: %#v", got, want) + } + } + if got, want := rc.ForEach, hcl.Expression(nil); got != want { + // Shouldn't have any count because our test fixture only has + // for_each in the escaping block. 
+ t.Errorf("wrong for_each\ngot: %#v\nwant: %#v", got, want) + } + + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "normal", Required: true}, + {Name: "count", Required: true}, + {Name: "for_each", Required: true}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "normal_block"}, + {Type: "lifecycle"}, + {Type: "_"}, + }, + } + content, diags := rc.Config.Content(schema) + assertNoDiagnostics(t, diags) + + normalVal, diags := content.Attributes["normal"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := normalVal, cty.StringVal("yes"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + + countVal, diags := content.Attributes["count"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := countVal, cty.StringVal("not actually count"); !want.RawEquals(got) { + t.Errorf("wrong value for 'count'\ngot: %#v\nwant: %#v", got, want) + } + + var gotBlockTypes []string + for _, block := range content.Blocks { + gotBlockTypes = append(gotBlockTypes, block.Type) + } + wantBlockTypes := []string{"normal_block", "lifecycle", "_"} + if diff := cmp.Diff(gotBlockTypes, wantBlockTypes); diff != "" { + t.Errorf("wrong block types\n%s", diff) + } + +} + +func TestEscapingBlockModule(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/escaping-blocks/module", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + mc := mod.ModuleCalls["foo"] + if mc == nil { + t.Fatal("no module call named foo") + } + + if got := mc.Count; got == nil { + t.Errorf("count not set; want count = 2") + } else { + got, diags := got.Value(nil) + assertNoDiagnostics(t, diags) + if want := cty.NumberIntVal(2); !want.RawEquals(got) { + t.Errorf("wrong count\ngot: %#v\nwant: %#v", got, want) + } + } + if got, want := mc.ForEach, hcl.Expression(nil); got != want { + // Shouldn't have any count 
because our test fixture only has + // for_each in the escaping block. + t.Errorf("wrong for_each\ngot: %#v\nwant: %#v", got, want) + } + + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "normal", Required: true}, + {Name: "count", Required: true}, + {Name: "for_each", Required: true}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "normal_block"}, + {Type: "lifecycle"}, + {Type: "_"}, + }, + } + content, diags := mc.Config.Content(schema) + assertNoDiagnostics(t, diags) + + normalVal, diags := content.Attributes["normal"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := normalVal, cty.StringVal("yes"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + + countVal, diags := content.Attributes["count"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := countVal, cty.StringVal("not actually count"); !want.RawEquals(got) { + t.Errorf("wrong value for 'count'\ngot: %#v\nwant: %#v", got, want) + } + + var gotBlockTypes []string + for _, block := range content.Blocks { + gotBlockTypes = append(gotBlockTypes, block.Type) + } + wantBlockTypes := []string{"normal_block", "lifecycle", "_"} + if diff := cmp.Diff(gotBlockTypes, wantBlockTypes); diff != "" { + t.Errorf("wrong block types\n%s", diff) + } + +} + +func TestEscapingBlockProvider(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/escaping-blocks/provider", RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + pc := mod.ProviderConfigs["foo.bar"] + if pc == nil { + t.Fatal("no provider configuration named foo.bar") + } + + if got, want := pc.Alias, "bar"; got != want { + t.Errorf("wrong alias\ngot: %#v\nwant: %#v", got, want) + } + + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "normal", Required: true}, + {Name: "alias", Required: true}, + {Name: "version", 
Required: true}, + }, + } + content, diags := pc.Config.Content(schema) + assertNoDiagnostics(t, diags) + + normalVal, diags := content.Attributes["normal"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := normalVal, cty.StringVal("yes"); !want.RawEquals(got) { + t.Errorf("wrong value for 'normal'\ngot: %#v\nwant: %#v", got, want) + } + aliasVal, diags := content.Attributes["alias"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := aliasVal, cty.StringVal("not actually alias"); !want.RawEquals(got) { + t.Errorf("wrong value for 'alias'\ngot: %#v\nwant: %#v", got, want) + } + versionVal, diags := content.Attributes["version"].Expr.Value(nil) + assertNoDiagnostics(t, diags) + if got, want := versionVal, cty.StringVal("not actually version"); !want.RawEquals(got) { + t.Errorf("wrong value for 'version'\ngot: %#v\nwant: %#v", got, want) + } +} diff --git a/pkg/configs/experiments.go b/pkg/configs/experiments.go new file mode 100644 index 00000000000..50607ac97b2 --- /dev/null +++ b/pkg/configs/experiments.go @@ -0,0 +1,241 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/experiments" + "github.com/kubegems/opentofu/version" +) + +// When developing UI for experimental features, you can temporarily disable +// the experiment warning by setting this package-level variable to a non-empty +// value using a link-time flag: +// +// go install -ldflags="-X 'github.com/kubegems/opentofu/pkg/configs.disableExperimentWarnings=yes'" +// +// This functionality is for development purposes only and is not a feature we +// are committing to supporting for end users. 
+var disableExperimentWarnings = "" + +// sniffActiveExperiments does minimal parsing of the given body for +// "terraform" blocks with "experiments" attributes, returning the +// experiments found. +// +// This is separate from other processing so that we can be sure that all of +// the experiments are known before we process the result of the module config, +// and thus we can take into account which experiments are active when deciding +// how to decode. +func sniffActiveExperiments(body hcl.Body, allowed bool) (experiments.Set, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) + + ret := experiments.NewSet() + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileExperimentsSniffBlockSchema) + diags = append(diags, blockDiags...) + + if attr, exists := content.Attributes["language"]; exists { + // We don't yet have a sense of selecting an edition of the + // language, but we're reserving this syntax for now so that + // if and when we do this later older versions of Terraform + // will emit a more helpful error message than just saying + // this attribute doesn't exist. Handling this as part of + // experiments is a bit odd for now but justified by the + // fact that a future fuller implementation of switchable + // languages would be likely use a similar implementation + // strategy as experiments, and thus would lead to this + // function being refactored to deal with both concerns at + // once. We'll see, though! + kw := hcl.ExprAsKeyword(attr.Expr) + currentVersion := version.SemVer.String() + const firstEdition = "TF2021" + switch { + case kw == "": // (the expression wasn't a keyword at all) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid language edition", + Detail: fmt.Sprintf( + "The language argument expects a bare language edition keyword. 
OpenTofu %s supports only language edition %s, which is the default.", + currentVersion, firstEdition, + ), + Subject: attr.Expr.Range().Ptr(), + }) + case kw != firstEdition: + rel := "different" + if kw > firstEdition { // would be weird for this not to be true, but it's user input so anything goes + rel = "newer" + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported language edition", + Detail: fmt.Sprintf( + "OpenTofu v%s only supports language edition %s. This module requires a %s version of OpenTofu CLI.", + currentVersion, firstEdition, rel, + ), + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + attr, exists := content.Attributes["experiments"] + if !exists { + continue + } + + exps, expDiags := decodeExperimentsAttr(attr) + + // Because we concluded this particular experiment in the same + // release as we made experiments alpha-releases-only, we need to + // treat it as special to avoid masking the "experiment has concluded" + // error with the more general "experiments are not available at all" + // error. Note that this experiment is marked as concluded so this + // only "allows" showing the different error message that it is + // concluded, and does not allow actually using the experiment outside + // of an alpha. + // NOTE: We should be able to remove this special exception a release + // or two after v1.3 when folks have had a chance to notice that the + // experiment has concluded and update their modules accordingly. + // When we do so, we might also consider changing decodeExperimentsAttr + // to _not_ include concluded experiments in the returned set, since + // we're doing that right now only to make this condition work. + if exps.Has(experiments.ModuleVariableOptionalAttrs) && len(exps) == 1 { + allowed = true + } + + if allowed { + diags = append(diags, expDiags...) 
+ if !expDiags.HasErrors() { + ret = experiments.SetUnion(ret, exps) + } + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module uses experimental features", + Detail: "Experimental features are intended only for gathering early feedback on new language designs, and so are available only in alpha releases of OpenTofu.", + Subject: attr.NameRange.Ptr(), + }) + } + } + + return ret, diags +} + +func decodeExperimentsAttr(attr *hcl.Attribute) (experiments.Set, hcl.Diagnostics) { + var diags hcl.Diagnostics + + exprs, moreDiags := hcl.ExprList(attr.Expr) + diags = append(diags, moreDiags...) + if moreDiags.HasErrors() { + return nil, diags + } + + var ret = experiments.NewSet() + for _, expr := range exprs { + kw := hcl.ExprAsKeyword(expr) + if kw == "" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid experiment keyword", + Detail: "Elements of \"experiments\" must all be keywords representing active experiments.", + Subject: expr.Range().Ptr(), + }) + continue + } + + exp, err := experiments.GetCurrent(kw) + switch err := err.(type) { + case experiments.UnavailableError: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unknown experiment keyword", + Detail: fmt.Sprintf("There is no current experiment with the keyword %q.", kw), + Subject: expr.Range().Ptr(), + }) + case experiments.ConcludedError: + // As a special case we still include the optional attributes + // experiment if it's present, because our caller treats that + // as special. See the comment in sniffActiveExperiments for + // more information, and remove this special case here one the + // special case up there is also removed. + if kw == "module_variable_optional_attrs" { + ret.Add(experiments.ModuleVariableOptionalAttrs) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Experiment has concluded", + Detail: fmt.Sprintf("Experiment %q is no longer available. 
%s", kw, err.Message), + Subject: expr.Range().Ptr(), + }) + case nil: + // No error at all means it's valid and current. + ret.Add(exp) + + if disableExperimentWarnings == "" { + // However, experimental features are subject to breaking changes + // in future releases, so we'll warn about them to help make sure + // folks aren't inadvertently using them in places where that'd be + // inappropriate, particularly if the experiment is active in a + // shared module they depend on. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: fmt.Sprintf("Experimental feature %q is active", exp.Keyword()), + Detail: "Experimental features are available only in alpha releases of OpenTofu and are subject to breaking changes or total removal in later versions, based on feedback. We recommend against using experimental features in production.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", + Subject: expr.Range().Ptr(), + }) + } + + default: + // This should never happen, because GetCurrent is not documented + // to return any other error type, but we'll handle it to be robust. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid experiment keyword", + Detail: fmt.Sprintf("Could not parse %q as an experiment keyword: %s.", kw, err.Error()), + Subject: expr.Range().Ptr(), + }) + } + } + return ret, diags +} + +func checkModuleExperiments(m *Module) hcl.Diagnostics { + var diags hcl.Diagnostics + + // When we have current experiments, this is a good place to check that + // the features in question can only be used when the experiments are + // active. Return error diagnostics if a feature is being used without + // opting in to the feature. 
For example: + /* + if !m.ActiveExperiments.Has(experiments.ResourceForEach) { + for _, rc := range m.ManagedResources { + if rc.ForEach != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Resource for_each is experimental", + Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", + Subject: rc.ForEach.Range().Ptr(), + }) + } + } + for _, rc := range m.DataResources { + if rc.ForEach != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Resource for_each is experimental", + Detail: "This feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding resource_for_each to the list of active experiments.", + Subject: rc.ForEach.Range().Ptr(), + }) + } + } + } + */ + + return diags +} diff --git a/pkg/configs/experiments_test.go b/pkg/configs/experiments_test.go new file mode 100644 index 00000000000..80689b17613 --- /dev/null +++ b/pkg/configs/experiments_test.go @@ -0,0 +1,145 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/experiments" +) + +func TestExperimentsConfig(t *testing.T) { + // The experiment registrations are global, so we need to do some special + // patching in order to get a predictable set for our tests. 
+ current := experiments.Experiment("current") + concluded := experiments.Experiment("concluded") + currentExperiments := experiments.NewSet(current) + concludedExperiments := map[experiments.Experiment]string{ + concluded: "Reticulate your splines.", + } + defer experiments.OverrideForTesting(t, currentExperiments, concludedExperiments)() + + t.Run("current", func(t *testing.T) { + parser := NewParser(nil) + parser.AllowLanguageExperiments(true) + mod, diags := parser.LoadConfigDir("testdata/experiments/current", RootModuleCallForTesting()) + if got, want := len(diags), 1; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, want) + } + got := diags[0] + want := &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: `Experimental feature "current" is active`, + Detail: "Experimental features are available only in alpha releases of OpenTofu and are subject to breaking changes or total removal in later versions, based on feedback. We recommend against using experimental features in production.\n\nIf you have feedback on the design of this feature, please open a GitHub issue to discuss it.", + Subject: &hcl.Range{ + Filename: filepath.FromSlash("testdata/experiments/current/current_experiment.tf"), + Start: hcl.Pos{Line: 2, Column: 18, Byte: 29}, + End: hcl.Pos{Line: 2, Column: 25, Byte: 36}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong warning\n%s", diff) + } + if got, want := len(mod.ActiveExperiments), 1; got != want { + t.Errorf("wrong number of experiments %d; want %d", got, want) + } + if !mod.ActiveExperiments.Has(current) { + t.Errorf("module does not indicate current experiment as active") + } + }) + t.Run("concluded", func(t *testing.T) { + parser := NewParser(nil) + parser.AllowLanguageExperiments(true) + _, diags := parser.LoadConfigDir("testdata/experiments/concluded", RootModuleCallForTesting()) + if got, want := len(diags), 1; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, 
want) + } + got := diags[0] + want := &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Experiment has concluded`, + Detail: `Experiment "concluded" is no longer available. Reticulate your splines.`, + Subject: &hcl.Range{ + Filename: filepath.FromSlash("testdata/experiments/concluded/concluded_experiment.tf"), + Start: hcl.Pos{Line: 2, Column: 18, Byte: 29}, + End: hcl.Pos{Line: 2, Column: 27, Byte: 38}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong error\n%s", diff) + } + }) + t.Run("concluded", func(t *testing.T) { + parser := NewParser(nil) + parser.AllowLanguageExperiments(true) + _, diags := parser.LoadConfigDir("testdata/experiments/unknown", RootModuleCallForTesting()) + if got, want := len(diags), 1; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, want) + } + got := diags[0] + want := &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unknown experiment keyword`, + Detail: `There is no current experiment with the keyword "unknown".`, + Subject: &hcl.Range{ + Filename: filepath.FromSlash("testdata/experiments/unknown/unknown_experiment.tf"), + Start: hcl.Pos{Line: 2, Column: 18, Byte: 29}, + End: hcl.Pos{Line: 2, Column: 25, Byte: 36}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong error\n%s", diff) + } + }) + t.Run("invalid", func(t *testing.T) { + parser := NewParser(nil) + parser.AllowLanguageExperiments(true) + _, diags := parser.LoadConfigDir("testdata/experiments/invalid", RootModuleCallForTesting()) + if got, want := len(diags), 1; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, want) + } + got := diags[0] + want := &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid expression`, + Detail: `A static list expression is required.`, + Subject: &hcl.Range{ + Filename: filepath.FromSlash("testdata/experiments/invalid/invalid_experiments.tf"), + Start: hcl.Pos{Line: 2, Column: 17, Byte: 28}, + End: hcl.Pos{Line: 2, Column: 24, 
Byte: 35}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong error\n%s", diff) + } + }) + t.Run("disallowed", func(t *testing.T) { + parser := NewParser(nil) + parser.AllowLanguageExperiments(false) // The default situation for release builds + _, diags := parser.LoadConfigDir("testdata/experiments/current", RootModuleCallForTesting()) + if got, want := len(diags), 1; got != want { + t.Fatalf("wrong number of diagnostics %d; want %d", got, want) + } + got := diags[0] + want := &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Module uses experimental features`, + Detail: `Experimental features are intended only for gathering early feedback on new language designs, and so are available only in alpha releases of OpenTofu.`, + Subject: &hcl.Range{ + Filename: filepath.FromSlash("testdata/experiments/current/current_experiment.tf"), + Start: hcl.Pos{Line: 2, Column: 3, Byte: 14}, + End: hcl.Pos{Line: 2, Column: 14, Byte: 25}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong error\n%s", diff) + } + }) +} diff --git a/pkg/configs/hcl2shim/flatmap.go b/pkg/configs/hcl2shim/flatmap.go new file mode 100644 index 00000000000..e079e8191a7 --- /dev/null +++ b/pkg/configs/hcl2shim/flatmap.go @@ -0,0 +1,429 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. 
+// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. +func FlatmapValueFromHCL2(v cty.Value) map[string]string { + if v.IsNull() { + return nil + } + + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) + } + + m := make(map[string]string) + flatmapValueFromHCL2Map(m, "", v) + return m +} + +func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { + ty := val.Type() + switch { + case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: + flatmapValueFromHCL2Primitive(m, key, val) + case ty.IsObjectType() || ty.IsMapType(): + flatmapValueFromHCL2Map(m, key+".", val) + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + flatmapValueFromHCL2Seq(m, key+".", val) + default: + panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) + } +} + +func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { + if !val.IsKnown() { + m[key] = UnknownVariableValue + return + } + if val.IsNull() { + // Omit entirely + return + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + // Should not be possible, since all primitive types can convert to string. + panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) + } + m[key] = val.AsString() +} + +func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + switch { + case val.Type().IsObjectType(): + // Whole objects can't be unknown in flatmap, so instead we'll + // just write all of the attribute values out as unknown. 
+ for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + len := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + len++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(len) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. 
// HCL2ValueFromFlatmap converts a map compatible with what would be produced
// by the "flatmap" package to a HCL2 (really, the cty dynamic types library
// that HCL2 uses) object type.
//
// The intended result type must be provided in order to guide how the
// map contents are decoded. This must be an object type or this function
// will panic.
//
// Flatmap values can only represent maps when they are of primitive types,
// so the given type must not have any maps of complex types or the result
// is undefined.
//
// The result may contain null values if the given map does not contain keys
// for all of the different key paths implied by the given type.
func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) {
	if m == nil {
		return cty.NullVal(ty), nil
	}
	if !ty.IsObjectType() {
		panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty))
	}

	return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes())
}

// hcl2ValueFromFlatmapValue dispatches on the target type, decoding the
// flatmap entries at the given key into a single cty value.
func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
	var val cty.Value
	var err error
	switch {
	case ty.IsPrimitiveType():
		val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty)
	case ty.IsObjectType():
		val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes())
	case ty.IsTupleType():
		val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes())
	case ty.IsMapType():
		val, err = hcl2ValueFromFlatmapMap(m, key+".", ty)
	case ty.IsListType():
		val, err = hcl2ValueFromFlatmapList(m, key+".", ty)
	case ty.IsSetType():
		val, err = hcl2ValueFromFlatmapSet(m, key+".", ty)
	default:
		err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName())
	}

	if err != nil {
		return cty.DynamicVal, err
	}
	return val, nil
}

// hcl2ValueFromFlatmapPrimitive decodes a single primitive entry, mapping a
// missing key to null and the UnknownVariableValue sentinel to unknown.
func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
	rawVal, exists := m[key]
	if !exists {
		return cty.NullVal(ty), nil
	}
	if rawVal == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	var err error
	val := cty.StringVal(rawVal)
	val, err = convert.Convert(val, ty)
	if err != nil {
		// This should never happen for _valid_ input, but flatmap data might
		// be tampered with by the user and become invalid.
		return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %w", key, err)
	}

	return val, nil
}

// hcl2ValueFromFlatmapObject decodes one attribute per entry in atys; any
// error from a nested decode aborts the whole object.
func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) {
	vals := make(map[string]cty.Value)
	for name, aty := range atys {
		val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[name] = val
	}
	return cty.ObjectVal(vals), nil
}

// hcl2ValueFromFlatmapTuple decodes a fixed-length sequence whose "#" count
// entry must match the number of element types exactly.
func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) {
	var vals []cty.Value

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(cty.Tuple(etys)), nil
	}

	countStr, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(cty.Tuple(etys)), nil
	}
	if countStr == UnknownVariableValue {
		return cty.UnknownVal(cty.Tuple(etys)), nil
	}

	count, err := strconv.Atoi(countStr)
	if err != nil {
		return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %w", prefix, err)
	}
	if count != len(etys) {
		return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys))
	}

	vals = make([]cty.Value, len(etys))
	for i, ety := range etys {
		key := prefix + strconv.Itoa(i)
		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[i] = val
	}
	return cty.TupleVal(vals), nil
}

// hcl2ValueFromFlatmapMap decodes a map of primitive-typed elements; the "%"
// entry distinguishes null (absent) from empty (present).
func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	vals := make(map[string]cty.Value)
	ety := ty.ElementType()

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// We actually don't really care about the "count" of a map for our
	// purposes here, but we do need to check if it _exists_ in order to
	// recognize the difference between null (not set at all) and empty.
	if strCount, exists := m[prefix+"%"]; !exists {
		return cty.NullVal(ty), nil
	} else if strCount == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	for fullKey := range m {
		if !strings.HasPrefix(fullKey, prefix) {
			continue
		}

		// The flatmap format doesn't allow us to distinguish between keys
		// that contain periods and nested objects, so by convention a
		// map is only ever of primitive type in flatmap, and we just assume
		// that the remainder of the raw key (dots and all) is the key we
		// want in the result value.
		key := fullKey[len(prefix):]
		if key == "%" {
			// Ignore the "count" key
			continue
		}

		val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[key] = val
	}

	if len(vals) == 0 {
		return cty.MapValEmpty(ety), nil
	}
	return cty.MapVal(vals), nil
}

// hcl2ValueFromFlatmapList decodes a variable-length sequence driven by the
// "#" count entry; a missing count means null, the unknown sentinel means
// an unknown list.
func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	var vals []cty.Value

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	countStr, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(ty), nil
	}
	if countStr == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	count, err := strconv.Atoi(countStr)
	if err != nil {
		return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %w", prefix, err)
	}

	ety := ty.ElementType()
	if count == 0 {
		return cty.ListValEmpty(ety), nil
	}

	vals = make([]cty.Value, count)
	for i := 0; i < count; i++ {
		key := prefix + strconv.Itoa(i)
		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[i] = val
	}

	return cty.ListVal(vals), nil
}
// hcl2ValueFromFlatmapSet decodes a set of elements stored under synthetic
// numeric-ish keys. Because the original set hash keys are not recoverable,
// each distinct key segment after the prefix identifies one element.
func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	var vals []cty.Value
	ety := ty.ElementType()

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	strCount, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(ty), nil
	} else if strCount == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// Keep track of keys we've seen, so we don't add the same set value
	// multiple times. The cty.Set will normally de-duplicate values, but we may
	// have unknown values that would not show as equivalent.
	seen := map[string]bool{}

	for fullKey := range m {
		if !strings.HasPrefix(fullKey, prefix) {
			continue
		}
		subKey := fullKey[len(prefix):]
		if subKey == "#" {
			// Ignore the "count" key
			continue
		}
		key := fullKey
		if dot := strings.IndexByte(subKey, '.'); dot != -1 {
			// Truncate at the first dot after the prefix so nested entries
			// of one element all collapse to the same element key.
			key = fullKey[:dot+len(prefix)]
		}

		if seen[key] {
			continue
		}

		seen[key] = true

		// The flatmap format doesn't allow us to distinguish between keys
		// that contain periods and nested objects, so by convention a
		// map is only ever of primitive type in flatmap, and we just assume
		// that the remainder of the raw key (dots and all) is the key we
		// want in the result value.

		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals = append(vals, val)
	}

	if len(vals) == 0 && strCount == "1" {
		// An empty set wouldn't be represented in the flatmap, so this must be
		// a single empty object since the count is actually 1.
		// Add an appropriately typed null value to the set.
		var val cty.Value
		switch {
		case ety.IsMapType():
			val = cty.MapValEmpty(ety)
		case ety.IsListType():
			val = cty.ListValEmpty(ety)
		case ety.IsSetType():
			val = cty.SetValEmpty(ety)
		case ety.IsObjectType():
			// TODO: cty.ObjectValEmpty
			objectMap := map[string]cty.Value{}
			for attr, ty := range ety.AttributeTypes() {
				objectMap[attr] = cty.NullVal(ty)
			}
			val = cty.ObjectVal(objectMap)
		default:
			val = cty.NullVal(ety)
		}
		vals = append(vals, val)

	} else if len(vals) == 0 {
		return cty.SetValEmpty(ety), nil
	}

	return cty.SetVal(vals), nil
}
+ "baz": "whelp", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(cty.String), + }), + map[string]string{ + "foo.#": "0", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.List(cty.String)), + }), + map[string]string{ + "foo.#": UnknownVariableValue, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + map[string]string{ + "foo.#": "1", + "foo.0": "hello", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }), + }), + map[string]string{ + "foo.#": "2", + "foo.0": "hello", + "foo.1": "world", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "hello": cty.NumberIntVal(12), + "hello.world": cty.NumberIntVal(10), + }), + }), + map[string]string{ + "foo.%": "2", + "foo.hello": "12", + "foo.hello.world": "10", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.Map(cty.String)), + }), + map[string]string{ + "foo.%": UnknownVariableValue, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "hello": cty.NumberIntVal(12), + "hello.world": cty.NumberIntVal(10), + }), + }), + map[string]string{ + "foo.%": "2", + "foo.hello": "12", + "foo.hello.world": "10", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }), + }), + map[string]string{ + "foo.#": "2", + "foo.0": "hello", + "foo.1": "world", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.Set(cty.Number)), + }), + map[string]string{ + "foo.#": UnknownVariableValue, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + "baz": cty.StringVal("world"), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bloo"), + "baz": cty.StringVal("blaa"), + }), + }), + }), + map[string]string{ + "foo.#": "2", + "foo.0.bar": "hello", + "foo.0.baz": "world", + "foo.1.bar": "bloo", + "foo.1.baz": "blaa", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + "baz": cty.ListVal([]cty.Value{ + cty.True, + cty.True, + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bloo"), + "baz": cty.ListVal([]cty.Value{ + cty.False, + cty.True, + }), + }), + }), + }), + map[string]string{ + "foo.#": "2", + "foo.0.bar": "hello", + "foo.0.baz.#": "2", + "foo.0.baz.0": "true", + "foo.0.baz.1": "true", + "foo.1.bar": "bloo", + "foo.1.baz.#": "2", + "foo.1.baz.0": "false", + "foo.1.baz.1": "true", + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.List(cty.Bool), + "bap": cty.Map(cty.Number), + })), + }), + }), + map[string]string{ + "foo.#": "1", + "foo.0.bar": UnknownVariableValue, + "foo.0.baz.#": UnknownVariableValue, + "foo.0.bap.%": UnknownVariableValue, + }, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + })), + nil, + }, + } + + for _, test := range tests { + t.Run(test.Value.GoString(), func(t *testing.T) { + got := FlatmapValueFromHCL2(test.Value) + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} + +func TestFlatmapValueFromHCL2FromFlatmap(t *testing.T) { + tests := []struct { + Name string + Map map[string]string + Type cty.Type + }{ + { + "empty flatmap with collections", + map[string]string{}, + cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + "bar": cty.Set(cty.String), + }), + }, + { + "nil flatmap with collections", + nil, + 
cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + "bar": cty.Set(cty.String), + }), + }, + { + "empty flatmap with nested collections", + map[string]string{}, + cty.Object(map[string]cty.Type{ + "foo": cty.Object( + map[string]cty.Type{ + "baz": cty.Map(cty.String), + }, + ), + "bar": cty.Set(cty.String), + }), + }, + { + "partial flatmap with nested collections", + map[string]string{ + "foo.baz.%": "1", + "foo.baz.key": "val", + }, + cty.Object(map[string]cty.Type{ + "foo": cty.Object( + map[string]cty.Type{ + "baz": cty.Map(cty.String), + "biz": cty.Map(cty.String), + }, + ), + "bar": cty.Set(cty.String), + }), + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + val, err := HCL2ValueFromFlatmap(test.Map, test.Type) + if err != nil { + t.Fatal(err) + } + + got := FlatmapValueFromHCL2(val) + + for _, problem := range deep.Equal(got, test.Map) { + t.Error(problem) + } + }) + } +} +func TestHCL2ValueFromFlatmap(t *testing.T) { + tests := []struct { + Flatmap map[string]string + Type cty.Type + Want cty.Value + WantErr string + }{ + { + Flatmap: map[string]string{}, + Type: cty.EmptyObject, + Want: cty.EmptyObjectVal, + }, + { + Flatmap: map[string]string{ + "ignored": "foo", + }, + Type: cty.EmptyObject, + Want: cty.EmptyObjectVal, + }, + { + Flatmap: map[string]string{ + "foo": "blah", + "bar": "true", + "baz": "12.5", + "unk": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.String, + "bar": cty.Bool, + "baz": cty.Number, + "unk": cty.Bool, + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("blah"), + "bar": cty.True, + "baz": cty.NumberFloatVal(12.5), + "unk": cty.UnknownVal(cty.Bool), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "0", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(cty.String), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": 
UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.List(cty.String)), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "1", + "foo.0": "hello", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "2", + "foo.0": "true", + "foo.1": "false", + "foo.2": "ignored", // (because the count is 2, so this is out of range) + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Bool), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.True, + cty.False, + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "2", + "foo.0": "hello", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Tuple([]cty.Type{ + cty.String, + cty.Bool, + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.NullVal(cty.Bool), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Tuple([]cty.Type{ + cty.String, + cty.Bool, + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.Tuple([]cty.Type{ + cty.String, + cty.Bool, + })), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "0", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetValEmpty(cty.String), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.Set(cty.String)), + }), + }, + { + Flatmap: map[string]string{ + 
"foo.#": "1", + "foo.24534534": "hello", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "1", + "foo.24534534": "true", + "foo.95645644": "true", + "foo.34533452": "false", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Bool), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.True, + cty.False, + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.%": "0", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapValEmpty(cty.String), + }), + }, + { + Flatmap: map[string]string{ + "foo.%": "2", + "foo.baz": "true", + "foo.bar.baz": "false", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.Bool), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "baz": cty.True, + "bar.baz": cty.False, + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.%": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.Bool), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.Map(cty.Bool)), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "2", + "foo.0.bar": "hello", + "foo.0.baz": "1", + "foo.1.bar": "world", + "foo.1.baz": "false", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.Bool, + })), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + "baz": cty.True, + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("world"), + "baz": cty.False, + }), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "2", + 
"foo.34534534.bar": "hello", + "foo.34534534.baz": "1", + "foo.93453345.bar": "world", + "foo.93453345.baz": "false", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.Bool, + })), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("hello"), + "baz": cty.True, + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("world"), + "baz": cty.False, + }), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "not-valid", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + WantErr: `invalid count value for "foo." in state: strconv.Atoi: parsing "not-valid": invalid syntax`, + }, + { + Flatmap: nil, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + }), + Want: cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + })), + }, + { + Flatmap: map[string]string{ + "foo.#": "2", + "foo.0.%": "2", + "foo.0.a": "a", + "foo.0.b": "b", + "foo.1.%": "1", + "foo.1.a": "a", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Map(cty.String)), + }), + + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("a"), + "b": cty.StringVal("b"), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("a"), + }), + }), + }), + }, + { + Flatmap: map[string]string{ + "single.#": "1", + "single.~1.value": "a", + "single.~1.optional": UnknownVariableValue, + "two.#": "2", + "two.~2381914684.value": "a", + "two.~2381914684.optional": UnknownVariableValue, + "two.~2798940671.value": "b", + "two.~2798940671.optional": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "single": cty.Set( + cty.Object(map[string]cty.Type{ + "value": 
cty.String, + "optional": cty.String, + }), + ), + "two": cty.Set( + cty.Object(map[string]cty.Type{ + "optional": cty.String, + "value": cty.String, + }), + ), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "single": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("a"), + "optional": cty.UnknownVal(cty.String), + }), + }), + "two": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("a"), + "optional": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("b"), + "optional": cty.UnknownVal(cty.String), + }), + }), + }), + }, + { + Flatmap: map[string]string{ + "foo.#": "1", + }, + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.String), + }), + }), + }), + }, + { + Flatmap: map[string]string{ + "multi.#": "1", + "multi.2.set.#": "1", + "multi.2.set.3.required": "val", + }, + Type: cty.Object(map[string]cty.Type{ + "multi": cty.Set(cty.Object(map[string]cty.Type{ + "set": cty.Set(cty.Object(map[string]cty.Type{ + "required": cty.String, + })), + })), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "required": cty.StringVal("val"), + }), + }), + }), + }), + }), + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%d %#v as %#v", i, test.Flatmap, test.Type), func(t *testing.T) { + got, err := HCL2ValueFromFlatmap(test.Flatmap, test.Type) + + if test.WantErr != "" { + if err == nil { + t.Fatalf("succeeded; want error: %s", test.WantErr) + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + if got == cty.NilVal { + 
t.Fatalf("result is cty.NilVal; want valid placeholder value") + } + return + } else { + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/configs/hcl2shim/merge_body.go b/pkg/configs/hcl2shim/merge_body.go new file mode 100644 index 00000000000..bda8f5b538c --- /dev/null +++ b/pkg/configs/hcl2shim/merge_body.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "github.com/hashicorp/hcl/v2" +) + +// MergeBodies creates a new HCL body that contains a combination of the +// given base and override bodies. Attributes and blocks defined in the +// override body take precedence over those of the same name defined in +// the base body. +// +// If any block of a particular type appears in "override" then it will +// replace _all_ of the blocks of the same type in "base" in the new +// body. +func MergeBodies(base, override hcl.Body) hcl.Body { + return mergeBody{ + Base: base, + Override: override, + } +} + +// mergeBody is a hcl.Body implementation that wraps a pair of other bodies +// and allows attributes and blocks within the override to take precedence +// over those defined in the base body. +// +// This is used to deal with dynamically-processed bodies in Module.mergeFile. +// It uses a shallow-only merging strategy where direct attributes defined +// in Override will override attributes of the same name in Base, while any +// blocks defined in Override will hide all blocks of the same type in Base. +// +// This cannot possibly "do the right thing" in all cases, because we don't +// have enough information about user intent. However, this behavior is intended +// to be reasonable for simple overriding use-cases. 
+type mergeBody struct { + Base hcl.Body + Override hcl.Body +} + +var _ hcl.Body = mergeBody{} + +func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(SchemaForOverrides(schema)) + + baseContent, _, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + return content, diags +} + +func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(SchemaForOverrides(schema)) + + baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + remain := MergeBodies(baseRemain, overrideRemain) + + return content, remain, diags +} + +func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + } + + // For attributes we just assign from each map in turn and let the override + // map clobber any matching entries from base. + for k, a := range base.Attributes { + content.Attributes[k] = a + } + for k, a := range override.Attributes { + content.Attributes[k] = a + } + + // Things are a little more interesting for blocks because they arrive + // as a flat list. Our merging semantics call for us to suppress blocks + // from base if at least one block of the same type appears in override. 
+ // We explicitly do not try to correlate and deeply merge nested blocks, + // since we don't have enough context here to infer user intent. + + overriddenBlockTypes := make(map[string]bool) + for _, block := range override.Blocks { + if block.Type == "dynamic" { + overriddenBlockTypes[block.Labels[0]] = true + continue + } + overriddenBlockTypes[block.Type] = true + } + for _, block := range base.Blocks { + // We skip over dynamic blocks whose type label is an overridden type + // but note that below we do still leave them as dynamic blocks in + // the result because expanding the dynamic blocks that are left is + // done much later during the core graph walks, where we can safely + // evaluate the expressions. + if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { + continue + } + if overriddenBlockTypes[block.Type] { + continue + } + content.Blocks = append(content.Blocks, block) + } + content.Blocks = append(content.Blocks, override.Blocks...) + + return content +} + +func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := make(hcl.Attributes) + + baseAttrs, aDiags := b.Base.JustAttributes() + diags = append(diags, aDiags...) + overrideAttrs, aDiags := b.Override.JustAttributes() + diags = append(diags, aDiags...) 
+ + for k, a := range baseAttrs { + ret[k] = a + } + for k, a := range overrideAttrs { + ret[k] = a + } + + return ret, diags +} + +func (b mergeBody) MissingItemRange() hcl.Range { + return b.Base.MissingItemRange() +} diff --git a/pkg/configs/hcl2shim/mock_value_composer.go b/pkg/configs/hcl2shim/mock_value_composer.go new file mode 100644 index 00000000000..00a6a0e0e43 --- /dev/null +++ b/pkg/configs/hcl2shim/mock_value_composer.go @@ -0,0 +1,385 @@ +package hcl2shim + +import ( + "cmp" + "fmt" + "math/rand" + "slices" + "strings" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// MockValueComposer provides different ways to generate mock values based on +// config schema, attributes, blocks and cty types in general. +type MockValueComposer struct { + rand *rand.Rand +} + +func NewMockValueComposer(seed int64) MockValueComposer { + return MockValueComposer{ + rand: rand.New(rand.NewSource(seed)), //nolint:gosec // It doesn't need to be secure. + } +} + +// ComposeBySchema composes mock value based on schema configuration. It uses +// configuration value as a baseline and populates null values with provided defaults. +// If the provided defaults doesn't contain needed fields, ComposeBySchema uses +// its own defaults. ComposeBySchema fails if schema contains dynamic types. +// ComposeBySchema produces the same result with the given input values (seed and func arguments). +// It does so by traversing schema attributes, blocks and data structure elements / fields +// in a stable way by sorting keys or elements beforehand. Then, randomized values match +// between multiple ComposeBySchema calls, because seed and random sequences are the same. 
+func (mvc MockValueComposer) ComposeBySchema(schema *configschema.Block, config cty.Value, defaults map[string]cty.Value) (cty.Value, tfdiags.Diagnostics) { + var configMap map[string]cty.Value + var diags tfdiags.Diagnostics + + if !config.IsNull() { + configMap = config.AsValueMap() + } + + impliedTypes := schema.ImpliedType().AttributeTypes() + + mockAttrs, moreDiags := mvc.composeMockValueForAttributes(schema, configMap, defaults) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return cty.NilVal, diags + } + + mockBlocks, moreDiags := mvc.composeMockValueForBlocks(schema, configMap, defaults) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return cty.NilVal, diags + } + + mockValues := mockAttrs + for k, v := range mockBlocks { + mockValues[k] = v + } + + for k := range defaults { + if _, ok := impliedTypes[k]; !ok { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + fmt.Sprintf("Ignored mock/override field `%v`", k), + "The field is unknown. Please, ensure it is a part of resource definition.", + )) + } + } + + return cty.ObjectVal(mockValues), diags +} + +func (mvc MockValueComposer) composeMockValueForAttributes(schema *configschema.Block, configMap map[string]cty.Value, defaults map[string]cty.Value) (map[string]cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + addPotentialDefaultsWarning := func(key, description string) { + if _, ok := defaults[key]; ok { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + fmt.Sprintf("Ignored mock/override field `%v`", key), + description, + )) + } + } + + mockAttrs := make(map[string]cty.Value) + + impliedTypes := schema.ImpliedType().AttributeTypes() + + // Stable order is important here so random values match its fields between function calls. + for _, kv := range mapToSortedSlice(schema.Attributes) { + k, attr := kv.k, kv.v + + // If the value present in configuration - just use it. 
+ if cv, ok := configMap[k]; ok && !cv.IsNull() { + mockAttrs[k] = cv + addPotentialDefaultsWarning(k, "The field is ignored since overriding configuration values is not allowed.") + continue + } + + // Non-computed attributes can't be generated + // so we set them from configuration only. + if !attr.Computed { + mockAttrs[k] = cty.NullVal(attr.Type) + addPotentialDefaultsWarning(k, "The field is ignored since overriding non-computed fields is not allowed.") + continue + } + + // If the attribute is computed and not configured, + // we use provided value from defaults. + if ov, ok := defaults[k]; ok { + typeConformanceErrs := ov.Type().TestConformance(attr.Type) + if len(typeConformanceErrs) == 0 { + mockAttrs[k] = ov + continue + } + + for _, err := range typeConformanceErrs { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + fmt.Sprintf("Ignored mock/override field `%v`", k), + fmt.Sprintf("Values provided for override / mock must match resource fields types: %v.", err), + )) + } + } + + // If there's no value in defaults, we generate our own. + v, ok := mvc.getMockValueByType(impliedTypes[k]) + if !ok { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Failed to generate mock value", + fmt.Sprintf("Mock value cannot be generated for dynamic type. Please specify the `%v` field explicitly in the configuration.", k), + )) + continue + } + + mockAttrs[k] = v + } + + return mockAttrs, diags +} + +func (mvc MockValueComposer) composeMockValueForBlocks(schema *configschema.Block, configMap map[string]cty.Value, defaults map[string]cty.Value) (map[string]cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + mockBlocks := make(map[string]cty.Value) + + impliedTypes := schema.ImpliedType().AttributeTypes() + + // Stable order is important here so random values match its fields between function calls. 
+ for _, kv := range mapToSortedSlice(schema.BlockTypes) { + k, block := kv.k, kv.v + + // Checking if the config value really present for the block. + // It should be non-null and non-empty collection. + + configVal, hasConfigVal := configMap[k] + if hasConfigVal && configVal.IsNull() { + hasConfigVal = false + } + + if hasConfigVal && !configVal.IsKnown() { + hasConfigVal = false + } + + if hasConfigVal && configVal.Type().IsCollectionType() && configVal.LengthInt() == 0 { + hasConfigVal = false + } + + defaultVal, hasDefaultVal := defaults[k] + if hasDefaultVal && !defaultVal.Type().IsObjectType() { + hasDefaultVal = false + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + fmt.Sprintf("Ignored mock/override field `%v`", k), + fmt.Sprintf("Blocks can be overridden only by objects, got `%s`", defaultVal.Type().FriendlyName()), + )) + } + + // We must keep blocks the same as it defined in configuration, + // so provider response validation succeeds later. + if !hasConfigVal { + mockBlocks[k] = block.EmptyValue() + + if hasDefaultVal { + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Warning, + fmt.Sprintf("Ignored mock/override field `%v`", k), + "Cannot overridde block value, because it's not present in configuration.", + )) + } + + continue + } + + var blockDefaults map[string]cty.Value + + if hasDefaultVal { + blockDefaults = defaultVal.AsValueMap() + } + + v, moreDiags := mvc.getMockValueForBlock(impliedTypes[k], configVal, &block.Block, blockDefaults) + diags = append(diags, moreDiags...) + if moreDiags.HasErrors() { + return nil, diags + } + + mockBlocks[k] = v + } + + return mockBlocks, diags +} + +// getMockValueForBlock uses an object from the defaults (overrides) +// to compose each value from the block's inner collection. It recursevily calls +// composeMockValueBySchema to proceed with all the inner attributes and blocks +// the same way so all the nested blocks follow the same logic. 
+func (mvc MockValueComposer) getMockValueForBlock(targetType cty.Type, configVal cty.Value, block *configschema.Block, defaults map[string]cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + switch { + case targetType.IsObjectType(): + mockBlockVal, moreDiags := mvc.ComposeBySchema(block, configVal, defaults) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return cty.NilVal, diags + } + + return mockBlockVal, diags + + case targetType.ListElementType() != nil || targetType.SetElementType() != nil: + var mockBlockVals []cty.Value + + var iterator = configVal.ElementIterator() + + // Stable order is important here so random values match its fields between function calls. + for iterator.Next() { + _, blockConfigV := iterator.Element() + + mockBlockVal, moreDiags := mvc.ComposeBySchema(block, blockConfigV, defaults) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return cty.NilVal, diags + } + + mockBlockVals = append(mockBlockVals, mockBlockVal) + } + + if targetType.ListElementType() != nil { + return cty.ListVal(mockBlockVals), diags + } else { + return cty.SetVal(mockBlockVals), diags + } + + case targetType.MapElementType() != nil: + var mockBlockVals = make(map[string]cty.Value) + + var iterator = configVal.ElementIterator() + + // Stable order is important here so random values match its fields between function calls. + for iterator.Next() { + blockConfigK, blockConfigV := iterator.Element() + + mockBlockVal, moreDiags := mvc.ComposeBySchema(block, blockConfigV, defaults) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return cty.NilVal, diags + } + + mockBlockVals[blockConfigK.AsString()] = mockBlockVal + } + + return cty.MapVal(mockBlockVals), diags + + default: + // Shouldn't happen as long as blocks are represented by lists / maps / sets / objs. 
+ return cty.NilVal, diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + fmt.Sprintf("Unexpected block type: %v", targetType.FriendlyName()), + "Failed to generate mock value for this block type. Please, report it as an issue at OpenTofu repository, since it's not expected.", + )) + } +} + +// getMockValueByType tries to generate mock cty.Value based on provided cty.Type. +// It will return non-ok response if it encounters dynamic type. +func (mvc MockValueComposer) getMockValueByType(t cty.Type) (cty.Value, bool) { + var v cty.Value + + // just to be sure for cases when the logic below misses something + if t.HasDynamicTypes() { + return cty.Value{}, false + } + + switch { + // primitives + case t.Equals(cty.Number): + v = cty.Zero + case t.Equals(cty.Bool): + v = cty.False + case t.Equals(cty.String): + v = cty.StringVal(mvc.getMockString()) + + // collections + case t.ListElementType() != nil: + v = cty.ListValEmpty(*t.ListElementType()) + case t.MapElementType() != nil: + v = cty.MapValEmpty(*t.MapElementType()) + case t.SetElementType() != nil: + v = cty.SetValEmpty(*t.SetElementType()) + + // structural + case t.IsObjectType(): + objVals := make(map[string]cty.Value) + + // Populate the object with mock values. Stable order is important here + // so random values match its fields between function calls. 
+ for _, kv := range mapToSortedSlice(t.AttributeTypes()) { + k, at := kv.k, kv.v + + if t.AttributeOptional(k) { + continue + } + + objV, ok := mvc.getMockValueByType(at) + if !ok { + return cty.Value{}, false + } + + objVals[k] = objV + } + + v = cty.ObjectVal(objVals) + case t.IsTupleType(): + v = cty.EmptyTupleVal + + // dynamically typed values are not supported + default: + return cty.Value{}, false + } + + return v, true +} + +func (mvc MockValueComposer) getMockString() string { + const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890" + + const minLength, maxLength = 4, 16 + + length := mvc.rand.Intn(maxLength-minLength) + minLength + + b := strings.Builder{} + b.Grow(length) + + for i := 0; i < length; i++ { + b.WriteByte(chars[mvc.rand.Intn(len(chars))]) + } + + return b.String() +} + +type keyValue[K cmp.Ordered, V any] struct { + k K + v V +} + +// mapToSortedSlice makes it possible to iterate over map in a stable manner. +func mapToSortedSlice[K cmp.Ordered, V any](m map[K]V) []keyValue[K, V] { + keys := make([]K, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + + slices.Sort(keys) + + s := make([]keyValue[K, V], 0, len(m)) + for _, k := range keys { + s = append(s, keyValue[K, V]{k, m[k]}) + } + + return s +} diff --git a/pkg/configs/hcl2shim/mock_value_composer_test.go b/pkg/configs/hcl2shim/mock_value_composer_test.go new file mode 100644 index 00000000000..3eb649327e6 --- /dev/null +++ b/pkg/configs/hcl2shim/mock_value_composer_test.go @@ -0,0 +1,524 @@ +package hcl2shim + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// TestComposeMockValueBySchema ensures different configschema.Block values +// processed correctly (lists, maps, objects, etc). Also, it should ensure that +// the resulting values are equal given the same set of inputs (seed, schema, etc). 
+func TestComposeMockValueBySchema(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + schema *configschema.Block + config cty.Value + defaults map[string]cty.Value + wantVal cty.Value + wantWarning bool + wantError bool + }{ + "diff-props-in-root-attributes": { + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "required-only": { + Type: cty.String, + Required: true, + Optional: false, + Computed: false, + Sensitive: false, + }, + "required-computed": { + Type: cty.String, + Required: true, + Optional: false, + Computed: true, + Sensitive: false, + }, + "optional": { + Type: cty.String, + Required: false, + Optional: true, + Computed: false, + Sensitive: false, + }, + "optional-computed": { + Type: cty.String, + Required: false, + Optional: true, + Computed: true, + Sensitive: false, + }, + "computed-only": { + Type: cty.String, + Required: false, + Optional: false, + Computed: true, + Sensitive: false, + }, + "sensitive-optional": { + Type: cty.String, + Required: false, + Optional: true, + Computed: false, + Sensitive: true, + }, + "sensitive-required": { + Type: cty.String, + Required: true, + Optional: false, + Computed: false, + Sensitive: true, + }, + "sensitive-computed": { + Type: cty.String, + Required: true, + Optional: false, + Computed: true, + Sensitive: true, + }, + }, + }, + config: cty.NilVal, + wantVal: cty.ObjectVal(map[string]cty.Value{ + "required-only": cty.NullVal(cty.String), + "required-computed": cty.StringVal("xNmGyAVmNkB4"), + "optional": cty.NullVal(cty.String), + "optional-computed": cty.StringVal("6zQu0"), + "computed-only": cty.StringVal("l3INvNSQT"), + "sensitive-optional": cty.NullVal(cty.String), + "sensitive-required": cty.NullVal(cty.String), + "sensitive-computed": cty.StringVal("ionwj3qrsh4xyC9"), + }), + }, + "diff-props-in-single-block-attributes": { + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: 
configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "required-only": { + Type: cty.String, + Required: true, + Optional: false, + Computed: false, + Sensitive: false, + }, + "required-computed": { + Type: cty.String, + Required: true, + Optional: false, + Computed: true, + Sensitive: false, + }, + "optional": { + Type: cty.String, + Required: false, + Optional: true, + Computed: false, + Sensitive: false, + }, + "optional-computed": { + Type: cty.String, + Required: false, + Optional: true, + Computed: true, + Sensitive: false, + }, + "computed-only": { + Type: cty.String, + Required: false, + Optional: false, + Computed: true, + Sensitive: false, + }, + "sensitive-optional": { + Type: cty.String, + Required: false, + Optional: true, + Computed: false, + Sensitive: true, + }, + "sensitive-required": { + Type: cty.String, + Required: true, + Optional: false, + Computed: false, + Sensitive: true, + }, + "sensitive-computed": { + Type: cty.String, + Required: true, + Optional: false, + Computed: true, + Sensitive: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ObjectVal(map[string]cty.Value{}), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ObjectVal(map[string]cty.Value{ + "required-only": cty.NullVal(cty.String), + "required-computed": cty.StringVal("xNmGyAVmNkB4"), + "optional": cty.NullVal(cty.String), + "optional-computed": cty.StringVal("6zQu0"), + "computed-only": cty.StringVal("l3INvNSQT"), + "sensitive-optional": cty.NullVal(cty.String), + "sensitive-required": cty.NullVal(cty.String), + "sensitive-computed": cty.StringVal("ionwj3qrsh4xyC9"), + }), + }), + }, + "basic-group-block": { + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingGroup, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "field": { + Type: cty.Number, + 
Computed: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ObjectVal(map[string]cty.Value{}), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ObjectVal(map[string]cty.Value{ + "field": cty.NumberIntVal(0), + }), + }), + }, + "basic-list-block": { + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Computed: true, + }, + "str1": { + Type: cty.String, + Computed: true, + }, + "str2": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{})}), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "num": cty.NumberIntVal(0), + "str1": cty.StringVal("l3INvNSQT"), + "str2": cty.StringVal("6zQu0"), + }), + }), + }), + }, + "basic-set-block": { + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Computed: true, + }, + "str1": { + Type: cty.String, + Computed: true, + }, + "str2": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{})}), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "num": cty.NumberIntVal(0), + "str1": cty.StringVal("l3INvNSQT"), + "str2": cty.StringVal("6zQu0"), + }), + }), + }), + }, + "basic-map-block": { + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ 
+ "nested": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Computed: true, + }, + "str1": { + Type: cty.String, + Computed: true, + }, + "str2": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.MapVal(map[string]cty.Value{ + "somelabel": cty.ObjectVal(map[string]cty.Value{}), + }), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.MapVal(map[string]cty.Value{ + "somelabel": cty.ObjectVal(map[string]cty.Value{ + "num": cty.NumberIntVal(0), + "str1": cty.StringVal("l3INvNSQT"), + "str2": cty.StringVal("6zQu0"), + }), + }), + }), + }, + "basic-mocked-attributes": { + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Computed: true, + Optional: true, + }, + "str": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "bool": { + Type: cty.Bool, + Computed: true, + Optional: true, + }, + "obj": { + Type: cty.Object(map[string]cty.Type{ + "fieldNum": cty.Number, + "fieldStr1": cty.String, + "fieldStr2": cty.String, + }), + Computed: true, + Optional: true, + }, + "list": { + Type: cty.List(cty.String), + Computed: true, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Computed: true, + Optional: true, + }, + "str1": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "str2": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "bool": { + Type: cty.Bool, + Computed: true, + Optional: true, + }, + "obj": { + Type: cty.Object(map[string]cty.Type{ + "fieldNum": cty.Number, + "fieldStr": cty.String, + }), + Computed: true, + Optional: true, + }, + "list": { + Type: cty.List(cty.String), 
+ Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "nested": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{})}), + }), + wantVal: cty.ObjectVal(map[string]cty.Value{ + "num": cty.NumberIntVal(0), + "str": cty.StringVal("xNmGyAVmNkB4"), + "bool": cty.False, + "obj": cty.ObjectVal(map[string]cty.Value{ + "fieldNum": cty.NumberIntVal(0), + "fieldStr1": cty.StringVal("l3INvNSQT"), + "fieldStr2": cty.StringVal("6zQu0"), + }), + "list": cty.ListValEmpty(cty.String), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "num": cty.NumberIntVal(0), + "str1": cty.StringVal("mCp2gObD"), + "str2": cty.StringVal("iOtQNQsLiFD5"), + "bool": cty.False, + "obj": cty.ObjectVal(map[string]cty.Value{ + "fieldNum": cty.NumberIntVal(0), + "fieldStr": cty.StringVal("ionwj3qrsh4xyC9"), + }), + "list": cty.ListValEmpty(cty.String), + }), + }), + }), + }, + "source-priority": { + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "useConfigValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "useDefaultsValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "generateMockValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "useConfigValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "useDefaultsValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "generateMockValue": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + config: cty.ObjectVal(map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromConfig"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromConfig"), + }), + 
}), + }), + defaults: map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromDefaults"), + "useDefaultsValue": cty.StringVal("iAmFromDefaults"), + "nested": cty.ObjectVal(map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromDefaults"), + "useDefaultsValue": cty.StringVal("iAmFromDefaults"), + }), + }, + wantVal: cty.ObjectVal(map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromConfig"), + "useDefaultsValue": cty.StringVal("iAmFromDefaults"), + "generateMockValue": cty.StringVal("l3INvNSQT"), + "nested": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "useConfigValue": cty.StringVal("iAmFromConfig"), + "useDefaultsValue": cty.StringVal("iAmFromDefaults"), + "generateMockValue": cty.StringVal("6zQu0"), + }), + }), + }), + wantWarning: true, // ignored value in defaults + }, + } + + for name, test := range tests { + test := test + + t.Run(name, func(t *testing.T) { + t.Parallel() + + gotVal, gotDiags := NewMockValueComposer(42).ComposeBySchema(test.schema, test.config, test.defaults) + switch { + case test.wantError && !gotDiags.HasErrors(): + t.Fatalf("Expected error in diags, but none returned") + + case !test.wantError && gotDiags.HasErrors(): + t.Fatalf("Got unexpected error diags: %v", gotDiags.ErrWithWarnings()) + + case test.wantWarning && len(gotDiags) == 0: + t.Fatalf("Expected warning in diags, but none returned") + + case !test.wantWarning && len(gotDiags) != 0: + t.Fatalf("Got unexpected diags: %v", gotDiags.ErrWithWarnings()) + + case !test.wantVal.RawEquals(gotVal): + t.Fatalf("Got unexpected value: %v", gotVal.GoString()) + } + }) + } +} diff --git a/pkg/configs/hcl2shim/paths.go b/pkg/configs/hcl2shim/paths.go new file mode 100644 index 00000000000..bcc1c7b6d3b --- /dev/null +++ b/pkg/configs/hcl2shim/paths.go @@ -0,0 +1,281 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. 
+ for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff. 
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %w", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) 
+ if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. 
+func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/pkg/configs/hcl2shim/paths_test.go b/pkg/configs/hcl2shim/paths_test.go new file mode 100644 index 00000000000..1ffe7570efb --- /dev/null +++ b/pkg/configs/hcl2shim/paths_test.go @@ -0,0 +1,413 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/google/go-cmp/cmp" + + "github.com/zclconf/go-cty/cty" +) + +var ( + ignoreUnexported = cmpopts.IgnoreUnexported(cty.GetAttrStep{}, cty.IndexStep{}) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +func TestPathFromFlatmap(t *testing.T) { + tests := []struct { + Flatmap string + Type cty.Type + Want cty.Path + WantErr string + }{ + { + Flatmap: "", + Type: cty.EmptyObject, + Want: nil, + }, + { + Flatmap: "attr", + Type: cty.EmptyObject, + Want: nil, + WantErr: `attribute "attr" not found`, + }, + { + Flatmap: "foo", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + }, + }, + { + Flatmap: "foo.#", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + }, + }, + { + Flatmap: "foo.1", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.String), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: 
cty.NumberIntVal(1)}, + }, + }, + { + Flatmap: "foo.1", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Tuple([]cty.Type{ + cty.String, + cty.Bool, + }), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + }, + }, + { + // a set index returns the set itself, since this being applied to + // a diff and the set is changing. + Flatmap: "foo.24534534", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.String), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + }, + }, + { + Flatmap: "foo.%", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + }, + }, + { + Flatmap: "foo.baz", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.Bool), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.StringVal("baz")}, + }, + }, + { + Flatmap: "foo.bar.baz", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map( + cty.Map(cty.Bool), + ), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.StringVal("bar")}, + cty.IndexStep{Key: cty.StringVal("baz")}, + }, + }, + { + Flatmap: "foo.bar.baz", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map( + cty.Object(map[string]cty.Type{ + "baz": cty.String, + }), + ), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.StringVal("bar")}, + cty.GetAttrStep{Name: "baz"}, + }, + }, + { + Flatmap: "foo.0.bar", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.Bool, + })), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "bar"}, + }, + }, + { + Flatmap: "foo.34534534.baz", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Set(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.Bool, + })), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: 
"foo"}, + }, + }, + { + Flatmap: "foo.bar.bang", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + WantErr: `invalid step "bar.bang"`, + }, + { + // there should not be any attribute names with dots + Flatmap: "foo.bar.bang", + Type: cty.Object(map[string]cty.Type{ + "foo.bar": cty.Map(cty.String), + }), + WantErr: `attribute "foo" not found`, + }, + { + // We can only handle key names with dots if the map elements are a + // primitive type. + Flatmap: "foo.bar.bop", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.StringVal("bar.bop")}, + }, + }, + { + Flatmap: "foo.bar.0.baz", + Type: cty.Object(map[string]cty.Type{ + "foo": cty.Map( + cty.List( + cty.Map(cty.String), + ), + ), + }), + Want: cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.StringVal("bar")}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.IndexStep{Key: cty.StringVal("baz")}, + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s as %#v", test.Flatmap, test.Type), func(t *testing.T) { + got, err := requiresReplacePath(test.Flatmap, test.Type) + + if test.WantErr != "" { + if err == nil { + t.Fatalf("succeeded; want error: %s", test.WantErr) + } + if got, want := err.Error(), test.WantErr; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else { + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + } + + if !reflect.DeepEqual(got, test.Want) { + t.Fatalf("incorrect path\ngot: %#v\nwant: %#v\n", got, test.Want) + } + }) + } +} + +func TestRequiresReplace(t *testing.T) { + for _, tc := range []struct { + name string + attrs []string + expected []cty.Path + ty cty.Type + }{ + { + name: "basic", + attrs: []string{ + "foo", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: 
"two", + attrs: []string{ + "foo", + "bar", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.String, + "bar": cty.String, + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + cty.Path{cty.GetAttrStep{Name: "bar"}}, + }, + }, + { + name: "nested object", + attrs: []string{ + "foo.bar", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Object(map[string]cty.Type{ + "bar": cty.String, + }), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.GetAttrStep{Name: "bar"}}, + }, + }, + { + name: "nested objects", + attrs: []string{ + "foo.bar.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Object(map[string]cty.Type{ + "bar": cty.Object(map[string]cty.Type{ + "baz": cty.String, + }), + }), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.GetAttrStep{Name: "bar"}, cty.GetAttrStep{Name: "baz"}}, + }, + }, + { + name: "nested map", + attrs: []string{ + "foo.%", + "foo.bar", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: "nested list", + attrs: []string{ + "foo.#", + "foo.1", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: "object in map", + attrs: []string{ + "foo.bar.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.Object( + map[string]cty.Type{ + "baz": cty.String, + }, + )), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.IndexStep{Key: cty.StringVal("bar")}, cty.GetAttrStep{Name: "baz"}}, + }, + }, + { + name: "object in list", + attrs: []string{ + "foo.1.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object( + map[string]cty.Type{ + "baz": cty.String, + }, + )), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.IndexStep{Key: cty.NumberIntVal(1)}, 
cty.GetAttrStep{Name: "baz"}}, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + rp, err := RequiresReplace(tc.attrs, tc.ty) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(tc.expected, rp, ignoreUnexported, valueComparer) { + t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expected, rp) + } + }) + + } +} + +func TestFlatmapKeyFromPath(t *testing.T) { + for i, tc := range []struct { + path cty.Path + attr string + }{ + { + path: cty.Path{ + cty.GetAttrStep{Name: "force_new"}, + }, + attr: "force_new", + }, + { + path: cty.Path{ + cty.GetAttrStep{Name: "attr"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "force_new"}, + }, + attr: "attr.0.force_new", + }, + { + path: cty.Path{ + cty.GetAttrStep{Name: "attr"}, + cty.IndexStep{Key: cty.StringVal("key")}, + cty.GetAttrStep{Name: "obj_attr"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "force_new"}, + }, + attr: "attr.key.obj_attr.0.force_new", + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + attr := FlatmapKeyFromPath(tc.path) + if attr != tc.attr { + t.Fatalf("expected:%q got:%q", tc.attr, attr) + } + }) + } +} diff --git a/pkg/configs/hcl2shim/single_attr_body.go b/pkg/configs/hcl2shim/single_attr_body.go new file mode 100644 index 00000000000..a16d85ecf95 --- /dev/null +++ b/pkg/configs/hcl2shim/single_attr_body.go @@ -0,0 +1,90 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl/v2" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. 
+type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. + remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/pkg/configs/hcl2shim/synth_body.go 
b/pkg/configs/hcl2shim/synth_body.go new file mode 100644 index 00000000000..79924c9cf54 --- /dev/null +++ b/pkg/configs/hcl2shim/synth_body.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes +// corresponding to the elements given in the values map. +// +// This is useful in situations where, for example, values provided on the +// command line can override values given in configuration, using MergeBodies. +// +// The given filename is used in case any diagnostics are returned. Since +// the created body is synthetic, it is likely that this will not be a "real" +// filename. For example, if from a command line argument it could be +// a representation of that argument's name, such as "-var=...". 
+func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return synthBody{ + Filename: filename, + Values: values, + } +} + +type synthBody struct { + Filename string + Values map[string]cty.Value +} + +func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remain, diags := b.PartialContent(schema) + remainS := remain.(synthBody) + for name := range remainS.Values { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), + Subject: b.synthRange().Ptr(), + }) + } + return content, diags +} + +func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + MissingItemRange: b.synthRange(), + } + + remainValues := make(map[string]cty.Value) + for attrName, val := range b.Values { + remainValues[attrName] = val + } + + for _, attrS := range schema.Attributes { + delete(remainValues, attrS.Name) + val, defined := b.Values[attrS.Name] + if !defined { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.synthRange().Ptr(), + }) + } + continue + } + content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) + } + + // We just ignore blocks altogether, because this body type never has + // nested blocks. 
+ + remain := synthBody{ + Filename: b.Filename, + Values: remainValues, + } + + return content, remain, diags +} + +func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + ret := make(hcl.Attributes) + for name, val := range b.Values { + ret[name] = b.synthAttribute(name, val) + } + return ret, nil +} + +func (b synthBody) MissingItemRange() hcl.Range { + return b.synthRange() +} + +func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { + rng := b.synthRange() + return &hcl.Attribute{ + Name: name, + Expr: &hclsyntax.LiteralValueExpr{ + Val: val, + SrcRange: rng, + }, + NameRange: rng, + Range: rng, + } +} + +func (b synthBody) synthRange() hcl.Range { + return hcl.Range{ + Filename: b.Filename, + Start: hcl.Pos{Line: 1, Column: 1}, + End: hcl.Pos{Line: 1, Column: 1}, + } +} diff --git a/pkg/configs/hcl2shim/synth_body_test.go b/pkg/configs/hcl2shim/synth_body_test.go new file mode 100644 index 00000000000..5c9892f8009 --- /dev/null +++ b/pkg/configs/hcl2shim/synth_body_test.go @@ -0,0 +1,70 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func TestSynthBodyContent(t *testing.T) { + tests := map[string]struct { + Values map[string]cty.Value + Schema *hcl.BodySchema + DiagCount int + }{ + "empty": { + Values: map[string]cty.Value{}, + Schema: &hcl.BodySchema{}, + DiagCount: 0, + }, + "missing required attribute": { + Values: map[string]cty.Value{}, + Schema: &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "nonexist", + Required: true, + }, + }, + }, + DiagCount: 1, // missing required attribute + }, + "missing optional attribute": { + Values: map[string]cty.Value{}, + Schema: &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "nonexist", + }, + }, + }, + DiagCount: 0, + }, + "extraneous attribute": { + Values: map[string]cty.Value{ + "foo": cty.StringVal("unwanted"), + }, + Schema: &hcl.BodySchema{}, + DiagCount: 1, // unsupported attribute + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + body := SynthBody("synth", test.Values) + _, diags := body.Content(test.Schema) + if got, want := len(diags), test.DiagCount; got != want { + t.Errorf("wrong number of diagnostics %d; want %d", got, want) + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + }) + } +} diff --git a/pkg/configs/hcl2shim/util.go b/pkg/configs/hcl2shim/util.go new file mode 100644 index 00000000000..9956d1d4dd6 --- /dev/null +++ b/pkg/configs/hcl2shim/util.go @@ -0,0 +1,94 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// exprIsNativeQuotedString determines whether the given expression looks like +// it's a quoted string in the HCL native syntax. 
+// +// This should be used sparingly only for situations where our legacy HCL +// decoding would've expected a keyword or reference in quotes but our new +// decoding expects the keyword or reference to be provided directly as +// an identifier-based expression. +func ExprIsNativeQuotedString(expr hcl.Expression) bool { + _, ok := expr.(*hclsyntax.TemplateExpr) + return ok +} + +// SchemaForOverrides takes a *hcl.BodySchema and produces a new one that is +// equivalent except that any required attributes are forced to not be required. +// +// This is useful for dealing with "override" config files, which are allowed +// to omit things that they don't wish to override from the main configuration. +// +// The returned schema may have some pointers in common with the given schema, +// so neither the given schema nor the returned schema should be modified after +// using this function in order to avoid confusion. +// +// Overrides are rarely used, so it's recommended to just create the override +// schema on the fly only when it's needed, rather than storing it in a global +// variable as we tend to do for a primary schema. +func SchemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} + +// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that +// is equivalent except that it accepts an additional block type "dynamic" with +// a single label, used to recognize usage of the HCL dynamic block extension. 
+func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + + copy(ret.Blocks, schema.Blocks) + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: "dynamic", + LabelNames: []string{"type"}, + }) + + return ret +} + +// ConvertJSONExpressionToHCL is used to convert HCL *json.expression into +// regular hcl syntax. +// Sometimes, we manually parse an expression instead of using the hcl library +// for parsing. In this case we need to handle json configs specially, as the +// values will be json strings rather than hcl. +func ConvertJSONExpressionToHCL(expr hcl.Expression) (hcl.Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + // We can abuse the hcl json api and rely on the fact that calling + // Value on a json expression with no EvalContext will return the + // raw string. We can then parse that as normal hcl syntax, and + // continue with the decoding. + value, ds := expr.Value(nil) + diags = append(diags, ds...) + if diags.HasErrors() { + return nil, diags + } + + expr, ds = hclsyntax.ParseExpression([]byte(value.AsString()), expr.Range().Filename, expr.Range().Start) + diags = append(diags, ds...) + if diags.HasErrors() { + return nil, diags + } + + return expr, diags +} diff --git a/pkg/configs/hcl2shim/util_test.go b/pkg/configs/hcl2shim/util_test.go new file mode 100644 index 00000000000..8ead122db36 --- /dev/null +++ b/pkg/configs/hcl2shim/util_test.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "reflect" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hclJSON "github.com/hashicorp/hcl/v2/json" +) + +func TestConvertJSONExpressionToHCL(t *testing.T) { + tests := []struct { + Input string + }{ + { + Input: "hello", + }, + { + Input: "resource.test_resource[0]", + }, + } + + for _, test := range tests { + JSONExpr, diags := hclJSON.ParseExpression([]byte(`"`+test.Input+`"`), "") + if diags.HasErrors() { + t.Errorf("got %d diagnostics; want 0", len(diags)) + for _, d := range diags { + t.Logf(" - %s", d.Error()) + } + } + + want, diags := hclsyntax.ParseExpression([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + t.Errorf("got %d diagnostics; want 0", len(diags)) + for _, d := range diags { + t.Logf(" - %s", d.Error()) + } + } + + t.Run(test.Input, func(t *testing.T) { + resultExpr, diags := ConvertJSONExpressionToHCL(JSONExpr) + if diags.HasErrors() { + t.Errorf("got %d diagnostics; want 0", len(diags)) + for _, d := range diags { + t.Logf(" - %s", d.Error()) + } + } + + if !reflect.DeepEqual(resultExpr, want) { + t.Errorf("got %s, but want %s", resultExpr, want) + } + }) + } +} diff --git a/pkg/configs/hcl2shim/values.go b/pkg/configs/hcl2shim/values.go new file mode 100644 index 00000000000..641f7825247 --- /dev/null +++ b/pkg/configs/hcl2shim/values.go @@ -0,0 +1,235 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. 
+const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// tofu.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// 
ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. 
+func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/pkg/configs/hcl2shim/values_equiv.go b/pkg/configs/hcl2shim/values_equiv.go new file mode 100644 index 00000000000..794e537d5b1 --- /dev/null +++ b/pkg/configs/hcl2shim/values_equiv.go @@ -0,0 +1,219 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. Ambiguity is most common in the presence +// of sets because in practice it is impossible to exactly correlate +// nonequal-but-equivalent set elements because they have no identity separate +// from their value. 
+// +// This must be used _only_ for comparing values for equivalence within the +// SDK planning code. It is only meaningful to compare the "prior state" +// provided by OpenTofu Core with the "planned new state" produced by the +// legacy SDK code via shims. In particular it is not valid to use this +// function with the config value or the "proposed new state" value +// because they contain only the subset of data that OpenTofu Core itself is +// able to determine. +func ValuesSDKEquivalent(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + // We don't generally expect nils to appear, but we'll allow them + // for robustness since the data structures produced by legacy SDK code + // can sometimes be non-ideal. + return a == b // equivalent if they are _both_ nil + } + if a.RawEquals(b) { + // Easy case. We use RawEquals because we want two unknowns to be + // considered equal here, whereas "Equals" would return unknown. + return true + } + if !a.IsKnown() || !b.IsKnown() { + // Two unknown values are equivalent regardless of type. A known is + // never equivalent to an unknown. + return a.IsKnown() == b.IsKnown() + } + if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { + // Two null/zero values are equivalent regardless of type. A non-zero is + // never equivalent to a zero. + return aZero == bZero + } + + // If we get down here then we are guaranteed that both a and b are known, + // non-null values. 
+ + aTy := a.Type() + bTy := b.Type() + switch { + case aTy.IsSetType() && bTy.IsSetType(): + return valuesSDKEquivalentSets(a, b) + case aTy.IsListType() && bTy.IsListType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsTupleType() && bTy.IsTupleType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsMapType() && bTy.IsMapType(): + return valuesSDKEquivalentMappings(a, b) + case aTy.IsObjectType() && bTy.IsObjectType(): + return valuesSDKEquivalentMappings(a, b) + case aTy == cty.Number && bTy == cty.Number: + return valuesSDKEquivalentNumbers(a, b) + default: + // We've now covered all the interesting cases, so anything that falls + // down here cannot be equivalent. + return false + } +} + +// valuesSDKEquivalentIsNullOrZero returns true if the given value is either +// null or is the "zero value" (in the SDK/Go sense) for its type. +func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { + if v == cty.NilVal { + return true + } + + ty := v.Type() + switch { + case !v.IsKnown(): + return false + case v.IsNull(): + return true + + // After this point, v is always known and non-null + case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): + return v.LengthInt() == 0 + case ty == cty.String: + return v.RawEquals(cty.StringVal("")) + case ty == cty.Number: + return v.RawEquals(cty.Zero) + case ty == cty.Bool: + return v.RawEquals(cty.False) + default: + // The above is exhaustive, but for robustness we'll consider anything + // else to _not_ be zero unless it is null. + return false + } +} + +// valuesSDKEquivalentSets returns true only if each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa. +// This is a fuzzy operation that prefers to signal non-equivalence if it cannot +// be certain that all elements are accounted for. 
+func valuesSDKEquivalentSets(a, b cty.Value) bool { + if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { + return false + } + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. + // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. + as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if ValuesSDKEquivalent(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + for _, eq := range aeqs { + if !eq { + return false + } + } + for _, eq := range beqs { + if !eq { + return false + } + } + return true +} + +// valuesSDKEquivalentSequences decides equivalence for two sequence values +// (lists or tuples). +func valuesSDKEquivalentSequences(a, b cty.Value) bool { + as := a.AsValueSlice() + bs := b.AsValueSlice() + if len(as) != len(bs) { + return false + } + + for i := range as { + if !ValuesSDKEquivalent(as[i], bs[i]) { + return false + } + } + return true +} + +// valuesSDKEquivalentMappings decides equivalence for two mapping values +// (maps or objects). +func valuesSDKEquivalentMappings(a, b cty.Value) bool { + as := a.AsValueMap() + bs := b.AsValueMap() + if len(as) != len(bs) { + return false + } + + for k, av := range as { + bv, ok := bs[k] + if !ok { + return false + } + if !ValuesSDKEquivalent(av, bv) { + return false + } + } + return true +} + +// valuesSDKEquivalentNumbers decides equivalence for two number values based +// on the fact that the SDK uses int and float64 representations while +// cty (and thus OpenTofu Core) uses big.Float, and so we expect to lose +// precision in the round-trip. 
+// +// This does _not_ attempt to allow for an epsilon difference that may be +// caused by accumulated inaccuracy in a float calculation, under the +// expectation that providers generally do not actually do computations on +// floats and instead just pass string representations of them on verbatim +// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's +// a problem for the provider itself to deal with, based on its knowledge of +// the remote system, e.g. using DiffSuppressFunc. +func valuesSDKEquivalentNumbers(a, b cty.Value) bool { + if a.RawEquals(b) { + return true // easy + } + + af := a.AsBigFloat() + bf := b.AsBigFloat() + + if af.IsInt() != bf.IsInt() { + return false + } + if af.IsInt() && bf.IsInt() { + return false // a.RawEquals(b) test above is good enough for integers + } + + // The SDK supports only int and float64, so if it's not an integer + // we know that only a float64-level of precision can possibly be + // significant. + af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/pkg/configs/hcl2shim/values_equiv_test.go b/pkg/configs/hcl2shim/values_equiv_test.go new file mode 100644 index 00000000000..f993b412f90 --- /dev/null +++ b/pkg/configs/hcl2shim/values_equiv_test.go @@ -0,0 +1,434 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "math/big" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestValuesSDKEquivalent(t *testing.T) { + piBig, _, err := big.ParseFloat("3.14159265358979323846264338327950288419716939937510582097494459", 10, 512, big.ToZero) + if err != nil { + t.Fatal(err) + } + pi64, _ := piBig.Float64() + + tests := []struct { + A, B cty.Value + Want bool + }{ + // Strings + { + cty.StringVal("hello"), + cty.StringVal("hello"), + true, + }, + { + cty.StringVal("hello"), + cty.StringVal("world"), + false, + }, + { + cty.StringVal("hello"), + cty.StringVal(""), + false, + }, + { + cty.NullVal(cty.String), + cty.StringVal(""), + true, + }, + + // Numbers + { + cty.NumberIntVal(1), + cty.NumberIntVal(1), + true, + }, + { + cty.NumberIntVal(1), + cty.NumberIntVal(2), + false, + }, + { + cty.NumberIntVal(1), + cty.Zero, + false, + }, + { + cty.NullVal(cty.Number), + cty.Zero, + true, + }, + { + cty.NumberVal(piBig), + cty.Zero, + false, + }, + { + cty.NumberFloatVal(pi64), + cty.Zero, + false, + }, + { + cty.NumberFloatVal(pi64), + cty.NumberVal(piBig), + true, + }, + + // Bools + { + cty.True, + cty.True, + true, + }, + { + cty.True, + cty.False, + false, + }, + { + cty.NullVal(cty.Bool), + cty.False, + true, + }, + + // Mixed primitives + { + cty.StringVal("hello"), + cty.False, + false, + }, + { + cty.StringVal(""), + cty.False, + true, + }, + { + cty.NumberIntVal(0), + cty.False, + true, + }, + { + cty.StringVal(""), + cty.NumberIntVal(0), + true, + }, + { + cty.NullVal(cty.Bool), + cty.NullVal(cty.Number), + true, + }, + { + cty.StringVal(""), + cty.NullVal(cty.Number), + true, + }, + + // Lists + { + cty.ListValEmpty(cty.String), + cty.ListValEmpty(cty.String), + true, + }, + { + cty.ListValEmpty(cty.String), + cty.NullVal(cty.List(cty.String)), + true, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("hello")}), + 
false, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.ListValEmpty(cty.String), + false, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + true, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("hello")}), + cty.ListVal([]cty.Value{cty.StringVal("world")}), + false, + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.String)}), + cty.ListVal([]cty.Value{cty.StringVal("")}), + true, + }, + + // Tuples + { + cty.EmptyTupleVal, + cty.EmptyTupleVal, + true, + }, + { + cty.EmptyTupleVal, + cty.NullVal(cty.EmptyTuple), + true, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("hello")}), + false, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.EmptyTupleVal, + false, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + true, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello")}), + cty.TupleVal([]cty.Value{cty.StringVal("world")}), + false, + }, + { + cty.TupleVal([]cty.Value{cty.NullVal(cty.String)}), + cty.TupleVal([]cty.Value{cty.StringVal("")}), + true, + }, + + // Sets + { + cty.SetValEmpty(cty.String), + cty.SetValEmpty(cty.String), + true, + }, + { + cty.SetValEmpty(cty.String), + cty.NullVal(cty.Set(cty.String)), + true, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.SetValEmpty(cty.String), + false, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + true, + }, + { + cty.SetVal([]cty.Value{cty.StringVal("hello")}), + cty.SetVal([]cty.Value{cty.StringVal("world")}), + false, + }, + { + cty.SetVal([]cty.Value{cty.NullVal(cty.String)}), + cty.SetVal([]cty.Value{cty.StringVal("")}), + true, + }, + { + cty.SetVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal(""), + }), + cty.SetVal([]cty.Value{ + cty.NullVal(cty.String), + }), + 
false, // because the element count is different + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal(""), + "b": cty.StringVal(""), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.StringVal(""), + }), + }), + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal(""), + "b": cty.StringVal(""), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal(""), + "b": cty.NullVal(cty.String), + }), + }), + true, + }, + { + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("boop"), + "b": cty.StringVal(""), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.StringVal(""), + }), + }), + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("beep"), + "b": cty.StringVal(""), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal(""), + "b": cty.NullVal(cty.String), + }), + }), + false, + }, + { + cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListValEmpty(cty.String), + "list_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "unused": cty.StringVal(""), + }), + }), + })}), + cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListValEmpty(cty.String), + "list_block": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "unused": cty.NullVal(cty.String), + }), + }), + })}), + true, + }, + + // Maps + { + cty.MapValEmpty(cty.String), + cty.MapValEmpty(cty.String), + true, + }, + { + cty.MapValEmpty(cty.String), + cty.NullVal(cty.Map(cty.String)), + true, + }, + { + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello"), "hey": cty.StringVal("hello")}), + false, + }, + { + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.MapValEmpty(cty.String), + false, + }, + { + 
cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + true, + }, + { + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("world")}), + false, + }, + { + cty.MapVal(map[string]cty.Value{"hi": cty.NullVal(cty.String)}), + cty.MapVal(map[string]cty.Value{"hi": cty.StringVal("")}), + true, + }, + + // Objects + { + cty.EmptyObjectVal, + cty.EmptyObjectVal, + true, + }, + { + cty.EmptyObjectVal, + cty.NullVal(cty.EmptyObject), + true, + }, + { + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello"), "hey": cty.StringVal("hello")}), + false, + }, + { + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.EmptyObjectVal, + false, + }, + { + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + true, + }, + { + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("hello")}), + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("world")}), + false, + }, + { + cty.ObjectVal(map[string]cty.Value{"hi": cty.NullVal(cty.String)}), + cty.ObjectVal(map[string]cty.Value{"hi": cty.StringVal("")}), + true, + }, + + // Unknown values + { + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + true, + }, + { + cty.StringVal("hello"), + cty.UnknownVal(cty.String), + false, + }, + { + cty.StringVal(""), + cty.UnknownVal(cty.String), + false, + }, + { + cty.NullVal(cty.String), + cty.UnknownVal(cty.String), + false, + }, + } + + run := func(t *testing.T, a, b cty.Value, want bool) { + got := ValuesSDKEquivalent(a, b) + + if got != want { + t.Errorf("wrong result\nfor: %#v ≈ %#v\ngot %#v, but want %#v", a, b, got, want) + } + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v ≈ %#v", test.A, test.B), func(t *testing.T) { + run(t, test.A, test.B, 
test.Want) + }) + // This function is symmetrical, so we'll also test in reverse so + // we don't need to manually copy all of the test cases. (But this does + // mean that one failure normally becomes two, of course!) + if !test.A.RawEquals(test.B) { + t.Run(fmt.Sprintf("%#v ≈ %#v", test.B, test.A), func(t *testing.T) { + run(t, test.B, test.A, test.Want) + }) + } + } +} diff --git a/pkg/configs/hcl2shim/values_test.go b/pkg/configs/hcl2shim/values_test.go new file mode 100644 index 00000000000..9586eab1e23 --- /dev/null +++ b/pkg/configs/hcl2shim/values_test.go @@ -0,0 +1,420 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hcl2shim + +import ( + "fmt" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestConfigValueFromHCL2Block(t *testing.T) { + tests := []struct { + Input cty.Value + Schema *configschema.Block + Want map[string]interface{} + }{ + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.StringVal("91037"), + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + 
map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + "zip": "91037", + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.NullVal(cty.Object(map[string]cty.Type{ + "street": cty.List(cty.String), + "city": cty.String, + "state": cty.String, + "zip": cty.String, + })), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.NullVal(cty.String), // should be omitted altogether in result + }), + }), + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "age": {Type: cty.Number, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "street": {Type: cty.List(cty.String), Optional: true}, + "city": {Type: cty.String, 
Optional: true}, + "state": {Type: cty.String, Optional: true}, + "zip": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.ListVal([]cty.Value{cty.EmptyObjectVal}), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": []interface{}{ + map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.ListValEmpty(cty.EmptyObject), // should be omitted altogether in result + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.SetVal([]cty.Value{cty.EmptyObjectVal}), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": []interface{}{ + map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.SetValEmpty(cty.EmptyObject), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.MapVal(map[string]cty.Value{"foo": cty.EmptyObjectVal}), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingMap, + Block: 
configschema.Block{}, + }, + }, + }, + map[string]interface{}{ + "address": map[string]interface{}{ + "foo": map[string]interface{}{}, + }, + }, + }, + { + cty.ObjectVal(map[string]cty.Value{ + "address": cty.MapValEmpty(cty.EmptyObject), + }), + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "address": { + Nesting: configschema.NestingMap, + Block: configschema.Block{}, + }, + }, + }, + map[string]interface{}{}, + }, + { + cty.NullVal(cty.EmptyObject), + &configschema.Block{}, + nil, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := ConfigValueFromHCL2Block(test.Input, test.Schema) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestConfigValueFromHCL2(t *testing.T) { + tests := []struct { + Input cty.Value + Want interface{} + }{ + { + cty.True, + true, + }, + { + cty.False, + false, + }, + { + cty.NumberIntVal(12), + int(12), + }, + { + cty.NumberFloatVal(12.5), + float64(12.5), + }, + { + cty.StringVal("hello world"), + "hello world", + }, + { + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "age": cty.NumberIntVal(19), + "address": cty.ObjectVal(map[string]cty.Value{ + "street": cty.ListVal([]cty.Value{cty.StringVal("421 Shoreham Loop")}), + "city": cty.StringVal("Fridgewater"), + "state": cty.StringVal("MA"), + "zip": cty.StringVal("91037"), + }), + }), + map[string]interface{}{ + "name": "Ermintrude", + "age": int(19), + "address": map[string]interface{}{ + "street": []interface{}{"421 Shoreham Loop"}, + "city": "Fridgewater", + "state": "MA", + "zip": "91037", + }, + }, + }, + { + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.StringVal("baz"), + }), + map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + }, + { + cty.TupleVal([]cty.Value{ + cty.StringVal("foo"), + cty.True, + }), + []interface{}{ 
+ "foo", + true, + }, + }, + { + cty.NullVal(cty.String), + nil, + }, + { + cty.UnknownVal(cty.String), + UnknownVariableValue, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := ConfigValueFromHCL2(test.Input) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestHCL2ValueFromConfigValue(t *testing.T) { + tests := []struct { + Input interface{} + Want cty.Value + }{ + { + nil, + cty.NullVal(cty.DynamicPseudoType), + }, + { + UnknownVariableValue, + cty.DynamicVal, + }, + { + true, + cty.True, + }, + { + false, + cty.False, + }, + { + int(12), + cty.NumberIntVal(12), + }, + { + int(0), + cty.Zero, + }, + { + float64(12.5), + cty.NumberFloatVal(12.5), + }, + { + "hello world", + cty.StringVal("hello world"), + }, + { + "O\u0308", // decomposed letter + diacritic + cty.StringVal("\u00D6"), // NFC-normalized on entry into cty + }, + { + []interface{}{}, + cty.EmptyTupleVal, + }, + { + []interface{}(nil), + cty.EmptyTupleVal, + }, + { + []interface{}{"hello", "world"}, + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}), + }, + { + map[string]interface{}{}, + cty.EmptyObjectVal, + }, + { + map[string]interface{}(nil), + cty.EmptyObjectVal, + }, + { + map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.StringVal("baz"), + }), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v", test.Input), func(t *testing.T) { + got := HCL2ValueFromConfigValue(test.Input) + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ninput: %#v\ngot: %#v\nwant: %#v", test.Input, got, test.Want) + } + }) + } +} diff --git a/pkg/configs/import.go b/pkg/configs/import.go new file mode 100644 index 00000000000..14373a0f1a9 --- /dev/null +++ b/pkg/configs/import.go @@ -0,0 +1,210 @@ +// Copyright (c) The 
OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type Import struct { + ID hcl.Expression + + // To is the address HCL expression given in the `import` block configuration. + // It supports the following address formats: + // - aws_s3_bucket.my_bucket + // - module.my_module.aws_s3_bucket.my_bucket + // - aws_s3_bucket.my_bucket["static_key"] + // - module.my_module[0].aws_s3_bucket.my_buckets["static_key"] + // - aws_s3_bucket.my_bucket[expression] + // - module.my_module[expression].aws_s3_bucket.my_buckets[expression] + // A dynamic instance key supports a dynamic expression like - a variable, a local, a condition (for example, + // ternary), a resource block attribute, a data block attribute, etc. + To hcl.Expression + // StaticTo is the corresponding resource and module that the address is referring to. When decoding, as long + // as the `to` field is in the accepted format, we could determine the actual modules and resource that the + // address represents. However, we do not yet know for certain what module instance and resource instance this + // address refers to. So, Static import is mainly used to figure out the Module and Resource, and Provider of the + // import target resource + // If we could not determine the StaticTo when decoding the block, then the address is in an unacceptable format + StaticTo addrs.ConfigResource + // ResolvedTo will be a reference to the resource instance of the import target, if it can be resolved when decoding + // the `import` block. 
If the `to` field does not represent a static address + // (for example: module.my_module[var.var1].aws_s3_bucket.bucket), then this will be nil. + // However, if the address is static and can be fully resolved at decode time + // (for example: module.my_module[2].aws_s3_bucket.bucket), then this will be a reference to the resource instance's + // address + // Mainly used for early validations on the import block address, for example making sure there are no duplicate + // import blocks targeting the same resource + ResolvedTo *addrs.AbsResourceInstance + + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + Provider addrs.Provider + + DeclRange hcl.Range + ProviderDeclRange hcl.Range +} + +func decodeImportBlock(block *hcl.Block) (*Import, hcl.Diagnostics) { + var diags hcl.Diagnostics + imp := &Import{ + DeclRange: block.DefRange, + } + + content, moreDiags := block.Body.Content(importBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["id"]; exists { + imp.ID = attr.Expr + } + + if attr, exists := content.Attributes["to"]; exists { + toExpr := attr.Expr + // Since we are manually parsing the 'to' argument, we need to specially + // handle json configs, in which case the values will be json strings + // rather than hcl + isJSON := hcljson.IsJSONExpression(attr.Expr) + + if isJSON { + convertedExpr, convertDiags := hcl2shim.ConvertJSONExpressionToHCL(toExpr) + diags = append(diags, convertDiags...) + + if diags.HasErrors() { + return imp, diags + } + + toExpr = convertedExpr + } + + imp.To = toExpr + staticAddress, addressDiags := staticImportAddress(toExpr) + diags = append(diags, addressDiags.ToHCL()...) + + // Exit early if there are issues resolving the static address part. 
We wouldn't be able to validate the provider in such a case + if addressDiags.HasErrors() { + return imp, diags + } + imp.StaticTo = staticAddress + + imp.ResolvedTo = resolvedImportAddress(imp.To) + } + + if attr, exists := content.Attributes["provider"]; exists { + if len(imp.StaticTo.Module) > 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import provider argument", + Detail: "The provider argument can only be specified in import blocks that will generate configuration.\n\nUse the providers argument within the module block to configure providers for all resources within a module, including imported resources.", + Subject: attr.Range.Ptr(), + }) + } + + var providerDiags hcl.Diagnostics + imp.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + imp.ProviderDeclRange = attr.Range + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["for_each"]; exists { + imp.ForEach = attr.Expr + } + + return imp, diags +} + +var importBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "provider", + }, + { + Name: "id", + Required: true, + }, + { + Name: "to", + Required: true, + }, + { + Name: "for_each", + }, + }, +} + +// absTraversalForImportToExpr returns a static traversal of an import block's "to" field. +// It is inspired by hcl.AbsTraversalForExpr and by tofu.triggersExprToTraversal +// The use-case here is different - we also want to allow hclsyntax.IndexExpr, +// but we don't really care about the key part of it. We just want a traversal that could be converted to an address +// of a resource, so we could determine the module + resource + provider +// +// Currently, there are 4 types of HCL expressions that support AsTraversal: +// - hclsyntax.ScopeTraversalExpr - Simply returns the Traversal. 
Same for our use-case here +// - hclsyntax.RelativeTraversalExpr - Calculates hcl.AbsTraversalForExpr for the Source, and adds the Traversal to it. Same here, with absTraversalForImportToExpr instead +// - hclsyntax.LiteralValueExpr - Mainly for null/false/true values. Not relevant in our use-case, as it could not really be part of a reference (unless it is inside of an index, which is irrelevant here anyway) +// - hclsyntax.ObjectConsKeyExpr - Not relevant here +// +// In addition to these, we need to also support hclsyntax.IndexExpr. For it - we do not care about what's in the index. +// We only need to know the traversal parts of its "Collection", as the index doesn't affect which resource/module this is +func absTraversalForImportToExpr(expr hcl.Expression) (traversal hcl.Traversal, diags tfdiags.Diagnostics) { + switch e := expr.(type) { + case *hclsyntax.IndexExpr: + t, d := absTraversalForImportToExpr(e.Collection) + diags = diags.Append(d) + traversal = append(traversal, t...) + case *hclsyntax.RelativeTraversalExpr: + t, d := absTraversalForImportToExpr(e.Source) + diags = diags.Append(d) + traversal = append(traversal, t...) + traversal = append(traversal, e.Traversal...) + case *hclsyntax.ScopeTraversalExpr: + traversal = append(traversal, e.Traversal...) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import address expression", + Detail: "Import address must be a reference to a resource's address, and only allows for indexing with dynamic keys. For example: module.my_module[expression1].aws_s3_bucket.my_buckets[expression2] for resources inside of modules, or simply aws_s3_bucket.my_bucket for a resource in the root module", + Subject: expr.Range().Ptr(), + }) + } + return +} + +// staticImportAddress returns an addrs.ConfigResource representing the module and resource of the import target. 
+// If the address is of an unacceptable format, the function will return error diags +func staticImportAddress(expr hcl.Expression) (addrs.ConfigResource, tfdiags.Diagnostics) { + traversal, diags := absTraversalForImportToExpr(expr) + if diags.HasErrors() { + return addrs.ConfigResource{}, diags + } + + absResourceInstance, diags := addrs.ParseAbsResourceInstance(traversal) + return absResourceInstance.ConfigResource(), diags +} + +// resolvedImportAddress attempts to find the resource instance of the import target, if possible. +// Here, we attempt to resolve the address as though it is a static absolute traversal, if that's possible. +// This would only be possible if the `import` block's "to" field does not rely on any data that is dynamic +func resolvedImportAddress(expr hcl.Expression) *addrs.AbsResourceInstance { + var diags hcl.Diagnostics + traversal, traversalDiags := hcl.AbsTraversalForExpr(expr) + diags = append(diags, traversalDiags...) + if diags.HasErrors() { + return nil + } + + to, toDiags := addrs.ParseAbsResourceInstance(traversal) + diags = append(diags, toDiags.ToHCL()...) + if diags.HasErrors() { + return nil + } + return &to +} diff --git a/pkg/configs/import_test.go b/pkg/configs/import_test.go new file mode 100644 index 00000000000..66497804f88 --- /dev/null +++ b/pkg/configs/import_test.go @@ -0,0 +1,329 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "reflect" + "testing" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +var ( + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) + traversalComparer = cmp.Comparer(traversalsAreEquivalent) +) + +func TestImportBlock_decode(t *testing.T) { + blockRange := hcl.Range{ + Filename: "mock.tf", + Start: hcl.Pos{Line: 3, Column: 12, Byte: 27}, + End: hcl.Pos{Line: 3, Column: 19, Byte: 34}, + } + pos := hcl.Pos{Line: 1, Column: 1} + + fooStrExpr, hclDiags := hclsyntax.ParseExpression([]byte("\"foo\""), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + barExpr, hclDiags := hclsyntax.ParseExpression([]byte("test_instance.bar"), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + barIndexExpr, hclDiags := hclsyntax.ParseExpression([]byte("test_instance.bar[\"one\"]"), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + modBarExpr, hclDiags := hclsyntax.ParseExpression([]byte("module.bar.test_instance.bar"), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + dynamicBarExpr, hclDiags := hclsyntax.ParseExpression([]byte("test_instance.bar[var.var1]"), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + invalidExpr, hclDiags := hclsyntax.ParseExpression([]byte("var.var1 ? 
test_instance.bar : test_instance.foo"), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + barResource := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + } + + tests := map[string]struct { + input *hcl.Block + want *Import + err string + }{ + "success": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + "to": { + Name: "to", + Expr: barExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: barExpr, + ResolvedTo: &addrs.AbsResourceInstance{ + Resource: addrs.ResourceInstance{Resource: barResource}, + }, + StaticTo: addrs.ConfigResource{ + Resource: barResource, + }, + ID: fooStrExpr, + DeclRange: blockRange, + }, + ``, + }, + "indexed resources": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + "to": { + Name: "to", + Expr: barIndexExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: barIndexExpr, + StaticTo: addrs.ConfigResource{ + Resource: barResource, + }, + ResolvedTo: &addrs.AbsResourceInstance{ + Resource: addrs.ResourceInstance{ + Resource: barResource, + Key: addrs.StringKey("one"), + }, + }, + ID: fooStrExpr, + DeclRange: blockRange, + }, + ``, + }, + "resource inside module": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + "to": { + Name: "to", + Expr: modBarExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: modBarExpr, + StaticTo: addrs.ConfigResource{ + Module: addrs.Module{"bar"}, + Resource: barResource, + }, + ResolvedTo: &addrs.AbsResourceInstance{ + Module: addrs.ModuleInstance{addrs.ModuleInstanceStep{ + Name: "bar", + }}, + Resource: addrs.ResourceInstance{ + Resource: barResource, + }, + }, + ID: fooStrExpr, + DeclRange: 
blockRange, + }, + ``, + }, + "dynamic resource index": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + "to": { + Name: "to", + Expr: dynamicBarExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: dynamicBarExpr, + StaticTo: addrs.ConfigResource{ + Resource: barResource, + }, + ID: fooStrExpr, + DeclRange: blockRange, + }, + ``, + }, + "error: missing id argument": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "to": { + Name: "to", + Expr: barExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: barExpr, + ResolvedTo: &addrs.AbsResourceInstance{ + Resource: addrs.ResourceInstance{Resource: barResource}, + }, + StaticTo: addrs.ConfigResource{ + Resource: barResource, + }, + DeclRange: blockRange, + }, + "Missing required argument", + }, + "error: missing to argument": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + ID: fooStrExpr, + DeclRange: blockRange, + }, + "Missing required argument", + }, + "error: invalid import address": { + &hcl.Block{ + Type: "import", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "id": { + Name: "id", + Expr: fooStrExpr, + }, + "to": { + Name: "to", + Expr: invalidExpr, + }, + }, + }), + DefRange: blockRange, + }, + &Import{ + To: invalidExpr, + ID: fooStrExpr, + DeclRange: blockRange, + }, + "Invalid import address expression", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeImportBlock(test.input) + + if diags.HasErrors() { + if test.err == "" { + t.Fatalf("unexpected error: %s", diags.Errs()) + } + if gotErr := diags[0].Summary; gotErr != test.err { + t.Errorf("wrong error, got %q, want %q", 
gotErr, test.err) + } + } else if test.err != "" { + t.Fatal("expected error") + } + + if !cmp.Equal(got, test.want, typeComparer, valueComparer, traversalComparer) { + t.Fatalf("wrong result: %s", cmp.Diff(got, test.want, typeComparer, valueComparer, traversalComparer)) + } + }) + } +} + +// Taken from traversalsAreEquivalent of hcl/v2 +func traversalsAreEquivalent(a, b hcl.Traversal) bool { + if len(a) != len(b) { + return false + } + for i := range a { + aStep := a[i] + bStep := b[i] + + if reflect.TypeOf(aStep) != reflect.TypeOf(bStep) { + return false + } + + // We can now assume that both are of the same type. + switch ts := aStep.(type) { + + case hcl.TraverseRoot: + if bStep.(hcl.TraverseRoot).Name != ts.Name { + return false + } + + case hcl.TraverseAttr: + if bStep.(hcl.TraverseAttr).Name != ts.Name { + return false + } + + case hcl.TraverseIndex: + if !bStep.(hcl.TraverseIndex).Key.RawEquals(ts.Key) { + return false + } + + default: + return false + } + } + return true +} diff --git a/pkg/configs/module.go b/pkg/configs/module.go new file mode 100644 index 00000000000..a45398e662a --- /dev/null +++ b/pkg/configs/module.go @@ -0,0 +1,861 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/experiments" + tfversion "github.com/kubegems/opentofu/version" +) + +// Module is a container for a set of configuration constructs that are +// evaluated within a common namespace. +type Module struct { + // SourceDir is the filesystem directory that the module was loaded from. + // + // This is populated automatically only for configurations loaded with + // LoadConfigDir. 
If the parser is using a virtual filesystem then the + // path here will be in terms of that virtual filesystem. + + // Any other caller that constructs a module directly with NewModule may + // assign a suitable value to this attribute before using it for other + // purposes. It should be treated as immutable by all consumers of Module + // values. + SourceDir string + + CoreVersionConstraints []VersionConstraint + + ActiveExperiments experiments.Set + + Backend *Backend + CloudConfig *CloudConfig + ProviderConfigs map[string]*Provider + ProviderRequirements *RequiredProviders + ProviderLocalNames map[addrs.Provider]string + ProviderMetas map[addrs.Provider]*ProviderMeta + Encryption *config.EncryptionConfig + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource + + Moved []*Moved + Import []*Import + Removed []*Removed + + Checks map[string]*Check + + Tests map[string]*TestFile + + // IsOverridden indicates if the module is being overridden. It's used in + // testing framework to not call the underlying module. + IsOverridden bool + + // StaticEvaluator is used to evaluate static expressions in the scope of the Module. + StaticEvaluator *StaticEvaluator +} + +// GetProviderConfig uses name and alias to find the respective Provider configuration. +func (m *Module) GetProviderConfig(name, alias string) (*Provider, bool) { + tp := &Provider{Name: name, Alias: alias} + p, ok := m.ProviderConfigs[tp.moduleUniqueKey()] + return p, ok +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. 
+// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. +type File struct { + CoreVersionConstraints []VersionConstraint + + ActiveExperiments experiments.Set + + Backends []*Backend + CloudConfigs []*CloudConfig + ProviderConfigs []*Provider + ProviderMetas []*ProviderMeta + RequiredProviders []*RequiredProviders + Encryptions []*config.EncryptionConfig + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource + + Moved []*Moved + Import []*Import + Removed []*Removed + + Checks []*Check +} + +// SelectiveLoader allows the consumer to only load and validate the portions of files needed for the given operations/contexts +type SelectiveLoader int + +const ( + SelectiveLoadAll SelectiveLoader = 0 + SelectiveLoadBackend SelectiveLoader = 1 + SelectiveLoadEncryption SelectiveLoader = 2 +) + +// Apply the selective filter to the input files +func (s SelectiveLoader) filter(input []*File) []*File { + if s == SelectiveLoadAll { + return input + } + + out := make([]*File, len(input)) + for i, inFile := range input { + outFile := &File{ + Variables: inFile.Variables, + Locals: inFile.Locals, + } + + switch s { //nolint:exhaustive // SelectiveLoadAll handled above + case SelectiveLoadBackend: + outFile.Backends = inFile.Backends + outFile.CloudConfigs = inFile.CloudConfigs + case SelectiveLoadEncryption: + outFile.Encryptions = inFile.Encryptions + } + out[i] = outFile + } + return out +} + +// NewModuleWithTests matches NewModule except it will also load in the provided +// test files. 
+func NewModuleWithTests(primaryFiles, overrideFiles []*File, testFiles map[string]*TestFile, call StaticModuleCall, sourceDir string) (*Module, hcl.Diagnostics) { + mod, diags := NewModule(primaryFiles, overrideFiles, call, sourceDir, SelectiveLoadAll) + if mod != nil { + mod.Tests = testFiles + } + return mod, diags +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File, call StaticModuleCall, sourceDir string, load SelectiveLoader) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderLocalNames: map[addrs.Provider]string{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + Checks: map[string]*Check{}, + ProviderMetas: map[addrs.Provider]*ProviderMeta{}, + Tests: map[string]*TestFile{}, + SourceDir: sourceDir, + } + + // Apply selective load rules + primaryFiles = load.filter(primaryFiles) + overrideFiles = load.filter(overrideFiles) + + // Process the required_providers blocks first, to ensure that all + // resources have access to the correct provider FQNs + for _, file := range primaryFiles { + for _, r := range file.RequiredProviders { + if mod.ProviderRequirements != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate required providers configuration", + Detail: 
fmt.Sprintf("A module may have only one required providers configuration. The required providers were previously configured at %s.", mod.ProviderRequirements.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + mod.ProviderRequirements = r + } + } + + // If no required_providers block is configured, create a useful empty + // state to reduce nil checks elsewhere + if mod.ProviderRequirements == nil { + mod.ProviderRequirements = &RequiredProviders{ + RequiredProviders: make(map[string]*RequiredProvider), + } + } + + // Any required_providers blocks in override files replace the entire + // block for each provider + for _, file := range overrideFiles { + for _, override := range file.RequiredProviders { + for name, rp := range override.RequiredProviders { + mod.ProviderRequirements.RequiredProviders[name] = rp + } + } + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) + } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + // Static evaluation to build a StaticContext now that module has all relevant Locals / Variables + mod.StaticEvaluator = NewStaticEvaluator(mod, call) + + // If we have a backend, it may have fields that require locals/vars + if mod.Backend != nil { + // We don't know the backend type / loader at this point so we save the context for later use + mod.Backend.Eval = mod.StaticEvaluator + } + if mod.CloudConfig != nil { + mod.CloudConfig.eval = mod.StaticEvaluator + } + + // Process all module calls now that we have the static context + for _, mc := range mod.ModuleCalls { + mDiags := mc.decodeStaticFields(mod.StaticEvaluator) + diags = append(diags, mDiags...) + } + + diags = append(diags, checkModuleExperiments(mod)...) 
+ + // Generate the FQN -> LocalProviderName map + mod.gatherProviderLocalNames() + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) + + m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), + Subject: &b.DeclRange, + }) + continue + } + m.Backend = b + } + + for _, c := range file.CloudConfigs { + if m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring a cloud backend. 
A cloud backend was previously configured at %s.", m.CloudConfig.DeclRange), + Subject: &c.DeclRange, + }) + continue + } + + m.CloudConfig = c + } + + if m.Backend != nil && m.CloudConfig != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Both a backend and a cloud configuration are present", + Detail: fmt.Sprintf("A module may declare either one 'cloud' block configuring a cloud backend OR one 'backend' block configuring a state backend. A cloud backend is configured at %s; a backend is configured at %s. Remove the backend block to configure a cloud backend.", m.CloudConfig.DeclRange, m.Backend.DeclRange), + Subject: &m.Backend.DeclRange, + }) + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + if existing, exists := m.ProviderConfigs[key]; exists { + if existing.Alias == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } + continue + } + m.ProviderConfigs[key] = pc + } + + for _, pm := range file.ProviderMetas { + provider := m.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pm.Provider}) + if existing, exists := m.ProviderMetas[provider]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider_meta block", + Detail: fmt.Sprintf("A provider_meta block for provider %q was already declared at %s. Providers may only have one provider_meta block per module.", existing.Provider, existing.DeclRange), + Subject: &pm.DeclRange, + }) + } + m.ProviderMetas[provider] = pm + } + + for _, e := range file.Encryptions { + if m.Encryption != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate encryption configuration", + Detail: fmt.Sprintf("A module may have only one encryption configuration. Encryption was previously configured at %s.", m.Encryption.DeclRange), + Subject: &e.DeclRange, + }) + continue + } + m.Encryption = e + } + + for _, v := range file.Variables { + if existing, exists := m.Variables[v.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate variable declaration", + Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &v.DeclRange, + }) + } + m.Variables[v.Name] = v + } + + for _, l := range file.Locals { + if existing, exists := m.Locals[l.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate local value definition", + Detail: fmt.Sprintf("A local value named %q was already defined at %s. 
Local value names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &l.DeclRange, + }) + } + m.Locals[l.Name] = l + } + + for _, o := range file.Outputs { + if existing, exists := m.Outputs[o.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate output definition", + Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &o.DeclRange, + }) + } + m.Outputs[o.Name] = o + } + + for _, mc := range file.ModuleCalls { + if existing, exists := m.ModuleCalls[mc.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate module call", + Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), + Subject: &mc.DeclRange, + }) + } + m.ModuleCalls[mc.Name] = mc + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + if existing, exists := m.ManagedResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + + // set the provider FQN for the resource + if r.ProviderConfigRef != nil { + r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) + } else { + // an invalid resource name (for e.g. 
"null resource" instead of + // "null_resource") can cause a panic down the line in addrs: + // https://github.com/hashicorp/terraform/issues/25560 + implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) + if err == nil { + r.Provider = m.ImpliedProviderForUnqualifiedType(implied) + } + // We don't return a diagnostic because the invalid resource name + // will already have been caught. + } + } + + // Data sources can either be defined at the module root level, or within a + // single check block. We'll merge the data sources from both into the + // single module level DataResources map. + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + } + + for _, c := range file.Checks { + if c.DataResource != nil { + key := c.DataResource.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. 
Resource names must be unique per type in each module, including within check blocks.", existing.Type, existing.Name, existing.DeclRange), + Subject: &c.DataResource.DeclRange, + }) + continue + } + m.DataResources[key] = c.DataResource + } + + if existing, exists := m.Checks[c.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate check %q configuration", existing.Name), + Detail: fmt.Sprintf("A check block named %q was already declared at %s. Check blocks must be unique within each module.", existing.Name, existing.DeclRange), + Subject: &c.DeclRange, + }) + continue + } + m.Checks[c.Name] = c + } + + // Handle the provider associations for all data resources together. + for _, r := range m.DataResources { + // set the provider FQN for the resource + if r.ProviderConfigRef != nil { + r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr()) + } else { + // an invalid data source name (for e.g. "null resource" instead of + // "null_resource") can cause a panic down the line in addrs: + // https://github.com/hashicorp/terraform/issues/25560 + implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider()) + if err == nil { + r.Provider = m.ImpliedProviderForUnqualifiedType(implied) + } + // We don't return a diagnostic because the invalid resource name + // will already have been caught. + } + } + + // "Moved" blocks just append, because they are all independent of one + // another at this level. (We handle any references between them at + // runtime.) + m.Moved = append(m.Moved, file.Moved...) + + for _, i := range file.Import { + for _, mi := range m.Import { + if i.ResolvedTo != nil && mi.ResolvedTo != nil && (*i.ResolvedTo).Equal(*mi.ResolvedTo) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate import configuration for %q", *i.ResolvedTo), + Detail: fmt.Sprintf("An import block for the resource %q was already declared at %s. 
A resource can have only one import block.", *i.ResolvedTo, mi.DeclRange), + Subject: &i.DeclRange, + }) + continue + } + } + + if i.ProviderConfigRef != nil { + i.Provider = m.ProviderForLocalConfig(addrs.LocalProviderConfig{ + LocalName: i.ProviderConfigRef.Name, + Alias: i.ProviderConfigRef.Alias, + }) + } else { + implied, err := addrs.ParseProviderPart(i.StaticTo.Resource.ImpliedProvider()) + if err == nil { + i.Provider = m.ImpliedProviderForUnqualifiedType(implied) + } + // We don't return a diagnostic because the invalid resource name + // will already have been caught. + } + + m.Import = append(m.Import, i) + } + + m.Removed = append(m.Removed, file.Removed...) + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. + m.CoreVersionConstraints = nil + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) + } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.CloudConfig = nil // A backend block is mutually exclusive with a cloud one, and overwrites any cloud config + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. 
A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + if len(file.CloudConfigs) != 0 { + switch len(file.CloudConfigs) { + case 1: + m.Backend = nil // A cloud block is mutually exclusive with a backend one, and overwrites any backend + m.CloudConfig = file.CloudConfigs[0] + default: + // An override file with multiple cloud blocks is still invalid, even + // though it can override cloud/backend blocks from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate cloud configurations", + Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring a cloud backend. A cloud backend was previously configured at %s.", file.CloudConfigs[0].DeclRange), + Subject: &file.CloudConfigs[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. 
An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } + } + + if len(file.Encryptions) != 0 { + switch len(file.Encryptions) { + case 1: + m.Encryption = m.Encryption.Merge(file.Encryptions[0]) + default: + // An override file with multiple encryptions is still invalid, even + // though it can override encryptions from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate encryption configuration", + Detail: fmt.Sprintf("Each override file may have only one encryption configuration. Encryption was previously configured at %s.", file.Encryptions[0].DeclRange), + Subject: &file.Encryptions[1].DeclRange, + }) + } + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) 
+ } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. 
An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders) + diags = append(diags, mergeDiags...) + } + + for _, m := range file.Moved { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override 'moved' blocks", + Detail: "Records of moved objects can appear only in normal files, not in override files.", + Subject: m.DeclRange.Ptr(), + }) + } + + for _, m := range file.Import { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override 'import' blocks", + Detail: "Import blocks can appear only in normal files, not in override files.", + Subject: m.DeclRange.Ptr(), + }) + } + + for _, m := range file.Removed { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override 'removed' blocks", + Detail: "Removed blocks can appear only in normal files, not in override files.", + Subject: m.DeclRange.Ptr(), + }) + } + + return diags +} + +// gatherProviderLocalNames is a helper function that populates a map of +// provider FQNs -> provider local names. This information is useful for +// user-facing output, which should include both the FQN and LocalName. It must +// only be populated after the module has been parsed. +func (m *Module) gatherProviderLocalNames() { + providers := make(map[addrs.Provider]string) + for k, v := range m.ProviderRequirements.RequiredProviders { + providers[v.Type] = k + } + m.ProviderLocalNames = providers +} + +// LocalNameForProvider returns the module-specific user-supplied local name for +// a given provider FQN, or the default local name if none was supplied. 
+func (m *Module) LocalNameForProvider(p addrs.Provider) string { + if existing, exists := m.ProviderLocalNames[p]; exists { + return existing + } else { + // If there isn't a map entry, fall back to the default: + // Type = LocalName + return p.Type + } +} + +// ProviderForLocalConfig returns the provider FQN for a given +// LocalProviderConfig, based on its local name. +func (m *Module) ProviderForLocalConfig(pc addrs.LocalProviderConfig) addrs.Provider { + return m.ImpliedProviderForUnqualifiedType(pc.LocalName) +} + +// ImpliedProviderForUnqualifiedType returns the provider FQN for a given type, +// first by looking up the type in the provider requirements map, and falling +// back to an implied default provider. +// +// The intended behaviour is that configuring a provider with local name "foo" +// in a required_providers block will result in resources with type "foo" using +// that provider. +func (m *Module) ImpliedProviderForUnqualifiedType(pType string) addrs.Provider { + if provider, exists := m.ProviderRequirements.RequiredProviders[pType]; exists { + return provider.Type + } + return addrs.ImpliedProviderForUnqualifiedType(pType) +} + +func (m *Module) CheckCoreVersionRequirements(path addrs.Module, sourceAddr addrs.ModuleSource) hcl.Diagnostics { + var diags hcl.Diagnostics + + for _, constraint := range m.CoreVersionConstraints { + // Before checking if the constraints are met, check that we are not using any prerelease fields as these + // are not currently supported. + var prereleaseDiags hcl.Diagnostics + for _, required := range constraint.Required { + if required.Prerelease() { + prereleaseDiags = prereleaseDiags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid required_version constraint", + Detail: fmt.Sprintf( + "Prerelease version constraints are not supported: %s. Remove the prerelease information from the constraint. 
Prerelease versions of OpenTofu will match constraints using their version core only.", + required.String()), + Subject: constraint.DeclRange.Ptr(), + }) + } + } + + if len(prereleaseDiags) > 0 { + // There were some prerelease fields in the constraints. Don't check the constraints as they will + // fail, and populate the diagnostics for these constraints with the prerelease diagnostics. + diags = diags.Extend(prereleaseDiags) + continue + } + + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported OpenTofu Core version", + Detail: fmt.Sprintf( + "This configuration does not support OpenTofu version %s. To proceed, either choose another supported OpenTofu version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported OpenTofu Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support OpenTofu version %s. To proceed, either choose another supported OpenTofu version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + path, sourceAddr, tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + } + } + } + + return diags +} diff --git a/pkg/configs/module_call.go b/pkg/configs/module_call.go new file mode 100644 index 00000000000..ecfc9cb139a --- /dev/null +++ b/pkg/configs/module_call.go @@ -0,0 +1,386 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getmodules" +) + +// ModuleCall represents a "module" block in a module or file. +type ModuleCall struct { + Name string + + Source hcl.Expression + SourceAddrRaw string + SourceAddr addrs.ModuleSource + SourceSet bool + + // Used when building the corresponding StaticModuleCall + Variables StaticModuleVariables + Workspace string + + Config hcl.Body + + VersionAttr *hcl.Attribute + Version VersionConstraint + + Count hcl.Expression + ForEach hcl.Expression + + Providers []PassedProviderConfig + + DependsOn []hcl.Traversal + + DeclRange hcl.Range +} + +func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { + var diags hcl.Diagnostics + + mc := &ModuleCall{ + DeclRange: block.DefRange, + Name: block.Labels[0], + } + + schema := moduleBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, remain, moreDiags := block.Body.PartialContent(schema) + diags = append(diags, moreDiags...) 
+ mc.Config = remain + + if !hclsyntax.ValidIdentifier(mc.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["version"]; exists { + mc.VersionAttr = attr + } + + if attr, exists := content.Attributes["source"]; exists { + mc.SourceSet = true + mc.Source = attr.Expr + } + + if attr, exists := content.Attributes["count"]; exists { + mc.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + if mc.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + + mc.ForEach = attr.Expr + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + mc.DependsOn = append(mc.DependsOn, deps...) + } + + if attr, exists := content.Attributes["providers"]; exists { + providers, providerDiags := decodePassedProviderConfigs(attr) + diags = append(diags, providerDiags...) + mc.Providers = append(mc.Providers, providers...) + } + + var seenEscapeBlock *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as module input variables rather than as meta-arguments, but each module block can have only one such block. 
The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. + mc.Config = hcl.MergeBodies([]hcl.Body{mc.Config, block.Body}) + + default: + // All of the other block types in our schema are reserved. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in module block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by OpenTofu in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return mc, diags +} + +func (mc *ModuleCall) decodeStaticFields(eval *StaticEvaluator) hcl.Diagnostics { + mc.Workspace = eval.call.workspace + mc.decodeStaticVariables(eval) + + var diags hcl.Diagnostics + diags = diags.Extend(mc.decodeStaticSource(eval)) + diags = diags.Extend(mc.decodeStaticVersion(eval)) + return diags +} + +func (mc *ModuleCall) decodeStaticSource(eval *StaticEvaluator) hcl.Diagnostics { + if mc.Source == nil { + // This is an invalid module. We already have error handling that has more context and can produce better errors in this + // scenario. Follow the trail of mc.SourceAddr -> req.SourceAddr through the command package. 
+ return nil + } + + // Decode source field + diags := eval.DecodeExpression(mc.Source, StaticIdentifier{Module: eval.call.addr, Subject: fmt.Sprintf("module.%s.source", mc.Name), DeclRange: mc.Source.Range()}, &mc.SourceAddrRaw) + //nolint:nestif // Keeping this similar to the original decode logic for easy review + if !diags.HasErrors() { + // NOTE: This code was originally executed as part of decodeModuleBlock and is now deferred until we have the config merged and static context built + var err error + if mc.VersionAttr != nil { + mc.SourceAddr, err = addrs.ParseModuleSourceRegistry(mc.SourceAddrRaw) + } else { + mc.SourceAddr, err = addrs.ParseModuleSource(mc.SourceAddrRaw) + } + if err != nil { + // NOTE: We leave SourceAddr as nil for any situation where the + // source attribute is invalid, so any code which tries to carefully + // use the partial result of a failed config decode must be + // resilient to that. + mc.SourceAddr = nil + + // NOTE: In practice it's actually very unlikely to end up here, + // because our source address parser can turn just about any string + // into some sort of remote package address, and so for most errors + // we'll detect them only during module installation. There are + // still a _few_ purely-syntax errors we can catch at parsing time, + // though, mostly related to remote package sub-paths and local + // paths. + var pathErr *getmodules.MaybeRelativePathErr + if errors.As(err, &pathErr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf( + "OpenTofu failed to determine your intended installation method for remote module package %q.\n\nIf you intended this as a path relative to the current module, use \"./%s\" instead. 
The \"./\" prefix indicates that the address is a relative filesystem path.", + pathErr.Addr, pathErr.Addr, + ), + Subject: mc.Source.Range().Ptr(), + }) + } else { + if mc.VersionAttr != nil { + // In this case we'll include some extra context that + // we assumed a registry source address due to the + // version argument. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid registry module source address", + Detail: fmt.Sprintf("Failed to parse module registry address: %s.\n\nOpenTofu assumed that you intended a module registry source address because you also set the argument \"version\", which applies only to registry modules.", err), + Subject: mc.Source.Range().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf("Failed to parse module source address: %s.", err), + Subject: mc.Source.Range().Ptr(), + }) + } + } + } + } + + return diags +} + +func (mc *ModuleCall) decodeStaticVersion(eval *StaticEvaluator) hcl.Diagnostics { + var diags hcl.Diagnostics + + if mc.VersionAttr == nil { + return diags + } + + val, valDiags := eval.Evaluate(mc.VersionAttr.Expr, StaticIdentifier{ + Module: eval.call.addr, + Subject: fmt.Sprintf("module.%s.version", mc.Name), + DeclRange: mc.VersionAttr.Range, + }) + diags = diags.Extend(valDiags) + if diags.HasErrors() { + return diags + } + + var verDiags hcl.Diagnostics + mc.Version, verDiags = decodeVersionConstraintValue(mc.VersionAttr, val) + return diags.Extend(verDiags) +} + +func (mc *ModuleCall) decodeStaticVariables(eval *StaticEvaluator) { + attr, _ := mc.Config.JustAttributes() + + mc.Variables = func(variable *Variable) (cty.Value, hcl.Diagnostics) { + v, ok := attr[variable.Name] + if !ok { + if variable.Required() { + return cty.NilVal, hcl.Diagnostics{&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required variable in module call", + Subject: 
mc.Config.MissingItemRange().Ptr(), + }} + } + return variable.Default, nil + } + + ident := StaticIdentifier{ + Module: eval.call.addr.Child(mc.Name), + Subject: fmt.Sprintf("var.%s", variable.Name), + DeclRange: v.Range, + } + return eval.Evaluate(v.Expr, ident) + } +} + +// EntersNewPackage returns true if this call is to an external module, either +// directly via a remote source address or indirectly via a registry source +// address. +// +// Other behaviors in OpenTofu may treat package crossings as a special +// situation, because that indicates that the caller and callee can change +// independently of one another and thus we should disallow using any features +// where the caller assumes anything about the callee other than its input +// variables, required provider configurations, and output values. +func (mc *ModuleCall) EntersNewPackage() bool { + return moduleSourceAddrEntersNewPackage(mc.SourceAddr) +} + +// PassedProviderConfig represents a provider config explicitly passed down to +// a child module, possibly giving it a new local address in the process. +type PassedProviderConfig struct { + InChild *ProviderConfigRef + InParent *ProviderConfigRef +} + +func decodePassedProviderConfigs(attr *hcl.Attribute) ([]PassedProviderConfig, hcl.Diagnostics) { + var diags hcl.Diagnostics + var providers []PassedProviderConfig + + seen := make(map[string]hcl.Range) + pairs, pDiags := hcl.ExprMap(attr.Expr) + diags = append(diags, pDiags...) + for _, pair := range pairs { + key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") + diags = append(diags, keyDiags...) + value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") + diags = append(diags, valueDiags...) 
+ if keyDiags.HasErrors() || valueDiags.HasErrors() { + continue + } + + matchKey := key.String() + if prev, exists := seen[matchKey]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider address", + Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), + Subject: pair.Value.Range().Ptr(), + }) + continue + } + + rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) + seen[matchKey] = rng + providers = append(providers, PassedProviderConfig{ + InChild: key, + InParent: value, + }) + } + return providers, diags +} + +var moduleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + Required: true, + }, + { + Name: "version", + }, + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "depends_on", + }, + { + Name: "providers", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "_"}, // meta-argument escaping block + + // These are all reserved for future use. + {Type: "lifecycle"}, + {Type: "locals"}, + {Type: "provider", LabelNames: []string{"type"}}, + }, +} + +func moduleSourceAddrEntersNewPackage(addr addrs.ModuleSource) bool { + switch addr.(type) { + case nil: + // There are only two situations where we should get here: + // - We've been asked about the source address of the root module, + // which is always nil. + // - We've been asked about a ModuleCall that is part of the partial + // result of a failed decode. + // The root module exists outside of all module packages, so we'll + // just return false for that case. For the error case it doesn't + // really matter what we return as long as we don't panic, because + // we only make a best-effort to allow careful inspection of objects + // representing invalid configuration. 
+ return false + case addrs.ModuleSourceLocal: + // Local source addresses are the only address type that remains within + // the same package. + return false + default: + // All other address types enter a new package. + return true + } +} diff --git a/pkg/configs/module_call_test.go b/pkg/configs/module_call_test.go new file mode 100644 index 00000000000..cfdd6b3e686 --- /dev/null +++ b/pkg/configs/module_call_test.go @@ -0,0 +1,197 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "os" + "runtime" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestLoadModuleCall(t *testing.T) { + src, err := os.ReadFile("testdata/invalid-files/module-calls.tf") + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + "module-calls.tf": string(src), + }) + + file, diags := parser.LoadConfigFile("module-calls.tf") + assertExactDiagnostics(t, diags, []string{ + `module-calls.tf:20,3-11: Invalid combination of "count" and "for_each"; The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + }) + + gotModules := file.ModuleCalls + wantModules := []*ModuleCall{ + { + Name: "foo", + SourceAddr: addrs.ModuleSourceLocal("./foo"), + SourceAddrRaw: "./foo", + SourceSet: true, + DeclRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 2, Column: 1, Byte: 1}, + End: hcl.Pos{Line: 2, Column: 13, Byte: 13}, + }, + }, + { + Name: "bar", + SourceAddr: addrs.ModuleSourceRegistry{ + Package: addrs.ModuleRegistryPackage{ + Host: addrs.DefaultModuleRegistryHost, + Namespace: "hashicorp", + Name: "bar", + TargetSystem: "aws", + }, + }, + SourceAddrRaw: "hashicorp/bar/aws", + SourceSet: true, + DeclRange: hcl.Range{ + Filename: "module-calls.tf", + 
Start: hcl.Pos{Line: 7, Column: 1, Byte: 87}, + End: hcl.Pos{Line: 7, Column: 13, Byte: 99}, + }, + }, + { + Name: "baz", + SourceAddr: addrs.ModuleSourceRemote{ + Package: addrs.ModulePackage("git::https://example.com/"), + }, + SourceAddrRaw: "git::https://example.com/", + SourceSet: true, + DependsOn: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "module", + SrcRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 23, Column: 5, Byte: 295}, + End: hcl.Pos{Line: 23, Column: 11, Byte: 301}, + }, + }, + hcl.TraverseAttr{ + Name: "bar", + SrcRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 23, Column: 11, Byte: 301}, + End: hcl.Pos{Line: 23, Column: 15, Byte: 305}, + }, + }, + }, + }, + Providers: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "aws", + NameRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 27, Column: 5, Byte: 332}, + End: hcl.Pos{Line: 27, Column: 8, Byte: 335}, + }, + }, + InParent: &ProviderConfigRef{ + Name: "aws", + NameRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 27, Column: 11, Byte: 338}, + End: hcl.Pos{Line: 27, Column: 14, Byte: 341}, + }, + Alias: "foo", + AliasRange: &hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 27, Column: 14, Byte: 341}, + End: hcl.Pos{Line: 27, Column: 18, Byte: 345}, + }, + }, + }, + }, + DeclRange: hcl.Range{ + Filename: "module-calls.tf", + Start: hcl.Pos{Line: 14, Column: 1, Byte: 167}, + End: hcl.Pos{Line: 14, Column: 13, Byte: 179}, + }, + }, + } + + // We'll hide all of the bodies/exprs since we're treating them as opaque + // here anyway... the point of this test is to ensure we handle everything + // else properly. 
+ for _, m := range gotModules { + // This is a structural issue which existed before static evaluation, but has been made worse by it + // See https://github.com/kubegems/opentofu/issues/1467 for more details + eval := NewStaticEvaluator(nil, RootModuleCallForTesting()) + diags := m.decodeStaticFields(eval) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + m.Source = nil + + m.Config = nil + m.Count = nil + m.ForEach = nil + } + + for _, problem := range deep.Equal(gotModules, wantModules) { + t.Error(problem) + } +} + +func TestModuleSourceAddrEntersNewPackage(t *testing.T) { + absolutePath := "/absolute/path" + if runtime.GOOS == "windows" { + absolutePath = "C:\\absolute\\path" + } + + tests := []struct { + Addr string + Want bool + }{ + { + "./", + false, + }, + { + "../bork", + false, + }, + { + absolutePath, + true, + }, + { + "github.com/example/foo", + true, + }, + { + "hashicorp/subnets/cidr", // registry module + true, + }, + { + "registry.opentofu.org/hashicorp/subnets/cidr", // registry module + true, + }, + } + + for _, test := range tests { + t.Run(test.Addr, func(t *testing.T) { + addr, err := addrs.ParseModuleSource(test.Addr) + if err != nil { + t.Fatalf("parsing failed for %q: %s", test.Addr, err) + } + + got := moduleSourceAddrEntersNewPackage(addr) + if got != test.Want { + t.Errorf("wrong result for %q\ngot: %#v\nwant: %#v", addr, got, test.Want) + } + }) + } +} diff --git a/pkg/configs/module_merge.go b/pkg/configs/module_merge.go new file mode 100644 index 00000000000..da945a379c3 --- /dev/null +++ b/pkg/configs/module_merge.go @@ -0,0 +1,274 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// The methods in this file are used by Module.mergeFile to apply overrides +// to our different configuration elements. These methods all follow the +// pattern of mutating the receiver to incorporate settings from the parameter, +// returning error diagnostics if any aspect of the parameter cannot be merged +// into the receiver for some reason. +// +// User expectation is that anything _explicitly_ set in the given object +// should take precedence over the corresponding settings in the receiver, +// but that anything omitted in the given object should be left unchanged. +// In some cases it may be reasonable to do a "deep merge" of certain nested +// features, if it is possible to unambiguously correlate the nested elements +// and their behaviors are orthogonal to each other. 
+ +func (p *Provider) merge(op *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if op.Version.Required != nil { + p.Version = op.Version + } + + p.Config = MergeBodies(p.Config, op.Config) + + return diags +} + +func (v *Variable) merge(ov *Variable) hcl.Diagnostics { + var diags hcl.Diagnostics + + if ov.DescriptionSet { + v.Description = ov.Description + v.DescriptionSet = ov.DescriptionSet + } + if ov.SensitiveSet { + v.Sensitive = ov.Sensitive + v.SensitiveSet = ov.SensitiveSet + } + if ov.Default != cty.NilVal { + v.Default = ov.Default + } + if ov.Type != cty.NilType { + v.Type = ov.Type + v.ConstraintType = ov.ConstraintType + } + if ov.ParsingMode != 0 { + v.ParsingMode = ov.ParsingMode + } + if ov.NullableSet { + v.Nullable = ov.Nullable + v.NullableSet = ov.NullableSet + } + + // If the override file overrode type without default or vice-versa then + // it may have created an invalid situation, which we'll catch now by + // attempting to re-convert the value. + // + // Note that here we may be re-converting an already-converted base value + // from the base config. This will be a no-op if the type was not changed, + // but in particular might be user-observable in the edge case where the + // literal value in config could've been converted to the overridden type + // constraint but the converted value cannot. In practice, this situation + // should be rare since most of our conversions are interchangable. + if v.Default != cty.NilVal { + val, err := convert.Convert(v.Default, v.ConstraintType) + if err != nil { + // What exactly we'll say in the error message here depends on whether + // it was Default or Type that was overridden here. 
+ switch { + case ov.Type != cty.NilType && ov.Default == cty.NilVal: + // If only the type was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), + Subject: &ov.DeclRange, + }) + case ov.Type == cty.NilType && ov.Default != cty.NilVal: + // Only the default was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + } + } else { + v.Default = val + } + + // ensure a null default wasn't merged in when it is not allowed + if !v.Nullable && v.Default.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: "A null default value is not valid when nullable=false.", + Subject: &ov.DeclRange, + }) + } + } + + return diags +} + +func (l *Local) merge(ol *Local) hcl.Diagnostics { + var diags hcl.Diagnostics + + // Since a local is just a single expression in configuration, the + // override definition entirely replaces the base definition, including + // the source range so that we'll send the user to the right place if + // there is an error. 
+ l.Expr = ol.Expr + l.DeclRange = ol.DeclRange + + return diags +} + +func (o *Output) merge(oo *Output) hcl.Diagnostics { + var diags hcl.Diagnostics + + if oo.Description != "" { + o.Description = oo.Description + } + if oo.Expr != nil { + o.Expr = oo.Expr + } + if oo.SensitiveSet { + o.Sensitive = oo.Sensitive + o.SensitiveSet = oo.SensitiveSet + } + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(oo.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { + var diags hcl.Diagnostics + + if omc.SourceSet { + mc.Source = omc.Source + mc.SourceSet = omc.SourceSet + } + + if omc.Count != nil { + mc.Count = omc.Count + } + + if omc.ForEach != nil { + mc.ForEach = omc.ForEach + } + + if omc.VersionAttr != nil { + mc.VersionAttr = omc.VersionAttr + } + + mc.Config = MergeBodies(mc.Config, omc.Config) + + if len(omc.Providers) != 0 { + mc.Providers = omc.Providers + } + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(omc.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: omc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (r *Resource) merge(or *Resource, rps map[string]*RequiredProvider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if r.Mode != or.Mode { + // This is always a programming error, since managed and data resources + // are kept in separate maps in the configuration structures. 
+ panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) + } + + if or.Count != nil { + r.Count = or.Count + } + if or.ForEach != nil { + r.ForEach = or.ForEach + } + + if or.ProviderConfigRef != nil { + r.ProviderConfigRef = or.ProviderConfigRef + if existing, exists := rps[or.ProviderConfigRef.Name]; exists { + r.Provider = existing.Type + } else { + r.Provider = addrs.ImpliedProviderForUnqualifiedType(r.ProviderConfigRef.Name) + } + } + + // Provider FQN is set by OpenTofu during Merge + + if r.Mode == addrs.ManagedResourceMode { + // or.Managed is always non-nil for managed resource mode + + if or.Managed.Connection != nil { + r.Managed.Connection = or.Managed.Connection + } + if or.Managed.CreateBeforeDestroySet { + r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy + r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet + } + if len(or.Managed.IgnoreChanges) != 0 { + r.Managed.IgnoreChanges = or.Managed.IgnoreChanges + } + if or.Managed.IgnoreAllChanges { + r.Managed.IgnoreAllChanges = true + } + if or.Managed.PreventDestroySet { + r.Managed.PreventDestroy = or.Managed.PreventDestroy + r.Managed.PreventDestroySet = or.Managed.PreventDestroySet + } + if len(or.Managed.Provisioners) != 0 { + r.Managed.Provisioners = or.Managed.Provisioners + } + } + + r.Config = MergeBodies(r.Config, or.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. 
+ if len(or.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} diff --git a/pkg/configs/module_merge_test.go b/pkg/configs/module_merge_test.go new file mode 100644 index 00000000000..7c923658aa9 --- /dev/null +++ b/pkg/configs/module_merge_test.go @@ -0,0 +1,347 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +func TestModuleOverrideVariable(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-variable") + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatalf("module is nil") + } + + got := mod.Variables + want := map[string]*Variable{ + "fully_overridden": { + Name: "fully_overridden", + Description: "b_override description", + DescriptionSet: true, + Default: cty.StringVal("b_override"), + Nullable: false, + NullableSet: true, + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: VariableParseLiteral, + DeclRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-variable/primary.tf"), + Start: hcl.Pos{ + Line: 1, + Column: 1, + Byte: 0, + }, + End: hcl.Pos{ + Line: 1, + Column: 28, + Byte: 27, + }, + }, + }, + "partially_overridden": { + Name: "partially_overridden", + Description: "base description", + DescriptionSet: true, + Default: cty.StringVal("b_override partial"), + Nullable: true, + NullableSet: false, + Type: cty.String, + ConstraintType: cty.String, + ParsingMode: VariableParseLiteral, + DeclRange: 
hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-variable/primary.tf"), + Start: hcl.Pos{ + Line: 7, + Column: 1, + Byte: 103, + }, + End: hcl.Pos{ + Line: 7, + Column: 32, + Byte: 134, + }, + }, + }, + } + assertResultDeepEqual(t, got, want) +} + +func TestModuleOverrideModule(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-module") + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatalf("module is nil") + } + + if _, exists := mod.ModuleCalls["example"]; !exists { + t.Fatalf("no module 'example'") + } + if len(mod.ModuleCalls) != 1 { + t.Fatalf("wrong number of module calls in result %d; want 1", len(mod.ModuleCalls)) + } + + got := mod.ModuleCalls["example"] + want := &ModuleCall{ + Name: "example", + SourceAddr: addrs.ModuleSourceLocal("./example2-a_override"), + SourceAddrRaw: "./example2-a_override", + SourceSet: true, + DeclRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/primary.tf"), + Start: hcl.Pos{ + Line: 2, + Column: 1, + Byte: 1, + }, + End: hcl.Pos{ + Line: 2, + Column: 17, + Byte: 17, + }, + }, + DependsOn: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "null_resource", + SrcRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/primary.tf"), + Start: hcl.Pos{Line: 11, Column: 17, Byte: 149}, + End: hcl.Pos{Line: 11, Column: 30, Byte: 162}, + }, + }, + hcl.TraverseAttr{ + Name: "test", + SrcRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/primary.tf"), + Start: hcl.Pos{Line: 11, Column: 30, Byte: 162}, + End: hcl.Pos{Line: 11, Column: 35, Byte: 167}, + }, + }, + }, + }, + Providers: []PassedProviderConfig{ + { + InChild: &ProviderConfigRef{ + Name: "test", + NameRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/b_override.tf"), + Start: hcl.Pos{Line: 7, Column: 5, Byte: 97}, + End: hcl.Pos{Line: 7, Column: 9, Byte: 101}, + }, + }, 
+ InParent: &ProviderConfigRef{ + Name: "test", + NameRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/b_override.tf"), + Start: hcl.Pos{Line: 7, Column: 12, Byte: 104}, + End: hcl.Pos{Line: 7, Column: 16, Byte: 108}, + }, + Alias: "b_override", + AliasRange: &hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-module/b_override.tf"), + Start: hcl.Pos{Line: 7, Column: 16, Byte: 108}, + End: hcl.Pos{Line: 7, Column: 27, Byte: 119}, + }, + }, + }, + }, + } + + // We're going to extract and nil out our hcl.Body here because DeepEqual + // is not a useful way to assert on that. + gotConfig := got.Config + got.Config = nil + got.Source = nil + got.Variables = nil + + assertResultDeepEqual(t, got, want) + + type content struct { + Kept *string `hcl:"kept"` + Foo *string `hcl:"foo"` + New *string `hcl:"new"` + Newer *string `hcl:"newer"` + } + var gotArgs content + diags = gohcl.DecodeBody(gotConfig, nil, &gotArgs) + assertNoDiagnostics(t, diags) + + wantArgs := content{ + Kept: stringPtr("primary kept"), + Foo: stringPtr("a_override foo"), + New: stringPtr("b_override new"), + Newer: stringPtr("b_override newer"), + } + + assertResultDeepEqual(t, gotArgs, wantArgs) +} + +func TestModuleOverrideDynamic(t *testing.T) { + schema := &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + {Type: "foo"}, + {Type: "dynamic", LabelNames: []string{"type"}}, + }, + } + + t.Run("base is dynamic", func(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-dynamic-block-base") + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatalf("module is nil") + } + + if _, exists := mod.ManagedResources["test.foo"]; !exists { + t.Fatalf("no module 'example'") + } + if len(mod.ManagedResources) != 1 { + t.Fatalf("wrong number of managed resources in result %d; want 1", len(mod.ManagedResources)) + } + + body := mod.ManagedResources["test.foo"].Config + content, diags := body.Content(schema) + 
assertNoDiagnostics(t, diags) + + if len(content.Blocks) != 1 { + t.Fatalf("wrong number of blocks in result %d; want 1", len(content.Blocks)) + } + if got, want := content.Blocks[0].Type, "foo"; got != want { + t.Fatalf("wrong block type %q; want %q", got, want) + } + }) + t.Run("override is dynamic", func(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-dynamic-block-override") + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatalf("module is nil") + } + + if _, exists := mod.ManagedResources["test.foo"]; !exists { + t.Fatalf("no module 'example'") + } + if len(mod.ManagedResources) != 1 { + t.Fatalf("wrong number of managed resources in result %d; want 1", len(mod.ManagedResources)) + } + + body := mod.ManagedResources["test.foo"].Config + content, diags := body.Content(schema) + assertNoDiagnostics(t, diags) + + if len(content.Blocks) != 1 { + t.Fatalf("wrong number of blocks in result %d; want 1", len(content.Blocks)) + } + if got, want := content.Blocks[0].Type, "dynamic"; got != want { + t.Fatalf("wrong block type %q; want %q", got, want) + } + if got, want := content.Blocks[0].Labels[0], "foo"; got != want { + t.Fatalf("wrong dynamic block label %q; want %q", got, want) + } + }) +} + +func TestModuleOverrideSensitiveVariable(t *testing.T) { + type testCase struct { + sensitive bool + sensitiveSet bool + } + cases := map[string]testCase{ + "false_true": { + sensitive: true, + sensitiveSet: true, + }, + "true_false": { + sensitive: false, + sensitiveSet: true, + }, + "false_false_true": { + sensitive: true, + sensitiveSet: true, + }, + "true_true_false": { + sensitive: false, + sensitiveSet: true, + }, + "false_true_false": { + sensitive: false, + sensitiveSet: true, + }, + "true_false_true": { + sensitive: true, + sensitiveSet: true, + }, + } + + mod, diags := testModuleFromDir("testdata/valid-modules/override-variable-sensitive") + + assertNoDiagnostics(t, diags) + + if mod == nil { + t.Fatalf("module is nil") + } + + 
got := mod.Variables + + for v, want := range cases { + t.Run(fmt.Sprintf("variable %s", v), func(t *testing.T) { + if got[v].Sensitive != want.sensitive { + t.Errorf("wrong result for sensitive\ngot: %t want: %t", got[v].Sensitive, want.sensitive) + } + + if got[v].SensitiveSet != want.sensitiveSet { + t.Errorf("wrong result for sensitive set\ngot: %t want: %t", got[v].Sensitive, want.sensitive) + } + }) + } +} + +func TestModuleOverrideResourceFQNs(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-resource-provider") + assertNoDiagnostics(t, diags) + + got := mod.ManagedResources["test_instance.explicit"] + wantProvider := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") + wantProviderCfg := &ProviderConfigRef{ + Name: "bar-test", + NameRange: hcl.Range{ + Filename: filepath.FromSlash("testdata/valid-modules/override-resource-provider/a_override.tf"), + Start: hcl.Pos{Line: 2, Column: 14, Byte: 51}, + End: hcl.Pos{Line: 2, Column: 22, Byte: 59}, + }, + } + + if !got.Provider.Equals(wantProvider) { + t.Fatalf("wrong provider %s, want %s", got.Provider, wantProvider) + } + assertResultDeepEqual(t, got.ProviderConfigRef, wantProviderCfg) + + // now verify that a resource with no provider config falls back to default + got = mod.ManagedResources["test_instance.default"] + wantProvider = addrs.NewDefaultProvider("test") + if !got.Provider.Equals(wantProvider) { + t.Fatalf("wrong provider %s, want %s", got.Provider, wantProvider) + } + if got.ProviderConfigRef != nil { + t.Fatalf("wrong result: found provider config ref %s, expected nil", got.ProviderConfigRef) + } +} + +func TestModuleOverrideIgnoreAllChanges(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-ignore-changes") + assertNoDiagnostics(t, diags) + + r := mod.ManagedResources["test_instance.foo"] + if !r.Managed.IgnoreAllChanges { + t.Fatalf("wrong result: expected r.Managed.IgnoreAllChanges to be true") + } +} diff --git 
a/pkg/configs/module_test.go b/pkg/configs/module_test.go new file mode 100644 index 00000000000..5bb1e72e3b0 --- /dev/null +++ b/pkg/configs/module_test.go @@ -0,0 +1,420 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +// TestNewModule_provider_fqns exercises module.gatherProviderLocalNames() +func TestNewModule_provider_local_name(t *testing.T) { + mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + p := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if name, exists := mod.ProviderLocalNames[p]; !exists { + t.Fatal("provider FQN foo/test not found") + } else { + if name != "foo-test" { + t.Fatalf("provider localname mismatch: got %s, want foo-test", name) + } + } + + // ensure the reverse lookup (fqn to local name) works as well + localName := mod.LocalNameForProvider(p) + if localName != "foo-test" { + t.Fatal("provider local name not found") + } + + // if there is not a local name for a provider, it should return the type name + localName = mod.LocalNameForProvider(addrs.NewDefaultProvider("nonexist")) + if localName != "nonexist" { + t.Error("wrong local name returned for a non-local provider") + } + + // can also look up the "terraform" provider and see that it sources is + // allowed to be overridden, even though there is a builtin provider + // called "terraform". 
+ p = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "not-builtin", "not-terraform") + if name, exists := mod.ProviderLocalNames[p]; !exists { + t.Fatal("provider FQN not-builtin/not-terraform not found") + } else { + if name != "terraform" { + t.Fatalf("provider localname mismatch: got %s, want terraform", name) + } + } +} + +// This test validates the provider FQNs set in each Resource +func TestNewModule_resource_providers(t *testing.T) { + cfg, diags := testNestedModuleConfigFromDir(t, "testdata/valid-modules/nested-providers-fqns") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // both the root and child module have two resources, one which should use + // the default implied provider and one explicitly using a provider set in + // required_providers + wantImplicit := addrs.NewDefaultProvider("test") + wantFoo := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + wantBar := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "bar", "test") + + // root module + if !cfg.Module.ManagedResources["test_instance.explicit"].Provider.Equals(wantFoo) { + t.Fatalf("wrong provider for \"test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantFoo, + ) + } + if !cfg.Module.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { + t.Fatalf("wrong provider for \"test_instance.implicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.implicit"].Provider, + wantImplicit, + ) + } + + // a data source + if !cfg.Module.DataResources["data.test_resource.explicit"].Provider.Equals(wantFoo) { + t.Fatalf("wrong provider for \"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantBar, + ) + } + + // child module + cm := cfg.Children["child"].Module + if !cm.ManagedResources["test_instance.explicit"].Provider.Equals(wantBar) { + t.Fatalf("wrong provider for 
\"module.child.test_instance.explicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.explicit"].Provider, + wantBar, + ) + } + if !cm.ManagedResources["test_instance.implicit"].Provider.Equals(wantImplicit) { + t.Fatalf("wrong provider for \"module.child.test_instance.implicit\"\ngot: %s\nwant: %s", + cfg.Module.ManagedResources["test_instance.implicit"].Provider, + wantImplicit, + ) + } +} + +func TestProviderForLocalConfig(t *testing.T) { + mod, diags := testModuleFromDir("testdata/providers-explicit-fqn") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + lc := addrs.LocalProviderConfig{LocalName: "foo-test"} + got := mod.ProviderForLocalConfig(lc) + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + if !got.Equals(want) { + t.Fatalf("wrong result! got %#v, want %#v\n", got, want) + } +} + +// At most one required_providers block per module is permitted. +func TestModule_required_providers_multiple(t *testing.T) { + _, diags := testModuleFromDir("testdata/invalid-modules/multiple-required-providers") + if !diags.HasErrors() { + t.Fatal("module should have error diags, but does not") + } + + want := `Duplicate required providers configuration` + if got := diags.Error(); !strings.Contains(got, want) { + t.Fatalf("expected error to contain %q\nerror was:\n%s", want, got) + } +} + +// A module may have required_providers configured in files loaded later than +// resources. These provider settings should still be reflected in the +// resources' configuration. 
+func TestModule_required_providers_after_resource(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-after-resource") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "foo", "test") + + req, exists := mod.ProviderRequirements.RequiredProviders["test"] + if !exists { + t.Fatal("no provider requirements found for \"test\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"test\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + + if got := mod.ManagedResources["test_instance.my-instance"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"test_instance.my-instance\"\ngot: %s\nwant: %s", + got, want, + ) + } +} + +// We support overrides for required_providers blocks, which should replace the +// entire block for each provider localname, leaving other blocks unaffected. +// This should also be reflected in any resources in the module using this +// provider. 
+func TestModule_required_provider_overrides(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/required-providers-overrides") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // The foo provider and resource should be unaffected + want := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "foo") + req, exists := mod.ProviderRequirements.RequiredProviders["foo"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + if got := mod.ManagedResources["foo_thing.ft"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"foo_thing.ft\"\ngot: %s\nwant: %s", + got, want, + ) + } + + // The bar provider and resource should be using the override config + want = addrs.NewProvider(addrs.DefaultProviderRegistryHost, "blorp", "bar") + req, exists = mod.ProviderRequirements.RequiredProviders["bar"] + if !exists { + t.Fatal("no provider requirements found for \"bar\"") + } + if req.Type != want { + t.Errorf("wrong provider addr for \"bar\"\ngot: %s\nwant: %s", + req.Type, want, + ) + } + if gotVer, wantVer := req.Requirement.Required.String(), "~>2.0.0"; gotVer != wantVer { + t.Errorf("wrong provider version constraint for \"bar\"\ngot: %s\nwant: %s", + gotVer, wantVer, + ) + } + if got := mod.ManagedResources["bar_thing.bt"].Provider; !got.Equals(want) { + t.Errorf("wrong provider addr for \"bar_thing.bt\"\ngot: %s\nwant: %s", + got, want, + ) + } +} + +// Resources without explicit provider configuration are assigned a provider +// implied based on the resource type. For example, this resource: +// +// resource "foo_instance" "test" {} +// +// ...is assigned to whichever provider has local name "foo" in the current +// module. 
+// +// To find the correct provider, we first look in the module's provider +// requirements map for a local name matching the resource type, and fall back +// to a default provider if none is found. This applies to both managed and +// data resources. +func TestModule_implied_provider(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // The three providers used in the config resources + foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") + bar := addrs.NewDefaultProvider("bar") + + // Verify that the registry.acme.corp/acme/foo provider is defined in the + // module provider requirements with local name "foo" + req, exists := mod.ProviderRequirements.RequiredProviders["foo"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != foo { + t.Errorf("wrong provider addr for \"foo\"\ngot: %s\nwant: %s", + req.Type, foo, + ) + } + + // Verify that the acme/something provider is defined in the + // module provider requirements with local name "whatever" + req, exists = mod.ProviderRequirements.RequiredProviders["whatever"] + if !exists { + t.Fatal("no provider requirements found for \"foo\"") + } + if req.Type != whatever { + t.Errorf("wrong provider addr for \"whatever\"\ngot: %s\nwant: %s", + req.Type, whatever, + ) + } + + // Check that resources are assigned the correct providers: foo_* resources + // should have the custom foo provider, bar_* resources the default bar + // provider. 
+ tests := []struct { + Address string + Provider addrs.Provider + }{ + {"foo_resource.a", foo}, + {"data.foo_resource.b", foo}, + {"bar_resource.c", bar}, + {"data.bar_resource.d", bar}, + {"whatever_resource.e", whatever}, + {"data.whatever_resource.f", whatever}, + } + for _, test := range tests { + resources := mod.ManagedResources + if strings.HasPrefix(test.Address, "data.") { + resources = mod.DataResources + } + resource, exists := resources[test.Address] + if !exists { + t.Errorf("could not find resource %q in %#v", test.Address, resources) + continue + } + if got := resource.Provider; !got.Equals(test.Provider) { + t.Errorf("wrong provider addr for %q\ngot: %s\nwant: %s", + test.Address, got, test.Provider, + ) + } + } +} + +func TestImpliedProviderForUnqualifiedType(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/implied-providers") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + foo := addrs.NewProvider("registry.acme.corp", "acme", "foo") + whatever := addrs.NewProvider(addrs.DefaultProviderRegistryHost, "acme", "something") + bar := addrs.NewDefaultProvider("bar") + tf := addrs.NewBuiltInProvider("terraform") + + tests := []struct { + Type string + Provider addrs.Provider + }{ + {"foo", foo}, + {"whatever", whatever}, + {"bar", bar}, + {"terraform", tf}, + } + for _, test := range tests { + got := mod.ImpliedProviderForUnqualifiedType(test.Type) + if !got.Equals(test.Provider) { + t.Errorf("wrong result for %q: got %#v, want %#v\n", test.Type, got, test.Provider) + } + } +} + +func TestModule_backend_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + gotType := mod.Backend.Type + wantType := "bar" + + if gotType != wantType { + t.Errorf("wrong result for backend type: got %#v, want %#v\n", gotType, wantType) + } + + attrs, _ := mod.Backend.Config.JustAttributes() + + gotAttr, diags := 
attrs["path"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED/relative/path/to/terraform.tfstate") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for backend 'path': got %#v, want %#v\n", gotAttr, wantAttr) + } +} + +// Unlike most other overrides, backend blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend. +func TestModule_backend_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend == nil { + t.Errorf("expected module Backend not to be nil") + } +} + +func TestModule_cloud_override_backend(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-backend-with-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.Backend != nil { + t.Errorf("expected module Backend to be nil") + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +// Unlike most other overrides, cloud blocks do not require a base configuration in a primary +// configuration file, as an omitted backend there implies the local backend and cloud blocks +// override backends. 
+func TestModule_cloud_override_no_base(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud-no-base") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if mod.CloudConfig == nil { + t.Errorf("expected module CloudConfig not to be nil") + } +} + +func TestModule_cloud_override(t *testing.T) { + mod, diags := testModuleFromDir("testdata/valid-modules/override-cloud") + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + attrs, _ := mod.CloudConfig.Config.JustAttributes() + + gotAttr, diags := attrs["organization"].Expr.Value(nil) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + wantAttr := cty.StringVal("CHANGED") + + if !gotAttr.RawEquals(wantAttr) { + t.Errorf("wrong result for Cloud 'organization': got %#v, want %#v\n", gotAttr, wantAttr) + } + + // The override should have completely replaced the cloud block in the primary file, no merging + if attrs["should_not_be_present_with_override"] != nil { + t.Errorf("expected 'should_not_be_present_with_override' attribute to be nil") + } +} + +func TestModule_cloud_duplicate_overrides(t *testing.T) { + _, diags := testModuleFromDir("testdata/invalid-modules/override-cloud-duplicates") + want := `Duplicate cloud configurations` + if got := diags.Error(); !strings.Contains(got, want) { + t.Fatalf("expected module error to contain %q\nerror was:\n%s", want, got) + } +} diff --git a/pkg/configs/moved.go b/pkg/configs/moved.go new file mode 100644 index 00000000000..c541320bfd3 --- /dev/null +++ b/pkg/configs/moved.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" +) + +type Moved struct { + From *addrs.MoveEndpoint + To *addrs.MoveEndpoint + + DeclRange hcl.Range +} + +func decodeMovedBlock(block *hcl.Block) (*Moved, hcl.Diagnostics) { + var diags hcl.Diagnostics + moved := &Moved{ + DeclRange: block.DefRange, + } + + content, moreDiags := block.Body.Content(movedBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["from"]; exists { + from, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags = append(diags, traversalDiags...) + if !traversalDiags.HasErrors() { + from, fromDiags := addrs.ParseMoveEndpoint(from) + diags = append(diags, fromDiags.ToHCL()...) + moved.From = from + } + } + + if attr, exists := content.Attributes["to"]; exists { + to, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags = append(diags, traversalDiags...) + if !traversalDiags.HasErrors() { + to, toDiags := addrs.ParseMoveEndpoint(to) + diags = append(diags, toDiags.ToHCL()...) + moved.To = to + } + } + + // we can only move from a module to a module, resource to resource, etc. + if !diags.HasErrors() { + if !moved.From.MightUnifyWith(moved.To) { + // We can catch some obviously-wrong combinations early here, + // but we still have other dynamic validation to do at runtime. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"moved\" addresses", + Detail: "The \"from\" and \"to\" addresses must either both refer to resources or both refer to modules.", + Subject: &moved.DeclRange, + }) + } + } + + return moved, diags +} + +var movedBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "from", + Required: true, + }, + { + Name: "to", + Required: true, + }, + }, +} diff --git a/pkg/configs/moved_test.go b/pkg/configs/moved_test.go new file mode 100644 index 00000000000..b6eebeaf7e2 --- /dev/null +++ b/pkg/configs/moved_test.go @@ -0,0 +1,213 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestMovedBlock_decode(t *testing.T) { + blockRange := hcl.Range{ + Filename: "mock.tf", + Start: hcl.Pos{Line: 3, Column: 12, Byte: 27}, + End: hcl.Pos{Line: 3, Column: 19, Byte: 34}, + } + + foo_expr := hcltest.MockExprTraversalSrc("test_instance.foo") + bar_expr := hcltest.MockExprTraversalSrc("test_instance.bar") + + foo_index_expr := hcltest.MockExprTraversalSrc("test_instance.foo[1]") + bar_index_expr := hcltest.MockExprTraversalSrc("test_instance.bar[\"one\"]") + + mod_foo_expr := hcltest.MockExprTraversalSrc("module.foo") + mod_bar_expr := hcltest.MockExprTraversalSrc("module.bar") + + tests := map[string]struct { + input *hcl.Block + want *Moved + err string + }{ + "success": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_expr, + }, + "to": { + Name: "to", + Expr: bar_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_expr), + To: 
mustMoveEndpointFromExpr(bar_expr), + DeclRange: blockRange, + }, + ``, + }, + "indexed resources": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_index_expr, + }, + "to": { + Name: "to", + Expr: bar_index_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_index_expr), + To: mustMoveEndpointFromExpr(bar_index_expr), + DeclRange: blockRange, + }, + ``, + }, + "modules": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: mod_foo_expr, + }, + "to": { + Name: "to", + Expr: mod_bar_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(mod_foo_expr), + To: mustMoveEndpointFromExpr(mod_bar_expr), + DeclRange: blockRange, + }, + ``, + }, + "error: missing argument": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + From: mustMoveEndpointFromExpr(foo_expr), + DeclRange: blockRange, + }, + "Missing required argument", + }, + "error: type mismatch": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "to": { + Name: "to", + Expr: foo_expr, + }, + "from": { + Name: "from", + Expr: mod_foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Moved{ + To: mustMoveEndpointFromExpr(foo_expr), + From: mustMoveEndpointFromExpr(mod_foo_expr), + DeclRange: blockRange, + }, + "Invalid \"moved\" addresses", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeMovedBlock(test.input) + + if diags.HasErrors() { + if test.err == "" { + t.Fatalf("unexpected error: %s", diags.Errs()) + } + if gotErr := diags[0].Summary; gotErr != test.err { + t.Errorf("wrong 
error, got %q, want %q", gotErr, test.err) + } + } else if test.err != "" { + t.Fatal("expected error") + } + + if !cmp.Equal(got, test.want, cmp.AllowUnexported(addrs.MoveEndpoint{})) { + t.Fatalf("wrong result: %s", cmp.Diff(got, test.want)) + } + }) + } +} + +func TestMovedBlock_inModule(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/valid-modules/moved-blocks", RootModuleCallForTesting()) + if diags.HasErrors() { + t.Errorf("unexpected error: %s", diags.Error()) + } + + var gotPairs [][2]string + for _, mc := range mod.Moved { + gotPairs = append(gotPairs, [2]string{mc.From.String(), mc.To.String()}) + } + wantPairs := [][2]string{ + {`test.foo`, `test.bar`}, + {`test.foo`, `test.bar["bloop"]`}, + {`module.a`, `module.b`}, + {`module.a`, `module.a["foo"]`}, + {`test.foo`, `module.a.test.foo`}, + {`data.test.foo`, `data.test.bar`}, + } + if diff := cmp.Diff(wantPairs, gotPairs); diff != "" { + t.Errorf("wrong addresses\n%s", diff) + } +} + +func mustMoveEndpointFromExpr(expr hcl.Expression) *addrs.MoveEndpoint { + traversal, hcldiags := hcl.AbsTraversalForExpr(expr) + if hcldiags.HasErrors() { + panic(hcldiags.Errs()) + } + + ep, diags := addrs.ParseMoveEndpoint(traversal) + if diags.HasErrors() { + panic(diags.Err()) + } + + return ep +} diff --git a/pkg/configs/named_values.go b/pkg/configs/named_values.go new file mode 100644 index 00000000000..78b6b9a28c9 --- /dev/null +++ b/pkg/configs/named_values.go @@ -0,0 +1,582 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/typeexpr" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// A consistent detail message for all "not a valid identifier" diagnostics. +const badIdentifierDetail = "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes." + +// Variable represents a "variable" block in a module or file. +type Variable struct { + Name string + Description string + Default cty.Value + + // Type is the concrete type of the variable value. + Type cty.Type + // ConstraintType is used for decoding and type conversions, and may + // contain nested ObjectWithOptionalAttr types. + ConstraintType cty.Type + TypeDefaults *typeexpr.Defaults + + ParsingMode VariableParsingMode + Validations []*CheckRule + Sensitive bool + + DescriptionSet bool + SensitiveSet bool + + // Nullable indicates that null is a valid value for this variable. Setting + // Nullable to false means that the module can expect this variable to + // never be null. + Nullable bool + NullableSet bool + + DeclRange hcl.Range +} + +func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { + v := &Variable{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + // Unless we're building an override, we'll set some defaults + // which we might override with attributes below. We leave these + // as zero-value in the override case so we can recognize whether + // or not they are set when we merge. 
+ if !override { + v.Type = cty.DynamicPseudoType + v.ConstraintType = cty.DynamicPseudoType + v.ParsingMode = VariableParseLiteral + } + + content, diags := block.Body.Content(variableBlockSchema) + + if !hclsyntax.ValidIdentifier(v.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + // Don't allow declaration of variables that would conflict with the + // reserved attribute and block type names in a "module" block, since + // these won't be usable for child modules. + for _, attr := range moduleBlockSchema.Attributes { + if attr.Name == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), + Subject: &block.LabelRanges[0], + }) + } + } + for _, blockS := range moduleBlockSchema.Blocks { + if blockS.Type == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), + Subject: &block.LabelRanges[0], + }) + } + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) + diags = append(diags, valDiags...) + v.DescriptionSet = true + } + + if attr, exists := content.Attributes["type"]; exists { + ty, tyDefaults, parseMode, tyDiags := decodeVariableType(attr.Expr) + diags = append(diags, tyDiags...) + v.ConstraintType = ty + v.TypeDefaults = tyDefaults + v.Type = ty.WithoutOptionalAttributesDeep() + v.ParsingMode = parseMode + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Sensitive) + diags = append(diags, valDiags...) 
+ v.SensitiveSet = true + } + + if attr, exists := content.Attributes["nullable"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Nullable) + diags = append(diags, valDiags...) + v.NullableSet = true + } else { + // The current default is true, which is subject to change in a future + // language edition. + v.Nullable = true + } + + if attr, exists := content.Attributes["default"]; exists { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + + // Convert the default to the expected type so we can catch invalid + // defaults early and allow later code to assume validity. + // Note that this depends on us having already processed any "type" + // attribute above. + // However, we can't do this if we're in an override file where + // the type might not be set; we'll catch that during merge. + if v.ConstraintType != cty.NilType { + var err error + // If the type constraint has defaults, we must apply those + // defaults to the variable default value before type conversion, + // unless the default value is null. Null is excluded from the + // type default application process as a special case, to allow + // nullable variables to have a null default value. 
+ if v.TypeDefaults != nil && !val.IsNull() { + val = v.TypeDefaults.Apply(val) + } + val, err = convert.Convert(val, v.ConstraintType) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), + Subject: attr.Expr.Range().Ptr(), + }) + val = cty.DynamicVal + } + } + + if !v.Nullable && val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: "A null default value is not valid when nullable=false.", + Subject: attr.Expr.Range().Ptr(), + }) + } + + v.Default = val + } + + for _, block := range content.Blocks { + switch block.Type { + + case "validation": + vv, moreDiags := decodeVariableValidationBlock(v.Name, block, override) + diags = append(diags, moreDiags...) + v.Validations = append(v.Validations, vv) + + default: + // The above cases should be exhaustive for all block types + // defined in variableBlockSchema + panic(fmt.Sprintf("unhandled block type %q", block.Type)) + } + } + + return v, diags +} + +func decodeVariableType(expr hcl.Expression) (cty.Type, *typeexpr.Defaults, VariableParsingMode, hcl.Diagnostics) { + if exprIsNativeQuotedString(expr) { + // If a user provides the pre-0.12 form of variable type argument where + // the string values "string", "list" and "map" are accepted, we + // provide an error to point the user towards using the type system + // correctly has a hint. + // Only the native syntax ends up in this codepath; we handle the + // JSON syntax (which is, of course, quoted within the type system) + // in the normal codepath below. 
+ val, diags := expr.Value(nil) + if diags.HasErrors() { + return cty.DynamicPseudoType, nil, VariableParseHCL, diags + } + str := val.AsString() + switch str { + case "string": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid quoted type constraints", + Detail: "OpenTofu 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of OpenTofu. Remove the quotes around \"string\".", + Subject: expr.Range().Ptr(), + }) + return cty.DynamicPseudoType, nil, VariableParseLiteral, diags + case "list": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid quoted type constraints", + Detail: "OpenTofu 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of OpenTofu. Remove the quotes around \"list\" and write list(string) instead to explicitly indicate that the list elements are strings.", + Subject: expr.Range().Ptr(), + }) + return cty.DynamicPseudoType, nil, VariableParseHCL, diags + case "map": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid quoted type constraints", + Detail: "OpenTofu 0.11 and earlier required type constraints to be given in quotes, but that form is now deprecated and will be removed in a future version of OpenTofu. 
Remove the quotes around \"map\" and write map(string) instead to explicitly indicate that the map elements are strings.", + Subject: expr.Range().Ptr(), + }) + return cty.DynamicPseudoType, nil, VariableParseHCL, diags + default: + return cty.DynamicPseudoType, nil, VariableParseHCL, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: "Invalid legacy variable type hint", + Detail: `To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, + Subject: expr.Range().Ptr(), + }} + } + } + + // First we'll deal with some shorthand forms that the HCL-level type + // expression parser doesn't include. These both emulate pre-0.12 behavior + // of allowing a list or map of any element type as long as all of the + // elements are consistent. This is the same as list(any) or map(any). + switch hcl.ExprAsKeyword(expr) { + case "list": + return cty.List(cty.DynamicPseudoType), nil, VariableParseHCL, nil + case "map": + return cty.Map(cty.DynamicPseudoType), nil, VariableParseHCL, nil + } + + ty, typeDefaults, diags := typeexpr.TypeConstraintWithDefaults(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, nil, VariableParseHCL, diags + } + + switch { + case ty.IsPrimitiveType(): + // Primitive types use literal parsing. + return ty, typeDefaults, VariableParseLiteral, diags + default: + // Everything else uses HCL parsing + return ty, typeDefaults, VariableParseHCL, diags + } +} + +func (v *Variable) Addr() addrs.InputVariable { + return addrs.InputVariable{Name: v.Name} +} + +// Required returns true if this variable is required to be set by the caller, +// or false if there is a default value that will be used when it isn't set. +func (v *Variable) Required() bool { + return v.Default == cty.NilVal +} + +// VariableParsingMode defines how values of a particular variable given by +// text-only mechanisms (command line arguments and environment variables) +// should be parsed to produce the final value. 
+type VariableParsingMode rune + +// VariableParseLiteral is a variable parsing mode that just takes the given +// string directly as a cty.String value. +const VariableParseLiteral VariableParsingMode = 'L' + +// VariableParseHCL is a variable parsing mode that attempts to parse the given +// string as an HCL expression and returns the result. +const VariableParseHCL VariableParsingMode = 'H' + +// Parse uses the receiving parsing mode to process the given variable value +// string, returning the result along with any diagnostics. +// +// A VariableParsingMode does not know the expected type of the corresponding +// variable, so it's the caller's responsibility to attempt to convert the +// result to the appropriate type and return to the user any diagnostics that +// conversion may produce. +// +// The given name is used to create a synthetic filename in case any diagnostics +// must be generated about the given string value. This should be the name +// of the root module variable whose value will be populated from the given +// string. +// +// If the returned diagnostics has errors, the returned value may not be +// valid. +func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { + switch m { + case VariableParseLiteral: + return cty.StringVal(value), nil + case VariableParseHCL: + fakeFilename := fmt.Sprintf("", name) + expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, valDiags := expr.Value(nil) + diags = append(diags, valDiags...) + return val, diags + default: + // Should never happen + panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) + } +} + +// decodeVariableValidationBlock is a wrapper around decodeCheckRuleBlock +// that imposes the additional rule that the condition expression can refer +// only to an input variable of the given name. 
+func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + vv, diags := decodeCheckRuleBlock(block, override) + if vv.Condition != nil { + // The validation condition can only refer to the variable itself, + // to ensure that the variable declaration can't create additional + // edges in the dependency graph. + goodRefs := 0 + for _, traversal := range vv.Condition.Variables() { + ref, moreDiags := addrs.ParseRef(traversal) + if !moreDiags.HasErrors() { + if addr, ok := ref.Subject.(addrs.InputVariable); ok { + if addr.Name == varName { + goodRefs++ + continue // Reference is valid + } + } + } + // If we fall out here then the reference is invalid. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference in variable validation", + Detail: fmt.Sprintf("The condition for variable %q can only refer to the variable itself, using var.%s.", varName, varName), + Subject: traversal.SourceRange().Ptr(), + }) + } + if goodRefs < 1 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable validation condition", + Detail: fmt.Sprintf("The condition for variable %q must refer to var.%s in order to test incoming values.", varName, varName), + Subject: vv.Condition.Range().Ptr(), + }) + } + } + + if vv.ErrorMessage != nil { + // The same applies to the validation error message, except that + // references are not required. A string literal is a valid error + // message. + goodRefs := 0 + for _, traversal := range vv.ErrorMessage.Variables() { + ref, moreDiags := addrs.ParseRef(traversal) + if !moreDiags.HasErrors() { + if addr, ok := ref.Subject.(addrs.InputVariable); ok { + if addr.Name == varName { + goodRefs++ + continue // Reference is valid + } + } + } + // If we fall out here then the reference is invalid. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference in variable validation", + Detail: fmt.Sprintf("The error message for variable %q can only refer to the variable itself, using var.%s.", varName, varName), + Subject: traversal.SourceRange().Ptr(), + }) + } + } + + return vv, diags +} + +// Output represents an "output" block in a module or file. +type Output struct { + Name string + Description string + Expr hcl.Expression + DependsOn []hcl.Traversal + Sensitive bool + + Preconditions []*CheckRule + + DescriptionSet bool + SensitiveSet bool + + DeclRange hcl.Range + + // IsOverridden indicates if the output is being overridden. It's used in + // testing framework to not evaluate expression and use OverrideValue instead. + IsOverridden bool + // OverrideValue is only valid if IsOverridden is set to true. The value + // should be used instead of evaluated expression. It's possible to have no + // OverrideValue even with IsOverridden is set to true. + OverrideValue *cty.Value +} + +func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { + var diags hcl.Diagnostics + + o := &Output{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := outputBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, moreDiags := block.Body.Content(schema) + diags = append(diags, moreDiags...) + + if !hclsyntax.ValidIdentifier(o.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid output name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) + diags = append(diags, valDiags...) 
+ o.DescriptionSet = true + } + + if attr, exists := content.Attributes["value"]; exists { + o.Expr = attr.Expr + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) + diags = append(diags, valDiags...) + o.SensitiveSet = true + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + o.DependsOn = append(o.DependsOn, deps...) + } + + for _, block := range content.Blocks { + switch block.Type { + case "precondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + o.Preconditions = append(o.Preconditions, cr) + case "postcondition": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Postconditions are not allowed", + Detail: "Output values can only have preconditions, not postconditions.", + Subject: block.TypeRange.Ptr(), + }) + default: + // The cases above should be exhaustive for all block types + // defined in the block type schema, so this shouldn't happen. + panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + + return o, diags +} + +func (o *Output) Addr() addrs.OutputValue { + return addrs.OutputValue{Name: o.Name} +} + +// Local represents a single entry from a "locals" block in a module or file. +// The "locals" block itself is not represented, because it serves only to +// provide context for us to interpret its contents. 
+type Local struct { + Name string + Expr hcl.Expression + + DeclRange hcl.Range +} + +func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if len(attrs) == 0 { + return nil, diags + } + + locals := make([]*Local, 0, len(attrs)) + for name, attr := range attrs { + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local value name", + Detail: badIdentifierDetail, + Subject: &attr.NameRange, + }) + } + + locals = append(locals, &Local{ + Name: name, + Expr: attr.Expr, + DeclRange: attr.Range, + }) + } + return locals, diags +} + +// Addr returns the address of the local value declared by the receiver, +// relative to its containing module. +func (l *Local) Addr() addrs.LocalValue { + return addrs.LocalValue{ + Name: l.Name, + } +} + +var variableBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "default", + }, + { + Name: "type", + }, + { + Name: "sensitive", + }, + { + Name: "nullable", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "validation", + }, + }, +} + +var outputBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "value", + Required: true, + }, + { + Name: "depends_on", + }, + { + Name: "sensitive", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, +} diff --git a/pkg/configs/parser.go b/pkg/configs/parser.go new file mode 100644 index 00000000000..02e90f00413 --- /dev/null +++ b/pkg/configs/parser.go @@ -0,0 +1,125 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package configs

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/spf13/afero"
)

// Parser is the main interface to read configuration files and other related
// files from disk.
//
// It retains a cache of all files that are loaded so that they can be used
// to create source code snippets in diagnostics, etc.
type Parser struct {
	// fs abstracts the filesystem so callers (notably tests) can substitute
	// an in-memory filesystem for the real one.
	fs afero.Afero
	// p retains every parsed file, keyed by path, so diagnostics can later
	// render source snippets via Sources.
	p *hclparse.Parser

	// allowExperiments controls whether we will allow modules to opt in to
	// experimental language features. In main code this will be set only
	// for alpha releases and some development builds. Test code must decide
	// for itself whether to enable it so that tests can cover both the
	// allowed and not-allowed situations.
	allowExperiments bool
}

// NewParser creates and returns a new Parser that reads files from the given
// filesystem. If a nil filesystem is passed then the system's "real" filesystem
// will be used, via afero.OsFs.
func NewParser(fs afero.Fs) *Parser {
	if fs == nil {
		fs = afero.OsFs{}
	}

	return &Parser{
		fs: afero.Afero{Fs: fs},
		p:  hclparse.NewParser(),
	}
}

// LoadHCLFile is a low-level method that reads the file at the given path,
// parses it, and returns the hcl.Body representing its root. In many cases
// it is better to use one of the other Load*File methods on this type,
// which additionally decode the root body in some way and return a higher-level
// construct.
//
// If the file cannot be read at all -- e.g. because it does not exist -- then
// this method will return a nil body and error diagnostics. In this case
// callers may wish to ignore the provided error diagnostics and produce
// a more context-sensitive error instead.
//
// The file will be parsed using the HCL native syntax unless the filename
// ends with ".json", in which case the HCL JSON syntax will be used.
func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) {
	src, err := p.fs.ReadFile(path)

	if err != nil {
		// Per the doc comment: nil body plus error diagnostics signals
		// "could not read", so callers can substitute their own message.
		return nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Failed to read file",
				Detail:   fmt.Sprintf("The file %q could not be read.", path),
			},
		}
	}

	var file *hcl.File
	var diags hcl.Diagnostics
	switch {
	case strings.HasSuffix(path, ".json"):
		file, diags = p.p.ParseJSON(src, path)
	default:
		file, diags = p.p.ParseHCL(src, path)
	}

	// If the returned file or body is nil, then we'll return a non-nil empty
	// body so we'll meet our contract that nil means an error reading the file.
	if file == nil || file.Body == nil {
		return hcl.EmptyBody(), diags
	}

	return file.Body, diags
}

// Sources returns a map of the cached source buffers for all files that
// have been loaded through this parser, with source filenames (as requested
// when each file was opened) as the keys.
func (p *Parser) Sources() map[string]*hcl.File {
	return p.p.Files()
}

// ForceFileSource artificially adds source code to the cache of file sources,
// as if it had been loaded from the given filename.
//
// This should be used only in special situations where configuration is loaded
// some other way. Most callers should load configuration via methods of
// Parser, which will update the sources cache automatically.
func (p *Parser) ForceFileSource(filename string, src []byte) {
	// We'll make a synthetic hcl.File here just so we can reuse the
	// existing cache. Only Bytes matters for snippet rendering; the body
	// is an empty placeholder and is never decoded.
	p.p.AddFile(filename, &hcl.File{
		Body:  hcl.EmptyBody(),
		Bytes: src,
	})
}

// AllowLanguageExperiments specifies whether subsequent LoadConfigFile (and
// similar) calls will allow opting in to experimental language features.
//
// If this method is never called for a particular parser, the default behavior
// is to disallow language experiments.
//
// Main code should set this only for alpha or development builds.
Test code +// is responsible for deciding for itself whether and how to call this +// method. +func (p *Parser) AllowLanguageExperiments(allowed bool) { + p.allowExperiments = allowed +} diff --git a/pkg/configs/parser_config.go b/pkg/configs/parser_config.go new file mode 100644 index 00000000000..e493d67b326 --- /dev/null +++ b/pkg/configs/parser_config.go @@ -0,0 +1,372 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/encryption/config" +) + +// LoadConfigFile reads the file at the given path and parses it as a config +// file. +// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil *File will be returned along with error diagnostics. Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil map is returned +// then the map may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, false) +} + +// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes +// certain required attribute constraints in order to interpret the given +// file as an overrides file. +func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, true) +} + +// LoadTestFile reads the file at the given path and parses it as a OpenTofu +// test file. +// +// It references the same LoadHCLFile as LoadConfigFile, so inherits the same +// syntax selection behaviours. 
+func (p *Parser) LoadTestFile(path string) (*TestFile, hcl.Diagnostics) { + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + test, testDiags := loadTestFile(body) + diags = append(diags, testDiags...) + return test, diags +} + +func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + file := &File{} + + var reqDiags hcl.Diagnostics + file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) + diags = append(diags, reqDiags...) + + // We'll load the experiments first because other decoding logic in the + // loop below might depend on these experiments. + var expDiags hcl.Diagnostics + file.ActiveExperiments, expDiags = sniffActiveExperiments(body, p.allowExperiments) + diags = append(diags, expDiags...) + + content, contentDiags := body.Content(configFileSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, contentDiags := block.Body.Content(terraformBlockSchema) + diags = append(diags, contentDiags...) + + // We ignore the "terraform_version", "language" and "experiments" + // attributes here because sniffCoreVersionRequirements and + // sniffActiveExperiments already dealt with those above. + + for _, innerBlock := range content.Blocks { + switch innerBlock.Type { + + case "backend": + backendCfg, cfgDiags := decodeBackendBlock(innerBlock) + diags = append(diags, cfgDiags...) + if backendCfg != nil { + file.Backends = append(file.Backends, backendCfg) + } + + case "cloud": + cloudCfg, cfgDiags := decodeCloudBlock(innerBlock) + diags = append(diags, cfgDiags...) + if cloudCfg != nil { + file.CloudConfigs = append(file.CloudConfigs, cloudCfg) + } + + case "required_providers": + reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) + diags = append(diags, reqsDiags...) 
+ file.RequiredProviders = append(file.RequiredProviders, reqs) + + case "provider_meta": + providerCfg, cfgDiags := decodeProviderMetaBlock(innerBlock) + diags = append(diags, cfgDiags...) + if providerCfg != nil { + file.ProviderMetas = append(file.ProviderMetas, providerCfg) + } + + case "encryption": + encryptionCfg, cfgDiags := config.DecodeConfig(innerBlock.Body, innerBlock.DefRange) + diags = append(diags, cfgDiags...) + if encryptionCfg != nil { + file.Encryptions = append(file.Encryptions, encryptionCfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + case "required_providers": + // required_providers should be nested inside a "terraform" block + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid required_providers block", + Detail: "A \"required_providers\" block must be nested inside a \"terraform\" block.", + Subject: block.TypeRange.Ptr(), + }) + + case "provider": + cfg, cfgDiags := decodeProviderBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ProviderConfigs = append(file.ProviderConfigs, cfg) + } + + case "variable": + cfg, cfgDiags := decodeVariableBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Variables = append(file.Variables, cfg) + } + + case "locals": + defs, defsDiags := decodeLocalsBlock(block) + diags = append(diags, defsDiags...) + file.Locals = append(file.Locals, defs...) + + case "output": + cfg, cfgDiags := decodeOutputBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Outputs = append(file.Outputs, cfg) + } + + case "module": + cfg, cfgDiags := decodeModuleBlock(block, override) + diags = append(diags, cfgDiags...) 
+ if cfg != nil { + file.ModuleCalls = append(file.ModuleCalls, cfg) + } + + case "resource": + cfg, cfgDiags := decodeResourceBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ManagedResources = append(file.ManagedResources, cfg) + } + + case "data": + cfg, cfgDiags := decodeDataBlock(block, override, false) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.DataResources = append(file.DataResources, cfg) + } + + case "moved": + cfg, cfgDiags := decodeMovedBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Moved = append(file.Moved, cfg) + } + + case "import": + cfg, cfgDiags := decodeImportBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Import = append(file.Import, cfg) + } + + case "check": + cfg, cfgDiags := decodeCheckBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Checks = append(file.Checks, cfg) + } + + case "removed": + cfg, cfgDiags := decodeRemovedBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Removed = append(file.Removed, cfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + return file, diags +} + +// sniffCoreVersionRequirements does minimal parsing of the given body for +// "terraform" blocks with "required_version" attributes, returning the +// requirements found. +// +// This is intended to maximize the chance that we'll be able to read the +// requirements (syntax errors notwithstanding) even if the config file contains +// constructs that might've been added in future OpenTofu versions +// +// This is a "best effort" sort of method which will return constraints it is +// able to find, but may return no constraints at all if the given body is +// so invalid that it cannot be decoded at all. 
+func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileTerraformBlockSniffRootSchema) + + var constraints []VersionConstraint + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) + diags = append(diags, blockDiags...) + + attr, exists := content.Attributes["required_version"] + if !exists { + continue + } + + constraint, constraintDiags := decodeVersionConstraint(attr) + diags = append(diags, constraintDiags...) + if !constraintDiags.HasErrors() { + constraints = append(constraints, constraint) + } + } + + return constraints, diags +} + +// configFileSchema is the schema for the top-level of a config file. We use +// the low-level HCL API for this level so we can easily deal with each +// block type separately with its own decoding logic. +var configFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + { + // This one is not really valid, but we include it here so we + // can create a specialized error message hinting the user to + // nest it inside a "terraform" block. + Type: "required_providers", + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "locals", + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "moved", + }, + { + Type: "import", + }, + { + Type: "check", + LabelNames: []string{"name"}, + }, + { + Type: "removed", + }, + }, +} + +// terraformBlockSchema is the schema for a top-level "terraform" block in +// a configuration file. 
+var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "required_version"}, + {Name: "experiments"}, + {Name: "language"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "backend", + LabelNames: []string{"type"}, + }, + { + Type: "cloud", + }, + { + Type: "required_providers", + }, + { + Type: "provider_meta", + LabelNames: []string{"provider"}, + }, + { + Type: "encryption", + }, + }, +} + +// configFileTerraformBlockSniffRootSchema is a schema for +// sniffCoreVersionRequirements and sniffActiveExperiments. +var configFileTerraformBlockSniffRootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + }, +} + +// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, +} + +// configFileExperimentsSniffBlockSchema is a schema for sniffActiveExperiments, +// to decode a single attribute from inside a "terraform" block. +var configFileExperimentsSniffBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "experiments"}, + {Name: "language"}, + }, +} diff --git a/pkg/configs/parser_config_dir.go b/pkg/configs/parser_config_dir.go new file mode 100644 index 00000000000..0b08276b2ce --- /dev/null +++ b/pkg/configs/parser_config_dir.go @@ -0,0 +1,384 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "log" + "os" + "path" + "path/filepath" + "slices" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +const ( + DefaultTestDirectory = "tests" +) + +const ( + tfExt = ".tf" + tofuExt = ".tofu" + tfJSONExt = ".tf.json" + tofuJSONExt = ".tofu.json" + tfTestExt = ".tftest.hcl" + tofuTestExt = ".tofutest.hcl" + tfTestJSONExt = ".tftest.json" + tofuTestJSONExt = ".tofutest.json" +) + +// LoadConfigDir reads the .tf and .tf.json files in the given directory +// as config files (using LoadConfigFile) and then combines these files into +// a single Module. +// +// If this method returns nil, that indicates that the given directory does not +// exist at all or could not be opened for some reason. Callers may wish to +// detect this case and ignore the returned diagnostics so that they can +// produce a more context-aware error message in that case. +// +// If this method returns a non-nil module while error diagnostics are returned +// then the module may be incomplete but can be used carefully for static +// analysis. +// +// This file does not consider a directory with no files to be an error, and +// will simply return an empty module in that case. Callers should first call +// Parser.IsConfigDir if they wish to recognize that situation. +// +// .tf files are parsed using the HCL native syntax while .tf.json files are +// parsed using the HCL JSON syntax. +func (p *Parser) LoadConfigDir(path string, call StaticModuleCall) (*Module, hcl.Diagnostics) { + return p.LoadConfigDirSelective(path, call, SelectiveLoadAll) +} +func (p *Parser) LoadConfigDirSelective(path string, call StaticModuleCall, load SelectiveLoader) (*Module, hcl.Diagnostics) { + primaryPaths, overridePaths, _, diags := p.dirFiles(path, "") + if diags.HasErrors() { + return nil, diags + } + + primary, fDiags := p.loadFiles(primaryPaths, false) + diags = append(diags, fDiags...) 
+ override, fDiags := p.loadFiles(overridePaths, true) + diags = append(diags, fDiags...) + + mod, modDiags := NewModule(primary, override, call, path, load) + diags = append(diags, modDiags...) + + return mod, diags +} + +// LoadConfigDirWithTests matches LoadConfigDir, but the return Module also +// contains any relevant .tftest.hcl files. +func (p *Parser) LoadConfigDirWithTests(path string, testDirectory string, call StaticModuleCall) (*Module, hcl.Diagnostics) { + primaryPaths, overridePaths, testPaths, diags := p.dirFiles(path, testDirectory) + if diags.HasErrors() { + return nil, diags + } + + primary, fDiags := p.loadFiles(primaryPaths, false) + diags = append(diags, fDiags...) + override, fDiags := p.loadFiles(overridePaths, true) + diags = append(diags, fDiags...) + tests, fDiags := p.loadTestFiles(path, testPaths) + diags = append(diags, fDiags...) + + mod, modDiags := NewModuleWithTests(primary, override, tests, call, path) + diags = append(diags, modDiags...) + + return mod, diags +} + +// ConfigDirFiles returns lists of the primary and override files configuration +// files in the given directory. +// +// If the given directory does not exist or cannot be read, error diagnostics +// are returned. If errors are returned, the resulting lists may be incomplete. +func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { + primary, override, _, diags = p.dirFiles(dir, "") + return primary, override, diags +} + +// ConfigDirFilesWithTests matches ConfigDirFiles except it also returns the +// paths to any test files within the module. +func (p Parser) ConfigDirFilesWithTests(dir string, testDirectory string) (primary, override, tests []string, diags hcl.Diagnostics) { + return p.dirFiles(dir, testDirectory) +} + +// IsConfigDir determines whether the given path refers to a directory that +// exists and contains at least one OpenTofu config file (with a .tf or +// .tf.json extension.). 
Note, we explicitely exclude checking for tests here +// as tests must live alongside actual .tf config files. +func (p *Parser) IsConfigDir(path string) bool { + primaryPaths, overridePaths, _, _ := p.dirFiles(path, "") + return (len(primaryPaths) + len(overridePaths)) > 0 +} + +func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { + var files []*File + var diags hcl.Diagnostics + + for _, path := range paths { + var f *File + var fDiags hcl.Diagnostics + if override { + f, fDiags = p.LoadConfigFileOverride(path) + } else { + f, fDiags = p.LoadConfigFile(path) + } + diags = append(diags, fDiags...) + if f != nil { + files = append(files, f) + } + } + + return files, diags +} + +// dirFiles finds OpenTofu configuration files within dir, splitting them into +// primary and override files based on the filename. +// +// If testsDir is not empty, dirFiles will also retrieve OpenTofu testing files +// both directly within dir and within testsDir as a subdirectory of dir. In +// this way, testsDir acts both as a direction to retrieve test files within the +// main direction and as the location for additional test files. +func (p *Parser) dirFiles(dir string, testsDir string) (primary, override, tests []string, diags hcl.Diagnostics) { + includeTests := len(testsDir) > 0 + + if includeTests { + testPath := path.Join(dir, testsDir) + + infos, err := p.fs.ReadDir(testPath) + if err != nil { + // Then we couldn't read from the testing directory for some reason. + + if os.IsNotExist(err) { + // Then this means the testing directory did not exist. + // We won't actually stop loading the rest of the configuration + // for this, we will add a warning to explain to the user why + // test files weren't processed but leave it at that. + if testsDir != DefaultTestDirectory { + // We'll only add the warning if a directory other than the + // default has been requested. 
If the user is just loading + // the default directory then we have no expectation that + // it should actually exist. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Test directory does not exist", + Detail: fmt.Sprintf("Requested test directory %s does not exist.", testPath), + }) + } + } else { + // Then there is some other reason we couldn't load. We will + // treat this as a full error. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read test directory", + Detail: fmt.Sprintf("Test directory %s could not be read: %v.", testPath, err), + }) + + // We'll also stop loading the rest of the config for this. + return + } + + } else { + for _, testInfo := range infos { + if testInfo.IsDir() || IsIgnoredFile(testInfo.Name()) { + continue + } + + ext := fileExt(testInfo.Name()) + if isTestFileExt(ext) { + tests = append(tests, filepath.Join(testPath, testInfo.Name())) + } + } + } + + } + + infos, err := p.fs.ReadDir(dir) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read module directory", + Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), + }) + return + } + + for _, info := range infos { + if info.IsDir() { + // We only care about tofu configuration files. 
+ continue + } + + name := info.Name() + ext := fileExt(name) + if ext == "" || IsIgnoredFile(name) { + continue + } + + if isTestFileExt(ext) { + if includeTests { + tests = append(tests, filepath.Join(dir, name)) + } + continue + } + + baseName := name[:len(name)-len(ext)] // strip extension + isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") + + fullPath := filepath.Join(dir, name) + if isOverride { + override = append(override, fullPath) + } else { + primary = append(primary, fullPath) + } + } + + return filterTfPathsWithTofuAlternatives(primary), filterTfPathsWithTofuAlternatives(override), filterTfPathsWithTofuAlternatives(tests), diags +} + +// filterTfPathsWithTofuAlternatives filters out .tf files if they have an +// alternative .tofu file with the same name. +// For example, if there are both 'resources.tf.json' and +// 'resources.tofu.json' files, the 'resources.tf.json' file will be ignored, +// and only the 'resources.tofu.json' file will be returned as a relevant path. 
+func filterTfPathsWithTofuAlternatives(paths []string) []string { + var ignoredPaths []string + var relevantPaths []string + + for _, p := range paths { + ext := tfFileExt(p) + + if ext == "" { + relevantPaths = append(relevantPaths, p) + continue + } + + parallelTofuExt := strings.ReplaceAll(ext, ".tf", ".tofu") + pathWithoutExt, _ := strings.CutSuffix(p, ext) + parallelTofuPath := pathWithoutExt + parallelTofuExt + + // If the .tf file has a parallel .tofu file in the directory, + // we'll ignore the .tf file and only use the .tofu file + if slices.Contains(paths, parallelTofuPath) { + ignoredPaths = append(ignoredPaths, p) + } else { + relevantPaths = append(relevantPaths, p) + } + } + + if len(ignoredPaths) > 0 { + log.Printf("[INFO] filterTfPathsWithTofuAlternatives: Ignored the following .tf files because a .tofu file alternative exists: %q", ignoredPaths) + } + + return relevantPaths +} + +func (p *Parser) loadTestFiles(basePath string, paths []string) (map[string]*TestFile, hcl.Diagnostics) { + var diags hcl.Diagnostics + + tfs := make(map[string]*TestFile) + for _, path := range paths { + tf, fDiags := p.LoadTestFile(path) + diags = append(diags, fDiags...) + if tf != nil { + // We index test files relative to the module they are testing, so + // the key is the relative path between basePath and path. + relPath, err := filepath.Rel(basePath, path) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Failed to calculate relative path", + Detail: fmt.Sprintf("OpenTofu could not calculate the relative path for test file %s and it has been skipped: %s", path, err), + }) + continue + } + tfs[relPath] = tf + } + } + + return tfs, diags +} + +// fileExt returns the OpenTofu configuration extension of the given +// path, or a blank string if it is not a recognized extension. 
+func fileExt(path string) string {
+	// Prefer the .tf-family extension; fall back to the .tofu family.
+	if ext := tfFileExt(path); ext != "" {
+		return ext
+	}
+	return tofuFileExt(path)
+}
+
+// tfFileExt returns the OpenTofu .tf configuration extension of the given
+// path, or a blank string if it is not a recognized .tf extension.
+func tfFileExt(path string) string {
+	// None of these suffixes is a suffix of another, so the order of the
+	// checks does not affect the result.
+	for _, ext := range []string{tfExt, tfJSONExt, tfTestExt, tfTestJSONExt} {
+		if strings.HasSuffix(path, ext) {
+			return ext
+		}
+	}
+	return ""
+}
+
+// tofuFileExt returns the OpenTofu .tofu configuration extension of the given
+// path, or a blank string if it is not a recognized .tofu extension.
+func tofuFileExt(path string) string {
+	// As in tfFileExt, no suffix here shadows another.
+	for _, ext := range []string{tofuExt, tofuJSONExt, tofuTestExt, tofuTestJSONExt} {
+		if strings.HasSuffix(path, ext) {
+			return ext
+		}
+	}
+	return ""
+}
+
+// isTestFileExt reports whether ext is one of the recognized test-file
+// extensions (either .tf-family or .tofu-family).
+func isTestFileExt(ext string) bool {
+	switch ext {
+	case tfTestExt, tfTestJSONExt, tofuTestExt, tofuTestJSONExt:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsIgnoredFile returns true if the given filename (which must not have a
+// directory path ahead of it) should be ignored as e.g. an editor swap file.
+func IsIgnoredFile(name string) bool {
+	switch {
+	case strings.HasPrefix(name, "."): // Unix-like hidden files
+		return true
+	case strings.HasSuffix(name, "~"): // vim
+		return true
+	case strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#"): // emacs
+		return true
+	default:
+		return false
+	}
+}
+
+// IsEmptyDir returns true if the given filesystem path contains no OpenTofu
+// configuration files.
+//
+// Unlike the methods of the Parser type, this function always consults the
+// real filesystem, and thus it isn't appropriate to use when working with
+// configuration loaded from a plan file.
+func IsEmptyDir(path string) (bool, error) {
+	// A path that doesn't exist at all counts as "empty".
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		return true, nil
+	}
+
+	p := NewParser(nil)
+	// Use descriptive names here: the previous "fs, os" shadowed the "os"
+	// package that this very function uses above.
+	primary, override, _, diags := p.dirFiles(path, "")
+	if diags.HasErrors() {
+		return false, diags
+	}
+
+	// Test files are deliberately not considered: dirFiles is called with an
+	// empty test directory, so a directory holding only test files is still
+	// "empty" of configuration.
+	return len(primary) == 0 && len(override) == 0, nil
+}
diff --git a/pkg/configs/parser_config_dir_test.go b/pkg/configs/parser_config_dir_test.go
new file mode 100644
index 00000000000..c5a0549ed86
--- /dev/null
+++ b/pkg/configs/parser_config_dir_test.go
@@ -0,0 +1,351 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// TestParseLoadConfigDirSuccess is a simple test that just verifies that
+// a number of test configuration directories (in testdata/valid-modules)
+// can be parsed without raising any diagnostics.
+//
+// It also re-tests the individual files in testdata/valid-files as if
+// they were single-file modules, to ensure that they can be bundled into
+// modules correctly.
+//
+// This test does not verify that reading these modules produces the correct
+// module element contents. More detailed assertions may be made on some subset
+// of these configuration files in other tests.
+func TestParserLoadConfigDirSuccess(t *testing.T) { + dirs, err := os.ReadDir("testdata/valid-modules") + if err != nil { + t.Fatal(err) + } + + for _, info := range dirs { + name := info.Name() + t.Run(name, func(t *testing.T) { + parser := NewParser(nil) + path := filepath.Join("testdata/valid-modules", name) + + mod, diags := parser.LoadConfigDir(path, RootModuleCallForTesting()) + if len(diags) != 0 && len(mod.ActiveExperiments) != 0 { + // As a special case to reduce churn while we're working + // through experimental features, we'll ignore the warning + // that an experimental feature is active if the module + // intentionally opted in to that feature. + // If you want to explicitly test for the feature warning + // to be generated, consider using testdata/warning-files + // instead. + filterDiags := make(hcl.Diagnostics, 0, len(diags)) + for _, diag := range diags { + if diag.Severity != hcl.DiagWarning { + continue + } + match := false + for exp := range mod.ActiveExperiments { + allowedSummary := fmt.Sprintf("Experimental feature %q is active", exp.Keyword()) + if diag.Summary == allowedSummary { + match = true + break + } + } + if !match { + filterDiags = append(filterDiags, diag) + } + } + diags = filterDiags + } + if len(diags) != 0 { + t.Errorf("unexpected diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + if mod.SourceDir != path { + t.Errorf("wrong SourceDir value %q; want %s", mod.SourceDir, path) + } + + if len(mod.Tests) > 0 { + // We only load tests when requested, and we didn't request this + // time. + t.Errorf("should not have loaded tests, but found %d", len(mod.Tests)) + } + }) + } + + // The individual files in testdata/valid-files should also work + // when loaded as modules. 
+ files, err := os.ReadDir("testdata/valid-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(fmt.Sprintf("%s as module", name), func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/valid-files", name)) + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + "mod/" + name: string(src), + }) + + _, diags := parser.LoadConfigDir("mod", NewStaticModuleCall(addrs.RootModule, + func(v *Variable) (cty.Value, hcl.Diagnostics) { + if !v.Required() { + // Allow defaults in this test + return v.Default, nil + } + panic("Variables not configured for this test!") + }, "", "")) + if diags.HasErrors() { + t.Errorf("unexpected error diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + }) + } + +} + +func TestParserLoadConfigDirWithTests(t *testing.T) { + directories := []string{ + "testdata/valid-modules/with-tests", + "testdata/valid-modules/with-tests-expect-failures", + "testdata/valid-modules/with-tests-nested", + "testdata/valid-modules/with-tests-very-nested", + "testdata/valid-modules/with-tests-json", + } + + for _, directory := range directories { + t.Run(directory, func(t *testing.T) { + + testDirectory := DefaultTestDirectory + if directory == "testdata/valid-modules/with-tests-very-nested" { + testDirectory = "very/nested" + } + + parser := NewParser(nil) + mod, diags := parser.LoadConfigDirWithTests(directory, testDirectory, RootModuleCallForTesting()) + if len(diags) > 0 { // We don't want any warnings or errors. 
+ t.Errorf("unexpected diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + if len(mod.Tests) != 2 { + t.Errorf("incorrect number of test files found: %d", len(mod.Tests)) + } + }) + } +} + +func TestParserLoadConfigDirWithTests_ReturnsWarnings(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDirWithTests("testdata/valid-modules/with-tests", "not_real", RootModuleCallForTesting()) + if len(diags) != 1 { + t.Errorf("expected exactly 1 diagnostic, but found %d", len(diags)) + } else { + if diags[0].Severity != hcl.DiagWarning { + t.Errorf("expected warning severity but found %d", diags[0].Severity) + } + + if diags[0].Summary != "Test directory does not exist" { + t.Errorf("expected summary to be \"Test directory does not exist\" but was \"%s\"", diags[0].Summary) + } + + if diags[0].Detail != "Requested test directory testdata/valid-modules/with-tests/not_real does not exist." { + t.Errorf("expected detail to be \"Requested test directory testdata/valid-modules/with-tests/not_real does not exist.\" but was \"%s\"", diags[0].Detail) + } + } + + // Despite the warning, should still have loaded the tests in the + // configuration directory. + if len(mod.Tests) != 2 { + t.Errorf("incorrect number of test files found: %d", len(mod.Tests)) + } +} + +// TestParseLoadConfigDirFailure is a simple test that just verifies that +// a number of test configuration directories (in testdata/invalid-modules) +// produce diagnostics when parsed. +// +// It also re-tests the individual files in testdata/invalid-files as if +// they were single-file modules, to ensure that their errors are still +// detected when loading as part of a module. +// +// This test does not verify that reading these modules produces any +// diagnostics in particular. More detailed assertions may be made on some subset +// of these configuration files in other tests. 
+func TestParserLoadConfigDirFailure(t *testing.T) { + dirs, err := os.ReadDir("testdata/invalid-modules") + if err != nil { + t.Fatal(err) + } + + for _, info := range dirs { + name := info.Name() + t.Run(name, func(t *testing.T) { + parser := NewParser(nil) + path := filepath.Join("testdata/invalid-modules", name) + + _, diags := parser.LoadConfigDir(path, RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Errorf("no errors; want at least one") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + }) + } + + // The individual files in testdata/valid-files should also work + // when loaded as modules. + files, err := os.ReadDir("testdata/invalid-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(fmt.Sprintf("%s as module", name), func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/invalid-files", name)) + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + "mod/" + name: string(src), + }) + + _, diags := parser.LoadConfigDir("mod", RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Errorf("no errors; want at least one") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + }) + } + +} + +func TestParserLoadConfigDirWithTests_TofuFiles(t *testing.T) { + expectedVariablesToOverride := []string{"should_override", "should_override_json"} + expectedLoadedTestFiles := []string{"test/resources_test.tofutest.hcl", "test/resources_test_json.tofutest.json"} + + tests := []struct { + name string + path string + expectedResources []string + }{ + { + name: "only .tofu files", + path: "testdata/tofu-only-files", + expectedResources: []string{"aws_security_group.firewall_tofu", "aws_instance.web_tofu", "test_object.a_tofu", "test_object.b_tofu"}, + }, + { + name: ".tofu and .tf files", + path: "testdata/tofu-and-tf-files", + expectedResources: []string{"aws_security_group.firewall_tofu", "aws_instance.web_tofu", "test_object.a_tofu", 
"test_object.b_tofu", "tf_resource.first", "tf_json_resource.a"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := NewParser(nil) + path := tt.path + + mod, diags := parser.LoadConfigDirWithTests(path, "test", RootModuleCallForTesting()) + if len(diags) != 0 { + t.Errorf("unexpected diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + if mod.SourceDir != path { + t.Errorf("wrong SourceDir value %q; want %s", mod.SourceDir, path) + } + + if len(tt.expectedResources) != len(mod.ManagedResources) { + t.Errorf("expected to find %d resources but instead got %d resources", len(tt.expectedResources), len(mod.ManagedResources)) + } + + for _, expectedResource := range tt.expectedResources { + if mod.ManagedResources[expectedResource] == nil { + t.Errorf("expected to load %s resource as part of configuration but it is missing", expectedResource) + } + } + + if len(expectedVariablesToOverride) != len(mod.Variables) { + t.Errorf("expected to find %d variables but instead got %d resources", len(expectedVariablesToOverride), len(mod.Variables)) + } + + for _, expectedVariable := range expectedVariablesToOverride { + variableInConfiguration := mod.Variables[expectedVariable] + if variableInConfiguration == nil { + t.Errorf("expected to load %s variable as part of configuration but it is missing", expectedVariable) + } else if variableInConfiguration.Default.AsString() != "overridden by tofu file" { + t.Errorf("expected variable default value %s to be overridden", expectedVariable) + } + } + + if len(mod.Tests) != 2 { + t.Errorf("incorrect number of test files found: %d", len(mod.Tests)) + } + + for _, expectedTest := range expectedLoadedTestFiles { + if mod.Tests[expectedTest] == nil { + t.Errorf("expected to load %s test as part of configuration but it is missing", expectedTest) + } + } + }) + } +} + +func TestIsEmptyDir(t *testing.T) { + val, err := IsEmptyDir(filepath.Join("testdata", "valid-files")) + if err 
!= nil { + t.Fatalf("err: %s", err) + } + if val { + t.Fatal("should not be empty") + } +} + +func TestIsEmptyDir_noExist(t *testing.T) { + val, err := IsEmptyDir(filepath.Join("testdata", "nopenopenope")) + if err != nil { + t.Fatalf("err: %s", err) + } + if !val { + t.Fatal("should be empty") + } +} + +func TestIsEmptyDir_noConfigs(t *testing.T) { + val, err := IsEmptyDir(filepath.Join("testdata", "dir-empty")) + if err != nil { + t.Fatalf("err: %s", err) + } + if !val { + t.Fatal("should be empty") + } +} diff --git a/pkg/configs/parser_config_test.go b/pkg/configs/parser_config_test.go new file mode 100644 index 00000000000..c5504d92b51 --- /dev/null +++ b/pkg/configs/parser_config_test.go @@ -0,0 +1,298 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "bufio" + "bytes" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/hashicorp/hcl/v2" +) + +// TestParseLoadConfigFileSuccess is a simple test that just verifies that +// a number of test configuration files (in testdata/valid-files) can +// be parsed without raising any diagnostics. +// +// This test does not verify that reading these files produces the correct +// file element contents. More detailed assertions may be made on some subset +// of these configuration files in other tests. 
+func TestParserLoadConfigFileSuccess(t *testing.T) { + files, err := os.ReadDir("testdata/valid-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(name, func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/valid-files", name)) + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + name: string(src), + }) + + _, diags := parser.LoadConfigFile(name) + if len(diags) != 0 { + t.Errorf("unexpected diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + }) + } +} + +// TestParseLoadConfigFileFailure is a simple test that just verifies that +// a number of test configuration files (in testdata/invalid-files) +// produce errors as expected. +// +// This test does not verify specific error messages, so more detailed +// assertions should be made on some subset of these configuration files in +// other tests. +func TestParserLoadConfigFileFailure(t *testing.T) { + files, err := os.ReadDir("testdata/invalid-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(name, func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/invalid-files", name)) + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + name: string(src), + }) + + _, diags := parser.LoadConfigFile(name) + if !diags.HasErrors() { + t.Errorf("LoadConfigFile succeeded; want errors") + } + for _, diag := range diags { + t.Logf("- %s", diag) + } + }) + } +} + +// This test uses a subset of the same fixture files as +// TestParserLoadConfigFileFailure, but additionally verifies that each +// file produces the expected diagnostic summary. 
+func TestParserLoadConfigFileFailureMessages(t *testing.T) { + tests := []struct { + Filename string + WantSeverity hcl.DiagnosticSeverity + WantDiag string + }{ + { + "invalid-files/data-resource-lifecycle.tf", + hcl.DiagError, + "Invalid data resource lifecycle argument", + }, + { + "invalid-files/variable-type-unknown.tf", + hcl.DiagError, + "Invalid type specification", + }, + { + "invalid-files/unexpected-attr.tf", + hcl.DiagError, + "Unsupported argument", + }, + { + "invalid-files/unexpected-block.tf", + hcl.DiagError, + "Unsupported block type", + }, + { + "invalid-files/resource-count-and-for_each.tf", + hcl.DiagError, + `Invalid combination of "count" and "for_each"`, + }, + { + "invalid-files/data-count-and-for_each.tf", + hcl.DiagError, + `Invalid combination of "count" and "for_each"`, + }, + { + "invalid-files/resource-lifecycle-badbool.tf", + hcl.DiagError, + "Unsuitable value type", + }, + } + + for _, test := range tests { + t.Run(test.Filename, func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata", test.Filename)) + if err != nil { + t.Fatal(err) + } + + parser := testParser(map[string]string{ + test.Filename: string(src), + }) + + _, diags := parser.LoadConfigFile(test.Filename) + if len(diags) != 1 { + t.Errorf("Wrong number of diagnostics %d; want 1", len(diags)) + for _, diag := range diags { + t.Logf("- %s", diag) + } + return + } + if diags[0].Severity != test.WantSeverity { + t.Errorf("Wrong diagnostic severity %#v; want %#v", diags[0].Severity, test.WantSeverity) + } + if diags[0].Summary != test.WantDiag { + t.Errorf("Wrong diagnostic summary\ngot: %s\nwant: %s", diags[0].Summary, test.WantDiag) + } + }) + } +} + +// TestParseLoadConfigFileWarning is a test that verifies files from +// testdata/warning-files produce particular warnings. +// +// This test does not verify that reading these files produces the correct +// file element contents in spite of those warnings. 
More detailed assertions +// may be made on some subset of these configuration files in other tests. +func TestParserLoadConfigFileWarning(t *testing.T) { + files, err := os.ReadDir("testdata/warning-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(name, func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/warning-files", name)) + if err != nil { + t.Fatal(err) + } + + // First we'll scan the file to see what warnings are expected. + // That's declared inside the files themselves by using the + // string "WARNING: " somewhere on each line that is expected + // to produce a warning, followed by the expected warning summary + // text. A single-line comment (with #) is the main way to do that. + const marker = "WARNING: " + sc := bufio.NewScanner(bytes.NewReader(src)) + wantWarnings := make(map[int]string) + lineNum := 1 + for sc.Scan() { + lineText := sc.Text() + if idx := strings.Index(lineText, marker); idx != -1 { + summaryText := lineText[idx+len(marker):] + wantWarnings[lineNum] = summaryText + } + lineNum++ + } + + parser := testParser(map[string]string{ + name: string(src), + }) + + _, diags := parser.LoadConfigFile(name) + if diags.HasErrors() { + t.Errorf("unexpected error diagnostics") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + gotWarnings := make(map[int]string) + for _, diag := range diags { + if diag.Severity != hcl.DiagWarning || diag.Subject == nil { + continue + } + gotWarnings[diag.Subject.Start.Line] = diag.Summary + } + + if diff := cmp.Diff(wantWarnings, gotWarnings); diff != "" { + t.Errorf("wrong warnings\n%s", diff) + } + }) + } +} + +// TestParseLoadConfigFileError is a test that verifies files from +// testdata/warning-files produce particular errors. +// +// This test does not verify that reading these files produces the correct +// file element contents in spite of those errors. 
More detailed assertions +// may be made on some subset of these configuration files in other tests. +func TestParserLoadConfigFileError(t *testing.T) { + files, err := os.ReadDir("testdata/error-files") + if err != nil { + t.Fatal(err) + } + + for _, info := range files { + name := info.Name() + t.Run(name, func(t *testing.T) { + src, err := os.ReadFile(filepath.Join("testdata/error-files", name)) + if err != nil { + t.Fatal(err) + } + + // First we'll scan the file to see what warnings are expected. + // That's declared inside the files themselves by using the + // string "ERROR: " somewhere on each line that is expected + // to produce a warning, followed by the expected warning summary + // text. A single-line comment (with #) is the main way to do that. + const marker = "ERROR: " + sc := bufio.NewScanner(bytes.NewReader(src)) + wantErrors := make(map[int]string) + lineNum := 1 + for sc.Scan() { + lineText := sc.Text() + if idx := strings.Index(lineText, marker); idx != -1 { + summaryText := lineText[idx+len(marker):] + wantErrors[lineNum] = summaryText + } + lineNum++ + } + + parser := testParser(map[string]string{ + name: string(src), + }) + + file, diags := parser.LoadConfigFile(name) + // TODO many of these errors are now deferred until module loading + // This is a structural issue which existed before static evaluation, but has been made worse by it + // See https://github.com/kubegems/opentofu/issues/1467 for more details + eval := NewStaticEvaluator(nil, RootModuleCallForTesting()) + for _, mc := range file.ModuleCalls { + mDiags := mc.decodeStaticFields(eval) + diags = append(diags, mDiags...) 
+ } + + gotErrors := make(map[int]string) + for _, diag := range diags { + if diag.Severity != hcl.DiagError || diag.Subject == nil { + continue + } + gotErrors[diag.Subject.Start.Line] = diag.Summary + } + + if diff := cmp.Diff(wantErrors, gotErrors); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + }) + } +} diff --git a/pkg/configs/parser_test.go b/pkg/configs/parser_test.go new file mode 100644 index 00000000000..cf0b2e1d91c --- /dev/null +++ b/pkg/configs/parser_test.go @@ -0,0 +1,208 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "os" + "path" + "path/filepath" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/spf13/afero" +) + +// testParser returns a parser that reads files from the given map, which +// is from paths to file contents. +// +// Since this function uses only in-memory objects, it should never fail. +// If any errors are encountered in practice, this function will panic. +func testParser(files map[string]string) *Parser { + fs := afero.Afero{Fs: afero.NewMemMapFs()} + + for filePath, contents := range files { + dirPath := path.Dir(filePath) + err := fs.MkdirAll(dirPath, os.ModePerm) + if err != nil { + panic(err) + } + err = fs.WriteFile(filePath, []byte(contents), os.ModePerm) + if err != nil { + panic(err) + } + } + + return NewParser(fs) +} + +// testModuleConfigFrom File reads a single file from the given path as a +// module and returns its configuration. This is a helper for use in unit tests. +func testModuleConfigFromFile(filename string) (*Config, hcl.Diagnostics) { + parser := NewParser(nil) + f, diags := parser.LoadConfigFile(filename) + mod, modDiags := NewModule([]*File{f}, nil, RootModuleCallForTesting(), filename, SelectiveLoadAll) + diags = append(diags, modDiags...) 
+ cfg, moreDiags := BuildConfig(mod, nil) + return cfg, append(diags, moreDiags...) +} + +// testModuleFromDir reads configuration from the given directory path as +// a module and returns it. This is a helper for use in unit tests. +func testModuleFromDir(path string) (*Module, hcl.Diagnostics) { + parser := NewParser(nil) + return parser.LoadConfigDir(path, RootModuleCallForTesting()) +} + +// testModuleFromDir reads configuration from the given directory path as a +// module and returns its configuration. This is a helper for use in unit tests. +func testModuleConfigFromDir(path string) (*Config, hcl.Diagnostics) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir(path, RootModuleCallForTesting()) + cfg, moreDiags := BuildConfig(mod, nil) + return cfg, append(diags, moreDiags...) +} + +// testNestedModuleConfigFromDirWithTests matches testNestedModuleConfigFromDir +// except it also loads any test files within the directory. +func testNestedModuleConfigFromDirWithTests(t *testing.T, path string) (*Config, hcl.Diagnostics) { + t.Helper() + + parser := NewParser(nil) + mod, diags := parser.LoadConfigDirWithTests(path, "tests", RootModuleCallForTesting()) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, nestedDiags := buildNestedModuleConfig(mod, path, parser) + + diags = append(diags, nestedDiags...) + return cfg, diags +} + +// testNestedModuleConfigFromDir reads configuration from the given directory path as +// a module with (optional) submodules and returns its configuration. This is a +// helper for use in unit tests. +func testNestedModuleConfigFromDir(t *testing.T, path string) (*Config, hcl.Diagnostics) { + t.Helper() + + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir(path, RootModuleCallForTesting()) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, nestedDiags := buildNestedModuleConfig(mod, path, parser) + + diags = append(diags, nestedDiags...) 
+ return cfg, diags +} + +func buildNestedModuleConfig(mod *Module, path string, parser *Parser) (*Config, hcl.Diagnostics) { + versionI := 0 + return BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to the calling module. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes OpenTofu supports. + + // Build a full path by walking up the module tree, prepending each + // source address path until we hit the root + paths := []string{req.SourceAddr.String()} + for config := req.Parent; config != nil && config.Parent != nil; config = config.Parent { + paths = append([]string{config.SourceAddr.String()}, paths...) + } + paths = append([]string{path}, paths...) + sourcePath := filepath.Join(paths...) + + mod, diags := parser.LoadConfigDir(sourcePath, RootModuleCallForTesting()) + version, _ := version.NewVersion(fmt.Sprintf("1.0.%d", versionI)) + versionI++ + return mod, version, diags + }, + )) +} + +func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool { + t.Helper() + return assertDiagnosticCount(t, diags, 0) +} + +func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool { + t.Helper() + if len(diags) != want { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want) + for _, diag := range diags { + t.Logf("- %s", diag) + } + return true + } + return false +} + +func assertDiagnosticSummary(t *testing.T, diags hcl.Diagnostics, want string) bool { + t.Helper() + + for _, diag := range diags { + if diag.Summary == want { + return false + } + } + + t.Errorf("missing diagnostic summary %q", want) + for _, diag := range diags { + t.Logf("- %s", diag) + } + return true +} + +func assertExactDiagnostics(t *testing.T, diags hcl.Diagnostics, want []string) bool { + t.Helper() + + gotDiags := map[string]bool{} + 
wantDiags := map[string]bool{} + + for _, diag := range diags { + gotDiags[diag.Error()] = true + } + for _, msg := range want { + wantDiags[msg] = true + } + + bad := false + for got := range gotDiags { + if _, exists := wantDiags[got]; !exists { + t.Errorf("unexpected diagnostic: %s", got) + bad = true + } + } + for want := range wantDiags { + if _, exists := gotDiags[want]; !exists { + t.Errorf("missing expected diagnostic: %s", want) + bad = true + } + } + + return bad +} + +func assertResultDeepEqual(t *testing.T, got, want interface{}) bool { + t.Helper() + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + return true + } + return false +} + +func stringPtr(s string) *string { + return &s +} diff --git a/pkg/configs/provider.go b/pkg/configs/provider.go new file mode 100644 index 00000000000..f1c38693b42 --- /dev/null +++ b/pkg/configs/provider.go @@ -0,0 +1,292 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Provider represents a "provider" block in a module or file. A provider +// block is a provider configuration, and there can be zero or more +// configurations for each actual provider. +type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range + + // TODO: this may not be set in some cases, so it is not yet suitable for + // use outside of this package. 
We currently only use it for internal + // validation, but once we verify that this can be set in all cases, we can + // export this so providers don't need to be re-resolved. + // This same field is also added to the ProviderConfigRef struct. + providerType addrs.Provider + + // IsMocked indicates if this provider has been mocked. It is used in + // testing framework to instantiate test provider wrapper. + IsMocked bool + MockResources []*MockResource +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, config, moreDiags := block.Body.PartialContent(providerBlockSchema) + diags = append(diags, moreDiags...) + + // Provider names must be localized. Produce an error with a message + // indicating the action the user can take to fix this message if the local + // name is not localized. + name := block.Labels[0] + nameDiags := checkProviderNameNormalized(name, block.DefRange) + diags = append(diags, nameDiags...) + if nameDiags.HasErrors() { + // If the name is invalid then we mustn't produce a result because + // downstreams could try to use it as a provider type and then crash. + return nil, diags + } + + provider := &Provider{ + Name: name, + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. 
%s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Version constraints inside provider configuration blocks are deprecated", + Detail: "OpenTofu 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of OpenTofu. To silence this warning, move the provider version constraint into the required_providers block.", + Subject: attr.Expr.Range().Ptr(), + }) + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by OpenTofu in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + var seenEscapeBlock *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as provider-specific rather than as meta-arguments, but each provider block can have only one such block. The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. 
+ provider.Config = hcl.MergeBodies([]hcl.Body{provider.Config, block.Body}) + + default: + // All of the other block types in our schema are reserved for + // future expansion. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by OpenTofu in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. +func (p *Provider) Addr() addrs.LocalProviderConfig { + return addrs.LocalProviderConfig{ + LocalName: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ParseProviderConfigCompact parses the given absolute traversal as a relative +// provider address in compact form. The following are examples of traversals +// that can be successfully parsed as compact relative provider configuration +// addresses: +// +// - aws +// - aws.foo +// +// This function will panic if given a relative traversal. +// +// If the returned diagnostics contains errors then the result value is invalid +// and must not be used. +func ParseProviderConfigCompact(traversal hcl.Traversal) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := addrs.LocalProviderConfig{ + LocalName: traversal.RootName(), + } + + if len(traversal) < 2 { + // Just a type name, then. 
+ return ret, diags + } + + aliasStep := traversal[1] + switch ts := aliasStep.(type) { + case hcl.TraverseAttr: + ret.Alias = ts.Name + return ret, diags + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", + Subject: aliasStep.SourceRange().Ptr(), + }) + } + + if len(traversal) > 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous extra operators after provider configuration address.", + Subject: traversal[2:].SourceRange().Ptr(), + }) + } + + return ret, diags +} + +// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. 
+func ParseProviderConfigCompactStr(str string) (addrs.LocalProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return addrs.LocalProviderConfig{}, diags + } + + addr, addrDiags := ParseProviderConfigCompact(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "_"}, // meta-argument escaping block + + // The rest of these are reserved for future expansion. + {Type: "lifecycle"}, + {Type: "locals"}, + }, +} + +// checkProviderNameNormalized verifies that the given string is already +// normalized and returns an error if not. +func checkProviderNameNormalized(name string, declrange hcl.Range) hcl.Diagnostics { + var diags hcl.Diagnostics + // verify that the provider local name is normalized + normalized, err := addrs.IsProviderPartNormalized(name) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider local name", + Detail: fmt.Sprintf("%s is an invalid provider local name: %s", name, err), + Subject: &declrange, + }) + return diags + } + if !normalized { + // we would have returned this error already + normalizedProvider, _ := addrs.ParseProviderPart(name) + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider local name", + Detail: fmt.Sprintf("Provider names must be normalized. 
Replace %q with %q to fix this error.", name, normalizedProvider), + Subject: &declrange, + }) + } + return diags +} diff --git a/pkg/configs/provider_meta.go b/pkg/configs/provider_meta.go new file mode 100644 index 00000000000..64520974e9e --- /dev/null +++ b/pkg/configs/provider_meta.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import "github.com/hashicorp/hcl/v2" + +// ProviderMeta represents a "provider_meta" block inside a "terraform" block +// in a module or file. +type ProviderMeta struct { + Provider string + Config hcl.Body + + ProviderRange hcl.Range + DeclRange hcl.Range +} + +func decodeProviderMetaBlock(block *hcl.Block) (*ProviderMeta, hcl.Diagnostics) { + // provider_meta must be a static map. We can verify this by attempting to + // evaluate the values. + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil, diags + } + + for _, attr := range attrs { + _, d := attr.Expr.Value(nil) + diags = append(diags, d...) + } + + // verify that the local name is already localized or produce an error. + diags = append(diags, checkProviderNameNormalized(block.Labels[0], block.DefRange)...) + + return &ProviderMeta{ + Provider: block.Labels[0], + ProviderRange: block.LabelRanges[0], + Config: block.Body, + DeclRange: block.DefRange, + }, diags +} diff --git a/pkg/configs/provider_requirements.go b/pkg/configs/provider_requirements.go new file mode 100644 index 00000000000..d2564b0a881 --- /dev/null +++ b/pkg/configs/provider_requirements.go @@ -0,0 +1,251 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +// RequiredProvider represents a declaration of a dependency on a particular +// provider version or source without actually configuring that provider. This +// is used in child modules that expect a provider to be passed in from their +// parent. +type RequiredProvider struct { + Name string + Source string + Type addrs.Provider + Requirement VersionConstraint + DeclRange hcl.Range + Aliases []addrs.LocalProviderConfig +} + +type RequiredProviders struct { + RequiredProviders map[string]*RequiredProvider + DeclRange hcl.Range +} + +func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil, diags + } + + ret := &RequiredProviders{ + RequiredProviders: make(map[string]*RequiredProvider), + DeclRange: block.DefRange, + } + + for name, attr := range attrs { + rp := &RequiredProvider{ + Name: name, + DeclRange: attr.Expr.Range(), + } + + // Look for a single static string, in case we have the legacy version-only + // format in the configuration. + if expr, err := attr.Expr.Value(nil); err == nil && expr.Type().IsPrimitiveType() { + vc, reqDiags := decodeVersionConstraint(attr) + diags = append(diags, reqDiags...) + + pType, err := addrs.ParseProviderPart(rp.Name) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider name", + Detail: err.Error(), + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + rp.Requirement = vc + rp.Type = addrs.ImpliedProviderForUnqualifiedType(pType) + ret.RequiredProviders[name] = rp + + continue + } + + // verify that the local name is already localized or produce an error. 
+ nameDiags := checkProviderNameNormalized(name, attr.Expr.Range()) + if nameDiags.HasErrors() { + diags = append(diags, nameDiags...) + continue + } + + kvs, mapDiags := hcl.ExprMap(attr.Expr) + if mapDiags.HasErrors() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid required_providers object", + Detail: "required_providers entries must be strings or objects.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + LOOP: + for _, kv := range kvs { + key, keyDiags := kv.Key.Value(nil) + if keyDiags.HasErrors() { + diags = append(diags, keyDiags...) + continue + } + + if key.Type() != cty.String { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid Attribute", + Detail: fmt.Sprintf("Invalid attribute value for provider requirement: %#v", key), + Subject: kv.Key.Range().Ptr(), + }) + continue + } + + switch key.AsString() { + case "version": + vc := VersionConstraint{ + DeclRange: attr.Range, + } + + constraint, valDiags := kv.Value.Value(nil) + if valDiags.HasErrors() || !constraint.Type().Equals(cty.String) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "Version must be specified as a string.", + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + constraintStr := constraint.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + vc.Required = constraints + rp.Requirement = vc + + case "source": + source, err := kv.Value.Value(nil) + if err != nil || !source.Type().Equals(cty.String) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid source", + Detail: "Source must be specified as a string.", + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + fqn, sourceDiags := addrs.ParseProviderSourceString(source.AsString()) + if sourceDiags.HasErrors() { + hclDiags := sourceDiags.ToHCL() + // The diagnostics from ParseProviderSourceString don't contain + // source location information because it has no context to compute + // them from, and so we'll add those in quickly here before we + // return. + for _, diag := range hclDiags { + if diag.Subject == nil { + diag.Subject = kv.Value.Range().Ptr() + } + } + diags = append(diags, hclDiags...) + continue + } + + rp.Source = source.AsString() + rp.Type = fqn + + case "configuration_aliases": + exprs, listDiags := hcl.ExprList(kv.Value) + if listDiags.HasErrors() { + diags = append(diags, listDiags...) + continue + } + + for _, expr := range exprs { + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + if travDiags.HasErrors() { + diags = append(diags, travDiags...) 
+ continue + } + + addr, cfgDiags := ParseProviderConfigCompact(traversal) + if cfgDiags.HasErrors() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid configuration_aliases value", + Detail: `Configuration aliases can only contain references to local provider configuration names in the format of provider.alias`, + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + if addr.LocalName != name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid configuration_aliases value", + Detail: fmt.Sprintf(`Configuration aliases must be prefixed with the provider name. Expected %q, but found %q.`, name, addr.LocalName), + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + rp.Aliases = append(rp.Aliases, addr) + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid required_providers object", + Detail: `required_providers objects can only contain "version", "source" and "configuration_aliases" attributes. To configure a provider, use a "provider" block.`, + Subject: kv.Key.Range().Ptr(), + }) + break LOOP + } + + } + + if diags.HasErrors() { + continue + } + + // We can add the required provider when there are no errors. + // If a source was not given, create an implied type. 
+ if rp.Type.IsZero() { + pType, err := addrs.ParseProviderPart(rp.Name) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider name", + Detail: err.Error(), + Subject: attr.Expr.Range().Ptr(), + }) + } else { + rp.Type = addrs.ImpliedProviderForUnqualifiedType(pType) + } + } + + ret.RequiredProviders[rp.Name] = rp + } + + return ret, diags +} diff --git a/pkg/configs/provider_requirements_test.go b/pkg/configs/provider_requirements_test.go new file mode 100644 index 00000000000..1b7d267b215 --- /dev/null +++ b/pkg/configs/provider_requirements_test.go @@ -0,0 +1,354 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +var ( + ignoreUnexported = cmpopts.IgnoreUnexported(version.Constraint{}) + comparer = cmp.Comparer(func(x, y RequiredProvider) bool { + if x.Name != y.Name { + return false + } + if x.Type != y.Type { + return false + } + if x.Source != y.Source { + return false + } + if x.Requirement.Required.String() != y.Requirement.Required.String() { + return false + } + if x.DeclRange != y.DeclRange { + return false + } + return true + }) + blockRange = hcl.Range{ + Filename: "mock.tf", + Start: hcl.Pos{Line: 3, Column: 12, Byte: 27}, + End: hcl.Pos{Line: 3, Column: 19, Byte: 34}, + } + mockRange = hcl.Range{ + Filename: "MockExprLiteral", + } +) + +func TestDecodeRequiredProvidersBlock(t *testing.T) { + tests := map[string]struct { + Block *hcl.Block + Want *RequiredProviders + Error string + }{ + "legacy": { + Block: &hcl.Block{ + Type: "required_providers", + Body: 
hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "default": { + Name: "default", + Expr: hcltest.MockExprLiteral(cty.StringVal("1.0.0")), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{ + "default": { + Name: "default", + Type: addrs.NewDefaultProvider("default"), + Requirement: testVC("1.0.0"), + DeclRange: mockRange, + }, + }, + DeclRange: blockRange, + }, + }, + "provider source": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my-test": { + Name: "my-test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("mycloud/test"), + "version": cty.StringVal("2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{ + "my-test": { + Name: "my-test", + Source: "mycloud/test", + Type: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "mycloud", "test"), + Requirement: testVC("2.0.0"), + DeclRange: mockRange, + }, + }, + DeclRange: blockRange, + }, + }, + "mixed": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "legacy": { + Name: "legacy", + Expr: hcltest.MockExprLiteral(cty.StringVal("1.0.0")), + }, + "my-test": { + Name: "my-test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("mycloud/test"), + "version": cty.StringVal("2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{ + "legacy": { + Name: "legacy", + Type: addrs.NewDefaultProvider("legacy"), + Requirement: testVC("1.0.0"), + DeclRange: mockRange, + }, + "my-test": { + Name: "my-test", + Source: "mycloud/test", + Type: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "mycloud", "test"), + Requirement: 
testVC("2.0.0"), + DeclRange: mockRange, + }, + }, + DeclRange: blockRange, + }, + }, + "version-only block": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "test": { + Name: "test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "version": cty.StringVal("~>2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{ + "test": { + Name: "test", + Type: addrs.NewDefaultProvider("test"), + Requirement: testVC("~>2.0.0"), + DeclRange: mockRange, + }, + }, + DeclRange: blockRange, + }, + }, + "invalid source": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my-test": { + Name: "my-test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("some/invalid/provider/source/test"), + "version": cty.StringVal("~>2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid provider source string", + }, + "invalid localname": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my_test": { + Name: "my_test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "version": cty.StringVal("~>2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid provider local name", + }, + "invalid localname caps": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "MYTEST": { + Name: "MYTEST", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + 
"version": cty.StringVal("~>2.0.0"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid provider local name", + }, + "version constraint error": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my-test": { + Name: "my-test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("mycloud/test"), + "version": cty.StringVal("invalid"), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid version constraint", + }, + "invalid required_providers attribute value": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "test": { + Name: "test", + Expr: hcltest.MockExprLiteral(cty.ListVal([]cty.Value{cty.StringVal("2.0.0")})), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid required_providers object", + }, + "invalid source attribute type": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my-test": { + Name: "my-test", + Expr: hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.DynamicVal, + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid source", + }, + "additional attributes": { + Block: &hcl.Block{ + Type: "required_providers", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "my-test": { + Name: "my-test", + Expr: 
hcltest.MockExprLiteral(cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("mycloud/test"), + "version": cty.StringVal("2.0.0"), + "invalid": cty.BoolVal(true), + })), + }, + }, + }), + DefRange: blockRange, + }, + Want: &RequiredProviders{ + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, + }, + Error: "Invalid required_providers object", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeRequiredProvidersBlock(test.Block) + if diags.HasErrors() { + if test.Error == "" { + t.Fatalf("unexpected error: %v", diags) + } + if gotErr := diags[0].Summary; gotErr != test.Error { + t.Errorf("wrong error, got %q, want %q", gotErr, test.Error) + } + } else if test.Error != "" { + t.Fatalf("expected error") + } + + if !cmp.Equal(got, test.Want, ignoreUnexported, comparer) { + t.Fatalf("wrong result:\n %s", cmp.Diff(got, test.Want, ignoreUnexported, comparer)) + } + }) + } +} + +func testVC(ver string) VersionConstraint { + constraint, _ := version.NewConstraint(ver) + return VersionConstraint{ + Required: constraint, + DeclRange: hcl.Range{}, + } +} diff --git a/pkg/configs/provider_test.go b/pkg/configs/provider_test.go new file mode 100644 index 00000000000..646d2405b36 --- /dev/null +++ b/pkg/configs/provider_test.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "os" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestProviderReservedNames(t *testing.T) { + src, err := os.ReadFile("testdata/invalid-files/provider-reserved.tf") + if err != nil { + t.Fatal(err) + } + parser := testParser(map[string]string{ + "config.tf": string(src), + }) + _, diags := parser.LoadConfigFile("config.tf") + + assertExactDiagnostics(t, diags, []string{ + //TODO: This deprecation warning will be removed in OpenTofu v0.15. + `config.tf:4,13-20: Version constraints inside provider configuration blocks are deprecated; OpenTofu 0.13 and earlier allowed provider version constraints inside the provider configuration block, but that is now deprecated and will be removed in a future version of OpenTofu. To silence this warning, move the provider version constraint into the required_providers block.`, + `config.tf:10,3-8: Reserved argument name in provider block; The provider argument name "count" is reserved for use by OpenTofu in a future version.`, + `config.tf:11,3-13: Reserved argument name in provider block; The provider argument name "depends_on" is reserved for use by OpenTofu in a future version.`, + `config.tf:12,3-11: Reserved argument name in provider block; The provider argument name "for_each" is reserved for use by OpenTofu in a future version.`, + `config.tf:14,3-12: Reserved block type name in provider block; The block type name "lifecycle" is reserved for use by OpenTofu in a future version.`, + `config.tf:15,3-9: Reserved block type name in provider block; The block type name "locals" is reserved for use by OpenTofu in a future version.`, + `config.tf:13,3-9: Reserved argument name in provider block; The provider argument name "source" is reserved for use by OpenTofu in a future version.`, + }) +} + +func TestParseProviderConfigCompact(t 
*testing.T) { + tests := []struct { + Input string + Want addrs.LocalProviderConfig + WantDiag string + }{ + { + `aws`, + addrs.LocalProviderConfig{ + LocalName: "aws", + }, + ``, + }, + { + `aws.foo`, + addrs.LocalProviderConfig{ + LocalName: "aws", + Alias: "foo", + }, + ``, + }, + { + `aws["foo"]`, + addrs.LocalProviderConfig{}, + `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Logf("- %s", diag) + } + return + } + + got, diags := ParseProviderConfigCompact(traversal) + + if test.WantDiag != "" { + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want 1", len(diags)) + } + gotDetail := diags[0].Description().Detail + if gotDetail != test.WantDiag { + t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) + } + return + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want 0", len(diags)) + } + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} + +func TestParseProviderConfigCompactStr(t *testing.T) { + tests := []struct { + Input string + Want addrs.LocalProviderConfig + WantDiag string + }{ + { + `aws`, + addrs.LocalProviderConfig{ + LocalName: "aws", + }, + ``, + }, + { + `aws.foo`, + addrs.LocalProviderConfig{ + LocalName: "aws", + Alias: "foo", + }, + ``, + }, + { + `aws["foo"]`, + addrs.LocalProviderConfig{}, + `The provider type name must either stand alone or be followed by an alias name separated with a dot.`, + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + got, diags := ParseProviderConfigCompactStr(test.Input) + + if test.WantDiag != "" { + if len(diags) != 1 { + t.Fatalf("got %d 
diagnostics; want 1", len(diags)) + } + gotDetail := diags[0].Description().Detail + if gotDetail != test.WantDiag { + t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag) + } + return + } else { + if len(diags) != 0 { + t.Fatalf("got %d diagnostics; want 0", len(diags)) + } + } + + for _, problem := range deep.Equal(got, test.Want) { + t.Error(problem) + } + }) + } +} diff --git a/pkg/configs/provider_validation.go b/pkg/configs/provider_validation.go new file mode 100644 index 00000000000..fe645498fad --- /dev/null +++ b/pkg/configs/provider_validation.go @@ -0,0 +1,798 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// validateProviderConfigsForTests performs the same role as +// validateProviderConfigs except it validates the providers configured within +// test files. +// +// To do this is calls out to validateProviderConfigs for each run block that +// has ConfigUnderTest set. +// +// In addition, for each run block that executes against the main config it +// validates the providers the run block wants to use match the providers +// specified in the main configuration. It does this without reaching out to +// validateProviderConfigs because the main configuration has already been +// validated, and we don't want to redo all the work that happens in that +// function. So, we only validate the providers our test files define match +// the providers required by the main configuration. 
+// +// This function does some fairly controversial conversions into structures +// expected by validateProviderConfigs but since we're just using it for +// validation we'll still get the correct error messages, and we can make the +// declaration ranges line up sensibly so we'll even get good diagnostics. +func validateProviderConfigsForTests(cfg *Config) (diags hcl.Diagnostics) { + + for name, test := range cfg.Module.Tests { + for _, run := range test.Runs { + + if run.ConfigUnderTest == nil { + // Then we're calling out to the main configuration under test. + // + // We just need to make sure that the providers we are setting + // actually match the providers in the configuration. The main + // configuration has already been validated, so we don't need to + // do the whole thing again. + + if len(run.Providers) > 0 { + // This is the easy case, we can just validate that the + // provider types match. + for _, provider := range run.Providers { + + parentType, childType := provider.InParent.providerType, provider.InChild.providerType + if parentType.IsZero() { + parentType = addrs.NewDefaultProvider(provider.InParent.Name) + } + if childType.IsZero() { + childType = addrs.NewDefaultProvider(provider.InChild.Name) + } + + if !childType.Equals(parentType) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Provider type mismatch", + Detail: fmt.Sprintf( + "The local name %q in %s represents provider %q, but %q in the root module represents %q.\n\nThis means the provider definition for %q within %s, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types.", + provider.InParent.Name, name, parentType, provider.InChild.Name, childType, provider.InParent.Name, name), + Subject: provider.InParent.NameRange.Ptr(), + }) + } + } + + // Skip to the next file, we only need to verify the types + // specified here. 
+ continue + } + + // Otherwise, we need to verify that the providers required by + // the configuration match the types defined by our test file. + + for _, requirement := range cfg.Module.ProviderRequirements.RequiredProviders { + if provider, exists := test.Providers[requirement.Name]; exists { + + providerType := provider.providerType + if providerType.IsZero() { + providerType = addrs.NewDefaultProvider(provider.Name) + } + + if !providerType.Equals(requirement.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Provider type mismatch", + Detail: fmt.Sprintf( + "The provider %q in %s represents provider %q, but %q in the root module represents %q.\n\nThis means the provider definition for %q within %s, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types.", + provider.moduleUniqueKey(), name, providerType, requirement.Name, requirement.Type, provider.moduleUniqueKey(), name), + Subject: provider.DeclRange.Ptr(), + }) + } + } + + for _, alias := range requirement.Aliases { + if provider, exists := test.Providers[alias.StringCompact()]; exists { + + providerType := provider.providerType + if providerType.IsZero() { + providerType = addrs.NewDefaultProvider(provider.Name) + } + + if !providerType.Equals(requirement.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Provider type mismatch", + Detail: fmt.Sprintf( + "The provider %q in %s represents provider %q, but %q in the root module represents %q.\n\nThis means the provider definition for %q within %s, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types.", + provider.moduleUniqueKey(), name, providerType, alias.StringCompact(), requirement.Type, provider.moduleUniqueKey(), name), + Subject: provider.DeclRange.Ptr(), + }) + } + } + } + } + + for _, provider := range 
cfg.Module.ProviderConfigs { + + providerType := provider.providerType + if providerType.IsZero() { + providerType = addrs.NewDefaultProvider(provider.Name) + } + + if testProvider, exists := test.Providers[provider.moduleUniqueKey()]; exists { + + testProviderType := testProvider.providerType + if testProviderType.IsZero() { + testProviderType = addrs.NewDefaultProvider(testProvider.Name) + } + + if !providerType.Equals(testProviderType) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Provider type mismatch", + Detail: fmt.Sprintf( + "The provider %q in %s represents provider %q, but %q in the root module represents %q.\n\nThis means the provider definition for %q within %s has been referenced by multiple run blocks and assigned to different provider types.", + testProvider.moduleUniqueKey(), name, testProviderType, provider.moduleUniqueKey(), providerType, testProvider.moduleUniqueKey(), name), + Subject: testProvider.DeclRange.Ptr(), + }) + } + } + } + + } else { + // Then we're executing another module. We'll just call out to + // validateProviderConfigs and let it do the whole thing. + + providers := run.Providers + if len(providers) == 0 { + // If the test run didn't provide us a subset of providers + // to use, we'll build our own. This is so that we can fit + // into the schema expected by validateProviderConfigs. + + matchedProviders := make(map[string]PassedProviderConfig) + + // We'll go over all the requirements in the module first + // and see if we have defined any providers for that + // requirement. If we have, then we'll take not of that. 
+ + for _, requirement := range cfg.Module.ProviderRequirements.RequiredProviders { + + if provider, exists := test.Providers[requirement.Name]; exists { + matchedProviders[requirement.Name] = PassedProviderConfig{ + InChild: &ProviderConfigRef{ + Name: requirement.Name, + NameRange: requirement.DeclRange, + providerType: requirement.Type, + }, + InParent: &ProviderConfigRef{ + Name: provider.Name, + NameRange: provider.NameRange, + Alias: provider.Alias, + AliasRange: provider.AliasRange, + providerType: provider.providerType, + }, + } + } + + // Also, remember to check for any aliases the module + // expects. + + for _, alias := range requirement.Aliases { + key := alias.StringCompact() + + if provider, exists := test.Providers[key]; exists { + matchedProviders[key] = PassedProviderConfig{ + InChild: &ProviderConfigRef{ + Name: requirement.Name, + NameRange: requirement.DeclRange, + Alias: alias.Alias, + AliasRange: requirement.DeclRange.Ptr(), + providerType: requirement.Type, + }, + InParent: &ProviderConfigRef{ + Name: provider.Name, + NameRange: provider.NameRange, + Alias: provider.Alias, + AliasRange: provider.AliasRange, + providerType: provider.providerType, + }, + } + } + + } + + } + + // Next, we'll look at any providers the module has defined + // directly. If we have an equivalent provider in the test + // file then we'll add that in to override it. If the module + // has both built a required providers block and a provider + // block for the same provider, we'll overwrite the one we + // made for the requirement provider. We get more precise + // DeclRange objects from provider blocks so it makes for + // better error messages to use these. 
+ + for _, provider := range cfg.Module.ProviderConfigs { + key := provider.moduleUniqueKey() + + if testProvider, exists := test.Providers[key]; exists { + matchedProviders[key] = PassedProviderConfig{ + InChild: &ProviderConfigRef{ + Name: provider.Name, + NameRange: provider.DeclRange, + Alias: provider.Alias, + AliasRange: provider.DeclRange.Ptr(), + providerType: provider.providerType, + }, + InParent: &ProviderConfigRef{ + Name: testProvider.Name, + NameRange: testProvider.NameRange, + Alias: testProvider.Alias, + AliasRange: testProvider.AliasRange, + providerType: testProvider.providerType, + }, + } + } + } + + // Last thing to do here is add them into the actual + // providers list that is going into the module call below. + for _, provider := range matchedProviders { + providers = append(providers, provider) + } + + } + + // Let's make a little fake module call that we can use to call + // into validateProviderConfigs. + mc := &ModuleCall{ + Name: run.Name, + Providers: providers, + DeclRange: run.Module.DeclRange, + } + + diags = append(diags, validateProviderConfigs(mc, run.ConfigUnderTest, nil)...) + } + } + } + + return diags +} + +// validateProviderConfigs walks the full configuration tree from the root +// module outward, static validation rules to the various combinations of +// provider configuration, required_providers values, and module call providers +// mappings. +// +// To retain compatibility with previous terraform versions, empty "proxy +// provider blocks" are still allowed within modules, though they will +// generate warnings when the configuration is loaded. The new validation +// however will generate an error if a suitable provider configuration is not +// passed in through the module call. +// +// The call argument is the ModuleCall for the provided Config cfg. 
The +// noProviderConfigRange argument is passed down the call stack, indicating +// that the module call, or a parent module call, has used a feature (at the +// specified source location) that precludes providers from being configured at +// all within the module. +func validateProviderConfigs(parentCall *ModuleCall, cfg *Config, noProviderConfigRange *hcl.Range) (diags hcl.Diagnostics) { + mod := cfg.Module + + for name, child := range cfg.Children { + mc := mod.ModuleCalls[name] + childNoProviderConfigRange := noProviderConfigRange + // if the module call has any of count, for_each or depends_on, + // providers are prohibited from being configured in this module, or + // any module beneath this module. + switch { + case mc.Count != nil: + childNoProviderConfigRange = mc.Count.Range().Ptr() + case mc.ForEach != nil: + childNoProviderConfigRange = mc.ForEach.Range().Ptr() + case mc.DependsOn != nil: + if len(mc.DependsOn) > 0 { + childNoProviderConfigRange = mc.DependsOn[0].SourceRange().Ptr() + } else { + // Weird! We'll just use the call itself, then. + childNoProviderConfigRange = mc.DeclRange.Ptr() + } + } + diags = append(diags, validateProviderConfigs(mc, child, childNoProviderConfigRange)...) + } + + // the set of provider configuration names passed into the module, with the + // source range of the provider assignment in the module call. + passedIn := map[string]PassedProviderConfig{} + + // the set of empty configurations that could be proxy configurations, with + // the source range of the empty configuration block. + emptyConfigs := map[string]hcl.Range{} + + // the set of provider with a defined configuration, with the source range + // of the configuration block declaration. + configured := map[string]hcl.Range{} + + // the set of configuration_aliases defined in the required_providers + // block, with the fully qualified provider type. 
+ configAliases := map[string]addrs.AbsProviderConfig{} + + // the set of provider names defined in the required_providers block, and + // their provider types. + localNames := map[string]addrs.Provider{} + + for _, pc := range mod.ProviderConfigs { + name := providerName(pc.Name, pc.Alias) + // Validate the config against an empty schema to see if it's empty. + _, pcConfigDiags := pc.Config.Content(&hcl.BodySchema{}) + if pcConfigDiags.HasErrors() || pc.Version.Required != nil { + configured[name] = pc.DeclRange + } else { + emptyConfigs[name] = pc.DeclRange + } + } + + if mod.ProviderRequirements != nil { + // Track all known local types too to ensure we don't have duplicated + // with different local names. + localTypes := map[string]bool{} + + // check for duplicate requirements of the same type + for _, req := range mod.ProviderRequirements.RequiredProviders { + if localTypes[req.Type.String()] { + // find the last declaration to give a better error + prevDecl := "" + for localName, typ := range localNames { + if typ.Equals(req.Type) { + prevDecl = localName + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Duplicate required provider", + Detail: fmt.Sprintf( + "Provider %s with the local name %q was previously required as %q. A provider can only be required once within required_providers.", + req.Type.ForDisplay(), req.Name, prevDecl, + ), + Subject: &req.DeclRange, + }) + } else if addrs.IsDefaultProvider(req.Type) { + // Now check for possible implied duplicates, where a provider + // block uses a default namespaced provider, but that provider + // was required via a different name. + impliedLocalName := req.Type.Type + // We have to search through the configs for a match, since the keys contains any aliases. 
+ for _, pc := range mod.ProviderConfigs { + if pc.Name == impliedLocalName && req.Name != impliedLocalName { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Duplicate required provider", + Detail: fmt.Sprintf( + "Provider %s with the local name %q was implicitly required via a configuration block as %q. The provider configuration block name must match the name used in required_providers.", + req.Type.ForDisplay(), req.Name, req.Type.Type, + ), + Subject: &req.DeclRange, + }) + break + } + } + } + + localTypes[req.Type.String()] = true + + localNames[req.Name] = req.Type + for _, alias := range req.Aliases { + addr := addrs.AbsProviderConfig{ + Module: cfg.Path, + Provider: req.Type, + Alias: alias.Alias, + } + configAliases[providerName(alias.LocalName, alias.Alias)] = addr + } + } + } + + checkImpliedProviderNames := func(resourceConfigs map[string]*Resource) { + // Now that we have all the provider configs and requirements validated, + // check for any resources which use an implied localname which doesn't + // match that of required_providers + for _, r := range resourceConfigs { + // We're looking for resources with no specific provider reference + if r.ProviderConfigRef != nil { + continue + } + + localName := r.Addr().ImpliedProvider() + + _, err := addrs.ParseProviderPart(localName) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider local name", + Detail: fmt.Sprintf("%q is an invalid implied provider local name: %s", localName, err), + Subject: r.DeclRange.Ptr(), + }) + continue + } + + if _, ok := localNames[localName]; ok { + // OK, this was listed directly in the required_providers + continue + } + + defAddr := addrs.ImpliedProviderForUnqualifiedType(localName) + + // Now make sure we don't have the same provider required under a + // different name. 
+ for prevLocalName, addr := range localNames { + if addr.Equals(defAddr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Duplicate required provider", + Detail: fmt.Sprintf( + "Provider %q was implicitly required via resource %q, but listed in required_providers as %q. Either the local name in required_providers must match the resource name, or the %q provider must be assigned within the resource block.", + defAddr, r.Addr(), prevLocalName, prevLocalName, + ), + Subject: &r.DeclRange, + }) + } + } + } + } + checkImpliedProviderNames(mod.ManagedResources) + checkImpliedProviderNames(mod.DataResources) + + // collect providers passed from the parent + if parentCall != nil { + for _, passed := range parentCall.Providers { + name := providerName(passed.InChild.Name, passed.InChild.Alias) + passedIn[name] = passed + } + } + + parentModuleText := "the root module" + moduleText := "the root module" + if !cfg.Path.IsRoot() { + moduleText = cfg.Path.String() + if parent := cfg.Path.Parent(); !parent.IsRoot() { + // module address are prefixed with `module.` + parentModuleText = parent.String() + } + } + + // Verify that any module calls only refer to named providers, and that + // those providers will have a configuration at runtime. This way we can + // direct users where to add the missing configuration, because the runtime + // error is only "missing provider X". + for _, modCall := range mod.ModuleCalls { + for _, passed := range modCall.Providers { + // aliased providers are handled more strictly, and are never + // inherited, so they are validated within modules further down. + // Skip these checks to prevent redundant diagnostics. + if passed.InParent.Alias != "" { + continue + } + + name := passed.InParent.String() + _, confOK := configured[name] + _, localOK := localNames[name] + _, passedOK := passedIn[name] + + // This name was not declared somewhere within in the + // configuration. 
We ignore empty configs, because they will + // already produce a warning. + if !(confOK || localOK) { + defAddr := addrs.NewDefaultProvider(name) + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "There is no explicit declaration for local provider name %q in %s, so OpenTofu is assuming you mean to pass a configuration for provider %q.\n\nTo clarify your intent and silence this warning, add to %s a required_providers entry named %q with source = %q, or a different source address if appropriate.", + name, moduleText, defAddr.ForDisplay(), + parentModuleText, name, defAddr.ForDisplay(), + ), + Subject: &passed.InParent.NameRange, + }) + continue + } + + // Now we may have named this provider within the module, but + // there won't be a configuration available at runtime if the + // parent module did not pass one in. + if !cfg.Path.IsRoot() && !(confOK || passedOK) { + defAddr := addrs.NewDefaultProvider(name) + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Missing required provider configuration", + Detail: fmt.Sprintf( + "The configuration for %s expects to inherit a configuration for provider %s with local name %q, but %s doesn't pass a configuration under that name.\n\nTo satisfy this requirement, add an entry for %q to the \"providers\" argument in the module %q block.", + moduleText, defAddr.ForDisplay(), name, parentModuleText, + name, parentCall.Name, + ), + Subject: parentCall.DeclRange.Ptr(), + }) + } + } + } + + if cfg.Path.IsRoot() { + // nothing else to do in the root module + return diags + } + + // there cannot be any configurations if no provider config is allowed + if len(configured) > 0 && noProviderConfigRange != nil { + // We report this from the perspective of the use of count, for_each, + // or depends_on rather than from inside the module, because the + // recipient of this message is more likely to be the author of the + 
// calling module (trying to use an older module that hasn't been + // updated yet) than of the called module. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module is incompatible with count, for_each, and depends_on", + Detail: fmt.Sprintf( + "The module at %s is a legacy module which contains its own local provider configurations, and so calls to it may not use the count, for_each, or depends_on arguments.\n\nIf you also control the module %q, consider updating this module to instead expect provider configurations to be passed by its caller.", + cfg.Path, cfg.SourceAddr, + ), + Subject: noProviderConfigRange, + }) + } + + // now check that the user is not attempting to override a config + for name := range configured { + if passed, ok := passedIn[name]; ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override provider configuration", + Detail: fmt.Sprintf( + "The configuration of %s has its own local configuration for %s, and so it cannot accept an overridden configuration provided by %s.", + moduleText, name, parentModuleText, + ), + Subject: &passed.InChild.NameRange, + }) + } + } + + // A declared alias requires either a matching configuration within the + // module, or one must be passed in. 
+ for name, providerAddr := range configAliases { + _, confOk := configured[name] + _, passedOk := passedIn[name] + + if confOk || passedOk { + continue + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required provider configuration", + Detail: fmt.Sprintf( + "The child module requires an additional configuration for provider %s, with the local name %q.\n\nRefer to the module's documentation to understand the intended purpose of this additional provider configuration, and then add an entry for %s in the \"providers\" meta-argument in the module block to choose which provider configuration the module should use for that purpose.", + providerAddr.Provider.ForDisplay(), name, + name, + ), + Subject: &parentCall.DeclRange, + }) + } + + // You cannot pass in a provider that cannot be used + for name, passed := range passedIn { + childTy := passed.InChild.providerType + // get a default type if there was none set + if childTy.IsZero() { + // This means the child module is only using an inferred + // provider type. We allow this but will generate a warning to + // declare provider_requirements below. 
+ childTy = addrs.NewDefaultProvider(passed.InChild.Name) + } + + providerAddr := addrs.AbsProviderConfig{ + Module: cfg.Path, + Provider: childTy, + Alias: passed.InChild.Alias, + } + + localAddr, localName := localNames[name] + if localName { + providerAddr.Provider = localAddr + } + + aliasAddr, configAlias := configAliases[name] + if configAlias { + providerAddr = aliasAddr + } + + _, emptyConfig := emptyConfigs[name] + + if !(localName || configAlias || emptyConfig) { + + // we still allow default configs, so switch to a warning if the incoming provider is a default + if addrs.IsDefaultProvider(providerAddr.Provider) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "There is no explicit declaration for local provider name %q in %s, so OpenTofu is assuming you mean to pass a configuration for %q.\n\nIf you also control the child module, add a required_providers entry named %q with the source address %q.", + name, moduleText, providerAddr.Provider.ForDisplay(), + name, providerAddr.Provider.ForDisplay(), + ), + Subject: &passed.InChild.NameRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reference to undefined provider", + Detail: fmt.Sprintf( + "The child module does not declare any provider requirement with the local name %q.\n\nIf you also control the child module, you can add a required_providers entry named %q with the source address %q to accept this provider configuration.", + name, name, providerAddr.Provider.ForDisplay(), + ), + Subject: &passed.InChild.NameRange, + }) + } + } + + // The provider being passed in must also be of the correct type. + pTy := passed.InParent.providerType + if pTy.IsZero() { + // While we would like to ensure required_providers exists here, + // implied default configuration is still allowed. 
+ pTy = addrs.NewDefaultProvider(passed.InParent.Name) + } + + // use the full address for a nice diagnostic output + parentAddr := addrs.AbsProviderConfig{ + Module: cfg.Parent.Path, + Provider: pTy, + Alias: passed.InParent.Alias, + } + + if cfg.Parent.Module.ProviderRequirements != nil { + req, defined := cfg.Parent.Module.ProviderRequirements.RequiredProviders[name] + if defined { + parentAddr.Provider = req.Type + } + } + + if !providerAddr.Provider.Equals(parentAddr.Provider) { + // If this module declares the same source address for a different + // local name then we'll prefer to suggest changing to match + // the child module's chosen name, assuming that it was the local + // name that was wrong rather than the source address. + var otherLocalName string + for localName, sourceAddr := range localNames { + if sourceAddr.Equals(parentAddr.Provider) { + otherLocalName = localName + break + } + } + + const errSummary = "Provider type mismatch" + if otherLocalName != "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: fmt.Sprintf( + "The assigned configuration is for provider %q, but local name %q in %s represents %q.\n\nTo pass this configuration to the child module, use the local name %q instead.", + parentAddr.Provider.ForDisplay(), passed.InChild.Name, + parentModuleText, providerAddr.Provider.ForDisplay(), + otherLocalName, + ), + Subject: &passed.InChild.NameRange, + }) + } else { + // If there is no declared requirement for the provider the + // caller is trying to pass under any name then we'll instead + // report it as an unsuitable configuration to pass into the + // child module's provider configuration slot. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: fmt.Sprintf( + "The local name %q in %s represents provider %q, but %q in %s represents %q.\n\nEach provider has its own distinct configuration schema and provider types, so this module's %q can be assigned only a configuration for %s, which is not required by %s.", + passed.InParent, parentModuleText, parentAddr.Provider.ForDisplay(), + passed.InChild, moduleText, providerAddr.Provider.ForDisplay(), + passed.InChild, providerAddr.Provider.ForDisplay(), + moduleText, + ), + Subject: passed.InParent.NameRange.Ptr(), + }) + } + } + } + + // Empty configurations are no longer needed. Since the replacement for + // this calls for one entry per provider rather than one entry per + // provider _configuration_, we'll first gather them up by provider + // and then report a single warning for each, whereby we can show a direct + // example of what the replacement should look like. + type ProviderReqSuggestion struct { + SourceAddr addrs.Provider + SourceRanges []hcl.Range + RequiredConfigs []string + AliasCount int + } + providerReqSuggestions := make(map[string]*ProviderReqSuggestion) + for name, src := range emptyConfigs { + providerLocalName := name + if idx := strings.IndexByte(providerLocalName, '.'); idx >= 0 { + providerLocalName = providerLocalName[:idx] + } + + sourceAddr, ok := localNames[name] + if !ok { + sourceAddr = addrs.NewDefaultProvider(providerLocalName) + } + + suggestion := providerReqSuggestions[providerLocalName] + if suggestion == nil { + providerReqSuggestions[providerLocalName] = &ProviderReqSuggestion{ + SourceAddr: sourceAddr, + } + suggestion = providerReqSuggestions[providerLocalName] + } + + if providerLocalName != name { + // It's an aliased provider config, then. 
+ suggestion.AliasCount++ + } + + suggestion.RequiredConfigs = append(suggestion.RequiredConfigs, name) + suggestion.SourceRanges = append(suggestion.SourceRanges, src) + } + for name, suggestion := range providerReqSuggestions { + var buf strings.Builder + + fmt.Fprintf( + &buf, + "Earlier versions of OpenTofu used empty provider blocks (\"proxy provider configurations\") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated.\n\nIf you control this module, you can migrate to the new declaration syntax by removing all of the empty provider %q blocks and then adding or updating an entry like the following to the required_providers block of %s:\n", + name, moduleText, + ) + fmt.Fprintf(&buf, " %s = {\n", name) + fmt.Fprintf(&buf, " source = %q\n", suggestion.SourceAddr.ForDisplay()) + if suggestion.AliasCount > 0 { + // A lexical sort is fine because all of these strings are + // guaranteed to start with the same provider local name, and + // so we're only really sorting by the alias part. + sort.Strings(suggestion.RequiredConfigs) + fmt.Fprintln(&buf, " configuration_aliases = [") + for _, addrStr := range suggestion.RequiredConfigs { + fmt.Fprintf(&buf, " %s,\n", addrStr) + } + fmt.Fprintln(&buf, " ]") + + } + fmt.Fprint(&buf, " }") + + // We're arbitrarily going to just take the one source range that + // sorts earliest here. Multiple should be rare, so this is only to + // ensure that we produce a deterministic result in the edge case. 
+ sort.Slice(suggestion.SourceRanges, func(i, j int) bool { + return suggestion.SourceRanges[i].String() < suggestion.SourceRanges[j].String() + }) + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Redundant empty provider block", + Detail: buf.String(), + Subject: suggestion.SourceRanges[0].Ptr(), + }) + } + + return diags +} + +func providerName(name, alias string) string { + if alias != "" { + name = name + "." + alias + } + return name +} diff --git a/pkg/configs/provisioner.go b/pkg/configs/provisioner.go new file mode 100644 index 00000000000..6d658b080b1 --- /dev/null +++ b/pkg/configs/provisioner.go @@ -0,0 +1,242 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" +) + +// Provisioner represents a "provisioner" block when used within a +// "resource" block in a module or file. 
+type Provisioner struct { + Type string + Config hcl.Body + Connection *Connection + When ProvisionerWhen + OnFailure ProvisionerOnFailure + + DeclRange hcl.Range + TypeRange hcl.Range +} + +func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { + pv := &Provisioner{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + DeclRange: block.DefRange, + When: ProvisionerWhenCreate, + OnFailure: ProvisionerOnFailureFail, + } + + content, config, diags := block.Body.PartialContent(provisionerBlockSchema) + pv.Config = config + + switch pv.Type { + case "chef", "habitat", "puppet", "salt-masterless": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("The \"%s\" provisioner has been removed", pv.Type), + Detail: fmt.Sprintf("The \"%s\" provisioner is deprecated and has been removed from OpenTofu.", pv.Type), + Subject: &pv.TypeRange, + }) + return nil, diags + } + + if attr, exists := content.Attributes["when"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "create": + pv.When = ProvisionerWhenCreate + case "destroy": + pv.When = ProvisionerWhenDestroy + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"when\" keyword", + Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", + Subject: expr.Range().Ptr(), + }) + } + } + + // destroy provisioners can only refer to self + if pv.When == ProvisionerWhenDestroy { + diags = append(diags, onlySelfRefs(config)...) + } + + if attr, exists := content.Attributes["on_failure"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) 
+ + switch hcl.ExprAsKeyword(expr) { + case "continue": + pv.OnFailure = ProvisionerOnFailureContinue + case "fail": + pv.OnFailure = ProvisionerOnFailureFail + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"on_failure\" keyword", + Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + var seenConnection *hcl.Block + var seenEscapeBlock *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "_": + if seenEscapeBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate escaping block", + Detail: fmt.Sprintf( + "The special block type \"_\" can be used to force particular arguments to be interpreted as provisioner-typpe-specific rather than as meta-arguments, but each provisioner block can have only one such block. The first escaping block was at %s.", + seenEscapeBlock.DefRange, + ), + Subject: &block.DefRange, + }) + continue + } + seenEscapeBlock = block + + // When there's an escaping block its content merges with the + // existing config we extracted earlier, so later decoding + // will see a blend of both. + pv.Config = hcl.MergeBodies([]hcl.Body{pv.Config, block.Body}) + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + // destroy provisioners can only refer to self + if pv.When == ProvisionerWhenDestroy { + diags = append(diags, onlySelfRefs(block.Body)...) 
+ } + + pv.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provisioner block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by OpenTofu in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return pv, diags +} + +func onlySelfRefs(body hcl.Body) hcl.Diagnostics { + var diags hcl.Diagnostics + + // Provisioners currently do not use any blocks in their configuration. + // Blocks are likely to remain solely for meta parameters, but in the case + // that blocks are supported for provisioners, we will want to extend this + // to find variables in nested blocks. + attrs, _ := body.JustAttributes() + for _, attr := range attrs { + for _, v := range attr.Expr.Variables() { + valid := false + switch v.RootName() { + case "self", "path", "terraform", "tofu": + valid = true + case "count": + // count must use "index" + if len(v) == 2 { + if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "index" { + valid = true + } + } + + case "each": + if len(v) == 2 { + if t, ok := v[1].(hcl.TraverseAttr); ok && t.Name == "key" { + valid = true + } + } + } + + if !valid { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference from destroy provisioner", + Detail: "Destroy-time provisioners and their connection configurations may only " + + "reference attributes of the related resource, via 'self', 'count.index', " + + "or 'each.key'.\n\nReferences to other resources during the destroy phase " + + "can cause dependency cycles and interact poorly with create_before_destroy.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + } + return diags +} + +// Connection represents a "connection" block when used within either a +// "resource" or 
"provisioner" block in a module or file. +type Connection struct { + Config hcl.Body + + DeclRange hcl.Range +} + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. +type ProvisionerOnFailure int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when"}, + {Name: "on_failure"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "_"}, // meta-argument escaping block + + {Type: "connection"}, + {Type: "lifecycle"}, // reserved for future use + }, +} diff --git a/pkg/configs/provisioner_test.go b/pkg/configs/provisioner_test.go new file mode 100644 index 00000000000..887b1e09250 --- /dev/null +++ b/pkg/configs/provisioner_test.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" +) + +func TestProvisionerBlock_decode(t *testing.T) { + tests := map[string]struct { + input *hcl.Block + want *Provisioner + err string + }{ + "refer terraform.workspace when destroy": { + input: &hcl.Block{ + Type: "provisioner", + Labels: []string{"local-exec"}, + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "when": { + Name: "when", + Expr: hcltest.MockExprTraversalSrc("destroy"), + }, + "command": { + Name: "command", + Expr: hcltest.MockExprTraversalSrc("terraform.workspace"), + }, + }, + }), + DefRange: blockRange, + LabelRanges: []hcl.Range{hcl.Range{}}, + }, + want: &Provisioner{ + Type: "local-exec", + When: ProvisionerWhenDestroy, + OnFailure: ProvisionerOnFailureFail, + DeclRange: blockRange, + }, + }, + "refer tofu.workspace when destroy": { + input: &hcl.Block{ + Type: "provisioner", + Labels: []string{"local-exec"}, + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "when": { + Name: "when", + Expr: hcltest.MockExprTraversalSrc("destroy"), + }, + "command": { + Name: "command", + Expr: hcltest.MockExprTraversalSrc("tofu.workspace"), + }, + }, + }), + DefRange: blockRange, + LabelRanges: []hcl.Range{hcl.Range{}}, + }, + want: &Provisioner{ + Type: "local-exec", + When: ProvisionerWhenDestroy, + OnFailure: ProvisionerOnFailureFail, + DeclRange: blockRange, + }, + }, + "refer unknown.workspace when destroy": { + input: &hcl.Block{ + Type: "provisioner", + Labels: []string{"local-exec"}, + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "when": { + Name: "when", + Expr: hcltest.MockExprTraversalSrc("destroy"), + }, + "command": { + Name: "command", + Expr: hcltest.MockExprTraversalSrc("unknown.workspace"), + }, + }, + }), + DefRange: blockRange, + 
LabelRanges: []hcl.Range{hcl.Range{}}, + }, + want: &Provisioner{ + Type: "local-exec", + When: ProvisionerWhenDestroy, + OnFailure: ProvisionerOnFailureFail, + DeclRange: blockRange, + }, + err: "Invalid reference from destroy provisioner", + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeProvisionerBlock(test.input) + + if diags.HasErrors() { + if test.err == "" { + t.Fatalf("unexpected error: %s", diags.Errs()) + } + if gotErr := diags[0].Summary; gotErr != test.err { + t.Errorf("wrong error, got %q, want %q", gotErr, test.err) + } + } else if test.err != "" { + t.Fatal("expected error") + } + + if !cmp.Equal(got, test.want, cmpopts.IgnoreInterfaces(struct{ hcl.Body }{})) { + t.Fatalf("wrong result: %s", cmp.Diff(got, test.want)) + } + }) + } +} diff --git a/pkg/configs/provisioneronfailure_string.go b/pkg/configs/provisioneronfailure_string.go new file mode 100644 index 00000000000..7ff5a6e00b5 --- /dev/null +++ b/pkg/configs/provisioneronfailure_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ProvisionerOnFailureInvalid-0] + _ = x[ProvisionerOnFailureContinue-1] + _ = x[ProvisionerOnFailureFail-2] +} + +const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" + +var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} + +func (i ProvisionerOnFailure) String() string { + if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { + return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] +} diff --git a/pkg/configs/provisionerwhen_string.go b/pkg/configs/provisionerwhen_string.go new file mode 100644 index 00000000000..9f21b3ac636 --- /dev/null +++ b/pkg/configs/provisionerwhen_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ProvisionerWhenInvalid-0] + _ = x[ProvisionerWhenCreate-1] + _ = x[ProvisionerWhenDestroy-2] +} + +const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" + +var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} + +func (i ProvisionerWhen) String() string { + if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { + return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] +} diff --git a/pkg/configs/removed.go b/pkg/configs/removed.go new file mode 100644 index 00000000000..4d98f27b865 --- /dev/null +++ b/pkg/configs/removed.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Removed represents a removed block in the configuration. +type Removed struct { + From *addrs.RemoveEndpoint + + DeclRange hcl.Range +} + +func decodeRemovedBlock(block *hcl.Block) (*Removed, hcl.Diagnostics) { + var diags hcl.Diagnostics + removed := &Removed{ + DeclRange: block.DefRange, + } + + content, moreDiags := block.Body.Content(removedBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["from"]; exists { + from, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags = append(diags, traversalDiags...) + if !traversalDiags.HasErrors() { + from, fromDiags := addrs.ParseRemoveEndpoint(from) + diags = append(diags, fromDiags.ToHCL()...) + removed.From = from + } + } + + return removed, diags +} + +var removedBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "from", + Required: true, + }, + }, +} diff --git a/pkg/configs/removed_test.go b/pkg/configs/removed_test.go new file mode 100644 index 00000000000..8a419f17a51 --- /dev/null +++ b/pkg/configs/removed_test.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestRemovedBlock_decode(t *testing.T) { + blockRange := hcl.Range{ + Filename: "mock.tf", + Start: hcl.Pos{Line: 3, Column: 12, Byte: 27}, + End: hcl.Pos{Line: 3, Column: 19, Byte: 34}, + } + + foo_expr := hcltest.MockExprTraversalSrc("test_instance.foo") + mod_foo_expr := hcltest.MockExprTraversalSrc("module.foo") + foo_index_expr := hcltest.MockExprTraversalSrc("test_instance.foo[1]") + mod_boop_index_foo_expr := hcltest.MockExprTraversalSrc("module.boop[1].test_instance.foo") + data_foo_expr := hcltest.MockExprTraversalSrc("data.test_instance.foo") + + tests := map[string]struct { + input *hcl.Block + want *Removed + err string + }{ + "success": { + &hcl.Block{ + Type: "removed", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Removed{ + From: mustRemoveEndpointFromExpr(foo_expr), + DeclRange: blockRange, + }, + ``, + }, + "modules": { + &hcl.Block{ + Type: "removed", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: mod_foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Removed{ + From: mustRemoveEndpointFromExpr(mod_foo_expr), + DeclRange: blockRange, + }, + ``, + }, + "error: missing argument": { + &hcl.Block{ + Type: "removed", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{}, + }), + DefRange: blockRange, + }, + &Removed{ + DeclRange: blockRange, + }, + "Missing required argument", + }, + "error: indexed resources": { + &hcl.Block{ + Type: "removed", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: foo_index_expr, + }, + }, + }), + DefRange: blockRange, + 
}, + &Removed{ + DeclRange: blockRange, + }, + "Resource instance address with keys is not allowed", + }, + "error: indexed modules": { + &hcl.Block{ + Type: "removed", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: mod_boop_index_foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Removed{ + DeclRange: blockRange, + }, + "Module instance address with keys is not allowed", + }, + "error: data address": { + &hcl.Block{ + Type: "moved", + Body: hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "from": { + Name: "from", + Expr: data_foo_expr, + }, + }, + }), + DefRange: blockRange, + }, + &Removed{ + DeclRange: blockRange, + }, + "Data source address is not allowed", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got, diags := decodeRemovedBlock(test.input) + + if diags.HasErrors() { + if test.err == "" { + t.Fatalf("unexpected error: %s", diags.Errs()) + } + if gotErr := diags[0].Summary; gotErr != test.err { + t.Errorf("wrong error, got %q, want %q", gotErr, test.err) + } + } else if test.err != "" { + t.Fatal("expected error") + } + + if !cmp.Equal(got, test.want, cmp.AllowUnexported(addrs.MoveEndpoint{})) { + t.Fatalf("wrong result: %s", cmp.Diff(got, test.want)) + } + }) + } +} + +func TestRemovedBlock_inModule(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/valid-modules/removed-blocks", RootModuleCallForTesting()) + if diags.HasErrors() { + t.Errorf("unexpected error: %s", diags.Error()) + } + + var got []string + for _, mc := range mod.Removed { + got = append(got, mc.From.RelSubject.String()) + } + want := []string{ + `test.foo`, + `test.foo`, + `module.a`, + `module.a`, + `test.foo`, + `test.boop`, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong addresses\n%s", diff) + } +} + +func mustRemoveEndpointFromExpr(expr hcl.Expression) *addrs.RemoveEndpoint { + traversal, hcldiags := 
hcl.AbsTraversalForExpr(expr) + if hcldiags.HasErrors() { + panic(hcldiags.Errs()) + } + + ep, diags := addrs.ParseRemoveEndpoint(traversal) + if diags.HasErrors() { + panic(diags.Err()) + } + + return ep +} diff --git a/pkg/configs/resource.go b/pkg/configs/resource.go new file mode 100644 index 00000000000..f246d488a9e --- /dev/null +++ b/pkg/configs/resource.go @@ -0,0 +1,822 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Resource represents a "resource" or "data" block in a module or file. +type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + Provider addrs.Provider + + Preconditions []*CheckRule + Postconditions []*CheckRule + + DependsOn []hcl.Traversal + + TriggersReplacement []hcl.Expression + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + // Container links a scoped resource back up to the resources that contains + // it. This field is referenced during static analysis to check whether any + // references are also made from within the same container. + // + // If this is nil, then this resource is essentially public. + Container Container + + // IsOverridden indicates if the resource is being overridden. 
It's used in + // testing framework to not call the underlying provider. + IsOverridden bool + // OverrideValues are only valid if IsOverridden is set to true. The values + // should be used to compose mock provider response. It is possible to have + // zero-length OverrideValues even if IsOverridden is set to true. + OverrideValues map[string]cty.Value + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. +type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration that +// should be used for this resource. This function returns a default provider +// config addr if an explicit "provider" argument was not provided. +func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { + if r.ProviderConfigRef == nil { + // If no specific "provider" argument is given, we want to look up the + // provider config where the local name matches the implied provider + // from the resource type. This may be different from the resource's + // provider type. + return addrs.LocalProviderConfig{ + LocalName: r.Addr().ImpliedProvider(), + } + } + + return addrs.LocalProviderConfig{ + LocalName: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +// HasCustomConditions returns true if and only if the resource has at least +// one author-specified custom condition. 
+func (r *Resource) HasCustomConditions() bool {
+	return len(r.Postconditions) != 0 || len(r.Preconditions) != 0
+}
+
+// decodeResourceBlock decodes a managed resource ("resource") block into a
+// Resource value. The returned diagnostics may contain errors, in which case
+// the Resource is still returned but may be incomplete.
+func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	r := &Resource{
+		Mode:      addrs.ManagedResourceMode,
+		Type:      block.Labels[0],
+		Name:      block.Labels[1],
+		DeclRange: block.DefRange,
+		TypeRange: block.LabelRanges[0],
+		Managed:   &ManagedResource{},
+	}
+
+	content, remain, moreDiags := block.Body.PartialContent(ResourceBlockSchema)
+	diags = append(diags, moreDiags...)
+	r.Config = remain
+
+	if !hclsyntax.ValidIdentifier(r.Type) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid resource type name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[0],
+		})
+	}
+	if !hclsyntax.ValidIdentifier(r.Name) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid resource name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[1],
+		})
+	}
+
+	if attr, exists := content.Attributes["count"]; exists {
+		r.Count = attr.Expr
+	}
+
+	if attr, exists := content.Attributes["for_each"]; exists {
+		r.ForEach = attr.Expr
+		// Cannot have count and for_each on the same resource block
+		if r.Count != nil {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  `Invalid combination of "count" and "for_each"`,
+				Detail:   `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
+				Subject:  &attr.NameRange,
+			})
+		}
+	}
+
+	if attr, exists := content.Attributes["provider"]; exists {
+		var providerDiags hcl.Diagnostics
+		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
+		diags = append(diags, providerDiags...)
+	}
+
+	if attr, exists := content.Attributes["depends_on"]; exists {
+		deps, depsDiags := decodeDependsOn(attr)
+		diags = append(diags, depsDiags...)
+		r.DependsOn = append(r.DependsOn, deps...)
+	}
+
+	var seenLifecycle *hcl.Block
+	var seenConnection *hcl.Block
+	var seenEscapeBlock *hcl.Block
+	for _, block := range content.Blocks {
+		switch block.Type {
+		case "lifecycle":
+			if seenLifecycle != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate lifecycle block",
+					Detail:   fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
+					Subject:  &block.DefRange,
+				})
+				continue
+			}
+			seenLifecycle = block
+
+			lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
+			diags = append(diags, lcDiags...)
+
+			if attr, exists := lcContent.Attributes["create_before_destroy"]; exists {
+				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy)
+				diags = append(diags, valDiags...)
+				r.Managed.CreateBeforeDestroySet = true
+			}
+
+			if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
+				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy)
+				diags = append(diags, valDiags...)
+				r.Managed.PreventDestroySet = true
+			}
+
+			if attr, exists := lcContent.Attributes["replace_triggered_by"]; exists {
+				exprs, hclDiags := decodeReplaceTriggeredBy(attr.Expr)
+				diags = diags.Extend(hclDiags)
+
+				r.TriggersReplacement = append(r.TriggersReplacement, exprs...)
+			}
+
+			if attr, exists := lcContent.Attributes["ignore_changes"]; exists {
+
+				// ignore_changes can either be a list of relative traversals
+				// or it can be just the keyword "all" to ignore changes to this
+				// resource entirely.
+				//   ignore_changes = [ami, instance_type]
+				//   ignore_changes = all
+				// We also allow two legacy forms for compatibility with earlier
+				// versions:
+				//   ignore_changes = ["ami", "instance_type"]
+				//   ignore_changes = ["*"]
+
+				kw := hcl.ExprAsKeyword(attr.Expr)
+
+				switch {
+				case kw == "all":
+					r.Managed.IgnoreAllChanges = true
+				default:
+					exprs, listDiags := hcl.ExprList(attr.Expr)
+					diags = append(diags, listDiags...)
+
+					var ignoreAllRange hcl.Range
+
+					for _, expr := range exprs {
+
+						// our expr might be the literal string "*", which
+						// we accept as a deprecated way of saying "all".
+						if shimIsIgnoreChangesStar(expr) {
+							r.Managed.IgnoreAllChanges = true
+							ignoreAllRange = expr.Range()
+							diags = append(diags, &hcl.Diagnostic{
+								Severity: hcl.DiagError,
+								Summary:  "Invalid ignore_changes wildcard",
+								Detail:   "The [\"*\"] form of ignore_changes wildcard was deprecated and is now invalid. Use \"ignore_changes = all\" to ignore changes to all attributes.",
+								Subject:  attr.Expr.Range().Ptr(),
+							})
+							continue
+						}
+
+						expr, shimDiags := shimTraversalInString(expr, false)
+						diags = append(diags, shimDiags...)
+
+						traversal, travDiags := hcl.RelTraversalForExpr(expr)
+						diags = append(diags, travDiags...)
+						if len(traversal) != 0 {
+							r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal)
+						}
+					}
+
+					if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 {
+						diags = append(diags, &hcl.Diagnostic{
+							Severity: hcl.DiagError,
+							Summary:  "Invalid ignore_changes ruleset",
+							Detail:   "Cannot mix wildcard string \"*\" with non-wildcard references.",
+							Subject:  &ignoreAllRange,
+							Context:  attr.Expr.Range().Ptr(),
+						})
+					}
+
+				}
+			}
+
+			for _, block := range lcContent.Blocks {
+				switch block.Type {
+				case "precondition", "postcondition":
+					cr, moreDiags := decodeCheckRuleBlock(block, override)
+					diags = append(diags, moreDiags...)
+
+					moreDiags = cr.validateSelfReferences(block.Type, r.Addr())
+					diags = append(diags, moreDiags...)
+
+					switch block.Type {
+					case "precondition":
+						r.Preconditions = append(r.Preconditions, cr)
+					case "postcondition":
+						r.Postconditions = append(r.Postconditions, cr)
+					}
+				default:
+					// The cases above should be exhaustive for all block types
+					// defined in the lifecycle schema, so this shouldn't happen.
+					panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type))
+				}
+			}
+
+		case "connection":
+			if seenConnection != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate connection block",
+					Detail:   fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange),
+					Subject:  &block.DefRange,
+				})
+				continue
+			}
+			seenConnection = block
+
+			r.Managed.Connection = &Connection{
+				Config:    block.Body,
+				DeclRange: block.DefRange,
+			}
+
+		case "provisioner":
+			pv, pvDiags := decodeProvisionerBlock(block)
+			diags = append(diags, pvDiags...)
+			if pv != nil {
+				r.Managed.Provisioners = append(r.Managed.Provisioners, pv)
+			}
+
+		case "_":
+			if seenEscapeBlock != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate escaping block",
+					Detail: fmt.Sprintf(
+						"The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each resource block can have only one such block. The first escaping block was at %s.",
+						seenEscapeBlock.DefRange,
+					),
+					Subject: &block.DefRange,
+				})
+				continue
+			}
+			seenEscapeBlock = block
+
+			// When there's an escaping block its content merges with the
+			// existing config we extracted earlier, so later decoding
+			// will see a blend of both.
+			r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body})
+
+		default:
+			// Any other block types are ones we've reserved for future use,
+			// so they get a generic message.
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Reserved block type name in resource block",
+				Detail:   fmt.Sprintf("The block type name %q is reserved for use by OpenTofu in a future version.", block.Type),
+				Subject:  &block.TypeRange,
+			})
+		}
+	}
+
+	// Now we can validate the connection block references if there are any destroy provisioners.
+	// TODO: should we eliminate standalone connection blocks?
+	if r.Managed.Connection != nil {
+		for _, p := range r.Managed.Provisioners {
+			if p.When == ProvisionerWhenDestroy {
+				diags = append(diags, onlySelfRefs(r.Managed.Connection.Config)...)
+				break
+			}
+		}
+	}
+
+	return r, diags
+}
+
+// decodeDataBlock decodes a data resource ("data") block into a Resource
+// value. When nested is true the block appears inside a check block, where
+// the count/for_each meta-arguments and lifecycle blocks are not permitted.
+func decodeDataBlock(block *hcl.Block, override, nested bool) (*Resource, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	r := &Resource{
+		Mode:      addrs.DataResourceMode,
+		Type:      block.Labels[0],
+		Name:      block.Labels[1],
+		DeclRange: block.DefRange,
+		TypeRange: block.LabelRanges[0],
+	}
+
+	content, remain, moreDiags := block.Body.PartialContent(dataBlockSchema)
+	diags = append(diags, moreDiags...)
+	r.Config = remain
+
+	if !hclsyntax.ValidIdentifier(r.Type) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid data source name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[0],
+		})
+	}
+	if !hclsyntax.ValidIdentifier(r.Name) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid data resource name",
+			Detail:   badIdentifierDetail,
+			Subject:  &block.LabelRanges[1],
+		})
+	}
+
+	if attr, exists := content.Attributes["count"]; exists && !nested {
+		r.Count = attr.Expr
+	} else if exists && nested {
+		// We don't allow count attributes in nested data blocks.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  `Invalid "count" attribute`,
+			Detail:   `The "count" and "for_each" meta-arguments are not supported within nested data blocks.`,
+			Subject:  &attr.NameRange,
+		})
+	}
+
+	if attr, exists := content.Attributes["for_each"]; exists && !nested {
+		r.ForEach = attr.Expr
+		// Cannot have count and for_each on the same data block
+		if r.Count != nil {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  `Invalid combination of "count" and "for_each"`,
+				Detail:   `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
+				Subject:  &attr.NameRange,
+			})
+		}
+	} else if exists && nested {
+		// We don't allow for_each attributes in nested data blocks.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  `Invalid "for_each" attribute`,
+			Detail:   `The "count" and "for_each" meta-arguments are not supported within nested data blocks.`,
+			Subject:  &attr.NameRange,
+		})
+	}
+
+	if attr, exists := content.Attributes["provider"]; exists {
+		var providerDiags hcl.Diagnostics
+		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
+		diags = append(diags, providerDiags...)
+	}
+
+	if attr, exists := content.Attributes["depends_on"]; exists {
+		deps, depsDiags := decodeDependsOn(attr)
+		diags = append(diags, depsDiags...)
+		r.DependsOn = append(r.DependsOn, deps...)
+	}
+
+	var seenEscapeBlock *hcl.Block
+	var seenLifecycle *hcl.Block
+	for _, block := range content.Blocks {
+		switch block.Type {
+
+		case "_":
+			if seenEscapeBlock != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate escaping block",
+					Detail: fmt.Sprintf(
+						"The special block type \"_\" can be used to force particular arguments to be interpreted as resource-type-specific rather than as meta-arguments, but each data block can have only one such block. The first escaping block was at %s.",
+						seenEscapeBlock.DefRange,
+					),
+					Subject: &block.DefRange,
+				})
+				continue
+			}
+			seenEscapeBlock = block
+
+			// When there's an escaping block its content merges with the
+			// existing config we extracted earlier, so later decoding
+			// will see a blend of both.
+			r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body})
+
+		case "lifecycle":
+			if nested {
+				// We don't allow lifecycle arguments in nested data blocks,
+				// the lifecycle is managed by the parent block.
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid lifecycle block",
+					Detail:   `Nested data blocks do not support "lifecycle" blocks as the lifecycle is managed by the containing block.`,
+					Subject:  block.DefRange.Ptr(),
+				})
+			}
+
+			if seenLifecycle != nil {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Duplicate lifecycle block",
+					Detail:   fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
+					Subject:  block.DefRange.Ptr(),
+				})
+				continue
+			}
+			seenLifecycle = block
+
+			lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
+			diags = append(diags, lcDiags...)
+
+			// All of the attributes defined for resource lifecycle are for
+			// managed resources only, so we can emit a common error message
+			// for any given attributes that HCL accepted.
+			for name, attr := range lcContent.Attributes {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid data resource lifecycle argument",
+					Detail:   fmt.Sprintf("The lifecycle argument %q is defined only for managed resources (\"resource\" blocks), and is not valid for data resources.", name),
+					Subject:  attr.NameRange.Ptr(),
+				})
+			}
+
+			for _, block := range lcContent.Blocks {
+				switch block.Type {
+				case "precondition", "postcondition":
+					cr, moreDiags := decodeCheckRuleBlock(block, override)
+					diags = append(diags, moreDiags...)
+
+					moreDiags = cr.validateSelfReferences(block.Type, r.Addr())
+					diags = append(diags, moreDiags...)
+
+					switch block.Type {
+					case "precondition":
+						r.Preconditions = append(r.Preconditions, cr)
+					case "postcondition":
+						r.Postconditions = append(r.Postconditions, cr)
+					}
+				default:
+					// The cases above should be exhaustive for all block types
+					// defined in the lifecycle schema, so this shouldn't happen.
+					panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type))
+				}
+			}
+
+		default:
+			// Any other block types are ones we're reserving for future use,
+			// but don't have any defined meaning today.
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Reserved block type name in data block",
+				Detail:   fmt.Sprintf("The block type name %q is reserved for use by OpenTofu in a future version.", block.Type),
+				Subject:  block.TypeRange.Ptr(),
+			})
+		}
+	}
+
+	return r, diags
+}
+
+// decodeReplaceTriggeredBy decodes and does basic validation of the
+// replace_triggered_by expressions, ensuring they only contain references to
+// a single resource, and the only extra variables are count.index or each.key.
+func decodeReplaceTriggeredBy(expr hcl.Expression) ([]hcl.Expression, hcl.Diagnostics) {
+	// Since we are manually parsing the replace_triggered_by argument, we
+	// need to specially handle json configs, in which case the values will
+	// be json strings rather than hcl. To simplify parsing however we will
+	// decode the individual list elements, rather than the entire expression.
+	isJSON := hcljson.IsJSONExpression(expr)
+
+	exprs, diags := hcl.ExprList(expr)
+
+	for i, expr := range exprs {
+		if isJSON {
+			// In JSON syntax each element arrives as a quoted string; convert
+			// it to a native HCL expression before analyzing references.
+			var convertDiags hcl.Diagnostics
+			expr, convertDiags = hcl2shim.ConvertJSONExpressionToHCL(expr)
+			diags = diags.Extend(convertDiags)
+			if diags.HasErrors() {
+				continue
+			}
+			// make sure to swap out the expression we're returning too
+			exprs[i] = expr
+		}
+
+		// Re-report reference diagnostics as HCL diagnostics anchored to this
+		// element's source range, preserving the warning/error severity.
+		refs, refDiags := lang.ReferencesInExpr(addrs.ParseRef, expr)
+		for _, diag := range refDiags {
+			severity := hcl.DiagError
+			if diag.Severity() == tfdiags.Warning {
+				severity = hcl.DiagWarning
+			}
+
+			desc := diag.Description()
+
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: severity,
+				Summary:  desc.Summary,
+				Detail:   desc.Detail,
+				Subject:  expr.Range().Ptr(),
+			})
+		}
+
+		if refDiags.HasErrors() {
+			continue
+		}
+
+		// Each element must reference exactly one resource; count.index and
+		// each.key are the only other references permitted alongside it.
+		resourceCount := 0
+		for _, ref := range refs {
+			switch sub := ref.Subject.(type) {
+			case addrs.Resource, addrs.ResourceInstance:
+				resourceCount++
+
+			case addrs.ForEachAttr:
+				if sub.Name != "key" {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Invalid each reference in replace_triggered_by expression",
+						Detail:   "Only each.key may be used in replace_triggered_by.",
+						Subject:  expr.Range().Ptr(),
+					})
+				}
+			case addrs.CountAttr:
+				if sub.Name != "index" {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Invalid count reference in replace_triggered_by expression",
+						Detail:   "Only count.index may be used in replace_triggered_by.",
+						Subject:  expr.Range().Ptr(),
+					})
+				}
+			default:
+				// everything else should be simple traversals
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid reference in replace_triggered_by expression",
+					Detail:   "Only resources, count.index, and each.key may be used in replace_triggered_by.",
+					Subject:  expr.Range().Ptr(),
+				})
+			}
+		}
+
+		switch {
+		case resourceCount == 0:
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid replace_triggered_by expression",
+				Detail:   "Missing resource reference in replace_triggered_by expression.",
+				Subject:  expr.Range().Ptr(),
+			})
+		case resourceCount > 1:
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid replace_triggered_by expression",
+				Detail:   "Multiple resource references in replace_triggered_by expression.",
+				Subject:  expr.Range().Ptr(),
+			})
+		}
+	}
+	return exprs, diags
+}
+
+// ProviderConfigRef is a reference from a resource or data block to a
+// provider configuration, as given in a "provider" meta-argument, retaining
+// the source ranges of its parts for diagnostics.
+type ProviderConfigRef struct {
+	Name       string
+	NameRange  hcl.Range
+	Alias      string
+	AliasRange *hcl.Range // nil if alias not set
+
+	// TODO: this may not be set in some cases, so it is not yet suitable for
+	// use outside of this package. We currently only use it for internal
+	// validation, but once we verify that this can be set in all cases, we can
+	// export this so providers don't need to be re-resolved.
+	// This same field is also added to the Provider struct.
+	providerType addrs.Provider
+}
+
+// decodeProviderConfigRef decodes an expression of the form "name" or
+// "name.alias" into a ProviderConfigRef. argName is the name of the argument
+// being decoded (e.g. "provider"), used only in error messages.
+func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	var shimDiags hcl.Diagnostics
+	expr, shimDiags = shimTraversalInString(expr, false)
+	diags = append(diags, shimDiags...)
+
+	traversal, travDiags := hcl.AbsTraversalForExpr(expr)
+
+	// AbsTraversalForExpr produces only generic errors, so we'll discard
+	// the errors given and produce our own with extra context. If we didn't
+	// get any errors then we might still have warnings, though.
+	if !travDiags.HasErrors() {
+		diags = append(diags, travDiags...)
+	}
+
+	if len(traversal) < 1 || len(traversal) > 2 {
+		// A provider reference was given as a string literal in the legacy
+		// configuration language and there are lots of examples out there
+		// showing that usage, so we'll sniff for that situation here and
+		// produce a specialized error message for it to help users find
+		// the new correct form.
+		if exprIsNativeQuotedString(expr) {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid provider configuration reference",
+				Detail:   "A provider configuration reference must not be given in quotes.",
+				Subject:  expr.Range().Ptr(),
+			})
+			return nil, diags
+		}
+
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration reference",
+			Detail:   fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName),
+			Subject:  expr.Range().Ptr(),
+		})
+		return nil, diags
+	}
+
+	// verify that the provider local name is normalized
+	name := traversal.RootName()
+	nameDiags := checkProviderNameNormalized(name, traversal[0].SourceRange())
+	diags = append(diags, nameDiags...)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	ret := &ProviderConfigRef{
+		Name:      name,
+		NameRange: traversal[0].SourceRange(),
+	}
+
+	if len(traversal) > 1 {
+		aliasStep, ok := traversal[1].(hcl.TraverseAttr)
+		if !ok {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid provider configuration reference",
+				Detail:   "Provider name must either stand alone or be followed by a period and then a configuration alias.",
+				Subject:  traversal[1].SourceRange().Ptr(),
+			})
+			return ret, diags
+		}
+
+		ret.Alias = aliasStep.Name
+		ret.AliasRange = aliasStep.SourceRange().Ptr()
+	}
+
+	return ret, diags
+}
+
+// Addr returns the provider config address corresponding to the receiving
+// config reference.
+// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. +func (r *ProviderConfigRef) Addr() addrs.LocalProviderConfig { + return addrs.LocalProviderConfig{ + LocalName: r.Name, + Alias: r.Alias, + } +} + +func (r *ProviderConfigRef) String() string { + if r == nil { + return "" + } + if r.Alias != "" { + return fmt.Sprintf("%s.%s", r.Name, r.Alias) + } + return r.Name +} + +var commonResourceAttributes = []hcl.AttributeSchema{ + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "provider", + }, + { + Name: "depends_on", + }, +} + +// ResourceBlockSchema is the schema for a resource or data resource type within +// OpenTofu. +// +// This schema is public as it is required elsewhere in order to validate and +// use generated config. +var ResourceBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "locals"}, // reserved for future use + {Type: "lifecycle"}, + {Type: "connection"}, + {Type: "provisioner", LabelNames: []string{"type"}}, + {Type: "_"}, // meta-argument escaping block + }, +} + +var dataBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "lifecycle"}, + {Type: "locals"}, // reserved for future use + {Type: "_"}, // meta-argument escaping block + }, +} + +var resourceLifecycleBlockSchema = &hcl.BodySchema{ + // We tell HCL that these elements are all valid for both "resource" + // and "data" lifecycle blocks, but the rules are actually more restrictive + // than that. We deal with that after decoding so that we can return + // more specific error messages than HCL would typically return itself. 
+ Attributes: []hcl.AttributeSchema{ + { + Name: "create_before_destroy", + }, + { + Name: "prevent_destroy", + }, + { + Name: "ignore_changes", + }, + { + Name: "replace_triggered_by", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, +} diff --git a/pkg/configs/shim.go b/pkg/configs/shim.go new file mode 100644 index 00000000000..adeef254432 --- /dev/null +++ b/pkg/configs/shim.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" +) + +// These were all moved to the hcl2shim package, but still have uses referenced from this package +// TODO Call sites through opentofu to these functions should be migrated to hcl2shim eventually and this file removed +func MergeBodies(base, override hcl.Body) hcl.Body { + return hcl2shim.MergeBodies(base, override) +} + +func exprIsNativeQuotedString(expr hcl.Expression) bool { + return hcl2shim.ExprIsNativeQuotedString(expr) +} + +func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { + return hcl2shim.SchemaForOverrides(schema) +} + +func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return hcl2shim.SynthBody(filename, values) +} diff --git a/pkg/configs/state_encryption.go b/pkg/configs/state_encryption.go new file mode 100644 index 00000000000..e74080fae1c --- /dev/null +++ b/pkg/configs/state_encryption.go @@ -0,0 +1,16 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configs + +import "github.com/hashicorp/hcl/v2" + +type StateEncryption struct { + Type string + Config hcl.Body + + TypeRange hcl.Range + DeclRange hcl.Range +} diff --git a/pkg/configs/static_evaluator.go b/pkg/configs/static_evaluator.go new file mode 100644 index 00000000000..b127f4dda2c --- /dev/null +++ b/pkg/configs/static_evaluator.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/zclconf/go-cty/cty" +) + +// StaticIdentifier holds a Referencable item and where it was declared +type StaticIdentifier struct { + Module addrs.Module + Subject string + DeclRange hcl.Range +} + +func (ref StaticIdentifier) String() string { + val := ref.Subject + if len(ref.Module) != 0 { + val = ref.Module.String() + ":" + val + } + return val +} + +type StaticModuleVariables func(v *Variable) (cty.Value, hcl.Diagnostics) + +// StaticModuleCall contains the information required to call a given module +type StaticModuleCall struct { + addr addrs.Module + vars StaticModuleVariables + rootPath string + workspace string +} + +func NewStaticModuleCall(addr addrs.Module, vars StaticModuleVariables, rootPath string, workspace string) StaticModuleCall { + return StaticModuleCall{ + addr: addr, + vars: vars, + rootPath: rootPath, + workspace: workspace, + } +} + +func (s StaticModuleCall) WithVariables(vars StaticModuleVariables) StaticModuleCall { + return StaticModuleCall{ + addr: s.addr, + vars: vars, + rootPath: s.rootPath, + workspace: s.workspace, + } +} + +// only used in testing +func RootModuleCallForTesting() StaticModuleCall { + return NewStaticModuleCall(addrs.RootModule, 
func(_ *Variable) (cty.Value, hcl.Diagnostics) { + panic("Variables have not been configured for this test!") + }, "", "") +} + +// A static evaluator contains the information required to build a EvalContext +// which only understands "static" (non-state) data. Internally, it relies +// on staticData +type StaticEvaluator struct { + call StaticModuleCall + cfg *Module +} + +// Creates a static evaluator based from the given module and module call +func NewStaticEvaluator(mod *Module, call StaticModuleCall) *StaticEvaluator { + return &StaticEvaluator{ + call: call, + cfg: mod, + } +} + +func (s *StaticEvaluator) scope(ident StaticIdentifier) *lang.Scope { + return newStaticScope(s, ident) +} + +func (s StaticEvaluator) Evaluate(expr hcl.Expression, ident StaticIdentifier) (cty.Value, hcl.Diagnostics) { + val, diags := s.scope(ident).EvalExpr(expr, cty.DynamicPseudoType) + return val, diags.ToHCL() +} + +func (s StaticEvaluator) DecodeExpression(expr hcl.Expression, ident StaticIdentifier, val any) hcl.Diagnostics { + var diags hcl.Diagnostics + + refs, refsDiags := lang.ReferencesInExpr(addrs.ParseRef, expr) + diags = append(diags, refsDiags.ToHCL()...) + if diags.HasErrors() { + return diags + } + + ctx, ctxDiags := s.scope(ident).EvalContext(refs) + diags = append(diags, ctxDiags.ToHCL()...) + if diags.HasErrors() { + return diags + } + + return gohcl.DecodeExpression(expr, ctx, val) +} + +func (s StaticEvaluator) DecodeBlock(body hcl.Body, spec hcldec.Spec, ident StaticIdentifier) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + refs, refsDiags := lang.References(addrs.ParseRef, hcldec.Variables(body, spec)) + diags = append(diags, refsDiags.ToHCL()...) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + + ctx, ctxDiags := s.scope(ident).EvalContext(refs) + diags = append(diags, ctxDiags.ToHCL()...) 
+ if diags.HasErrors() { + return cty.DynamicVal, diags + } + + val, valDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, valDiags...) + return val, diags +} + +func (s StaticEvaluator) EvalContext(ident StaticIdentifier, refs []*addrs.Reference) (*hcl.EvalContext, hcl.Diagnostics) { + return s.EvalContextWithParent(nil, ident, refs) +} + +func (s StaticEvaluator) EvalContextWithParent(parent *hcl.EvalContext, ident StaticIdentifier, refs []*addrs.Reference) (*hcl.EvalContext, hcl.Diagnostics) { + evalCtx, diags := s.scope(ident).EvalContextWithParent(parent, refs) + return evalCtx, diags.ToHCL() +} diff --git a/pkg/configs/static_evaluator_test.go b/pkg/configs/static_evaluator_test.go new file mode 100644 index 00000000000..bc111d0900b --- /dev/null +++ b/pkg/configs/static_evaluator_test.go @@ -0,0 +1,397 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// This exercises most of the logic in StaticEvaluator and staticScopeData +// +//nolint:cyclop // it's a test +func TestStaticEvaluator_Evaluate(t *testing.T) { + // Synthetic file for building test components + testData := ` +variable "str" { + type = string +} +variable "str_map" { + type = map(string) +} +variable "str_def" { + type = string + default = "sane default" +} +variable "str_map_def" { + type = map(string) + default = { + keyA = "A" + } +} +# Should/can variable checks be performed during static evaluation? 
+ +locals { + # Simple static cases + static = "static" + static_ref = local.static + static_fn = md5(local.static) + path_root = path.root + path_module = path.module + + # Variable References with Defaults + var_str_def_ref = var.str_def + var_map_def_access = var.str_map_def["keyA"] + + # Variable References without Defaults + var_str_ref = var.str + var_map_access = var.str_map["keyA"] + + # Bad References + invalid_ref = invalid.attribute + unavailable_ref = foo.bar.attribute + + # Circular References + circular = local.circular + circular_ref = local.circular + circular_a = local.circular_b + circular_b = local.circular_a + + # Dependency chain + ref_a = var.str + ref_b = local.ref_a + ref_c = local.ref_b + + # Missing References + local_missing = local.missing + var_missing = var.missing + + # Terraform + ws = terraform.workspace + + # Functions + func = md5("my-string") + missing_func = missing_fn("my-string") + provider_func = provider::type::fn("my-string") +} + +resource "foo" "bar" {} + ` + + parser := testParser(map[string]string{"eval.tf": testData}) + + file, fileDiags := parser.LoadConfigFile("eval.tf") + if fileDiags.HasErrors() { + t.Fatal(fileDiags) + } + + dummyIdentifier := StaticIdentifier{Subject: "local.test"} + + t.Run("Empty Eval", func(t *testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + emptyEval := StaticEvaluator{} + + // Expr with no traversals shouldn't access any fields + value, diags := emptyEval.Evaluate(mod.Locals["static"].Expr, dummyIdentifier) + if diags.HasErrors() { + t.Error(diags) + } + if value.AsString() != "static" { + t.Errorf("Expected %s got %s", "static", value.AsString()) + } + + // Expr with traversals should panic, indicating a programming error + defer func() { + r := recover() + if r == nil { + t.Fatalf("should panic") + } + }() + _, _ = emptyEval.Evaluate(mod.Locals["static_ref"].Expr, dummyIdentifier) + }) + + t.Run("Simple static cases", func(t 
*testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + locals := []struct { + ident string + value string + }{ + {"static", "static"}, + {"static_ref", "static"}, + {"static_fn", "a81259cef8e959c624df1d456e5d3297"}, + {"path_root", ""}, + {"path_module", "dir"}, + } + for _, local := range locals { + t.Run(local.ident, func(t *testing.T) { + value, diags := eval.Evaluate(mod.Locals[local.ident].Expr, dummyIdentifier) + if diags.HasErrors() { + t.Error(diags) + } + if value.AsString() != local.value { + t.Errorf("Expected %s got %s", local.value, value.AsString()) + } + }) + } + }) + + t.Run("Valid Variables", func(t *testing.T) { + input := map[string]cty.Value{ + "str": cty.StringVal("vara"), + "str_map": cty.MapVal(map[string]cty.Value{"keyA": cty.StringVal("mapa")}), + } + call := NewStaticModuleCall(nil, func(v *Variable) (cty.Value, hcl.Diagnostics) { + if in, ok := input[v.Name]; ok { + return in, nil + } + return v.Default, nil + }, "", "") + mod, _ := NewModule([]*File{file}, nil, call, "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, call) + + locals := []struct { + ident string + value string + }{ + {"var_str_def_ref", "sane default"}, + {"var_map_def_access", "A"}, + {"var_str_ref", "vara"}, + {"var_map_access", "mapa"}, + } + for _, local := range locals { + t.Run(local.ident, func(t *testing.T) { + value, diags := eval.Evaluate(mod.Locals[local.ident].Expr, dummyIdentifier) + if diags.HasErrors() { + t.Error(diags) + } + if value.AsString() != local.value { + t.Errorf("Expected %s got %s", local.value, value.AsString()) + } + }) + } + }) + + t.Run("Bad References", func(t *testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + locals := []struct { + ident string + diag string + }{ + {"invalid_ref", 
"eval.tf:37,16-33: Dynamic value in static context; Unable to use invalid.attribute in static context, which is required by local.invalid_ref"}, + {"unavailable_ref", "eval.tf:38,20-27: Dynamic value in static context; Unable to use foo.bar in static context, which is required by local.unavailable_ref"}, + } + for _, local := range locals { + t.Run(local.ident, func(t *testing.T) { + badref := mod.Locals[local.ident] + _, diags := eval.Evaluate(badref.Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", badref.Name), DeclRange: badref.DeclRange}) + assertExactDiagnostics(t, diags, []string{local.diag}) + }) + } + }) + + t.Run("Circular References", func(t *testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + locals := []struct { + ident string + diags []string + }{ + {"circular", []string{ + "eval.tf:41,2-27: Circular reference; local.circular is self referential", + }}, + {"circular_ref", []string{ + "eval.tf:41,2-27: Circular reference; local.circular is self referential", + "eval.tf:42,2-31: Unable to compute static value; local.circular_ref depends on local.circular which is not available", + }}, + {"circular_a", []string{ + "eval.tf:43,2-31: Unable to compute static value; local.circular_a depends on local.circular_b which is not available", + "eval.tf:43,2-31: Circular reference; local.circular_a is self referential", + }}, + } + for _, local := range locals { + t.Run(local.ident, func(t *testing.T) { + badref := mod.Locals[local.ident] + _, diags := eval.Evaluate(badref.Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", badref.Name), DeclRange: badref.DeclRange}) + assertExactDiagnostics(t, diags, local.diags) + }) + } + }) + + t.Run("Dependency chain", func(t *testing.T) { + call := NewStaticModuleCall(nil, func(v *Variable) (cty.Value, hcl.Diagnostics) { + return cty.DynamicVal, hcl.Diagnostics{&hcl.Diagnostic{ + Severity: 
hcl.DiagError, + Summary: "Variable value not provided", + Detail: fmt.Sprintf("var.%s not included", v.Name), + Subject: v.DeclRange.Ptr(), + }} + }, "", "") + mod, _ := NewModule([]*File{file}, nil, call, "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, call) + + badref := mod.Locals["ref_c"] + _, diags := eval.Evaluate(badref.Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", badref.Name), DeclRange: badref.DeclRange}) + assertExactDiagnostics(t, diags, []string{ + "eval.tf:2,1-15: Variable value not provided; var.str not included", + "eval.tf:47,2-17: Unable to compute static value; local.ref_a depends on var.str which is not available", + "eval.tf:48,2-21: Unable to compute static value; local.ref_b depends on local.ref_a which is not available", + "eval.tf:49,2-21: Unable to compute static value; local.ref_c depends on local.ref_b which is not available", + }) + }) + + t.Run("Missing References", func(t *testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + locals := []struct { + ident string + diag string + }{ + {"local_missing", "eval.tf:52,18-31: Undefined local; Undefined local local.missing"}, + {"var_missing", "eval.tf:53,16-27: Undefined variable; Undefined variable var.missing"}, + } + for _, local := range locals { + t.Run(local.ident, func(t *testing.T) { + badref := mod.Locals[local.ident] + _, diags := eval.Evaluate(badref.Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", badref.Name), DeclRange: badref.DeclRange}) + assertExactDiagnostics(t, diags, []string{local.diag}) + }) + } + }) + + t.Run("Workspace", func(t *testing.T) { + call := NewStaticModuleCall(nil, nil, "", "my-workspace") + mod, _ := NewModule([]*File{file}, nil, call, "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, call) + + value, diags := eval.Evaluate(mod.Locals["ws"].Expr, dummyIdentifier) + if diags.HasErrors() { + 
t.Error(diags) + } + if value.AsString() != "my-workspace" { + t.Errorf("Expected %s got %s", "my-workspace", value.AsString()) + } + }) + + t.Run("Functions", func(t *testing.T) { + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + value, diags := eval.Evaluate(mod.Locals["func"].Expr, dummyIdentifier) + if diags.HasErrors() { + t.Error(diags) + } + if value.AsString() != "f887f41a53a46e2d40a3f8f86cacaaa2" { + t.Errorf("Expected %s got %s", "f887f41a53a46e2d40a3f8f86cacaaa2", value.AsString()) + } + + _, diags = eval.Evaluate(mod.Locals["missing_func"].Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", mod.Locals["missing_func"].Name), DeclRange: mod.Locals["missing_func"].DeclRange}) + assertExactDiagnostics(t, diags, []string{`eval.tf:60,17-27: Call to unknown function; There is no function named "missing_fn".`}) + _, diags = eval.Evaluate(mod.Locals["provider_func"].Expr, StaticIdentifier{Subject: fmt.Sprintf("local.%s", mod.Locals["provider_func"].Name), DeclRange: mod.Locals["provider_func"].DeclRange}) + assertExactDiagnostics(t, diags, []string{`eval.tf:61,18-36: Provider function in static context; Unable to use provider::type::fn in static context, which is required by local.provider_func`}) + }) +} + +func TestStaticEvaluator_DecodeExpression(t *testing.T) { + dummyIdentifier := StaticIdentifier{Subject: "local.test"} + parser := testParser(map[string]string{"eval.tf": ""}) + file, fileDiags := parser.LoadConfigFile("eval.tf") + if fileDiags.HasErrors() { + t.Fatal(fileDiags) + } + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + eval := NewStaticEvaluator(mod, RootModuleCallForTesting()) + + cases := []struct { + expr string + diags []string + }{{ + expr: `"static"`, + }, { + expr: `count`, + diags: []string{`eval.tf:1,1-6: Invalid reference; The "count" object cannot be accessed directly. 
Instead, access one of its attributes.`}, + }, { + expr: `module.foo.bar`, + diags: []string{`eval.tf:1,1-15: Module output not supported in static context; Unable to use module.foo.bar in static context, which is required by local.test`}, + }} + + for _, tc := range cases { + t.Run(tc.expr, func(t *testing.T) { + expr, _ := hclsyntax.ParseExpression([]byte(tc.expr), "eval.tf", hcl.InitialPos) + var str string + diags := eval.DecodeExpression(expr, dummyIdentifier, &str) + assertExactDiagnostics(t, diags, tc.diags) + }) + } +} +func TestStaticEvaluator_DecodeBlock(t *testing.T) { + cases := []struct { + ident string + body string + diags []string + }{{ + ident: "valid", + body: ` +locals { + static = "static" +} + +terraform { + backend "valid" { + thing = local.static + } +}`, + }, { + ident: "badref", + body: ` +terraform { + backend "badref" { + thing = count + } +}`, + diags: []string{`eval.tf:4,11-16: Invalid reference; The "count" object cannot be accessed directly. Instead, access one of its attributes.`}, + }, { + ident: "badeval", + body: ` +terraform { + backend "badeval" { + thing = module.foo.bar + } +}`, + diags: []string{`eval.tf:4,11-25: Module output not supported in static context; Unable to use module.foo.bar in static context, which is required by backend.badeval`}, + }} + + for _, tc := range cases { + t.Run(tc.ident, func(t *testing.T) { + parser := testParser(map[string]string{"eval.tf": tc.body}) + file, fileDiags := parser.LoadConfigFile("eval.tf") + if fileDiags.HasErrors() { + t.Fatal(fileDiags) + } + + mod, _ := NewModule([]*File{file}, nil, RootModuleCallForTesting(), "dir", SelectiveLoadAll) + _, diags := mod.Backend.Decode(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "thing": &configschema.Attribute{ + Type: cty.String, + }, + }, + }) + + assertExactDiagnostics(t, diags, tc.diags) + }) + } +} diff --git a/pkg/configs/static_scope.go b/pkg/configs/static_scope.go new file mode 100644 index 
00000000000..25d2d283939 --- /dev/null +++ b/pkg/configs/static_scope.go @@ -0,0 +1,268 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/didyoumean" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// newStaticScope creates a lang.Scope that's backed by the static view of the module represented by the StaticEvaluator +func newStaticScope(eval *StaticEvaluator, stack ...StaticIdentifier) *lang.Scope { + return &lang.Scope{ + Data: staticScopeData{eval, stack}, + ParseRef: addrs.ParseRef, + BaseDir: ".", // Always current working directory for now. (same as Evaluator.Scope()) + PureOnly: false, + ConsoleMode: false, + } +} + +// This structure represents the data required to evaluate a specific identifier reference (top of the stack) +// It is used by lang.Scope to link the given StaticEvaluator data to addrs.References in the current scope. 
+type staticScopeData struct { + eval *StaticEvaluator + stack []StaticIdentifier +} + +// staticScopeData must implement lang.Data +var _ lang.Data = (*staticScopeData)(nil) + +// Creates a nested scope to evaluate nested references +func (s staticScopeData) scope(ident StaticIdentifier) (*lang.Scope, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + for _, frame := range s.stack { + if frame.String() == ident.String() { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Circular reference", + Detail: fmt.Sprintf("%s is self referential", ident.String()), // TODO use stack in error message + Subject: ident.DeclRange.Ptr(), + }) + } + } + return newStaticScope(s.eval, append(s.stack, ident)...), diags +} + +// If an error occurs when resolving a dependent value, we need to add additional context to the diagnostics +func (s staticScopeData) enhanceDiagnostics(ident StaticIdentifier, diags tfdiags.Diagnostics) tfdiags.Diagnostics { + if diags.HasErrors() { + top := s.stack[len(s.stack)-1] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to compute static value", + Detail: fmt.Sprintf("%s depends on %s which is not available", top, ident.String()), + Subject: top.DeclRange.Ptr(), + }) + } + return diags +} + +// Early check to only allow references we expect in a static context +func (s staticScopeData) StaticValidateReferences(refs []*addrs.Reference, _ addrs.Referenceable, _ addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + top := s.stack[len(s.stack)-1] + for _, ref := range refs { + switch subject := ref.Subject.(type) { + case addrs.LocalValue: + continue + case addrs.InputVariable: + continue + case addrs.PathAttr: + continue + case addrs.TerraformAttr: + continue + case addrs.ModuleCallInstanceOutput: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module output not supported in static context", + Detail: fmt.Sprintf("Unable to 
use %s in static context, which is required by %s", subject.String(), top.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + case addrs.ProviderFunction: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Provider function in static context", + Detail: fmt.Sprintf("Unable to use %s in static context, which is required by %s", subject.String(), top.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Dynamic value in static context", + Detail: fmt.Sprintf("Unable to use %s in static context, which is required by %s", subject.String(), top.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + return diags +} + +func (s staticScopeData) GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} + +func (s staticScopeData) GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} + +func (s staticScopeData) GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} + +func (s staticScopeData) GetLocalValue(ident addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + local, ok := s.eval.cfg.Locals[ident.Name] + if !ok { + return cty.DynamicVal, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Undefined local", + Detail: fmt.Sprintf("Undefined local %s", ident.String()), + Subject: rng.ToHCL().Ptr(), + }) + } + + id := StaticIdentifier{ + Module: s.eval.call.addr, + Subject: fmt.Sprintf("local.%s", local.Name), + DeclRange: local.DeclRange, + } + + scope, scopeDiags := s.scope(id) + diags = diags.Append(scopeDiags) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + + val, valDiags := scope.EvalExpr(local.Expr, 
cty.DynamicPseudoType) + return val, s.enhanceDiagnostics(id, diags.Append(valDiags)) +} + +func (s staticScopeData) GetModule(addrs.ModuleCall, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} + +func (s staticScopeData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // TODO this is copied and trimed down from tofu/evaluate.go GetPathAttr. Ideally this should be refactored to a common location. + var diags tfdiags.Diagnostics + switch addr.Name { + case "cwd": + wd, err := os.Getwd() + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + wd, err = filepath.Abs(wd) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + return cty.StringVal(s.eval.cfg.SourceDir), diags + + case "root": + return cty.StringVal(s.eval.call.rootPath), diags + + default: + suggestion := didyoumean.NameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (s staticScopeData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, 
tfdiags.Diagnostics) { + // TODO this is copied and trimed down from tofu/evaluate.go GetTerraformAttr. Ideally this should be refactored to a common location. + var diags tfdiags.Diagnostics + switch addr.Name { + case "workspace": + workspaceName := s.eval.call.workspace + return cty.StringVal(workspaceName), diags + + case "env": + // Prior to Terraform 0.12 there was an attribute "env", which was + // an alias name for "workspace". This was deprecated and is now + // removed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. 
The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (s staticScopeData) GetInputVariable(ident addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + variable, ok := s.eval.cfg.Variables[ident.Name] + if !ok { + return cty.NilVal, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Undefined variable", + Detail: fmt.Sprintf("Undefined variable %s", ident.String()), + Subject: rng.ToHCL().Ptr(), + }) + } + + id := StaticIdentifier{ + Module: s.eval.call.addr, + Subject: fmt.Sprintf("var.%s", variable.Name), + DeclRange: variable.DeclRange, + } + + val, valDiags := s.eval.call.vars(variable) + return val, s.enhanceDiagnostics(id, diags.Append(valDiags)) +} + +func (s staticScopeData) GetOutput(addrs.OutputValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} + +func (s staticScopeData) GetCheckBlock(addrs.Check, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + panic("Not Available in Static Context") +} diff --git a/pkg/configs/test_file.go b/pkg/configs/test_file.go new file mode 100644 index 00000000000..8bffc7fef08 --- /dev/null +++ b/pkg/configs/test_file.go @@ -0,0 +1,1136 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getmodules" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// TestCommand represents the OpenTofu a given run block will execute, plan +// or apply. Defaults to apply. 
+type TestCommand rune + +// TestMode represents the plan mode that OpenTofu will use for a given run +// block, normal or refresh-only. Defaults to normal. +type TestMode rune + +const ( + // ApplyTestCommand causes the run block to execute a OpenTofu apply + // operation. + ApplyTestCommand TestCommand = 0 + + // PlanTestCommand causes the run block to execute a OpenTofu plan + // operation. + PlanTestCommand TestCommand = 'P' + + // NormalTestMode causes the run block to execute in plans.NormalMode. + NormalTestMode TestMode = 0 + + // RefreshOnlyTestMode causes the run block to execute in + // plans.RefreshOnlyMode. + RefreshOnlyTestMode TestMode = 'R' +) + +// TestFile represents a single test file within a `tofu test` execution. +// +// A test file is made up of a sequential list of run blocks, each designating +// a command to execute and a series of validations to check after the command. +type TestFile struct { + // Variables defines a set of global variable definitions that should be set + // for every run block within the test file. + Variables map[string]hcl.Expression + + // Providers defines a set of providers that are available to run blocks + // within this test file. + // + // If empty, tests should use the default providers for the module under + // test. + Providers map[string]*Provider + + // Runs defines the sequential list of run blocks that should be executed in + // order. + Runs []*TestRun + + // OverrideResources is a list of resources to be overridden with static values. + // Underlying providers shouldn't be called for overridden resources. + OverrideResources []*OverrideResource + + // OverrideModules is a list of modules to be overridden with static values. + // Underlying modules shouldn't be called. + OverrideModules []*OverrideModule + + // MockProviders is a map of providers that should be mocked. It is merged + // with Providers map to use later when instantiating provider instance. 
+ MockProviders map[string]*MockProvider + + VariablesDeclRange hcl.Range +} + +// Validate does a very simple and cursory check across the file blocks to look +// for simple issues we can highlight early on. It doesn't validate nested run blocks. +func (file *TestFile) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // It's not allowed to have multiple `override_resource`, `override_data` or `override_module` blocks + // declared globally in a file with the same target address so we want to ensure there's no such cases. + diags = diags.Append(checkForDuplicatedOverrideResources(file.OverrideResources)) + diags = diags.Append(checkForDuplicatedOverrideModules(file.OverrideModules)) + + return diags +} + +func (file *TestFile) getTestProviderOrMock(addr string) (*Provider, bool) { + testProvider, ok := file.Providers[addr] + if ok { + return testProvider, true + } + + mockProvider, ok := file.MockProviders[addr] + if ok { + p := &Provider{ + Name: mockProvider.Name, + NameRange: mockProvider.NameRange, + Alias: mockProvider.Alias, + AliasRange: mockProvider.AliasRange, + DeclRange: mockProvider.DeclRange, + IsMocked: true, + MockResources: mockProvider.MockResources, + } + + return p, true + } + + return nil, false +} + +// TestRun represents a single run block within a test file. +// +// Each run block represents a single OpenTofu command to be executed and a set +// of validations to run after the command. +type TestRun struct { + Name string + + // Command is the OpenTofu command to execute. + // + // One of ['apply', 'plan']. + Command TestCommand + + // Options contains the embedded plan options that will affect the given + // Command. These should map to the options documented here: + // - https://opentofu.org/docs/cli/commands/plan/#planning-options + // + // Note, that the Variables are a top level concept and not embedded within + // the options despite being listed as plan options in the documentation. 
+ Options *TestRunOptions + + // Variables defines a set of variable definitions for this command. + // + // Any variables specified locally that clash with the global variables will + // take precedence over the global definition. + Variables map[string]hcl.Expression + + // Providers specifies the set of providers that should be loaded into the + // module for this run block. + // + // Providers specified here must be configured in one of the provider blocks + // for this file. If empty, the run block will load the default providers + // for the module under test. + Providers []PassedProviderConfig + + // CheckRules defines the list of assertions/validations that should be + // checked by this run block. + CheckRules []*CheckRule + + // Module defines an address of another module that should be loaded and + // executed as part of this run block instead of the module under test. + // + // In the initial version of the testing framework we will only support + // loading alternate modules from local directories or the registry. + Module *TestRunModuleCall + + // ConfigUnderTest describes the configuration this run block should execute + // against. + // + // In typical cases, this will be null and the config under test is the + // configuration within the directory the tofu test command is + // executing within. However, when Module is set the config under test is + // whichever config is defined by Module. This field is then set during the + // configuration load process and should be used when the test is executed. + ConfigUnderTest *Config + + // ExpectFailures should be a list of checkable objects that are expected + // to report a failure from their custom conditions as part of this test + // run. + ExpectFailures []hcl.Traversal + + // OverrideResources is a list of resources to be overridden with static values. + // Underlying providers shouldn't be called for overridden resources. 
+ OverrideResources []*OverrideResource + + // OverrideModules is a list of modules to be overridden with static values. + // Underlying modules shouldn't be called. + OverrideModules []*OverrideModule + + NameDeclRange hcl.Range + VariablesDeclRange hcl.Range + DeclRange hcl.Range +} + +// Validate does a very simple and cursory check across the run block to look +// for simple issues we can highlight early on. +func (run *TestRun) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // We want to make sure all the ExpectFailure references + // are the correct kind of reference. + for _, traversal := range run.ExpectFailures { + + reference, refDiags := addrs.ParseRefFromTestingScope(traversal) + diags = diags.Append(refDiags) + if refDiags.HasErrors() { + continue + } + + switch reference.Subject.(type) { + // You can only reference outputs, inputs, checks, and resources. + case addrs.OutputValue, addrs.InputVariable, addrs.Check, addrs.ResourceInstance, addrs.Resource: + // Do nothing, these are okay! + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid `expect_failures` reference", + Detail: fmt.Sprintf("You cannot expect failures from %s. You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", reference.Subject.String()), + Subject: reference.SourceRange.ToHCL().Ptr(), + }) + } + + } + + // It's not allowed to have multiple `override_resource`, `override_data` or `override_module` blocks + // inside a single run block with the same target address so we want to ensure there's no such cases. + diags = diags.Append(checkForDuplicatedOverrideResources(run.OverrideResources)) + diags = diags.Append(checkForDuplicatedOverrideModules(run.OverrideModules)) + + return diags +} + +// TestRunModuleCall specifies which module should be executed by a given run +// block. 
+type TestRunModuleCall struct { + // Source is the source of the module to test. + Source addrs.ModuleSource + + // Version is the version of the module to load from the registry. + Version VersionConstraint + + DeclRange hcl.Range + SourceDeclRange hcl.Range +} + +// TestRunOptions contains the plan options for a given run block. +type TestRunOptions struct { + // Mode is the planning mode to run in. One of ['normal', 'refresh-only']. + Mode TestMode + + // Refresh is analogous to the -refresh=false OpenTofu plan option. + Refresh bool + + // Replace is analogous to the -refresh=ADDRESS OpenTofu plan option. + Replace []hcl.Traversal + + // Target is analogous to the -target=ADDRESS OpenTofu plan option. + Target []hcl.Traversal + + DeclRange hcl.Range +} + +const ( + blockNameOverrideResource = "override_resource" + blockNameOverrideData = "override_data" +) + +// OverrideResource contains information about a resource or data block to be overridden. +type OverrideResource struct { + // Target references resource or data block to override. + Target hcl.Traversal + TargetParsed *addrs.ConfigResource + + // Mode indicates if the Target is resource or data block. + Mode addrs.ResourceMode + + // Values represents fields to use as defaults + // if they are not present in configuration. + Values map[string]cty.Value +} + +func (r OverrideResource) getBlockName() string { + switch r.Mode { + case addrs.ManagedResourceMode: + return blockNameOverrideResource + case addrs.DataResourceMode: + return blockNameOverrideData + case addrs.InvalidResourceMode: + panic("BUG: invalid resource mode in override resource") + default: + panic("BUG: undefined resource mode in override resource: " + r.Mode.String()) + } +} + +const blockNameOverrideModule = "override_module" + +// OverrideModule contains information about a module to be overridden. +type OverrideModule struct { + // Target references module call to override. 
+ Target hcl.Traversal + TargetParsed addrs.Module + + // Outputs represents fields to use instead + // of the real module call output. + Outputs map[string]cty.Value +} + +const blockNameMockProvider = "mock_provider" + +// MockProvider represents mocked provider block. It partially matches +// the Provider configuration block (name, alias) and includes additional +// mocking data (mock resources). +type MockProvider struct { + // Fields below are copied from configs.Provider struct: + + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + DeclRange hcl.Range + + // Fields below are specific to configs.MockProvider: + + MockResources []*MockResource +} + +// moduleUniqueKey is copied from Provider.moduleUniqueKey +func (p *MockProvider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +const ( + blockNameMockResource = "mock_resource" + blockNameMockData = "mock_data" +) + +// MockResource represents mocked resource. It is similar to OverrideResource, +// except all the resources with the same type should be overridden (mocked). +type MockResource struct { + Mode addrs.ResourceMode + Type string + Defaults map[string]cty.Value +} + +func (r MockResource) getBlockName() string { + switch r.Mode { + case addrs.ManagedResourceMode: + return blockNameMockResource + case addrs.DataResourceMode: + return blockNameMockData + case addrs.InvalidResourceMode: + panic("BUG: invalid resource mode in mock resource") + default: + panic("BUG: undefined resource mode in mock resource: " + r.Mode.String()) + } +} + +func loadTestFile(body hcl.Body) (*TestFile, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, contentDiags := body.Content(testFileSchema) + diags = append(diags, contentDiags...) 
+ + tf := TestFile{ + Providers: make(map[string]*Provider), + MockProviders: make(map[string]*MockProvider), + } + + for _, block := range content.Blocks { + switch block.Type { + case "run": + run, runDiags := decodeTestRunBlock(block) + diags = append(diags, runDiags...) + if !runDiags.HasErrors() { + tf.Runs = append(tf.Runs, run) + } + + case "variables": + if tf.Variables != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Multiple \"variables\" blocks", + Detail: fmt.Sprintf("This test file already has a variables block defined at %s.", tf.VariablesDeclRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + + tf.Variables = make(map[string]hcl.Expression) + tf.VariablesDeclRange = block.DefRange + + vars, varsDiags := block.Body.JustAttributes() + diags = append(diags, varsDiags...) + for _, v := range vars { + tf.Variables[v.Name] = v.Expr + } + + case "provider": + provider, providerDiags := decodeProviderBlock(block) + diags = append(diags, providerDiags...) + if provider != nil { + tf.Providers[provider.moduleUniqueKey()] = provider + } + + case blockNameOverrideResource: + overrideRes, overrideResDiags := decodeOverrideResourceBlock(block, addrs.ManagedResourceMode) + diags = append(diags, overrideResDiags...) + if !overrideResDiags.HasErrors() { + tf.OverrideResources = append(tf.OverrideResources, overrideRes) + } + + case blockNameOverrideData: + overrideData, overrideDataDiags := decodeOverrideResourceBlock(block, addrs.DataResourceMode) + diags = append(diags, overrideDataDiags...) + if !overrideDataDiags.HasErrors() { + tf.OverrideResources = append(tf.OverrideResources, overrideData) + } + + case blockNameOverrideModule: + overrideMod, overrideModDiags := decodeOverrideModuleBlock(block) + diags = append(diags, overrideModDiags...) 
+ if !overrideModDiags.HasErrors() { + tf.OverrideModules = append(tf.OverrideModules, overrideMod) + } + + case blockNameMockProvider: + mockProvider, mockProviderDiags := decodeMockProviderBlock(block) + diags = append(diags, mockProviderDiags...) + + if !mockProviderDiags.HasErrors() { + k := mockProvider.moduleUniqueKey() + + if _, ok := tf.MockProviders[k]; ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicated `mock_provider` block", + Detail: fmt.Sprintf("It is not allowed to have multiple `mock_provider` blocks with the same address: `%v`.", k), + Subject: mockProvider.DeclRange.Ptr(), + }) + } else { + tf.MockProviders[k] = mockProvider + } + } + } + } + + return &tf, diags +} + +func decodeTestRunBlock(block *hcl.Block) (*TestRun, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, contentDiags := block.Body.Content(testRunBlockSchema) + diags = append(diags, contentDiags...) + + r := TestRun{ + Name: block.Labels[0], + NameDeclRange: block.LabelRanges[0], + DeclRange: block.DefRange, + } + + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid run block name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + for _, block := range content.Blocks { + switch block.Type { + case "assert": + cr, crDiags := decodeCheckRuleBlock(block, false) + diags = append(diags, crDiags...) + if !crDiags.HasErrors() { + r.CheckRules = append(r.CheckRules, cr) + } + case "plan_options": + if r.Options != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Multiple \"plan_options\" blocks", + Detail: fmt.Sprintf("This run block already has a plan_options block defined at %s.", r.Options.DeclRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + + opts, optsDiags := decodeTestRunOptionsBlock(block) + diags = append(diags, optsDiags...) 
+ if !optsDiags.HasErrors() { + r.Options = opts + } + case "variables": + if r.Variables != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Multiple \"variables\" blocks", + Detail: fmt.Sprintf("This run block already has a variables block defined at %s.", r.VariablesDeclRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + + r.Variables = make(map[string]hcl.Expression) + r.VariablesDeclRange = block.DefRange + + vars, varsDiags := block.Body.JustAttributes() + diags = append(diags, varsDiags...) + for _, v := range vars { + r.Variables[v.Name] = v.Expr + } + case "module": + if r.Module != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Multiple \"module\" blocks", + Detail: fmt.Sprintf("This run block already has a module block defined at %s.", r.Module.DeclRange), + Subject: block.DefRange.Ptr(), + }) + } + + module, moduleDiags := decodeTestRunModuleBlock(block) + diags = append(diags, moduleDiags...) + if !moduleDiags.HasErrors() { + r.Module = module + } + + case blockNameOverrideResource: + overrideRes, overrideResDiags := decodeOverrideResourceBlock(block, addrs.ManagedResourceMode) + diags = append(diags, overrideResDiags...) + if !overrideResDiags.HasErrors() { + r.OverrideResources = append(r.OverrideResources, overrideRes) + } + + case blockNameOverrideData: + overrideData, overrideDataDiags := decodeOverrideResourceBlock(block, addrs.DataResourceMode) + diags = append(diags, overrideDataDiags...) + if !overrideDataDiags.HasErrors() { + r.OverrideResources = append(r.OverrideResources, overrideData) + } + + case blockNameOverrideModule: + overrideMod, overrideModDiags := decodeOverrideModuleBlock(block) + diags = append(diags, overrideModDiags...) 
+ if !overrideModDiags.HasErrors() { + r.OverrideModules = append(r.OverrideModules, overrideMod) + } + } + } + + if r.Variables == nil { + // There is no distinction between a nil map of variables or an empty + // map, but we can avoid any potential nil pointer exceptions by just + // creating an empty map. + r.Variables = make(map[string]hcl.Expression) + } + + if r.Options == nil { + // Create an options with default values if the user didn't specify + // anything. + r.Options = &TestRunOptions{ + Mode: NormalTestMode, + Refresh: true, + } + } + + if attr, exists := content.Attributes["command"]; exists { + switch hcl.ExprAsKeyword(attr.Expr) { + case "apply": + r.Command = ApplyTestCommand + case "plan": + r.Command = PlanTestCommand + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"command\" keyword", + Detail: "The \"command\" argument requires one of the following keywords without quotes: apply or plan.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + r.Command = ApplyTestCommand // Default to apply + } + + if attr, exists := content.Attributes["providers"]; exists { + providers, providerDiags := decodePassedProviderConfigs(attr) + diags = append(diags, providerDiags...) + r.Providers = append(r.Providers, providers...) + } + + if attr, exists := content.Attributes["expect_failures"]; exists { + failures, failDiags := decodeDependsOn(attr) + diags = append(diags, failDiags...) + r.ExpectFailures = failures + } + + return &r, diags +} + +func decodeTestRunModuleBlock(block *hcl.Block) (*TestRunModuleCall, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, contentDiags := block.Body.Content(testRunModuleBlockSchema) + diags = append(diags, contentDiags...) 
+ + module := TestRunModuleCall{ + DeclRange: block.DefRange, + } + + haveVersionArg := false + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + module.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + haveVersionArg = true + } + + if attr, exists := content.Attributes["source"]; exists { + module.SourceDeclRange = attr.Range + + var raw string + rawDiags := gohcl.DecodeExpression(attr.Expr, nil, &raw) + diags = append(diags, rawDiags...) + if !rawDiags.HasErrors() { + var err error + if haveVersionArg { + module.Source, err = addrs.ParseModuleSourceRegistry(raw) + } else { + module.Source, err = addrs.ParseModuleSource(raw) + } + if err != nil { + // NOTE: We leave mc.SourceAddr as nil for any situation where the + // source attribute is invalid, so any code which tries to carefully + // use the partial result of a failed config decode must be + // resilient to that. + module.Source = nil + + // NOTE: In practice it's actually very unlikely to end up here, + // because our source address parser can turn just about any string + // into some sort of remote package address, and so for most errors + // we'll detect them only during module installation. There are + // still a _few_ purely-syntax errors we can catch at parsing time, + // though, mostly related to remote package sub-paths and local + // paths. + switch err := err.(type) { + case *getmodules.MaybeRelativePathErr: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf( + "OpenTofu failed to determine your intended installation method for remote module package %q.\n\nIf you intended this as a path relative to the current module, use \"./%s\" instead. 
The \"./\" prefix indicates that the address is a relative filesystem path.", + err.Addr, err.Addr, + ), + Subject: module.SourceDeclRange.Ptr(), + }) + default: + if haveVersionArg { + // In this case we'll include some extra context that + // we assumed a registry source address due to the + // version argument. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid registry module source address", + Detail: fmt.Sprintf("Failed to parse module registry address: %s.\n\nOpenTofu assumed that you intended a module registry source address because you also set the argument \"version\", which applies only to registry modules.", err), + Subject: module.SourceDeclRange.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: fmt.Sprintf("Failed to parse module source address: %s.", err), + Subject: module.SourceDeclRange.Ptr(), + }) + } + } + } + + switch module.Source.(type) { + case addrs.ModuleSourceRemote: + // We only support local or registry modules when loading + // modules directly from alternate sources during a test + // execution. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source address", + Detail: "Only local or registry module sources are currently supported from within test run blocks.", + Subject: module.SourceDeclRange.Ptr(), + }) + } + } + } else { + // Must have a source attribute. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing \"source\" attribute for module block", + Detail: "You must specify a source attribute when executing alternate modules during test executions.", + Subject: module.DeclRange.Ptr(), + }) + } + + return &module, diags +} + +func decodeTestRunOptionsBlock(block *hcl.Block) (*TestRunOptions, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, contentDiags := block.Body.Content(testRunOptionsBlockSchema) + diags = append(diags, contentDiags...) + + opts := TestRunOptions{ + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["mode"]; exists { + switch hcl.ExprAsKeyword(attr.Expr) { + case "refresh-only": + opts.Mode = RefreshOnlyTestMode + case "normal": + opts.Mode = NormalTestMode + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"mode\" keyword", + Detail: "The \"mode\" argument requires one of the following keywords without quotes: normal or refresh-only", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + opts.Mode = NormalTestMode // Default to normal + } + + if attr, exists := content.Attributes["refresh"]; exists { + diags = append(diags, gohcl.DecodeExpression(attr.Expr, nil, &opts.Refresh)...) + } else { + // Defaults to true. + opts.Refresh = true + } + + if attr, exists := content.Attributes["replace"]; exists { + reps, repsDiags := decodeDependsOn(attr) + diags = append(diags, repsDiags...) + opts.Replace = reps + } + + if attr, exists := content.Attributes["target"]; exists { + tars, tarsDiags := decodeDependsOn(attr) + diags = append(diags, tarsDiags...) + opts.Target = tars + } + + if !opts.Refresh && opts.Mode == RefreshOnlyTestMode { + // These options are incompatible. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incompatible plan options", + Detail: "The \"refresh\" option cannot be set to false when running a test in \"refresh-only\" mode.", + Subject: content.Attributes["refresh"].Range.Ptr(), + }) + } + + return &opts, diags +} + +func decodeOverrideResourceBlock(block *hcl.Block, mode addrs.ResourceMode) (*OverrideResource, hcl.Diagnostics) { + parseTarget := func(attr *hcl.Attribute) (hcl.Traversal, *addrs.ConfigResource, hcl.Diagnostics) { + traversal, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags := traversalDiags + if traversalDiags.HasErrors() { + return nil, nil, diags + } + + configRes, configResDiags := addrs.ParseConfigResource(traversal) + diags = append(diags, configResDiags.ToHCL()...) + if configResDiags.HasErrors() { + return nil, nil, diags + } + + return traversal, &configRes, diags + } + + res := &OverrideResource{ + Mode: mode, + } + + content, diags := block.Body.Content(overrideResourceBlockSchema) + + if attr, exists := content.Attributes["target"]; exists { + target, parsed, moreDiags := parseTarget(attr) + res.Target, res.TargetParsed = target, parsed + diags = append(diags, moreDiags...) + } + + if attr, exists := content.Attributes["values"]; exists { + v, moreDiags := parseObjectAttrWithNoVariables(attr) + res.Values, diags = v, append(diags, moreDiags...) + } + + return res, diags +} + +func decodeOverrideModuleBlock(block *hcl.Block) (*OverrideModule, hcl.Diagnostics) { + parseTarget := func(attr *hcl.Attribute) (hcl.Traversal, addrs.Module, hcl.Diagnostics) { + traversal, traversalDiags := hcl.AbsTraversalForExpr(attr.Expr) + diags := traversalDiags + if traversalDiags.HasErrors() { + return nil, nil, diags + } + + target, targetDiags := addrs.ParseModule(traversal) + diags = append(diags, targetDiags.ToHCL()...) 
+ if targetDiags.HasErrors() { + return nil, nil, diags + } + + return traversal, target, diags + } + + mod := &OverrideModule{} + + content, diags := block.Body.Content(overrideModuleBlockSchema) + + if attr, exists := content.Attributes["target"]; exists { + traversal, target, moreDiags := parseTarget(attr) + mod.Target, mod.TargetParsed = traversal, target + diags = append(diags, moreDiags...) + } + + if attr, exists := content.Attributes["outputs"]; exists { + outputs, moreDiags := parseObjectAttrWithNoVariables(attr) + mod.Outputs, diags = outputs, append(diags, moreDiags...) + } + + return mod, diags +} + +// Some code of decodeMockProviderBlock function was copied from decodeProviderBlock. +func decodeMockProviderBlock(block *hcl.Block) (*MockProvider, hcl.Diagnostics) { + var diags hcl.Diagnostics + + content, moreDiags := block.Body.Content(mockProviderBlockSchema) + diags = append(diags, moreDiags...) + + // Provider names must be localized. Produce an error with a message + // indicating the action the user can take to fix this message if the local + // name is not localized. + name := block.Labels[0] + nameDiags := checkProviderNameNormalized(name, block.DefRange) + diags = append(diags, nameDiags...) + if nameDiags.HasErrors() { + // If the name is invalid then we mustn't produce a result because + // downstreams could try to use it as a provider type and then crash. + return nil, diags + } + + provider := &MockProvider{ + Name: name, + NameRange: block.LabelRanges[0], + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid mock provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. 
%s", badIdentifierDetail), + Subject: provider.AliasRange, + }) + } + } + + var ( + managedResources = make(map[string]struct{}) + dataResources = make(map[string]struct{}) + ) + + for _, block := range content.Blocks { + res, resDiags := decodeMockResourceBlock(block) + diags = append(diags, resDiags...) + if resDiags.HasErrors() { + continue + } + + resources := managedResources + if res.Mode == addrs.DataResourceMode { + resources = dataResources + } + + if _, ok := resources[res.Type]; ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicated `%v` block", res.getBlockName()), + Detail: fmt.Sprintf("`%v.%v` is already defined in `mock_provider` block.", res.getBlockName(), res.Type), + Subject: provider.DeclRange.Ptr(), + }) + continue + } + + resources[res.Type] = struct{}{} + + provider.MockResources = append(provider.MockResources, res) + } + + return provider, diags +} + +func decodeMockResourceBlock(block *hcl.Block) (*MockResource, hcl.Diagnostics) { + var mode addrs.ResourceMode + + switch block.Type { + case blockNameMockResource: + mode = addrs.ManagedResourceMode + case blockNameMockData: + mode = addrs.DataResourceMode + default: + panic("BUG: unsupported block type for mock resource: " + block.Type) + } + + res := &MockResource{ + Mode: mode, + Type: block.Labels[0], + } + + content, diags := block.Body.Content(mockResourceBlockSchema) + + if attr, exists := content.Attributes["defaults"]; exists { + v, moreDiags := parseObjectAttrWithNoVariables(attr) + res.Defaults, diags = v, append(diags, moreDiags...) 
+ } + + return res, diags +} + +func parseObjectAttrWithNoVariables(attr *hcl.Attribute) (map[string]cty.Value, hcl.Diagnostics) { + attrVal, valDiags := attr.Expr.Value(nil) + diags := valDiags + if valDiags.HasErrors() { + return nil, diags + } + + if !attrVal.Type().IsObjectType() { + return nil, append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Object expected", + Detail: fmt.Sprintf("The attribute `%v` must be an object.", attr.Name), + Subject: attr.Range.Ptr(), + }) + } + + return attrVal.AsValueMap(), diags +} + +func checkForDuplicatedOverrideResources(resources []*OverrideResource) hcl.Diagnostics { + var diags hcl.Diagnostics + + overrideResources := make(map[string]struct{}, len(resources)) + for _, res := range resources { + k := res.TargetParsed.String() + + if _, ok := overrideResources[k]; ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicated `%v` block", res.getBlockName()), + Detail: fmt.Sprintf("It is not allowed to have multiple `%v` blocks with the same target: `%v`.", res.getBlockName(), res.TargetParsed), + Subject: res.Target.SourceRange().Ptr(), + }) + continue + } + + overrideResources[k] = struct{}{} + } + + return diags +} + +func checkForDuplicatedOverrideModules(modules []*OverrideModule) hcl.Diagnostics { + var diags hcl.Diagnostics + + overrideModules := make(map[string]struct{}, len(modules)) + for _, mod := range modules { + k := mod.TargetParsed.String() + + if _, ok := overrideModules[k]; ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicated `override_module` block", + Detail: fmt.Sprintf("It is not allowed to have multiple `override_module` blocks with the same target: `%v`.", mod.TargetParsed), + Subject: mod.Target.SourceRange().Ptr(), + }) + continue + } + + overrideModules[k] = struct{}{} + } + + return diags +} + +var testFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "run", + 
LabelNames: []string{"name"}, + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variables", + }, + { + Type: blockNameOverrideResource, + }, + { + Type: blockNameOverrideData, + }, + { + Type: blockNameOverrideModule, + }, + { + Type: blockNameMockProvider, + LabelNames: []string{"name"}, + }, + }, +} + +var testRunBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "command"}, + {Name: "providers"}, + {Name: "expect_failures"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "plan_options", + }, + { + Type: "assert", + }, + { + Type: "variables", + }, + { + Type: "module", + }, + { + Type: blockNameOverrideResource, + }, + { + Type: blockNameOverrideData, + }, + { + Type: blockNameOverrideModule, + }, + }, +} + +var testRunOptionsBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "mode"}, + {Name: "refresh"}, + {Name: "replace"}, + {Name: "target"}, + }, +} + +var testRunModuleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "source"}, + {Name: "version"}, + }, +} + +//nolint:gochecknoglobals // To follow existing code style. +var overrideResourceBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "target", + Required: true, + }, + { + Name: "values", + Required: false, + }, + }, +} + +//nolint:gochecknoglobals // To follow existing code style. +var overrideModuleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "target", + Required: true, + }, + { + Name: "outputs", + Required: false, + }, + }, +} + +//nolint:gochecknoglobals // To follow existing code style. 
+var mockProviderBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: blockNameMockResource, + LabelNames: []string{"type"}, + }, + { + Type: blockNameMockData, + LabelNames: []string{"type"}, + }, + }, +} + +//nolint:gochecknoglobals // To follow existing code style. +var mockResourceBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "defaults", + }, + }, +} diff --git a/pkg/configs/test_file_test.go b/pkg/configs/test_file_test.go new file mode 100644 index 00000000000..3105c12e88d --- /dev/null +++ b/pkg/configs/test_file_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestTestRun_Validate(t *testing.T) { + tcs := map[string]struct { + expectedFailures []string + diagnostic string + }{ + "empty": {}, + "supports_expected": { + expectedFailures: []string{ + "check.expected_check", + "var.expected_var", + "output.expected_output", + "test_resource.resource", + "resource.test_resource.resource", + "data.test_resource.resource", + }, + }, + "count": { + expectedFailures: []string{ + "count.index", + }, + diagnostic: "You cannot expect failures from count.index. You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", + }, + "foreach": { + expectedFailures: []string{ + "each.key", + }, + diagnostic: "You cannot expect failures from each.key. 
You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", + }, + "local": { + expectedFailures: []string{ + "local.value", + }, + diagnostic: "You cannot expect failures from local.value. You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", + }, + "module": { + expectedFailures: []string{ + "module.my_module", + }, + diagnostic: "You cannot expect failures from module.my_module. You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", + }, + "path": { + expectedFailures: []string{ + "path.walk", + }, + diagnostic: "You cannot expect failures from path.walk. You can only expect failures from checkable objects such as input variables, output values, check blocks, managed resources and data sources.", + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + run := &TestRun{} + for _, addr := range tc.expectedFailures { + run.ExpectFailures = append(run.ExpectFailures, parseTraversal(t, addr)) + } + + diags := run.Validate() + + if len(diags) > 1 { + t.Fatalf("too many diags: %d", len(diags)) + } + + if len(tc.diagnostic) == 0 { + if len(diags) != 0 { + t.Fatalf("expected no diags but got: %s", diags[0].Description().Detail) + } + + return + } + + if diff := cmp.Diff(tc.diagnostic, diags[0].Description().Detail); len(diff) > 0 { + t.Fatalf("unexpected diff:\n%s", diff) + } + }) + } +} + +func parseTraversal(t *testing.T, addr string) hcl.Traversal { + t.Helper() + + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(addr), "", hcl.InitialPos) + if diags.HasErrors() { + t.Fatalf("invalid address: %s", diags.Error()) + } + return traversal +} diff --git a/pkg/configs/testdata/config-build/child_a/child_a.tf b/pkg/configs/testdata/config-build/child_a/child_a.tf new file mode 100644 index 
00000000000..d3eff0b4998 --- /dev/null +++ b/pkg/configs/testdata/config-build/child_a/child_a.tf @@ -0,0 +1,7 @@ + +module "child_c" { + # In the unit test where this fixture is used, we treat the source strings + # as relative paths from the fixture directory rather than as source + # addresses as we would in a real module walker. + source = "./child_c" +} diff --git a/pkg/configs/testdata/config-build/child_b/child_b.tf b/pkg/configs/testdata/config-build/child_b/child_b.tf new file mode 100644 index 00000000000..d3eff0b4998 --- /dev/null +++ b/pkg/configs/testdata/config-build/child_b/child_b.tf @@ -0,0 +1,7 @@ + +module "child_c" { + # In the unit test where this fixture is used, we treat the source strings + # as relative paths from the fixture directory rather than as source + # addresses as we would in a real module walker. + source = "./child_c" +} diff --git a/pkg/configs/testdata/config-build/child_c/child_c.tf b/pkg/configs/testdata/config-build/child_c/child_c.tf new file mode 100644 index 00000000000..16020207c83 --- /dev/null +++ b/pkg/configs/testdata/config-build/child_c/child_c.tf @@ -0,0 +1,3 @@ +output "hello" { + value = "hello" +} diff --git a/pkg/configs/testdata/config-build/root.tf b/pkg/configs/testdata/config-build/root.tf new file mode 100644 index 00000000000..5e23e40a288 --- /dev/null +++ b/pkg/configs/testdata/config-build/root.tf @@ -0,0 +1,9 @@ + +module "child_a" { + source = "./child_a" +} + +module "child_b" { + source = "./child_b" +} + diff --git a/pkg/configs/testdata/config-diagnostics/empty-configs/main.tf b/pkg/configs/testdata/config-diagnostics/empty-configs/main.tf new file mode 100644 index 00000000000..c0edba27533 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/empty-configs/main.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + baz = { + source = "hashicorp/baz" + } + } +} + +module "mod" { + source = "./mod" + providers = { + foo = foo + foo.bar = foo + baz 
= baz + baz.bing = baz + } +} diff --git a/pkg/configs/testdata/config-diagnostics/empty-configs/mod/main.tf b/pkg/configs/testdata/config-diagnostics/empty-configs/mod/main.tf new file mode 100644 index 00000000000..50995ca0bd3 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/empty-configs/mod/main.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + configuration_aliases = [ foo.bar ] + } + } +} + +provider "foo" { +} + +provider "foo" { + alias = "bar" +} + +provider "baz" { +} + +provider "baz" { + alias = "bing" +} diff --git a/pkg/configs/testdata/config-diagnostics/empty-configs/warnings b/pkg/configs/testdata/config-diagnostics/empty-configs/warnings new file mode 100644 index 00000000000..b45f61e60f9 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/empty-configs/warnings @@ -0,0 +1,4 @@ +empty-configs/mod/main.tf:10,1-15: Redundant empty provider block; Earlier versions of OpenTofu used empty provider blocks ("proxy provider configurations") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated. +If you control this module, you can migrate to the new declaration syntax by removing all of the empty provider "foo" blocks and then adding or updating an entry like the following to the required_providers block of module.mod: +empty-configs/mod/main.tf:17,1-15: Redundant empty provider block; Earlier versions of OpenTofu used empty provider blocks ("proxy provider configurations") for child modules to declare their need to be passed a provider configuration by their callers. That approach was ambiguous and is now deprecated. 
+If you control this module, you can migrate to the new declaration syntax by removing all of the empty provider "baz" blocks and then adding or updating an entry like the following to the required_providers block of module.mod: diff --git a/pkg/configs/testdata/config-diagnostics/import-in-child-module/child/main.tf b/pkg/configs/testdata/config-diagnostics/import-in-child-module/child/main.tf new file mode 100644 index 00000000000..bb8cb139d1d --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/import-in-child-module/child/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "web" {} + +import { + to = aws_instance.web + id = "test" +} \ No newline at end of file diff --git a/pkg/configs/testdata/config-diagnostics/import-in-child-module/errors b/pkg/configs/testdata/config-diagnostics/import-in-child-module/errors new file mode 100644 index 00000000000..b0a5ac4fc1c --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/import-in-child-module/errors @@ -0,0 +1 @@ +import-in-child-module/child/main.tf:3,1-7: Invalid import configuration; An import block was detected in "module.child". Import blocks are only allowed in the root module. 
\ No newline at end of file diff --git a/pkg/configs/testdata/config-diagnostics/import-in-child-module/root.tf b/pkg/configs/testdata/config-diagnostics/import-in-child-module/root.tf new file mode 100644 index 00000000000..3133e57b930 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/import-in-child-module/root.tf @@ -0,0 +1,10 @@ +resource "aws_instance" "web" {} + +import { + to = aws_instance.web + id = "test" +} + +module "child" { + source = "./child" +} \ No newline at end of file diff --git a/pkg/configs/testdata/config-diagnostics/incorrect-type/errors b/pkg/configs/testdata/config-diagnostics/incorrect-type/errors new file mode 100644 index 00000000000..97a31fe526e --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/incorrect-type/errors @@ -0,0 +1 @@ +incorrect-type/main.tf:15,11-14: Provider type mismatch; The local name "foo" in the root module represents provider "hashicorp/foo", but "foo" in module.mod represents "example.com/vendor/foo". diff --git a/pkg/configs/testdata/config-diagnostics/incorrect-type/main.tf b/pkg/configs/testdata/config-diagnostics/incorrect-type/main.tf new file mode 100644 index 00000000000..074cc8422de --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/incorrect-type/main.tf @@ -0,0 +1,18 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + baz = { + source = "hashicorp/baz" + } + } +} + +module "mod" { + source = "./mod" + providers = { + foo = foo + baz = baz + } +} diff --git a/pkg/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf b/pkg/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf new file mode 100644 index 00000000000..14c3239e928 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "example.com/vendor/foo" + } + } +} + +resource "foo_resource" "a" { +} + +// implied default provider baz +resource "baz_resource" "a" { +} diff 
--git a/pkg/configs/testdata/config-diagnostics/incorrect-type/warnings b/pkg/configs/testdata/config-diagnostics/incorrect-type/warnings new file mode 100644 index 00000000000..040ad4f4dd5 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/incorrect-type/warnings @@ -0,0 +1 @@ +incorrect-type/main.tf:16,5-8: Reference to undefined provider; There is no explicit declaration for local provider name "baz" in module.mod, so OpenTofu is assuming you mean to pass a configuration for "hashicorp/baz". diff --git a/pkg/configs/testdata/config-diagnostics/invalid-provider/errors b/pkg/configs/testdata/config-diagnostics/invalid-provider/errors new file mode 100644 index 00000000000..359d47600b2 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/invalid-provider/errors @@ -0,0 +1 @@ +main.tf:1,1-20: Invalid provider local name; crash_es is an invalid provider local name diff --git a/pkg/configs/testdata/config-diagnostics/invalid-provider/main.tf b/pkg/configs/testdata/config-diagnostics/invalid-provider/main.tf new file mode 100644 index 00000000000..ba846846994 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/invalid-provider/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/pkg/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf b/pkg/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf new file mode 100644 index 00000000000..f50ced1fe32 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/invalid-provider/mod/main.tf @@ -0,0 +1,2 @@ +provider "crash_es" { +} diff --git a/pkg/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf b/pkg/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf new file mode 100644 index 00000000000..f2695a66111 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + value = "foo" +} + +output "my_output" { + value = "my output" +} diff --git 
a/pkg/configs/testdata/config-diagnostics/nested-provider/child/main.tf b/pkg/configs/testdata/config-diagnostics/nested-provider/child/main.tf new file mode 100644 index 00000000000..9a725a52097 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/nested-provider/child/main.tf @@ -0,0 +1,4 @@ +module "child2" { + // the test fixture treats these sources as relative to the root + source = "./child/child2" +} diff --git a/pkg/configs/testdata/config-diagnostics/nested-provider/errors b/pkg/configs/testdata/config-diagnostics/nested-provider/errors new file mode 100644 index 00000000000..df929d81f7c --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/nested-provider/errors @@ -0,0 +1 @@ +nested-provider/root.tf:2,11-12: Module is incompatible with count, for_each, and depends_on; The module at module.child.module.child2 is a legacy module which contains its own local provider configurations, and so calls to it may not use the count, for_each, or depends_on arguments. diff --git a/pkg/configs/testdata/config-diagnostics/nested-provider/root.tf b/pkg/configs/testdata/config-diagnostics/nested-provider/root.tf new file mode 100644 index 00000000000..71b90f6d674 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/nested-provider/root.tf @@ -0,0 +1,4 @@ +module "child" { + count = 1 + source = "./child" +} diff --git a/pkg/configs/testdata/config-diagnostics/override-provider/errors b/pkg/configs/testdata/config-diagnostics/override-provider/errors new file mode 100644 index 00000000000..6529dd95970 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/override-provider/errors @@ -0,0 +1 @@ +override-provider/main.tf:17,5-8: Cannot override provider configuration; The configuration of module.mod has its own local configuration for bar, and so it cannot accept an overridden configuration provided by the root module. 
diff --git a/pkg/configs/testdata/config-diagnostics/override-provider/main.tf b/pkg/configs/testdata/config-diagnostics/override-provider/main.tf new file mode 100644 index 00000000000..30feec1c97b --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/override-provider/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} + +provider "bar" { + value = "not ok" +} + +// this module configures its own provider, which cannot be overridden +module "mod" { + source = "./mod" + providers = { + bar = bar + } +} diff --git a/pkg/configs/testdata/config-diagnostics/override-provider/mod/main.tf b/pkg/configs/testdata/config-diagnostics/override-provider/mod/main.tf new file mode 100644 index 00000000000..c0b6169710f --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/override-provider/mod/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} + +// this configuration cannot be overridden from an outside module +provider "bar" { + value = "ok" +} diff --git a/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf new file mode 100644 index 00000000000..aa4b7e9f7ac --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/main.tf @@ -0,0 +1,23 @@ +provider "test" { + value = "ok" +} + +module "mod" { + source = "./mod" +} + +# FIXME: This test is for an awkward interaction that we've preserved for +# compatibility with what was arguably a bug in earlier versions: if a +# child module tries to use an inherited provider configuration explicitly by +# name then Terraform would historically use the wrong provider configuration. +# +# Since we weren't able to address that bug without breaking backward +# compatibility, instead we emit a warning to prompt the author to be explicit, +# passing in the configuration they intend to use. 
+# +# This case is particularly awkward because a change in the child module +# (previously referring to a provider only implicitly, but now naming it +# explicitly) can cause a required change in _this_ module (the caller), +# even though the author of the child module would've seen no explicit warning +# that they were making a breaking change. Hopefully we can improve on this +# in a future language edition. diff --git a/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf new file mode 100644 index 00000000000..e0d142d5530 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} + +module "mod2" { + source = "./mod2" + + // the test provider is named here, but a config must be supplied from the + // parent module. + providers = { + test.foo = test + } +} diff --git a/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf new file mode 100644 index 00000000000..0b7361691db --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/mod2/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [test.foo] + } + } +} + +resource "test_resource" "foo" { + provider = test.foo +} diff --git a/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/warnings b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/warnings new file mode 100644 index 00000000000..692e14ecf6d --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/pass-inherited-provider/warnings @@ -0,0 +1 @@ +pass-inherited-provider/main.tf:5,1-13: Missing required provider configuration; The configuration for module.mod expects to inherit a 
configuration for provider hashicorp/test with local name "test", but the root module doesn't pass a configuration under that name. diff --git a/pkg/configs/testdata/config-diagnostics/required-alias/errors b/pkg/configs/testdata/config-diagnostics/required-alias/errors new file mode 100644 index 00000000000..b010244325b --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/required-alias/errors @@ -0,0 +1 @@ +required-alias/main.tf:1,1-13: Missing required provider configuration; The child module requires an additional configuration for provider hashicorp/foo, with the local name "foo.bar". diff --git a/pkg/configs/testdata/config-diagnostics/required-alias/main.tf b/pkg/configs/testdata/config-diagnostics/required-alias/main.tf new file mode 100644 index 00000000000..c2cfe60b870 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/required-alias/main.tf @@ -0,0 +1,4 @@ +module "mod" { + source = "./mod" + // missing providers with foo.bar provider config +} diff --git a/pkg/configs/testdata/config-diagnostics/required-alias/mod/main.tf b/pkg/configs/testdata/config-diagnostics/required-alias/mod/main.tf new file mode 100644 index 00000000000..0f2a52168c1 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/required-alias/mod/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + version = "1.0.0" + configuration_aliases = [ foo.bar ] + } + } +} + +resource "foo_resource" "a" { + provider = foo.bar +} diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/errors b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/errors new file mode 100644 index 00000000000..d860e2e4652 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/errors @@ -0,0 +1,3 @@ +testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl:2,1-15: Provider type mismatch; The provider "foo" in main.tftest.hcl 
represents provider "registry.opentofu.org/hashicorp/bar", but "foo" in the root module represents "registry.opentofu.org/hashicorp/foo".\n\nThis means the provider definition for "foo" within main.tftest.hcl, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types. +testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl:4,1-15: Provider type mismatch; The provider "foo.bar" in main.tftest.hcl represents provider "registry.opentofu.org/hashicorp/bar", but "foo.bar" in the root module represents "registry.opentofu.org/hashicorp/foo".\n\nThis means the provider definition for "foo.bar" within main.tftest.hcl, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types. +testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl:8,1-15: Provider type mismatch; The provider "bar" in main.tftest.hcl represents provider "registry.opentofu.org/hashicorp/foo", but "bar" in the root module represents "registry.opentofu.org/hashicorp/bar".\n\nThis means the provider definition for "bar" within main.tftest.hcl has been referenced by multiple run blocks and assigned to different provider types. 
\ No newline at end of file diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tf b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tf new file mode 100644 index 00000000000..3b15ce42c76 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + configuration_aliases = [foo.bar] + } + } +} + +provider "bar" {} + +resource "foo_resource" "resource" {} + +resource "bar_resource" "resource" {} diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl new file mode 100644 index 00000000000..4e93da6dfdd --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/main.tftest.hcl @@ -0,0 +1,18 @@ + +provider "foo" {} + +provider "foo" { + alias = "bar" +} + +provider "bar" {} + +run "setup_module" { + + module { + source = "./setup" + } + +} + +run "main_module" {} diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/setup/main.tf b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/setup/main.tf new file mode 100644 index 00000000000..beb7059325b --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch-with-module/setup/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/bar" + configuration_aliases = [ foo.bar ] + } + bar = { + source = "hashicorp/foo" + } + } +} + +resource "foo_resource" "resource" {} + +resource "bar_resource" "resource" {} diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/errors b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/errors new file mode 100644 index 00000000000..c96e5ded821 --- 
/dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/errors @@ -0,0 +1 @@ +testdata/config-diagnostics/tests-provider-mismatch/main.tftest.hcl:27,11-14: Provider type mismatch; The local name "bar" in main.tftest.hcl represents provider "registry.opentofu.org/hashicorp/bar", but "foo" in the root module represents "registry.opentofu.org/hashicorp/foo".\n\nThis means the provider definition for "bar" within main.tftest.hcl, or other provider definitions with the same name, have been referenced by multiple run blocks and assigned to different provider types. \ No newline at end of file diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tf b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tf new file mode 100644 index 00000000000..3b15ce42c76 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + configuration_aliases = [foo.bar] + } + } +} + +provider "bar" {} + +resource "foo_resource" "resource" {} + +resource "bar_resource" "resource" {} diff --git a/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tftest.hcl b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tftest.hcl new file mode 100644 index 00000000000..d0c10347953 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/tests-provider-mismatch/main.tftest.hcl @@ -0,0 +1,32 @@ + +provider "foo" {} + +provider "foo" { + alias = "bar" +} + +provider "bar" { + alias = "foo" +} + +run "default_should_be_fine" {} + +run "bit_complicated_still_okay"{ + + providers = { + foo = foo + foo.bar = foo.bar + bar = bar.foo + } + +} + +run "mismatched_foo_direct" { + + providers = { + foo = bar // bad! 
+ foo.bar = foo.bar + bar = bar.foo + } + +} diff --git a/pkg/configs/testdata/config-diagnostics/unexpected-provider/main.tf b/pkg/configs/testdata/config-diagnostics/unexpected-provider/main.tf new file mode 100644 index 00000000000..cd859a7268f --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unexpected-provider/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + version = "1.0.0" + } + } +} + +module "mod" { + source = "./mod" + providers = { + foo = foo + } +} diff --git a/pkg/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf b/pkg/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf new file mode 100644 index 00000000000..f69bfa813bb --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf @@ -0,0 +1,2 @@ +resource "foo_resource" "a" { +} diff --git a/pkg/configs/testdata/config-diagnostics/unexpected-provider/warnings b/pkg/configs/testdata/config-diagnostics/unexpected-provider/warnings new file mode 100644 index 00000000000..515fb86c943 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unexpected-provider/warnings @@ -0,0 +1 @@ +unexpected-provider/main.tf:13,5-8: Reference to undefined provider; There is no explicit declaration for local provider name "foo" in module.mod, so OpenTofu is assuming you mean to pass a configuration for "hashicorp/foo". 
diff --git a/pkg/configs/testdata/config-diagnostics/unknown-root-provider/main.tf b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/main.tf new file mode 100644 index 00000000000..cf60f915ae7 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/main.tf @@ -0,0 +1,7 @@ +module "mod" { + source = "./mod" + providers = { + // bar may be required by the module, but the name is not defined here + bar = bar + } +} diff --git a/pkg/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf new file mode 100644 index 00000000000..ea8d0321d67 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/mod/main.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} + +resource "bar_resource" "x" { +} diff --git a/pkg/configs/testdata/config-diagnostics/unknown-root-provider/warnings b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/warnings new file mode 100644 index 00000000000..a076a5d7e05 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/unknown-root-provider/warnings @@ -0,0 +1 @@ +unknown-root-provider/main.tf:5,11-14: Reference to undefined provider; There is no explicit declaration for local provider name "bar" in the root module, so OpenTofu is assuming you mean to pass a configuration for provider "hashicorp/bar". 
diff --git a/pkg/configs/testdata/config-diagnostics/with-depends-on/main.tf b/pkg/configs/testdata/config-diagnostics/with-depends-on/main.tf new file mode 100644 index 00000000000..49c2dcd6e76 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/with-depends-on/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +module "mod2" { + source = "./mod1" + providers = { + foo = foo + } +} diff --git a/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf new file mode 100644 index 00000000000..c318484b5b3 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +resource "foo_resource" "a" { +} + +module "mod2" { + depends_on = [foo_resource.a] + // test fixture source is from root + source = "./mod1/mod2" + providers = { + foo = foo + } +} diff --git a/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf new file mode 100644 index 00000000000..eaa3550bd79 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +module "mod3" { + // test fixture source is from root + source = "./mod1/mod2/mod3" + providers = { + foo.bar = foo + } +} diff --git a/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf new file mode 100644 index 00000000000..b1827126d58 --- /dev/null +++ b/pkg/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + 
configuration_aliases = [ foo.bar ] + } + } +} + +resource "foo_resource" "a" { + providers = foo.bar +} diff --git a/pkg/configs/testdata/dir-empty/.gitkeep b/pkg/configs/testdata/dir-empty/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/testdata/duplicate-local-name/main.tf b/pkg/configs/testdata/duplicate-local-name/main.tf new file mode 100644 index 00000000000..9bac5451cdf --- /dev/null +++ b/pkg/configs/testdata/duplicate-local-name/main.tf @@ -0,0 +1,23 @@ +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + dupe = { + source = "hashicorp/test" + } + other = { + source = "hashicorp/default" + } + + wrong-name = { + source = "hashicorp/foo" + } + } +} + +provider "default" { +} + +resource "foo_resource" { +} diff --git a/pkg/configs/testdata/error-files/destroy-provisioners.tf b/pkg/configs/testdata/error-files/destroy-provisioners.tf new file mode 100644 index 00000000000..d4e34dae9d4 --- /dev/null +++ b/pkg/configs/testdata/error-files/destroy-provisioners.tf @@ -0,0 +1,46 @@ +locals { + user = "name" +} + +resource "null_resource" "a" { + connection { + host = self.hostname + user = local.user # ERROR: Invalid reference from destroy provisioner + } + + provisioner "remote-exec" { + when = destroy + on_failure = continue + index = count.index + key = each.key + + // path and terraform values are static, and do not create any + // dependencies. 
+ dir = path.module + workspace = terraform.workspace + } +} + +resource "null_resource" "b" { + connection { + host = self.hostname + # this is OK since there is no destroy provisioner + user = local.user + } + + provisioner "remote-exec" { + } +} + +resource "null_resource" "b" { + provisioner "remote-exec" { + when = destroy + on_failure = fail + connection { + host = self.hostname + user = local.user # ERROR: Invalid reference from destroy provisioner + } + + command = "echo ${local.name}" # ERROR: Invalid reference from destroy provisioner + } +} diff --git a/pkg/configs/testdata/error-files/duplicate-block-in-provisioner.tf b/pkg/configs/testdata/error-files/duplicate-block-in-provisioner.tf new file mode 100644 index 00000000000..70526eb30e8 --- /dev/null +++ b/pkg/configs/testdata/error-files/duplicate-block-in-provisioner.tf @@ -0,0 +1,9 @@ +resource "null_resource" "a" { + provisioner "local-exec" { + connection {} + connection {} # ERROR: Duplicate connection block + + _ {} + _ {} # ERROR: Duplicate escaping block + } +} \ No newline at end of file diff --git a/pkg/configs/testdata/error-files/ignore_changes.tf b/pkg/configs/testdata/error-files/ignore_changes.tf new file mode 100644 index 00000000000..061209c198d --- /dev/null +++ b/pkg/configs/testdata/error-files/ignore_changes.tf @@ -0,0 +1,5 @@ +resource "null_resource" "all" { + lifecycle { + ignore_changes = ["*"] # ERROR: Invalid ignore_changes wildcard + } +} diff --git a/pkg/configs/testdata/error-files/invalid-keyword-in-provisioner.tf b/pkg/configs/testdata/error-files/invalid-keyword-in-provisioner.tf new file mode 100644 index 00000000000..2fffcc1e9ef --- /dev/null +++ b/pkg/configs/testdata/error-files/invalid-keyword-in-provisioner.tf @@ -0,0 +1,7 @@ +resource "null_resource" "a" { + provisioner "local-exec" { + when = invalid # ERROR: Invalid "when" keyword + on_failure = invalid # ERROR: Invalid "on_failure" keyword + lifecycle {} # ERROR: Reserved block type name in provisioner block + 
} +} \ No newline at end of file diff --git a/pkg/configs/testdata/error-files/invalid_language_edition.tf b/pkg/configs/testdata/error-files/invalid_language_edition.tf new file mode 100644 index 00000000000..255aefe0a21 --- /dev/null +++ b/pkg/configs/testdata/error-files/invalid_language_edition.tf @@ -0,0 +1,4 @@ +terraform { + # The language argument expects a bare keyword, not a string. + language = "TF2021" # ERROR: Invalid language edition +} diff --git a/pkg/configs/testdata/error-files/module-invalid-registry-source-with-module.tf b/pkg/configs/testdata/error-files/module-invalid-registry-source-with-module.tf new file mode 100644 index 00000000000..0029be8f4af --- /dev/null +++ b/pkg/configs/testdata/error-files/module-invalid-registry-source-with-module.tf @@ -0,0 +1,5 @@ + +module "test" { + source = "---.com/HashiCorp/Consul/aws" # ERROR: Invalid registry module source address + version = "1.0.0" # Makes Terraform assume "source" is a module address +} diff --git a/pkg/configs/testdata/error-files/module-local-source-with-version.tf b/pkg/configs/testdata/error-files/module-local-source-with-version.tf new file mode 100644 index 00000000000..f570d65fe97 --- /dev/null +++ b/pkg/configs/testdata/error-files/module-local-source-with-version.tf @@ -0,0 +1,5 @@ + +module "test" { + source = "../boop" # ERROR: Invalid registry module source address + version = "1.0.0" # Makes Terraform assume "source" is a module address +} diff --git a/pkg/configs/testdata/error-files/precondition-postcondition-constant.tf b/pkg/configs/testdata/error-files/precondition-postcondition-constant.tf new file mode 100644 index 00000000000..30f44313e37 --- /dev/null +++ b/pkg/configs/testdata/error-files/precondition-postcondition-constant.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." 
+ } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." + } + } +} + +output "test" { + value = "" + + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } +} diff --git a/pkg/configs/testdata/error-files/precondition-postcondition-selfref.tf b/pkg/configs/testdata/error-files/precondition-postcondition-selfref.tf new file mode 100644 index 00000000000..5f295c1c986 --- /dev/null +++ b/pkg/configs/testdata/error-files/precondition-postcondition-selfref.tf @@ -0,0 +1,55 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = test.test.foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = test.test.foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = data.test.test.foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = data.test.test.foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} + +resource "test" "test_counted" { + count = 1 + + lifecycle { + precondition { + condition = test.test_counted[0].foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = test.test_counted[0].foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." 
+ } + } +} + +data "test" "test_counted" { + count = 1 + + lifecycle { + precondition { + condition = data.test.test_counted[0].foo # ERROR: Invalid reference in precondition + error_message = "Cannot refer to self." + } + postcondition { + condition = data.test.test_counted[0].foo # ERROR: Invalid reference in postcondition + error_message = "Cannot refer to self." + } + } +} diff --git a/pkg/configs/testdata/error-files/provider-source-prefix.tf b/pkg/configs/testdata/error-files/provider-source-prefix.tf new file mode 100644 index 00000000000..96811699fc3 --- /dev/null +++ b/pkg/configs/testdata/error-files/provider-source-prefix.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + usererror = { + source = "foo/terraform-provider-foo" # ERROR: Invalid provider type + } + badname = { + source = "foo/terraform-foo" # ERROR: Invalid provider type + } + } +} diff --git a/pkg/configs/testdata/error-files/required-providers-toplevel.tf b/pkg/configs/testdata/error-files/required-providers-toplevel.tf new file mode 100644 index 00000000000..de1bf12f57b --- /dev/null +++ b/pkg/configs/testdata/error-files/required-providers-toplevel.tf @@ -0,0 +1,9 @@ +# A top-level required_providers block is not valid, but we have a specialized +# error for it to hint the user to move it into a terraform block. +required_providers { # ERROR: Invalid required_providers block +} + +# This one is valid, and what the user should rewrite the above to be like. 
+terraform { + required_providers {} +} diff --git a/pkg/configs/testdata/error-files/unsupported_language_edition.tf b/pkg/configs/testdata/error-files/unsupported_language_edition.tf new file mode 100644 index 00000000000..0ee616f734a --- /dev/null +++ b/pkg/configs/testdata/error-files/unsupported_language_edition.tf @@ -0,0 +1,6 @@ +terraform { + # If a future change in this repository happens to make TF2038 a valid + # edition then this will start failing; in that case, change this file to + # select a different edition that isn't supported. + language = TF2038 # ERROR: Unsupported language edition +} diff --git a/pkg/configs/testdata/error-files/variable_type_quoted.tf b/pkg/configs/testdata/error-files/variable_type_quoted.tf new file mode 100644 index 00000000000..2292ce1549e --- /dev/null +++ b/pkg/configs/testdata/error-files/variable_type_quoted.tf @@ -0,0 +1,11 @@ +variable "bad_string" { + type = "string" # ERROR: Invalid quoted type constraints +} + +variable "bad_map" { + type = "map" # ERROR: Invalid quoted type constraints +} + +variable "bad_list" { + type = "list" # ERROR: Invalid quoted type constraints +} diff --git a/pkg/configs/testdata/error-files/vendor_provisioners.tf b/pkg/configs/testdata/error-files/vendor_provisioners.tf new file mode 100644 index 00000000000..4d2ec7892cc --- /dev/null +++ b/pkg/configs/testdata/error-files/vendor_provisioners.tf @@ -0,0 +1,3 @@ +resource "null_resource" "test" { + provisioner "habitat" {} # ERROR: The "habitat" provisioner has been removed +} diff --git a/pkg/configs/testdata/escaping-blocks/data/data-escaping-block.tf b/pkg/configs/testdata/escaping-blocks/data/data-escaping-block.tf new file mode 100644 index 00000000000..f7efa728566 --- /dev/null +++ b/pkg/configs/testdata/escaping-blocks/data/data-escaping-block.tf @@ -0,0 +1,35 @@ + +data "foo" "bar" { + count = 2 + + normal = "yes" + + normal_block {} + + _ { + # This "escaping block" is an escape hatch for when a resource + # type declares 
argument names that collide with meta-argument + # names. The examples below are not really realistic because they + # are long-standing names that predate the need for escaping, + # but we're using them as a proxy for new meta-arguments we might + # add in future language editions which might collide with + # names defined in pre-existing providers. + + # note that count is set both as a meta-argument above _and_ as + # an resource-type-specific argument here, which is valid and + # should result in both being populated. + count = "not actually count" + + # for_each is only set in here, not as a meta-argument + for_each = "not actually for_each" + + lifecycle { + # This is a literal lifecycle block, not a meta-argument block + } + + _ { + # It would be pretty weird for a resource type to define its own + # "_" block type, but that's valid to escape in here too. + } + } +} diff --git a/pkg/configs/testdata/escaping-blocks/module/child/nothing.tf b/pkg/configs/testdata/escaping-blocks/module/child/nothing.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/testdata/escaping-blocks/module/module-escaping-block.tf b/pkg/configs/testdata/escaping-blocks/module/module-escaping-block.tf new file mode 100644 index 00000000000..8d8a3549f93 --- /dev/null +++ b/pkg/configs/testdata/escaping-blocks/module/module-escaping-block.tf @@ -0,0 +1,36 @@ + +module "foo" { + source = "./child" + count = 2 + + normal = "yes" + + normal_block {} + + _ { + # This "escaping block" is an escape hatch for when a module + # declares input variable names that collide with meta-argument + # names. The examples below are not really realistic because they + # are long-standing names that predate the need for escaping, + # but we're using them as a proxy for new meta-arguments we might + # add in future language editions which might collide with + # names defined in pre-existing modules. 
+ + # note that count is set both as a meta-argument above _and_ as + # an resource-type-specific argument here, which is valid and + # should result in both being populated. + count = "not actually count" + + # for_each is only set in here, not as a meta-argument + for_each = "not actually for_each" + + lifecycle { + # This is a literal lifecycle block, not a meta-argument block + } + + _ { + # It would be pretty weird for a resource type to define its own + # "_" block type, but that's valid to escape in here too. + } + } +} diff --git a/pkg/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf b/pkg/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf new file mode 100644 index 00000000000..65a11b767b3 --- /dev/null +++ b/pkg/configs/testdata/escaping-blocks/provider/provider-escaping-block.tf @@ -0,0 +1,23 @@ + +provider "foo" { + alias = "bar" + + normal = "yes" + + _ { + # This "escaping block" is an escape hatch for when a provider + # declares argument names that collide with meta-argument + # names. The examples below are not really realistic because they + # are long-standing names that predate the need for escaping, + # but we're using them as a proxy for new meta-arguments we might + # add in future language editions which might collide with + # names defined in pre-existing providers. 
+ + # alias is set both as a meta-argument above _and_ + # as a provider-type-specific argument + alias = "not actually alias" + + # version is only set in here, not as a meta-argument + version = "not actually version" + } +} diff --git a/pkg/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf b/pkg/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf new file mode 100644 index 00000000000..4d356d61b65 --- /dev/null +++ b/pkg/configs/testdata/escaping-blocks/resource/resource-escaping-block.tf @@ -0,0 +1,43 @@ + +resource "foo" "bar" { + count = 2 + + normal = "yes" + + normal_block {} + + _ { + # This "escaping block" is an escape hatch for when a resource + # type declares argument names that collide with meta-argument + # names. The examples below are not really realistic because they + # are long-standing names that predate the need for escaping, + # but we're using them as a proxy for new meta-arguments we might + # add in future language editions which might collide with + # names defined in pre-existing providers. + + # note that count is set both as a meta-argument above _and_ as + # an resource-type-specific argument here, which is valid and + # should result in both being populated. + count = "not actually count" + + # for_each is only set in here, not as a meta-argument + for_each = "not actually for_each" + + lifecycle { + # This is a literal lifecycle block, not a meta-argument block + } + + _ { + # It would be pretty weird for a resource type to define its own + # "_" block type, but that's valid to escape in here too. 
+ } + } + + provisioner "boop" { + when = destroy + _ { + when = "hell freezes over" + } + normal = "yep" + } +} diff --git a/pkg/configs/testdata/experiments/concluded/concluded_experiment.tf b/pkg/configs/testdata/experiments/concluded/concluded_experiment.tf new file mode 100644 index 00000000000..33106388170 --- /dev/null +++ b/pkg/configs/testdata/experiments/concluded/concluded_experiment.tf @@ -0,0 +1,3 @@ +terraform { + experiments = [concluded] +} diff --git a/pkg/configs/testdata/experiments/current/current_experiment.tf b/pkg/configs/testdata/experiments/current/current_experiment.tf new file mode 100644 index 00000000000..d21b6609322 --- /dev/null +++ b/pkg/configs/testdata/experiments/current/current_experiment.tf @@ -0,0 +1,3 @@ +terraform { + experiments = [current] +} diff --git a/pkg/configs/testdata/experiments/invalid/invalid_experiments.tf b/pkg/configs/testdata/experiments/invalid/invalid_experiments.tf new file mode 100644 index 00000000000..d3b242b5280 --- /dev/null +++ b/pkg/configs/testdata/experiments/invalid/invalid_experiments.tf @@ -0,0 +1,3 @@ +terraform { + experiments = invalid +} diff --git a/pkg/configs/testdata/experiments/unknown/unknown_experiment.tf b/pkg/configs/testdata/experiments/unknown/unknown_experiment.tf new file mode 100644 index 00000000000..bbe36edf484 --- /dev/null +++ b/pkg/configs/testdata/experiments/unknown/unknown_experiment.tf @@ -0,0 +1,3 @@ +terraform { + experiments = [unknown] +} diff --git a/pkg/configs/testdata/invalid-files/data-count-and-for_each.tf b/pkg/configs/testdata/invalid-files/data-count-and-for_each.tf new file mode 100644 index 00000000000..7cc47632504 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/data-count-and-for_each.tf @@ -0,0 +1,4 @@ +data "test" "foo" { + count = 2 + for_each = ["a"] +} diff --git a/pkg/configs/testdata/invalid-files/data-invalid-provider-reference.tf b/pkg/configs/testdata/invalid-files/data-invalid-provider-reference.tf new file mode 100644 index 
00000000000..aef92fd3371 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/data-invalid-provider-reference.tf @@ -0,0 +1,3 @@ +data "test_resource" "t" { + provider = my_test +} diff --git a/pkg/configs/testdata/invalid-files/data-reserved-locals.tf b/pkg/configs/testdata/invalid-files/data-reserved-locals.tf new file mode 100644 index 00000000000..d6558f87ed8 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/data-reserved-locals.tf @@ -0,0 +1,3 @@ +data "test" "foo" { + locals {} +} diff --git a/pkg/configs/testdata/invalid-files/data-resource-lifecycle.tf b/pkg/configs/testdata/invalid-files/data-resource-lifecycle.tf new file mode 100644 index 00000000000..7c1aebe5a0b --- /dev/null +++ b/pkg/configs/testdata/invalid-files/data-resource-lifecycle.tf @@ -0,0 +1,7 @@ +data "example" "example" { + lifecycle { + # The lifecycle arguments are not valid for data resources: + # only the precondition and postcondition blocks are allowed. + ignore_changes = [] + } +} diff --git a/pkg/configs/testdata/invalid-files/everything-is-a-plan.tf b/pkg/configs/testdata/invalid-files/everything-is-a-plan.tf new file mode 100644 index 00000000000..e66766bea60 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/everything-is-a-plan.tf @@ -0,0 +1,11 @@ +# experiments.EverythingIsAPlan exists but is not registered as an active (or +# concluded) experiment, so this should fail until the experiment "gate" is +# removed. 
+terraform { + experiments = [everything_is_a_plan] +} + +moved { + from = test_instance.foo + to = test_instance.bar +} \ No newline at end of file diff --git a/pkg/configs/testdata/invalid-files/interp-in-data-label.tf b/pkg/configs/testdata/invalid-files/interp-in-data-label.tf new file mode 100644 index 00000000000..e457650ad2a --- /dev/null +++ b/pkg/configs/testdata/invalid-files/interp-in-data-label.tf @@ -0,0 +1,2 @@ +data "null_resource" "foo_${interpolations_invalid_here}" { +} diff --git a/pkg/configs/testdata/invalid-files/interp-in-rsrc-label.tf b/pkg/configs/testdata/invalid-files/interp-in-rsrc-label.tf new file mode 100644 index 00000000000..e7d310bfdaf --- /dev/null +++ b/pkg/configs/testdata/invalid-files/interp-in-rsrc-label.tf @@ -0,0 +1,2 @@ +resource "null_resource" "foo_${interpolations_invalid_here}" { +} diff --git a/pkg/configs/testdata/invalid-files/json-as-native-syntax.tf b/pkg/configs/testdata/invalid-files/json-as-native-syntax.tf new file mode 100644 index 00000000000..2e2809093aa --- /dev/null +++ b/pkg/configs/testdata/invalid-files/json-as-native-syntax.tf @@ -0,0 +1,3 @@ +{ + "terraform": {} +} diff --git a/pkg/configs/testdata/invalid-files/module-calls.tf b/pkg/configs/testdata/invalid-files/module-calls.tf new file mode 100644 index 00000000000..dbf29feef64 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/module-calls.tf @@ -0,0 +1,29 @@ + +module "foo" { + source = "./foo" + # this block intentionally left (almost) blank +} + +module "bar" { + source = "hashicorp/bar/aws" + + boom = "🎆" + yes = true +} + +module "baz" { + source = "git::https://example.com/" + + a = 1 + + count = 12 + for_each = ["a", "b", "c"] + + depends_on = [ + module.bar, + ] + + providers = { + aws = aws.foo + } +} diff --git a/pkg/configs/testdata/invalid-files/native-syntax-as-json.tf.json b/pkg/configs/testdata/invalid-files/native-syntax-as-json.tf.json new file mode 100644 index 00000000000..ca88e62b441 --- /dev/null +++ 
b/pkg/configs/testdata/invalid-files/native-syntax-as-json.tf.json @@ -0,0 +1,2 @@ +terraform { +} diff --git a/pkg/configs/testdata/invalid-files/precondition-postcondition-badref.tf b/pkg/configs/testdata/invalid-files/precondition-postcondition-badref.tf new file mode 100644 index 00000000000..ff5ff712962 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/precondition-postcondition-badref.tf @@ -0,0 +1,29 @@ +data "example" "example" { + foo = 5 + + lifecycle { + precondition { + condition = data.example.example.foo == 5 # ERROR: Invalid reference in precondition + error_message = "Must be five." + } + postcondition { + condition = self.foo == 5 + error_message = "Must be five, but is ${data.example.example.foo}." # ERROR: Invalid reference in postcondition + } + } +} + +resource "example" "example" { + foo = 5 + + lifecycle { + precondition { + condition = example.example.foo == 5 # ERROR: Invalid reference in precondition + error_message = "Must be five." + } + postcondition { + condition = self.foo == 5 + error_message = "Must be five, but is ${example.example.foo}." # ERROR: Invalid reference in postcondition + } + } +} diff --git a/pkg/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf b/pkg/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf new file mode 100644 index 00000000000..d0a6ab3074d --- /dev/null +++ b/pkg/configs/testdata/invalid-files/precondition-postcondition-missing-condition.tf @@ -0,0 +1,12 @@ +resource "example" "example" { + foo = 5 + + lifecycle { + precondition { # ERROR: Missing required argument + error_message = "Can a check block fail without a condition?" + } + postcondition { # ERROR: Missing required argument + error_message = "Do not try to pass the check; only realize that there is no check." 
+ } + } +} diff --git a/pkg/configs/testdata/invalid-files/provider-localname-normalization.tf b/pkg/configs/testdata/invalid-files/provider-localname-normalization.tf new file mode 100644 index 00000000000..34c6e62bab7 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/provider-localname-normalization.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + test = { + source = "mycorp/test" + } + } +} + +provider "TEST" { + +} + +resource test_resource "test" { + // this resource is (implicitly) provided by "mycorp/test" +} + +resource test_resource "TEST" { + // this resource is (explicitly) provided by "hashicorp/test" + provider = TEST +} diff --git a/pkg/configs/testdata/invalid-files/provider-reserved.tf b/pkg/configs/testdata/invalid-files/provider-reserved.tf new file mode 100644 index 00000000000..559474de998 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/provider-reserved.tf @@ -0,0 +1,16 @@ +provider "test" { + # These are okay + alias = "foo" + version = "1.0.0" + + # Provider-specific arguments are also okay + arbitrary = true + + # These are all reserved and should generate errors. 
+ count = 3 + depends_on = ["foo.bar"] + for_each = ["a", "b"] + source = "foo.example.com/baz/bar" + lifecycle {} + locals {} +} diff --git a/pkg/configs/testdata/invalid-files/provider-syntax.tf b/pkg/configs/testdata/invalid-files/provider-syntax.tf new file mode 100644 index 00000000000..7cf680fee94 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/provider-syntax.tf @@ -0,0 +1,3 @@ +provider "template" { + version = +} diff --git a/pkg/configs/testdata/invalid-files/resource-count-and-for_each.tf b/pkg/configs/testdata/invalid-files/resource-count-and-for_each.tf new file mode 100644 index 00000000000..530fef74f4d --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resource-count-and-for_each.tf @@ -0,0 +1,4 @@ +resource "test" "foo" { + count = 2 + for_each = ["a"] +} diff --git a/pkg/configs/testdata/invalid-files/resource-invalid-provider-reference.tf b/pkg/configs/testdata/invalid-files/resource-invalid-provider-reference.tf new file mode 100644 index 00000000000..6fe675f2729 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resource-invalid-provider-reference.tf @@ -0,0 +1,3 @@ +resource "test_resource" "t" { + provider = my_test +} diff --git a/pkg/configs/testdata/invalid-files/resource-lifecycle-badbool.tf b/pkg/configs/testdata/invalid-files/resource-lifecycle-badbool.tf new file mode 100644 index 00000000000..fdddb69445a --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resource-lifecycle-badbool.tf @@ -0,0 +1,5 @@ +resource "example" "example" { + lifecycle { + create_before_destroy = "ABSOLUTELY NOT" + } +} diff --git a/pkg/configs/testdata/invalid-files/resource-name-invalid.tf b/pkg/configs/testdata/invalid-files/resource-name-invalid.tf new file mode 100644 index 00000000000..bfd9fff5e56 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resource-name-invalid.tf @@ -0,0 +1,7 @@ +resource "test resource" "test_resource" { + +} + +data "test resource" "test_resource" { + +} diff --git 
a/pkg/configs/testdata/invalid-files/resource-reserved-locals.tf b/pkg/configs/testdata/invalid-files/resource-reserved-locals.tf new file mode 100644 index 00000000000..368c21f7cbd --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resource-reserved-locals.tf @@ -0,0 +1,3 @@ +resource "test" "foo" { + locals {} +} diff --git a/pkg/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf b/pkg/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf new file mode 100644 index 00000000000..9557379aaa2 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/resources-ignorechanges-all-legacymix.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "web" { + lifecycle { + ignore_changes = ["*", "foo"] + } +} diff --git a/pkg/configs/testdata/invalid-files/triggered-invalid-each.tf b/pkg/configs/testdata/invalid-files/triggered-invalid-each.tf new file mode 100644 index 00000000000..0649ecc9661 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/triggered-invalid-each.tf @@ -0,0 +1,7 @@ +resource "test_resource" "a" { + for_each = var.input + lifecycle { + // cannot use each.val + replace_triggered_by = [ test_resource.b[each.val] ] + } +} diff --git a/pkg/configs/testdata/invalid-files/triggered-invalid-expression.tf b/pkg/configs/testdata/invalid-files/triggered-invalid-expression.tf new file mode 100644 index 00000000000..8bf8af5bb0b --- /dev/null +++ b/pkg/configs/testdata/invalid-files/triggered-invalid-expression.tf @@ -0,0 +1,6 @@ +resource "test_resource" "a" { + count = 1 + lifecycle { + replace_triggered_by = [ not_a_reference ] + } +} diff --git a/pkg/configs/testdata/invalid-files/unexpected-attr.tf b/pkg/configs/testdata/invalid-files/unexpected-attr.tf new file mode 100644 index 00000000000..5abc475eb91 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/unexpected-attr.tf @@ -0,0 +1 @@ +foo = "bar" diff --git a/pkg/configs/testdata/invalid-files/unexpected-block.tf b/pkg/configs/testdata/invalid-files/unexpected-block.tf new 
file mode 100644 index 00000000000..491173c3863 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/unexpected-block.tf @@ -0,0 +1,2 @@ +varyable "whoops" { +} diff --git a/pkg/configs/testdata/invalid-files/variable-bad-default.tf b/pkg/configs/testdata/invalid-files/variable-bad-default.tf new file mode 100644 index 00000000000..9da568f71f6 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/variable-bad-default.tf @@ -0,0 +1,4 @@ +variable "incorrectly_typed_default" { + type = list(string) + default = "hello" +} diff --git a/pkg/configs/testdata/invalid-files/variable-bad-sensitive.tf b/pkg/configs/testdata/invalid-files/variable-bad-sensitive.tf new file mode 100644 index 00000000000..11156b3070e --- /dev/null +++ b/pkg/configs/testdata/invalid-files/variable-bad-sensitive.tf @@ -0,0 +1,3 @@ +variable "sensitive-value" { + sensitive = "123" # must be boolean +} diff --git a/pkg/configs/testdata/invalid-files/variable-type-unknown.tf b/pkg/configs/testdata/invalid-files/variable-type-unknown.tf new file mode 100644 index 00000000000..bcbb88d9086 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/variable-type-unknown.tf @@ -0,0 +1,3 @@ +variable "bad_type" { + type = notatype +} diff --git a/pkg/configs/testdata/invalid-files/variable-validation-condition-badref.tf b/pkg/configs/testdata/invalid-files/variable-validation-condition-badref.tf new file mode 100644 index 00000000000..9b9e935767a --- /dev/null +++ b/pkg/configs/testdata/invalid-files/variable-validation-condition-badref.tf @@ -0,0 +1,18 @@ + +locals { + foo = 1 +} + +variable "validation" { + validation { + condition = local.foo == var.validation # ERROR: Invalid reference in variable validation + error_message = "Must be five." + } +} + +variable "validation_error_expression" { + validation { + condition = var.validation_error_expression != 1 + error_message = "Cannot equal ${local.foo}." 
# ERROR: Invalid reference in variable validation + } +} diff --git a/pkg/configs/testdata/invalid-files/variable-validation-condition-noref.tf b/pkg/configs/testdata/invalid-files/variable-validation-condition-noref.tf new file mode 100644 index 00000000000..6eee91337f1 --- /dev/null +++ b/pkg/configs/testdata/invalid-files/variable-validation-condition-noref.tf @@ -0,0 +1,7 @@ + +variable "validation" { + validation { + condition = true # ERROR: Invalid variable validation condition + error_message = "Must be true." + } +} diff --git a/pkg/configs/testdata/invalid-files/zerolen.tf.json b/pkg/configs/testdata/invalid-files/zerolen.tf.json new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/testdata/invalid-import-files/import-and-module-clash.tf b/pkg/configs/testdata/invalid-import-files/import-and-module-clash.tf new file mode 100644 index 00000000000..3bd56565b40 --- /dev/null +++ b/pkg/configs/testdata/invalid-import-files/import-and-module-clash.tf @@ -0,0 +1,12 @@ + +module "importable_resource" { + source = "../valid-modules/importable-resource" +} + +provider "local" {} + +import { + provider = local + id = "foo/bar" + to = module.importable_resource.local_file.foo +} diff --git a/pkg/configs/testdata/invalid-import-files/import-and-no-resource.tf b/pkg/configs/testdata/invalid-import-files/import-and-no-resource.tf new file mode 100644 index 00000000000..0bb2b0121ad --- /dev/null +++ b/pkg/configs/testdata/invalid-import-files/import-and-no-resource.tf @@ -0,0 +1,10 @@ + +provider "local" {} + +import { + provider = local + id = "foo/bar" + to = local_file.foo_bar +} + +resource "local_file" "foo_bar" {} diff --git a/pkg/configs/testdata/invalid-import-files/import-and-resource-clash.tf b/pkg/configs/testdata/invalid-import-files/import-and-resource-clash.tf new file mode 100644 index 00000000000..c22b1d1a0d3 --- /dev/null +++ b/pkg/configs/testdata/invalid-import-files/import-and-resource-clash.tf @@ -0,0 +1,16 @@ + +provider 
"local" {} + +provider "local" { + alias = "alternate" +} + +import { + provider = local + id = "foo/bar" + to = local_file.foo_bar +} + +resource "local_file" "foo_bar" { + provider = local.alternate +} diff --git a/pkg/configs/testdata/invalid-modules/multiple-required-providers/a.tf b/pkg/configs/testdata/invalid-modules/multiple-required-providers/a.tf new file mode 100644 index 00000000000..a607ddd1ce1 --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/multiple-required-providers/a.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} diff --git a/pkg/configs/testdata/invalid-modules/multiple-required-providers/b.tf b/pkg/configs/testdata/invalid-modules/multiple-required-providers/b.tf new file mode 100644 index 00000000000..7672e92497a --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/multiple-required-providers/b.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + foo = { + version = "~>2.0.0" + } + } +} diff --git a/pkg/configs/testdata/invalid-modules/nullable-with-default-null/main.tf b/pkg/configs/testdata/invalid-modules/nullable-with-default-null/main.tf new file mode 100644 index 00000000000..bf7a58a4040 --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/nullable-with-default-null/main.tf @@ -0,0 +1,5 @@ +variable "in" { + type = number + nullable = false + default = null +} diff --git a/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf b/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf new file mode 100644 index 00000000000..2de9a58dd38 --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/main.tf @@ -0,0 +1,14 @@ +terraform { + cloud { + organization = "foo" + should_not_be_present_with_override = true + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf 
b/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf new file mode 100644 index 00000000000..17ef011509f --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/override-cloud-duplicates/override.tf @@ -0,0 +1,11 @@ +terraform { + cloud { + organization = "foo" + } +} + +terraform { + cloud { + organization = "bar" + } +} diff --git a/pkg/configs/testdata/invalid-modules/override-nonexist-variable/override.tf b/pkg/configs/testdata/invalid-modules/override-nonexist-variable/override.tf new file mode 100644 index 00000000000..720db27b896 --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/override-nonexist-variable/override.tf @@ -0,0 +1,3 @@ +variable "foo" { + description = "overridden" +} diff --git a/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf b/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf new file mode 100644 index 00000000000..f2d572ebeec --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/base.tf @@ -0,0 +1,4 @@ +variable "foo" { + type = list(string) + default = ["this is valid"] +} diff --git a/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf b/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf new file mode 100644 index 00000000000..3c979d9eade --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/override-variable-causes-bad-default/override.tf @@ -0,0 +1,5 @@ +variable "foo" { + type = string + # Since we didn't also override the default, this is now invalid because + # the existing default is not compatible with "string". 
+} diff --git a/pkg/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf b/pkg/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf new file mode 100644 index 00000000000..d8a0d444a0d --- /dev/null +++ b/pkg/configs/testdata/invalid-modules/provider-meta/invalid-interpolation.tf @@ -0,0 +1,10 @@ +terraform { + provider_meta "my-provider" { + hello = var.name + } +} + +variable "name" { + type = string +} + diff --git a/pkg/configs/testdata/nested-backend-warning/child/child.tf b/pkg/configs/testdata/nested-backend-warning/child/child.tf new file mode 100644 index 00000000000..5a6948e8519 --- /dev/null +++ b/pkg/configs/testdata/nested-backend-warning/child/child.tf @@ -0,0 +1,6 @@ +terraform { + # Only the root module can declare a backend. Terraform should emit a warning + # about this child module backend declaration. + backend "ignored" { + } +} diff --git a/pkg/configs/testdata/nested-backend-warning/root.tf b/pkg/configs/testdata/nested-backend-warning/root.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/configs/testdata/nested-backend-warning/root.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/configs/testdata/nested-cloud-warning/child/child.tf b/pkg/configs/testdata/nested-cloud-warning/child/child.tf new file mode 100644 index 00000000000..540b9217073 --- /dev/null +++ b/pkg/configs/testdata/nested-cloud-warning/child/child.tf @@ -0,0 +1,6 @@ +terraform { + # Only the root module can declare a Cloud configuration. Terraform should emit a warning + # about this child module Cloud declaration. 
+ cloud { + } +} diff --git a/pkg/configs/testdata/nested-cloud-warning/root.tf b/pkg/configs/testdata/nested-cloud-warning/root.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/configs/testdata/nested-cloud-warning/root.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/configs/testdata/nested-errors/child_a/child_a.tf b/pkg/configs/testdata/nested-errors/child_a/child_a.tf new file mode 100644 index 00000000000..d2fb89fae79 --- /dev/null +++ b/pkg/configs/testdata/nested-errors/child_a/child_a.tf @@ -0,0 +1,7 @@ + +module "child_c" { + # Note: this test case has an unrealistic module loader that resolves all + # sources as relative to the fixture directory, rather than to the + # current module directory as Terraform normally would. + source = "./child_c" +} diff --git a/pkg/configs/testdata/nested-errors/child_c/child_c.tf b/pkg/configs/testdata/nested-errors/child_c/child_c.tf new file mode 100644 index 00000000000..19ba443c884 --- /dev/null +++ b/pkg/configs/testdata/nested-errors/child_c/child_c.tf @@ -0,0 +1,6 @@ +output "hello" { + value = "hello" +} + +invalid "block" "type " { +} diff --git a/pkg/configs/testdata/nested-errors/root.tf b/pkg/configs/testdata/nested-errors/root.tf new file mode 100644 index 00000000000..d596eb06def --- /dev/null +++ b/pkg/configs/testdata/nested-errors/root.tf @@ -0,0 +1,3 @@ +module "child_a" { + source = "./child_a" +} diff --git a/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tf b/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tf new file mode 100644 index 00000000000..3ebcbbd6585 --- /dev/null +++ b/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + tls = { + source = "hashicorp/tls" + version = "~> 3.0" + } + } +} + +# There is no provider in required_providers called "implied", so this +# implicitly declares a dependency on "hashicorp/implied". 
+resource "implied_foo" "bar" { +} + +# There is no provider in required_providers called "terraform", but for +# this name in particular we imply terraform.io/builtin/terraform instead, +# to avoid selecting the now-unmaintained +# registry.opentofu.org/hashicorp/terraform. +data "terraform_remote_state" "bar" { +} diff --git a/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tftest.hcl b/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tftest.hcl new file mode 100644 index 00000000000..e368ef00da2 --- /dev/null +++ b/pkg/configs/testdata/provider-reqs-with-tests/provider-reqs-root.tftest.hcl @@ -0,0 +1,11 @@ +# There is no provider in required_providers called "configured", so the version +# constraint should come from this configuration block. +provider "configured" { + version = "~> 1.4" +} + +run "setup" { + module { + source = "./setup" + } +} diff --git a/pkg/configs/testdata/provider-reqs-with-tests/setup/setup.tf b/pkg/configs/testdata/provider-reqs-with-tests/setup/setup.tf new file mode 100644 index 00000000000..7341d6af93c --- /dev/null +++ b/pkg/configs/testdata/provider-reqs-with-tests/setup/setup.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + null = "~> 2.0.0" + random = { + version = "~> 1.2.0" + } + } +} diff --git a/pkg/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf b/pkg/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf new file mode 100644 index 00000000000..5ac9dfab4db --- /dev/null +++ b/pkg/configs/testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf @@ -0,0 +1,4 @@ +# There is no provider in required_providers called "grandchild", so this +# implicitly declares a dependency on "hashicorp/grandchild". 
+resource "grandchild_foo" "bar" { +} diff --git a/pkg/configs/testdata/provider-reqs/child/provider-reqs-child.tf b/pkg/configs/testdata/provider-reqs/child/provider-reqs-child.tf new file mode 100644 index 00000000000..4ebb659ddca --- /dev/null +++ b/pkg/configs/testdata/provider-reqs/child/provider-reqs-child.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + cloud = { + source = "tf.example.com/awesomecorp/happycloud" + } + null = { + # This should merge with the null provider constraint in the root module + version = "2.0.1" + } + } +} + +module "nested" { + source = "./grandchild" +} diff --git a/pkg/configs/testdata/provider-reqs/provider-reqs-root.tf b/pkg/configs/testdata/provider-reqs/provider-reqs-root.tf new file mode 100644 index 00000000000..0c72476dd0d --- /dev/null +++ b/pkg/configs/testdata/provider-reqs/provider-reqs-root.tf @@ -0,0 +1,34 @@ +terraform { + required_providers { + null = "~> 2.0.0" + random = { + version = "~> 1.2.0" + } + tls = { + source = "hashicorp/tls" + version = "~> 3.0" + } + } +} + +# There is no provider in required_providers called "implied", so this +# implicitly declares a dependency on "hashicorp/implied". +resource "implied_foo" "bar" { +} + +module "kinder" { + source = "./child" +} + +# There is no provider in required_providers called "terraform", but for +# this name in particular we imply terraform.io/builtin/terraform instead, +# to avoid selecting the now-unmaintained +# registry.opentofu.org/hashicorp/terraform. +data "terraform_remote_state" "bar" { +} + +# There is no provider in required_providers called "configured", so the version +# constraint should come from this configuration block. 
+provider "configured" { + version = "~> 1.4" +} diff --git a/pkg/configs/testdata/providers-explicit-fqn/root.tf b/pkg/configs/testdata/providers-explicit-fqn/root.tf new file mode 100644 index 00000000000..9a8d426edb9 --- /dev/null +++ b/pkg/configs/testdata/providers-explicit-fqn/root.tf @@ -0,0 +1,11 @@ + +terraform { + required_providers { + foo-test = { + source = "foo/test" + } + terraform = { + source = "not-builtin/not-terraform" + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf b/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf new file mode 100644 index 00000000000..6abfa801b59 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf @@ -0,0 +1,3 @@ +resource "tf_resource" "first" { + test_string = "hello" +} \ No newline at end of file diff --git a/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf.json b/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf.json new file mode 100644 index 00000000000..0a5f9809d8c --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/more-resources.tf.json @@ -0,0 +1,10 @@ +{ + "resource": { + "tf_json_resource": { + "a": { + "count": 1, + "test_string": "first" + } + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/resources.tf b/pkg/configs/testdata/tofu-and-tf-files/resources.tf new file mode 100644 index 00000000000..349e9f6200d --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/resources.tf @@ -0,0 +1,43 @@ +resource "aws_security_group" "firewall" { + lifecycle { + create_before_destroy = true + prevent_destroy = true + ignore_changes = [ + description, + ] + } + + connection { + host = "127.0.0.1" + } + + provisioner "local-exec" { + command = "echo hello" + + connection { + host = "10.1.2.1" + } + } + + provisioner "local-exec" { + command = "echo hello" + } +} + +resource "aws_instance" "web" { + count = 2 + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] + + network_interface { + device_index = 0 + 
description = "Main network interface" + } + + depends_on = [ + aws_security_group.firewall, + ] +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/resources.tf.json b/pkg/configs/testdata/tofu-and-tf-files/resources.tf.json new file mode 100644 index 00000000000..3242755b510 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/resources.tf.json @@ -0,0 +1,14 @@ +{ + "resource": { + "test_object": { + "a": { + "count": 1, + "test_string": "hello" + }, + "b": { + "count": 1, + "test_string": "world" + } + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/resources.tofu b/pkg/configs/testdata/tofu-and-tf-files/resources.tofu new file mode 100644 index 00000000000..9fe059515b0 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/resources.tofu @@ -0,0 +1,43 @@ +resource "aws_security_group" "firewall_tofu" { + lifecycle { + create_before_destroy = true + prevent_destroy = true + ignore_changes = [ + description, + ] + } + + connection { + host = "127.0.0.1" + } + + provisioner "local-exec" { + command = "echo hello" + + connection { + host = "10.1.2.1" + } + } + + provisioner "local-exec" { + command = "echo hello" + } +} + +resource "aws_instance" "web_tofu" { + count = 2 + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } + + depends_on = [ + aws_security_group.firewall, + ] +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/resources.tofu.json b/pkg/configs/testdata/tofu-and-tf-files/resources.tofu.json new file mode 100644 index 00000000000..75c9045efdb --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/resources.tofu.json @@ -0,0 +1,14 @@ +{ + "resource": { + "test_object": { + "a_tofu": { + "count": 1, + "test_string": "hello" + }, + "b_tofu": { + "count": 1, + "test_string": "world" + } + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tftest.hcl 
b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tftest.hcl new file mode 100644 index 00000000000..d3547ec2ace --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tftest.hcl @@ -0,0 +1,27 @@ +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + test_object.b + ] + } + + assert { + condition = test_object.b.test_string == "world" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run "test_run_two" { + variables { + input = "custom" + } + + assert { + condition = test_object.a.test_string == "hello" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tofutest.hcl b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tofutest.hcl new file mode 100644 index 00000000000..d3547ec2ace --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test.tofutest.hcl @@ -0,0 +1,27 @@ +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + test_object.b + ] + } + + assert { + condition = test_object.b.test_string == "world" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run "test_run_two" { + variables { + input = "custom" + } + + assert { + condition = test_object.a.test_string == "hello" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tftest.json b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tftest.json new file mode 100644 index 00000000000..7f939dc6fa7 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tftest.json @@ -0,0 +1,29 @@ +{ + "variables": { + "input": "default" + }, + "run": { + "test_run_one": { + "command": "plan", + "plan_options": { + "target": [ + "test_object.a" + ] + }, + "assert": [ + { + "condition": 
"${test_object.a.test_string} == hello", + "error_message": "invalid value" + } + ] + }, + "test_run_two": { + "assert": [ + { + "condition": "${test_object.b.test_string} == world", + "error_message": "invalid value" + } + ] + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tofutest.json b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tofutest.json new file mode 100644 index 00000000000..7f939dc6fa7 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/test/resources_test_json.tofutest.json @@ -0,0 +1,29 @@ +{ + "variables": { + "input": "default" + }, + "run": { + "test_run_one": { + "command": "plan", + "plan_options": { + "target": [ + "test_object.a" + ] + }, + "assert": [ + { + "condition": "${test_object.a.test_string} == hello", + "error_message": "invalid value" + } + ] + }, + "test_run_two": { + "assert": [ + { + "condition": "${test_object.b.test_string} == world", + "error_message": "invalid value" + } + ] + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables.tf b/pkg/configs/testdata/tofu-and-tf-files/variables.tf new file mode 100644 index 00000000000..506faf6fcdd --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables.tf @@ -0,0 +1,3 @@ +variable "should_override" { + default = "not overridden" +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables.tf.json b/pkg/configs/testdata/tofu-and-tf-files/variables.tf.json new file mode 100644 index 00000000000..d7b402dd125 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables.tf.json @@ -0,0 +1,7 @@ +{ + "variable": { + "should_override_json": { + "default": "not overridden" + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf new file mode 100644 index 00000000000..e53315ae238 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf @@ -0,0 +1,3 @@ +variable 
"should_override" { + default = "overridden by tf file" +} \ No newline at end of file diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf.json b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf.json new file mode 100644 index 00000000000..c12ad061bf1 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tf.json @@ -0,0 +1,7 @@ +{ + "variable": { + "should_override_json": { + "default": "overridden by tf file" + } + } +} diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu new file mode 100644 index 00000000000..9a995264dd7 --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu @@ -0,0 +1,3 @@ +variable "should_override" { + default = "overridden by tofu file" +} \ No newline at end of file diff --git a/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu.json b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu.json new file mode 100644 index 00000000000..2a73303202d --- /dev/null +++ b/pkg/configs/testdata/tofu-and-tf-files/variables_override.tofu.json @@ -0,0 +1,7 @@ +{ + "variable": { + "should_override_json": { + "default": "overridden by tofu file" + } + } +} diff --git a/pkg/configs/testdata/tofu-only-files/resources.tofu b/pkg/configs/testdata/tofu-only-files/resources.tofu new file mode 100644 index 00000000000..9fe059515b0 --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/resources.tofu @@ -0,0 +1,43 @@ +resource "aws_security_group" "firewall_tofu" { + lifecycle { + create_before_destroy = true + prevent_destroy = true + ignore_changes = [ + description, + ] + } + + connection { + host = "127.0.0.1" + } + + provisioner "local-exec" { + command = "echo hello" + + connection { + host = "10.1.2.1" + } + } + + provisioner "local-exec" { + command = "echo hello" + } +} + +resource "aws_instance" "web_tofu" { + count = 2 + ami = "ami-1234" + security_groups = [ 
+ "foo", + "bar", + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } + + depends_on = [ + aws_security_group.firewall, + ] +} diff --git a/pkg/configs/testdata/tofu-only-files/resources.tofu.json b/pkg/configs/testdata/tofu-only-files/resources.tofu.json new file mode 100644 index 00000000000..75c9045efdb --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/resources.tofu.json @@ -0,0 +1,14 @@ +{ + "resource": { + "test_object": { + "a_tofu": { + "count": 1, + "test_string": "hello" + }, + "b_tofu": { + "count": 1, + "test_string": "world" + } + } + } +} diff --git a/pkg/configs/testdata/tofu-only-files/test/resources_test.tofutest.hcl b/pkg/configs/testdata/tofu-only-files/test/resources_test.tofutest.hcl new file mode 100644 index 00000000000..d3547ec2ace --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/test/resources_test.tofutest.hcl @@ -0,0 +1,27 @@ +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + test_object.b + ] + } + + assert { + condition = test_object.b.test_string == "world" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run "test_run_two" { + variables { + input = "custom" + } + + assert { + condition = test_object.a.test_string == "hello" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/tofu-only-files/test/resources_test_json.tofutest.json b/pkg/configs/testdata/tofu-only-files/test/resources_test_json.tofutest.json new file mode 100644 index 00000000000..7f939dc6fa7 --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/test/resources_test_json.tofutest.json @@ -0,0 +1,29 @@ +{ + "variables": { + "input": "default" + }, + "run": { + "test_run_one": { + "command": "plan", + "plan_options": { + "target": [ + "test_object.a" + ] + }, + "assert": [ + { + "condition": "${test_object.a.test_string} == hello", + "error_message": "invalid value" + } + ] + }, + 
"test_run_two": { + "assert": [ + { + "condition": "${test_object.b.test_string} == world", + "error_message": "invalid value" + } + ] + } + } +} diff --git a/pkg/configs/testdata/tofu-only-files/variables.tofu b/pkg/configs/testdata/tofu-only-files/variables.tofu new file mode 100644 index 00000000000..e5b0f8f09e8 --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/variables.tofu @@ -0,0 +1,3 @@ +variable "should_override" { + default = "not overridden" +} \ No newline at end of file diff --git a/pkg/configs/testdata/tofu-only-files/variables.tofu.json b/pkg/configs/testdata/tofu-only-files/variables.tofu.json new file mode 100644 index 00000000000..d7b402dd125 --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/variables.tofu.json @@ -0,0 +1,7 @@ +{ + "variable": { + "should_override_json": { + "default": "not overridden" + } + } +} diff --git a/pkg/configs/testdata/tofu-only-files/variables_override.tofu b/pkg/configs/testdata/tofu-only-files/variables_override.tofu new file mode 100644 index 00000000000..9a995264dd7 --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/variables_override.tofu @@ -0,0 +1,3 @@ +variable "should_override" { + default = "overridden by tofu file" +} \ No newline at end of file diff --git a/pkg/configs/testdata/tofu-only-files/variables_override.tofu.json b/pkg/configs/testdata/tofu-only-files/variables_override.tofu.json new file mode 100644 index 00000000000..2a73303202d --- /dev/null +++ b/pkg/configs/testdata/tofu-only-files/variables_override.tofu.json @@ -0,0 +1,7 @@ +{ + "variable": { + "should_override_json": { + "default": "overridden by tofu file" + } + } +} diff --git a/pkg/configs/testdata/valid-files/backend.tf b/pkg/configs/testdata/valid-files/backend.tf new file mode 100644 index 00000000000..bd8e0f669ab --- /dev/null +++ b/pkg/configs/testdata/valid-files/backend.tf @@ -0,0 +1,10 @@ + +terraform { + backend "example" { + foo = "bar" + + baz { + bar = "foo" + } + } +} diff --git 
a/pkg/configs/testdata/valid-files/cloud.tf b/pkg/configs/testdata/valid-files/cloud.tf new file mode 100644 index 00000000000..91985fcad9d --- /dev/null +++ b/pkg/configs/testdata/valid-files/cloud.tf @@ -0,0 +1,10 @@ + +terraform { + cloud { + foo = "bar" + + baz { + bar = "foo" + } + } +} diff --git a/pkg/configs/testdata/valid-files/data-sources.tf b/pkg/configs/testdata/valid-files/data-sources.tf new file mode 100644 index 00000000000..f14dffdac63 --- /dev/null +++ b/pkg/configs/testdata/valid-files/data-sources.tf @@ -0,0 +1,15 @@ +data "http" "example1" { +} + +data "http" "example2" { + url = "http://example.com/" + + request_headers = { + "Accept" = "application/json" + } + + count = 5 + depends_on = [ + data.http.example1, + ] +} diff --git a/pkg/configs/testdata/valid-files/empty.tf b/pkg/configs/testdata/valid-files/empty.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/configs/testdata/valid-files/empty.tf.json b/pkg/configs/testdata/valid-files/empty.tf.json new file mode 100644 index 00000000000..0967ef424bc --- /dev/null +++ b/pkg/configs/testdata/valid-files/empty.tf.json @@ -0,0 +1 @@ +{} diff --git a/pkg/configs/testdata/valid-files/locals.tf b/pkg/configs/testdata/valid-files/locals.tf new file mode 100644 index 00000000000..f2ee4a7c4c8 --- /dev/null +++ b/pkg/configs/testdata/valid-files/locals.tf @@ -0,0 +1,16 @@ + +locals { + # This block intentionally left blank +} + +locals { + foo = "foo" + bar = true +} + +locals { + baz = "oink" + dunno = "🤷" + rowing = "🚣‍♀️" + π = 3.14159265359 +} diff --git a/pkg/configs/testdata/valid-files/locals.tf.json b/pkg/configs/testdata/valid-files/locals.tf.json new file mode 100644 index 00000000000..525f69d6fd4 --- /dev/null +++ b/pkg/configs/testdata/valid-files/locals.tf.json @@ -0,0 +1,10 @@ +{ + "locals": { + "foo": "foo", + "bar": true, + "baz": "oink", + "dunno": "🤷", + "rowing": "🚣‍♀️", + "π": 3.14159265359 + } +} diff --git 
a/pkg/configs/testdata/valid-files/object-optional-attrs.tf b/pkg/configs/testdata/valid-files/object-optional-attrs.tf new file mode 100644 index 00000000000..8b7fda9a747 --- /dev/null +++ b/pkg/configs/testdata/valid-files/object-optional-attrs.tf @@ -0,0 +1,38 @@ +variable "a" { + type = object({ + foo = optional(string) + bar = optional(bool, true) + }) +} + +variable "b" { + type = list( + object({ + foo = optional(string) + }) + ) +} + +variable "c" { + type = set( + object({ + foo = optional(string) + }) + ) +} + +variable "d" { + type = map( + object({ + foo = optional(string) + }) + ) +} + +variable "e" { + type = object({ + foo = string + bar = optional(bool, true) + }) + default = null +} diff --git a/pkg/configs/testdata/valid-files/outputs.tf b/pkg/configs/testdata/valid-files/outputs.tf new file mode 100644 index 00000000000..7a8066686ff --- /dev/null +++ b/pkg/configs/testdata/valid-files/outputs.tf @@ -0,0 +1,25 @@ + +output "foo" { + value = "hello" +} + +output "bar" { + value = local.bar +} + +output "baz" { + value = "ssshhhhhhh" + sensitive = true +} + +output "cheeze_pizza" { + description = "Nothing special" + value = "🍕" +} + +output "π" { + value = 3.14159265359 + depends_on = [ + pizza.cheese, + ] +} diff --git a/pkg/configs/testdata/valid-files/preconditions-postconditions.tf b/pkg/configs/testdata/valid-files/preconditions-postconditions.tf new file mode 100644 index 00000000000..6f48840a8bf --- /dev/null +++ b/pkg/configs/testdata/valid-files/preconditions-postconditions.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." 
+ } + } +} + +output "test" { + value = "" + + precondition { + condition = path.module != "" + error_message = "Must be true." + } +} diff --git a/pkg/configs/testdata/valid-files/provider-configs.tf b/pkg/configs/testdata/valid-files/provider-configs.tf new file mode 100644 index 00000000000..6077d641185 --- /dev/null +++ b/pkg/configs/testdata/valid-files/provider-configs.tf @@ -0,0 +1,13 @@ + +provider "foo" { +} + +provider "bar" { + other = 12 +} + +provider "bar" { + other = 13 + + alias = "bar" +} diff --git a/pkg/configs/testdata/valid-files/providers-explicit-implied.tf b/pkg/configs/testdata/valid-files/providers-explicit-implied.tf new file mode 100644 index 00000000000..49c063a1e26 --- /dev/null +++ b/pkg/configs/testdata/valid-files/providers-explicit-implied.tf @@ -0,0 +1,34 @@ +provider "aws" { + +} + +provider "template" { + alias = "foo" +} + +resource "aws_instance" "foo" { + +} + +resource "null_resource" "foo" { + +} + +import { + id = "directory/filename" + to = local_file.foo +} + +import { + provider = template.foo + id = "directory/foo_filename" + to = local_file.bar +} + +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} diff --git a/pkg/configs/testdata/valid-files/refactoring.tf b/pkg/configs/testdata/valid-files/refactoring.tf new file mode 100644 index 00000000000..ccba1188f07 --- /dev/null +++ b/pkg/configs/testdata/valid-files/refactoring.tf @@ -0,0 +1,13 @@ +import { + to = aws_instance.import + id = 1 +} + +moved { + from = aws_instance.moved_from + to = aws_instance.moved_to +} + +removed { + from = aws_instance.removed +} \ No newline at end of file diff --git a/pkg/configs/testdata/valid-files/references.tf.json b/pkg/configs/testdata/valid-files/references.tf.json new file mode 100644 index 00000000000..3fe7e0afff3 --- /dev/null +++ b/pkg/configs/testdata/valid-files/references.tf.json @@ -0,0 +1,11 @@ +{ + "//": "The purpose of this test file is to show that we can use template syntax 
unwrapping to provide complex expressions without generating the deprecation warnings we'd expect for native syntax.", + "resource": { + "null_resource": { + "baz": { + "//": "This particular use of template syntax is redundant, but we permit it because this is the documented way to use more complex expressions in JSON.", + "triggers": "${ {} }" + } + } + } +} diff --git a/pkg/configs/testdata/valid-files/required-providers.tf b/pkg/configs/testdata/valid-files/required-providers.tf new file mode 100644 index 00000000000..271df4a57f4 --- /dev/null +++ b/pkg/configs/testdata/valid-files/required-providers.tf @@ -0,0 +1,7 @@ + +terraform { + required_providers { + aws = "~> 1.0.0" + consul = "~> 1.2.0" + } +} diff --git a/pkg/configs/testdata/valid-files/required-version.tf b/pkg/configs/testdata/valid-files/required-version.tf new file mode 100644 index 00000000000..77c9a35706f --- /dev/null +++ b/pkg/configs/testdata/valid-files/required-version.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = "~> 0.12.0" +} diff --git a/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf b/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf new file mode 100644 index 00000000000..32cd2328800 --- /dev/null +++ b/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "web" { + lifecycle { + ignore_changes = all + } +} diff --git a/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf.json b/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf.json new file mode 100644 index 00000000000..c22020826af --- /dev/null +++ b/pkg/configs/testdata/valid-files/resources-ignorechanges-all.tf.json @@ -0,0 +1,11 @@ +{ + "resource": { + "aws_instance": { + "web": { + "lifecycle": { + "ignore_changes": "all" + } + } + } + } +} diff --git a/pkg/configs/testdata/valid-files/resources.tf b/pkg/configs/testdata/valid-files/resources.tf new file mode 100644 index 00000000000..aab038ea8c3 --- /dev/null 
+++ b/pkg/configs/testdata/valid-files/resources.tf @@ -0,0 +1,49 @@ +resource "aws_security_group" "firewall" { + lifecycle { + create_before_destroy = true + prevent_destroy = true + ignore_changes = [ + description, + ] + } + + connection { + host = "127.0.0.1" + } + + provisioner "local-exec" { + command = "echo hello" + + connection { + host = "10.1.2.1" + } + } + + provisioner "local-exec" { + command = "echo hello" + } +} + +resource "aws_instance" "web" { + count = 2 + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } + + depends_on = [ + aws_security_group.firewall, + ] +} + +resource "aws_instance" "depends" { + lifecycle { + replace_triggered_by = [ aws_instance.web[1], aws_security_group.firewall.id ] + } +} diff --git a/pkg/configs/testdata/valid-files/resources.tf.json b/pkg/configs/testdata/valid-files/resources.tf.json new file mode 100644 index 00000000000..627963d0baf --- /dev/null +++ b/pkg/configs/testdata/valid-files/resources.tf.json @@ -0,0 +1,18 @@ +{ + "resource": { + "test_object": { + "a": { + "count": 1, + "test_string": "new" + }, + "b": { + "count": 1, + "lifecycle": { + "replace_triggered_by": [ + "test_object.a[count.index].test_string" + ] + } + } + } + } +} diff --git a/pkg/configs/testdata/valid-files/valid-language-edition.tf b/pkg/configs/testdata/valid-files/valid-language-edition.tf new file mode 100644 index 00000000000..60f2df3eb49 --- /dev/null +++ b/pkg/configs/testdata/valid-files/valid-language-edition.tf @@ -0,0 +1,8 @@ +terraform { + # If we drop support for TF2021 in a future Terraform release then this + # test will fail. In that case, update this to a newer edition that is + # still supported, because the purpose of this test is to verify that + # we can successfully decode the language argument, not specifically + # that we support TF2021. 
+ language = TF2021 +} diff --git a/pkg/configs/testdata/valid-files/variable_validation.tf b/pkg/configs/testdata/valid-files/variable_validation.tf new file mode 100644 index 00000000000..20e227e0611 --- /dev/null +++ b/pkg/configs/testdata/valid-files/variable_validation.tf @@ -0,0 +1,22 @@ +variable "validation" { + validation { + condition = var.validation == 5 + error_message = "Must be five." + } +} + +variable "validation_function" { + type = list(string) + validation { + condition = length(var.validation_function) > 0 + error_message = "Must not be empty." + } +} + +variable "validation_error_expression" { + type = list(string) + validation { + condition = length(var.validation_error_expression) < 10 + error_message = "Too long (${length(var.validation_error_expression)} is greater than 10)." + } +} diff --git a/pkg/configs/testdata/valid-files/variables.tf b/pkg/configs/testdata/valid-files/variables.tf new file mode 100644 index 00000000000..16640c983ba --- /dev/null +++ b/pkg/configs/testdata/valid-files/variables.tf @@ -0,0 +1,44 @@ + +variable "foo" { +} + +variable "bar" { + default = "hello" +} + +variable "baz" { + type = list +} + +variable "bar-baz" { + default = [] + type = list(string) +} + +variable "cheeze_pizza" { + description = "Nothing special" +} + +variable "π" { + default = 3.14159265359 +} + +variable "sensitive_value" { + default = { + "a" = 1, + "b" = 2 + } + sensitive = true +} + +variable "nullable" { + type = string + nullable = true + default = "ok" +} + +variable "nullable_default_null" { + type = map(string) + nullable = true + default = null +} diff --git a/pkg/configs/testdata/valid-files/variables.tf.json b/pkg/configs/testdata/valid-files/variables.tf.json new file mode 100644 index 00000000000..4e43eb200a5 --- /dev/null +++ b/pkg/configs/testdata/valid-files/variables.tf.json @@ -0,0 +1,28 @@ +{ + "variable": { + "foo": {}, + "bar": { + "default": "hello" + }, + "baz": { + "type": "list" + }, + "bar-baz": { + "default": 
[], + "type": "list" + }, + "cheese_pizza": { + "description": "Nothing special" + }, + "π": { + "default": 3.14159265359 + }, + "sensitive_value": { + "default": { + "a": 1, + "b": 2 + }, + "sensitive": true + } + } +} diff --git a/pkg/configs/testdata/valid-files/version-variable.tf b/pkg/configs/testdata/valid-files/version-variable.tf new file mode 100644 index 00000000000..910d119664a --- /dev/null +++ b/pkg/configs/testdata/valid-files/version-variable.tf @@ -0,0 +1,6 @@ +variable "module_version" { default = "v1.0" } + +module "foo" { + source = "mod/foo/foo" + version = var.module_version +} diff --git a/pkg/configs/testdata/valid-modules/empty/README b/pkg/configs/testdata/valid-modules/empty/README new file mode 100644 index 00000000000..6d937077a86 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/empty/README @@ -0,0 +1,2 @@ +This directory is intentionally empty, to test what happens when we load +a module that contains no configuration files. diff --git a/pkg/configs/testdata/valid-modules/implied-providers/providers.tf b/pkg/configs/testdata/valid-modules/implied-providers/providers.tf new file mode 100644 index 00000000000..63b5b14df95 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/implied-providers/providers.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + // This is an expected "real world" example of a community provider, which + // has resources named "foo_*" and will likely be used in configurations + // with the local name of "foo". + foo = { + source = "registry.acme.corp/acme/foo" + } + + // However, implied provider lookups are based on local name, not provider + // type, and this example clarifies that. Only resources with addresses + // starting "whatever_" will be assigned this provider implicitly. + // + // This is _not_ a recommended usage pattern. The best practice is for + // local name and type to be the same, and only use a different local name + // if there are provider type collisions. 
+ whatever = { + source = "acme/something" + } + } +} diff --git a/pkg/configs/testdata/valid-modules/implied-providers/resources.tf b/pkg/configs/testdata/valid-modules/implied-providers/resources.tf new file mode 100644 index 00000000000..6b4a8af87a4 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/implied-providers/resources.tf @@ -0,0 +1,12 @@ +// These resources map to the configured "foo" provider" +resource foo_resource "a" {} +data foo_resource "b" {} + +// These resources map to a default "hashicorp/bar" provider +resource bar_resource "c" {} +data bar_resource "d" {} + +// These resources map to the configured "whatever" provider, which has FQN +// "acme/something". +resource whatever_resource "e" {} +data whatever_resource "f" {} diff --git a/pkg/configs/testdata/valid-modules/importable-resource/main.tf b/pkg/configs/testdata/valid-modules/importable-resource/main.tf new file mode 100644 index 00000000000..3bd2dcb5c50 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/importable-resource/main.tf @@ -0,0 +1,2 @@ + +resource "local_file" "foo" {} diff --git a/pkg/configs/testdata/valid-modules/importable-resource/providers.tf b/pkg/configs/testdata/valid-modules/importable-resource/providers.tf new file mode 100644 index 00000000000..4124dcaee02 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/importable-resource/providers.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + local = { + source = "hashicorp/local" + } + } +} diff --git a/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf b/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf new file mode 100644 index 00000000000..fab882d7f4f --- /dev/null +++ b/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-1.tf @@ -0,0 +1,24 @@ +moved { + from = test.foo + to = test.bar +} + +moved { + from = test.foo + to = test.bar["bloop"] +} + +moved { + from = module.a + to = module.b +} + +moved { + from = module.a + to = module.a["foo"] +} + +moved { + 
from = test.foo + to = module.a.test.foo +} diff --git a/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf b/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf new file mode 100644 index 00000000000..afc9f5a767a --- /dev/null +++ b/pkg/configs/testdata/valid-modules/moved-blocks/moved-blocks-2.tf @@ -0,0 +1,6 @@ +# One more moved block in a separate file just to make sure the +# appending of multiple files works properly. +moved { + from = data.test.foo + to = data.test.bar +} diff --git a/pkg/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf b/pkg/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf new file mode 100644 index 00000000000..79e449bf462 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/nested-providers-fqns/child/main.tf @@ -0,0 +1,25 @@ +terraform { + required_providers { + bar-test = { + source = "bar/test" + } + foo-test = { + source = "foo/test" + configuration_aliases = [foo-test.other] + } + } +} + +resource "test_instance" "explicit" { + // explicitly setting provider bar-test + provider = bar-test +} + +resource "test_instance" "implicit" { + // since the provider type name "test" does not match an entry in + // required_providers, the default provider "test" should be used +} + +resource "test_instance" "other" { + provider = foo-test.other +} diff --git a/pkg/configs/testdata/valid-modules/nested-providers-fqns/main.tf b/pkg/configs/testdata/valid-modules/nested-providers-fqns/main.tf new file mode 100644 index 00000000000..27988f42b28 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/nested-providers-fqns/main.tf @@ -0,0 +1,29 @@ +terraform { + required_providers { + foo-test = { + source = "foo/test" + } + } +} + +provider "foo-test" {} + +module "child" { + source = "./child" + providers = { + foo-test.other = foo-test + } +} + +resource "test_instance" "explicit" { + provider = foo-test +} + +data "test_resource" "explicit" { + provider = foo-test +} + +resource 
"test_instance" "implicit" { + // since the provider type name "test" does not match an entry in + // required_providers, the default provider "test" should be used +} diff --git a/pkg/configs/testdata/valid-modules/override-backend-no-base/main.tf b/pkg/configs/testdata/valid-modules/override-backend-no-base/main.tf new file mode 100644 index 00000000000..7bb1380e65f --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend-no-base/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/valid-modules/override-backend-no-base/override.tf b/pkg/configs/testdata/valid-modules/override-backend-no-base/override.tf new file mode 100644 index 00000000000..d57fade63e8 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend-no-base/override.tf @@ -0,0 +1,5 @@ +terraform { + backend "bar" { + path = "CHANGED/relative/path/to/terraform.tfstate" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-backend-with-cloud/main.tf b/pkg/configs/testdata/valid-modules/override-backend-with-cloud/main.tf new file mode 100644 index 00000000000..56fb72f3295 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend-with-cloud/main.tf @@ -0,0 +1,13 @@ +terraform { + backend "foo" { + path = "relative/path/to/terraform.tfstate" + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/valid-modules/override-backend-with-cloud/override.tf b/pkg/configs/testdata/valid-modules/override-backend-with-cloud/override.tf new file mode 100644 index 00000000000..51ae925fb98 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend-with-cloud/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "foo" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-backend/main.tf 
b/pkg/configs/testdata/valid-modules/override-backend/main.tf new file mode 100644 index 00000000000..56fb72f3295 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend/main.tf @@ -0,0 +1,13 @@ +terraform { + backend "foo" { + path = "relative/path/to/terraform.tfstate" + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/valid-modules/override-backend/override.tf b/pkg/configs/testdata/valid-modules/override-backend/override.tf new file mode 100644 index 00000000000..d57fade63e8 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-backend/override.tf @@ -0,0 +1,5 @@ +terraform { + backend "bar" { + path = "CHANGED/relative/path/to/terraform.tfstate" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-cloud-no-base/main.tf b/pkg/configs/testdata/valid-modules/override-cloud-no-base/main.tf new file mode 100644 index 00000000000..7bb1380e65f --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-cloud-no-base/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = [ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/valid-modules/override-cloud-no-base/override.tf b/pkg/configs/testdata/valid-modules/override-cloud-no-base/override.tf new file mode 100644 index 00000000000..51ae925fb98 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-cloud-no-base/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "foo" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-cloud/main.tf b/pkg/configs/testdata/valid-modules/override-cloud/main.tf new file mode 100644 index 00000000000..2de9a58dd38 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-cloud/main.tf @@ -0,0 +1,14 @@ +terraform { + cloud { + organization = "foo" + should_not_be_present_with_override = true + } +} + +resource "aws_instance" "web" { + ami = "ami-1234" + security_groups = 
[ + "foo", + "bar", + ] +} diff --git a/pkg/configs/testdata/valid-modules/override-cloud/override.tf b/pkg/configs/testdata/valid-modules/override-cloud/override.tf new file mode 100644 index 00000000000..a4a7752ca59 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-cloud/override.tf @@ -0,0 +1,5 @@ +terraform { + cloud { + organization = "CHANGED" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf b/pkg/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf new file mode 100644 index 00000000000..e168fac4792 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-dynamic-block-base/a_override.tf @@ -0,0 +1,6 @@ + +resource "test" "foo" { + foo { + from = "override" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-dynamic-block-base/base.tf b/pkg/configs/testdata/valid-modules/override-dynamic-block-base/base.tf new file mode 100644 index 00000000000..0f3ad4ca7e6 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-dynamic-block-base/base.tf @@ -0,0 +1,9 @@ + +resource "test" "foo" { + dynamic "foo" { + for_each = [] + content { + from = "base" + } + } +} diff --git a/pkg/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf b/pkg/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf new file mode 100644 index 00000000000..d37ce790e46 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-dynamic-block-override/a_override.tf @@ -0,0 +1,9 @@ + +resource "test" "foo" { + dynamic "foo" { + for_each = [] + content { + from = "override" + } + } +} diff --git a/pkg/configs/testdata/valid-modules/override-dynamic-block-override/base.tf b/pkg/configs/testdata/valid-modules/override-dynamic-block-override/base.tf new file mode 100644 index 00000000000..3ed55a6b5ea --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-dynamic-block-override/base.tf @@ -0,0 +1,6 @@ + +resource "test" "foo" { + foo { + 
from = "base" + } +} diff --git a/pkg/configs/testdata/valid-modules/override-ignore-changes/main.tf b/pkg/configs/testdata/valid-modules/override-ignore-changes/main.tf new file mode 100644 index 00000000000..55ae5151d94 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-ignore-changes/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + foo = "bar" +} diff --git a/pkg/configs/testdata/valid-modules/override-ignore-changes/main_override.tf b/pkg/configs/testdata/valid-modules/override-ignore-changes/main_override.tf new file mode 100644 index 00000000000..f9cd9a5d337 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-ignore-changes/main_override.tf @@ -0,0 +1,6 @@ +resource "test_instance" "foo" { + foo = "bar" + lifecycle { + ignore_changes = all + } +} diff --git a/pkg/configs/testdata/valid-modules/override-module/a_override.tf b/pkg/configs/testdata/valid-modules/override-module/a_override.tf new file mode 100644 index 00000000000..79628453c7b --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-module/a_override.tf @@ -0,0 +1,11 @@ + +module "example" { + source = "./example2-a_override" + + foo = "a_override foo" + new = "a_override new" + + providers = { + test = test.a_override + } +} diff --git a/pkg/configs/testdata/valid-modules/override-module/b_override.tf b/pkg/configs/testdata/valid-modules/override-module/b_override.tf new file mode 100644 index 00000000000..b624a8ad7ed --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-module/b_override.tf @@ -0,0 +1,9 @@ + +module "example" { + new = "b_override new" + newer = "b_override newer" + + providers = { + test = test.b_override + } +} diff --git a/pkg/configs/testdata/valid-modules/override-module/primary.tf b/pkg/configs/testdata/valid-modules/override-module/primary.tf new file mode 100644 index 00000000000..01a870da881 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-module/primary.tf @@ -0,0 +1,13 @@ + +module "example" { + 
source = "./example2" + + kept = "primary kept" + foo = "primary foo" + + providers = { + test = test.foo + } + depends_on = [null_resource.test] +} +resource "null_resource" "test" {} diff --git a/pkg/configs/testdata/valid-modules/override-output-sensitive/override.tf b/pkg/configs/testdata/valid-modules/override-output-sensitive/override.tf new file mode 100644 index 00000000000..b965fc12472 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-output-sensitive/override.tf @@ -0,0 +1,3 @@ +output "foo" { + sensitive = true +} diff --git a/pkg/configs/testdata/valid-modules/override-output-sensitive/primary.tf b/pkg/configs/testdata/valid-modules/override-output-sensitive/primary.tf new file mode 100644 index 00000000000..13bd3a99bbf --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-output-sensitive/primary.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "Hello World" +} diff --git a/pkg/configs/testdata/valid-modules/override-resource-provider/a_override.tf b/pkg/configs/testdata/valid-modules/override-resource-provider/a_override.tf new file mode 100644 index 00000000000..9a7c3f9e3d5 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-resource-provider/a_override.tf @@ -0,0 +1,3 @@ +resource "test_instance" "explicit" { + provider = bar-test +} diff --git a/pkg/configs/testdata/valid-modules/override-resource-provider/base.tf b/pkg/configs/testdata/valid-modules/override-resource-provider/base.tf new file mode 100644 index 00000000000..0ea12dde564 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-resource-provider/base.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + foo-test = { + source = "foo/test" + } + bar-test = { + source = "bar/test" + } + } +} + +resource "test_instance" "explicit" { + provider = foo-test +} + +// the provider for this resource should default to "hashicorp/test" +resource "test_instance" "default" {} diff --git 
a/pkg/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf b/pkg/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf new file mode 100644 index 00000000000..f2ece93df9f --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable-sensitive/a_override.tf @@ -0,0 +1,23 @@ +variable "false_true" { + sensitive = true +} + +variable "true_false" { + sensitive = false +} + +variable "false_false_true" { + sensitive = false +} + +variable "true_true_false" { + sensitive = true +} + +variable "false_true_false" { + sensitive = true +} + +variable "true_false_true" { + sensitive = false +} diff --git a/pkg/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf b/pkg/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf new file mode 100644 index 00000000000..e58e5b3d137 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable-sensitive/b_override.tf @@ -0,0 +1,21 @@ +variable "false_true" { +} + +variable "true_false" { +} + +variable "false_false_true" { + sensitive = true +} + +variable "true_true_false" { + sensitive = false +} + +variable "false_true_false" { + sensitive = false +} + +variable "true_false_true" { + sensitive = true +} diff --git a/pkg/configs/testdata/valid-modules/override-variable-sensitive/primary.tf b/pkg/configs/testdata/valid-modules/override-variable-sensitive/primary.tf new file mode 100644 index 00000000000..eb79a6eccfc --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable-sensitive/primary.tf @@ -0,0 +1,23 @@ +variable "false_true" { + sensitive = false +} + +variable "true_false" { + sensitive = true +} + +variable "false_false_true" { + sensitive = false +} + +variable "true_true_false" { + sensitive = true +} + +variable "false_true_false" { + sensitive = false +} + +variable "true_false_true" { + sensitive = true +} diff --git a/pkg/configs/testdata/valid-modules/override-variable/a_override.tf 
b/pkg/configs/testdata/valid-modules/override-variable/a_override.tf new file mode 100644 index 00000000000..6ec4d1ef3b3 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable/a_override.tf @@ -0,0 +1,9 @@ +variable "fully_overridden" { + default = "a_override" + description = "a_override description" + type = string +} + +variable "partially_overridden" { + default = "a_override partial" +} diff --git a/pkg/configs/testdata/valid-modules/override-variable/b_override.tf b/pkg/configs/testdata/valid-modules/override-variable/b_override.tf new file mode 100644 index 00000000000..f09ce5380fc --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable/b_override.tf @@ -0,0 +1,10 @@ +variable "fully_overridden" { + nullable = false + default = "b_override" + description = "b_override description" + type = string +} + +variable "partially_overridden" { + default = "b_override partial" +} diff --git a/pkg/configs/testdata/valid-modules/override-variable/primary.tf b/pkg/configs/testdata/valid-modules/override-variable/primary.tf new file mode 100644 index 00000000000..981b86b8e2d --- /dev/null +++ b/pkg/configs/testdata/valid-modules/override-variable/primary.tf @@ -0,0 +1,11 @@ +variable "fully_overridden" { + default = "base" + description = "base description" + type = string +} + +variable "partially_overridden" { + default = "base" + description = "base description" + type = string +} diff --git a/pkg/configs/testdata/valid-modules/provider-aliases/main.tf b/pkg/configs/testdata/valid-modules/provider-aliases/main.tf new file mode 100644 index 00000000000..dd9fb084d48 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/provider-aliases/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + foo-test = { + source = "foo/test" + configuration_aliases = [foo-test.a, foo-test.b] + } + } +} + +resource "test_instance" "explicit" { + provider = foo-test.a +} + +data "test_resource" "explicit" { + provider = foo-test.b +} + diff --git 
a/pkg/configs/testdata/valid-modules/provider-meta/main.tf b/pkg/configs/testdata/valid-modules/provider-meta/main.tf new file mode 100644 index 00000000000..073e35b5cb6 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/provider-meta/main.tf @@ -0,0 +1,5 @@ +terraform { + provider_meta "my-provider" { + hello = "test-module" + } +} diff --git a/pkg/configs/testdata/valid-modules/providers-fqns/main.tf b/pkg/configs/testdata/valid-modules/providers-fqns/main.tf new file mode 100644 index 00000000000..ed92c9af5cf --- /dev/null +++ b/pkg/configs/testdata/valid-modules/providers-fqns/main.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + foo-test = { + source = "foo/test" + } + } +} + +provider "foo-test" {} diff --git a/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-1.tf b/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-1.tf new file mode 100644 index 00000000000..2a35a033205 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-1.tf @@ -0,0 +1,19 @@ +removed { + from = test.foo +} + +removed { + from = test.foo +} + +removed { + from = module.a +} + +removed { + from = module.a +} + +removed { + from = test.foo +} diff --git a/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-2.tf b/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-2.tf new file mode 100644 index 00000000000..6a9f6b15696 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/removed-blocks/removed-blocks-2.tf @@ -0,0 +1,5 @@ +# One more removed block in a separate file just to make sure the +# appending of multiple files works properly. 
+removed { + from = test.boop +} diff --git a/pkg/configs/testdata/valid-modules/required-providers-after-resource/main.tf b/pkg/configs/testdata/valid-modules/required-providers-after-resource/main.tf new file mode 100644 index 00000000000..8d40ec4b961 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/required-providers-after-resource/main.tf @@ -0,0 +1,3 @@ +resource test_instance "my-instance" { + provider = test +} \ No newline at end of file diff --git a/pkg/configs/testdata/valid-modules/required-providers-after-resource/providers.tf b/pkg/configs/testdata/valid-modules/required-providers-after-resource/providers.tf new file mode 100644 index 00000000000..687ef1bdd96 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/required-providers-after-resource/providers.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + test = { + source = "foo/test" + version = "~>1.0.0" + } + } +} \ No newline at end of file diff --git a/pkg/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf b/pkg/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf new file mode 100644 index 00000000000..b83ef749406 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/required-providers-overrides/bar_provider_override.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + bar = { + source = "blorp/bar" + version = "~>2.0.0" + } + } +} + diff --git a/pkg/configs/testdata/valid-modules/required-providers-overrides/main.tf b/pkg/configs/testdata/valid-modules/required-providers-overrides/main.tf new file mode 100644 index 00000000000..f7e58f2e2ad --- /dev/null +++ b/pkg/configs/testdata/valid-modules/required-providers-overrides/main.tf @@ -0,0 +1,7 @@ +resource bar_thing "bt" { + provider = bar +} + +resource foo_thing "ft" { + provider = foo +} diff --git a/pkg/configs/testdata/valid-modules/required-providers-overrides/providers.tf b/pkg/configs/testdata/valid-modules/required-providers-overrides/providers.tf new 
file mode 100644 index 00000000000..9b1d897f4b4 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/required-providers-overrides/providers.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + bar = { + source = "acme/bar" + } + + foo = { + source = "acme/foo" + } + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-expect-failures/main.tf b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/main.tf new file mode 100644 index 00000000000..b54c29ba6fd --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string +} + + +resource "foo_resource" "a" { + value = var.input +} + +output "output" { + value = foo_resource.a.value +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_one.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_one.tftest.hcl new file mode 100644 index 00000000000..23bcde5d57f --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_one.tftest.hcl @@ -0,0 +1,10 @@ +variables { + input = "default" +} + +run "test_run_one" { + expect_failures = [ + input.input, + output.output, + ] +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_two.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_two.tftest.hcl new file mode 100644 index 00000000000..31b1322e899 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-expect-failures/test_case_two.tftest.hcl @@ -0,0 +1,9 @@ +variables { + input = "default" +} + +run "test_run_one" { + expect_failures = [ + foo_resource.a, + ] +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-json/main.tf.json b/pkg/configs/testdata/valid-modules/with-tests-json/main.tf.json new file mode 100644 index 00000000000..03624c1463e --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-json/main.tf.json @@ -0,0 +1,17 @@ 
+{ + "variable": { + "input": { + "type": "string" + } + }, + "resource": { + "foo_resource": { + "a": { + "value": "${var.input}" + } + }, + "bar_resource": { + "c": {} + } + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-json/test_case_two.tftest.json b/pkg/configs/testdata/valid-modules/with-tests-json/test_case_two.tftest.json new file mode 100644 index 00000000000..43e677db64b --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-json/test_case_two.tftest.json @@ -0,0 +1,45 @@ +{ + "run": { + "test_run_one": { + "variables": { + "input": "test_run_one" + }, + "assert": [ + { + "condition": "${foo_resource.a.value} == test_run_one", + "error_message": "invalid value" + } + ] + }, + "test_run_two": { + "plan_options": { + "mode": "refresh-only" + }, + "variables": { + "input": "test_run_two" + }, + "assert": [ + { + "condition": "${foo_resource.a.value} == test_run_one", + "error_message": "invalid value" + } + ] + }, + "test_run_three": { + "variables": { + "input": "test_run_three" + }, + "plan_options": { + "replace": [ + "bar_resource.c" + ] + }, + "assert": [ + { + "condition": "${foo_resource.a.value} == test_run_three", + "error_message": "invalid value" + } + ] + } + } +} \ No newline at end of file diff --git a/pkg/configs/testdata/valid-modules/with-tests-json/tests/test_case_one.tftest.json b/pkg/configs/testdata/valid-modules/with-tests-json/tests/test_case_one.tftest.json new file mode 100644 index 00000000000..934173aca38 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-json/tests/test_case_one.tftest.json @@ -0,0 +1,32 @@ +{ + "variables": { + "input": "default" + }, + "run": { + "test_run_one": { + "command": "plan", + "plan_options": { + "target": [ + "foo_resource.a" + ] + }, + "assert": [ + { + "condition": "${foo_resource.a.value} == default", + "error_message": "invalid value" + } + ] + }, + "test_run_two": { + "variables": { + "input": "custom" + }, + "assert": [ + { + "condition": 
"${foo_resource.a.value} == custom", + "error_message": "invalid value" + } + ] + } + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-module/main.tf b/pkg/configs/testdata/valid-modules/with-tests-module/main.tf new file mode 100644 index 00000000000..3b60dc01cc7 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-module/main.tf @@ -0,0 +1,12 @@ + +variable "managed_id" { + type = string +} + +data "test_data_source" "managed_data" { + id = var.managed_id +} + +resource "test_resource" "created" { + value = data.test_data_source.managed_data.value +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-module/main.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-module/main.tftest.hcl new file mode 100644 index 00000000000..1f2a6a94aa9 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-module/main.tftest.hcl @@ -0,0 +1,21 @@ +variables { + managed_id = "B853C121" +} + +run "setup" { + module { + source = "./setup" + } + + variables { + value = "Hello, world!" + id = "B853C121" + } +} + +run "test" { + assert { + condition = test_resource.created.value == "Hello, world!" 
+ error_message = "bad value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-module/setup/main.tf b/pkg/configs/testdata/valid-modules/with-tests-module/setup/main.tf new file mode 100644 index 00000000000..49056bbea73 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-module/setup/main.tf @@ -0,0 +1,13 @@ +variable "value" { + type = string +} + +variable "id" { + type = string +} + +resource "test_resource" "managed" { + provider = setup + id = var.id + value = var.value +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tf b/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tf new file mode 100644 index 00000000000..f304def8b1b --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tf @@ -0,0 +1,2 @@ + +resource "test_resource" "resource" {} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tftest.hcl new file mode 100644 index 00000000000..3172b8c6dc4 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested-module/main.tftest.hcl @@ -0,0 +1,14 @@ +variables { + value = "Hello, world!" +} + +run "load_module" { + module { + source = "./setup" + } + + assert { + condition = output.value == "Hello, world!" 
+ error_message = "invalid value" + } +} \ No newline at end of file diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/main.tf b/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/main.tf new file mode 100644 index 00000000000..54e468b7dba --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/main.tf @@ -0,0 +1,14 @@ + +variable "value" { + type = string +} + +module "child" { + source = "./other" + + value = var.value +} + +output "value" { + value = module.child.value +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/other/main.tf b/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/other/main.tf new file mode 100644 index 00000000000..e1f6e52f3f1 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested-module/setup/other/main.tf @@ -0,0 +1,12 @@ + +variable "value" { + type = string +} + +resource "test_resource" "resource" { + value = var.value +} + +output "value" { + value = test_resource.resource.value +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested/main.tf b/pkg/configs/testdata/valid-modules/with-tests-nested/main.tf new file mode 100644 index 00000000000..b84d4f3c416 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested/main.tf @@ -0,0 +1,11 @@ + +variable "input" { + type = string +} + + +resource "foo_resource" "a" { + value = var.input +} + +resource "bar_resource" "c" {} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_one.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_one.tftest.hcl new file mode 100644 index 00000000000..01ef5dff053 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_one.tftest.hcl @@ -0,0 +1,31 @@ +variables { + input = "default" +} + +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + 
foo_resource.a + ] + } + + assert { + condition = foo_resource.a.value == "default" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run "test_run_two" { + variables { + input = "custom" + } + + assert { + condition = foo_resource.a.value == "custom" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_two.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_two.tftest.hcl new file mode 100644 index 00000000000..b2be9172b64 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-nested/tests/test_case_two.tftest.hcl @@ -0,0 +1,46 @@ +# test_run_one does a complete apply +run "test_run_one" { + variables { + input = "test_run_one" + } + + assert { + condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_two does a refresh only apply +run "test_run_two" { + plan_options { + mode = refresh-only + } + + variables { + input = "test_run_two" + } + + assert { + # value shouldn't change, as we're doing a refresh-only apply. 
+ condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_three does an apply with a replace operation +run "test_run_three" { + variables { + input = "test_run_three" + } + + plan_options { + replace = [ + bar_resource.c + ] + } + + assert { + condition = foo_resource.a.value == "test_run_three" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-very-nested/main.tf b/pkg/configs/testdata/valid-modules/with-tests-very-nested/main.tf new file mode 100644 index 00000000000..b84d4f3c416 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-very-nested/main.tf @@ -0,0 +1,11 @@ + +variable "input" { + type = string +} + + +resource "foo_resource" "a" { + value = var.input +} + +resource "bar_resource" "c" {} diff --git a/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_one.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_one.tftest.hcl new file mode 100644 index 00000000000..01ef5dff053 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_one.tftest.hcl @@ -0,0 +1,31 @@ +variables { + input = "default" +} + +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + foo_resource.a + ] + } + + assert { + condition = foo_resource.a.value == "default" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run "test_run_two" { + variables { + input = "custom" + } + + assert { + condition = foo_resource.a.value == "custom" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_two.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_two.tftest.hcl new file mode 100644 index 00000000000..b2be9172b64 --- /dev/null +++ 
b/pkg/configs/testdata/valid-modules/with-tests-very-nested/very/nested/test_case_two.tftest.hcl @@ -0,0 +1,46 @@ +# test_run_one does a complete apply +run "test_run_one" { + variables { + input = "test_run_one" + } + + assert { + condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_two does a refresh only apply +run "test_run_two" { + plan_options { + mode = refresh-only + } + + variables { + input = "test_run_two" + } + + assert { + # value shouldn't change, as we're doing a refresh-only apply. + condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_three does an apply with a replace operation +run "test_run_three" { + variables { + input = "test_run_three" + } + + plan_options { + replace = [ + bar_resource.c + ] + } + + assert { + condition = foo_resource.a.value == "test_run_three" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests/main.tf b/pkg/configs/testdata/valid-modules/with-tests/main.tf new file mode 100644 index 00000000000..b84d4f3c416 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests/main.tf @@ -0,0 +1,11 @@ + +variable "input" { + type = string +} + + +resource "foo_resource" "a" { + value = var.input +} + +resource "bar_resource" "c" {} diff --git a/pkg/configs/testdata/valid-modules/with-tests/test_case_one.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests/test_case_one.tftest.hcl new file mode 100644 index 00000000000..01ef5dff053 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests/test_case_one.tftest.hcl @@ -0,0 +1,31 @@ +variables { + input = "default" +} + +# test_run_one runs a partial plan +run "test_run_one" { + command = plan + + plan_options { + target = [ + foo_resource.a + ] + } + + assert { + condition = foo_resource.a.value == "default" + error_message = "invalid value" + } +} + +# test_run_two does a complete apply operation +run 
"test_run_two" { + variables { + input = "custom" + } + + assert { + condition = foo_resource.a.value == "custom" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/valid-modules/with-tests/test_case_two.tftest.hcl b/pkg/configs/testdata/valid-modules/with-tests/test_case_two.tftest.hcl new file mode 100644 index 00000000000..b2be9172b64 --- /dev/null +++ b/pkg/configs/testdata/valid-modules/with-tests/test_case_two.tftest.hcl @@ -0,0 +1,46 @@ +# test_run_one does a complete apply +run "test_run_one" { + variables { + input = "test_run_one" + } + + assert { + condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_two does a refresh only apply +run "test_run_two" { + plan_options { + mode = refresh-only + } + + variables { + input = "test_run_two" + } + + assert { + # value shouldn't change, as we're doing a refresh-only apply. + condition = foo_resource.a.value == "test_run_one" + error_message = "invalid value" + } +} + +# test_run_three does an apply with a replace operation +run "test_run_three" { + variables { + input = "test_run_three" + } + + plan_options { + replace = [ + bar_resource.c + ] + } + + assert { + condition = foo_resource.a.value == "test_run_three" + error_message = "invalid value" + } +} diff --git a/pkg/configs/testdata/warning-files/depends_on.tf b/pkg/configs/testdata/warning-files/depends_on.tf new file mode 100644 index 00000000000..17e1bf34a5e --- /dev/null +++ b/pkg/configs/testdata/warning-files/depends_on.tf @@ -0,0 +1,6 @@ +resource "null_resource" "a" { +} + +resource "null_resource" "b" { + depends_on = ["null_resource.a"] # WARNING: Quoted references are deprecated +} diff --git a/pkg/configs/testdata/warning-files/provider_ref.tf b/pkg/configs/testdata/warning-files/provider_ref.tf new file mode 100644 index 00000000000..6f5525ed77e --- /dev/null +++ b/pkg/configs/testdata/warning-files/provider_ref.tf @@ -0,0 +1,7 @@ +provider "null" { + alias = "foo" +} + 
+resource "null_resource" "test" { + provider = "null.foo" # WARNING: Quoted references are deprecated +} diff --git a/pkg/configs/testdata/warning-files/provisioner_keyword.tf b/pkg/configs/testdata/warning-files/provisioner_keyword.tf new file mode 100644 index 00000000000..61fe72bdd0c --- /dev/null +++ b/pkg/configs/testdata/warning-files/provisioner_keyword.tf @@ -0,0 +1,6 @@ +resource "null_resource" "a" { + provisioner "local-exec" { + when = "create" # WARNING: Quoted keywords are deprecated + on_failure = "fail" # WARNING: Quoted keywords are deprecated + } +} diff --git a/pkg/configs/variable_type_hint.go b/pkg/configs/variable_type_hint.go new file mode 100644 index 00000000000..7c868249ff3 --- /dev/null +++ b/pkg/configs/variable_type_hint.go @@ -0,0 +1,50 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +// VariableTypeHint is an enumeration used for the Variable.TypeHint field, +// which is an incompletely-specified type for the variable which is used +// as a hint for whether a value provided in an ambiguous context (on the +// command line or in an environment variable) should be taken literally as a +// string or parsed as an HCL expression to produce a data structure. +// +// The type hint is applied to runtime values as well, but since it does not +// accurately describe a precise type it is not fully-sufficient to infer +// the dynamic type of a value passed through a variable. +// +// These hints use inaccurate terminology for historical reasons. 
Full details +// are in the documentation for each constant in this enumeration, but in +// summary: +// +// - TypeHintString requires a primitive type +// - TypeHintList requires a type that could be converted to a tuple +// - TypeHintMap requires a type that could be converted to an object +type VariableTypeHint rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint + +// TypeHintNone indicates the absence of a type hint. Values specified in +// ambiguous contexts will be treated as literal strings, as if TypeHintString +// were selected, but no runtime value checks will be applied. This is a reasonable +// type hint for a module that is never intended to be used at the top-level +// of a configuration, since descendent modules never receive values from +// ambiguous contexts. +const TypeHintNone VariableTypeHint = 0 + +// TypeHintString indicates that a value provided in an ambiguous context +// should be treated as a literal string, and additionally requires that the +// runtime value for the variable is of a primitive type (string, number, bool). +const TypeHintString VariableTypeHint = 'S' + +// TypeHintList indicates that a value provided in an ambiguous context should +// be treated as an HCL expression, and additionally requires that the +// runtime value for the variable is of a tuple, list, or set type. +const TypeHintList VariableTypeHint = 'L' + +// TypeHintMap indicates that a value provided in an ambiguous context should +// be treated as an HCL expression, and additionally requires that the +// runtime value for the variable is of an object or map type. +const TypeHintMap VariableTypeHint = 'M' diff --git a/pkg/configs/variabletypehint_string.go b/pkg/configs/variabletypehint_string.go new file mode 100644 index 00000000000..2b50428ce12 --- /dev/null +++ b/pkg/configs/variabletypehint_string.go @@ -0,0 +1,39 @@ +// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
+ +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeHintNone-0] + _ = x[TypeHintString-83] + _ = x[TypeHintList-76] + _ = x[TypeHintMap-77] +} + +const ( + _VariableTypeHint_name_0 = "TypeHintNone" + _VariableTypeHint_name_1 = "TypeHintListTypeHintMap" + _VariableTypeHint_name_2 = "TypeHintString" +) + +var ( + _VariableTypeHint_index_1 = [...]uint8{0, 12, 23} +) + +func (i VariableTypeHint) String() string { + switch { + case i == 0: + return _VariableTypeHint_name_0 + case 76 <= i && i <= 77: + i -= 76 + return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]] + case i == 83: + return _VariableTypeHint_name_2 + default: + return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/configs/version_constraint.go b/pkg/configs/version_constraint.go new file mode 100644 index 00000000000..8a2df5cac9e --- /dev/null +++ b/pkg/configs/version_constraint.go @@ -0,0 +1,81 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configs + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// VersionConstraint represents a version constraint on some resource +// (e.g. OpenTofu Core, a provider, a module, ...) that carries with it +// a source range so that a helpful diagnostic can be printed in the event +// that a particular constraint does not match. 
+type VersionConstraint struct { + Required version.Constraints + DeclRange hcl.Range +} + +func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { + val, diags := attr.Expr.Value(nil) + if diags.HasErrors() { + return VersionConstraint{}, diags + } + return decodeVersionConstraintValue(attr, val) +} +func decodeVersionConstraintValue(attr *hcl.Attribute, val cty.Value) (VersionConstraint, hcl.Diagnostics) { + var diags hcl.Diagnostics + + ret := VersionConstraint{ + DeclRange: attr.Range, + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + if val.IsNull() { + // A null version constraint is strange, but we'll just treat it + // like an empty constraint set. + return ret, diags + } + + if !val.IsWhollyKnown() { + // If there is a syntax error, HCL sets the value of the given attribute + // to cty.DynamicVal. A diagnostic for the syntax error will already + // bubble up, so we will move forward gracefully here. + return ret, diags + } + + constraintStr := val.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + ret.Required = constraints + return ret, diags +} diff --git a/pkg/copy/copy_dir.go b/pkg/copy/copy_dir.go new file mode 100644 index 00000000000..e5f162bd9e0 --- /dev/null +++ b/pkg/copy/copy_dir.go @@ -0,0 +1,151 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package copy + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// CopyDir recursively copies all of the files within the directory given in +// src to the directory given in dst. +// +// Both directories should already exist. If the destination directory is +// non-empty then the new files will merge in with the old, overwriting any +// files that have a relative path in common between source and destination. +// +// Recursive copying of directories is inevitably a rather opinionated sort of +// operation, so this function won't be appropriate for all use-cases. Some +// of the "opinions" it has are described in the following paragraphs: +// +// Symlinks in the source directory are recreated with the same target in the +// destination directory. If the symlink is to a directory itself, that +// directory is not recursively visited for further copying. +// +// File and directory modes are not preserved exactly, but the executable +// flag is preserved for files on operating systems where it is significant. +// +// Any "dot files" it encounters along the way are skipped, even on platforms +// that do not normally ascribe special meaning to files with names starting +// with dots. 
+// +// Callers may rely on the above details and other undocumented details of +// this function, so if you intend to change it be sure to review the callers +// first and make sure they are compatible with the change you intend to make. +func CopyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := SameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. 
+ srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// SameFile returns true if the two given paths refer to the same physical +// file on disk, using the unique file identifiers from the underlying +// operating system. For example, on Unix systems this checks whether the +// two files are on the same device and have the same inode. +func SameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aInfo, err := os.Lstat(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bInfo, err := os.Lstat(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + return os.SameFile(aInfo, bInfo), nil +} diff --git a/pkg/copy/copy_dir_test.go b/pkg/copy/copy_dir_test.go new file mode 100644 index 00000000000..5dc6454e4f9 --- /dev/null +++ b/pkg/copy/copy_dir_test.go @@ -0,0 +1,103 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package copy + +import ( + "os" + "path/filepath" + "testing" +) + +// TestCopyDir_symlinks sets up a directory with two submodules, +// one being a symlink to the other +// +// The resultant file structure is as follows: +// ├── modules +// │   ├── symlink-module -> test-module +// │   └── test-module +// │   └── main.tf +// └── target +// ├── symlink-module -> test-module +// └── test-module +// └── main.tf + +func TestCopyDir_symlinks(t *testing.T) { + tmpdir := t.TempDir() + + moduleDir := filepath.Join(tmpdir, "modules") + err := os.Mkdir(moduleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + subModuleDir := filepath.Join(moduleDir, "test-module") + err = os.Mkdir(subModuleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + err = os.WriteFile(filepath.Join(subModuleDir, "main.tf"), []byte("hello"), 0644) + if err != nil { + t.Fatal(err) + } + + err = os.Symlink("test-module", filepath.Join(moduleDir, "symlink-module")) + if err != nil { + t.Fatal(err) + } + + targetDir := filepath.Join(tmpdir, "target") + os.Mkdir(targetDir, os.ModePerm) + + err = CopyDir(targetDir, moduleDir) + if err != nil { + t.Fatal(err) + } + + if _, err = os.Lstat(filepath.Join(targetDir, "test-module", "main.tf")); os.IsNotExist(err) { + t.Fatal("target test-module/main.tf was not created") + } + + if _, err = os.Lstat(filepath.Join(targetDir, "symlink-module", "main.tf")); os.IsNotExist(err) { + t.Fatal("target symlink-module/main.tf was not created") + } +} + +func TestCopyDir_symlink_file(t *testing.T) { + tmpdir := t.TempDir() + + moduleDir := filepath.Join(tmpdir, "modules") + err := os.Mkdir(moduleDir, os.ModePerm) + if err != nil { + t.Fatal(err) + } + + err = os.WriteFile(filepath.Join(moduleDir, "main.tf"), []byte("hello"), 0644) + if err != nil { + t.Fatal(err) + } + + err = os.Symlink("main.tf", filepath.Join(moduleDir, "symlink.tf")) + if err != nil { + t.Fatal(err) + } + + targetDir := filepath.Join(tmpdir, "target") 
+ os.Mkdir(targetDir, os.ModePerm) + + err = CopyDir(targetDir, moduleDir) + if err != nil { + t.Fatal(err) + } + + if _, err = os.Lstat(filepath.Join(targetDir, "main.tf")); os.IsNotExist(err) { + t.Fatal("target/main.tf was not created") + } + + if _, err = os.Lstat(filepath.Join(targetDir, "symlink.tf")); os.IsNotExist(err) { + t.Fatal("target/symlink.tf was not created") + } +} diff --git a/pkg/copy/copy_file.go b/pkg/copy/copy_file.go new file mode 100644 index 00000000000..5998f7e7773 --- /dev/null +++ b/pkg/copy/copy_file.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package copy + +import ( + "io" + "os" +) + +// From: https://gist.github.com/m4ng0squ4sh/92462b38df26839a3ca324697c8cba04 + +// CopyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all its contents will be replaced by the contents +// of the source file. The file mode will be copied from the source and +// the copied data is synced/flushed to stable storage. +func CopyFile(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + if e := out.Close(); e != nil { + err = e + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return + } + + err = out.Sync() + if err != nil { + return + } + + si, err := os.Stat(src) + if err != nil { + return + } + err = os.Chmod(dst, si.Mode()) + if err != nil { + return + } + + return +} diff --git a/pkg/dag/dag.go b/pkg/dag/dag.go new file mode 100644 index 00000000000..e19369dca5c --- /dev/null +++ b/pkg/dag/dag.go @@ -0,0 +1,374 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "fmt" + "sort" + "strings" + + "github.com/kubegems/opentofu/pkg/tfdiags" + + "github.com/hashicorp/go-multierror" +) + +// AcyclicGraph is a specialization of Graph that cannot have cycles. +type AcyclicGraph struct { + Graph +} + +// WalkFunc is the callback used for walking the graph. +type WalkFunc func(Vertex) tfdiags.Diagnostics + +// DepthWalkFunc is a walk function that also receives the current depth of the +// walk as an argument +type DepthWalkFunc func(Vertex, int) error + +func (g *AcyclicGraph) DirectedGraph() Grapher { + return g +} + +// Returns a Set that includes every Vertex yielded by walking down from the +// provided starting Vertex v. +func (g *AcyclicGraph) Ancestors(v Vertex) (Set, error) { + s := make(Set) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.DepthFirstWalk(g.downEdgesNoCopy(v), memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Returns a Set that includes every Vertex yielded by walking up from the +// provided starting Vertex v. +func (g *AcyclicGraph) Descendents(v Vertex) (Set, error) { + s := make(Set) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.ReverseDepthFirstWalk(g.upEdgesNoCopy(v), memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Root returns the root of the DAG, or an error. +// +// Complexity: O(V) +func (g *AcyclicGraph) Root() (Vertex, error) { + roots := make([]Vertex, 0, 1) + for _, v := range g.Vertices() { + if g.upEdgesNoCopy(v).Len() == 0 { + roots = append(roots, v) + } + } + + if len(roots) > 1 { + // TODO(mitchellh): make this error message a lot better + return nil, fmt.Errorf("multiple roots: %#v", roots) + } + + if len(roots) == 0 { + return nil, fmt.Errorf("no roots found") + } + + return roots[0], nil +} + +// TransitiveReduction performs the transitive reduction of graph g in place. 
+// The transitive reduction of a graph is a graph with as few edges as +// possible with the same reachability as the original graph. This means +// that if there are three nodes A => B => C, and A connects to both +// B and C, and B connects to C, then the transitive reduction is the +// same graph with only a single edge between A and B, and a single edge +// between B and C. +// +// The graph must be free of cycles for this operation to behave properly. +// +// Complexity: O(V(V+E)), or asymptotically O(VE) +func (g *AcyclicGraph) TransitiveReduction() { + // For each vertex u in graph g, do a DFS starting from each vertex + // v such that the edge (u,v) exists (v is a direct descendant of u). + // + // For each v-prime reachable from v, remove the edge (u, v-prime). + for _, u := range g.Vertices() { + uTargets := g.downEdgesNoCopy(u) + + g.DepthFirstWalk(g.downEdgesNoCopy(u), func(v Vertex, d int) error { + shared := uTargets.Intersection(g.downEdgesNoCopy(v)) + for _, vPrime := range shared { + g.RemoveEdge(BasicEdge(u, vPrime)) + } + + return nil + }) + } +} + +// Validate validates the DAG. A DAG is valid if it has a single root +// with no cycles. +func (g *AcyclicGraph) Validate() error { + if _, err := g.Root(); err != nil { + return err + } + + // Look for cycles of more than 1 component + var err error + cycles := g.Cycles() + if len(cycles) > 0 { + for _, cycle := range cycles { + cycleStr := make([]string, len(cycle)) + for j, vertex := range cycle { + cycleStr[j] = VertexName(vertex) + } + + err = multierror.Append(err, fmt.Errorf( + "Cycle: %s", strings.Join(cycleStr, ", "))) + } + } + + // Look for cycles to self + for _, e := range g.Edges() { + if e.Source() == e.Target() { + err = multierror.Append(err, fmt.Errorf( + "Self reference: %s", VertexName(e.Source()))) + } + } + + return err +} + +// Cycles reports any cycles between graph nodes. +// Self-referencing nodes are not reported, and must be detected separately. 
+func (g *AcyclicGraph) Cycles() [][]Vertex { + var cycles [][]Vertex + for _, cycle := range StronglyConnected(&g.Graph) { + if len(cycle) > 1 { + cycles = append(cycles, cycle) + } + } + return cycles +} + +// Walk walks the graph, calling your callback as each node is visited. +// This will walk nodes in parallel if it can. The resulting diagnostics +// contains problems from all graphs visited, in no particular order. +func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { + w := &Walker{Callback: cb, Reverse: true} + w.Update(g) + return w.Wait() +} + +// simple convenience helper for converting a dag.Set to a []Vertex +func AsVertexList(s Set) []Vertex { + vertexList := make([]Vertex, 0, len(s)) + for _, raw := range s { + vertexList = append(vertexList, raw.(Vertex)) + } + return vertexList +} + +type vertexAtDepth struct { + Vertex Vertex + Depth int +} + +// TopologicalOrder returns a topological sort of the given graph, with source +// vertices ordered before the targets of their edges. The nodes are not sorted, +// and any valid order may be returned. This function will panic if it +// encounters a cycle. +func (g *AcyclicGraph) TopologicalOrder() []Vertex { + return g.topoOrder(upOrder) +} + +// ReverseTopologicalOrder returns a topological sort of the given graph, with +// target vertices ordered before the sources of their edges. The nodes are not +// sorted, and any valid order may be returned. This function will panic if it +// encounters a cycle. +func (g *AcyclicGraph) ReverseTopologicalOrder() []Vertex { + return g.topoOrder(downOrder) +} + +func (g *AcyclicGraph) topoOrder(order walkType) []Vertex { + // Use a dfs-based sorting algorithm, similar to that used in + // TransitiveReduction. 
+ sorted := make([]Vertex, 0, len(g.vertices)) + + // tmp track the current working node to check for cycles + tmp := map[Vertex]bool{} + + // perm tracks completed nodes to end the recursion + perm := map[Vertex]bool{} + + var visit func(v Vertex) + + visit = func(v Vertex) { + if perm[v] { + return + } + + if tmp[v] { + panic("cycle found in dag") + } + + tmp[v] = true + var next Set + switch { + case order&downOrder != 0: + next = g.downEdgesNoCopy(v) + case order&upOrder != 0: + next = g.upEdgesNoCopy(v) + default: + panic(fmt.Sprintln("invalid order", order)) + } + + for _, u := range next { + visit(u) + } + + tmp[v] = false + perm[v] = true + sorted = append(sorted, v) + } + + for _, v := range g.Vertices() { + visit(v) + } + + return sorted +} + +type walkType uint64 + +const ( + depthFirst walkType = 1 << iota + breadthFirst + downOrder + upOrder +) + +// DepthFirstWalk does a depth-first walk of the graph starting from +// the vertices in start. +func (g *AcyclicGraph) DepthFirstWalk(start Set, f DepthWalkFunc) error { + return g.walk(depthFirst|downOrder, false, start, f) +} + +// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from +// the vertices in start. +func (g *AcyclicGraph) ReverseDepthFirstWalk(start Set, f DepthWalkFunc) error { + return g.walk(depthFirst|upOrder, false, start, f) +} + +// BreadthFirstWalk does a breadth-first walk of the graph starting from +// the vertices in start. +func (g *AcyclicGraph) BreadthFirstWalk(start Set, f DepthWalkFunc) error { + return g.walk(breadthFirst|downOrder, false, start, f) +} + +// ReverseBreadthFirstWalk does a breadth-first walk _up_ the graph starting from +// the vertices in start. +func (g *AcyclicGraph) ReverseBreadthFirstWalk(start Set, f DepthWalkFunc) error { + return g.walk(breadthFirst|upOrder, false, start, f) +} + +// Setting test to true will walk sets of vertices in sorted order for +// deterministic testing. 
+func (g *AcyclicGraph) walk(order walkType, test bool, start Set, f DepthWalkFunc) error { + seen := make(map[Vertex]struct{}) + frontier := make([]vertexAtDepth, 0, len(start)) + for _, v := range start { + frontier = append(frontier, vertexAtDepth{ + Vertex: v, + Depth: 0, + }) + } + + if test { + testSortFrontier(frontier) + } + + for len(frontier) > 0 { + // Pop the current vertex + var current vertexAtDepth + + switch { + case order&depthFirst != 0: + // depth first, the frontier is used like a stack + n := len(frontier) + current = frontier[n-1] + frontier = frontier[:n-1] + case order&breadthFirst != 0: + // breadth first, the frontier is used like a queue + current = frontier[0] + frontier = frontier[1:] + default: + panic(fmt.Sprint("invalid visit order", order)) + } + + // Check if we've seen this already and return... + if _, ok := seen[current.Vertex]; ok { + continue + } + seen[current.Vertex] = struct{}{} + + // Visit the current node + if err := f(current.Vertex, current.Depth); err != nil { + return err + } + + var edges Set + switch { + case order&downOrder != 0: + edges = g.downEdgesNoCopy(current.Vertex) + case order&upOrder != 0: + edges = g.upEdgesNoCopy(current.Vertex) + default: + panic(fmt.Sprint("invalid walk order", order)) + } + + if test { + frontier = testAppendNextSorted(frontier, edges, current.Depth+1) + } else { + frontier = appendNext(frontier, edges, current.Depth+1) + } + } + return nil +} + +func appendNext(frontier []vertexAtDepth, next Set, depth int) []vertexAtDepth { + for _, v := range next { + frontier = append(frontier, vertexAtDepth{ + Vertex: v, + Depth: depth, + }) + } + return frontier +} + +func testAppendNextSorted(frontier []vertexAtDepth, edges Set, depth int) []vertexAtDepth { + var newEdges []vertexAtDepth + for _, v := range edges { + newEdges = append(newEdges, vertexAtDepth{ + Vertex: v, + Depth: depth, + }) + } + testSortFrontier(newEdges) + return append(frontier, newEdges...) 
+} +func testSortFrontier(f []vertexAtDepth) { + sort.Slice(f, func(i, j int) bool { + return VertexName(f[i].Vertex) < VertexName(f[j].Vertex) + }) +} diff --git a/pkg/dag/dag_test.go b/pkg/dag/dag_test.go new file mode 100644 index 00000000000..91dc1f48ff5 --- /dev/null +++ b/pkg/dag/dag_test.go @@ -0,0 +1,587 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "flag" + "fmt" + "os" + "reflect" + "strconv" + "strings" + "sync" + "testing" + + "github.com/kubegems/opentofu/pkg/tfdiags" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func TestAcyclicGraphRoot(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(3, 1)) + + if root, err := g.Root(); err != nil { + t.Fatalf("err: %s", err) + } else if root != 3 { + t.Fatalf("bad: %#v", root) + } +} + +func TestAcyclicGraphRoot_cycle(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(3, 1)) + + if _, err := g.Root(); err == nil { + t.Fatal("should error") + } +} + +func TestAcyclicGraphRoot_multiple(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + + if _, err := g.Root(); err == nil { + t.Fatal("should error") + } +} + +func TestAyclicGraphTransReduction(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(2, 3)) + g.TransitiveReduction() + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphTransReductionStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestAyclicGraphTransReduction_more(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + 
g.Add(3) + g.Add(4) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(1, 4)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(2, 4)) + g.Connect(BasicEdge(3, 4)) + g.TransitiveReduction() + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphTransReductionMoreStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestAyclicGraphTransReduction_multipleRoots(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(1, 4)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(2, 4)) + g.Connect(BasicEdge(3, 4)) + + g.Add(5) + g.Add(6) + g.Add(7) + g.Add(8) + g.Connect(BasicEdge(5, 6)) + g.Connect(BasicEdge(5, 7)) + g.Connect(BasicEdge(5, 8)) + g.Connect(BasicEdge(6, 7)) + g.Connect(BasicEdge(6, 8)) + g.Connect(BasicEdge(7, 8)) + g.TransitiveReduction() + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphTransReductionMultipleRootsStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +// use this to simulate slow sort operations +type counter struct { + Name string + Calls int64 +} + +func (s *counter) String() string { + s.Calls++ + return s.Name +} + +// Make sure we can reduce a sizable, fully-connected graph. 
+func TestAyclicGraphTransReduction_fullyConnected(t *testing.T) { + var g AcyclicGraph + + const nodeCount = 200 + nodes := make([]*counter, nodeCount) + for i := 0; i < nodeCount; i++ { + nodes[i] = &counter{Name: strconv.Itoa(i)} + } + + // Add them all to the graph + for _, n := range nodes { + g.Add(n) + } + + // connect them all + for i := range nodes { + for j := range nodes { + if i == j { + continue + } + g.Connect(BasicEdge(nodes[i], nodes[j])) + } + } + + g.TransitiveReduction() + + vertexNameCalls := int64(0) + for _, n := range nodes { + vertexNameCalls += n.Calls + } + + switch { + case vertexNameCalls > 2*nodeCount: + // Make calling it more than 2x per node fatal. + // If we were sorting this would give us roughly ln(n)(n^3) calls, or + // >59000000 calls for 200 vertices. + t.Fatalf("VertexName called %d times", vertexNameCalls) + case vertexNameCalls > 0: + // we don't expect any calls, but a change here isn't necessarily fatal + t.Logf("WARNING: VertexName called %d times", vertexNameCalls) + } +} + +func TestAcyclicGraphValidate(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(3, 1)) + + if err := g.Validate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestAcyclicGraphValidate_cycle(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(3, 1)) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 1)) + + if err := g.Validate(); err == nil { + t.Fatal("should error") + } +} + +func TestAcyclicGraphValidate_cycleSelf(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Connect(BasicEdge(1, 1)) + + if err := g.Validate(); err == nil { + t.Fatal("should error") + } +} + +func TestAcyclicGraphAncestors(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Add(5) + g.Connect(BasicEdge(0, 1)) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + 
g.Connect(BasicEdge(3, 4)) + g.Connect(BasicEdge(4, 5)) + + actual, err := g.Ancestors(2) + if err != nil { + t.Fatalf("err: %#v", err) + } + + expected := []Vertex{3, 4, 5} + + if actual.Len() != len(expected) { + t.Fatalf("bad length! expected %#v to have len %d", actual, len(expected)) + } + + for _, e := range expected { + if !actual.Include(e) { + t.Fatalf("expected: %#v to include: %#v", expected, actual) + } + } +} + +func TestAcyclicGraphDescendents(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Add(5) + g.Connect(BasicEdge(0, 1)) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(3, 4)) + g.Connect(BasicEdge(4, 5)) + + actual, err := g.Descendents(2) + if err != nil { + t.Fatalf("err: %#v", err) + } + + expected := []Vertex{0, 1} + + if actual.Len() != len(expected) { + t.Fatalf("bad length! expected %#v to have len %d", actual, len(expected)) + } + + for _, e := range expected { + if !actual.Include(e) { + t.Fatalf("expected: %#v to include: %#v", expected, actual) + } + } +} + +func TestAcyclicGraphWalk(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(3, 1)) + + var visits []Vertex + var lock sync.Mutex + err := g.Walk(func(v Vertex) tfdiags.Diagnostics { + lock.Lock() + defer lock.Unlock() + visits = append(visits, v) + return nil + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := [][]Vertex{ + {1, 2, 3}, + {2, 1, 3}, + } + for _, e := range expected { + if reflect.DeepEqual(visits, e) { + return + } + } + + t.Fatalf("bad: %#v", visits) +} + +func TestAcyclicGraphWalk_error(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Connect(BasicEdge(4, 3)) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(2, 1)) + + var visits []Vertex + var lock sync.Mutex + err := g.Walk(func(v Vertex) tfdiags.Diagnostics { + lock.Lock() + defer lock.Unlock() + + var diags 
tfdiags.Diagnostics + + if v == 2 { + diags = diags.Append(fmt.Errorf("error")) + return diags + } + + visits = append(visits, v) + return diags + }) + if err == nil { + t.Fatal("should error") + } + + expected := []Vertex{1} + if !reflect.DeepEqual(visits, expected) { + t.Errorf("wrong visits\ngot: %#v\nwant: %#v", visits, expected) + } + +} + +func BenchmarkDAG(b *testing.B) { + for i := 0; i < b.N; i++ { + count := 150 + b.StopTimer() + g := &AcyclicGraph{} + + // create 4 layers of fully connected nodes + // layer A + for i := 0; i < count; i++ { + g.Add(fmt.Sprintf("A%d", i)) + } + + // layer B + for i := 0; i < count; i++ { + B := fmt.Sprintf("B%d", i) + g.Add(B) + for j := 0; j < count; j++ { + g.Connect(BasicEdge(B, fmt.Sprintf("A%d", j))) + } + } + + // layer C + for i := 0; i < count; i++ { + c := fmt.Sprintf("C%d", i) + g.Add(c) + for j := 0; j < count; j++ { + // connect them to previous layers so we have something that requires reduction + g.Connect(BasicEdge(c, fmt.Sprintf("A%d", j))) + g.Connect(BasicEdge(c, fmt.Sprintf("B%d", j))) + } + } + + // layer D + for i := 0; i < count; i++ { + d := fmt.Sprintf("D%d", i) + g.Add(d) + for j := 0; j < count; j++ { + g.Connect(BasicEdge(d, fmt.Sprintf("A%d", j))) + g.Connect(BasicEdge(d, fmt.Sprintf("B%d", j))) + g.Connect(BasicEdge(d, fmt.Sprintf("C%d", j))) + } + } + + b.StartTimer() + // Find dependencies for every node + for _, v := range g.Vertices() { + _, err := g.Ancestors(v) + if err != nil { + b.Fatal(err) + } + } + + // reduce the final graph + g.TransitiveReduction() + } +} + +func TestAcyclicGraphWalkOrder(t *testing.T) { + /* Sample dependency graph, + all edges pointing downwards. 
+ 1 2 + / \ / \ + 3 4 5 + / \ / + 6 7 + / | \ + 8 9 10 + \ | / + 11 + */ + + var g AcyclicGraph + for i := 1; i <= 11; i++ { + g.Add(i) + } + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(1, 4)) + g.Connect(BasicEdge(2, 4)) + g.Connect(BasicEdge(2, 5)) + g.Connect(BasicEdge(3, 6)) + g.Connect(BasicEdge(4, 7)) + g.Connect(BasicEdge(5, 7)) + g.Connect(BasicEdge(7, 8)) + g.Connect(BasicEdge(7, 9)) + g.Connect(BasicEdge(7, 10)) + g.Connect(BasicEdge(8, 11)) + g.Connect(BasicEdge(9, 11)) + g.Connect(BasicEdge(10, 11)) + + start := make(Set) + start.Add(2) + start.Add(1) + reverse := make(Set) + reverse.Add(11) + reverse.Add(6) + + t.Run("DepthFirst", func(t *testing.T) { + var visits []vertexAtDepth + g.walk(depthFirst|downOrder, true, start, func(v Vertex, d int) error { + visits = append(visits, vertexAtDepth{v, d}) + return nil + + }) + expect := []vertexAtDepth{ + {2, 0}, {5, 1}, {7, 2}, {9, 3}, {11, 4}, {8, 3}, {10, 3}, {4, 1}, {1, 0}, {3, 1}, {6, 2}, + } + if !reflect.DeepEqual(visits, expect) { + t.Errorf("expected visits:\n%v\ngot:\n%v\n", expect, visits) + } + }) + t.Run("ReverseDepthFirst", func(t *testing.T) { + var visits []vertexAtDepth + g.walk(depthFirst|upOrder, true, reverse, func(v Vertex, d int) error { + visits = append(visits, vertexAtDepth{v, d}) + return nil + + }) + expect := []vertexAtDepth{ + {6, 0}, {3, 1}, {1, 2}, {11, 0}, {9, 1}, {7, 2}, {5, 3}, {2, 4}, {4, 3}, {8, 1}, {10, 1}, + } + if !reflect.DeepEqual(visits, expect) { + t.Errorf("expected visits:\n%v\ngot:\n%v\n", expect, visits) + } + }) + t.Run("BreadthFirst", func(t *testing.T) { + var visits []vertexAtDepth + g.walk(breadthFirst|downOrder, true, start, func(v Vertex, d int) error { + visits = append(visits, vertexAtDepth{v, d}) + return nil + + }) + expect := []vertexAtDepth{ + {1, 0}, {2, 0}, {3, 1}, {4, 1}, {5, 1}, {6, 2}, {7, 2}, {10, 3}, {8, 3}, {9, 3}, {11, 4}, + } + if !reflect.DeepEqual(visits, expect) { + t.Errorf("expected visits:\n%v\ngot:\n%v\n", expect, visits) + } + 
}) + t.Run("ReverseBreadthFirst", func(t *testing.T) { + var visits []vertexAtDepth + g.walk(breadthFirst|upOrder, true, reverse, func(v Vertex, d int) error { + visits = append(visits, vertexAtDepth{v, d}) + return nil + + }) + expect := []vertexAtDepth{ + {11, 0}, {6, 0}, {10, 1}, {8, 1}, {9, 1}, {3, 1}, {7, 2}, {1, 2}, {4, 3}, {5, 3}, {2, 4}, + } + if !reflect.DeepEqual(visits, expect) { + t.Errorf("expected visits:\n%v\ngot:\n%v\n", expect, visits) + } + }) + + t.Run("TopologicalOrder", func(t *testing.T) { + order := g.topoOrder(downOrder) + + // Validate the order by checking it against the initial graph. We only + // need to verify that each node has it's direct dependencies + // satisfied. + completed := map[Vertex]bool{} + for _, v := range order { + deps := g.DownEdges(v) + for _, dep := range deps { + if !completed[dep] { + t.Fatalf("walking node %v, but dependency %v was not yet seen", v, dep) + } + } + completed[v] = true + } + }) + t.Run("ReverseTopologicalOrder", func(t *testing.T) { + order := g.topoOrder(upOrder) + + // Validate the order by checking it against the initial graph. We only + // need to verify that each node has it's direct dependencies + // satisfied. + completed := map[Vertex]bool{} + for _, v := range order { + deps := g.UpEdges(v) + for _, dep := range deps { + if !completed[dep] { + t.Fatalf("walking node %v, but dependency %v was not yet seen", v, dep) + } + } + completed[v] = true + } + }) +} + +const testGraphTransReductionStr = ` +1 + 2 +2 + 3 +3 +` + +const testGraphTransReductionMoreStr = ` +1 + 2 +2 + 3 +3 + 4 +4 +` + +const testGraphTransReductionMultipleRootsStr = ` +1 + 2 +2 + 3 +3 + 4 +4 +5 + 6 +6 + 7 +7 + 8 +8 +` diff --git a/pkg/dag/dot.go b/pkg/dag/dot.go new file mode 100644 index 00000000000..0ea0f548fe7 --- /dev/null +++ b/pkg/dag/dot.go @@ -0,0 +1,287 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// DotOpts are the options for generating a dot formatted Graph. +type DotOpts struct { + // Allows some nodes to decide to only show themselves when the user has + // requested the "verbose" graph. + Verbose bool + + // Highlight Cycles + DrawCycles bool + + // How many levels to expand modules as we draw + MaxDepth int + + // use this to keep the cluster_ naming convention from the previous dot writer + cluster bool +} + +// GraphNodeDotter can be implemented by a node to cause it to be included +// in the dot graph. The Dot method will be called which is expected to +// return a representation of this node. +type GraphNodeDotter interface { + // Dot is called to return the dot formatting for the node. + // The first parameter is the title of the node. + // The second parameter includes user-specified options that affect the dot + // graph. See GraphDotOpts below for details. + DotNode(string, *DotOpts) *DotNode +} + +// DotNode provides a structure for Vertices to return in order to specify their +// dot format. +type DotNode struct { + Name string + Attrs map[string]string +} + +// Returns the DOT representation of this Graph. 
+func (g *marshalGraph) Dot(opts *DotOpts) []byte { + if opts == nil { + opts = &DotOpts{ + DrawCycles: true, + MaxDepth: -1, + Verbose: true, + } + } + + var w indentWriter + w.WriteString("digraph {\n") + w.Indent() + + // some dot defaults + w.WriteString(`compound = "true"` + "\n") + w.WriteString(`newrank = "true"` + "\n") + + // the top level graph is written as the first subgraph + w.WriteString(`subgraph "root" {` + "\n") + g.writeBody(opts, &w) + + // cluster isn't really used other than for naming purposes in some graphs + opts.cluster = opts.MaxDepth != 0 + maxDepth := opts.MaxDepth + if maxDepth == 0 { + maxDepth = -1 + } + + for _, s := range g.Subgraphs { + g.writeSubgraph(s, opts, maxDepth, &w) + } + + w.Unindent() + w.WriteString("}\n") + return w.Bytes() +} + +func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte { + var buf bytes.Buffer + graphName := g.Name + if graphName == "" { + graphName = "root" + } + + name := v.Name + attrs := v.Attrs + if v.graphNodeDotter != nil { + node := v.graphNodeDotter.DotNode(name, opts) + if node == nil { + return []byte{} + } + + newAttrs := make(map[string]string) + for k, v := range attrs { + newAttrs[k] = v + } + for k, v := range node.Attrs { + newAttrs[k] = v + } + + name = node.Name + attrs = newAttrs + } + + buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name)) + writeAttrs(&buf, attrs) + buf.WriteByte('\n') + + return buf.Bytes() +} + +func (e *marshalEdge) dot(g *marshalGraph) string { + var buf bytes.Buffer + graphName := g.Name + if graphName == "" { + graphName = "root" + } + + sourceName := g.vertexByID(e.Source).Name + targetName := g.vertexByID(e.Target).Name + s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName) + buf.WriteString(s) + writeAttrs(&buf, e.Attrs) + + return buf.String() +} + +func cycleDot(e *marshalEdge, g *marshalGraph) string { + return e.dot(g) + ` [color = "red", penwidth = "2.0"]` +} + +// Write the subgraph body. 
The is recursive, and the depth argument is used to +// record the current depth of iteration. +func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) { + if depth == 0 { + return + } + depth-- + + name := sg.Name + if opts.cluster { + // we prefix with cluster_ to match the old dot output + name = "cluster_" + name + sg.Attrs["label"] = sg.Name + } + w.WriteString(fmt.Sprintf("subgraph %q {\n", name)) + sg.writeBody(opts, w) + + for _, sg := range sg.Subgraphs { + g.writeSubgraph(sg, opts, depth, w) + } +} + +func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) { + w.Indent() + + for _, as := range attrStrings(g.Attrs) { + w.WriteString(as + "\n") + } + + // list of Vertices that aren't to be included in the dot output + skip := map[string]bool{} + + for _, v := range g.Vertices { + if v.graphNodeDotter == nil { + skip[v.ID] = true + continue + } + + w.Write(v.dot(g, opts)) + } + + var dotEdges []string + + if opts.DrawCycles { + for _, c := range g.Cycles { + if len(c) < 2 { + continue + } + + for i, j := 0, 1; i < len(c); i, j = i+1, j+1 { + if j >= len(c) { + j = 0 + } + src := c[i] + tgt := c[j] + + if skip[src.ID] || skip[tgt.ID] { + continue + } + + e := &marshalEdge{ + Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name), + Source: src.ID, + Target: tgt.ID, + Attrs: make(map[string]string), + } + + dotEdges = append(dotEdges, cycleDot(e, g)) + src = tgt + } + } + } + + for _, e := range g.Edges { + dotEdges = append(dotEdges, e.dot(g)) + } + + // srot these again to match the old output + sort.Strings(dotEdges) + + for _, e := range dotEdges { + w.WriteString(e + "\n") + } + + w.Unindent() + w.WriteString("}\n") +} + +func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { + if len(attrs) > 0 { + buf.WriteString(" [") + buf.WriteString(strings.Join(attrStrings(attrs), ", ")) + buf.WriteString("]") + } +} + +func attrStrings(attrs map[string]string) []string { + strings := make([]string, 0, len(attrs)) + 
// The following methods intercept the bytes.Buffer writes and insert the
// indentation when starting a new line.
func (w *indentWriter) Write(b []byte) (int, error) {
	w.indent()
	return w.Buffer.Write(b)
}

func (w *indentWriter) WriteString(s string) (int, error) {
	w.indent()
	return w.Buffer.WriteString(s)
}
func (w *indentWriter) WriteByte(b byte) error {
	w.indent()
	return w.Buffer.WriteByte(b)
}
func (w *indentWriter) WriteRune(r rune) (int, error) {
	w.indent()
	return w.Buffer.WriteRune(r)
}
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "reflect" + "testing" +) + +func TestGraphDot_opts(t *testing.T) { + var v testDotVertex + var g Graph + g.Add(&v) + + opts := &DotOpts{MaxDepth: 42} + actual := g.Dot(opts) + if len(actual) == 0 { + t.Fatal("should not be empty") + } + + if !v.DotNodeCalled { + t.Fatal("should call DotNode") + } + if !reflect.DeepEqual(v.DotNodeOpts, opts) { + t.Fatalf("bad; %#v", v.DotNodeOpts) + } +} + +type testDotVertex struct { + DotNodeCalled bool + DotNodeTitle string + DotNodeOpts *DotOpts + DotNodeReturn *DotNode +} + +func (v *testDotVertex) DotNode(title string, opts *DotOpts) *DotNode { + v.DotNodeCalled = true + v.DotNodeTitle = title + v.DotNodeOpts = opts + return v.DotNodeReturn +} diff --git a/pkg/dag/edge.go b/pkg/dag/edge.go new file mode 100644 index 00000000000..d4120797358 --- /dev/null +++ b/pkg/dag/edge.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +// Edge represents an edge in the graph, with a source and target vertex. +type Edge interface { + Source() Vertex + Target() Vertex + + Hashable +} + +// BasicEdge returns an Edge implementation that simply tracks the source +// and target given as-is. +func BasicEdge(source, target Vertex) Edge { + return &basicEdge{S: source, T: target} +} + +// basicEdge is a basic implementation of Edge that has the source and +// target vertex. 
+type basicEdge struct { + S, T Vertex +} + +func (e *basicEdge) Hashcode() interface{} { + return [...]interface{}{e.S, e.T} +} + +func (e *basicEdge) Source() Vertex { + return e.S +} + +func (e *basicEdge) Target() Vertex { + return e.T +} diff --git a/pkg/dag/edge_test.go b/pkg/dag/edge_test.go new file mode 100644 index 00000000000..c54e0b4aab9 --- /dev/null +++ b/pkg/dag/edge_test.go @@ -0,0 +1,31 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "testing" +) + +func TestBasicEdgeHashcode(t *testing.T) { + e1 := BasicEdge(1, 2) + e2 := BasicEdge(1, 2) + if e1.Hashcode() != e2.Hashcode() { + t.Fatalf("bad") + } +} + +func TestBasicEdgeHashcode_pointer(t *testing.T) { + type test struct { + Value string + } + + v1, v2 := &test{"foo"}, &test{"bar"} + e1 := BasicEdge(v1, v2) + e2 := BasicEdge(v1, v2) + if e1.Hashcode() != e2.Hashcode() { + t.Fatalf("bad") + } +} diff --git a/pkg/dag/graph.go b/pkg/dag/graph.go new file mode 100644 index 00000000000..845185d08d9 --- /dev/null +++ b/pkg/dag/graph.go @@ -0,0 +1,372 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "bytes" + "fmt" + "sort" +) + +// Graph is used to represent a dependency graph. +type Graph struct { + vertices Set + edges Set + downEdges map[interface{}]Set + upEdges map[interface{}]Set +} + +// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. +type Subgrapher interface { + Subgraph() Grapher +} + +// A Grapher is any type that returns a Grapher, mainly used to identify +// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they +// return themselves. +type Grapher interface { + DirectedGraph() Grapher +} + +// Vertex of the graph. 
+type Vertex interface{} + +// NamedVertex is an optional interface that can be implemented by Vertex +// to give it a human-friendly name that is used for outputting the graph. +type NamedVertex interface { + Vertex + Name() string +} + +func (g *Graph) DirectedGraph() Grapher { + return g +} + +// Vertices returns the list of all the vertices in the graph. +func (g *Graph) Vertices() []Vertex { + result := make([]Vertex, 0, len(g.vertices)) + for _, v := range g.vertices { + result = append(result, v.(Vertex)) + } + + return result +} + +// Edges returns the list of all the edges in the graph. +func (g *Graph) Edges() []Edge { + result := make([]Edge, 0, len(g.edges)) + for _, v := range g.edges { + result = append(result, v.(Edge)) + } + + return result +} + +// EdgesFrom returns the list of edges from the given source. +func (g *Graph) EdgesFrom(v Vertex) []Edge { + var result []Edge + from := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Source()) == from { + result = append(result, e) + } + } + + return result +} + +// EdgesTo returns the list of edges to the given target. +func (g *Graph) EdgesTo(v Vertex) []Edge { + var result []Edge + search := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Target()) == search { + result = append(result, e) + } + } + + return result +} + +// HasVertex checks if the given Vertex is present in the graph. +func (g *Graph) HasVertex(v Vertex) bool { + return g.vertices.Include(v) +} + +// HasEdge checks if the given Edge is present in the graph. +func (g *Graph) HasEdge(e Edge) bool { + return g.edges.Include(e) +} + +// Add adds a vertex to the graph. This is safe to call multiple time with +// the same Vertex. +func (g *Graph) Add(v Vertex) Vertex { + g.init() + g.vertices.Add(v) + return v +} + +// Remove removes a vertex from the graph. This will also remove any +// edges with this vertex as a source or target. 
+func (g *Graph) Remove(v Vertex) Vertex { + // Delete the vertex itself + g.vertices.Delete(v) + + // Delete the edges to non-existent things + for _, target := range g.downEdgesNoCopy(v) { + g.RemoveEdge(BasicEdge(v, target)) + } + for _, source := range g.upEdgesNoCopy(v) { + g.RemoveEdge(BasicEdge(source, v)) + } + + return nil +} + +// Replace replaces the original Vertex with replacement. If the original +// does not exist within the graph, then false is returned. Otherwise, true +// is returned. +func (g *Graph) Replace(original, replacement Vertex) bool { + // If we don't have the original, we can't do anything + if !g.vertices.Include(original) { + return false + } + + // If they're the same, then don't do anything + if original == replacement { + return true + } + + // Add our new vertex, then copy all the edges + g.Add(replacement) + for _, target := range g.downEdgesNoCopy(original) { + g.Connect(BasicEdge(replacement, target)) + } + for _, source := range g.upEdgesNoCopy(original) { + g.Connect(BasicEdge(source, replacement)) + } + + // Remove our old vertex, which will also remove all the edges + g.Remove(original) + + return true +} + +// RemoveEdge removes an edge from the graph. +func (g *Graph) RemoveEdge(edge Edge) { + g.init() + + // Delete the edge from the set + g.edges.Delete(edge) + + // Delete the up/down edges + if s, ok := g.downEdges[hashcode(edge.Source())]; ok { + s.Delete(edge.Target()) + } + if s, ok := g.upEdges[hashcode(edge.Target())]; ok { + s.Delete(edge.Source()) + } +} + +// UpEdges returns the vertices that are *sources* of edges that target the +// destination Vertex v. +func (g *Graph) UpEdges(v Vertex) Set { + return g.upEdgesNoCopy(v).Copy() +} + +// DownEdges returns the vertices that are *targets* of edges that originate +// from the source Vertex v. 
// Subsume imports all of the nodes and edges from the given graph into the
// receiver, leaving the given graph unchanged.
//
// If any of the nodes in the given graph are already present in the receiver
// then the existing node will be retained and any new edges from the given
// graph will be connected with it.
//
// If the given graph has edges in common with the receiver then they will be
// ignored, because each pair of nodes can only be connected once.
func (g *Graph) Subsume(other *Graph) {
	// We're using Set.Filter just as a "visit each element" here, so we're
	// not doing anything with the result (which will always be empty).
	other.vertices.Filter(func(i interface{}) bool {
		g.Add(i)
		return false
	})
	other.edges.Filter(func(i interface{}) bool {
		g.Connect(i.(Edge))
		return false
	})
}
+ vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s\n", name)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + for _, target := range targets { + deps = append(deps, VertexName(target)) + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s\n", d)) + } + } + + return buf.String() +} + +func (g *Graph) init() { + if g.vertices == nil { + g.vertices = make(Set) + } + if g.edges == nil { + g.edges = make(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// VertexName returns the name of a vertex. +func VertexName(raw Vertex) string { + switch v := raw.(type) { + case NamedVertex: + return v.Name() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/pkg/dag/graph_test.go b/pkg/dag/graph_test.go new file mode 100644 index 00000000000..0f5929dd02a --- /dev/null +++ b/pkg/dag/graph_test.go @@ -0,0 +1,258 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "fmt" + "strings" + "testing" +) + +func TestGraph_empty(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphEmptyStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraph_basic(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 3)) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphBasicStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraph_remove(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 3)) + g.Remove(3) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphRemoveStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraph_replace(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Replace(2, 42) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphReplaceStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraph_replaceSelf(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Replace(2, 2) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphReplaceSelfStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +// This tests that connecting edges works based on custom Hashcode +// implementations for uniqueness. 
+func TestGraph_hashcode(t *testing.T) { + var g Graph + g.Add(&hashVertex{code: 1}) + g.Add(&hashVertex{code: 2}) + g.Add(&hashVertex{code: 3}) + g.Connect(BasicEdge( + &hashVertex{code: 1}, + &hashVertex{code: 3})) + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testGraphBasicStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraphHasVertex(t *testing.T) { + var g Graph + g.Add(1) + + if !g.HasVertex(1) { + t.Fatal("should have 1") + } + if g.HasVertex(2) { + t.Fatal("should not have 2") + } +} + +func TestGraphHasEdge(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Connect(BasicEdge(1, 2)) + + if !g.HasEdge(BasicEdge(1, 2)) { + t.Fatal("should have 1,2") + } + if g.HasVertex(BasicEdge(2, 3)) { + t.Fatal("should not have 2,3") + } +} + +func TestGraphEdgesFrom(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(2, 3)) + + edges := g.EdgesFrom(1) + + expected := make(Set) + expected.Add(BasicEdge(1, 3)) + + s := make(Set) + for _, e := range edges { + s.Add(e) + } + + if s.Intersection(expected).Len() != expected.Len() { + t.Fatalf("bad: %#v", edges) + } +} + +func TestGraphEdgesTo(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 3)) + g.Connect(BasicEdge(1, 2)) + + edges := g.EdgesTo(3) + + expected := make(Set) + expected.Add(BasicEdge(1, 3)) + + s := make(Set) + for _, e := range edges { + s.Add(e) + } + + if s.Intersection(expected).Len() != expected.Len() { + t.Fatalf("bad: %#v", edges) + } +} + +func TestGraphUpdownEdges(t *testing.T) { + // Verify that we can't inadvertently modify the internal graph sets + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + + up := g.UpEdges(2) + if up.Len() != 1 || !up.Include(1) { + t.Fatalf("expected only an up edge of '1', got %#v", up) + } + // modify the up set + up.Add(9) + + orig := g.UpEdges(2) + 
diff := up.Difference(orig) + if diff.Len() != 1 || !diff.Include(9) { + t.Fatalf("expected a diff of only '9', got %#v", diff) + } + + down := g.DownEdges(2) + if down.Len() != 1 || !down.Include(3) { + t.Fatalf("expected only a down edge of '3', got %#v", down) + } + // modify the down set + down.Add(8) + + orig = g.DownEdges(2) + diff = down.Difference(orig) + if diff.Len() != 1 || !diff.Include(8) { + t.Fatalf("expected a diff of only '8', got %#v", diff) + } +} + +type hashVertex struct { + code interface{} +} + +func (v *hashVertex) Hashcode() interface{} { + return v.code +} + +func (v *hashVertex) Name() string { + return fmt.Sprintf("%#v", v.code) +} + +const testGraphBasicStr = ` +1 + 3 +2 +3 +` + +const testGraphEmptyStr = ` +1 +2 +3 +` + +const testGraphRemoveStr = ` +1 +2 +` + +const testGraphReplaceStr = ` +1 + 42 +3 +42 + 3 +` + +const testGraphReplaceSelfStr = ` +1 + 2 +2 + 3 +3 +` diff --git a/pkg/dag/marshal.go b/pkg/dag/marshal.go new file mode 100644 index 00000000000..c75b31707b4 --- /dev/null +++ b/pkg/dag/marshal.go @@ -0,0 +1,202 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "fmt" + "reflect" + "sort" + "strconv" +) + +// the marshal* structs are for serialization of the graph data. +type marshalGraph struct { + // Type is always "Graph", for identification as a top level object in the + // JSON stream. + Type string + + // Each marshal structure requires a unique ID so that it can be referenced + // by other structures. + ID string `json:",omitempty"` + + // Human readable name for this graph. + Name string `json:",omitempty"` + + // Arbitrary attributes that can be added to the output. + Attrs map[string]string `json:",omitempty"` + + // List of graph vertices, sorted by ID. + Vertices []*marshalVertex `json:",omitempty"` + + // List of edges, sorted by Source ID. 
+ Edges []*marshalEdge `json:",omitempty"` + + // Any number of subgraphs. A subgraph itself is considered a vertex, and + // may be referenced by either end of an edge. + Subgraphs []*marshalGraph `json:",omitempty"` + + // Any lists of vertices that are included in cycles. + Cycles [][]*marshalVertex `json:",omitempty"` +} + +func (g *marshalGraph) vertexByID(id string) *marshalVertex { + for _, v := range g.Vertices { + if id == v.ID { + return v + } + } + return nil +} + +type marshalVertex struct { + // Unique ID, used to reference this vertex from other structures. + ID string + + // Human readable name + Name string `json:",omitempty"` + + Attrs map[string]string `json:",omitempty"` + + // This is to help transition from the old Dot interfaces. We record if the + // node was a GraphNodeDotter here, so we can call it to get attributes. + graphNodeDotter GraphNodeDotter +} + +func newMarshalVertex(v Vertex) *marshalVertex { + dn, ok := v.(GraphNodeDotter) + if !ok { + dn = nil + } + + // the name will be quoted again later, so we need to ensure it's properly + // escaped without quotes. 
+ name := strconv.Quote(VertexName(v)) + name = name[1 : len(name)-1] + + return &marshalVertex{ + ID: marshalVertexID(v), + Name: name, + Attrs: make(map[string]string), + graphNodeDotter: dn, + } +} + +// vertices is a sort.Interface implementation for sorting vertices by ID +type vertices []*marshalVertex + +func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } +func (v vertices) Len() int { return len(v) } +func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +type marshalEdge struct { + // Human readable name + Name string + + // Source and Target Vertices by ID + Source string + Target string + + Attrs map[string]string `json:",omitempty"` +} + +func newMarshalEdge(e Edge) *marshalEdge { + return &marshalEdge{ + Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), + Source: marshalVertexID(e.Source()), + Target: marshalVertexID(e.Target()), + Attrs: make(map[string]string), + } +} + +// edges is a sort.Interface implementation for sorting edges by Source ID +type edges []*marshalEdge + +func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } +func (e edges) Len() int { return len(e) } +func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } + +// build a marshalGraph structure from a *Graph +func newMarshalGraph(name string, g *Graph) *marshalGraph { + mg := &marshalGraph{ + Type: "Graph", + Name: name, + Attrs: make(map[string]string), + } + + for _, v := range g.Vertices() { + id := marshalVertexID(v) + if sg, ok := marshalSubgrapher(v); ok { + smg := newMarshalGraph(VertexName(v), sg) + smg.ID = id + mg.Subgraphs = append(mg.Subgraphs, smg) + } + + mv := newMarshalVertex(v) + mg.Vertices = append(mg.Vertices, mv) + } + + sort.Sort(vertices(mg.Vertices)) + + for _, e := range g.Edges() { + mg.Edges = append(mg.Edges, newMarshalEdge(e)) + } + + sort.Sort(edges(mg.Edges)) + + for _, c := range (&AcyclicGraph{*g}).Cycles() { + var cycle []*marshalVertex + for _, v := range c { + mv := 
newMarshalVertex(v) + cycle = append(cycle, mv) + } + mg.Cycles = append(mg.Cycles, cycle) + } + + return mg +} + +// Attempt to return a unique ID for any vertex. +func marshalVertexID(v Vertex) string { + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return strconv.Itoa(int(val.Pointer())) + case reflect.Interface: + // A vertex shouldn't contain another layer of interface, but handle + // this just in case. + return fmt.Sprintf("%#v", val.Interface()) + } + + if v, ok := v.(Hashable); ok { + h := v.Hashcode() + if h, ok := h.(string); ok { + return h + } + } + + // fallback to a name, which we hope is unique. + return VertexName(v) + + // we could try harder by attempting to read the arbitrary value from the + // interface, but we shouldn't get here from OpenTofu right now. +} + +// check for a Subgrapher, and return the underlying *Graph. +func marshalSubgrapher(v Vertex) (*Graph, bool) { + sg, ok := v.(Subgrapher) + if !ok { + return nil, false + } + + switch g := sg.Subgraph().DirectedGraph().(type) { + case *Graph: + return g, true + case *AcyclicGraph: + return &g.Graph, true + } + + return nil, false +} diff --git a/pkg/dag/marshal_test.go b/pkg/dag/marshal_test.go new file mode 100644 index 00000000000..0036adeeabe --- /dev/null +++ b/pkg/dag/marshal_test.go @@ -0,0 +1,106 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "strings" + "testing" +) + +func TestGraphDot_empty(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + + actual := strings.TrimSpace(string(g.Dot(nil))) + expected := strings.TrimSpace(testGraphDotEmptyStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraphDot_basic(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(1, 3)) + + actual := strings.TrimSpace(string(g.Dot(nil))) + expected := strings.TrimSpace(testGraphDotBasicStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestGraphDot_quoted(t *testing.T) { + var g Graph + quoted := `name["with-quotes"]` + other := `other` + g.Add(quoted) + g.Add(other) + g.Connect(BasicEdge(quoted, other)) + + actual := strings.TrimSpace(string(g.Dot(nil))) + expected := strings.TrimSpace(testGraphDotQuotedStr) + if actual != expected { + t.Fatalf("\ngot: %q\nwanted %q\n", actual, expected) + } +} + +func TestGraphDot_attrs(t *testing.T) { + var g Graph + g.Add(&testGraphNodeDotter{ + Result: &DotNode{ + Name: "foo", + Attrs: map[string]string{"foo": "bar"}, + }, + }) + + actual := strings.TrimSpace(string(g.Dot(nil))) + expected := strings.TrimSpace(testGraphDotAttrsStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +type testGraphNodeDotter struct{ Result *DotNode } + +func (n *testGraphNodeDotter) Name() string { return n.Result.Name } +func (n *testGraphNodeDotter) DotNode(string, *DotOpts) *DotNode { return n.Result } + +const testGraphDotQuotedStr = `digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] name[\"with-quotes\"]" -> "[root] other" + } +}` + +const testGraphDotBasicStr = `digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] 1" -> "[root] 3" + } +} +` + +const testGraphDotEmptyStr = `digraph { + compound = "true" + newrank = "true" + subgraph "root" { + } +}` + +const 
testGraphDotAttrsStr = `digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] foo" [foo = "bar"] + } +}` diff --git a/pkg/dag/set.go b/pkg/dag/set.go new file mode 100644 index 00000000000..7dfbaeb20f9 --- /dev/null +++ b/pkg/dag/set.go @@ -0,0 +1,118 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +// Set is a set data structure. +type Set map[interface{}]interface{} + +// Hashable is the interface used by set to get the hash code of a value. +// If this isn't given, then the value of the item being added to the set +// itself is used as the comparison value. +type Hashable interface { + Hashcode() interface{} +} + +// hashcode returns the hashcode used for set elements. +func hashcode(v interface{}) interface{} { + if h, ok := v.(Hashable); ok { + return h.Hashcode() + } + + return v +} + +// Add adds an item to the set +func (s Set) Add(v interface{}) { + s[hashcode(v)] = v +} + +// Delete removes an item from the set. +func (s Set) Delete(v interface{}) { + delete(s, hashcode(v)) +} + +// Include returns true/false of whether a value is in the set. +func (s Set) Include(v interface{}) bool { + _, ok := s[hashcode(v)] + return ok +} + +// Intersection computes the set intersection with other. +func (s Set) Intersection(other Set) Set { + result := make(Set) + if s == nil || other == nil { + return result + } + // Iteration over a smaller set has better performance. + if other.Len() < s.Len() { + s, other = other, s + } + for _, v := range s { + if other.Include(v) { + result.Add(v) + } + } + return result +} + +// Difference returns a set with the elements that s has but +// other doesn't. 
+func (s Set) Difference(other Set) Set { + if other == nil || other.Len() == 0 { + return s.Copy() + } + + result := make(Set) + for k, v := range s { + if _, ok := other[k]; !ok { + result.Add(v) + } + } + + return result +} + +// Filter returns a set that contains the elements from the receiver +// where the given callback returns true. +func (s Set) Filter(cb func(interface{}) bool) Set { + result := make(Set) + + for _, v := range s { + if cb(v) { + result.Add(v) + } + } + + return result +} + +// Len is the number of items in the set. +func (s Set) Len() int { + return len(s) +} + +// List returns the list of set elements. +func (s Set) List() []interface{} { + if s == nil { + return nil + } + + r := make([]interface{}, 0, len(s)) + for _, v := range s { + r = append(r, v) + } + + return r +} + +// Copy returns a shallow copy of the set. +func (s Set) Copy() Set { + c := make(Set, len(s)) + for k, v := range s { + c[k] = v + } + return c +} diff --git a/pkg/dag/set_test.go b/pkg/dag/set_test.go new file mode 100644 index 00000000000..b03f417f578 --- /dev/null +++ b/pkg/dag/set_test.go @@ -0,0 +1,163 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dag + +import ( + "fmt" + "testing" +) + +func TestSetDifference(t *testing.T) { + cases := []struct { + Name string + A, B []interface{} + Expected []interface{} + }{ + { + "same", + []interface{}{1, 2, 3}, + []interface{}{3, 1, 2}, + []interface{}{}, + }, + + { + "A has extra elements", + []interface{}{1, 2, 3}, + []interface{}{3, 2}, + []interface{}{1}, + }, + + { + "B has extra elements", + []interface{}{1, 2, 3}, + []interface{}{3, 2, 1, 4}, + []interface{}{}, + }, + { + "B is nil", + []interface{}{1, 2, 3}, + nil, + []interface{}{1, 2, 3}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + one := make(Set) + two := make(Set) + expected := make(Set) + for _, v := range tc.A { + one.Add(v) + } + for _, v := range tc.B { + two.Add(v) + } + if tc.B == nil { + two = nil + } + for _, v := range tc.Expected { + expected.Add(v) + } + + actual := one.Difference(two) + match := actual.Intersection(expected) + if match.Len() != expected.Len() { + t.Fatalf("bad: %#v", actual.List()) + } + }) + } +} + +func TestSetFilter(t *testing.T) { + cases := []struct { + Input []interface{} + Expected []interface{} + }{ + { + []interface{}{1, 2, 3}, + []interface{}{1, 2, 3}, + }, + + { + []interface{}{4, 5, 6}, + []interface{}{4}, + }, + + { + []interface{}{7, 8, 9}, + []interface{}{}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%#v", i, tc.Input), func(t *testing.T) { + input := make(Set) + expected := make(Set) + for _, v := range tc.Input { + input.Add(v) + } + for _, v := range tc.Expected { + expected.Add(v) + } + + actual := input.Filter(func(v interface{}) bool { + return v.(int) < 5 + }) + match := actual.Intersection(expected) + if match.Len() != expected.Len() { + t.Fatalf("bad: %#v", actual.List()) + } + }) + } +} + +func TestSetCopy(t *testing.T) { + a := make(Set) + a.Add(1) + a.Add(2) + + b := a.Copy() + b.Add(3) + + diff := b.Difference(a) + + if 
diff.Len() != 1 { + t.Fatalf("expected single diff value, got %#v", diff) + } + + if !diff.Include(3) { + t.Fatalf("diff does not contain 3, got %#v", diff) + } + +} + +func makeSet(n int) Set { + ret := make(Set, n) + for i := 0; i < n; i++ { + ret.Add(i) + } + return ret +} + +func BenchmarkSetIntersection_100_100000(b *testing.B) { + small := makeSet(100) + large := makeSet(100000) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + small.Intersection(large) + } +} + +func BenchmarkSetIntersection_100000_100(b *testing.B) { + small := makeSet(100) + large := makeSet(100000) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + large.Intersection(small) + } +} diff --git a/pkg/dag/tarjan.go b/pkg/dag/tarjan.go new file mode 100644 index 00000000000..50f43dcd0ac --- /dev/null +++ b/pkg/dag/tarjan.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dag + +// StronglyConnected returns the list of strongly connected components +// within the Graph g. This information is primarily used by this package +// for cycle detection, but strongly connected components have widespread +// use. 
func StronglyConnected(g *Graph) [][]Vertex {
	vs := g.Vertices()
	acct := sccAcct{
		NextIndex:   1,
		VertexIndex: make(map[Vertex]int, len(vs)),
	}
	for _, v := range vs {
		// Recurse on any non-visited nodes
		if acct.VertexIndex[v] == 0 {
			stronglyConnected(&acct, g, v)
		}
	}
	return acct.SCC
}

// stronglyConnected is the recursive visit step of Tarjan's algorithm: it
// assigns v its DFS index, explores v's successors, and pops a completed
// component off the stack when v turns out to be the root of one. It
// returns the smallest index reachable from v (the "lowlink" value).
func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
	// Initial vertex visit
	index := acct.visit(v)
	minIdx := index

	for _, raw := range g.downEdgesNoCopy(v) {
		target := raw.(Vertex)
		targetIdx := acct.VertexIndex[target]

		// Recurse on successor if not yet visited
		if targetIdx == 0 {
			minIdx = min(minIdx, stronglyConnected(acct, g, target))
		} else if acct.inStack(target) {
			// Check if the vertex is in the stack
			minIdx = min(minIdx, targetIdx)
		}
	}

	// Pop the strongly connected components off the stack if
	// this is a root vertex
	if index == minIdx {
		var scc []Vertex
		for {
			v2 := acct.pop()
			scc = append(scc, v2)
			if v2 == v {
				break
			}
		}

		acct.SCC = append(acct.SCC, scc)
	}

	return minIdx
}

// min returns the smaller of two ints.
func min(a, b int) int {
	if a <= b {
		return a
	}
	return b
}

// sccAcct is used to pass around accounting information for
// the StronglyConnectedComponents algorithm
type sccAcct struct {
	NextIndex   int
	VertexIndex map[Vertex]int
	Stack       []Vertex
	SCC         [][]Vertex
}

// visit assigns an index and pushes a vertex onto the stack
func (s *sccAcct) visit(v Vertex) int {
	idx := s.NextIndex
	s.VertexIndex[v] = idx
	s.NextIndex++
	s.push(v)
	return idx
}

// push adds a vertex to the stack
func (s *sccAcct) push(n Vertex) {
	s.Stack = append(s.Stack, n)
}

// pop removes a vertex from the stack
func (s *sccAcct) pop() Vertex {
	n := len(s.Stack)
	if n == 0 {
		return nil
	}
	vertex := s.Stack[n-1]
	s.Stack = s.Stack[:n-1]
	return vertex
}

// inStack checks if a vertex is in the stack
func (s *sccAcct) inStack(needle Vertex) bool {
	for _, n := range s.Stack {
		if n == needle {
			return true
		}
	}
	return false
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package dag

import (
	"sort"
	"strings"
	"testing"
)

func TestGraphStronglyConnected(t *testing.T) {
	var g Graph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))
	g.Connect(BasicEdge(2, 1))

	actual := strings.TrimSpace(testSCCStr(StronglyConnected(&g)))
	expected := strings.TrimSpace(testGraphStronglyConnectedStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}

func TestGraphStronglyConnected_two(t *testing.T) {
	var g Graph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))
	g.Connect(BasicEdge(2, 1))
	g.Add(3)

	actual := strings.TrimSpace(testSCCStr(StronglyConnected(&g)))
	expected := strings.TrimSpace(testGraphStronglyConnectedTwoStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}

func TestGraphStronglyConnected_three(t *testing.T) {
	var g Graph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))
	g.Connect(BasicEdge(2, 1))
	g.Add(3)
	g.Add(4)
	g.Add(5)
	g.Add(6)
	g.Connect(BasicEdge(4, 5))
	g.Connect(BasicEdge(5, 6))
	g.Connect(BasicEdge(6, 4))

	actual := strings.TrimSpace(testSCCStr(StronglyConnected(&g)))
	expected := strings.TrimSpace(testGraphStronglyConnectedThreeStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}

// testSCCStr renders components deterministically: vertex names are sorted
// within each component and the component lines themselves are sorted, so
// the result is stable regardless of traversal order.
func testSCCStr(list [][]Vertex) string {
	var lines []string
	for _, vs := range list {
		result := make([]string, len(vs))
		for i, v := range vs {
			result[i] = VertexName(v)
		}

		sort.Strings(result)
		lines = append(lines, strings.Join(result, ","))
	}

	sort.Strings(lines)
	return strings.Join(lines, "\n")
}

const testGraphStronglyConnectedStr = `1,2`

const testGraphStronglyConnectedTwoStr = `
1,2
3
`

const testGraphStronglyConnectedThreeStr = `
1,2
3
4,5,6
`

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package dag

import (
	"errors"
	"log"
	"sync"
	"time"

	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// Walker is used to walk every vertex of a graph in parallel.
//
// A vertex will only be walked when the dependencies of that vertex have
// been walked. If two vertices can be walked at the same time, they will be.
//
// Update can be called to update the graph. This can be called even during
// a walk, changing vertices/edges mid-walk. This should be done carefully.
// If a vertex is removed but has already been executed, the result of that
// execution (any error) is still returned by Wait. Changing or re-adding
// a vertex that has already executed has no effect. Changing edges of
// a vertex that has already executed has no effect.
//
// Non-parallelism can be enforced by introducing a lock in your callback
// function. However, the goroutine overhead of a walk will remain.
// Walker will create V*2 goroutines (one for each vertex, and a dependency
// waiter for each vertex). In general this should be of no concern unless
// there are a huge number of vertices.
//
// The walk is depth first by default. This can be changed with the Reverse
// option.
//
// A single walker is only valid for one graph walk. After the walk is complete
// you must construct a new walker to walk again. State for the walk is never
// deleted in case vertices or edges are changed.
type Walker struct {
	// Callback is what is called for each vertex
	Callback WalkFunc

	// Reverse, if true, causes the source of an edge to depend on a target.
	// When false (default), the target depends on the source.
	Reverse bool

	// changeLock must be held to modify any of the fields below. Only Update
	// should modify these fields. Modifying them outside of Update can cause
	// serious problems.
	changeLock      sync.Mutex
	vertices, edges Set
	vertexMap       map[Vertex]*walkerVertex

	// wait is done when all vertices have executed. It may become "undone"
	// if new vertices are added.
	wait sync.WaitGroup

	diagsLock sync.Mutex
	// diagsMap contains the diagnostics recorded so far for execution,
	// and upstreamFailed contains all the vertices whose problems were
	// caused by upstream failures, and thus whose diagnostics should be
	// excluded from the final set.
	//
	// Readers and writers of either map must hold diagsLock.
	diagsMap       map[Vertex]tfdiags.Diagnostics
	upstreamFailed map[Vertex]struct{}
}

// init lazily allocates the vertex and edge sets so that a zero-value
// Walker can be used directly.
func (w *Walker) init() {
	if w.vertices == nil {
		w.vertices = make(Set)
	}
	if w.edges == nil {
		w.edges = make(Set)
	}
}

// walkerVertex holds the per-vertex coordination state for one vertex in
// a walk: its completion/cancellation channels and dependency bookkeeping.
type walkerVertex struct {
	// These should only be set once on initialization and never written again.
	// They are not protected by a lock since they don't need to be since
	// they are write-once.

	// DoneCh is closed when this vertex has completed execution, regardless
	// of success.
	//
	// CancelCh is closed when the vertex should cancel execution. If execution
	// is already complete (DoneCh is closed), this has no effect. Otherwise,
	// execution is cancelled as quickly as possible.
	DoneCh   chan struct{}
	CancelCh chan struct{}

	DepsLock sync.Mutex
	// Dependency information. Any changes to any of these fields requires
	// holding DepsLock.
	//
	// DepsCh is sent a single value that denotes whether the upstream deps
	// were successful (no errors). Any value sent means that the upstream
	// dependencies are complete. No other values will ever be sent again.
	//
	// DepsUpdateCh is closed when there is a new DepsCh set.
	DepsCh       chan bool
	DepsUpdateCh chan struct{}

	// Below is not safe to read/write in parallel. This behavior is
	// enforced by changes only happening in Update. Nothing else should
	// ever modify these.
	deps         map[Vertex]chan struct{}
	depsCancelCh chan struct{}
}

// Wait waits for the completion of the walk and returns diagnostics describing
// any problems that arose. Update should be called to populate the walk with
// vertices and edges prior to calling this.
//
// Wait will return as soon as all currently known vertices are complete.
// If you plan on calling Update with more vertices in the future, you
// should not call Wait until after this is done.
func (w *Walker) Wait() tfdiags.Diagnostics {
	// Wait for completion
	w.wait.Wait()

	var diags tfdiags.Diagnostics
	w.diagsLock.Lock()
	for v, vDiags := range w.diagsMap {
		if _, upstream := w.upstreamFailed[v]; upstream {
			// Ignore diagnostics for nodes that had failed upstreams, since
			// the downstream diagnostics are likely to be redundant.
			continue
		}
		diags = diags.Append(vDiags)
	}
	w.diagsLock.Unlock()

	return diags
}

// Update updates the currently executing walk with the given graph.
// This will perform a diff of the vertices and edges and update the walker.
// Already completed vertices remain completed (including any errors during
// their execution).
//
// This returns immediately once the walker is updated; it does not wait
// for completion of the walk.
//
// Multiple Updates can be called in parallel. Update can be called at any
// time during a walk.
func (w *Walker) Update(g *AcyclicGraph) {
	w.init()
	v := make(Set)
	e := make(Set)
	if g != nil {
		v, e = g.vertices, g.edges
	}

	// Grab the change lock so no more updates happen but also so that
	// no new vertices are executed during this time since we may be
	// removing them. The lock is held for the rest of this function.
	w.changeLock.Lock()
	defer w.changeLock.Unlock()

	// Initialize fields
	if w.vertexMap == nil {
		w.vertexMap = make(map[Vertex]*walkerVertex)
	}

	// Calculate all our sets
	newEdges := e.Difference(w.edges)
	oldEdges := w.edges.Difference(e)
	newVerts := v.Difference(w.vertices)
	oldVerts := w.vertices.Difference(v)

	// Add the new vertices
	for _, raw := range newVerts {
		v := raw.(Vertex)

		// Add to the waitgroup so our walk is not done until everything finishes
		w.wait.Add(1)

		// Add to our own set so we know about it already
		w.vertices.Add(raw)

		// Initialize the vertex info
		info := &walkerVertex{
			DoneCh:   make(chan struct{}),
			CancelCh: make(chan struct{}),
			deps:     make(map[Vertex]chan struct{}),
		}

		// Add it to the map and kick off the walk
		w.vertexMap[v] = info
	}

	// Remove the old vertices
	for _, raw := range oldVerts {
		v := raw.(Vertex)

		// Get the vertex info so we can cancel it
		info, ok := w.vertexMap[v]
		if !ok {
			// This vertex for some reason was never in our map. This
			// shouldn't be possible.
			continue
		}

		// Cancel the vertex
		close(info.CancelCh)

		// Delete it out of the map
		delete(w.vertexMap, v)
		w.vertices.Delete(raw)
	}

	// Add the new edges
	changedDeps := make(Set)
	for _, raw := range newEdges {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Get the info for the dep
		depInfo, ok := w.vertexMap[dep]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Add the dependency to our waiter
		waiterInfo.deps[dep] = depInfo.DoneCh

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)
		w.edges.Add(raw)
	}

	// Process removed edges
	for _, raw := range oldEdges {
		edge := raw.(Edge)
		waiter, dep := w.edgeParts(edge)

		// Get the info for the waiter
		waiterInfo, ok := w.vertexMap[waiter]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Delete the dependency from the waiter
		delete(waiterInfo.deps, dep)

		// Record that the deps changed for this waiter
		changedDeps.Add(waiter)
		w.edges.Delete(raw)
	}

	// For each vertex with changed dependencies, we need to kick off
	// a new waiter and notify the vertex of the changes.
	for _, raw := range changedDeps {
		v := raw.(Vertex)
		info, ok := w.vertexMap[v]
		if !ok {
			// Vertex doesn't exist... shouldn't be possible but ignore.
			continue
		}

		// Create a new done channel
		doneCh := make(chan bool, 1)

		// Create the channel we close for cancellation
		cancelCh := make(chan struct{})

		// Build a new deps copy, so the waiter goroutine reads a snapshot
		// that later Updates cannot mutate out from under it.
		deps := make(map[Vertex]<-chan struct{})
		for k, v := range info.deps {
			deps[k] = v
		}

		// Update the update channel
		info.DepsLock.Lock()
		if info.DepsUpdateCh != nil {
			close(info.DepsUpdateCh)
		}
		info.DepsCh = doneCh
		info.DepsUpdateCh = make(chan struct{})
		info.DepsLock.Unlock()

		// Cancel the older waiter
		if info.depsCancelCh != nil {
			close(info.depsCancelCh)
		}
		info.depsCancelCh = cancelCh

		// Start the waiter
		go w.waitDeps(v, deps, doneCh, cancelCh)
	}

	// Start all the new vertices. We do this at the end so that all
	// the edge waiters and changes are set up above.
	for _, raw := range newVerts {
		v := raw.(Vertex)
		go w.walkVertex(v, w.vertexMap[v])
	}
}

// edgeParts returns the waiter and the dependency, in that order.
// The waiter is waiting on the dependency.
func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
	if w.Reverse {
		return e.Source(), e.Target()
	}

	return e.Target(), e.Source()
}

// walkVertex walks a single vertex, waiting for any dependencies before
// executing the callback.
func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
	// When we're done executing, lower the waitgroup count
	defer w.wait.Done()

	// When we're done, always close our done channel
	defer close(info.DoneCh)

	// Wait for our dependencies. We create a [closed] deps channel so
	// that we can immediately fall through to load our actual DepsCh.
	var depsSuccess bool
	var depsUpdateCh chan struct{}
	depsCh := make(chan bool, 1)
	depsCh <- true
	close(depsCh)
	for {
		select {
		case <-info.CancelCh:
			// Cancel
			return

		case depsSuccess = <-depsCh:
			// Deps complete! Mark as nil to trigger completion handling.
			depsCh = nil

		case <-depsUpdateCh:
			// New deps, reloop
		}

		// Check if we have updated dependencies. This can happen if the
		// dependencies were satisfied exactly prior to an Update occurring.
		// In that case, we'd like to take into account new dependencies
		// if possible.
		info.DepsLock.Lock()
		if info.DepsCh != nil {
			depsCh = info.DepsCh
			info.DepsCh = nil
		}
		if info.DepsUpdateCh != nil {
			depsUpdateCh = info.DepsUpdateCh
		}
		info.DepsLock.Unlock()

		// If we still have no deps channel set, then we're done!
		if depsCh == nil {
			break
		}
	}

	// If we passed dependencies, we just want to check once more that
	// we're not cancelled, since this can happen just as dependencies pass.
	select {
	case <-info.CancelCh:
		// Cancelled during an update while dependencies completed.
		return
	default:
	}

	// Run our callback or note that our upstream failed
	var diags tfdiags.Diagnostics
	var upstreamFailed bool
	if depsSuccess {
		diags = w.Callback(v)
	} else {
		log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v))
		// This won't be displayed to the user because we'll set upstreamFailed,
		// but we need to ensure there's at least one error in here so that
		// the failures will cascade downstream.
		diags = diags.Append(errors.New("upstream dependencies failed"))
		upstreamFailed = true
	}

	// Record the result (we must do this after execution because we mustn't
	// hold diagsLock while visiting a vertex.)
	w.diagsLock.Lock()
	if w.diagsMap == nil {
		w.diagsMap = make(map[Vertex]tfdiags.Diagnostics)
	}
	w.diagsMap[v] = diags
	if w.upstreamFailed == nil {
		w.upstreamFailed = make(map[Vertex]struct{})
	}
	if upstreamFailed {
		w.upstreamFailed[v] = struct{}{}
	}
	w.diagsLock.Unlock()
}

// waitDeps waits for every dependency of v to complete (or for the wait to
// be cancelled), then sends a single value on doneCh reporting whether all
// of the dependencies finished without errors.
func (w *Walker) waitDeps(
	v Vertex,
	deps map[Vertex]<-chan struct{},
	doneCh chan<- bool,
	cancelCh <-chan struct{}) {

	// For each dependency given to us, wait for it to complete
	for dep, depCh := range deps {
	DepSatisfied:
		for {
			select {
			case <-depCh:
				// Dependency satisfied!
				break DepSatisfied

			case <-cancelCh:
				// Wait cancelled. Note that we didn't satisfy dependencies
				// so that anything waiting on us also doesn't run.
				doneCh <- false
				return

			case <-time.After(time.Second * 5):
				// Periodic trace logging so a stuck walk is diagnosable.
				log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
					VertexName(v), VertexName(dep))
			}
		}
	}

	// Dependencies satisfied! We need to check if any errored
	w.diagsLock.Lock()
	defer w.diagsLock.Unlock()
	for dep := range deps {
		if w.diagsMap[dep].HasErrors() {
			// One of our dependencies failed, so return false
			doneCh <- false
			return
		}
	}

	// All dependencies satisfied and successful
	doneCh <- true
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package dag

import (
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/kubegems/opentofu/pkg/tfdiags"
)

func TestWalker_basic(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))

	// Run it a bunch of times since it is timing dependent
	for i := 0; i < 50; i++ {
		var order []interface{}
		w := &Walker{Callback: walkCbRecord(&order)}
		w.Update(&g)

		// Wait
		if err := w.Wait(); err != nil {
			t.Fatalf("err: %s", err)
		}

		// Check
		expected := []interface{}{1, 2}
		if !reflect.DeepEqual(order, expected) {
			t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
		}
	}
}

func TestWalker_updateNilGraph(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))

	// Run it a bunch of times since it is timing dependent
	for i := 0; i < 50; i++ {
		var order []interface{}
		w := &Walker{Callback: walkCbRecord(&order)}
		w.Update(&g)
		w.Update(nil)

		// Wait
		if err := w.Wait(); err != nil {
			t.Fatalf("err: %s", err)
		}
	}
}

func TestWalker_error(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Add(3)
	g.Add(4)
	g.Connect(BasicEdge(1, 2))
	g.Connect(BasicEdge(2, 3))
	g.Connect(BasicEdge(3, 4))

	// Record function
	var order []interface{}
	recordF := walkCbRecord(&order)

	// Build a callback that fails on vertex 2, so everything downstream
	// of it should be skipped.
	cb := func(v Vertex) tfdiags.Diagnostics {
		if v == 2 {
			var diags tfdiags.Diagnostics
			diags = diags.Append(fmt.Errorf("error"))
			return diags
		}

		return recordF(v)
	}

	w := &Walker{Callback: cb}
	w.Update(&g)

	// Wait
	if err := w.Wait(); err == nil {
		t.Fatal("expect error")
	}

	// Check
	expected := []interface{}{1}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}
}

func TestWalker_newVertex(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))

	// Record function
	var order []interface{}
	recordF := walkCbRecord(&order)
	done2 := make(chan int)

	// Build a callback that notifies us when 2 has been walked
	var w *Walker
	cb := func(v Vertex) tfdiags.Diagnostics {
		if v == 2 {
			defer close(done2)
		}
		return recordF(v)
	}

	// Add the initial vertices
	w = &Walker{Callback: cb}
	w.Update(&g)

	// if 2 has been visited, the walk is complete so far
	<-done2

	// Update the graph
	g.Add(3)
	w.Update(&g)

	// Update the graph again but with the same vertex
	g.Add(3)
	w.Update(&g)

	// Wait
	if err := w.Wait(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check
	expected := []interface{}{1, 2, 3}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}
}

func TestWalker_removeVertex(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))

	// Record function
	var order []interface{}
	recordF := walkCbRecord(&order)

	// Removing 2 during the visit of 1 should prevent 2 from running.
	var w *Walker
	cb := func(v Vertex) tfdiags.Diagnostics {
		if v == 1 {
			g.Remove(2)
			w.Update(&g)
		}

		return recordF(v)
	}

	// Add the initial vertices
	w = &Walker{Callback: cb}
	w.Update(&g)

	// Wait
	if err := w.Wait(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check
	expected := []interface{}{1}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}
}

func TestWalker_newEdge(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Connect(BasicEdge(1, 2))

	// Record function
	var order []interface{}
	recordF := walkCbRecord(&order)

	var w *Walker
	cb := func(v Vertex) tfdiags.Diagnostics {
		// record where we are first, otherwise the Updated vertex may get
		// walked before the first visit.
		diags := recordF(v)

		if v == 1 {
			g.Add(3)
			g.Connect(BasicEdge(3, 2))
			w.Update(&g)
		}
		return diags
	}

	// Add the initial vertices
	w = &Walker{Callback: cb}
	w.Update(&g)

	// Wait
	if err := w.Wait(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Check
	expected := []interface{}{1, 3, 2}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}
}

func TestWalker_removeEdge(t *testing.T) {
	var g AcyclicGraph
	g.Add(1)
	g.Add(2)
	g.Add(3)
	g.Connect(BasicEdge(1, 2))
	g.Connect(BasicEdge(1, 3))
	g.Connect(BasicEdge(3, 2))

	// Record function
	var order []interface{}
	recordF := walkCbRecord(&order)

	// The way this works is that our original graph forces
	// the order of 1 => 3 => 2. During the execution of 1, we
	// remove the edge forcing 3 before 2. Then, during the execution
	// of 3, we wait on a channel that is only closed by 2, implicitly
	// forcing 2 before 3 via the callback (and not the graph). If
	// 2 cannot execute before 3 (edge removal is non-functional), then
	// this test will timeout.
	var w *Walker
	gateCh := make(chan struct{})
	cb := func(v Vertex) tfdiags.Diagnostics {
		t.Logf("visit vertex %#v", v)
		switch v {
		case 1:
			g.RemoveEdge(BasicEdge(3, 2))
			w.Update(&g)
			t.Logf("removed edge from 3 to 2")

		case 2:
			// this visit isn't completed until we've recorded it
			// Once the visit is official, we can then close the gate to
			// let 3 continue.
			defer close(gateCh)
			defer t.Logf("2 unblocked 3")

		case 3:
			select {
			case <-gateCh:
				t.Logf("vertex 3 gate channel is now closed")
			case <-time.After(500 * time.Millisecond):
				t.Logf("vertex 3 timed out waiting for the gate channel to close")
				var diags tfdiags.Diagnostics
				diags = diags.Append(fmt.Errorf("timeout 3 waiting for 2"))
				return diags
			}
		}

		return recordF(v)
	}

	// Add the initial vertices
	w = &Walker{Callback: cb}
	w.Update(&g)

	// Wait
	if diags := w.Wait(); diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// Check
	expected := []interface{}{1, 2, 3}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}
}

// walkCbRecord is a test helper callback that just records the order called.
func walkCbRecord(order *[]interface{}) WalkFunc {
	var l sync.Mutex
	return func(v Vertex) tfdiags.Diagnostics {
		l.Lock()
		defer l.Unlock()
		*order = append(*order, v)
		return nil
	}
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package depsfile contains the logic for reading and writing OpenTofu's
// dependency lock and development override configuration files.
//
// These files are separate from the main OpenTofu configuration files (.tf)
// for a number of reasons. The first is to help establish a distinction
// where .tf files configure a particular module while these configure
// a whole configuration tree. Another, more practical consideration is that
// we intend both of these files to be primarily maintained automatically by
// OpenTofu itself, rather than by human-originated edits, and so keeping
// them separate means that it's easier to distinguish the files that OpenTofu
// will change automatically during normal workflow from the files that
// OpenTofu only edits on direct request.
//
// Both files use HCL syntax, for consistency with other files in OpenTofu
// that we expect humans to (in this case, only occasionally) edit directly.
// A dependency lock file tracks the most recently selected upstream versions
// of each dependency, and is intended for check-in to version control.
// A development override file allows for temporarily overriding upstream
// dependencies with local files/directories on disk as an aid to testing
// a cross-codebase change during development, and should not be saved in
// version control.
package depsfile

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package depsfile

import (
	"fmt"
	"sort"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/getproviders"
)

// Locks is the top-level type representing the information retained in a
// dependency lock file.
//
// Locks and the other types used within it are mutable via various setter
// methods, but they are not safe for concurrent modifications, so it's the
// caller's responsibility to prevent concurrent writes and writes concurrent
// with reads.
+type Locks struct { + providers map[addrs.Provider]*ProviderLock + + // overriddenProviders is a subset of providers which we might be tracking + // in field providers but whose lock information we're disregarding for + // this particular run due to some feature that forces OpenTofu to not + // use a normally-installed plugin for it. For example, the "provider dev + // overrides" feature means that we'll be using an arbitrary directory on + // disk as the package, regardless of what might be selected in "providers". + // + // overriddenProviders is an in-memory-only annotation, never stored as + // part of a lock file and thus not persistent between OpenTofu runs. + // The CLI layer is generally the one responsible for populating this, + // by calling SetProviderOverridden in response to CLI Configuration + // settings, environment variables, or whatever similar sources. + overriddenProviders map[addrs.Provider]struct{} + + // TODO: In future we'll also have module locks, but the design of that + // still needs some more work and we're deferring that to get the + // provider locking capability out sooner, because it's more common to + // directly depend on providers maintained outside your organization than + // modules maintained outside your organization. + + // sources is a copy of the map of source buffers produced by the HCL + // parser during loading, which we retain only so that the caller can + // use it to produce source code snippets in error messages. + sources map[string][]byte +} + +// NewLocks constructs and returns a new Locks object that initially contains +// no locks at all. +func NewLocks() *Locks { + return &Locks{ + providers: make(map[addrs.Provider]*ProviderLock), + + // no "sources" here, because that's only for locks objects loaded + // from files. + } +} + +// Provider returns the stored lock for the given provider, or nil if that +// provider currently has no lock. 
+func (l *Locks) Provider(addr addrs.Provider) *ProviderLock { + return l.providers[addr] +} + +// AllProviders returns a map describing all of the provider locks in the +// receiver. +func (l *Locks) AllProviders() map[addrs.Provider]*ProviderLock { + // We return a copy of our internal map so that future calls to + // SetProvider won't modify the map we're returning, or vice-versa. + ret := make(map[addrs.Provider]*ProviderLock, len(l.providers)) + for k, v := range l.providers { + ret[k] = v + } + return ret +} + +// SetProvider creates a new lock or replaces the existing lock for the given +// provider. +// +// SetProvider returns the newly-created provider lock object, which +// invalidates any ProviderLock object previously returned from Provider or +// SetProvider for the given provider address. +// +// The ownership of the backing array for the slice of hashes passes to this +// function, and so the caller must not read or write that backing array after +// calling SetProvider. +// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func (l *Locks) SetProvider(addr addrs.Provider, version getproviders.Version, constraints getproviders.VersionConstraints, hashes []getproviders.Hash) *ProviderLock { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.SetProvider with non-lockable provider %s", addr)) + } + + new := NewProviderLock(addr, version, constraints, hashes) + l.providers[new.addr] = new + return new +} + +// RemoveProvider removes any existing lock file entry for the given provider. +// +// If the given provider did not already have a lock entry, RemoveProvider is +// a no-op. +// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. 
Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func (l *Locks) RemoveProvider(addr addrs.Provider) { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.RemoveProvider with non-lockable provider %s", addr)) + } + + delete(l.providers, addr) +} + +// SetProviderOverridden records that this particular OpenTofu process will +// not pay attention to the recorded lock entry for the given provider, and +// will instead access that provider's functionality in some other special +// way that isn't sensitive to provider version selections or checksums. +// +// This is an in-memory-only annotation which lives only inside a particular +// Locks object, and is never persisted as part of a saved lock file on disk. +// It's valid to still use other methods of the receiver to access +// already-stored lock information and to update lock information for an +// overridden provider, but some callers may need to use ProviderIsOverridden +// to selectively disregard stored lock information for overridden providers, +// depending on what they intended to use the lock information for. +func (l *Locks) SetProviderOverridden(addr addrs.Provider) { + if l.overriddenProviders == nil { + l.overriddenProviders = make(map[addrs.Provider]struct{}) + } + l.overriddenProviders[addr] = struct{}{} +} + +// ProviderIsOverridden returns true only if the given provider address was +// previously registered as overridden by calling SetProviderOverridden. +func (l *Locks) ProviderIsOverridden(addr addrs.Provider) bool { + _, ret := l.overriddenProviders[addr] + return ret +} + +// SetSameOverriddenProviders updates the receiver to mark as overridden all +// of the same providers already marked as overridden in the other given locks. 
+// +// This allows propagating override information between different lock objects, +// as if calling SetProviderOverridden for each address already overridden +// in the other given locks. If the receiver already has overridden providers, +// SetSameOverriddenProviders will preserve them. +func (l *Locks) SetSameOverriddenProviders(other *Locks) { + if other == nil { + return + } + for addr := range other.overriddenProviders { + l.SetProviderOverridden(addr) + } +} + +// NewProviderLock creates a new ProviderLock object that isn't associated +// with any Locks object. +// +// This is here primarily for testing. Most callers should use Locks.SetProvider +// to construct a new provider lock and insert it into a Locks object at the +// same time. +// +// The ownership of the backing array for the slice of hashes passes to this +// function, and so the caller must not read or write that backing array after +// calling NewProviderLock. +// +// Only lockable providers can be passed to this method. If you pass a +// non-lockable provider address then this function will panic. Use +// function ProviderIsLockable to determine whether a particular provider +// should participate in the version locking mechanism. +func NewProviderLock(addr addrs.Provider, version getproviders.Version, constraints getproviders.VersionConstraints, hashes []getproviders.Hash) *ProviderLock { + if !ProviderIsLockable(addr) { + panic(fmt.Sprintf("Locks.NewProviderLock with non-lockable provider %s", addr)) + } + + // Normalize the hashes into lexical order so that we can do straightforward + // equality tests between different locks for the same provider. The + // hashes are logically a set, so the given order is insignificant. 
+ sort.Slice(hashes, func(i, j int) bool { + return string(hashes[i]) < string(hashes[j]) + }) + + // This is a slightly-tricky in-place deduping to avoid unnecessarily + // allocating a new array in the common case where there are no duplicates: + // we iterate over "hashes" at the same time as appending to another slice + // with the same backing array, relying on the fact that deduping can only + // _skip_ elements from the input, and will never generate additional ones + // that would cause the writer to get ahead of the reader. This also + // assumes that we already sorted the items, which means that any duplicates + // will be consecutive in the sequence. + dedupeHashes := hashes[:0] + prevHash := getproviders.NilHash + for _, hash := range hashes { + if hash != prevHash { + dedupeHashes = append(dedupeHashes, hash) + prevHash = hash + } + } + + return &ProviderLock{ + addr: addr, + version: version, + versionConstraints: constraints, + hashes: dedupeHashes, + } +} + +// ProviderIsLockable returns true if the given provider is eligible for +// version locking. +// +// Currently, all providers except builtin and legacy providers are eligible +// for locking. +func ProviderIsLockable(addr addrs.Provider) bool { + return !(addr.IsBuiltIn() || addr.IsLegacy()) +} + +// Sources returns the source code of the file the receiver was generated from, +// or an empty map if the receiver wasn't generated from a file. +// +// This return type matches the one expected by HCL diagnostics printers to +// produce source code snapshots, which is the only intended use for this +// method. +func (l *Locks) Sources() map[string][]byte { + return l.sources +} + +// Equal returns true if the given Locks represents the same information as +// the receiver. 
+// +// Equal explicitly _does not_ consider the equality of version constraints +// in the saved locks, because those are saved only as hints to help the UI +// explain what's changed between runs, and are never used as part of +// dependency installation decisions. +func (l *Locks) Equal(other *Locks) bool { + if len(l.providers) != len(other.providers) { + return false + } + for addr, thisLock := range l.providers { + otherLock, ok := other.providers[addr] + if !ok { + return false + } + + if thisLock.addr != otherLock.addr { + // It'd be weird to get here because we already looked these up + // by address above. + return false + } + if thisLock.version != otherLock.version { + // Equality rather than "Version.Same" because changes to the + // build metadata are significant for the purpose of this function: + // it's a different package even if it has the same precedence. + return false + } + + // Although "hashes" is declared as a slice, it's logically an + // unordered set. However, we normalize the slice of hashes when + // receiving it in NewProviderLock, so we can just do a simple + // item-by-item equality test here. + if len(thisLock.hashes) != len(otherLock.hashes) { + return false + } + for i := range thisLock.hashes { + if thisLock.hashes[i] != otherLock.hashes[i] { + return false + } + } + } + // We don't need to worry about providers that are in "other" but not + // in the receiver, because we tested the lengths being equal above. + + return true +} + +// EqualProviderAddress returns true if the given Locks have the same provider +// address as the receiver. This doesn't check version and hashes. +func (l *Locks) EqualProviderAddress(other *Locks) bool { + if len(l.providers) != len(other.providers) { + return false + } + + for addr := range l.providers { + _, ok := other.providers[addr] + if !ok { + return false + } + } + + return true +} + +// Empty returns true if the given Locks object contains no actual locks. 
+// +// UI code might wish to use this to distinguish a lock file being +// written for the first time from subsequent updates to that lock file. +func (l *Locks) Empty() bool { + return len(l.providers) == 0 +} + +// DeepCopy creates a new Locks that represents the same information as the +// receiver but does not share memory for any parts of the structure that +// are mutable through methods on Locks. +// +// Note that this does _not_ create deep copies of parts of the structure +// that are technically mutable but are immutable by convention, such as the +// array underlying the slice of version constraints. Callers may mutate the +// resulting data structure only via the direct methods of Locks. +func (l *Locks) DeepCopy() *Locks { + ret := NewLocks() + for addr, lock := range l.providers { + var hashes []getproviders.Hash + if len(lock.hashes) > 0 { + hashes = make([]getproviders.Hash, len(lock.hashes)) + copy(hashes, lock.hashes) + } + ret.SetProvider(addr, lock.version, lock.versionConstraints, hashes) + } + return ret +} + +// ProviderLock represents lock information for a specific provider. +type ProviderLock struct { + // addr is the address of the provider this lock applies to. + addr addrs.Provider + + // version is the specific version that was previously selected, while + // versionConstraints is the constraint that was used to make that + // selection, which we can potentially use to hint to run + // e.g. tofu init -upgrade if a user has changed a version + // constraint but the previous selection still remains valid. + // "version" is therefore authoritative, while "versionConstraints" is + // just for a UI hint and not used to make any real decisions. + version getproviders.Version + versionConstraints getproviders.VersionConstraints + + // hashes contains zero or more hashes of packages or package contents + // for the package associated with the selected version across all of + // the supported platforms. 
+// +// hashes can contain a mixture of hashes in different formats to support +// changes over time. The new-style hash format is to have a string +// starting with "h" followed by a version number and then a colon, like +// "h1:" for the first hash format version. Other hash versions following +// this scheme may come later. These versioned hash schemes are implemented +// in the getproviders package; for example, "h1:" is implemented in +// getproviders.HashV1 . +// +// There is also a legacy hash format which is just a lowercase-hex-encoded +// SHA256 hash of the official upstream .zip file for the selected version. +// We'll allow that as a stop-gap until we can upgrade Terraform Registry +// to support the new scheme, but is non-ideal because we can verify it only +// when we have the original .zip file exactly; we can't verify a local +// directory containing the unpacked contents of that .zip file. +// +// We ideally want to populate hashes for all available platforms at +// once, by referring to the signed checksums file in the upstream +// registry. In that ideal case it's possible to later work with the same +// configuration on a different platform while still verifying the hashes. +// However, installation from any method other than an origin registry +// means we can only populate the hash for the current platform, and so +// it won't be possible to verify a subsequent installation of the same +// provider on a different platform. + hashes []getproviders.Hash +} + +// Provider returns the address of the provider this lock applies to. +func (l *ProviderLock) Provider() addrs.Provider { + return l.addr +} + +// Version returns the currently-selected version for the corresponding provider. +func (l *ProviderLock) Version() getproviders.Version { + return l.version +} + +// VersionConstraints returns the version constraints that were recorded as +// being used to choose the version returned by Version. 
+// +// These version constraints are not authoritative for future selections and +// are included only so OpenTofu can detect if the constraints in +// configuration have changed since a selection was made, and thus hint to the +// user that they may need to run tofu init -upgrade to apply the new +// constraints. +func (l *ProviderLock) VersionConstraints() getproviders.VersionConstraints { + return l.versionConstraints +} + +// AllHashes returns all of the package hashes that were recorded when this +// lock was created. If no hashes were recorded for that platform, the result +// is a zero-length slice. +// +// If your intent is to verify a package against the recorded hashes, use +// PreferredHashes to get only the hashes which the current version +// of OpenTofu considers the strongest of the available hashing schemes, one +// of which must match in order for verification to be considered successful. +// +// Do not modify the backing array of the returned slice. +func (l *ProviderLock) AllHashes() []getproviders.Hash { + return l.hashes +} + +// ContainsAll returns true if the hashes in this ProviderLock contain +// all the hashes in the target. +// +// This function assumes the hashes in each ProviderLock are sorted. +// If the ProviderLock was created by the NewProviderLock constructor then +// the hashes are guaranteed to be sorted. +func (l *ProviderLock) ContainsAll(target *ProviderLock) bool { + if target == nil || len(target.hashes) == 0 { + return true + } + + targetIndex := 0 + for ix := 0; ix < len(l.hashes); ix++ { + if l.hashes[ix] == target.hashes[targetIndex] { + targetIndex++ + + if targetIndex >= len(target.hashes) { + return true + } + } + } + return false +} + +// PreferredHashes returns a filtered version of the AllHashes return value +// which includes only the strongest of the available hash schemes, in +// case legacy hash schemes are deprecated over time but still supported for +// upgrade purposes. 
+// +// At least one of the given hashes must match for a package to be considered +// valud. +func (l *ProviderLock) PreferredHashes() []getproviders.Hash { + return getproviders.PreferredHashes(l.hashes) +} diff --git a/pkg/depsfile/locks_file.go b/pkg/depsfile/locks_file.go new file mode 100644 index 00000000000..46b13ede9fe --- /dev/null +++ b/pkg/depsfile/locks_file.go @@ -0,0 +1,499 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package depsfile + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/replacefile" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/version" +) + +// LoadLocksFromFile reads locks from the given file, expecting it to be a +// valid dependency lock file, or returns error diagnostics explaining why +// that was not possible. +// +// The returned locks are a snapshot of what was present on disk at the time +// the method was called. It does not take into account any subsequent writes +// to the file, whether through this package's functions or by external +// writers. +// +// If the returned diagnostics contains errors then the returned Locks may +// be incomplete or invalid. +func LoadLocksFromFile(filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCLFile(filename) + }) +} + +// LoadLocksFromBytes reads locks from the given byte array, pretending that +// it was read from the given filename. 
+// +// The constraints and behaviors are otherwise the same as for +// LoadLocksFromFile. LoadLocksFromBytes is primarily to allow more convenient +// integration testing (avoiding creating temporary files on disk); if you +// are writing non-test code, consider whether LoadLocksFromFile might be +// more appropriate to call. +// +// It is valid to use this with dependency lock information recorded as part of +// a plan file, in which case the given filename will typically be a +// placeholder that will only be seen in the unusual case that the plan file +// contains an invalid lock file, which should only be possible if the user +// edited it directly (OpenTofu bugs notwithstanding). +func LoadLocksFromBytes(src []byte, filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCL(src, filename) + }) +} + +func loadLocks(loadParse func(*hclparse.Parser) (*hcl.File, hcl.Diagnostics)) (*Locks, tfdiags.Diagnostics) { + ret := NewLocks() + + var diags tfdiags.Diagnostics + + parser := hclparse.NewParser() + f, hclDiags := loadParse(parser) + ret.sources = parser.Sources() + diags = diags.Append(hclDiags) + if f == nil { + // If we encountered an error loading the file then those errors + // should already be in diags from the above, but the file might + // also be nil itself and so we can't decode from it. + return ret, diags + } + + moreDiags := decodeLocksFromHCL(ret, f.Body) + diags = diags.Append(moreDiags) + return ret, diags +} + +// SaveLocksToFile writes the given locks object to the given file, +// entirely replacing any content already in that file, or returns error +// diagnostics explaining why that was not possible. +// +// SaveLocksToFile attempts an atomic replacement of the file, as an aid +// to external tools such as text editor integrations that might be monitoring +// the file as a signal to invalidate cached metadata. 
Consequently, other +// temporary files may be temporarily created in the same directory as the +// given filename during the operation. +func SaveLocksToFile(locks *Locks, filename string) tfdiags.Diagnostics { + src, diags := SaveLocksToBytes(locks) + if diags.HasErrors() { + return diags + } + + err := replacefile.AtomicWriteFile(filename, src, 0644) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update dependency lock file", + fmt.Sprintf("Error while writing new dependency lock information to %s: %s.", filename, err), + )) + return diags + } + + return diags +} + +// SaveLocksToBytes writes the given locks object into a byte array, +// using the same syntax that LoadLocksFromBytes expects to parse. +func SaveLocksToBytes(locks *Locks) ([]byte, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // In other uses of the "hclwrite" package we typically try to make + // surgical updates to the author's existing files, preserving their + // block ordering, comments, etc. We intentionally don't do that here + // to reinforce the fact that this file primarily belongs to OpenTofu, + // and to help ensure that VCS diffs of the file primarily reflect + // changes that actually affect functionality rather than just cosmetic + // changes, by maintaining it in a highly-normalized form. + + f := hclwrite.NewEmptyFile() + rootBody := f.Body() + + // End-users _may_ edit the lock file in exceptional situations, like + // working around potential dependency selection bugs, but we intend it + // to be primarily maintained automatically by the "tofu init" + // command. 
+ rootBody.AppendUnstructuredTokens(hclwrite.Tokens{ + { + Type: hclsyntax.TokenComment, + Bytes: []byte("# This file is maintained automatically by \"tofu init\".\n"), + }, + { + Type: hclsyntax.TokenComment, + Bytes: []byte("# Manual edits may be lost in future updates.\n"), + }, + }) + + providers := make([]addrs.Provider, 0, len(locks.providers)) + for provider := range locks.providers { + providers = append(providers, provider) + } + sort.Slice(providers, func(i, j int) bool { + return providers[i].LessThan(providers[j]) + }) + + for _, provider := range providers { + lock := locks.providers[provider] + rootBody.AppendNewline() + block := rootBody.AppendNewBlock("provider", []string{lock.addr.String()}) + body := block.Body() + body.SetAttributeValue("version", cty.StringVal(lock.version.String())) + if constraintsStr := getproviders.VersionConstraintsString(lock.versionConstraints); constraintsStr != "" { + body.SetAttributeValue("constraints", cty.StringVal(constraintsStr)) + } + if len(lock.hashes) != 0 { + hashToks := encodeHashSetTokens(lock.hashes) + body.SetAttributeRaw("hashes", hashToks) + } + } + + return f.Bytes(), diags +} + +func decodeLocksFromHCL(locks *Locks, body hcl.Body) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + content, hclDiags := body.Content(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "provider", + LabelNames: []string{"source_addr"}, + }, + + // "module" is just a placeholder for future enhancement, so we + // can mostly-ignore the this block type we intend to add in + // future, but warn in case someone tries to use one e.g. if they + // downgraded to an earlier version of OpenTofu. 
+ { + Type: "module", + LabelNames: []string{"path"}, + }, + }, + }) + diags = diags.Append(hclDiags) + + seenProviders := make(map[addrs.Provider]hcl.Range) + seenModule := false + for _, block := range content.Blocks { + + switch block.Type { + case "provider": + lock, moreDiags := decodeProviderLockFromHCL(block) + diags = diags.Append(moreDiags) + if lock == nil { + continue + } + if previousRng, exists := seenProviders[lock.addr]; exists { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider lock", + Detail: fmt.Sprintf("This lockfile already declared a lock for provider %s at %s.", lock.addr.String(), previousRng.String()), + Subject: block.TypeRange.Ptr(), + }) + continue + } + locks.providers[lock.addr] = lock + seenProviders[lock.addr] = block.DefRange + + case "module": + // We'll just take the first module block to use for a single warning, + // because that's sufficient to get the point across without swamping + // the output with warning noise. + if !seenModule { + currentVersion := version.SemVer.String() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Dependency locks for modules are not yet supported", + Detail: fmt.Sprintf("OpenTofu v%s only supports dependency locks for providers, not for modules. This configuration may be intended for a later version of OpenTofu that also supports dependency locks for modules.", currentVersion), + Subject: block.TypeRange.Ptr(), + }) + seenModule = true + } + + default: + // Shouldn't get here because this should be exhaustive for + // all of the block types in the schema above. 
+ } + + } + + return diags +} + +func decodeProviderLockFromHCL(block *hcl.Block) (*ProviderLock, tfdiags.Diagnostics) { + ret := &ProviderLock{} + var diags tfdiags.Diagnostics + + rawAddr := block.Labels[0] + addr, moreDiags := addrs.ParseProviderSourceString(rawAddr) + if moreDiags.HasErrors() { + // The diagnostics from ParseProviderSourceString are, as the name + // suggests, written with an intended audience of someone who is + // writing a "source" attribute in a provider requirement, not + // our lock file. Therefore we're using a less helpful, fixed error + // here, which is non-ideal but hopefully okay for now because we + // don't intend end-users to typically be hand-editing these anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: "The provider source address for a provider lock must be a valid, fully-qualified address of the form \"hostname/namespace/type\".", + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + if !ProviderIsLockable(addr) { + if addr.IsBuiltIn() { + // A specialized error for built-in providers, because we have an + // explicit explanation for why those are not allowed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: fmt.Sprintf("Cannot lock a version for built-in provider %s. Built-in providers are bundled inside OpenTofu itself, so you can't select a version for them independently of the OpenTofu release you are currently running.", addr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + // Otherwise, we'll use a generic error message. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider source address", + Detail: fmt.Sprintf("Provider source address %s is a special provider that is not eligible for dependency locking.", addr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + if canonAddr := addr.String(); canonAddr != rawAddr { + // We also require the provider addresses in the lock file to be + // written in fully-qualified canonical form, so that it's totally + // clear to a reader which provider each block relates to. Again, + // we expect hand-editing of these to be atypical so it's reasonable + // to be stricter in parsing these than we would be in the main + // configuration. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Non-normalized provider source address", + Detail: fmt.Sprintf("The provider source address for this provider lock must be written as %q, the fully-qualified and normalized form.", canonAddr), + Subject: block.LabelRanges[0].Ptr(), + }) + return nil, diags + } + + ret.addr = addr + + content, hclDiags := block.Body.Content(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "version", Required: true}, + {Name: "constraints"}, + {Name: "hashes"}, + }, + }) + diags = diags.Append(hclDiags) + + version, moreDiags := decodeProviderVersionArgument(addr, content.Attributes["version"]) + ret.version = version + diags = diags.Append(moreDiags) + + constraints, moreDiags := decodeProviderVersionConstraintsArgument(addr, content.Attributes["constraints"]) + ret.versionConstraints = constraints + diags = diags.Append(moreDiags) + + hashes, moreDiags := decodeProviderHashesArgument(addr, content.Attributes["hashes"]) + ret.hashes = hashes + diags = diags.Append(moreDiags) + + return ret, diags +} + +func decodeProviderVersionArgument(provider addrs.Provider, attr *hcl.Attribute) (getproviders.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // 
It's not okay to omit this argument, but the caller should already + // have generated diagnostics about that. + return getproviders.UnspecifiedVersion, diags + } + expr := attr.Expr + + var raw *string + hclDiags := gohcl.DecodeExpression(expr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return getproviders.UnspecifiedVersion, diags + } + if raw == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: "A provider lock block must contain a \"version\" argument.", + Subject: expr.Range().Ptr(), // the range for a missing argument's expression is the body's missing item range + }) + return getproviders.UnspecifiedVersion, diags + } + version, err := getproviders.ParseVersion(*raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version number", + Detail: fmt.Sprintf("The selected version number for provider %s is invalid: %s.", provider, err), + Subject: expr.Range().Ptr(), + }) + } + if canon := version.String(); canon != *raw { + // Canonical forms are required in the lock file, to reduce the risk + // that a file diff will show changes that are entirely cosmetic. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version number", + Detail: fmt.Sprintf("The selected version number for provider %s must be written in normalized form: %q.", provider, canon), + Subject: expr.Range().Ptr(), + }) + } + return version, diags +} + +func decodeProviderVersionConstraintsArgument(provider addrs.Provider, attr *hcl.Attribute) (getproviders.VersionConstraints, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // It's okay to omit this argument. 
+ return nil, diags + } + expr := attr.Expr + + var raw string + hclDiags := gohcl.DecodeExpression(expr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return nil, diags + } + constraints, err := getproviders.ParseVersionConstraints(raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version constraints", + Detail: fmt.Sprintf("The recorded version constraints for provider %s are invalid: %s.", provider, err), + Subject: expr.Range().Ptr(), + }) + } + if canon := getproviders.VersionConstraintsString(constraints); canon != raw { + // Canonical forms are required in the lock file, to reduce the risk + // that a file diff will show changes that are entirely cosmetic. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider version constraints", + Detail: fmt.Sprintf("The recorded version constraints for provider %s must be written in normalized form: %q.", provider, canon), + Subject: expr.Range().Ptr(), + }) + } + + return constraints, diags +} + +func decodeProviderHashesArgument(provider addrs.Provider, attr *hcl.Attribute) ([]getproviders.Hash, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + if attr == nil { + // It's okay to omit this argument. + return nil, diags + } + expr := attr.Expr + + // We'll decode this argument using the HCL static analysis mode, because + // there's no reason for the hashes list to be dynamic and this way we can + // give more precise feedback on individual elements that are invalid, + // with direct source locations. 
+ hashExprs, hclDiags := hcl.ExprList(expr) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return nil, diags + } + if len(hashExprs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider hash set", + Detail: "The \"hashes\" argument must either be omitted or contain at least one hash value.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := make([]getproviders.Hash, 0, len(hashExprs)) + for _, hashExpr := range hashExprs { + var raw string + hclDiags := gohcl.DecodeExpression(hashExpr, nil, &raw) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + continue + } + + hash, err := getproviders.ParseHash(raw) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider hash string", + Detail: fmt.Sprintf("Cannot interpret %q as a provider hash: %s.", raw, err), + Subject: expr.Range().Ptr(), + }) + continue + } + + ret = append(ret, hash) + } + + return ret, diags +} + +func encodeHashSetTokens(hashes []getproviders.Hash) hclwrite.Tokens { + // We'll generate the source code in a low-level way here (direct + // token manipulation) because it's desirable to maintain exactly + // the layout implemented here so that diffs against the locks + // file are easy to read; we don't want potential future changes to + // hclwrite to inadvertently introduce whitespace changes here. + ret := hclwrite.Tokens{ + { + Type: hclsyntax.TokenOBrack, + Bytes: []byte{'['}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + } + + // Although lock.hashes is a slice, we de-dupe and sort it on + // initialization so it's normalized for interpretation as a logical + // set, and so we can just trust it's already in a good order here. + for _, hash := range hashes { + hashVal := cty.StringVal(hash.String()) + ret = append(ret, hclwrite.TokensForValue(hashVal)...) 
+ ret = append(ret, hclwrite.Tokens{ + { + Type: hclsyntax.TokenComma, + Bytes: []byte{','}, + }, + { + Type: hclsyntax.TokenNewline, + Bytes: []byte{'\n'}, + }, + }...) + } + ret = append(ret, &hclwrite.Token{ + Type: hclsyntax.TokenCBrack, + Bytes: []byte{']'}, + }) + + return ret +} diff --git a/pkg/depsfile/locks_file_test.go b/pkg/depsfile/locks_file_test.go new file mode 100644 index 00000000000..2193b439c1a --- /dev/null +++ b/pkg/depsfile/locks_file_test.go @@ -0,0 +1,278 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package depsfile + +import ( + "bufio" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestLoadLocksFromFile(t *testing.T) { + // For ease of test maintenance we treat every file under + // test-data/locks-files as a test case which is subject + // at least to testing that it produces an expected set + // of diagnostics represented via specially-formatted comments + // in the fixture files (which might be the empty set, if + // there are no such comments). + // + // Some of the files also have additional assertions that + // are encoded in the test code below. These must pass + // in addition to the standard diagnostics tests, if present. 
+ files, err := os.ReadDir("testdata/locks-files") + if err != nil { + t.Fatal(err.Error()) + } + + for _, info := range files { + testName := filepath.Base(info.Name()) + filename := filepath.Join("testdata/locks-files", testName) + t.Run(testName, func(t *testing.T) { + f, err := os.Open(filename) + if err != nil { + t.Fatal(err.Error()) + } + defer f.Close() + const errorPrefix = "# ERROR: " + const warningPrefix = "# WARNING: " + wantErrors := map[int]string{} + wantWarnings := map[int]string{} + sc := bufio.NewScanner(f) + lineNum := 1 + for sc.Scan() { + l := sc.Text() + if pos := strings.Index(l, errorPrefix); pos != -1 { + wantSummary := l[pos+len(errorPrefix):] + wantErrors[lineNum] = wantSummary + } + if pos := strings.Index(l, warningPrefix); pos != -1 { + wantSummary := l[pos+len(warningPrefix):] + wantWarnings[lineNum] = wantSummary + } + lineNum++ + } + if err := sc.Err(); err != nil { + t.Fatal(err.Error()) + } + + locks, diags := LoadLocksFromFile(filename) + gotErrors := map[int]string{} + gotWarnings := map[int]string{} + for _, diag := range diags { + summary := diag.Description().Summary + if diag.Source().Subject == nil { + // We don't expect any sourceless diagnostics here. + t.Errorf("unexpected sourceless diagnostic: %s", summary) + continue + } + lineNum := diag.Source().Subject.Start.Line + switch sev := diag.Severity(); sev { + case tfdiags.Error: + gotErrors[lineNum] = summary + case tfdiags.Warning: + gotWarnings[lineNum] = summary + default: + t.Errorf("unexpected diagnostic severity %s", sev) + } + } + + if diff := cmp.Diff(wantErrors, gotErrors); diff != "" { + t.Errorf("wrong errors\n%s", diff) + } + if diff := cmp.Diff(wantWarnings, gotWarnings); diff != "" { + t.Errorf("wrong warnings\n%s", diff) + } + + switch testName { + // These are the file-specific test assertions. 
Not all files + // need custom test assertions in addition to the standard + // diagnostics assertions implemented above, so the cases here + // don't need to be exhaustive for all files. + // + // Please keep these in alphabetical order so the list is easy + // to scan! + + case "empty.hcl": + if got, want := len(locks.providers), 0; got != want { + t.Errorf("wrong number of providers %d; want %d", got, want) + } + + case "valid-provider-locks.hcl": + if got, want := len(locks.providers), 3; got != want { + t.Errorf("wrong number of providers %d; want %d", got, want) + } + + t.Run("version-only", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/version-only")); lock != nil { + if got, want := lock.Version().String(), "1.0.0"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), ""; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + if got, want := len(lock.hashes), 0; got != want { + t.Errorf("wrong number of hashes %d; want %d", got, want) + } + } + }) + + t.Run("version-and-constraints", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/version-and-constraints")); lock != nil { + if got, want := lock.Version().String(), "1.2.0"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), "~> 1.2"; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + if got, want := len(lock.hashes), 0; got != want { + t.Errorf("wrong number of hashes %d; want %d", got, want) + } + } + }) + + t.Run("all-the-things", func(t *testing.T) { + if lock := locks.Provider(addrs.MustParseProviderSourceString("terraform.io/test/all-the-things")); lock != nil { + if got, want := lock.Version().String(), 
"3.0.10"; got != want { + t.Errorf("wrong version\ngot: %s\nwant: %s", got, want) + } + if got, want := getproviders.VersionConstraintsString(lock.VersionConstraints()), ">= 3.0.2"; got != want { + t.Errorf("wrong version constraints\ngot: %s\nwant: %s", got, want) + } + wantHashes := []getproviders.Hash{ + getproviders.MustParseHash("test:placeholder-hash-1"), + getproviders.MustParseHash("test:placeholder-hash-2"), + getproviders.MustParseHash("test:placeholder-hash-3"), + } + if diff := cmp.Diff(wantHashes, lock.hashes); diff != "" { + t.Errorf("wrong hashes\n%s", diff) + } + } + }) + } + }) + } +} + +func TestLoadLocksFromFileAbsent(t *testing.T) { + t.Run("lock file is a directory", func(t *testing.T) { + // This can never happen when OpenTofu is the one generating the + // lock file, but might arise if the user makes a directory with the + // lock file's name for some reason. (There is no actual reason to do + // so, so that would always be a mistake.) + locks, diags := LoadLocksFromFile("testdata") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. + want := `Failed to read file: The configuration file "testdata" could not be read.` + got := diags.Err().Error() + if got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("lock file doesn't exist", func(t *testing.T) { + locks, diags := LoadLocksFromFile("testdata/nonexist.hcl") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. 
+		want := `Failed to read file: The configuration file "testdata/nonexist.hcl" could not be read.`
+		got := diags.Err().Error()
+		if got != want {
+			t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want)
+		}
+	})
+}
+
+// TestSaveLocksToFile verifies that a round-trip through SaveLocksToFile
+// produces the expected canonical file content (providers sorted by address,
+// constraints normalized, hashes sorted) and that the file is written
+// without execute permission bits.
+func TestSaveLocksToFile(t *testing.T) {
+	locks := NewLocks()
+
+	fooProvider := addrs.MustParseProviderSourceString("test/foo")
+	barProvider := addrs.MustParseProviderSourceString("test/bar")
+	bazProvider := addrs.MustParseProviderSourceString("test/baz")
+	booProvider := addrs.MustParseProviderSourceString("test/boo")
+	oneDotOh := getproviders.MustParseVersion("1.0.0")
+	oneDotTwo := getproviders.MustParseVersion("1.2.0")
+	atLeastOneDotOh := getproviders.MustParseVersionConstraints(">= 1.0.0")
+	pessimisticOneDotOh := getproviders.MustParseVersionConstraints("~> 1")
+	abbreviatedOneDotTwo := getproviders.MustParseVersionConstraints("1.2")
+	hashes := []getproviders.Hash{
+		getproviders.MustParseHash("test:cccccccccccccccccccccccccccccccccccccccccccccccc"),
+		getproviders.MustParseHash("test:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+		getproviders.MustParseHash("test:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+	}
+	locks.SetProvider(fooProvider, oneDotOh, atLeastOneDotOh, hashes)
+	locks.SetProvider(barProvider, oneDotTwo, pessimisticOneDotOh, nil)
+	locks.SetProvider(bazProvider, oneDotTwo, nil, nil)
+	locks.SetProvider(booProvider, oneDotTwo, abbreviatedOneDotTwo, nil)
+
+	dir := t.TempDir()
+
+	filename := filepath.Join(dir, LockFilePath)
+	diags := SaveLocksToFile(locks, filename)
+	if diags.HasErrors() {
+		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
+	}
+
+	fileInfo, err := os.Stat(filename)
+	if err != nil {
+		// Use t.Fatal, not t.Fatalf: err.Error() is not a format string,
+		// so passing it as one trips "go vet"'s printf check and would
+		// mangle output if the message ever contained a literal '%'.
+		t.Fatal(err.Error())
+	}
+	if mode := fileInfo.Mode(); mode&0111 != 0 {
+		t.Fatalf("Expected lock file to be non-executable: %o", mode)
+	}
+
+	gotContentBytes, err := os.ReadFile(filename)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	gotContent := string(gotContentBytes)
+	wantContent := `# 
This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/test/bar" { + version = "1.2.0" + constraints = "~> 1.0" +} + +provider "registry.opentofu.org/test/baz" { + version = "1.2.0" +} + +provider "registry.opentofu.org/test/boo" { + version = "1.2.0" + constraints = "1.2.0" +} + +provider "registry.opentofu.org/test/foo" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "test:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "test:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "test:cccccccccccccccccccccccccccccccccccccccccccccccc", + ] +} +` + if diff := cmp.Diff(wantContent, gotContent); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} diff --git a/pkg/depsfile/locks_test.go b/pkg/depsfile/locks_test.go new file mode 100644 index 00000000000..7db4ffb23b4 --- /dev/null +++ b/pkg/depsfile/locks_test.go @@ -0,0 +1,314 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package depsfile + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestLocksEqual(t *testing.T) { + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2LocalBuild := getproviders.MustParseVersion("2.0.0+awesomecorp.1") + v2GtConstraints := getproviders.MustParseVersionConstraints(">= 2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + hash1 := getproviders.HashScheme("test").New("1") + hash2 := getproviders.HashScheme("test").New("2") + hash3 := getproviders.HashScheme("test").New("3") + + equalBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if !a.Equal(b) { + t.Errorf("a should be equal to b") + } + if !b.Equal(a) { + t.Errorf("b should be equal to a") + } + } + nonEqualBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if a.Equal(b) { + t.Errorf("a should be equal to b") + } + if b.Equal(a) { + t.Errorf("b should be equal to a") + } + } + + t.Run("both empty", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + equalBothWays(t, a, b) + }) + t.Run("an extra provider lock", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + b.SetProvider(boopProvider, v2, v2GtConstraints, nil) + nonEqualBothWays(t, a, b) + }) + t.Run("both have boop provider with same version", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + // Note: the constraints are not part of the definition of "Equal", so they can differ + a.SetProvider(boopProvider, v2, v2GtConstraints, nil) + b.SetProvider(boopProvider, v2, v2EqConstraints, nil) + equalBothWays(t, a, b) + }) + t.Run("both have boop provider with different versions", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + a.SetProvider(boopProvider, v2, v2EqConstraints, nil) + b.SetProvider(boopProvider, v2LocalBuild, v2EqConstraints, nil) + nonEqualBothWays(t, a, 
b) + }) + t.Run("both have boop provider with same version and same hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashes := []getproviders.Hash{hash1, hash2, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashes) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashes) + equalBothWays(t, a, b) + }) + t.Run("both have boop provider with same version but different hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashesA := []getproviders.Hash{hash1, hash2} + hashesB := []getproviders.Hash{hash1, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashesA) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashesB) + nonEqualBothWays(t, a, b) + }) +} + +func TestLocksEqualProviderAddress(t *testing.T) { + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2LocalBuild := getproviders.MustParseVersion("2.0.0+awesomecorp.1") + v2GtConstraints := getproviders.MustParseVersionConstraints(">= 2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + hash1 := getproviders.HashScheme("test").New("1") + hash2 := getproviders.HashScheme("test").New("2") + hash3 := getproviders.HashScheme("test").New("3") + + equalProviderAddressBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if !a.EqualProviderAddress(b) { + t.Errorf("a should be equal to b") + } + if !b.EqualProviderAddress(a) { + t.Errorf("b should be equal to a") + } + } + nonEqualProviderAddressBothWays := func(t *testing.T, a, b *Locks) { + t.Helper() + if a.EqualProviderAddress(b) { + t.Errorf("a should be equal to b") + } + if b.EqualProviderAddress(a) { + t.Errorf("b should be equal to a") + } + } + + t.Run("both empty", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + equalProviderAddressBothWays(t, a, b) + }) + t.Run("an extra provider lock", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + b.SetProvider(boopProvider, v2, v2GtConstraints, nil) + 
nonEqualProviderAddressBothWays(t, a, b) + }) + t.Run("both have boop provider with different versions", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + a.SetProvider(boopProvider, v2, v2EqConstraints, nil) + b.SetProvider(boopProvider, v2LocalBuild, v2EqConstraints, nil) + equalProviderAddressBothWays(t, a, b) + }) + t.Run("both have boop provider with same version but different hashes", func(t *testing.T) { + a := NewLocks() + b := NewLocks() + hashesA := []getproviders.Hash{hash1, hash2} + hashesB := []getproviders.Hash{hash1, hash3} + a.SetProvider(boopProvider, v2, v2EqConstraints, hashesA) + b.SetProvider(boopProvider, v2, v2EqConstraints, hashesB) + equalProviderAddressBothWays(t, a, b) + }) +} + +func TestLocksProviderSetRemove(t *testing.T) { + beepProvider := addrs.NewDefaultProvider("beep") + boopProvider := addrs.NewDefaultProvider("boop") + v2 := getproviders.MustParseVersion("2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + v2GtConstraints := getproviders.MustParseVersionConstraints(">= 2.0.0") + hash := getproviders.HashScheme("test").New("1") + + locks := NewLocks() + if got, want := len(locks.AllProviders()), 0; got != want { + t.Fatalf("fresh locks object already has providers") + } + + locks.SetProvider(boopProvider, v2, v2EqConstraints, []getproviders.Hash{hash}) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + boopProvider: { + addr: boopProvider, + version: v2, + versionConstraints: v2EqConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after SetProvider boop\n%s", diff) + } + } + + locks.SetProvider(beepProvider, v2, v2GtConstraints, []getproviders.Hash{hash}) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + boopProvider: { + addr: boopProvider, + version: v2, + versionConstraints: v2EqConstraints, + hashes: []getproviders.Hash{hash}, + 
}, + beepProvider: { + addr: beepProvider, + version: v2, + versionConstraints: v2GtConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after SetProvider beep\n%s", diff) + } + } + + locks.RemoveProvider(boopProvider) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{ + beepProvider: { + addr: beepProvider, + version: v2, + versionConstraints: v2GtConstraints, + hashes: []getproviders.Hash{hash}, + }, + } + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after RemoveProvider boop\n%s", diff) + } + } + + locks.RemoveProvider(beepProvider) + { + got := locks.AllProviders() + want := map[addrs.Provider]*ProviderLock{} + if diff := cmp.Diff(want, got, ProviderLockComparer); diff != "" { + t.Fatalf("wrong providers after RemoveProvider beep\n%s", diff) + } + } +} + +func TestProviderLockContainsAll(t *testing.T) { + provider := addrs.NewDefaultProvider("provider") + v2 := getproviders.MustParseVersion("2.0.0") + v2EqConstraints := getproviders.MustParseVersionConstraints("2.0.0") + + t.Run("non-symmetric", func(t *testing.T) { + target := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ + "9r3i9a9QmASqMnQM", + "K43RHM2klOoywtyW", + "swJPXfuCNhJsTM5c", + }) + + original := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ + "9r3i9a9QmASqMnQM", + "1ZAChGWUMWn4zmIk", + "K43RHM2klOoywtyW", + "HWjRvIuWZ1LVatnc", + "swJPXfuCNhJsTM5c", + "KwhJK4p/U2dqbKhI", + }) + + if !original.ContainsAll(target) { + t.Errorf("orginal should contain all hashes in target") + } + if target.ContainsAll(original) { + t.Errorf("target should not contain all hashes in orginal") + } + }) + + t.Run("symmetric", func(t *testing.T) { + target := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{ + "9r3i9a9QmASqMnQM", + "K43RHM2klOoywtyW", + "swJPXfuCNhJsTM5c", + }) + + 
original := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		// With identical hash sets, containment holds in both directions.
+		// (Messages below fix the earlier "orginal" typo and the inverted
+		// wording on the second assertion: here target *should* contain
+		// every hash in original.)
+		if !original.ContainsAll(target) {
+			t.Errorf("original should contain all hashes in target")
+		}
+		if !target.ContainsAll(original) {
+			t.Errorf("target should contain all hashes in original")
+		}
+	})
+
+	t.Run("edge case - null", func(t *testing.T) {
+		original := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		if !original.ContainsAll(nil) {
+			t.Fatalf("original should report true on nil")
+		}
+	})
+
+	t.Run("edge case - empty", func(t *testing.T) {
+		original := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		target := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{})
+
+		if !original.ContainsAll(target) {
+			t.Fatalf("original should report true on empty")
+		}
+	})
+
+	t.Run("edge case - original empty", func(t *testing.T) {
+		original := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{})
+
+		target := NewProviderLock(provider, v2, v2EqConstraints, []getproviders.Hash{
+			"9r3i9a9QmASqMnQM",
+			"K43RHM2klOoywtyW",
+			"swJPXfuCNhJsTM5c",
+		})
+
+		if original.ContainsAll(target) {
+			t.Fatalf("original should report false when empty")
+		}
+	})
+}
diff --git a/pkg/depsfile/paths.go b/pkg/depsfile/paths.go
new file mode 100644
index 00000000000..046a5795c34
--- /dev/null
+++ b/pkg/depsfile/paths.go
@@ -0,0 +1,23 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package depsfile
+
+// LockFilePath is the path, relative to a configuration's root module
+// directory, where OpenTofu expects to find the dependency lock file for
+// that configuration.
+// +// This file is intended to be kept in version control, so it lives directly +// in the root module directory. The ".terraform" prefix is intended to +// suggest that it's metadata about several types of objects that ultimately +// end up in the .terraform directory after running "tofu init". +const LockFilePath = ".terraform.lock.hcl" + +// DevOverrideFilePath is the path, relative to a configuration's root module +// directory, where OpenTofu will look to find a possible override file that +// represents a request to temporarily (within a single working directory only) +// use specific local directories in place of packages that would normally +// need to be installed from a remote location. +const DevOverrideFilePath = ".terraform/dev-overrides.hcl" diff --git a/pkg/depsfile/testdata/locks-files/empty.hcl b/pkg/depsfile/testdata/locks-files/empty.hcl new file mode 100644 index 00000000000..66169c2ef67 --- /dev/null +++ b/pkg/depsfile/testdata/locks-files/empty.hcl @@ -0,0 +1 @@ +# An empty locks file is a bit of a degenerate case, but it is valid. 
diff --git a/pkg/depsfile/testdata/locks-files/invalid-provider-addrs.hcl b/pkg/depsfile/testdata/locks-files/invalid-provider-addrs.hcl new file mode 100644 index 00000000000..db5e402adff --- /dev/null +++ b/pkg/depsfile/testdata/locks-files/invalid-provider-addrs.hcl @@ -0,0 +1,44 @@ +provider "" { # ERROR: Invalid provider source address + +} + +provider "hashicorp/aws" { # ERROR: Non-normalized provider source address + +} + +provider "aws" { # ERROR: Non-normalized provider source address + +} + +provider "too/many/parts/here" { # ERROR: Invalid provider source address + +} + +provider "Registry.opentofu.org/example/example" { # ERROR: Non-normalized provider source address + +} + +provider "registry.opentofu.org/eXample/example" { # ERROR: Non-normalized provider source address + +} + +provider "registry.opentofu.org/example/Example" { # ERROR: Non-normalized provider source address + +} + +provider "this/one/okay" { + version = "1.0.0" +} + +provider "this/one/okay" { # ERROR: Duplicate provider lock +} + +# Legacy providers are not allowed, because they existed only to +# support the Terraform 0.13 upgrade process. +provider "registry.opentofu.org/-/null" { # ERROR: Invalid provider source address +} + +# Built-in providers are not allowed, because they are not versioned +# independently of the OpenTofu CLI release they are embedded in. 
+provider "terraform.io/builtin/foo" { # ERROR: Invalid provider source address +} diff --git a/pkg/depsfile/testdata/locks-files/invalid-versions.hcl b/pkg/depsfile/testdata/locks-files/invalid-versions.hcl new file mode 100644 index 00000000000..d1e96474f81 --- /dev/null +++ b/pkg/depsfile/testdata/locks-files/invalid-versions.hcl @@ -0,0 +1,30 @@ +provider "terraform.io/test/foo" { + version = "" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/bar" { + # The "v" prefix is not expected here + version = "v1.0.0" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/baz" { + # Must be written in the canonical form, with three parts + version = "1.0" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/boop" { + # Must be written in the canonical form, with three parts + version = "1" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/blep" { + # Mustn't use redundant extra zero padding + version = "1.02" # ERROR: Invalid provider version number +} + +provider "terraform.io/test/huzzah" { # ERROR: Missing required argument +} + +provider "terraform.io/test/huzznot" { + version = null # ERROR: Missing required argument +} diff --git a/pkg/depsfile/testdata/locks-files/unsupported-block.hcl b/pkg/depsfile/testdata/locks-files/unsupported-block.hcl new file mode 100644 index 00000000000..41321fca97c --- /dev/null +++ b/pkg/depsfile/testdata/locks-files/unsupported-block.hcl @@ -0,0 +1,2 @@ +doodad "blah" { # ERROR: Unsupported block type +} diff --git a/pkg/depsfile/testdata/locks-files/valid-provider-locks.hcl b/pkg/depsfile/testdata/locks-files/valid-provider-locks.hcl new file mode 100644 index 00000000000..1d19116076a --- /dev/null +++ b/pkg/depsfile/testdata/locks-files/valid-provider-locks.hcl @@ -0,0 +1,20 @@ + +provider "terraform.io/test/version-only" { + version = "1.0.0" +} + +provider "terraform.io/test/version-and-constraints" { + version = "1.2.0" + constraints = 
"~> 1.2" +} + +provider "terraform.io/test/all-the-things" { + version = "3.0.10" + constraints = ">= 3.0.2" + + hashes = [ + "test:placeholder-hash-1", + "test:placeholder-hash-2", + "test:placeholder-hash-3", + ] +} diff --git a/pkg/depsfile/testing.go b/pkg/depsfile/testing.go new file mode 100644 index 00000000000..3bf781a810c --- /dev/null +++ b/pkg/depsfile/testing.go @@ -0,0 +1,27 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package depsfile + +import ( + "github.com/google/go-cmp/cmp" +) + +// ProviderLockComparer is an option for github.com/google/go-cmp/cmp that +// specifies how to compare values of type depsfile.ProviderLock. +// +// Use this, rather than crafting comparison options yourself, in case the +// comparison strategy needs to change in future due to implementation details +// of the ProviderLock type. +var ProviderLockComparer cmp.Option + +func init() { + // For now, direct comparison of the unexported fields is good enough + // because we store everything in a normalized form. If that changes + // later then we might need to write a custom transformer to a hidden + // type with exported fields, so we can retain the ability for cmp to + // still report differences deeply. + ProviderLockComparer = cmp.AllowUnexported(ProviderLock{}) +} diff --git a/pkg/didyoumean/name_suggestion.go b/pkg/didyoumean/name_suggestion.go new file mode 100644 index 00000000000..cf6a5708536 --- /dev/null +++ b/pkg/didyoumean/name_suggestion.go @@ -0,0 +1,29 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package didyoumean + +import ( + "github.com/agext/levenshtein" +) + +// NameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. 
If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func NameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/pkg/didyoumean/name_suggestion_test.go b/pkg/didyoumean/name_suggestion_test.go new file mode 100644 index 00000000000..545daf310bb --- /dev/null +++ b/pkg/didyoumean/name_suggestion_test.go @@ -0,0 +1,58 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package didyoumean + +import ( + "testing" +) + +func TestNameSuggestion(t *testing.T) { + var keywords = []string{"false", "true", "null"} + + tests := []struct { + Input, Want string + }{ + {"true", "true"}, + {"false", "false"}, + {"null", "null"}, + {"bananas", ""}, + {"NaN", ""}, + {"Inf", ""}, + {"Infinity", ""}, + {"void", ""}, + {"undefined", ""}, + + {"ture", "true"}, + {"tru", "true"}, + {"tre", "true"}, + {"treu", "true"}, + {"rtue", "true"}, + + {"flase", "false"}, + {"fales", "false"}, + {"flse", "false"}, + {"fasle", "false"}, + {"fasel", "false"}, + {"flue", "false"}, + + {"nil", "null"}, + {"nul", "null"}, + {"unll", "null"}, + {"nll", "null"}, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + got := NameSuggestion(test.Input, keywords) + if got != test.Want { + t.Errorf( + "wrong result\ninput: %q\ngot: %q\nwant: %q", + test.Input, got, test.Want, + ) + } + }) + } +} diff --git a/pkg/e2e/e2e.go b/pkg/e2e/e2e.go new file mode 100644 index 
00000000000..b9dfdcf6474 --- /dev/null +++ b/pkg/e2e/e2e.go @@ -0,0 +1,282 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package e2e + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +// Type binary represents the combination of a compiled binary +// and a temporary working directory to run it in. +type binary struct { + binPath string + workDir string + env []string +} + +// NewBinary prepares a temporary directory containing the files from the +// given fixture and returns an instance of type binary that can run +// the generated binary in that directory. +// +// If the temporary directory cannot be created, a fixture of the given name +// cannot be found, or if an error occurs while _copying_ the fixture files, +// this function will panic. Tests should be written to assume that this +// function always succeeds. +func NewBinary(t *testing.T, binaryPath, workingDir string) *binary { + tmpDir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + panic(err) + } + + // For our purposes here we do a very simplistic file copy that doesn't + // attempt to preserve file permissions, attributes, alternate data + // streams, etc. Since we only have to deal with our own fixtures in + // the testdata subdir, we know we don't need to deal with anything + // of this nature. 
+ err = filepath.Walk(workingDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == workingDir { + // nothing to do at the root + return nil + } + + if filepath.Base(path) == ".exists" { + // We use this file just to let git know the "empty" fixture + // exists. It is not used by any test. + return nil + } + + srcFn := path + + path, err = filepath.Rel(workingDir, path) + if err != nil { + return err + } + + dstFn := filepath.Join(tmpDir, path) + + if info.IsDir() { + return os.Mkdir(dstFn, os.ModePerm) + } + + src, err := os.Open(srcFn) + if err != nil { + return err + } + dst, err := os.OpenFile(dstFn, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.ModePerm) + if err != nil { + return err + } + + _, err = io.Copy(dst, src) + if err != nil { + return err + } + + if err := src.Close(); err != nil { + return err + } + if err := dst.Close(); err != nil { + return err + } + + return nil + }) + if err != nil { + panic(err) + } + + return &binary{ + binPath: binaryPath, + workDir: tmpDir, + } +} + +// AddEnv appends an entry to the environment variable table passed to any +// commands subsequently run. +func (b *binary) AddEnv(entry string) { + b.env = append(b.env, entry) +} + +// Cmd returns an exec.Cmd pre-configured to run the generated OpenTofu +// binary with the given arguments in the temporary working directory. +// +// The returned object can be mutated by the caller to customize how the +// process will be run, before calling Run. +func (b *binary) Cmd(args ...string) *exec.Cmd { + cmd := exec.Command(b.binPath, args...) + cmd.Dir = b.workDir + cmd.Env = os.Environ() + + cmd.Env = append(cmd.Env, b.env...) + + return cmd +} + +// Run executes the generated OpenTofu binary with the given arguments +// and returns the bytes that it wrote to both stdout and stderr. +// +// This is a simple way to run OpenTofu for non-interactive commands +// that don't need any special environment variables. 
For more complex +// situations, use Cmd and customize the command before running it. +func (b *binary) Run(args ...string) (stdout, stderr string, err error) { + cmd := b.Cmd(args...) + cmd.Stdin = nil + cmd.Stdout = &bytes.Buffer{} + cmd.Stderr = &bytes.Buffer{} + err = cmd.Run() + stdout = cmd.Stdout.(*bytes.Buffer).String() + stderr = cmd.Stderr.(*bytes.Buffer).String() + return +} + +// Path returns a file path within the temporary working directory by +// appending the given arguments as path segments. +func (b *binary) Path(parts ...string) string { + args := make([]string, 0, len(parts)+1) + args = append(args, b.workDir) + args = append(args, parts...) + return filepath.Join(args...) +} + +// OpenFile is a helper for easily opening a file from the working directory +// for reading. +func (b *binary) OpenFile(path ...string) (*os.File, error) { + flatPath := b.Path(path...) + return os.Open(flatPath) +} + +// ReadFile is a helper for easily reading a whole file from the working +// directory. +func (b *binary) ReadFile(path ...string) ([]byte, error) { + flatPath := b.Path(path...) + return os.ReadFile(flatPath) +} + +// FileExists is a helper for easily testing whether a particular file +// exists in the working directory. +func (b *binary) FileExists(path ...string) bool { + flatPath := b.Path(path...) + _, err := os.Stat(flatPath) + return !os.IsNotExist(err) +} + +// LocalState is a helper for easily reading the local backend's state file +// terraform.tfstate from the working directory. +func (b *binary) LocalState() (*states.State, error) { + return b.StateFromFile("terraform.tfstate") +} + +// StateFromFile is a helper for easily reading a state snapshot from a file +// on disk relative to the working directory. 
+func (b *binary) StateFromFile(filename string) (*states.State, error) { + f, err := b.OpenFile(filename) + if err != nil { + return nil, err + } + defer f.Close() + + stateFile, err := statefile.Read(f, encryption.StateEncryptionDisabled()) + if err != nil { + return nil, fmt.Errorf("Error reading statefile: %w", err) + } + return stateFile.State, nil +} + +// Plan is a helper for easily reading a plan file from the working directory. +func (b *binary) Plan(path string) (*plans.Plan, error) { + path = b.Path(path) + pr, err := planfile.Open(path, encryption.PlanEncryptionDisabled()) + if err != nil { + return nil, err + } + plan, err := pr.ReadPlan() + if err != nil { + return nil, err + } + return plan, nil +} + +// SetLocalState is a helper for easily writing to the file the local backend +// uses for state in the working directory. This does not go through the +// actual local backend code, so processing such as management of serials +// does not apply and the given state will simply be written verbatim. 
+func (b *binary) SetLocalState(state *states.State) error { + path := b.Path("terraform.tfstate") + f, err := os.OpenFile(path, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create temporary state file %s: %w", path, err) + } + defer f.Close() + + sf := &statefile.File{ + Serial: 0, + Lineage: "fake-for-testing", + State: state, + } + return statefile.Write(sf, f, encryption.StateEncryptionDisabled()) +} + +func GoBuild(pkgPath, tmpPrefix string) string { + dir, prefix := filepath.Split(tmpPrefix) + tmpFile, err := os.CreateTemp(dir, prefix) + if err != nil { + panic(err) + } + tmpFilename := tmpFile.Name() + if err = tmpFile.Close(); err != nil { + panic(err) + } + + args := []string{ + "go", + "build", + } + + if len(os.Getenv("GOCOVERDIR")) != 0 { + args = append(args, + "-cover", + "-coverpkg=github.com/kubegems/opentofu/...", + ) + } + + args = append(args, + "-o", tmpFilename, + pkgPath, + ) + + cmd := exec.Command(args[0], args[1:]...) + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + + err = cmd.Run() + if err != nil { + // The go compiler will have already produced some error messages + // on stderr by the time we get here. + panic(fmt.Sprintf("failed to build executable: %s", err)) + } + + return tmpFilename +} + +// WorkDir() returns the binary workdir +func (b *binary) WorkDir() string { + return b.workDir +} diff --git a/pkg/encryption/README.md b/pkg/encryption/README.md new file mode 100644 index 00000000000..f52dead0ed6 --- /dev/null +++ b/pkg/encryption/README.md @@ -0,0 +1,14 @@ +# OpenTofu State and Plan encryption + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +This folder contains the code for state and plan encryption. For a quick example on how to use this package, please take a look at the [example_test.go](example_test.go) file. 
+ +## Structure + +The current folder contains the top level API. It requires a registry for holding the available key providers and encryption methods, which is located in the [registry](registry/) folder. The key providers are located in the [keyprovider](keyprovider/) folder, while the encryption methods are located in the [method](method) folder. You can also find the configuration struct and its related functions in the [config](config) folder. + +## Further reading + +For a detailed design document on state encryption, please read [this document](../../docs/state_encryption.md). \ No newline at end of file diff --git a/pkg/encryption/base.go b/pkg/encryption/base.go new file mode 100644 index 00000000000..c365318f280 --- /dev/null +++ b/pkg/encryption/base.go @@ -0,0 +1,186 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" + "github.com/kubegems/opentofu/pkg/encryption/method/unencrypted" + + "github.com/hashicorp/hcl/v2" +) + +const ( + encryptionVersion = "v0" +) + +type baseEncryption struct { + enc *encryption + target *config.TargetConfig + enforced bool + name string + encMethods []method.Method + encMeta map[keyprovider.Addr][]byte + staticEval *configs.StaticEvaluator +} + +func newBaseEncryption(enc *encryption, target *config.TargetConfig, enforced bool, name string, staticEval *configs.StaticEvaluator) (*baseEncryption, hcl.Diagnostics) { + base := &baseEncryption{ + enc: enc, + target: target, + enforced: enforced, + name: name, + encMeta: make(map[keyprovider.Addr][]byte), + staticEval: staticEval, + } + // Setup the encryptor + // + // Instead of creating new encryption 
key data for each call to encrypt, we use the same encryptor for the given application (statefile or planfile). + // + // Why do we do this? + // + // This allows us to always be in a state where we can encrypt data, which is particularly important when dealing with crashes. If the network is severed + // mid-apply, we still need to be able to write an encrypted errored.tfstate or dump to stdout. Additionally it reduces the overhead of encryption in + // general, as well as reducing cloud key provider costs. + // + // What are the security implications? + // + // Plan file flow is fairly simple and is not impacted by this change. It only ever calls encrypt once at the end of plan generation. + // + // State file is a bit more complex. The encrypted local state file (terraform.tfstate, .terraform.tfstate) will be written with the same + // keys as any remote state. These files should be identical, which will make debugging easier. + // + // The major concern with this is that many of the encryption methods used have a limit to how much data a key can safely encrypt. Pbkdf2 for example + // has a limit of around 64GB before exhaustion is approached. Writing to the two local and one remote location specified above could not + // approach that limit. However the cached state file (.terraform/terraform.tfstate) is persisted every 30 seconds during long applies. For an + // extremely large state file (100MB) it would take an apply of over 5 hours to come close to the 64GB limit of pbkdf2 with some malicious actor recording + // every single change to the filesystem (or inspecting deleted blocks). + // + // What other benfits does this provide? + // + // This performs a e2e validation run of the config -> methods flow. It serves as a validation step and allows us to return detailed + // diagnostics here and simple errors in the decrypt function below. 
+ // + methods, diags := base.buildTargetMethods(base.encMeta) + base.encMethods = methods + + return base, diags +} + +type basedata struct { + Meta map[keyprovider.Addr][]byte `json:"meta"` + Data []byte `json:"encrypted_data"` + Version string `json:"encryption_version"` // This is both a sigil for a valid encrypted payload and a future compatability field +} + +func IsEncryptionPayload(data []byte) (bool, error) { + es := basedata{} + err := json.Unmarshal(data, &es) + if err != nil { + return false, err + } + + // This could be extended with full version checking later on + return es.Version != "", nil +} + +func (s *baseEncryption) encrypt(data []byte, enhance func(basedata) interface{}) ([]byte, error) { + // buildTargetMethods above guarantees that there will be at least one encryption method. They are not optional in the common target + // block, which is required to get to this code. + encryptor := s.encMethods[0] + + if unencrypted.Is(encryptor) { + return data, nil + } + + encd, err := encryptor.Encrypt(data) + if err != nil { + return nil, fmt.Errorf("encryption failed for %s: %w", s.name, err) + } + + es := basedata{ + Version: encryptionVersion, + Meta: s.encMeta, + Data: encd, + } + jsond, err := json.Marshal(enhance(es)) + if err != nil { + return nil, fmt.Errorf("unable to encode encrypted data as json: %w", err) + } + + return jsond, nil +} + +// TODO Find a way to make these errors actionable / clear +func (s *baseEncryption) decrypt(data []byte, validator func([]byte) error) ([]byte, error) { + es := basedata{} + err := json.Unmarshal(data, &es) + + if len(es.Version) == 0 || err != nil { + // Not a valid payload, might be already decrypted + verr := validator(data) + if verr != nil { + // Nope, just bad input + + // Return the outer json error if we have one + if err != nil { + return nil, fmt.Errorf("invalid data format for decryption: %w, %w", err, verr) + } + + // Must have been invalid json payload + return nil, fmt.Errorf("unable to 
determine data structure during decryption: %w", verr) + } + + // Yep, it's already decrypted + for _, method := range s.encMethods { + if unencrypted.Is(method) { + return data, nil + } + } + return nil, fmt.Errorf("encountered unencrypted payload without unencrypted method configured") + } + + if es.Version != encryptionVersion { + return nil, fmt.Errorf("invalid encrypted payload version: %s != %s", es.Version, encryptionVersion) + } + + // TODO Discuss if we should potentially cache this based on a json-encoded version of es.Meta and reduce overhead dramatically + methods, diags := s.buildTargetMethods(es.Meta) + if diags.HasErrors() { + // This cast to error here is safe as we know that at least one error exists + // This is also quite unlikely to happen as the constructor already has checked this code path + return nil, diags + } + + errs := make([]error, 0) + for _, method := range methods { + if unencrypted.Is(method) { + // Not applicable + continue + } + uncd, err := method.Decrypt(es.Data) + if err == nil { + // Success + return uncd, nil + } + // Record the failure + errs = append(errs, fmt.Errorf("attempted decryption failed for %s: %w", s.name, err)) + } + + // This is good enough for now until we have better/distinct errors + errMessage := "decryption failed for all provided methods: " + sep := "" + for _, err := range errs { + errMessage += err.Error() + sep + sep = "\n" + } + return nil, fmt.Errorf(errMessage) +} diff --git a/pkg/encryption/compliancetest/config_struct.go b/pkg/encryption/compliancetest/config_struct.go new file mode 100644 index 00000000000..300e9e4c4a6 --- /dev/null +++ b/pkg/encryption/compliancetest/config_struct.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "reflect" + "strings" + "testing" +) + +func ConfigStruct[TConfig any](t *testing.T, configStruct any) { + Log(t, "Testing config struct compliance...") + if configStruct == nil { + Fail(t, "The ConfigStruct() method on the descriptor returns a nil configuration. Please implement this function correctly.") + } else { + Log(t, "The ConfigStruct() method returned a non-nil value.") + } + + configStructPtrType := reflect.TypeOf(configStruct) + if configStructPtrType.Kind() != reflect.Ptr { + Fail(t, "The ConfigStruct() method returns a %T, but it should return a pointer to a struct.", configStruct) + } else { + Log(t, "The ConfigStruct() method returned a pointer.") + } + configStructType := configStructPtrType.Elem() + if configStructType.Kind() != reflect.Struct { + Fail(t, "The ConfigStruct() method returns a pointer to %s, but it should return a pointer to a struct.", configStructType.Elem().Name()) + } else { + Log(t, "The ConfigStruct() method returned a pointer to a struct.") + } + + typedConfigStruct, ok := configStruct.(TConfig) + if !ok { + Fail(t, "The ConfigStruct() method returns a %T instead of a %T", configStruct, typedConfigStruct) + } else { + Log(t, "The ConfigStruct() method correctly returns a %T", typedConfigStruct) + } + + hclTagFound := false + for i := 0; i < configStructType.NumField(); i++ { + field := configStructType.Field(i) + hclTag, ok := field.Tag.Lookup("hcl") + if !ok { + continue + } + hclTagFound = true + if hclTag == "" { + Fail( + t, + "The field '%s' on the config structure %s has an empty HCL tag. 
Please remove the hcl tag or add a value that matches %s.", + field.Name, + configStructType.Name(), + hclTagRe, + ) + } else { + Log(t, "Found a non-empty hcl tag on field '%s' of %s.", field.Name, configStructType.Name()) + } + hclTagParts := strings.Split(hclTag, ",") + if !hclTagRe.MatchString(hclTagParts[0]) { + Fail( + t, + "The field '%s' on the config structure %s has an invalid hcl tag: %s. Please add a value that matches %s.", + field.Name, + configStructType.Name(), + hclTag, + hclTagRe, + ) + } else { + Log(t, "Found hcl tag on field '%s' of %s matches the name requirements.", field.Name, configStructType.Name()) + } + } + if !hclTagFound { + Fail( + t, + "The configuration struct %s does not contain any fields with hcl tags, which means users will not be able to configure this key provider. Please provide at least one field with an hcl tag.", + configStructType.Name(), + ) + } else { + Log(t, "Found at least one field with a hcl tag.") + } + +} diff --git a/pkg/encryption/compliancetest/const.go b/pkg/encryption/compliancetest/const.go new file mode 100644 index 00000000000..3248a0116dd --- /dev/null +++ b/pkg/encryption/compliancetest/const.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "regexp" +) + +var hclTagRe = regexp.MustCompile("^[a-zA-Z0-9_-]+$") diff --git a/pkg/encryption/compliancetest/log.go b/pkg/encryption/compliancetest/log.go new file mode 100644 index 00000000000..516e3d1a882 --- /dev/null +++ b/pkg/encryption/compliancetest/log.go @@ -0,0 +1,29 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "fmt" + "testing" +) + +// Log writes a log line for a compliance test. 
+func Log(t *testing.T, msg string, params ...any) { + t.Helper() + t.Logf("\033[32m%s\033[0m", fmt.Sprintf(msg, params...)) +} + +// Fail fails a compliance test. +func Fail(t *testing.T, msg string, params ...any) { + t.Helper() + t.Fatalf("\033[31m%s\033[0m", fmt.Sprintf(msg, params...)) +} + +// Skip skips a compliance test. +func Skip(t *testing.T, msg string, params ...any) { + t.Helper() + t.Skipf("\033[33m%s\033[0m", fmt.Sprintf(msg, params...)) +} diff --git a/pkg/encryption/config/config.go b/pkg/encryption/config/config.go new file mode 100644 index 00000000000..a548f55411a --- /dev/null +++ b/pkg/encryption/config/config.go @@ -0,0 +1,115 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// EncryptionConfig describes the terraform.encryption HCL block you can use to configure the state and plan encryption. +// The individual fields of this struct match the HCL structure directly. +type EncryptionConfig struct { + KeyProviderConfigs []KeyProviderConfig `hcl:"key_provider,block"` + MethodConfigs []MethodConfig `hcl:"method,block"` + + State *EnforcableTargetConfig `hcl:"state,block"` + Plan *EnforcableTargetConfig `hcl:"plan,block"` + Remote *RemoteConfig `hcl:"remote_state_data_sources,block"` + + // Not preserved through merge operations + DeclRange hcl.Range +} + +// Merge returns a merged configuration with the current config and the specified override combined, the override +// taking precedence. +func (c *EncryptionConfig) Merge(override *EncryptionConfig) *EncryptionConfig { + return MergeConfigs(c, override) +} + +// GetKeyProvider takes type and name arguments to find a respective KeyProviderConfig in the list. 
+func (c *EncryptionConfig) GetKeyProvider(kpType, kpName string) (KeyProviderConfig, bool) { + for _, kp := range c.KeyProviderConfigs { + if kp.Type == kpType && kp.Name == kpName { + return kp, true + } + } + return KeyProviderConfig{}, false +} + +// KeyProviderConfig describes the terraform.encryption.key_provider.* block you can use to declare a key provider for +// encryption. The Body field will contain the remaining undeclared fields the key provider can consume. +type KeyProviderConfig struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + Body hcl.Body `hcl:",remain"` +} + +// Addr returns a keyprovider.Addr from the current configuration. +func (k KeyProviderConfig) Addr() (keyprovider.Addr, hcl.Diagnostics) { + return keyprovider.NewAddr(k.Type, k.Name) +} + +// MethodConfig describes the terraform.encryption.method.* block you can use to declare the encryption method. The Body +// field will contain the remaining undeclared fields the method can consume. +type MethodConfig struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + Body hcl.Body `hcl:",remain"` +} + +func (m MethodConfig) Addr() (method.Addr, hcl.Diagnostics) { + return method.NewAddr(m.Type, m.Name) +} + +// RemoteConfig describes the terraform.encryption.remote block you can use to declare encryption for remote state data +// sources. +type RemoteConfig struct { + Default *TargetConfig `hcl:"default,block"` + Targets []NamedTargetConfig `hcl:"remote_state_data_source,block"` +} + +// TargetConfig describes the target.encryption.state, target.encryption.plan, etc blocks. +type TargetConfig struct { + Method hcl.Expression `hcl:"method,optional"` + Fallback *TargetConfig `hcl:"fallback,block"` +} + +// EnforcableTargetConfig is an extension of the TargetConfig that supports the enforced form. +// +// Note: This struct is copied because gohcl does not support embedding. 
+type EnforcableTargetConfig struct { + Enforced bool `hcl:"enforced,optional"` + Method hcl.Expression `hcl:"method,optional"` + Fallback *TargetConfig `hcl:"fallback,block"` +} + +// AsTargetConfig converts the struct into its parent TargetConfig. +func (e EnforcableTargetConfig) AsTargetConfig() *TargetConfig { + return &TargetConfig{ + Method: e.Method, + Fallback: e.Fallback, + } +} + +// NamedTargetConfig is an extension of the TargetConfig that describes a +// terraform.encryption.remote.remote_state_data.* block. +// +// Note: This struct is copied because gohcl does not support embedding. +type NamedTargetConfig struct { + Name string `hcl:"name,label"` + Method hcl.Expression `hcl:"method,optional"` + Fallback *TargetConfig `hcl:"fallback,block"` +} + +// AsTargetConfig converts the struct into its parent TargetConfig. +func (n NamedTargetConfig) AsTargetConfig() *TargetConfig { + return &TargetConfig{ + Method: n.Method, + Fallback: n.Fallback, + } +} diff --git a/pkg/encryption/config/config_load.go b/pkg/encryption/config/config_load.go new file mode 100644 index 00000000000..d3a516bc8cb --- /dev/null +++ b/pkg/encryption/config/config_load.go @@ -0,0 +1,35 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/json" +) + +// LoadConfigFromString loads a configuration from a string. The sourceName is used to identify the source of the +// configuration in error messages. +// This method serves as an example for how someone using this library might want to load a configuration. +// if they were not using gohcl directly. +// However! Right now, this method should only be used in tests, as OpenTofu should be using gohcl to parse the configuration. 
+func LoadConfigFromString(sourceName string, rawInput string) (*EncryptionConfig, hcl.Diagnostics) { + var diags hcl.Diagnostics + var file *hcl.File + + if strings.TrimSpace(rawInput)[0] == '{' { + file, diags = json.Parse([]byte(rawInput), sourceName) + } else { + file, diags = hclsyntax.ParseConfig([]byte(rawInput), sourceName, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + } + + cfg, cfgDiags := DecodeConfig(file.Body, hcl.Range{Filename: sourceName}) + diags = append(diags, cfgDiags...) + + return cfg, diags +} diff --git a/pkg/encryption/config/config_merge.go b/pkg/encryption/config/config_merge.go new file mode 100644 index 00000000000..8ce569859a6 --- /dev/null +++ b/pkg/encryption/config/config_merge.go @@ -0,0 +1,171 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" +) + +// MergeConfigs merges two Configs together, with the override taking precedence. +func MergeConfigs(cfg *EncryptionConfig, override *EncryptionConfig) *EncryptionConfig { + if cfg == nil { + return override + } + if override == nil { + return cfg + } + return &EncryptionConfig{ + KeyProviderConfigs: mergeKeyProviderConfigs(cfg.KeyProviderConfigs, override.KeyProviderConfigs), + MethodConfigs: mergeMethodConfigs(cfg.MethodConfigs, override.MethodConfigs), + + State: mergeEnforcableTargetConfigs(cfg.State, override.State), + Plan: mergeEnforcableTargetConfigs(cfg.Plan, override.Plan), + Remote: mergeRemoteConfigs(cfg.Remote, override.Remote), + } +} + +func mergeMethodConfigs(configs []MethodConfig, overrides []MethodConfig) []MethodConfig { + // Initialize a copy of configs to preserve the original entries. 
+ merged := make([]MethodConfig, len(configs)) + copy(merged, configs) + + for _, override := range overrides { + wasOverridden := false + + // Attempt to find a match based on type/name + for i, method := range merged { + if method.Type == override.Type && method.Name == override.Name { + // Override the existing method. + merged[i].Body = mergeBody(method.Body, override.Body) + wasOverridden = true + break + } + } + + // If no existing method was overridden, append the new override. + if !wasOverridden { + merged = append(merged, override) + } + } + return merged +} + +func mergeKeyProviderConfigs(configs []KeyProviderConfig, overrides []KeyProviderConfig) []KeyProviderConfig { + // Initialize a copy of configs to preserve the original entries. + merged := make([]KeyProviderConfig, len(configs)) + copy(merged, configs) + + for _, override := range overrides { + wasOverridden := false + + // Attempt to find a match based on type/name + for i, keyProvider := range merged { + if keyProvider.Type == override.Type && keyProvider.Name == override.Name { + // Override the existing key provider. + merged[i].Body = mergeBody(keyProvider.Body, override.Body) + wasOverridden = true + break + } + } + + // If no existing key provider was overridden, append the new override. 
+ if !wasOverridden { + merged = append(merged, override) + } + } + return merged +} + +func mergeTargetConfigs(cfg *TargetConfig, override *TargetConfig) *TargetConfig { + if cfg == nil { + return override + } + if override == nil { + return cfg + } + + merged := &TargetConfig{} + + if override.Method != nil { + merged.Method = override.Method + } else { + merged.Method = cfg.Method + } + + if override.Fallback != nil { + merged.Fallback = override.Fallback + } else { + merged.Fallback = cfg.Fallback + } + + return merged +} + +func mergeEnforcableTargetConfigs(cfg *EnforcableTargetConfig, override *EnforcableTargetConfig) *EnforcableTargetConfig { + if cfg == nil { + return override + } + if override == nil { + return cfg + } + + mergeTarget := mergeTargetConfigs(cfg.AsTargetConfig(), override.AsTargetConfig()) + return &EnforcableTargetConfig{ + Enforced: cfg.Enforced || override.Enforced, + Method: mergeTarget.Method, + Fallback: mergeTarget.Fallback, + } +} + +func mergeRemoteConfigs(cfg *RemoteConfig, override *RemoteConfig) *RemoteConfig { + if cfg == nil { + return override + } + if override == nil { + return cfg + } + + merged := &RemoteConfig{ + Default: mergeTargetConfigs(cfg.Default, override.Default), + Targets: make([]NamedTargetConfig, len(cfg.Targets)), + } + + copy(merged.Targets, cfg.Targets) + for _, overrideTarget := range override.Targets { + found := false + for i, t := range merged.Targets { + found = t.Name == overrideTarget.Name + if found { + // gohcl does not support struct embedding + mergeTarget := mergeTargetConfigs(t.AsTargetConfig(), overrideTarget.AsTargetConfig()) + merged.Targets[i] = NamedTargetConfig{ + Name: t.Name, + Method: mergeTarget.Method, + Fallback: mergeTarget.Fallback, + } + break + } + } + if !found { + merged.Targets = append(merged.Targets, overrideTarget) + } + } + + return merged +} + +func mergeBody(base hcl.Body, override hcl.Body) hcl.Body { + if base == nil { + return override + } + + if override == nil { + 
return base + } + + return hcl2shim.MergeBodies(base, override) +} diff --git a/pkg/encryption/config/config_merge_test.go b/pkg/encryption/config/config_merge_test.go new file mode 100644 index 00000000000..550e850b864 --- /dev/null +++ b/pkg/encryption/config/config_merge_test.go @@ -0,0 +1,335 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" +) + +func TestMergeMethodConfigs(t *testing.T) { + makeMethodConfig := func(typeName, name, key, value string) MethodConfig { + return MethodConfig{ + Type: typeName, + Name: name, + Body: hcl2shim.SynthBody("method", map[string]cty.Value{ + key: cty.StringVal(value), + }), + } + } + + schema := &hcl.BodySchema{Attributes: []hcl.AttributeSchema{{Name: "key"}}} + + tests := []struct { + name string + configSchema *hcl.BodySchema + input []MethodConfig + override []MethodConfig + expected []MethodConfig + }{ + { + name: "empty", + configSchema: nil, + input: []MethodConfig{}, + override: []MethodConfig{}, + expected: []MethodConfig{}, + }, + { + name: "override one method config body", + configSchema: schema, + input: []MethodConfig{ + makeMethodConfig("type", "name", "key", "value"), + }, + override: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override"), + }, + expected: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override"), + }, + }, + { + name: "initial config is empty", + configSchema: schema, + input: []MethodConfig{}, + override: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override"), + }, + expected: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override"), + }, + }, + { + name: "override multiple method 
configs", + configSchema: schema, + input: []MethodConfig{ + makeMethodConfig("type", "name", "key", "value"), + makeMethodConfig("type", "name2", "key", "value"), + makeMethodConfig("type", "name3", "key", "value"), + }, + override: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override1"), + makeMethodConfig("type", "name2", "key", "override2"), + }, + expected: []MethodConfig{ + makeMethodConfig("type", "name", "key", "override1"), + makeMethodConfig("type", "name2", "key", "override2"), + makeMethodConfig("type", "name3", "key", "value"), + }, + }, + { + name: "override config is empty", + configSchema: schema, + input: []MethodConfig{ + makeMethodConfig("type", "name", "key", "value"), + }, + override: []MethodConfig{}, + expected: []MethodConfig{ + makeMethodConfig("type", "name", "key", "value"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := mergeMethodConfigs(test.input, test.override) + + // for each of the expected methods, check if it exists in the output + for _, expectedMethod := range test.expected { + found := false + for _, method := range output { + if method.Type == expectedMethod.Type && method.Name == expectedMethod.Name { + found = true + expectedContent, _ := expectedMethod.Body.Content(test.configSchema) + actualContent, diags := method.Body.Content(test.configSchema) + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %v", diags) + } + // Only compare the attributes here, so that we don't look at things like the MissingItemRange on the hcl.Body + if !reflect.DeepEqual(expectedContent.Attributes, actualContent.Attributes) { + t.Errorf("expected %v, got %v", spew.Sdump(expectedContent.Attributes), spew.Sdump(actualContent.Attributes)) + } + } + } + if !found { + t.Errorf("expected method %v not found in output", spew.Sdump(expectedMethod)) + } + } + }) + } +} + +func TestMergeKeyProviderConfigs(t *testing.T) { + makeKeyProviderConfig := func(typeName, name, key, value 
string) KeyProviderConfig { + return KeyProviderConfig{ + Type: typeName, + Name: name, + Body: hcl2shim.SynthBody("key_provider", map[string]cty.Value{ + key: cty.StringVal(value), + }), + } + } + + schema := &hcl.BodySchema{Attributes: []hcl.AttributeSchema{{Name: "key"}}} + + tests := []struct { + name string + configSchema *hcl.BodySchema + input []KeyProviderConfig + override []KeyProviderConfig + expected []KeyProviderConfig + }{ + { + name: "empty", + configSchema: nil, + input: []KeyProviderConfig{}, + override: []KeyProviderConfig{}, + expected: []KeyProviderConfig{}, + }, + { + name: "override one key provider config body", + configSchema: schema, + input: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "value"), + }, + override: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override"), + }, + expected: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override"), + }, + }, + { + name: "initial config is empty", + configSchema: schema, + input: []KeyProviderConfig{}, + override: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override"), + }, + expected: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override"), + }, + }, + { + name: "override multiple key provider configs", + configSchema: schema, + input: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "value"), + makeKeyProviderConfig("type", "name2", "key", "value"), + }, + override: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override1"), + makeKeyProviderConfig("type", "name2", "key", "override2"), + }, + expected: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "override1"), + makeKeyProviderConfig("type", "name2", "key", "override2"), + }, + }, + { + name: "override config is empty", + configSchema: schema, + input: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "value"), + }, + override: 
[]KeyProviderConfig{}, + expected: []KeyProviderConfig{ + makeKeyProviderConfig("type", "name", "key", "value"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := mergeKeyProviderConfigs(test.input, test.override) + + // for each of the expected key providers, check if it exists in the output + for _, expectedKeyProvider := range test.expected { + found := false + for _, keyProvider := range output { + if keyProvider.Type == expectedKeyProvider.Type && keyProvider.Name == expectedKeyProvider.Name { + found = true + expectedContent, _ := expectedKeyProvider.Body.Content(test.configSchema) + actualContent, diags := keyProvider.Body.Content(test.configSchema) + if diags.HasErrors() { + t.Fatalf("unexpected diagnostics: %v", diags) + } + // Only compare the attributes here, so that we don't look at things like the MissingItemRange on the hcl.Body + if !reflect.DeepEqual(expectedContent.Attributes, actualContent.Attributes) { + t.Errorf("expected %v, got %v", spew.Sdump(expectedContent.Attributes), spew.Sdump(actualContent.Attributes)) + } + } + } + if !found { + t.Errorf("expected key provider %v not found in output", spew.Sdump(expectedKeyProvider)) + } + } + }) + } +} + +func TestMergeTargetConfigs(t *testing.T) { + makeTargetConfig := func(enforced bool, method hcl.Expression, fallback *TargetConfig) *TargetConfig { + return &TargetConfig{ + Method: method, + Fallback: fallback, + } + } + + makeEnforcableTargetConfig := func(enforced bool, method hcl.Expression, fallback *TargetConfig) *EnforcableTargetConfig { + return &EnforcableTargetConfig{ + Enforced: enforced, + Method: method, + Fallback: fallback, + } + } + + expressionOne := hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.String))) + expressionTwo := hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.Bool))) + + tests := []struct { + name string + input *EnforcableTargetConfig + override *EnforcableTargetConfig + expected *EnforcableTargetConfig + }{ + { + 
name: "both nil", + input: nil, + override: nil, + expected: nil, + }, + { + name: "input is nil", + input: nil, + override: makeEnforcableTargetConfig(true, expressionOne, nil), + expected: makeEnforcableTargetConfig(true, expressionOne, nil), + }, + { + name: "override is nil", + input: makeEnforcableTargetConfig(true, expressionOne, nil), + override: nil, + expected: makeEnforcableTargetConfig(true, expressionOne, nil), + }, + { + name: "override target config method", + input: makeEnforcableTargetConfig(true, expressionOne, nil), + override: makeEnforcableTargetConfig(true, expressionTwo, nil), + expected: makeEnforcableTargetConfig(true, expressionTwo, nil), + }, + { + name: "override target config fallback", + input: makeEnforcableTargetConfig(true, expressionOne, makeTargetConfig(true, expressionOne, nil)), + override: makeEnforcableTargetConfig(true, expressionOne, makeTargetConfig(true, expressionTwo, nil)), + expected: makeEnforcableTargetConfig(true, expressionOne, makeTargetConfig(true, expressionTwo, nil)), + }, + { + name: "override target config fallback", + input: makeEnforcableTargetConfig(true, expressionOne, nil), + override: makeEnforcableTargetConfig(true, expressionOne, makeTargetConfig(true, expressionTwo, nil)), + expected: makeEnforcableTargetConfig(true, expressionOne, makeTargetConfig(true, expressionTwo, nil)), + }, + { + name: "override target config enforced - should be true if any are true", + input: makeEnforcableTargetConfig(true, expressionOne, nil), + override: makeEnforcableTargetConfig(false, expressionOne, nil), + expected: makeEnforcableTargetConfig(true, expressionOne, nil), + }, + { + name: "override target config enforced - should be true if any are true", + input: makeEnforcableTargetConfig(false, expressionOne, nil), + override: makeEnforcableTargetConfig(true, expressionOne, nil), + expected: makeEnforcableTargetConfig(true, expressionOne, nil), + }, + { + name: "override target config enforced - should be false if both 
are false", + input: makeEnforcableTargetConfig(false, expressionOne, nil), + override: makeEnforcableTargetConfig(false, expressionOne, nil), + expected: makeEnforcableTargetConfig(false, expressionOne, nil), + }, + { + name: "override enforced, method and fallback", + input: makeEnforcableTargetConfig(false, expressionOne, makeTargetConfig(true, expressionOne, nil)), + override: makeEnforcableTargetConfig(true, expressionTwo, makeTargetConfig(true, expressionTwo, nil)), + expected: makeEnforcableTargetConfig(true, expressionTwo, makeTargetConfig(true, expressionTwo, nil)), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := mergeEnforcableTargetConfigs(test.input, test.override) + + if !reflect.DeepEqual(output, test.expected) { + t.Errorf("expected %v, got %v", spew.Sdump(test.expected), spew.Sdump(output)) + } + }) + } +} diff --git a/pkg/encryption/config/config_parse.go b/pkg/encryption/config/config_parse.go new file mode 100644 index 00000000000..ab8c0cdaf2b --- /dev/null +++ b/pkg/encryption/config/config_parse.go @@ -0,0 +1,76 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" +) + +// DecodeConfig takes a hcl.Body and decodes it into a Config struct. +// This method is here as an example for how someone using this library might want to decode a configuration. +// if they were not using gohcl directly. +// Right now for real world use this is only intended to be used in tests, until we publish this publicly. 
+func DecodeConfig(body hcl.Body, rng hcl.Range) (*EncryptionConfig, hcl.Diagnostics) { + cfg := &EncryptionConfig{DeclRange: rng} + + diags := gohcl.DecodeBody(body, nil, cfg) + if diags.HasErrors() { + return nil, diags + } + + for i, kp := range cfg.KeyProviderConfigs { + for j, okp := range cfg.KeyProviderConfigs { + if i != j && kp.Type == okp.Type && kp.Name == okp.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate key_provider", + Detail: fmt.Sprintf("Found multiple instances of key_provider.%s.%s", kp.Type, kp.Name), + Subject: rng.Ptr(), + }) + break + } + } + } + + for i, m := range cfg.MethodConfigs { + for j, om := range cfg.MethodConfigs { + if i != j && m.Type == om.Type && m.Name == om.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate method", + Detail: fmt.Sprintf("Found multiple instances of method.%s.%s", m.Type, m.Name), + Subject: rng.Ptr(), + }) + break + } + } + } + + if cfg.Remote != nil { + for i, t := range cfg.Remote.Targets { + for j, ot := range cfg.Remote.Targets { + if i != j && t.Name == ot.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate remote_data_source", + Detail: fmt.Sprintf("Found multiple instances of remote_data_source.%s", t.Name), + Subject: rng.Ptr(), + }) + break + } + } + } + } + + if diags.HasErrors() { + return nil, diags + } + + return cfg, diags +} diff --git a/pkg/encryption/default_registry.go b/pkg/encryption/default_registry.go new file mode 100644 index 00000000000..6bf0a15e36b --- /dev/null +++ b/pkg/encryption/default_registry.go @@ -0,0 +1,39 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/aws_kms" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/gcp_kms" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/openbao" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/pbkdf2" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" + "github.com/kubegems/opentofu/pkg/encryption/method/unencrypted" + "github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry" +) + +var DefaultRegistry = lockingencryptionregistry.New() + +func init() { + if err := DefaultRegistry.RegisterKeyProvider(pbkdf2.New()); err != nil { + panic(err) + } + if err := DefaultRegistry.RegisterKeyProvider(aws_kms.New()); err != nil { + panic(err) + } + if err := DefaultRegistry.RegisterKeyProvider(gcp_kms.New()); err != nil { + panic(err) + } + if err := DefaultRegistry.RegisterKeyProvider(openbao.New()); err != nil { + panic(err) + } + if err := DefaultRegistry.RegisterMethod(aesgcm.New()); err != nil { + panic(err) + } + if err := DefaultRegistry.RegisterMethod(unencrypted.New()); err != nil { + panic(err) + } +} diff --git a/pkg/encryption/encryption.go b/pkg/encryption/encryption.go new file mode 100644 index 00000000000..2e908585c7b --- /dev/null +++ b/pkg/encryption/encryption.go @@ -0,0 +1,115 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/registry" +) + +// Encryption contains the methods for obtaining a StateEncryption or PlanEncryption correctly configured for a specific +// purpose. 
If no encryption configuration is present, it should return a pass through method that doesn't do anything. +type Encryption interface { + // State produces a StateEncryption overlay for encrypting and decrypting state files for local storage. + State() StateEncryption + + // Plan produces a PlanEncryption overlay for encrypting and decrypting plan files. + Plan() PlanEncryption + + // RemoteState produces a StateEncryption for reading remote states using the terraform_remote_state data + // source. + RemoteState(string) StateEncryption +} + +type encryption struct { + state StateEncryption + plan PlanEncryption + remoteDefault StateEncryption + remotes map[string]StateEncryption + + // Inputs + cfg *config.EncryptionConfig + reg registry.Registry +} + +// New creates a new Encryption provider from the given configuration and registry. +func New(reg registry.Registry, cfg *config.EncryptionConfig, staticEval *configs.StaticEvaluator) (Encryption, hcl.Diagnostics) { + if cfg == nil { + return Disabled(), nil + } + + enc := &encryption{ + cfg: cfg, + reg: reg, + + remotes: make(map[string]StateEncryption), + } + var diags hcl.Diagnostics + var encDiags hcl.Diagnostics + + if cfg.State != nil { + enc.state, encDiags = newStateEncryption(enc, cfg.State.AsTargetConfig(), cfg.State.Enforced, "state", staticEval) + diags = append(diags, encDiags...) + } else { + enc.state = StateEncryptionDisabled() + } + + if cfg.Plan != nil { + enc.plan, encDiags = newPlanEncryption(enc, cfg.Plan.AsTargetConfig(), cfg.Plan.Enforced, "plan", staticEval) + diags = append(diags, encDiags...) + } else { + enc.plan = PlanEncryptionDisabled() + } + + if cfg.Remote != nil && cfg.Remote.Default != nil { + enc.remoteDefault, encDiags = newStateEncryption(enc, cfg.Remote.Default, false, "remote.default", staticEval) + diags = append(diags, encDiags...) 
+ } else { + enc.remoteDefault = StateEncryptionDisabled() + } + + if cfg.Remote != nil { + for _, remoteTarget := range cfg.Remote.Targets { + // TODO the addr here should be generated in one place. + addr := "remote.remote_state_datasource." + remoteTarget.Name + enc.remotes[remoteTarget.Name], encDiags = newStateEncryption(enc, remoteTarget.AsTargetConfig(), false, addr, staticEval) + diags = append(diags, encDiags...) + } + } + if diags.HasErrors() { + return nil, diags + } + return enc, diags +} + +func (e *encryption) State() StateEncryption { + return e.state +} + +func (e *encryption) Plan() PlanEncryption { + return e.plan +} + +func (e *encryption) RemoteState(name string) StateEncryption { + if enc, ok := e.remotes[name]; ok { + return enc + } + return e.remoteDefault +} + +// Mostly used in tests +type encryptionDisabled struct{} + +func Disabled() Encryption { + return &encryptionDisabled{} +} +func (e *encryptionDisabled) State() StateEncryption { return StateEncryptionDisabled() } +func (e *encryptionDisabled) Plan() PlanEncryption { return PlanEncryptionDisabled() } +func (e *encryptionDisabled) RemoteState(name string) StateEncryption { + return StateEncryptionDisabled() +} diff --git a/pkg/encryption/enctest/setup.go b/pkg/encryption/enctest/setup.go new file mode 100644 index 00000000000..6dad769861c --- /dev/null +++ b/pkg/encryption/enctest/setup.go @@ -0,0 +1,108 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package enctest + +// This package is used for supplying a fully configured encryption instance for use in unit and integration tests + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/static" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" + "github.com/kubegems/opentofu/pkg/encryption/method/unencrypted" + "github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry" +) + +// TODO docstrings once this stabilizes + +func EncryptionDirect(configData string) encryption.Encryption { + reg := lockingencryptionregistry.New() + if err := reg.RegisterKeyProvider(static.New()); err != nil { + panic(err) + } + if err := reg.RegisterMethod(aesgcm.New()); err != nil { + panic(err) + } + if err := reg.RegisterMethod(unencrypted.New()); err != nil { + panic(err) + } + + cfg, diags := config.LoadConfigFromString("Test Config Source", configData) + + handleDiags(diags) + + staticEval := configs.NewStaticEvaluator(nil, configs.RootModuleCallForTesting()) + + enc, diags := encryption.New(reg, cfg, staticEval) + handleDiags(diags) + + return enc +} + +func EncryptionRequired() encryption.Encryption { + return EncryptionDirect(` + key_provider "static" "basic" { + key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169" + } + method "aes_gcm" "example" { + keys = key_provider.static.basic + } + state { + method = method.aes_gcm.example + } + plan { + method = method.aes_gcm.example + } + remote_state_data_sources { + default { + method = method.aes_gcm.example + } + } + `) +} + +func EncryptionWithFallback() encryption.Encryption { + return EncryptionDirect(` + key_provider "static" "basic" { + key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169" + } + method "aes_gcm" "example" 
{ + keys = key_provider.static.basic + } + method "unencrypted" "migration" {} + state { + method = method.aes_gcm.example + fallback { + method = method.unencrypted.migration + } + } + plan { + method = method.aes_gcm.example + fallback { + method = method.unencrypted.migration + } + } + remote_state_data_sources { + default { + method = method.aes_gcm.example + fallback { + method = method.unencrypted.migration + } + } + } + `) +} + +func handleDiags(diags hcl.Diagnostics) { + for _, d := range diags { + println(d.Error()) + } + if diags.HasErrors() { + panic(diags.Error()) + } +} diff --git a/pkg/encryption/example_test.go b/pkg/encryption/example_test.go new file mode 100644 index 00000000000..4b029c66e1b --- /dev/null +++ b/pkg/encryption/example_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption_test + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/static" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" + "github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry" +) + +var ( + ConfigA = ` +state { + enforced = true +} +` + ConfigB = ` +key_provider "static" "basic" { + key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169" +} +method "aes_gcm" "example" { + keys = key_provider.static.basic +} +state { + method = method.aes_gcm.example +} +` +) + +// This example demonstrates how to use the encryption package to encrypt and decrypt data. 
+func Example() { + // Construct a new registry + // the registry is where we store the key providers and methods + reg := lockingencryptionregistry.New() + if err := reg.RegisterKeyProvider(static.New()); err != nil { + panic(err) + } + if err := reg.RegisterMethod(aesgcm.New()); err != nil { + panic(err) + } + + // Load the 2 different configurations + cfgA, diags := config.LoadConfigFromString("Test Source A", ConfigA) + handleDiags(diags) + + cfgB, diags := config.LoadConfigFromString("Test Source B", ConfigB) + handleDiags(diags) + + // Merge the configurations + cfg := config.MergeConfigs(cfgA, cfgB) + + // Construct static evaluator to pass additional values into encryption configuration. + staticEval := configs.NewStaticEvaluator(nil, configs.RootModuleCallForTesting()) + + // Construct the encryption object + enc, diags := encryption.New(reg, cfg, staticEval) + handleDiags(diags) + + sfe := enc.State() + + // Encrypt the data, for this example we will be using the string `{"serial": 42, "lineage": "magic"}`, + // but in a real world scenario this would be the plan file. 
+ sourceData := []byte(`{"serial": 42, "lineage": "magic"}`) + encrypted, err := sfe.EncryptState(sourceData) + if err != nil { + panic(err) + } + + if string(encrypted) == `{"serial": 42, "lineage": "magic"}` { + panic("The data has not been encrypted!") + } + + // Decrypt + decryptedState, err := sfe.DecryptState(encrypted) + if err != nil { + panic(err) + } + + fmt.Printf("%s\n", decryptedState) + // Output: {"serial": 42, "lineage": "magic"} +} + +func handleDiags(diags hcl.Diagnostics) { + for _, d := range diags { + println(d.Error()) + } + if diags.HasErrors() { + panic(diags.Error()) + } +} diff --git a/pkg/encryption/keyprovider.go b/pkg/encryption/keyprovider.go new file mode 100644 index 00000000000..adaa77a495d --- /dev/null +++ b/pkg/encryption/keyprovider.go @@ -0,0 +1,232 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/lang" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/registry" + "github.com/zclconf/go-cty/cty" +) + +// setupKeyProviders sets up the key providers for encryption. It returns a list of diagnostics if any of the key providers +// are invalid. +func (e *targetBuilder) setupKeyProviders() hcl.Diagnostics { + var diags hcl.Diagnostics + + e.keyValues = make(map[string]map[string]cty.Value) + + for _, keyProviderConfig := range e.cfg.KeyProviderConfigs { + diags = append(diags, e.setupKeyProvider(keyProviderConfig, nil)...) 
+ } + + // Regenerate the context now that the key provider is loaded + kpMap := make(map[string]cty.Value) + for name, kps := range e.keyValues { + kpMap[name] = cty.ObjectVal(kps) + } + e.ctx.Variables["key_provider"] = cty.ObjectVal(kpMap) + + return diags +} + +func (e *targetBuilder) setupKeyProvider(cfg config.KeyProviderConfig, stack []config.KeyProviderConfig) hcl.Diagnostics { + // Ensure cfg.Type is in keyValues, if it isn't then add it in preparation for the next step + if _, ok := e.keyValues[cfg.Type]; !ok { + e.keyValues[cfg.Type] = make(map[string]cty.Value) + } + + // Check if we have already setup this Descriptor (due to dependency loading) + // if we've already setup this key provider, then we don't need to do it again + // and we can return early + if _, ok := e.keyValues[cfg.Type][cfg.Name]; ok { + return nil + } + + // Mark this key provider as partially handled. This value will be replaced below once it is actually known. + // The goal is to allow an early return via the above if statement to prevent duplicate errors if errors are encountered in the key loading stack. + e.keyValues[cfg.Type][cfg.Name] = cty.UnknownVal(cty.DynamicPseudoType) + + // Check for circular references, this is done by inspecting the stack of key providers + // that are currently being setup. If we find a key provider in the stack that matches + // the current key provider, then we have a circular reference and we should return an error + // to the user. 
+ for _, s := range stack { + if s == cfg { + addr, diags := keyprovider.NewAddr(cfg.Type, cfg.Name) + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Circular reference detected", + // TODO add the stack trace to the detail message + Detail: fmt.Sprintf("Can not load %q due to circular reference", addr), + }, + ) + return diags + } + } + stack = append(stack, cfg) + + // Pull the meta key out for error messages and meta storage + metakey, diags := cfg.Addr() + if diags.HasErrors() { + return diags + } + + // Lookup the KeyProviderDescriptor from the registry + id := keyprovider.ID(cfg.Type) + keyProviderDescriptor, err := e.reg.GetKeyProviderDescriptor(id) + if err != nil { + if errors.Is(err, ®istry.KeyProviderNotFoundError{}) { + return diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unknown key_provider type", + Detail: fmt.Sprintf("Can not find %q", cfg.Type), + }) + } + return diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Error fetching key_provider %q", cfg.Type), + Detail: err.Error(), + }) + } + + // Now that we know we have the correct Descriptor, we can decode the configuration + // and build the KeyProvider + keyProviderConfig := keyProviderDescriptor.ConfigStruct() + + // Locate all the dependencies + deps, varDiags := gohcl.VariablesInBody(cfg.Body, keyProviderConfig) + diags = append(diags, varDiags...) + if diags.HasErrors() { + return diags + } + + // lang.References is going to fail parsing key_provider deps + // so we filter them out in nonKeyProviderDeps. + var nonKeyProviderDeps []hcl.Traversal + + // Setting up key providers from deps. 
+ for _, dep := range deps { + // Key Provider references should be in the form key_provider.type.name + if len(dep) != 3 { + nonKeyProviderDeps = append(nonKeyProviderDeps, dep) + continue + } + + // TODO this should be more defensive + depRoot := (dep[0].(hcl.TraverseRoot)).Name + depType := (dep[1].(hcl.TraverseAttr)).Name + depName := (dep[2].(hcl.TraverseAttr)).Name + + if depRoot != "key_provider" { + nonKeyProviderDeps = append(nonKeyProviderDeps, dep) + continue + } + + kpc, ok := e.cfg.GetKeyProvider(depType, depName) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Undefined Key Provider", + Detail: fmt.Sprintf("Key provider %s.%s is missing from the encryption configuration.", depType, depName), + Subject: dep.SourceRange().Ptr(), + }) + continue + } + + depDiags := e.setupKeyProvider(kpc, stack) + diags = append(diags, depDiags...) + } + if diags.HasErrors() { + // We should not continue now if we have any diagnostics that are errors + // as we may end up in an inconsistent state. + // The reason we collate the diags here and then show them instead of showing them as they arise + // is to ensure that the end user does not have to play whack-a-mole with the errors one at a time. + return diags + } + + refs, refDiags := lang.References(addrs.ParseRef, nonKeyProviderDeps) + diags = append(diags, refDiags.ToHCL()...) + if diags.HasErrors() { + return diags + } + + evalCtx, evalDiags := e.staticEval.EvalContextWithParent(e.ctx, configs.StaticIdentifier{ + Module: addrs.RootModule, + Subject: fmt.Sprintf("encryption.key_provider.%s.%s", cfg.Type, cfg.Name), + DeclRange: e.cfg.DeclRange, + }, refs) + diags = append(diags, evalDiags...) + if diags.HasErrors() { + return diags + } + + // Initialize the Key Provider + decodeDiags := gohcl.DecodeBody(cfg.Body, evalCtx, keyProviderConfig) + diags = append(diags, decodeDiags...) 
+ if diags.HasErrors() { + return diags + } + + // Build the Key Provider from the configuration + keyProvider, keyMetaIn, err := keyProviderConfig.Build() + if err != nil { + return append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to build encryption key data", + Detail: fmt.Sprintf("%s failed with error: %s", metakey, err.Error()), + }) + } + + // Add the metadata + if meta, ok := e.keyProviderMetadata[metakey]; ok { + err := json.Unmarshal(meta, keyMetaIn) + if err != nil { + return append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to decode encrypted metadata (did you change your encryption config?)", + Detail: fmt.Sprintf("metadata decoder for %s failed with error: %s", metakey, err.Error()), + }) + } + } + + output, keyMetaOut, err := keyProvider.Provide(keyMetaIn) + if err != nil { + return append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to fetch encryption key data", + Detail: fmt.Sprintf("%s failed with error: %s", metakey, err.Error()), + }) + } + + if keyMetaOut != nil { + e.keyProviderMetadata[metakey], err = json.Marshal(keyMetaOut) + + if err != nil { + return append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unable to encode encrypted metadata", + Detail: fmt.Sprintf("metadata encoder for %s failed with error: %s", metakey, err.Error()), + }) + } + } + + e.keyValues[cfg.Type][cfg.Name] = output.Cty() + + return nil + +} diff --git a/pkg/encryption/keyprovider/README.md b/pkg/encryption/keyprovider/README.md new file mode 100644 index 00000000000..002caa37944 --- /dev/null +++ b/pkg/encryption/keyprovider/README.md @@ -0,0 +1,56 @@ +# OpenTofu key providers + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. 
+ +This folder contains the interface that key providers must implement in order to work with OpenTofu. Please read this document carefully if you intend to work on a key provider. + +## What are key providers? + +Key providers in OpenTofu are responsible for integrating various key-management systems or key derivation (passphrase) functions. They consist of 3 components: + +1. The **descriptor** provides a unique ID and a struct with HCL tags to read the configuration into. +2. The **configuration** is a struct OpenTofu parses the user-supplied configuration into. The `Build` function on this struct creates the key provider itself. +3. The **key provider** is responsible for creating one encryption key and one decryption key. It receives stored metadata and must return similar metadata in the result. + +## What is metadata? + +Some key providers need to store data alongside the encrypted data, such as the salt, the hashing function name, the key length, etc. The key provider can use this metadata to recreate the exact same key for decryption as it used for encryption. However, the key provider could also provide a different key for each encryption and decryption, allowing for a quick rotation of encryption parameters. + +> [!WARNING] +> The metadata is **bound to the key provider name**. In other words, if you change the key provider name, OpenTofu will be unable to decrypt old data. + +## Implementing a key provider + +When you implement a key provider, take a look at the [static](static) key provider as a template. You should never use this provider in production because it exposes users to certain weaknesses in some encryption methods, but it is a simple example for the structure. + +### Testing your provider (do this first!) + +Before you even go about writing a key provider, please set up the compliance tests. You can create a single test case that calls `compliancetest.ComplianceTest`. 
This test suite will run your key provider through all important compliance tests and will make sure that you are not missing anything during the implementation. + +### Implementing the descriptor + +The descriptor is very simple, you need to implement the [`Descriptor`](descriptor.go) interface in a type. (It does not have to be a struct.) However, make sure that the `ConfigStruct` always returns a struct with `hcl` tags on it. For more information on the `hcl` tags, see the [gohcl documentation](https://godocs.io/github.com/hashicorp/hcl/v2/gohcl). + +### The config struct + +Next, you need to create a config structure. This structure should hold all the fields you expect a user to fill out. **This must be a struct, and you must add `hcl` tags to each field you expect the user to fill out.** + +Additionally, you must implement the `Build` function described in the [`Config` interface](config.go). You can take a look at [static/config.go](static/config.go) for an example on implementing this. + +### The metadata + +The metadata can be anything as long as it's JSON-serializable, but we recommend using a struct for future extensibility. If you do not need metadata, simply use `nil`. + +Think about what data you will need to decrypt data. For example, the user may change the key length in a key derivation function, but you still need the old key length to decrypt. Hence, it needs to be part of the metadata. + +> [!WARNING] +> The metadata is stored **unencrypted** and **unauthenticated**. Do not use it to store sensitive details and treat it as untrusted as it may contain malicious data. + +### The key provider + +The heart of your key provider is... well, your key provider. It has two functions: to create a decryption key and to create an encryption key. If your key doesn't change, these two keys can be the same. However, if you generate new keys every time, you should provide the old key as the decryption key and the new key as the encryption key. 
If you need to pass along data to help with recreating the decryption key, you can use the metadata for that. + +### The output + +Your key provider must emit the [`keyprovider.Output`](output.go) struct with the keys. \ No newline at end of file diff --git a/pkg/encryption/keyprovider/addr.go b/pkg/encryption/keyprovider/addr.go new file mode 100644 index 00000000000..4dca01ea419 --- /dev/null +++ b/pkg/encryption/keyprovider/addr.go @@ -0,0 +1,68 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" +) + +// Addr is a type-alias for key provider address strings that identify a specific key provider configuration. +// The Addr is an opaque value. Do not perform string manipulation on it outside the functions supplied by the +// keyprovider package. +type Addr string + +// Validate validates the Addr for formal naming conformance, but does not check if the referenced key provider actually +// exists in the configuration. +func (a Addr) Validate() hcl.Diagnostics { + if !addrRe.MatchString(string(a)) { + return hcl.Diagnostics{ + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid key provider address", + Detail: fmt.Sprintf( + "The supplied key provider address does not match the required form of %s", + addrRe.String(), + ), + }, + } + } + return nil +} + +// NewAddr creates a new Addr type from the provider and name supplied. The Addr is a type-alias for key provider +// address strings that identify a specific key provider configuration. You should treat the value as opaque and not +// perform string manipulation on it outside the functions supplied by the keyprovider package. 
+func NewAddr(provider string, name string) (addr Addr, err hcl.Diagnostics) { + if !nameRe.MatchString(provider) { + err = err.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "The provided key provider type is invalid", + Detail: fmt.Sprintf( + "The supplied key provider type (%s) does not match the required form of %s.", + provider, + nameRe.String(), + ), + }, + ) + } + if !nameRe.MatchString(name) { + err = err.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "The provided key provider name is invalid", + Detail: fmt.Sprintf( + "The supplied key provider name (%s) does not match the required form of %s.", + name, + nameRe.String(), + ), + }, + ) + } + return Addr(fmt.Sprintf("key_provider.%s.%s", provider, name)), err +} diff --git a/pkg/encryption/keyprovider/aws_kms/README.md b/pkg/encryption/keyprovider/aws_kms/README.md new file mode 100644 index 00000000000..6c99948ae47 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/README.md @@ -0,0 +1,38 @@ +# AWS KMS Key Provider + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +This folder contains the code for the AWS KMS Key Provider. The user will be able to provide a reference to an AWS KMS key which can be used to encrypt and decrypt the data. 
+ +## Configuration + +You can configure this key provider by specifying the following options: + +```hcl2 +terraform { + encryption { + key_provider "aws_kms" "myprovider" { + kms_key_id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } + } +} +``` +## Key Provider Options - kms_key_id + +The kms_key_id can refer to one of the following: + +- Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab +- Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab +- Alias name: alias/ExampleAlias +- Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + +For more information, see https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/kms#GenerateDataKeyInput + +## State Snapshotting and Key Usage + +### Overview + +OpenTofu generates a new encryption key every time we store encrypted data, ensuring high security by minimizing key reuse. +This has some minor cost implications that should be communicated to the end users: there may be more keys generated than expected, as OpenTofu uses a new key for each state snapshot. +It is important to generate a new key for each state snapshot to ensure that the state snapshot is encrypted with a unique key instead of reusing the same key for all state snapshots and thus reducing the security of the system. \ No newline at end of file diff --git a/pkg/encryption/keyprovider/aws_kms/compliance_test.go b/pkg/encryption/keyprovider/aws_kms/compliance_test.go new file mode 100644 index 00000000000..672f3452d38 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/compliance_test.go @@ -0,0 +1,142 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/compliancetest" +) + +func TestKeyProvider(t *testing.T) { + testKeyId := getKey(t) + + if testKeyId == "" { + testKeyId = "alias/my-mock-key" + injectDefaultMock() + + t.Setenv("AWS_REGION", "us-east-1") + t.Setenv("AWS_ACCESS_KEY_ID", "accesskey") + t.Setenv("AWS_SECRET_ACCESS_KEY", "secretkey") + } + + compliancetest.ComplianceTest( + t, + compliancetest.TestConfiguration[*descriptor, *Config, *keyMeta, *keyProvider]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *keyProvider]{ + "success": { + HCL: fmt.Sprintf(`key_provider "aws_kms" "foo" { + kms_key_id = "%s" + key_spec = "AES_256" + skip_credentials_validation = true // required for mocking + }`, testKeyId), + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, keyProvider *keyProvider) error { + if config.KMSKeyID != testKeyId { + return fmt.Errorf("incorrect key ID returned") + } + return nil + }, + }, + "empty": { + HCL: `key_provider "aws_kms" "foo" {}`, + ValidHCL: false, + ValidBuild: false, + }, + "invalid-key-spec": { + HCL: fmt.Sprintf(`key_provider "aws_kms" "foo" { + kms_key_id = "%s" + key_spec = "BROKEN STUFF" + }`, testKeyId), + ValidHCL: true, + ValidBuild: false, + }, + "empty-key-id": { + HCL: `key_provider "aws_kms" "foo" { + kms_key_id = "" + key_spec = "AES_256" + }`, + ValidHCL: true, + ValidBuild: false, + }, + "empty-key-spec": { + HCL: `key_provider "aws_kms" "foo" { + kms_key_id = "alias/temp" + key_spec = "" + }`, + ValidHCL: true, + ValidBuild: false, + }, + "unknown-property": { + HCL: fmt.Sprintf(`key_provider "aws_kms" "foo" { + kms_key_id = "%s" + key_spec = "AES_256" + unknown_property = "foo" + }`, testKeyId), + ValidHCL: false, + ValidBuild: false, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, 
*keyProvider]{ + "success": { + Config: &Config{ + KMSKeyID: testKeyId, + KeySpec: "AES_256", + + SkipCredsValidation: true, // Required for mocking + }, + ValidBuild: true, + Validate: nil, + }, + "empty": { + Config: &Config{ + KMSKeyID: "", + KeySpec: "", + }, + ValidBuild: false, + Validate: nil, + }, + }, + MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, *keyMeta]{ + "empty": { + ValidConfig: &Config{ + KMSKeyID: testKeyId, + KeySpec: "AES_256", + + SkipCredsValidation: true, // Required for mocking + }, + Meta: &keyMeta{}, + IsPresent: false, + IsValid: false, + }, + }, + ProvideTestCase: compliancetest.ProvideTestCase[*Config, *keyMeta]{ + ValidConfig: &Config{ + KMSKeyID: testKeyId, + KeySpec: "AES_256", + SkipCredsValidation: true, // Required for mocking + }, + ValidateKeys: func(dec []byte, enc []byte) error { + if len(dec) == 0 { + return fmt.Errorf("decryption key is empty") + } + if len(enc) == 0 { + return fmt.Errorf("encryption key is empty") + } + return nil + }, + ValidateMetadata: func(meta *keyMeta) error { + if meta.CiphertextBlob == nil || len(meta.CiphertextBlob) == 0 { + return fmt.Errorf("ciphertext blob is nil") + } + return nil + }, + }, + }) +} diff --git a/pkg/encryption/keyprovider/aws_kms/config.go b/pkg/encryption/keyprovider/aws_kms/config.go new file mode 100644 index 00000000000..e079ffa01bb --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/config.go @@ -0,0 +1,255 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "context" + "fmt" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/kms" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + awsbase "github.com/hashicorp/aws-sdk-go-base/v2" + baselogging "github.com/hashicorp/aws-sdk-go-base/v2/logging" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/version" +) + +// Can be overridden for test mocking +var newKMSFromConfig func(aws.Config) kmsClient = func(cfg aws.Config) kmsClient { + return kms.NewFromConfig(cfg) +} + +type Config struct { + // KeyProvider Config + KMSKeyID string `hcl:"kms_key_id"` + KeySpec string `hcl:"key_spec"` + + // Mirrored S3 Backend Config, mirror any changes + AccessKey string `hcl:"access_key,optional"` + Endpoints []ConfigEndpoints `hcl:"endpoints,block"` + MaxRetries int `hcl:"max_retries,optional"` + Profile string `hcl:"profile,optional"` + Region string `hcl:"region,optional"` + SecretKey string `hcl:"secret_key,optional"` + SkipCredsValidation bool `hcl:"skip_credentials_validation,optional"` + SkipRequestingAccountId bool `hcl:"skip_requesting_account_id,optional"` + STSRegion string `hcl:"sts_region,optional"` + Token string `hcl:"token,optional"` + HTTPProxy *string `hcl:"http_proxy,optional"` + HTTPSProxy *string `hcl:"https_proxy,optional"` + NoProxy string `hcl:"no_proxy,optional"` + Insecure bool `hcl:"insecure,optional"` + UseDualStackEndpoint bool `hcl:"use_dualstack_endpoint,optional"` + UseFIPSEndpoint bool `hcl:"use_fips_endpoint,optional"` + CustomCABundle string `hcl:"custom_ca_bundle,optional"` + EC2MetadataServiceEndpoint string `hcl:"ec2_metadata_service_endpoint,optional"` + EC2MetadataServiceEndpointMode string `hcl:"ec2_metadata_service_endpoint_mode,optional"` + SkipMetadataAPICheck 
*bool `hcl:"skip_metadata_api_check,optional"` + SharedCredentialsFiles []string `hcl:"shared_credentials_files,optional"` + SharedConfigFiles []string `hcl:"shared_config_files,optional"` + AssumeRole *AssumeRole `hcl:"assume_role,optional"` + AssumeRoleWithWebIdentity *AssumeRoleWithWebIdentity `hcl:"assume_role_with_web_identity,optional"` + AllowedAccountIds []string `hcl:"allowed_account_ids,optional"` + ForbiddenAccountIds []string `hcl:"forbidden_account_ids,optional"` + RetryMode string `hcl:"retry_mode,optional"` +} + +func stringAttrEnvFallback(val string, env string) string { + if val != "" { + return val + } + return os.Getenv(env) +} + +func stringArrayAttrEnvFallback(val []string, env string) []string { + if len(val) != 0 { + return val + } + envVal := os.Getenv(env) + if envVal != "" { + return []string{envVal} + } + return nil +} + +func (c Config) asAWSBase() (*awsbase.Config, error) { + // Get endpoints to use + endpoints, err := c.getEndpoints() + if err != nil { + return nil, err + } + + // Get assume role + assumeRole, err := c.AssumeRole.asAWSBase() + if err != nil { + return nil, err + } + + // Get assume role with web identity + assumeRoleWithWebIdentity, err := c.AssumeRoleWithWebIdentity.asAWSBase() + if err != nil { + return nil, err + } + + // validate region + if c.Region == "" && os.Getenv("AWS_REGION") == "" && os.Getenv("AWS_DEFAULT_REGION") == "" { + return nil, fmt.Errorf(`the "region" attribute or the "AWS_REGION" or "AWS_DEFAULT_REGION" environment variables must be set.`) + } + + // Retry Mode + if c.MaxRetries == 0 { + c.MaxRetries = 5 + } + var retryMode aws.RetryMode + if len(c.RetryMode) != 0 { + retryMode, err = aws.ParseRetryMode(c.RetryMode) + if err != nil { + return nil, fmt.Errorf("%w: expected %q or %q", err, aws.RetryModeStandard, aws.RetryModeAdaptive) + } + } + + // IDMS handling + imdsEnabled := imds.ClientDefaultEnableState + if c.SkipMetadataAPICheck != nil { + if *c.SkipMetadataAPICheck { + imdsEnabled = 
imds.ClientEnabled + } else { + imdsEnabled = imds.ClientDisabled + } + } + + // validate account_ids + if len(c.AllowedAccountIds) != 0 && len(c.ForbiddenAccountIds) != 0 { + return nil, fmt.Errorf("conflicting config attributes: only allowed_account_ids or forbidden_account_ids can be specified, not both") + } + + return &awsbase.Config{ + AccessKey: c.AccessKey, + CallerDocumentationURL: "https://opentofu.org/docs/language/settings/backends/s3", // TODO + CallerName: "KMS Key Provider", + IamEndpoint: stringAttrEnvFallback(endpoints.IAM, "AWS_ENDPOINT_URL_IAM"), + MaxRetries: c.MaxRetries, + RetryMode: retryMode, + Profile: c.Profile, + Region: c.Region, + SecretKey: c.SecretKey, + SkipCredsValidation: c.SkipCredsValidation, + SkipRequestingAccountId: c.SkipRequestingAccountId, + StsEndpoint: stringAttrEnvFallback(endpoints.STS, "AWS_ENDPOINT_URL_STS"), + StsRegion: c.STSRegion, + Token: c.Token, + + // Note: we don't need to read env variables explicitly because they are read implicitly by aws-sdk-base-go: + // see: https://github.com/hashicorp/aws-sdk-go-base/blob/v2.0.0-beta.41/internal/config/config.go#L133 + // which relies on: https://cs.opensource.google/go/x/net/+/refs/tags/v0.18.0:http/httpproxy/proxy.go;l=89-96 + HTTPProxy: c.HTTPProxy, + HTTPSProxy: c.HTTPSProxy, + NoProxy: c.NoProxy, + Insecure: c.Insecure, + UseDualStackEndpoint: c.UseDualStackEndpoint, + UseFIPSEndpoint: c.UseFIPSEndpoint, + UserAgent: awsbase.UserAgentProducts{ + {Name: "APN", Version: "1.0"}, + {Name: httpclient.DefaultApplicationName, Version: version.String()}, + }, + CustomCABundle: stringAttrEnvFallback(c.CustomCABundle, "AWS_CA_BUNDLE"), + + EC2MetadataServiceEnableState: imdsEnabled, + EC2MetadataServiceEndpoint: stringAttrEnvFallback(c.EC2MetadataServiceEndpoint, "AWS_EC2_METADATA_SERVICE_ENDPOINT"), + EC2MetadataServiceEndpointMode: stringAttrEnvFallback(c.EC2MetadataServiceEndpointMode, "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"), + + SharedCredentialsFiles: 
stringArrayAttrEnvFallback(c.SharedCredentialsFiles, "AWS_SHARED_CREDENTIALS_FILE"),
		SharedConfigFiles:         stringArrayAttrEnvFallback(c.SharedConfigFiles, "AWS_SHARED_CONFIG_FILE"),
		AssumeRole:                assumeRole,
		AssumeRoleWithWebIdentity: assumeRoleWithWebIdentity,
		AllowedAccountIds:         c.AllowedAccountIds,
		ForbiddenAccountIds:       c.ForbiddenAccountIds,
	}, nil
}

// Build validates the configuration, resolves the AWS SDK configuration
// (credentials, region, endpoints) and constructs the key provider together
// with a fresh, empty metadata struct.
func (c Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) {
	err := c.validate()
	if err != nil {
		return nil, nil, err
	}

	cfg, err := c.asAWSBase()
	if err != nil {
		return nil, nil, err
	}

	ctx := context.Background()
	ctx, baselog := attachLoggerToContext(ctx)
	cfg.Logger = baselog

	_, awsConfig, awsDiags := awsbase.GetAwsConfig(ctx, cfg)

	if awsDiags.HasError() {
		out := "errors were encountered in aws kms configuration"
		for _, diag := range awsDiags.Errors() {
			out += "\n" + diag.Summary() + " : " + diag.Detail()
		}

		// Use a constant format string: the aggregated diagnostics may contain
		// "%" characters which fmt.Errorf(out) would misinterpret as verbs
		// (this also silences the go vet "non-constant format string" warning).
		return nil, nil, fmt.Errorf("%s", out)
	}

	return &keyProvider{
		Config: c,
		svc:    newKMSFromConfig(awsConfig),
		ctx:    ctx,
	}, new(keyMeta), nil
}

// validate checks the configuration for the key provider: both kms_key_id and
// key_spec must be non-empty, and key_spec must be a valid AWS DataKeySpec.
func (c Config) validate() (err error) {
	if c.KMSKeyID == "" {
		return &keyprovider.ErrInvalidConfiguration{
			Message: "no kms_key_id provided",
		}
	}

	if c.KeySpec == "" {
		return &keyprovider.ErrInvalidConfiguration{
			Message: "no key_spec provided",
		}
	}

	spec := c.getKeySpecAsAWSType()
	if spec == nil {
		// This is to fetch a list of the values from the enum, because `spec` here can be nil, so we have to grab
		// at least one of the enum possibilities here just to call .Values()
		values := types.DataKeySpecAes256.Values()
		return &keyprovider.ErrInvalidConfiguration{
			Message: fmt.Sprintf("invalid key_spec %s, expected one of %v", c.KeySpec, values),
		}
	}

	return nil
}

// getKeySpecAsAWSType handles conversion between the string from the config and the aws expected enum type
// it will return nil
if it cannot find a match +func (c Config) getKeySpecAsAWSType() *types.DataKeySpec { + var spec types.DataKeySpec + for _, opt := range spec.Values() { + if string(opt) == c.KeySpec { + return &opt + } + } + return nil +} + +// Mirrored from s3 backend config +func attachLoggerToContext(ctx context.Context) (context.Context, baselogging.HcLogger) { + ctx, baseLog := baselogging.NewHcLogger(ctx, logging.HCLogger().Named("backend-s3")) + ctx = baselogging.RegisterLogger(ctx, baseLog) + return ctx, baseLog +} diff --git a/pkg/encryption/keyprovider/aws_kms/config_assumerole.go b/pkg/encryption/keyprovider/aws_kms/config_assumerole.go new file mode 100644 index 00000000000..8d006e7af2b --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/config_assumerole.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/arn" + awsbase "github.com/hashicorp/aws-sdk-go-base/v2" +) + +type AssumeRole struct { + RoleARN string `hcl:"role_arn"` + Duration string `hcl:"duration,optional"` + ExternalID string `hcl:"external_id,optional"` + Policy string `hcl:"policy,optional"` + PolicyARNs []string `hcl:"policy_arns,optional"` + SessionName string `hcl:"session_name,optional"` + Tags map[string]string `hcl:"tags,optional"` + TransitiveTagKeys []string `hcl:"transitive_tag_keys,optional"` +} + +type AssumeRoleWithWebIdentity struct { + RoleARN string `hcl:"role_arn,optional"` + Duration string `hcl:"duration,optional"` + Policy string `hcl:"policy,optional"` + PolicyARNs []string `hcl:"policy_arns,optional"` + SessionName string `hcl:"session_name,optional"` + WebIdentityToken string `hcl:"web_identity_token,optional"` + WebIdentityTokenFile string `hcl:"web_identity_token_file,optional"` +} + +func parseAssumeRoleDuration(val string) (dur time.Duration, err error) { + if 
len(val) == 0 { + return dur, nil + } + dur, err = time.ParseDuration(val) + if err != nil { + return dur, fmt.Errorf("invalid assume_role duration %q: %w", val, err) + } + + minDur := 15 * time.Minute + maxDur := 12 * time.Hour + if (minDur > 0 && dur < minDur) || (maxDur > 0 && dur > maxDur) { + return dur, fmt.Errorf("assume_role duration must be between %s and %s, had %s", minDur, maxDur, dur) + } + return dur, nil +} + +func validatePolicyARNs(arns []string) error { + for _, v := range arns { + arn, err := arn.Parse(v) + if err != nil { + return err + } + if !strings.HasPrefix(arn.Resource, "policy/") { + return fmt.Errorf("arn must be a valid IAM Policy ARN, got %q", v) + } + } + return nil +} + +func (r *AssumeRole) asAWSBase() (*awsbase.AssumeRole, error) { + if r == nil { + return nil, nil + } + + duration, err := parseAssumeRoleDuration(r.Duration) + if err != nil { + return nil, err + } + + err = validatePolicyARNs(r.PolicyARNs) + if err != nil { + return nil, err + } + + assumeRole := &awsbase.AssumeRole{ + RoleARN: r.RoleARN, + Duration: duration, + ExternalID: r.ExternalID, + Policy: r.Policy, + PolicyARNs: r.PolicyARNs, + SessionName: r.SessionName, + Tags: r.Tags, + TransitiveTagKeys: r.TransitiveTagKeys, + } + return assumeRole, nil +} +func (r *AssumeRoleWithWebIdentity) asAWSBase() (*awsbase.AssumeRoleWithWebIdentity, error) { + if r == nil { + return nil, nil + } + + if r.WebIdentityToken != "" && r.WebIdentityTokenFile != "" { + return nil, fmt.Errorf("conflicting config attributes: only web_identity_token or web_identity_token_file can be specified, not both") + } + + duration, err := parseAssumeRoleDuration(r.Duration) + if err != nil { + return nil, err + } + + err = validatePolicyARNs(r.PolicyARNs) + if err != nil { + return nil, err + } + + return &awsbase.AssumeRoleWithWebIdentity{ + RoleARN: stringAttrEnvFallback(r.RoleARN, "AWS_ROLE_ARN"), + Duration: duration, + Policy: r.Policy, + PolicyARNs: r.PolicyARNs, + SessionName: 
stringAttrEnvFallback(r.SessionName, "AWS_ROLE_SESSION_NAME"), + WebIdentityToken: stringAttrEnvFallback(r.WebIdentityToken, "AWS_WEB_IDENTITY_TOKEN"), + WebIdentityTokenFile: stringAttrEnvFallback(r.WebIdentityTokenFile, "AWS_WEB_IDENTITY_TOKEN_FILE"), + }, nil +} diff --git a/pkg/encryption/keyprovider/aws_kms/config_endpoints.go b/pkg/encryption/keyprovider/aws_kms/config_endpoints.go new file mode 100644 index 00000000000..243852eb740 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/config_endpoints.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "fmt" + "log" + "regexp" +) + +type ConfigEndpoints struct { + IAM string `hcl:"iam,optional"` + STS string `hcl:"sts,optional"` +} + +// Mirrored from s3 backend config +func includeProtoIfNessesary(endpoint string) string { + if matched, _ := regexp.MatchString("[a-z]*://.*", endpoint); !matched { + log.Printf("[DEBUG] Adding https:// prefix to endpoint '%s'", endpoint) + endpoint = fmt.Sprintf("https://%s", endpoint) + } + return endpoint +} + +func (c Config) getEndpoints() (ConfigEndpoints, error) { + endpoints := ConfigEndpoints{} + + // Make sure we have 0 or 1 endpoint blocks + if len(c.Endpoints) == 1 { + endpoints = c.Endpoints[0] + } + if len(c.Endpoints) > 1 { + return endpoints, fmt.Errorf("expected single aws_kms endpoints block, multiple provided") + } + + // Endpoint formatting + if len(endpoints.IAM) != 0 { + endpoints.IAM = includeProtoIfNessesary(endpoints.IAM) + } + if len(endpoints.STS) != 0 { + endpoints.STS = includeProtoIfNessesary(endpoints.STS) + } + return endpoints, nil +} diff --git a/pkg/encryption/keyprovider/aws_kms/config_test.go b/pkg/encryption/keyprovider/aws_kms/config_test.go new file mode 100644 index 00000000000..35430599a7a --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/config_test.go @@ -0,0 +1,291 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/davecgh/go-spew/spew" + awsbase "github.com/hashicorp/aws-sdk-go-base/v2" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/version" +) + +func TestConfig_asAWSBase(t *testing.T) { + testCases := []struct { + name string + input string + expected awsbase.Config + }{ + { + name: "minconfig", + input: ` + kms_key_id = "my-kms-key-id" + key_spec = "AES_256" + region = "magic-mountain"`, + expected: awsbase.Config{ + Region: "magic-mountain", + CallerDocumentationURL: "https://opentofu.org/docs/language/settings/backends/s3", + CallerName: "KMS Key Provider", + MaxRetries: 5, + UserAgent: awsbase.UserAgentProducts{ + {Name: "APN", Version: "1.0"}, + {Name: httpclient.DefaultApplicationName, Version: version.String()}, + }, + }, + }, + { + name: "maxconfig", + input: ` + kms_key_id = "my-kms-key-id" + key_spec = "AES_256" + + access_key = "my-access-key" + endpoints { + iam = "endpoint-iam" + sts = "endpoint-sts" + } + max_retries = 42 + profile = "my-profile" + region = "my-region" + secret_key = "my-secret-key" + skip_credentials_validation = true + skip_requesting_account_id = true + sts_region = "my-sts-region" + token = "my-token" + http_proxy = "my-http-proxy" + https_proxy = "my-https-proxy" + no_proxy = "my-noproxy" + insecure = true + use_dualstack_endpoint = true + use_fips_endpoint = true + custom_ca_bundle = "my-custom-ca-bundle" + ec2_metadata_service_endpoint = "my-emde" + ec2_metadata_service_endpoint_mode = "my-emde-mode" + skip_metadata_api_check 
= false + shared_credentials_files = ["my-scredf"] + shared_config_files = ["my-sconff"] + assume_role = { + role_arn = "ar_arn" + duration = "4h" + external_id = "ar_extid" + policy = "ar_policy" + policy_arns = ["arn:aws:iam::123456789012:policy/AR"] + session_name = "ar_session_name" + tags = { + foo = "bar" + } + transitive_tag_keys = ["ar_tags"] + } + assume_role_with_web_identity = { + role_arn = "wi_arn" + duration = "5h" + policy = "wi_policy" + policy_arns = ["arn:aws:iam::123456789012:policy/WI"] + session_name = "wi_session_name" + web_identity_token = "wi_token" + //web_identity_token_file = "wi_token_file" + } + allowed_account_ids = ["account"] + //forbidden_account_ids = ? + retry_mode = "adaptive" + `, + expected: awsbase.Config{ + CallerDocumentationURL: "https://opentofu.org/docs/language/settings/backends/s3", + CallerName: "KMS Key Provider", + UserAgent: awsbase.UserAgentProducts{ + {Name: "APN", Version: "1.0"}, + {Name: httpclient.DefaultApplicationName, Version: version.String()}, + }, + + AccessKey: "my-access-key", + IamEndpoint: "https://endpoint-iam", + MaxRetries: 42, + Profile: "my-profile", + Region: "my-region", + SecretKey: "my-secret-key", + SkipCredsValidation: true, + SkipRequestingAccountId: true, + StsEndpoint: "https://endpoint-sts", + StsRegion: "my-sts-region", + Token: "my-token", + HTTPProxy: aws.String("my-http-proxy"), + HTTPSProxy: aws.String("my-https-proxy"), + NoProxy: "my-noproxy", + Insecure: true, + UseDualStackEndpoint: true, + UseFIPSEndpoint: true, + CustomCABundle: "my-custom-ca-bundle", + EC2MetadataServiceEnableState: imds.ClientDisabled, + EC2MetadataServiceEndpoint: "my-emde", + EC2MetadataServiceEndpointMode: "my-emde-mode", + SharedCredentialsFiles: []string{"my-scredf"}, + SharedConfigFiles: []string{"my-sconff"}, + AssumeRole: &awsbase.AssumeRole{ + RoleARN: "ar_arn", + Duration: time.Hour * 4, + ExternalID: "ar_extid", + Policy: "ar_policy", + PolicyARNs: []string{ + 
"arn:aws:iam::123456789012:policy/AR", + }, + SessionName: "ar_session_name", + Tags: map[string]string{ + "foo": "bar", + }, + TransitiveTagKeys: []string{ + "ar_tags", + }, + }, + AssumeRoleWithWebIdentity: &awsbase.AssumeRoleWithWebIdentity{ + RoleARN: "wi_arn", + Duration: time.Hour * 5, + Policy: "wi_policy", + PolicyARNs: []string{ + "arn:aws:iam::123456789012:policy/WI", + }, + SessionName: "wi_session_name", + WebIdentityToken: "wi_token", + WebIdentityTokenFile: "", + }, + AllowedAccountIds: []string{"account"}, + RetryMode: aws.RetryModeAdaptive, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + input, diags := hclsyntax.ParseConfig([]byte(tc.input), "test", hcl.InitialPos) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + config := new(Config) + + diags = gohcl.DecodeBody(input.Body, nil, config) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + if config.KMSKeyID != "my-kms-key-id" { + t.Fatal("missing kms_key_id") + } + if config.KeySpec != "AES_256" { + t.Fatal("missing key_spec") + } + + actual, err := config.asAWSBase() + if err != nil { + t.Fatal(err.Error()) + } + if !reflect.DeepEqual(tc.expected, *actual) { + t.Fatalf("Expected %s, got %s", spew.Sdump(tc.expected), spew.Sdump(*actual)) + } + }) + } +} + +func TestValidate(t *testing.T) { + testCases := []struct { + name string + input Config + expected error + }{ + { + name: "valid", + input: Config{ + KMSKeyID: "my-kms-key-id", + KeySpec: "AES_256", + }, + expected: nil, + }, + { + name: "missing kms_key_id", + input: Config{ + KMSKeyID: "", + KeySpec: "AES_256", + }, + expected: fmt.Errorf("no kms_key_id provided"), + }, + { + name: "missing key_spec", + input: Config{ + KMSKeyID: "my-kms-key-id", + KeySpec: "", + }, + expected: fmt.Errorf("no key_spec provided"), + }, + { + name: "invalid key_spec", + input: Config{ + KMSKeyID: "my-kms-key-id", + KeySpec: "invalid??", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, 
func(t *testing.T) { + err := tc.input.validate() + // check if the error message is the same + if tc.expected != nil { + if err.Error() != tc.expected.Error() { + t.Fatalf("Expected %q, got %q", tc.expected.Error(), err.Error()) + } + } + }) + } +} + +func TestGetKeySpecAsAWSType(t *testing.T) { + + aes256 := types.DataKeySpecAes256 + aes128 := types.DataKeySpecAes128 + + cases := []struct { + key string + expected *types.DataKeySpec + }{ + { + key: "AES_256", + expected: &aes256, + }, + { + key: "AES_128", + expected: &aes128, + }, + { + key: "", + expected: nil, + }, + { + key: "invalidKey", + expected: nil, + }, + } + + for _, c := range cases { + t.Run(c.key, func(t *testing.T) { + config := Config{ + KeySpec: c.key, + } + actual := config.getKeySpecAsAWSType() + if !reflect.DeepEqual(c.expected, actual) { + t.Fatalf("Expected %s, got %s", spew.Sdump(c.expected), spew.Sdump(actual)) + } + }) + } +} diff --git a/pkg/encryption/keyprovider/aws_kms/descriptor.go b/pkg/encryption/keyprovider/aws_kms/descriptor.go new file mode 100644 index 00000000000..6b908b65c26 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/descriptor.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +func New() keyprovider.Descriptor { + return &descriptor{} +} + +type descriptor struct { +} + +func (f descriptor) ID() keyprovider.ID { + return "aws_kms" +} + +func (f descriptor) ConfigStruct() keyprovider.Config { + return &Config{} +} diff --git a/pkg/encryption/keyprovider/aws_kms/mock_test.go b/pkg/encryption/keyprovider/aws_kms/mock_test.go new file mode 100644 index 00000000000..681fa13e0de --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/mock_test.go @@ -0,0 +1,50 @@ +package aws_kms + +import ( + "context" + "crypto/rand" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kms" +) + +type mockKMS struct { + genkey func(params *kms.GenerateDataKeyInput) (*kms.GenerateDataKeyOutput, error) + decrypt func(params *kms.DecryptInput) (*kms.DecryptOutput, error) +} + +func (m *mockKMS) GenerateDataKey(ctx context.Context, params *kms.GenerateDataKeyInput, optFns ...func(*kms.Options)) (*kms.GenerateDataKeyOutput, error) { + return m.genkey(params) +} +func (m *mockKMS) Decrypt(ctx context.Context, params *kms.DecryptInput, optFns ...func(*kms.Options)) (*kms.DecryptOutput, error) { + return m.decrypt(params) +} + +func injectMock(m *mockKMS) { + newKMSFromConfig = func(cfg aws.Config) kmsClient { + return m + } +} + +func injectDefaultMock() { + injectMock(&mockKMS{ + genkey: func(params *kms.GenerateDataKeyInput) (*kms.GenerateDataKeyOutput, error) { + keyData := make([]byte, 32) + _, err := rand.Read(keyData) + if err != nil { + panic(err) + } + + return &kms.GenerateDataKeyOutput{ + CiphertextBlob: append([]byte(*params.KeyId), keyData...), + Plaintext: keyData, + }, nil + + }, + decrypt: func(params *kms.DecryptInput) (*kms.DecryptOutput, error) { + return &kms.DecryptOutput{ + Plaintext: params.CiphertextBlob[:len(*params.KeyId)], + }, nil + }, + }) +} diff --git 
a/pkg/encryption/keyprovider/aws_kms/provider.go b/pkg/encryption/keyprovider/aws_kms/provider.go new file mode 100644 index 00000000000..5bb1c6e69d9 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/provider.go @@ -0,0 +1,86 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kms" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +type keyMeta struct { + CiphertextBlob []byte `json:"ciphertext_blob"` +} + +func (m keyMeta) isPresent() bool { + return len(m.CiphertextBlob) != 0 +} + +type kmsClient interface { + GenerateDataKey(ctx context.Context, params *kms.GenerateDataKeyInput, optFns ...func(*kms.Options)) (*kms.GenerateDataKeyOutput, error) + Decrypt(ctx context.Context, params *kms.DecryptInput, optFns ...func(*kms.Options)) (*kms.DecryptOutput, error) +} + +type keyProvider struct { + Config + svc kmsClient + ctx context.Context +} + +func (p keyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) { + if rawMeta == nil { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: no metadata struct provided"} + } + inMeta, ok := rawMeta.(*keyMeta) + if !ok { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: metadata struct is not of the correct type"} + } + + outMeta := &keyMeta{} + out := keyprovider.Output{} + + // as validation has happened in the config, we can safely cast here and not worry about the cast failing + spec := types.DataKeySpec(p.KeySpec) + + generatedKeyData, err := p.svc.GenerateDataKey(p.ctx, &kms.GenerateDataKeyInput{ + KeyId: aws.String(p.KMSKeyID), + KeySpec: spec, + }) + + if err != nil { + return out, outMeta, 
&keyprovider.ErrKeyProviderFailure{ + Message: "failed to generate key", + Cause: err, + } + } + + // Set initial outputs that are always set + out.EncryptionKey = generatedKeyData.Plaintext + outMeta.CiphertextBlob = generatedKeyData.CiphertextBlob + + // We do not set the DecryptionKey here as we should only be setting the decryption key if we are decrypting + // and that is handled below when we check if the inMeta has a CiphertextBlob + + if inMeta.isPresent() { + // We have an existing decryption key to decrypt, so we should now populate the DecryptionKey + decryptedKeyData, decryptErr := p.svc.Decrypt(p.ctx, &kms.DecryptInput{ + KeyId: aws.String(p.KMSKeyID), + CiphertextBlob: inMeta.CiphertextBlob, + }) + + if decryptErr != nil { + return out, outMeta, &keyprovider.ErrKeyProviderFailure{Cause: decryptErr} + } + + // Set decryption key on the output + out.DecryptionKey = decryptedKeyData.Plaintext + } + + return out, outMeta, nil +} diff --git a/pkg/encryption/keyprovider/aws_kms/provider_test.go b/pkg/encryption/keyprovider/aws_kms/provider_test.go new file mode 100644 index 00000000000..e3b42f7b6d4 --- /dev/null +++ b/pkg/encryption/keyprovider/aws_kms/provider_test.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws_kms + +import ( + "os" + "testing" +) + +func getKey(t *testing.T) string { + if os.Getenv("TF_ACC") == "" && os.Getenv("TF_KMS_TEST") == "" { + return "" + } + return os.Getenv("TF_AWS_KMS_KEY_ID") +} + +func TestKMSProvider_Simple(t *testing.T) { + testKeyId := getKey(t) + if testKeyId == "" { + testKeyId = "alias/my-mock-key" + injectDefaultMock() + + t.Setenv("AWS_REGION", "us-east-1") + t.Setenv("AWS_ACCESS_KEY_ID", "accesskey") + t.Setenv("AWS_SECRET_ACCESS_KEY", "secretkey") + } + + // Constructs an AWS KMS key provider config that accepts the key id + providerConfig := Config{ + KMSKeyID: testKeyId, + KeySpec: "AES_256", + + SkipCredsValidation: true, // Required for mocking + } + + // Now that we have the config, we can build the provider + provider, metaIn, err := providerConfig.Build() + if err != nil { + t.Fatalf("Error building provider: %s", err) + } + + // Now we can test the provider + output, meta, err := provider.Provide(metaIn) + if err != nil { + t.Fatalf("Error providing keys: %s", err) + } + + if len(output.EncryptionKey) == 0 { + t.Fatalf("No encryption key provided") + } + + if len(output.DecryptionKey) != 0 { + t.Fatalf("Decryption key provided and should not be") + } + + if len(meta.(*keyMeta).CiphertextBlob) == 0 { + t.Fatalf("No ciphertext blob provided") + } + + t.Log("Continue to meta -> decryption key") + + // Now that we have an encryption key and its meta, let's get the decryption key + output, meta, err = provider.Provide(meta) + if err != nil { + t.Fatalf("Error providing keys: %s", err) + } + + if len(output.EncryptionKey) == 0 { + t.Fatalf("No encryption key provided") + } + + if len(output.DecryptionKey) == 0 { + t.Fatalf("No decryption key provided") + } + + if len(meta.(*keyMeta).CiphertextBlob) == 0 { + t.Fatalf("No ciphertext blob provided") + } +} diff --git a/pkg/encryption/keyprovider/compliancetest/compliance.go b/pkg/encryption/keyprovider/compliancetest/compliance.go
new file mode 100644 index 00000000000..74c9c90386b --- /dev/null +++ b/pkg/encryption/keyprovider/compliancetest/compliance.go @@ -0,0 +1,501 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "bytes" + "encoding/json" + "errors" + "reflect" + "testing" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/compliancetest" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +func ComplianceTest[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TMeta keyprovider.KeyMeta, TKeyProvider keyprovider.KeyProvider]( + t *testing.T, + config TestConfiguration[TDescriptor, TConfig, TMeta, TKeyProvider], +) { + var cfg TConfig + cfgType := reflect.TypeOf(cfg) + if cfgType.Kind() != reflect.Ptr || cfgType.Elem().Kind() != reflect.Struct { + compliancetest.Fail(t, "You declared the config type to be %T, but it should be a pointer to a struct. Please fix your call to ComplianceTest().", cfg) + } + + var meta TMeta + metaType := reflect.TypeOf(meta) + if metaType.Kind() != reflect.Interface { + if metaType.Kind() != reflect.Ptr || metaType.Elem().Kind() != reflect.Struct { + compliancetest.Log(t, "You declared a metadata type as %T, but it should be a pointer to a struct. Please fix your call to ComplianceTest().", meta) + } + } else { + compliancetest.Log(t, "Metadata type declared as interface{}, assuming the key provider does not need metadata.
(This will be validated later.)") + } + + t.Run("ID", func(t *testing.T) { + complianceTestID(t, config) + }) + + t.Run("ConfigStruct", func(t *testing.T) { + compliancetest.ConfigStruct[TConfig](t, config.Descriptor.ConfigStruct()) + + t.Run("hcl-parsing", func(t *testing.T) { + if config.HCLParseTestCases == nil { + compliancetest.Fail(t, "Please provide a map in HCLParseTestCases.") + } + for name, tc := range config.HCLParseTestCases { + tc := tc + t.Run(name, func(t *testing.T) { + complianceTestHCLParsingTestCase(t, tc, config) + }) + } + }) + + t.Run("config", func(t *testing.T) { + if config.ConfigStructTestCases == nil { + compliancetest.Fail(t, "Please provide a map in ConfigStructTestCases.") + } + for name, tc := range config.ConfigStructTestCases { + tc := tc + t.Run(name, func(t *testing.T) { + complianceTestConfigCase[TConfig, TKeyProvider, TMeta](t, tc) + }) + } + }) + }) + + t.Run("metadata", func(t *testing.T) { + if config.MetadataStructTestCases == nil { + compliancetest.Fail(t, "Please provide a map in MetadataStructTestCases.") + } + for name, tc := range config.MetadataStructTestCases { + tc := tc + t.Run(name, func(t *testing.T) { + complianceTestMetadataTestCase[TConfig, TKeyProvider, TMeta](t, tc) + }) + } + }) + + t.Run("provide", func(t *testing.T) { + complianceTestProvide[TDescriptor, TConfig, TKeyProvider, TMeta](t, config) + }) + + t.Run("test-completeness", func(t *testing.T) { + t.Run("HCL", func(t *testing.T) { + hasNotValidHCL := false + hasValidHCLNotValidBuild := false + hasValidHCLAndBuild := false + for _, tc := range config.HCLParseTestCases { + if !tc.ValidHCL { + hasNotValidHCL = true + } else { + if tc.ValidBuild { + hasValidHCLAndBuild = true + } else { + hasValidHCLNotValidBuild = true + } + } + } + if !hasNotValidHCL { + compliancetest.Fail(t, "Please define at least one test with an invalid HCL.") + } + if !hasValidHCLNotValidBuild { + compliancetest.Fail(t, "Please define at least one test with a valid HCL that will 
fail on Build() for validation.") + } + if !hasValidHCLAndBuild { + compliancetest.Fail(t, "Please define at least one test with a valid HCL that will succeed on Build() for validation.") + } + }) + t.Run("metadata", func(t *testing.T) { + hasNotPresent := false + hasNotValid := false + hasValid := false + for _, tc := range config.MetadataStructTestCases { + if !tc.IsPresent { + hasNotPresent = true + } else { + if tc.IsValid { + hasValid = true + } else { + hasNotValid = true + } + } + } + if !hasNotPresent { + compliancetest.Fail(t, "Please provide at least one metadata test that represents non-present metadata.") + } + if !hasNotValid { + compliancetest.Log(t, "Warning: Please provide at least one metadata test that represents an invalid metadata that is present.") + } + if !hasValid { + compliancetest.Log(t, "Warning: Please provide at least one metadata test that represents a valid metadata.") + } + }) + }) +} + +func complianceTestProvide[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider, TMeta keyprovider.KeyMeta]( + t *testing.T, + cfg TestConfiguration[TDescriptor, TConfig, TMeta, TKeyProvider], +) { + if reflect.ValueOf(cfg.ProvideTestCase.ValidConfig).IsNil() { + compliancetest.Fail(t, "Please provide a ValidConfig in ProvideTestCase.") + } + keyProviderConfig := cfg.ProvideTestCase.ValidConfig + t.Run("nil-metadata", func(t *testing.T) { + keyProvider, inMeta := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, keyProviderConfig, true) + + if reflect.ValueOf(inMeta).IsNil() { + compliancetest.Skip(t, "The key provider does not have metadata (no metadata returned from Build()).") + return + } + _, _, err := keyProvider.Provide(nil) + if err == nil { + compliancetest.Fail(t, "Provide() did not return no error when provided with nil metadata.") + } else { + compliancetest.Log(t, "Provide() correctly returned an error when provided with nil metadata (%v).", err) + } + var typedError 
*keyprovider.ErrInvalidMetadata + if !errors.As(err, &typedError) { + compliancetest.Fail(t, "Provide() returned an error of the type %T instead of %T. Please use the correct typed errors.", err, typedError) + } else { + compliancetest.Log(t, "Provide() correctly returned a %T when provided with nil metadata.", typedError) + } + }) + t.Run("incorrect-metadata-type", func(t *testing.T) { + keyProvider, inMeta := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, keyProviderConfig, true) + if reflect.ValueOf(inMeta).IsNil() { + compliancetest.Skip(t, "The key provider does not have metadata (no metadata returned from Build()).") + return + } + _, _, err := keyProvider.Provide(&struct{}{}) + if err == nil { + compliancetest.Fail(t, "Provide() did not return no error when provided with an incorrect metadata type.") + } else { + compliancetest.Log(t, "Provide() correctly returned an error when provided with an metadata type (%v).", err) + } + var typedError *keyprovider.ErrInvalidMetadata + if !errors.As(err, &typedError) { + compliancetest.Fail(t, "Provide() returned an error of the type %T instead of %T. 
Please use the correct typed errors.", err, typedError) + } else { + compliancetest.Log(t, "Provide() correctly returned a %T when provided with an incorrect metadata type.", typedError) + } + }) + t.Run("round-trip", func(t *testing.T) { + complianceTestRoundTrip(t, keyProviderConfig, cfg) + }) +} + +func complianceTestRoundTrip[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider, TMeta keyprovider.KeyMeta]( + t *testing.T, + keyProviderConfig TConfig, + cfg TestConfiguration[TDescriptor, TConfig, TMeta, TKeyProvider], +) { + keyProvider, inMeta := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, keyProviderConfig, true) + output, outMeta, err := keyProvider.Provide(inMeta) + if err != nil { + compliancetest.Fail(t, "Provide() failed (%v).", err) + } else { + compliancetest.Log(t, "Provide() succeeded.") + } + if cfg.ProvideTestCase.ValidateMetadata != nil { + if err := cfg.ProvideTestCase.ValidateMetadata(outMeta.(TMeta)); err != nil { + compliancetest.Fail(t, "The metadata after the second Provide() call failed the test (%v).", err) + } + } + + // Create a second key provider to avoid internal state. 
+ keyProvider2, inMeta2 := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, keyProviderConfig, true) + + marshalledMeta, err := json.Marshal(outMeta) + if err != nil { + compliancetest.Fail(t, "JSON-marshalling output meta failed (%v).", err) + } else { + compliancetest.Log(t, "JSON-marshalling output meta succeeded: %s", marshalledMeta) + } + + if err := json.Unmarshal(marshalledMeta, &inMeta2); err != nil { + compliancetest.Fail(t, "JSON-unmarshalling meta failed (%v).", err) + } else { + compliancetest.Log(t, "JSON-unmarshalling meta succeeded.") + } + + output2, outMeta2, err := keyProvider2.Provide(inMeta2) + if err != nil { + compliancetest.Fail(t, "Provide() on the subsequent run failed (%v).", err) + } else { + compliancetest.Log(t, "Provide() on the subsequent run succeeded.") + } + + if cfg.ProvideTestCase.ExpectedOutput != nil { + if !bytes.Equal(cfg.ProvideTestCase.ExpectedOutput.EncryptionKey, output.EncryptionKey) { + compliancetest.Fail(t, "Incorrect encryption key received after the first Provide() call. Please set a break point to the line of this error message to debug this error.") + } + if !bytes.Equal(cfg.ProvideTestCase.ExpectedOutput.DecryptionKey, output2.DecryptionKey) { + compliancetest.Fail(t, "Incorrect decryption key received after the second Provide() call. Please set a break point to the line of this error message to debug this error.") + } + if !bytes.Equal(cfg.ProvideTestCase.ExpectedOutput.EncryptionKey, output2.EncryptionKey) { + compliancetest.Fail(t, "Incorrect encryption key received after the second Provide() call. 
Please set a break point to the line of this error message to debug this error.") + } + } + if cfg.ProvideTestCase.ValidateMetadata != nil { + if err := cfg.ProvideTestCase.ValidateMetadata(outMeta2.(TMeta)); err != nil { + compliancetest.Fail(t, "The metadata after the second Provide() call failed the test (%v).", err) + } + } + if cfg.ProvideTestCase.ValidateKeys == nil { + if !bytes.Equal(output2.DecryptionKey, output.EncryptionKey) { + compliancetest.Fail( + t, + "The encryption key from the first call to Provide() does not match the decryption key provided by the second Provide() call. If you intend the two keys to be different, please provide an ProvideTestCase.ValidateKeys function. If this is not intended, please set a break point to the line of this error message.", + ) + } else { + compliancetest.Log( + t, + "The encryption and decryption keys match.", + ) + } + } else { + if err := cfg.ProvideTestCase.ValidateKeys(output2.DecryptionKey, output.EncryptionKey); err != nil { + compliancetest.Fail( + t, + "The encryption key from the first call to Provide() does not match the decryption key provided by the second Provide() call (%v),", + err, + ) + } else { + compliancetest.Log( + t, + "The encryption and decryption keys match.", + ) + } + } +} + +func complianceTestID[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TMeta keyprovider.KeyMeta, TKeyProvider keyprovider.KeyProvider]( + t *testing.T, + config TestConfiguration[TDescriptor, TConfig, TMeta, TKeyProvider], +) { + id := config.Descriptor.ID() + if id == "" { + compliancetest.Fail(t, "ID is empty.") + } else { + compliancetest.Log(t, "ID is not empty.") + } + if err := id.Validate(); err != nil { + compliancetest.Fail(t, "ID failed validation: %s", id) + } else { + compliancetest.Log(t, "ID passed validation.") + } +} + +func complianceTestHCLParsingTestCase[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TMeta keyprovider.KeyMeta, TKeyProvider 
keyprovider.KeyProvider]( + t *testing.T, + tc HCLParseTestCase[TConfig, TKeyProvider], + cfg TestConfiguration[TDescriptor, TConfig, TMeta, TKeyProvider], +) { + parseError := false + parsedConfig, diags := config.LoadConfigFromString("config.hcl", tc.HCL) + if tc.ValidHCL { + if diags.HasErrors() { + compliancetest.Fail(t, "Unexpected HCL error (%v).", diags) + } else { + compliancetest.Log(t, "HCL successfully parsed.") + } + } else { + if diags.HasErrors() { + parseError = true + } + } + + configStruct := cfg.Descriptor.ConfigStruct() + diags = gohcl.DecodeBody( + parsedConfig.KeyProviderConfigs[0].Body, + nil, + configStruct, + ) + var keyProvider TKeyProvider + if tc.ValidHCL { + if diags.HasErrors() { + compliancetest.Fail(t, "Failed to parse empty HCL block into config struct (%v).", diags) + } else { + compliancetest.Log(t, "HCL successfully loaded into config struct.") + } + + keyProvider, _ = complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, configStruct, tc.ValidBuild) + } else { + if !parseError && !diags.HasErrors() { + compliancetest.Fail(t, "Expected error during HCL parsing, but no error was returned.") + } else { + compliancetest.Log(t, "HCL loading errored correctly (%v).", diags) + } + } + + if tc.Validate != nil { + if err := tc.Validate(configStruct.(TConfig), keyProvider); err != nil { + compliancetest.Fail(t, "Error during validation and configuration (%v).", err) + } else { + compliancetest.Log(t, "Successfully validated parsed HCL config and applied modifications.") + } + } else { + compliancetest.Log(t, "No ValidateAndConfigure provided, skipping HCL parse validation.") + } +} + +func complianceTestConfigCase[TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider, TMeta keyprovider.KeyMeta]( + t *testing.T, + tc ConfigStructTestCase[TConfig, TKeyProvider], +) { + keyProvider, _ := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, tc.Config, tc.ValidBuild) + if tc.Validate != nil { + if err := 
tc.Validate(keyProvider); err != nil { + compliancetest.Fail(t, "Error during validation and configuration (%v).", err) + } else { + compliancetest.Log(t, "Successfully validated parsed HCL config and applied modifications.") + } + } else { + compliancetest.Log(t, "No ValidateAndConfigure provided, skipping HCL parse validation.") + } +} + +func complianceTestBuildConfigAndValidate[TKeyProvider keyprovider.KeyProvider, TMeta keyprovider.KeyMeta]( + t *testing.T, + configStruct keyprovider.Config, + validBuild bool, +) (TKeyProvider, TMeta) { + if configStruct == nil { + compliancetest.Fail(t, "Nil struct passed!") + } + + var typedKeyProvider TKeyProvider + var typedMeta TMeta + var ok bool + kp, meta, err := configStruct.Build() + if validBuild { + if err != nil { + compliancetest.Fail(t, "Build() returned an unexpected error: %v.", err) + } else { + compliancetest.Log(t, "Build() did not return an error.") + } + typedKeyProvider, ok = kp.(TKeyProvider) + if !ok { + compliancetest.Fail(t, "Build() returned an invalid key provider type of %T, expected %T", kp, typedKeyProvider) + } else { + compliancetest.Log(t, "Build() returned the correct key provider type of %T.", typedKeyProvider) + } + + metaType := reflect.TypeOf(typedMeta) + if meta == nil { + if metaType.Kind() != reflect.Interface { + compliancetest.Fail(t, "Build() did not return a metadata, but you declared a metadata type. Please make sure that you always return the same metadata type.") + } else { + compliancetest.Log(t, "Build() did not return a metadata and the declared metadata type is interface{}.") + } + } else { + if metaType.Kind() == reflect.Interface { + compliancetest.Fail(t, "Build() returned metadata, but you declared an interface type as the metadata type. 
Please always declare a pointer to a struct as a metadata type.") + } else { + compliancetest.Log(t, "Build() returned metadata and the declared metadata type is not an interface.") + } + typedMeta, ok = meta.(TMeta) + if !ok { + compliancetest.Fail(t, "Build() returned an invalid metadata type of %T, expected %T", meta, typedMeta) + } else { + compliancetest.Log(t, "Build() returned the correct metadata type of %T.", meta) + } + } + } else { + if err == nil { + compliancetest.Fail(t, "Build() did not return an error.") + } else { + compliancetest.Log(t, "Build() correctly returned an error: %v", err) + } + + var typedError *keyprovider.ErrInvalidConfiguration + if !errors.As(err, &typedError) { + compliancetest.Fail( + t, + "Build() did not return the correct error type, got %T but expected %T", + err, + typedError, + ) + } else { + compliancetest.Log(t, "Build() returned the correct error type of %T", typedError) + } + } + return typedKeyProvider, typedMeta +} + +func complianceTestMetadataTestCase[TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider, TMeta keyprovider.KeyMeta]( + t *testing.T, + tc MetadataStructTestCase[TConfig, TMeta], +) { + keyProvider, _ := complianceTestBuildConfigAndValidate[TKeyProvider, TMeta](t, tc.ValidConfig, true) + + output, _, err := keyProvider.Provide(tc.Meta) + if tc.IsPresent { + // This test case means that the input metadata should be considered present, so it's either an error or a + // decryption key. 
+ if tc.IsValid { + if err != nil { + var typedError *keyprovider.ErrKeyProviderFailure + if !errors.As(err, &typedError) { + compliancetest.Fail( + t, + "The Provide() function returned an unexpected error, which was also of the incorrect type of %T instead of %T: %v", + err, + typedError, + err, + ) + } + compliancetest.Fail(t, "The Provide() function returned an unexpected error: %v", err) + } + } else { + if err == nil { + compliancetest.Fail(t, "The Provide() function did not return an error as expected.") + } else { + compliancetest.Log(t, "The Provide() function returned an expected error: %v", err) + } + + var typedError *keyprovider.ErrInvalidMetadata + if !errors.As(err, &typedError) { + compliancetest.Fail( + t, + "The Provide() function returned the error type of %T instead of %T. Please use the correct typed errors.", + err, + typedError, + ) + } + } + } else { + if err != nil { + var typedError *keyprovider.ErrKeyProviderFailure + if !errors.As(err, &typedError) { + compliancetest.Fail( + t, + "The Provide() function returned an unexpected error, which was also of the incorrect type of %T instead of %T: %v", + err, + typedError, + err, + ) + } + compliancetest.Fail(t, "The Provide() function returned an unexpected error: %v", err) + } + if len(output.DecryptionKey) != 0 { + compliancetest.Fail( + t, + "The Provide() function a decryption key despite not receiving input meta. 
This is incorrect, please don't return a decryption key unless you receive the input metadata.", + ) + } else { + compliancetest.Log( + t, + "The Provide() function correctly did not return a decryption key without input metadata.", + ) + } + } +} diff --git a/pkg/encryption/keyprovider/compliancetest/configuration.go b/pkg/encryption/keyprovider/compliancetest/configuration.go new file mode 100644 index 00000000000..5ef5e941c5d --- /dev/null +++ b/pkg/encryption/keyprovider/compliancetest/configuration.go @@ -0,0 +1,78 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +type TestConfiguration[TDescriptor keyprovider.Descriptor, TConfig keyprovider.Config, TMeta any, TKeyProvider keyprovider.KeyProvider] struct { + // Descriptor is the descriptor for the key provider. + Descriptor TDescriptor + + // HCLParseTestCases contains the test cases of parsing HCL configuration and then validating it using the Build() + // function. + HCLParseTestCases map[string]HCLParseTestCase[TConfig, TKeyProvider] + + // ConfigStructT validates that a certain config results or does not result in a valid Build() call. + ConfigStructTestCases map[string]ConfigStructTestCase[TConfig, TKeyProvider] + + // MetadataStructTestCases test various metadata values for correct handling. + MetadataStructTestCases map[string]MetadataStructTestCase[TConfig, TMeta] + + // ProvideTestCase exercises the entire chain and generates two keys. + ProvideTestCase ProvideTestCase[TConfig, TMeta] +} + +// HCLParseTestCase contains a test case that parses HCL into a configuration. +type HCLParseTestCase[TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider] struct { + // HCL contains the code that should be parsed into the configuration structure. 
+ HCL string + // ValidHCL indicates that the HCL block should be parsable into the configuration structure, but not necessarily + // result in a valid Build() call. + ValidHCL bool + // ValidBuild indicates that calling the Build() function should not result in an error. + ValidBuild bool + // Validate is an extra optional validation function that can check if the configuration contains the correct + // values parsed from HCL. If ValidBuild is true, the key provider will be passed as well. + Validate func(config TConfig, keyProvider TKeyProvider) error +} + +// ConfigStructTestCase validates that the config struct is behaving correctly when Build() is called. +type ConfigStructTestCase[TConfig keyprovider.Config, TKeyProvider keyprovider.KeyProvider] struct { + Config TConfig + ValidBuild bool + Validate func(keyProvider TKeyProvider) error +} + +// MetadataStructTestCase is a test case for metadata. +type MetadataStructTestCase[TConfig keyprovider.Config, TMeta any] struct { + // Config contains a valid configuration that should be used to construct the key provider. + ValidConfig TConfig + // Meta contains the metadata for this test case. + Meta TMeta + // IsPresent indicates that the supplied metadata in Meta should be treated as present and the Provide() function + // should either return an error or a decryption key. If IsPresent is false, the Provide() function must not + // return an error or a decryption key. + IsPresent bool + // IsValid indicates that, if IsPresent is true, the metadata should be valid and the Provide() function should not + // exit with a *keyprovider.ErrInvalidMetadata error. + IsValid bool +} + +// ProvideTestCase provides a test configuration Provide() test where a key is requested and then +// subsequently compared. +type ProvideTestCase[TConfig keyprovider.Config, TMeta any] struct { + // ValidConfig is a valid configuration that the integration test can use to generate keys. 
+ ValidConfig TConfig + // ExpectedOutput indicates what keys are expected as an output when the integration test is run with full metadata. + ExpectedOutput *keyprovider.Output + // ValidateKeys is a function that compares an encryption and a decryption key. The function should return an error + // if the two keys don't belong together. If you do not provide this function, bytes.Equal will be used. + ValidateKeys func(decryptionKey []byte, encryptionKey []byte) error + // ValidateMetadata is a function that validates that the resulting metadata is correct. + ValidateMetadata func(meta TMeta) error +} diff --git a/pkg/encryption/keyprovider/config.go b/pkg/encryption/keyprovider/config.go new file mode 100644 index 00000000000..c1845b184c8 --- /dev/null +++ b/pkg/encryption/keyprovider/config.go @@ -0,0 +1,16 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +// Config is a struct annotated with HCL (and preferably JSON) tags that OpenTofu reads the user-provided configuration +// into. The Build function assembles the configuration into a usable key provider. +type Config interface { + // Build provides a key provider and an empty JSON-tagged struct to read the decryption metadata into. If the + // configuration is invalid, it returns an error. + // + // If a key provider does not need metadata, it may return nil. + Build() (KeyProvider, KeyMeta, error) +} diff --git a/pkg/encryption/keyprovider/descriptor.go b/pkg/encryption/keyprovider/descriptor.go new file mode 100644 index 00000000000..8b5908064d0 --- /dev/null +++ b/pkg/encryption/keyprovider/descriptor.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +// Descriptor is a high level description of a key provider.
+type Descriptor interface { + // ID returns the unique identifier used when parsing HCL or JSON configs. + ID() ID + + // ConfigStruct creates a new configuration struct pointer annotated with hcl tags. The Build() receiver on + // this struct must be able to build a KeyProvider from the configuration: + // + // Common errors: + // - Returning a struct without a pointer + // - Returning a non-struct + ConfigStruct() Config +} diff --git a/pkg/encryption/keyprovider/errors.go b/pkg/encryption/keyprovider/errors.go new file mode 100644 index 00000000000..e82e0335409 --- /dev/null +++ b/pkg/encryption/keyprovider/errors.go @@ -0,0 +1,72 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +import "fmt" + +// ErrKeyProviderFailure indicates a generic key provider failure. +type ErrKeyProviderFailure struct { + Message string + Cause error +} + +func (e ErrKeyProviderFailure) Error() string { + if e.Cause != nil { + return fmt.Sprintf("%s: %v", e.Message, e.Cause) + } + return e.Message +} + +func (e ErrKeyProviderFailure) Unwrap() error { + return e.Cause +} + +// ErrInvalidConfiguration indicates that the key provider configuration is incorrect. +type ErrInvalidConfiguration struct { + Message string + Cause error +} + +func (e ErrInvalidConfiguration) Error() string { + if e.Cause != nil { + + if e.Message != "" { + return fmt.Sprintf("%s: %v", e.Message, e.Cause) + } + return fmt.Sprintf("invalid key provider configuration: %v", e.Cause) + } + if e.Message != "" { + return e.Message + } + return "invalid provider configuration" +} + +func (e ErrInvalidConfiguration) Unwrap() error { + return e.Cause +} + +// ErrInvalidMetadata indicates that the key provider has received an incorrect metadata and cannot decrypt. 
+type ErrInvalidMetadata struct { + Message string + Cause error +} + +func (e ErrInvalidMetadata) Error() string { + if e.Cause != nil { + if e.Message != "" { + return fmt.Sprintf("%s: %v", e.Message, e.Cause) + } + return fmt.Sprintf("invalid key provider metadata: %v", e.Cause) + } + if e.Message != "" { + return e.Message + } + return "invalid provider metadata" +} + +func (e ErrInvalidMetadata) Unwrap() error { + return e.Cause +} diff --git a/pkg/encryption/keyprovider/gcp_kms/compliance_test.go b/pkg/encryption/keyprovider/gcp_kms/compliance_test.go new file mode 100644 index 00000000000..f5a8008a2d4 --- /dev/null +++ b/pkg/encryption/keyprovider/gcp_kms/compliance_test.go @@ -0,0 +1,185 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gcp_kms + +import ( + "fmt" + "os" + "testing" + + "cloud.google.com/go/kms/apiv1/kmspb" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/compliancetest" +) + +func getKey(t *testing.T) string { + if os.Getenv("TF_ACC") == "" && os.Getenv("TF_KMS_TEST") == "" { + return "" + } + return os.Getenv("TF_GCP_KMS_KEY") +} + +func TestKeyProvider(t *testing.T) { + testKeyId := getKey(t) + + if testKeyId == "" { + testKeyId = "projects/local-vehicle-id/locations/global/keyRings/ringid/cryptoKeys/keyid" + mock := &mockKMC{ + encrypt: func(req *kmspb.EncryptRequest) (*kmspb.EncryptResponse, error) { + return &kmspb.EncryptResponse{ + Ciphertext: append([]byte(testKeyId), req.Plaintext...), + }, nil + }, + decrypt: func(req *kmspb.DecryptRequest) (*kmspb.DecryptResponse, error) { + return &kmspb.DecryptResponse{ + Plaintext: req.Ciphertext[len(testKeyId):], + }, nil + }, + } + + injectMock(mock) + + // Used by impersonation tests + t.Setenv("GOOGLE_CREDENTIALS", `{"type": "service_account"}`) + + } + + compliancetest.ComplianceTest( + t, + compliancetest.TestConfiguration[*descriptor, *Config, *keyMeta, 
*keyProvider]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *keyProvider]{ + "success": { + HCL: fmt.Sprintf(`key_provider "gcp_kms" "foo" { + kms_encryption_key = "%s" + key_length = 32 + }`, testKeyId), + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, keyProvider *keyProvider) error { + if config.KMSKeyName != testKeyId { + return fmt.Errorf("incorrect key ID returned") + } + return nil + }, + }, + "empty": { + HCL: `key_provider "gcp_kms" "foo" {}`, + ValidHCL: false, + ValidBuild: false, + }, + "invalid-key-size": { + HCL: fmt.Sprintf(`key_provider "gcp_kms" "foo" { + kms_encryption_key = "%s" + key_length = -1 + }`, testKeyId), + ValidHCL: true, + ValidBuild: false, + }, + "empty-key-id": { + HCL: `key_provider "gcp_kms" "foo" { + kms_encryption_key = "" + key_length = 32 + }`, + ValidHCL: true, + ValidBuild: false, + }, + "large-key-size": { + HCL: `key_provider "gcp_kms" "foo" { + kms_encryption_key = "alias/temp" + key_length = 99999999 + }`, + ValidHCL: true, + ValidBuild: false, + }, + "unknown-property": { + HCL: fmt.Sprintf(`key_provider "gcp_kms" "foo" { + kms_encryption_key = "%s" + key_length = 32 + unknown_property = "foo" + }`, testKeyId), + ValidHCL: false, + ValidBuild: false, + }, + "with-access-token": { + HCL: `key_provider "gcp_kms" "foo" { + kms_encryption_key = "alias/temp" + key_length = 32 + access_token = "my-access-token" + }`, + ValidHCL: true, + ValidBuild: true, + }, + "bad-credentials": { + HCL: `key_provider "gcp_kms" "foo" { + kms_encryption_key = "alias/temp" + key_length = 32 + credentials = "AS{DU*@#8UQDD*a" + }`, + ValidHCL: true, + ValidBuild: false, + }, + "impersonation": { + HCL: `key_provider "gcp_kms" "foo" { + kms_encryption_key = "alias/temp" + key_length = 32 + impersonate_service_account = "batman" + }`, + ValidHCL: true, + ValidBuild: true, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, 
*keyProvider]{ + "success": { + Config: &Config{ + KMSKeyName: testKeyId, + KeyLength: 32, + }, + ValidBuild: true, + Validate: nil, + }, + "empty": { + Config: &Config{ + KMSKeyName: "", + KeyLength: 0, + }, + ValidBuild: false, + Validate: nil, + }, + }, + MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, *keyMeta]{ + "empty": { + ValidConfig: &Config{ + KMSKeyName: testKeyId, + KeyLength: 32, + }, + Meta: &keyMeta{}, + IsPresent: false, + IsValid: false, + }, + }, + ProvideTestCase: compliancetest.ProvideTestCase[*Config, *keyMeta]{ + ValidConfig: &Config{ + KMSKeyName: testKeyId, + KeyLength: 32, + }, + ValidateKeys: func(dec []byte, enc []byte) error { + if len(dec) == 0 { + return fmt.Errorf("decryption key is empty") + } + if len(enc) == 0 { + return fmt.Errorf("encryption key is empty") + } + return nil + }, + ValidateMetadata: func(meta *keyMeta) error { + if len(meta.Ciphertext) == 0 { + return fmt.Errorf("ciphertext is empty") + } + return nil + }, + }, + }) +} diff --git a/pkg/encryption/keyprovider/gcp_kms/config.go b/pkg/encryption/keyprovider/gcp_kms/config.go new file mode 100644 index 00000000000..897555e2fde --- /dev/null +++ b/pkg/encryption/keyprovider/gcp_kms/config.go @@ -0,0 +1,153 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gcp_kms + +import ( + "context" + "encoding/json" + "os" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/version" + "github.com/mitchellh/go-homedir" + "golang.org/x/oauth2" + "google.golang.org/api/impersonate" + "google.golang.org/api/option" + + kms "cloud.google.com/go/kms/apiv1" +) + +type keyManagementClientInit func(ctx context.Context, opts ...option.ClientOption) (keyManagementClient, error) + +// Can be overridden for test mocking +var newKeyManagementClient keyManagementClientInit = func(ctx context.Context, opts ...option.ClientOption) (keyManagementClient, error) { + return kms.NewKeyManagementClient(ctx, opts...) +} + +type Config struct { + Credentials string `hcl:"credentials,optional"` + AccessToken string `hcl:"access_token,optional"` + + ImpersonateServiceAccount string `hcl:"impersonate_service_account,optional"` + ImpersonateServiceAccountDelegates []string `hcl:"impersonate_service_account_delegates,optional"` + + KMSKeyName string `hcl:"kms_encryption_key"` + KeyLength int `hcl:"key_length"` +} + +func stringAttrEnvFallback(val string, env string) string { + if val != "" { + return val + } + return os.Getenv(env) +} + +// TODO This is copied in from the backend package to prevent a circular dependency loop +// If the argument is a path, ReadPathOrContents loads it and returns the contents, +// otherwise the argument is assumed to be the desired contents and is simply +// returned. 
+func ReadPathOrContents(poc string) (string, error) { + if len(poc) == 0 { + return poc, nil + } + + path := poc + if path[0] == '~' { + var err error + path, err = homedir.Expand(path) + if err != nil { + return path, err + } + } + + if _, err := os.Stat(path); err == nil { + contents, err := os.ReadFile(path) + if err != nil { + return string(contents), err + } + return string(contents), nil + } + + return poc, nil +} + +func (c Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) { + // This mirrors the gcp remote state backend + + // Apply env defaults if necessary + c.Credentials = stringAttrEnvFallback(c.Credentials, "GOOGLE_CREDENTIALS") + c.AccessToken = stringAttrEnvFallback(c.AccessToken, "GOOGLE_OAUTH_ACCESS_TOKEN") + c.ImpersonateServiceAccount = stringAttrEnvFallback(c.ImpersonateServiceAccount, "GOOGLE_BACKEND_IMPERSONATE_SERVICE_ACCOUNT") + c.ImpersonateServiceAccount = stringAttrEnvFallback(c.ImpersonateServiceAccount, "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") + + ctx := context.Background() + + var opts []option.ClientOption + var credOptions []option.ClientOption + + if c.AccessToken != "" { + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: c.AccessToken, + }) + credOptions = append(credOptions, option.WithTokenSource(tokenSource)) + } else if c.Credentials != "" { + // to mirror how the provider works, we accept the file path or the contents + contents, err := ReadPathOrContents(c.Credentials) + if err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Message: "Error loading credentials", Cause: err} + } + + if !json.Valid([]byte(contents)) { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Message: "the string provided in credentials is neither valid json nor a valid file path"} + } + + credOptions = append(credOptions, option.WithCredentialsJSON([]byte(contents))) + } + + // Service Account Impersonation + if c.ImpersonateServiceAccount != "" { + ts, err := 
impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: c.ImpersonateServiceAccount, + Scopes: []string{"https://www.googleapis.com/auth/cloudkms"}, // I can't find a smaller scope than this... + Delegates: c.ImpersonateServiceAccountDelegates, + }, credOptions...) + + if err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Cause: err} + } + + opts = append(opts, option.WithTokenSource(ts)) + + } else { + opts = append(opts, credOptions...) + } + + opts = append(opts, option.WithUserAgent(httpclient.OpenTofuUserAgent(version.Version))) + + svc, err := newKeyManagementClient(ctx, opts...) + if err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Cause: err} + } + + if c.KMSKeyName == "" { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Message: "kms_encryption_key must be provided"} + } + + if c.KeyLength < 1 { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Message: "key_length must be at least 1"} + } + if c.KeyLength > 1024 { + return nil, nil, &keyprovider.ErrInvalidConfiguration{Message: "key_length must be less than the GCP limit of 1024"} + } + + return &keyProvider{ + svc: svc, + ctx: ctx, + keyName: c.KMSKeyName, + keyLength: c.KeyLength, + }, new(keyMeta), nil +} diff --git a/pkg/encryption/keyprovider/gcp_kms/descriptor.go b/pkg/encryption/keyprovider/gcp_kms/descriptor.go new file mode 100644 index 00000000000..e8c2e527ba2 --- /dev/null +++ b/pkg/encryption/keyprovider/gcp_kms/descriptor.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gcp_kms + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +func New() keyprovider.Descriptor { + return &descriptor{} +} + +type descriptor struct { +} + +func (f descriptor) ID() keyprovider.ID { + return "gcp_kms" +} + +func (f descriptor) ConfigStruct() keyprovider.Config { + return &Config{} +} diff --git a/pkg/encryption/keyprovider/gcp_kms/mock_test.go b/pkg/encryption/keyprovider/gcp_kms/mock_test.go new file mode 100644 index 00000000000..988040e8f28 --- /dev/null +++ b/pkg/encryption/keyprovider/gcp_kms/mock_test.go @@ -0,0 +1,27 @@ +package gcp_kms + +import ( + "context" + + "cloud.google.com/go/kms/apiv1/kmspb" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" +) + +type mockKMC struct { + encrypt func(*kmspb.EncryptRequest) (*kmspb.EncryptResponse, error) + decrypt func(*kmspb.DecryptRequest) (*kmspb.DecryptResponse, error) +} + +func (m *mockKMC) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + return m.encrypt(req) +} +func (m *mockKMC) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + return m.decrypt(req) +} + +func injectMock(m *mockKMC) { + newKeyManagementClient = func(ctx context.Context, opts ...option.ClientOption) (keyManagementClient, error) { + return m, nil + } +} diff --git a/pkg/encryption/keyprovider/gcp_kms/provider.go b/pkg/encryption/keyprovider/gcp_kms/provider.go new file mode 100644 index 00000000000..6f57b211b06 --- /dev/null +++ b/pkg/encryption/keyprovider/gcp_kms/provider.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gcp_kms + +import ( + "context" + "crypto/rand" + + "cloud.google.com/go/kms/apiv1/kmspb" + "github.com/googleapis/gax-go/v2" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +type keyMeta struct { + Ciphertext []byte `json:"ciphertext"` +} + +func (m keyMeta) isPresent() bool { + return len(m.Ciphertext) != 0 +} + +type keyManagementClient interface { + Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) + Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) +} + +type keyProvider struct { + svc keyManagementClient + ctx context.Context + keyName string + keyLength int +} + +func (p keyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) { + if rawMeta == nil { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: no metadata struct provided"} + } + inMeta, ok := rawMeta.(*keyMeta) + if !ok { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: invalid metadata struct type"} + } + + outMeta := &keyMeta{} + out := keyprovider.Output{} + + // Generate new key + out.EncryptionKey = make([]byte, p.keyLength) + _, err := rand.Read(out.EncryptionKey) + if err != nil { + return out, outMeta, &keyprovider.ErrKeyProviderFailure{ + Message: "failed to generate key", + Cause: err, + } + } + + // Encrypt new encryption key using kms + encryptedKeyData, err := p.svc.Encrypt(p.ctx, &kmspb.EncryptRequest{ + Name: p.keyName, + Plaintext: out.EncryptionKey, + }) + if err != nil { + return out, outMeta, &keyprovider.ErrKeyProviderFailure{ + Message: "failed to encrypt key", + Cause: err, + } + } + + outMeta.Ciphertext = encryptedKeyData.Ciphertext + + // We do not set the DecryptionKey here as we should only be setting the decryption key if we are decrypting + // and that is handled below when we check 
if the inMeta has a Ciphertext + + if inMeta.isPresent() { + // We have an existing decryption key to decrypt, so we should now populate the DecryptionKey + decryptedKeyData, decryptErr := p.svc.Decrypt(p.ctx, &kmspb.DecryptRequest{ + Name: p.keyName, + Ciphertext: inMeta.Ciphertext, + }) + + if decryptErr != nil { + return out, outMeta, decryptErr + } + + // Set decryption key on the output + out.DecryptionKey = decryptedKeyData.Plaintext + } + + return out, outMeta, nil +} diff --git a/pkg/encryption/keyprovider/id.go b/pkg/encryption/keyprovider/id.go new file mode 100644 index 00000000000..1ce8e25bbef --- /dev/null +++ b/pkg/encryption/keyprovider/id.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +import "fmt" + +// ID is a type alias to make passing the wrong ID into a key provider harder. +type ID string + +// Validate validates the key provider ID for correctness. +func (id ID) Validate() error { + if id == "" { + return fmt.Errorf("empty key provider ID (key provider IDs must match %s)", idRe.String()) + } + if !idRe.MatchString(string(id)) { + return fmt.Errorf("invalid key provider ID: %s (must match %s)", id, idRe.String()) + } + return nil +} diff --git a/pkg/encryption/keyprovider/keyprovider.go b/pkg/encryption/keyprovider/keyprovider.go new file mode 100644 index 00000000000..fcefb226b43 --- /dev/null +++ b/pkg/encryption/keyprovider/keyprovider.go @@ -0,0 +1,16 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +// KeyProvider is the usable key provider. The Provide function is responsible for creating both the decryption and +// encryption key, as well as returning the metadata to be stored. 
+type KeyProvider interface { + // Provide provides an encryption and decryption keys. If the process fails, it returns an error. + // + // The caller must pass in the same struct obtained from the Build function of the Config, with the decryption + // metadata read in. If no decryption metadata is present, the caller must pass in the struct unmodified. + Provide(decryptionMeta KeyMeta) (keysOutput Output, encryptionMeta KeyMeta, err error) +} diff --git a/pkg/encryption/keyprovider/meta.go b/pkg/encryption/keyprovider/meta.go new file mode 100644 index 00000000000..37ce94663c0 --- /dev/null +++ b/pkg/encryption/keyprovider/meta.go @@ -0,0 +1,13 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +// KeyMeta is a type alias for a struct annotated with JSON tags to store. Its purpose is to store parameters alongside +// the encrypted data which are required to later provide a decryption key. +// +// Key providers can use this to store, for example, a randomly generated salt value which is required to later provide +// the same decryption key. +type KeyMeta any diff --git a/pkg/encryption/keyprovider/openbao/client.go b/pkg/encryption/keyprovider/openbao/client.go new file mode 100644 index 00000000000..dfaeb99b6eb --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/client.go @@ -0,0 +1,93 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package openbao + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/url" + "path" + + openbao "github.com/openbao/openbao/api" +) + +type client interface { + WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*openbao.Secret, error) +} + +// service implements missing utility functions from openbao/api such as routing and serialization. 
+type service struct { + c client + transitPath string +} + +type dataKey struct { + Plaintext []byte + Ciphertext []byte +} + +func (s service) generateDataKey(ctx context.Context, keyName string, bitSize int) (dataKey, error) { + path := path.Join(s.transitPath, "datakey/plaintext", url.PathEscape(keyName)) + + secret, err := s.c.WriteWithContext(ctx, path, map[string]interface{}{ + "bits": bitSize, + }) + if err != nil { + return dataKey{}, fmt.Errorf("error sending datakey request to OpenBao: %w", err) + } + + key := dataKey{} + + key.Ciphertext, err = retrieveCiphertext(secret) + if err != nil { + return dataKey{}, err + } + + key.Plaintext, err = retrievePlaintext(secret) + if err != nil { + return dataKey{}, err + } + + return key, nil +} + +func (s service) decryptData(ctx context.Context, keyName string, ciphertext []byte) ([]byte, error) { + path := path.Join(s.transitPath, "decrypt", url.PathEscape(keyName)) + + secret, err := s.c.WriteWithContext(ctx, path, map[string]interface{}{ + "ciphertext": string(ciphertext), + }) + if err != nil { + return nil, fmt.Errorf("error sending decryption request to OpenBao: %w", err) + } + + return retrievePlaintext(secret) +} + +func retrievePlaintext(s *openbao.Secret) ([]byte, error) { + base64Plaintext, ok := s.Data["plaintext"].(string) + if !ok { + return nil, errors.New("failed to deserialize 'plaintext' (it's either OpenTofu bug or incompatible OpenBao version)") + } + + plaintext, err := base64.StdEncoding.DecodeString(base64Plaintext) + if err != nil { + return nil, fmt.Errorf("base64 decoding 'plaintext' (it's either OpenTofu bug or incompatible OpenBao version): %w", err) + } + + return plaintext, nil +} + +func retrieveCiphertext(s *openbao.Secret) ([]byte, error) { + ciphertext, ok := s.Data["ciphertext"].(string) + if !ok { + return nil, errors.New("failed to deserialize 'ciphertext' (it's either OpenTofu bug or incompatible OpenBao version)") + } + + return []byte(ciphertext), nil +} diff --git 
a/pkg/encryption/keyprovider/openbao/compliance_test.go b/pkg/encryption/keyprovider/openbao/compliance_test.go new file mode 100644 index 00000000000..f1cba5526e8 --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/compliance_test.go @@ -0,0 +1,247 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package openbao + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "net/url" + "os" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/compliancetest" + openbao "github.com/openbao/openbao/api" +) + +func getBaoKeyName() string { + // Acceptance tests are disabled, running with mock. + if os.Getenv("TF_ACC") == "" { + return "" + } + return os.Getenv("TF_ACC_BAO_KEY_NAME") +} + +const defaultTestKeyName = "test-key" + +func TestKeyProvider(t *testing.T) { + testKeyName := getBaoKeyName() + + if testKeyName == "" { + testKeyName = defaultTestKeyName + + mock := prepareClientMockForKeyProviderTest(t, testKeyName) + + injectMock(mock) + + t.Cleanup(func() { + injectDefaultClient() + }) + } + + compliancetest.ComplianceTest( + t, + compliancetest.TestConfiguration[*descriptor, *Config, *keyMeta, *keyProvider]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *keyProvider]{ + "success": { + HCL: fmt.Sprintf(`key_provider "openbao" "foo" { + key_name = "%s" + }`, testKeyName), + ValidHCL: true, + ValidBuild: true, + }, + "success-full-creds": { + HCL: fmt.Sprintf(`key_provider "openbao" "foo" { + token = "s.dummytoken" + address = "http://127.0.0.1:8201" + key_name = "%s" + }`, testKeyName), + ValidHCL: true, + ValidBuild: true, + }, + "empty": { + HCL: `key_provider "openbao" "foo" {}`, + ValidHCL: false, + ValidBuild: false, + }, + "empty-key-name": { + HCL: `key_provider "openbao" "foo" { + key_name = "" + }`, + ValidHCL: true, + ValidBuild: false, + }, + 
"invalid-key-length": { + HCL: fmt.Sprintf(`key_provider "openbao" "foo" { + key_name = "%s" + key_length = 17 + }`, testKeyName), + ValidHCL: true, + ValidBuild: false, + }, + "no-key-name": { + HCL: `key_provider "openbao" "foo" { + key_length = 16 + }`, + ValidHCL: false, + ValidBuild: false, + }, + "unknown-property": { + HCL: fmt.Sprintf(`key_provider "openbao" "foo" { + key_name = "%s" + key_length = 16 + unknown_property = "foo" + }`, testKeyName), + ValidHCL: false, + ValidBuild: false, + }, + "transit-path": { + HCL: fmt.Sprintf(`key_provider "openbao" "foo" { + key_name = "%s" + key_length = 16 + transit_engine_path = "foo" + }`, testKeyName), + ValidHCL: true, + ValidBuild: true, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *keyProvider]{ + "success": { + Config: &Config{ + KeyName: testKeyName, + KeyLength: 16, + TransitEnginePath: "/pki", + }, + ValidBuild: true, + Validate: func(p *keyProvider) error { + if p.keyName != testKeyName { + return fmt.Errorf("key names don't match: %v and %v", p.keyName, testKeyName) + } + if p.keyLength != 16 { + return fmt.Errorf("invalid key length: %v", p.keyLength) + } + if p.svc.transitPath != "/pki" { + return fmt.Errorf("invalid transit path: %v", p.svc.transitPath) + } + return nil + }, + }, + "success-default-values": { + Config: &Config{ + KeyName: testKeyName, + }, + ValidBuild: true, + Validate: func(p *keyProvider) error { + if p.keyName != testKeyName { + return fmt.Errorf("key names don't match: %v and %v", p.keyName, testKeyName) + } + if p.keyLength != 32 { + return fmt.Errorf("invalid default key length: %v", p.keyLength) + } + if p.svc.transitPath != "/transit" { + return fmt.Errorf("invalid default transit path: %v; expected: '/transit'", p.svc.transitPath) + } + return nil + }, + }, + "empty": { + Config: &Config{}, + ValidBuild: false, + Validate: nil, + }, + }, + MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, 
*keyMeta]{ + "empty": { + ValidConfig: &Config{ + KeyName: testKeyName, + }, + Meta: &keyMeta{}, + IsPresent: false, + IsValid: false, + }, + }, + ProvideTestCase: compliancetest.ProvideTestCase[*Config, *keyMeta]{ + ValidConfig: &Config{ + KeyName: testKeyName, + }, + ValidateKeys: func(dec []byte, enc []byte) error { + if len(dec) == 0 { + return fmt.Errorf("decryption key is empty") + } + if len(enc) == 0 { + return fmt.Errorf("encryption key is empty") + } + return nil + }, + ValidateMetadata: func(meta *keyMeta) error { + if len(meta.Ciphertext) == 0 { + return fmt.Errorf("ciphertext is empty") + } + return nil + }, + }, + }, + ) +} + +// Mocking is a bit complicated due to how openbao/api package is structured, +// but in order to test cover as much as we can, it has to have some logic in here. + +func prepareClientMockForKeyProviderTest(t *testing.T, testKeyName string) mockClientFunc { + escapedTestKeyName := url.PathEscape(testKeyName) + + // Mock uses default transit engine path: "/transit". 
+ generateDataKeyPath := fmt.Sprintf("/transit/datakey/plaintext/%s", escapedTestKeyName) + decryptPath := fmt.Sprintf("/transit/decrypt/%s", escapedTestKeyName) + + return func(ctx context.Context, path string, data map[string]interface{}) (*openbao.Secret, error) { + switch path { + case generateDataKeyPath: + bits, ok := data["bits"].(int) + if !ok { + t.Fatalf("Invalid bits in data suplied to mock: not a number") + } + + plaintext := make([]byte, int(bits)/8) + if _, err := rand.Read(plaintext); err != nil { + panic(fmt.Errorf("generating random data key in mock: %w", err)) + } + + s := &openbao.Secret{ + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString(plaintext), + "ciphertext": string(append([]byte(testKeyName), plaintext...)), + }, + } + + return s, nil + + case decryptPath: + ciphertext, ok := data["ciphertext"].(string) + if !ok { + t.Fatalf("Invalid ciphertext in data suplied to mock: not an string") + } + + plaintext := []byte(ciphertext[len(testKeyName):]) + + s := &openbao.Secret{ + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString(plaintext), + }, + } + + return s, nil + + default: + t.Fatalf("Invalid path suplied to mock: %s", path) + } + + // unreachable code + return nil, nil + } +} diff --git a/pkg/encryption/keyprovider/openbao/config.go b/pkg/encryption/keyprovider/openbao/config.go new file mode 100644 index 00000000000..ae5ef2b7038 --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/config.go @@ -0,0 +1,115 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package openbao + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + openbao "github.com/openbao/openbao/api" +) + +type Config struct { + Address string `hcl:"address,optional"` + Token string `hcl:"token,optional"` + + KeyName string `hcl:"key_name"` + KeyLength DataKeyLength `hcl:"key_length,optional"` + TransitEnginePath string `hcl:"transit_engine_path,optional"` +} + +const ( + defaultDataKeyLength DataKeyLength = 32 + defaultTransitEnginePath string = "/transit" +) + +func (c Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) { + if c.KeyName == "" { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "no key name found", + } + } + + if c.KeyLength == 0 { + c.KeyLength = defaultDataKeyLength + } + + if err := c.KeyLength.Validate(); err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Cause: err, + } + } + + if c.TransitEnginePath == "" { + c.TransitEnginePath = defaultTransitEnginePath + } + + // DefaultConfig reads BAO_ADDR and some other optional env variables. + config := openbao.DefaultConfig() + if config.Error != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Cause: config.Error, + } + } + + // Address from HCL supersedes BAO_ADDR. 
+ if c.Address != "" { + config.Address = c.Address + } + + client, err := newClient(config, c.Token) + if err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Cause: err, + } + } + + return &keyProvider{ + svc: service{ + c: client, + transitPath: c.TransitEnginePath, + }, + keyName: c.KeyName, + keyLength: c.KeyLength, + }, new(keyMeta), nil +} + +type DataKeyLength int + +func (l DataKeyLength) Validate() error { + switch l { + case 16, 32, 64: + return nil + default: + return fmt.Errorf("data key length should be one of 16, 32 or 64 bytes: got %v", l) + } +} + +func (l DataKeyLength) Bits() int { + return int(l) * 8 +} + +type clientConstructor func(config *openbao.Config, token string) (client, error) + +// newClient variable allows injecting different client implementations. +// In order to keep client interface simple, token setting is in this function as well. +// It's not possible to pass token in config. +var newClient clientConstructor = newOpenBaoClient + +func newOpenBaoClient(config *openbao.Config, token string) (client, error) { + // NewClient reads BAO_TOKEN and some other optional env variables. + c, err := openbao.NewClient(config) + if err != nil { + return nil, fmt.Errorf("error creating OpenBao client: %w", err) + } + + // Token from HCL supersedes BAO_TOKEN. + if token != "" { + c.SetToken(token) + } + + return c.Logical(), nil +} diff --git a/pkg/encryption/keyprovider/openbao/descriptor.go b/pkg/encryption/keyprovider/openbao/descriptor.go new file mode 100644 index 00000000000..6a4247236d3 --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/descriptor.go @@ -0,0 +1,23 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package openbao + +import "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + +func New() keyprovider.Descriptor { + return &descriptor{} +} + +type descriptor struct { +} + +func (f descriptor) ID() keyprovider.ID { + return "openbao" +} + +func (f descriptor) ConfigStruct() keyprovider.Config { + return &Config{} +} diff --git a/pkg/encryption/keyprovider/openbao/mock_test.go b/pkg/encryption/keyprovider/openbao/mock_test.go new file mode 100644 index 00000000000..f5908eeea02 --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/mock_test.go @@ -0,0 +1,23 @@ +package openbao + +import ( + "context" + + openbao "github.com/openbao/openbao/api" +) + +type mockClientFunc func(ctx context.Context, path string, data map[string]interface{}) (*openbao.Secret, error) + +func (f mockClientFunc) WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*openbao.Secret, error) { + return f(ctx, path, data) +} + +func injectMock(m mockClientFunc) { + newClient = func(_ *openbao.Config, _ string) (client, error) { + return m, nil + } +} + +func injectDefaultClient() { + newClient = newOpenBaoClient +} diff --git a/pkg/encryption/keyprovider/openbao/provider.go b/pkg/encryption/keyprovider/openbao/provider.go new file mode 100644 index 00000000000..4328130f9de --- /dev/null +++ b/pkg/encryption/keyprovider/openbao/provider.go @@ -0,0 +1,71 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package openbao + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +type keyMeta struct { + Ciphertext []byte `json:"ciphertext"` +} + +func (m keyMeta) isPresent() bool { + return len(m.Ciphertext) != 0 +} + +type keyProvider struct { + svc service + keyName string + keyLength DataKeyLength +} + +func (p keyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) { + if rawMeta == nil { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: "bug: no metadata struct provided", + } + } + + inMeta, ok := rawMeta.(*keyMeta) + if !ok { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: "bug: invalid metadata struct type", + } + } + + ctx := context.Background() + + dataKey, err := p.svc.generateDataKey(ctx, p.keyName, p.keyLength.Bits()) + if err != nil { + return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{ + Message: "failed to generate OpenBao data key (check if the configuration valid and OpenBao server accessible)", + Cause: err, + } + } + + outMeta := &keyMeta{ + Ciphertext: dataKey.Ciphertext, + } + + out := keyprovider.Output{ + EncryptionKey: dataKey.Plaintext, + } + + if inMeta.isPresent() { + out.DecryptionKey, err = p.svc.decryptData(ctx, p.keyName, inMeta.Ciphertext) + if err != nil { + return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{ + Message: "failed to decrypt ciphertext (check if the configuration valid and OpenBao server accessible)", + Cause: err, + } + } + } + + return out, outMeta, nil +} diff --git a/pkg/encryption/keyprovider/output.go b/pkg/encryption/keyprovider/output.go new file mode 100644 index 00000000000..94b027c338e --- /dev/null +++ b/pkg/encryption/keyprovider/output.go @@ -0,0 +1,35 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +import "github.com/zclconf/go-cty/cty" + +// Output is the standardized structure a key provider must return when providing a key. +// It contains two keys because some key providers may prefer to include random data (e.g. salt) +// in the generated keys and this salt will be different for decryption and encryption. +type Output struct { + EncryptionKey []byte `hcl:"encryption_key" cty:"encryption_key" json:"encryption_key" yaml:"encryption_key"` + DecryptionKey []byte `hcl:"decryption_key" cty:"decryption_key" json:"decryption_key" yaml:"decryption_key"` +} + +// Cty turns the Output struct into a CTY value. +func (o *Output) Cty() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "encryption_key": o.byteToCty(o.EncryptionKey), + "decryption_key": o.byteToCty(o.DecryptionKey), + }) +} + +func (o *Output) byteToCty(data []byte) cty.Value { + if len(data) == 0 { + return cty.NullVal(cty.List(cty.Number)) + } + ctyData := make([]cty.Value, len(data)) + for i, d := range data { + ctyData[i] = cty.NumberIntVal(int64(d)) + } + return cty.ListVal(ctyData) +} diff --git a/pkg/encryption/keyprovider/output_test.go b/pkg/encryption/keyprovider/output_test.go new file mode 100644 index 00000000000..3f92a21d970 --- /dev/null +++ b/pkg/encryption/keyprovider/output_test.go @@ -0,0 +1,94 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package keyprovider_test + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/zclconf/go-cty/cty" +) + +func TestOutputCty(t *testing.T) { + testCases := map[string]struct { + output keyprovider.Output + expectedOutput cty.Value + }{ + "empty": { + output: keyprovider.Output{}, + expectedOutput: cty.ObjectVal(map[string]cty.Value{ + "encryption_key": cty.NullVal(cty.List(cty.Number)), + "decryption_key": cty.NullVal(cty.List(cty.Number)), + }), + }, + "encryption-key-only": { + output: keyprovider.Output{ + EncryptionKey: []byte("Hello world!"), + }, + expectedOutput: cty.ObjectVal(map[string]cty.Value{ + "encryption_key": cty.ListVal([]cty.Value{ + cty.NumberIntVal(int64('H')), + cty.NumberIntVal(int64('e')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64(' ')), + cty.NumberIntVal(int64('w')), + cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64('r')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('d')), + cty.NumberIntVal(int64('!')), + }), + "decryption_key": cty.NullVal(cty.List(cty.Number)), + }), + }, + "both-keys": { + output: keyprovider.Output{ + EncryptionKey: []byte("Hello world!"), + DecryptionKey: []byte("Hello world!"), + }, + expectedOutput: cty.ObjectVal(map[string]cty.Value{ + "encryption_key": cty.ListVal([]cty.Value{ + cty.NumberIntVal(int64('H')), + cty.NumberIntVal(int64('e')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64(' ')), + cty.NumberIntVal(int64('w')), + cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64('r')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('d')), + cty.NumberIntVal(int64('!')), + }), + "decryption_key": cty.ListVal([]cty.Value{ + cty.NumberIntVal(int64('H')), + cty.NumberIntVal(int64('e')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('l')), + 
cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64(' ')), + cty.NumberIntVal(int64('w')), + cty.NumberIntVal(int64('o')), + cty.NumberIntVal(int64('r')), + cty.NumberIntVal(int64('l')), + cty.NumberIntVal(int64('d')), + cty.NumberIntVal(int64('!')), + }), + }), + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + val := tc.output.Cty() + if !val.Equals(tc.expectedOutput).True() { + t.Fatalf("Incorrect cty output value:\n%v\nexpected:\n%v)", val, tc.expectedOutput) + } + }) + } +} diff --git a/pkg/encryption/keyprovider/pbkdf2/README.md b/pkg/encryption/keyprovider/pbkdf2/README.md new file mode 100644 index 00000000000..ab7d7337091 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/README.md @@ -0,0 +1,35 @@ +# PBKDF passphrase key provider + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +This folder contains the code for the PBKDF2 passphrase key provider. The user can enter a passphrase and the key provider will generate `[]byte` keys of a given length and will record the salt in the encryption metadata. + +## Configuration + +You can configure this key provider by specifying the following options: + +```hcl2 +terraform { + encryption { + key_provider "pbkdf2" "myprovider" { + passphrase = "enter a long and complex passphrase here" + + # Adapt the key length to your encryption method needs, + # check the method documentation for the right key length + key_length = 32 + + # Provide the number of iterations that should be performed. + # See https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + # for recommendations + iterations = 600000 + + # Pick the hashing function. Can be sha256 or sha512. + hash_function = "sha512" + + # Pick the salt length in bytes. 
+ salt_length = 32 + } + } +} +``` \ No newline at end of file diff --git a/pkg/encryption/keyprovider/pbkdf2/compliance_test.go b/pkg/encryption/keyprovider/pbkdf2/compliance_test.go new file mode 100644 index 00000000000..050852d9914 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/compliance_test.go @@ -0,0 +1,229 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "crypto/rand" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/compliancetest" +) + +func TestCompliance(t *testing.T) { + validConfig := &Config{ + randomSource: rand.Reader, + Passphrase: "Hello world! 123", + KeyLength: DefaultKeyLength, + Iterations: DefaultIterations, + HashFunction: SHA256HashFunctionName, + SaltLength: DefaultSaltLength, + } + compliancetest.ComplianceTest( + t, + compliancetest.TestConfiguration[*descriptor, *Config, *Metadata, *pbkdf2KeyProvider]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *pbkdf2KeyProvider]{ + "empty": { + HCL: `key_provider "pbkdf2" "foo" { +}`, + ValidHCL: false, + ValidBuild: false, + Validate: nil, + }, + "basic": { + HCL: `key_provider "pbkdf2" "foo" { + passphrase = "Hello world! 123" +}`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, keyProvider *pbkdf2KeyProvider) error { + if config.Passphrase != "Hello world! 123" { + return fmt.Errorf("invalid passphrase after HCL parsing") + } + if keyProvider.Passphrase != "Hello world! 123" { + return fmt.Errorf("invalid passphrase in key provideer") + } + return nil + }, + }, + "extended": { + HCL: fmt.Sprintf(`key_provider "pbkdf2" "foo" { + passphrase = "Hello world! 
123" + key_length = %d + iterations = %d + salt_length = %d + hash_function = "%s" +}`, DefaultKeyLength+1, DefaultIterations+1, DefaultSaltLength+1, SHA256HashFunctionName), + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, keyProvider *pbkdf2KeyProvider) error { + if config.KeyLength != DefaultKeyLength+1 { + return fmt.Errorf("incorrect key length after HCL parsing: %d", config.KeyLength) + } + if config.Iterations != DefaultIterations+1 { + return fmt.Errorf("incorrect iterations after HCL parsing: %d", config.Iterations) + } + if config.SaltLength != DefaultSaltLength+1 { + return fmt.Errorf("incorrect salt length after HCL parsing: %d", config.SaltLength) + } + if config.HashFunction != SHA256HashFunctionName { + return fmt.Errorf("incorrect hash function after HCL parsing: %s", config.HashFunction) + } + return nil + }, + }, + "short-passphrase": { + HCL: `key_provider "pbkdf2" "foo" { + passphrase = "Hello world! 12" +}`, + ValidHCL: true, + ValidBuild: false, + }, + "too-small-iterations": { + HCL: fmt.Sprintf(`key_provider "pbkdf2" "foo" { + passphrase = "Hello world! 123" + iterations = %d +}`, MinimumIterations-1), + ValidHCL: true, + ValidBuild: false, + }, + "invalid-hash-function": { + HCL: `key_provider "pbkdf2" "foo" { + passphrase = "Hello world! 
123" + hash_function = "non_existent" +}`, + ValidHCL: true, + ValidBuild: false, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *pbkdf2KeyProvider]{}, + MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, *Metadata]{ + "not-present-salt": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: nil, + Iterations: DefaultIterations, + HashFunction: SHA256HashFunctionName, + KeyLength: 32, + }, + IsPresent: false, + }, + "not-present-iterations": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: 0, + HashFunction: SHA256HashFunctionName, + KeyLength: 32, + }, + IsPresent: false, + }, + "not-present-hash-func": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: DefaultIterations, + HashFunction: "", + KeyLength: 32, + }, + IsPresent: false, + }, + "not-present-key-length": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: DefaultIterations, + HashFunction: SHA256HashFunctionName, + KeyLength: 0, + }, + IsPresent: false, + }, + "present-valid": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: DefaultIterations, + HashFunction: SHA256HashFunctionName, + KeyLength: 32, + }, + IsPresent: true, + IsValid: true, + }, + "present-valid-too-few-iterations": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: MinimumIterations - 1, + HashFunction: SHA256HashFunctionName, + KeyLength: 32, + }, + IsPresent: true, + IsValid: true, + }, + "invalid-iterations": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: -1, + HashFunction: SHA256HashFunctionName, + KeyLength: 32, + }, + IsPresent: true, + IsValid: false, + }, + "invalid-salt-length": { + ValidConfig: validConfig, + Meta: &Metadata{ + Salt: []byte("Hello world!"), + Iterations: 
DefaultIterations, + HashFunction: SHA256HashFunctionName, + KeyLength: -1, + }, + IsPresent: true, + IsValid: false, + }, + }, + ProvideTestCase: compliancetest.ProvideTestCase[*Config, *Metadata]{ + ValidConfig: &Config{ + randomSource: &testRandomSource{t: t}, + Passphrase: "Hello world! 123", + KeyLength: DefaultKeyLength, + Iterations: DefaultIterations, + HashFunction: DefaultHashFunctionName, + SaltLength: DefaultSaltLength, + }, + ExpectedOutput: &keyprovider.Output{ + EncryptionKey: []byte{87, 192, 98, 53, 186, 42, 63, 139, 58, 118, 223, 169, 46, 84, 139, 29, 130, 59, 247, 106, 82, 61, 235, 144, 97, 131, 60, 229, 195, 109, 81, 111}, + DecryptionKey: []byte{87, 192, 98, 53, 186, 42, 63, 139, 58, 118, 223, 169, 46, 84, 139, 29, 130, 59, 247, 106, 82, 61, 235, 144, 97, 131, 60, 229, 195, 109, 81, 111}, + }, + ValidateKeys: nil, + ValidateMetadata: func(meta *Metadata) error { + if !meta.isPresent() { + return fmt.Errorf("output metadata is not present") + } + if err := meta.validate(); err != nil { + return err + } + if meta.KeyLength != DefaultKeyLength { + return fmt.Errorf("incorrect output metadata key length: %d", meta.KeyLength) + } + if meta.Iterations != DefaultIterations { + return fmt.Errorf("incorrect output metadata iterations: %d", meta.Iterations) + } + if len(meta.Salt) != DefaultSaltLength { + return fmt.Errorf("incorrect output salt length: %d", len(meta.Salt)) + } + if meta.HashFunction != DefaultHashFunctionName { + return fmt.Errorf("incorrect output hash function name: %s", meta.HashFunction) + } + return nil + }, + }, + }, + ) +} diff --git a/pkg/encryption/keyprovider/pbkdf2/config.go b/pkg/encryption/keyprovider/pbkdf2/config.go new file mode 100644 index 00000000000..c8ea41bc923 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/config.go @@ -0,0 +1,130 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "fmt" + "hash" + "io" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +// HashFunction is a provider of a hash.Hash. +type HashFunction func() hash.Hash + +// HashFunctionName describes a hash function to use for PBKDF2 hash generation. While you could theoretically supply +// your own from outside the package, please don't do that. Include your hash function in this package. (Thanks Go for +// the lack of visibility constraints.) +type HashFunctionName string + +// Validate checks if the specified hash function name is valid. +func (h HashFunctionName) Validate() error { + if h == "" { + return &keyprovider.ErrInvalidConfiguration{Message: "please specify a hash function"} + } + if _, ok := hashFunctions[h]; !ok { + return &keyprovider.ErrInvalidConfiguration{Message: fmt.Sprintf("invalid hash function name: %s", h)} + } + return nil +} + +// Function returns the underlying hash function for the name. +func (h HashFunctionName) Function() HashFunction { + return hashFunctions[h] +} + +type Config struct { + // Set by the descriptor. + randomSource io.Reader + + Passphrase string `hcl:"passphrase"` + KeyLength int `hcl:"key_length,optional"` + Iterations int `hcl:"iterations,optional"` + HashFunction HashFunctionName `hcl:"hash_function,optional"` + SaltLength int `hcl:"salt_length,optional"` +} + +// WithPassphrase adds the passphrase and returns the same config for chaining. 
+func (c *Config) WithPassphrase(passphrase string) *Config { + c.Passphrase = passphrase + return c +} + +// WithKeyLength sets the key length and returns the same config for chaining +func (c *Config) WithKeyLength(length int) *Config { + c.KeyLength = length + return c +} + +// WithIterations sets the iterations and returns the same config for chaining +func (c *Config) WithIterations(iterations int) *Config { + c.Iterations = iterations + return c +} + +// WithSaltLength sets the salt length and returns the same config for chaining +func (c *Config) WithSaltLength(length int) *Config { + c.SaltLength = length + return c +} + +// WithHashFunction sets the hash function and returns the same config for chaining +func (c *Config) WithHashFunction(hashFunction HashFunctionName) *Config { + c.HashFunction = hashFunction + return c +} + +func (c *Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) { + if c.randomSource == nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "missing randomness source (please don't initialize the Config struct directly, use the descriptor)", + } + } + + if c.Passphrase == "" { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "no passphrase provided", + } + } + + if len(c.Passphrase) < MinimumPassphraseLength { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: fmt.Sprintf("passphrase is too short (minimum %d characters)", MinimumPassphraseLength), + } + } + + if c.KeyLength <= 0 { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "the key length must be larger than zero", + } + } + + if c.Iterations <= 0 { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "the number of iterations must be larger than zero", + } + } + if c.Iterations < MinimumIterations { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: fmt.Sprintf("the number of iterations is dangerously low (<%d), refusing to generate key", 
MinimumIterations), + } + } + + if c.SaltLength <= 0 { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "the salt length must be larger than zero", + } + } + + if err := c.HashFunction.Validate(); err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Cause: err, + } + } + + return &pbkdf2KeyProvider{*c}, new(Metadata), nil +} diff --git a/pkg/encryption/keyprovider/pbkdf2/config_constants.go b/pkg/encryption/keyprovider/pbkdf2/config_constants.go new file mode 100644 index 00000000000..53e04243bc9 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/config_constants.go @@ -0,0 +1,27 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "crypto/sha256" + "crypto/sha512" +) + +const ( + SHA256HashFunctionName HashFunctionName = "sha256" + SHA512HashFunctionName HashFunctionName = "sha512" + DefaultHashFunctionName HashFunctionName = SHA512HashFunctionName +) + +var hashFunctions = map[HashFunctionName]HashFunction{ + SHA256HashFunctionName: sha256.New, + SHA512HashFunctionName: sha512.New, +} + +const ( + MinimumIterations int = 200000 + MinimumPassphraseLength int = 16 +) diff --git a/pkg/encryption/keyprovider/pbkdf2/config_test.go b/pkg/encryption/keyprovider/pbkdf2/config_test.go new file mode 100644 index 00000000000..afa6864a935 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/config_test.go @@ -0,0 +1,109 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2_test + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/pbkdf2" +) + +func TestHashFunctionName_Validate(t *testing.T) { + tc := map[string]struct { + hashFunctionName pbkdf2.HashFunctionName + valid bool + }{ + "empty": { + hashFunctionName: "", + valid: false, + }, + "sha256": { + hashFunctionName: pbkdf2.SHA256HashFunctionName, + valid: true, + }, + "sha0": { + hashFunctionName: "sha0", + valid: false, + }, + } + + for name, testCase := range tc { + t.Run(name, func(t *testing.T) { + err := testCase.hashFunctionName.Validate() + if testCase.valid && err != nil { + t.Fatalf("unexpected error: %v", err) + } else if !testCase.valid && err == nil { + t.Fatalf("expected error") + } + }) + } +} + +func generateFixedStringHelper(length int) string { + result := "" + for i := 0; i < length; i++ { + result += "a" + } + return result +} + +func TestConfig_Build(t *testing.T) { + knownGood := func() *pbkdf2.Config { + return pbkdf2.New().TypedConfig().WithPassphrase(generateFixedStringHelper(pbkdf2.MinimumPassphraseLength)) + } + tc := map[string]struct { + config *pbkdf2.Config + valid bool + }{ + "empty": { + config: &pbkdf2.Config{}, + valid: false, + }, + "default": { + // Missing passphrase + config: pbkdf2.New().ConfigStruct().(*pbkdf2.Config), + valid: false, + }, + "default-short-passphrase": { + config: pbkdf2.New().TypedConfig().WithPassphrase(generateFixedStringHelper(pbkdf2.MinimumPassphraseLength - 1)), + valid: false, + }, + "default-good-passphrase": { + config: knownGood(), + valid: true, + }, + "invalid-key-length": { + config: knownGood().WithKeyLength(0), + valid: false, + }, + "invalid-iterations": { + config: knownGood().WithIterations(0), + valid: false, + }, + "low-iterations": { + config: knownGood().WithIterations(pbkdf2.MinimumIterations - 1), + valid: false, + }, + "invalid-salt-length": { + config: knownGood().WithSaltLength(0), + valid: false, + }, + 
"invalid-hash-function": { + config: knownGood().WithHashFunction(""), + valid: false, + }, + } + for name, testCase := range tc { + t.Run(name, func(t *testing.T) { + _, _, err := testCase.config.Build() + if testCase.valid && err != nil { + t.Fatalf("unexpected error: %v", err) + } else if !testCase.valid && err == nil { + t.Fatalf("expected error") + } + }) + } +} diff --git a/pkg/encryption/keyprovider/pbkdf2/descriptor.go b/pkg/encryption/keyprovider/pbkdf2/descriptor.go new file mode 100644 index 00000000000..fc451459213 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/descriptor.go @@ -0,0 +1,61 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "crypto/rand" + "io" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +const ( + // DefaultSaltLength specifies the default salt length in bytes. + DefaultSaltLength int = 32 + // DefaultIterations contains the default iterations to use. The number is set to the current recommendations + // outlined here: + // https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + DefaultIterations int = 600000 + // DefaultKeyLength is the default output length. We set it to the key length required by AES-GCM 256 + DefaultKeyLength int = 32 +) + +// New creates a new PBKDF2 key provider descriptor. +func New() Descriptor { + return &descriptor{ + randomSource: rand.Reader, + } +} + +// Descriptor provides TypedConfig on top of keyprovider.Descriptor. 
+type Descriptor interface { + keyprovider.Descriptor + + TypedConfig() *Config +} + +type descriptor struct { + randomSource io.Reader +} + +func (f descriptor) ID() keyprovider.ID { + return "pbkdf2" +} + +func (f descriptor) TypedConfig() *Config { + return &Config{ + randomSource: f.randomSource, + Passphrase: "", + KeyLength: DefaultKeyLength, + Iterations: DefaultIterations, + HashFunction: DefaultHashFunctionName, + SaltLength: DefaultSaltLength, + } +} + +func (f descriptor) ConfigStruct() keyprovider.Config { + return f.TypedConfig() +} diff --git a/pkg/encryption/keyprovider/pbkdf2/descriptor_test.go b/pkg/encryption/keyprovider/pbkdf2/descriptor_test.go new file mode 100644 index 00000000000..b378f9c6f70 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/descriptor_test.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2_test + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/pbkdf2" +) + +func TestDescriptor_ID(t *testing.T) { + if id := pbkdf2.New().ID(); id != "pbkdf2" { + t.Fatalf("incorrect ID: %s", id) + } +} diff --git a/pkg/encryption/keyprovider/pbkdf2/example_decrypt_test.go b/pkg/encryption/keyprovider/pbkdf2/example_decrypt_test.go new file mode 100644 index 00000000000..5402d19f187 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/example_decrypt_test.go @@ -0,0 +1,64 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2_test + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/pbkdf2" + + "github.com/kubegems/opentofu/pkg/encryption/config" +) + +var configuration = `key_provider "pbkdf2" "foo" { + passphrase = "correct-horse-battery-staple" +} +` + +// This example is a bare-bones configuration for a static key provider. +// It is mainly intended to demonstrate how you can use parse configuration +// and construct a static key provider from it. +func Example_decrypt() { + configStruct := pbkdf2.New().ConfigStruct() + + // Parse the config: + parsedConfig, diags := config.LoadConfigFromString("config.hcl", configuration) + if diags.HasErrors() { + panic(diags) + } + + // Use gohcl to parse the hcl block from parsedConfig into the static configuration struct: + if err := gohcl.DecodeBody( + parsedConfig.KeyProviderConfigs[0].Body, + nil, + configStruct, + ); err != nil { + panic(err) + } + + // Create the actual key provider. + keyProvider, keyMeta, err := configStruct.Build() + if err != nil { + panic(err) + } + + // Fill in the metadata stored with the encrypted form: + meta := keyMeta.(*pbkdf2.Metadata) + meta.Salt = []byte{0x10, 0xec, 0x3d, 0x3f, 0xe0, 0x2a, 0xd2, 0xbe, 0xe6, 0xf1, 0xf5, 0x54, 0xf, 0x8e, 0x6b, 0xbe, 0x3b, 0x8b, 0x29, 0x44, 0x5c, 0xf5, 0x2, 0xd2, 0x7d, 0x47, 0xad, 0x55, 0x4a, 0xa8, 0x97, 0x1f} + meta.Iterations = 600000 + meta.HashFunction = "sha512" + meta.KeyLength = 32 + + // Get decryption key from the provider. 
+ keys, _, err := keyProvider.Provide(meta) + if err != nil { + panic(err) + } + + fmt.Printf("%x", keys.DecryptionKey) + // Output: 225872367198760137e0a18580433447bbf578fbe2b87ff36aef3c175fe5709c +} diff --git a/pkg/encryption/keyprovider/pbkdf2/example_metadata_test.go b/pkg/encryption/keyprovider/pbkdf2/example_metadata_test.go new file mode 100644 index 00000000000..122c8bdf971 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/example_metadata_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2_test + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/pbkdf2" +) + +var metadataExampleConfiguration = `key_provider "pbkdf2" "foo" { + passphrase = "correct-horse-battery-staple" +} +` + +func ExampleMetadata() { + configStruct := pbkdf2.New().ConfigStruct() + + // Parse the config: + parsedConfig, diags := config.LoadConfigFromString("config.hcl", metadataExampleConfiguration) + if diags.HasErrors() { + panic(diags) + } + + // Use gohcl to parse the hcl block from parsedConfig into the static configuration struct: + if err := gohcl.DecodeBody( + parsedConfig.KeyProviderConfigs[0].Body, + nil, + configStruct, + ); err != nil { + panic(err) + } + + // Create the actual key provider. 
+ keyProvider, keyMeta, err := configStruct.Build() + if err != nil { + panic(err) + } + + // The first time around, let's get an encryption key: + oldKeys, oldMeta, err := keyProvider.Provide(keyMeta) + if err != nil { + panic(err) + } + + // The second time, you can pass in the metadata from the previous encryption: + newKeys, _, err := keyProvider.Provide(oldMeta) + if err != nil { + panic(err) + } + + // The old encryption and new decryption key will be the same: + if bytes.Equal(oldKeys.EncryptionKey, newKeys.DecryptionKey) { + fmt.Println("The keys match!") + } + //Output: The keys match! +} diff --git a/pkg/encryption/keyprovider/pbkdf2/metadata.go b/pkg/encryption/keyprovider/pbkdf2/metadata.go new file mode 100644 index 00000000000..70fff2de3a1 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/metadata.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +// Metadata describes the metadata to be stored alongside the encrypted form. 
+type Metadata struct { + Salt []byte `json:"salt"` + Iterations int `json:"iterations"` + HashFunction HashFunctionName `json:"hash_function"` + KeyLength int `json:"key_length"` +} + +func (m Metadata) isPresent() bool { + return len(m.Salt) != 0 && m.Iterations != 0 && m.HashFunction != "" && m.KeyLength != 0 +} + +func (m Metadata) validate() error { + if m.Iterations < 0 { + return &keyprovider.ErrInvalidMetadata{ + Message: fmt.Sprintf("invalid number of iterations (%d)", m.Iterations), + } + } + if m.KeyLength < 0 { + return &keyprovider.ErrInvalidMetadata{ + Message: fmt.Sprintf("invalid key length (%d)", m.KeyLength), + } + } + if m.HashFunction != "" { + if err := m.HashFunction.Validate(); err != nil { + return &keyprovider.ErrInvalidMetadata{ + Message: "invalid hash function name", + Cause: err, + } + } + } + return nil +} diff --git a/pkg/encryption/keyprovider/pbkdf2/metadata_test.go b/pkg/encryption/keyprovider/pbkdf2/metadata_test.go new file mode 100644 index 00000000000..a7c34ae3849 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/metadata_test.go @@ -0,0 +1,71 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import "testing" + +func TestMetadata_validate(t *testing.T) { + for name, tc := range map[string]struct { + meta Metadata + present bool + valid bool + }{ + "empty": { + Metadata{}, + false, + true, + }, + "invalid-iterations": { + Metadata{ + Iterations: -1, + KeyLength: 1, + HashFunction: SHA256HashFunctionName, + Salt: []byte("Hello world!"), + }, + true, + false, + }, + "invalid-keylength": { + Metadata{ + Iterations: 1, + KeyLength: -1, + HashFunction: SHA256HashFunctionName, + Salt: []byte("Hello world!"), + }, + true, + false, + }, + "invalid-hashfunction": { + Metadata{ + Iterations: 1, + KeyLength: 1, + HashFunction: "sha0", + Salt: []byte("Hello world!"), + }, + true, + false, + }, + "no-salt": { + Metadata{ + Iterations: 1, + KeyLength: 1, + HashFunction: SHA256HashFunctionName, + Salt: []byte{}, + }, + false, + true, + }, + } { + t.Run(name, func(t *testing.T) { + if present := tc.meta.isPresent(); present != tc.present { + t.Fatalf("incorrect value for 'present': %t", present) + } + if err := tc.meta.validate(); (err == nil) != tc.valid { + t.Fatalf("incorrect return value from 'validate': %v", err) + } + }) + } +} diff --git a/pkg/encryption/keyprovider/pbkdf2/provider.go b/pkg/encryption/keyprovider/pbkdf2/provider.go new file mode 100644 index 00000000000..3d3baafd4d2 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/provider.go @@ -0,0 +1,80 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package pbkdf2 contains a key provider that takes a passphrase and emits a PBKDF2 hash of the configured length. 
+package pbkdf2 + +import ( + "fmt" + "io" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + + goPBKDF2 "golang.org/x/crypto/pbkdf2" +) + +type pbkdf2KeyProvider struct { + Config +} + +func (p pbkdf2KeyProvider) generateMetadata() (*Metadata, error) { + // Build outMeta based on current configuration + outMeta := &Metadata{ + Iterations: p.Iterations, + HashFunction: p.HashFunction, + Salt: make([]byte, p.SaltLength), + KeyLength: p.KeyLength, + } + // Generate new salt + if _, err := io.ReadFull(p.randomSource, outMeta.Salt); err != nil { + return nil, &keyprovider.ErrKeyProviderFailure{ + Message: fmt.Sprintf("failed to obtain %d bytes of random data", p.SaltLength), + Cause: err, + } + } + return outMeta, nil +} + +func (p pbkdf2KeyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) { + if rawMeta == nil { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: no metadata struct provided"} + } + inMeta, ok := rawMeta.(*Metadata) + if !ok { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: fmt.Sprintf("bug: incorrect metadata type of %T provided", rawMeta), + } + } + + outMeta, err := p.generateMetadata() + if err != nil { + return keyprovider.Output{}, nil, err + } + + var decryptionKey []byte + if inMeta.isPresent() { + if err := inMeta.validate(); err != nil { + return keyprovider.Output{}, nil, err + } + decryptionKey = goPBKDF2.Key( + []byte(p.Passphrase), + inMeta.Salt, + inMeta.Iterations, + inMeta.KeyLength, + inMeta.HashFunction.Function(), + ) + } + + return keyprovider.Output{ + EncryptionKey: goPBKDF2.Key( + []byte(p.Passphrase), + outMeta.Salt, + outMeta.Iterations, + outMeta.KeyLength, + outMeta.HashFunction.Function(), + ), + DecryptionKey: decryptionKey, + }, outMeta, nil +} diff --git a/pkg/encryption/keyprovider/pbkdf2/provider_test.go b/pkg/encryption/keyprovider/pbkdf2/provider_test.go new file mode 100644 index 
00000000000..44ce263a619 --- /dev/null +++ b/pkg/encryption/keyprovider/pbkdf2/provider_test.go @@ -0,0 +1,97 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestPbkdf2KeyProvider_generateMetadata(t *testing.T) { + provider := pbkdf2KeyProvider{ + Config{ + randomSource: testRandomSource{t}, + Passphrase: "Hello world!", + KeyLength: 32, + Iterations: MinimumIterations, + HashFunction: SHA256HashFunctionName, + SaltLength: 12, + }, + } + metadata, err := provider.generateMetadata() + if err != nil { + t.Fatalf("%v", err) + } + + if len(metadata.Salt) != 12 { + t.Fatalf("Invalid generated salt length: %d", len(metadata.Salt)) + } + // This is read from the random source, which is the test function name in this case. + // Note: this relies on the internal behavior of generateMetadata, but it's a non-exported + // function, so in this case that's acceptable. 
+	if !bytes.Equal(metadata.Salt, []byte("TestPbkdf2Ke")) {
+		t.Fatalf("Invalid generated salt: %s", metadata.Salt)
+	}
+
+	if metadata.KeyLength != 32 {
+		t.Fatalf("Invalid key length: %d", metadata.KeyLength)
+	}
+	if metadata.Iterations != MinimumIterations {
+		t.Fatalf("Invalid iterations: %d", metadata.Iterations)
+	}
+	if metadata.HashFunction != SHA256HashFunctionName {
+		t.Fatalf("Invalid hash function name: %s", metadata.HashFunction)
+	}
+}
+
+type badReader struct{}
+
+func (b badReader) Read(target []byte) (int, error) {
+	return 0, io.EOF
+}
+
+func TestBadReader(t *testing.T) {
+	provider := pbkdf2KeyProvider{
+		Config{
+			randomSource: badReader{},
+			Passphrase:   "Hello world!",
+			KeyLength:    32,
+			Iterations:   MinimumIterations,
+			HashFunction: SHA256HashFunctionName,
+			SaltLength:   12,
+		},
+	}
+
+	if _, err := provider.generateMetadata(); err == nil {
+		t.Fatalf("expected error")
+	}
+
+	if _, _, err := provider.Provide(&Metadata{}); err == nil {
+		t.Fatalf("expected error")
+	}
+}
+
+func TestKeyLength(t *testing.T) {
+	provider := pbkdf2KeyProvider{
+		Config{
+			randomSource: rand.Reader,
+			Passphrase:   "Hello world!",
+			KeyLength:    128,
+			Iterations:   MinimumIterations,
+			HashFunction: SHA256HashFunctionName,
+			SaltLength:   12,
+		},
+	}
+	keys, _, err := provider.Provide(&Metadata{})
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+	if length := len(keys.EncryptionKey); length != 128 {
+		t.Fatalf("incorrect key length: %d", length)
+	}
+}
diff --git a/pkg/encryption/keyprovider/pbkdf2/random_source.go b/pkg/encryption/keyprovider/pbkdf2/random_source.go
new file mode 100644
index 00000000000..d55de239c96
--- /dev/null
+++ b/pkg/encryption/keyprovider/pbkdf2/random_source.go
@@ -0,0 +1,21 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package pbkdf2 + +import "testing" + +// testRandomSource is a predictable reader that outputs the test name as a source of randomness. +type testRandomSource struct { + t *testing.T +} + +func (t testRandomSource) Read(target []byte) (int, error) { + name := t.t.Name() + for i := 0; i < len(target); i++ { + target[i] = name[i%len(name)] + } + return len(target), nil +} diff --git a/pkg/encryption/keyprovider/static/README.md b/pkg/encryption/keyprovider/static/README.md new file mode 100644 index 00000000000..e61d47b5483 --- /dev/null +++ b/pkg/encryption/keyprovider/static/README.md @@ -0,0 +1,9 @@ +# Example static key provider + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +> [!WARNING] +> This provider is not intended for production use and merely serves as a simple example! + +This folder contains a key provider that accepts a static, hex-encoded key. Its only purpose is to serve as a provider for tests and as a demonstration on implementing a key provider. \ No newline at end of file diff --git a/pkg/encryption/keyprovider/static/config.go b/pkg/encryption/keyprovider/static/config.go new file mode 100644 index 00000000000..9d662a1f73a --- /dev/null +++ b/pkg/encryption/keyprovider/static/config.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package static + +import ( + "encoding/hex" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +// Config contains the configuration for this key provider supplied by the user. This struct must have hcl tags in order +// to function. +type Config struct { + Key string `hcl:"key"` +} + +// Build will create the usable key provider. 
+func (c Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) { + if c.Key == "" { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "Missing key", + } + } + + decodedData, err := hex.DecodeString(c.Key) + if err != nil { + return nil, nil, &keyprovider.ErrInvalidConfiguration{ + Message: "failed to hex-decode the provided key", + Cause: err, + } + } + + return &staticKeyProvider{decodedData}, new(Metadata), nil +} diff --git a/pkg/encryption/keyprovider/static/config_test.go b/pkg/encryption/keyprovider/static/config_test.go new file mode 100644 index 00000000000..c11990dbcd2 --- /dev/null +++ b/pkg/encryption/keyprovider/static/config_test.go @@ -0,0 +1,65 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package static_test + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/gohcl" + + config2 "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/static" +) + +var exampleConfig = `key_provider "static" "foo" { + key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169" +} +` + +// This example is a bare-bones configuration for a static key provider. +// It is mainly intended to demonstrate how you can use parse configuration +// and construct a static key provider from in. +// And is not intended to be used as a real-world example. 
+func ExampleConfig() { + staticConfig := static.New().ConfigStruct() + + // Parse the config: + parsedConfig, diags := config2.LoadConfigFromString("config.hcl", exampleConfig) + if diags.HasErrors() { + panic(diags) + } + + if len(parsedConfig.KeyProviderConfigs) != 1 { + panic("Expected 1 key provider") + } + // Grab the KeyProvider from the parsed config: + keyProvider := parsedConfig.KeyProviderConfigs[0] + + // assert the Type is "static" and the Name is "foo" + if keyProvider.Type != "static" { + panic("Expected key provider type to be 'static'") + } + if keyProvider.Name != "foo" { + panic("Expected key provider name to be 'foo'") + } + + // Use gohcl to parse the hcl block from parsedConfig into the static configuration struct + // This is not the intended path, and it should be handled by the implementation of the Encryption + // interface. + // + // This is just an example of how to use the static configuration struct, and this is how testing + // may be carried out. + if err := gohcl.DecodeBody(parsedConfig.KeyProviderConfigs[0].Body, nil, staticConfig); err != nil { + panic(err) + } + + // Cast the static configuration struct to a static.Config so that we can assert against the key + // value + s := staticConfig.(*static.Config) + + fmt.Printf("%s\n", s.Key) + // Output: 6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169 +} diff --git a/pkg/encryption/keyprovider/static/descriptor.go b/pkg/encryption/keyprovider/static/descriptor.go new file mode 100644 index 00000000000..10cc4c1f3c3 --- /dev/null +++ b/pkg/encryption/keyprovider/static/descriptor.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package static + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +func New() Descriptor { + return &descriptor{} +} + +// Descriptor is an additional interface to allow for providing custom methods. +type Descriptor interface { + keyprovider.Descriptor +} + +type descriptor struct { +} + +func (f descriptor) ID() keyprovider.ID { + return "static" +} + +func (f descriptor) ConfigStruct() keyprovider.Config { + return &Config{} +} diff --git a/pkg/encryption/keyprovider/static/example_test.go b/pkg/encryption/keyprovider/static/example_test.go new file mode 100644 index 00000000000..43e7ada179f --- /dev/null +++ b/pkg/encryption/keyprovider/static/example_test.go @@ -0,0 +1,70 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package static_test + +import ( + "fmt" + "strings" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/static" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" + "github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry" +) + +var hclConfig = `key_provider "static" "foo" { + key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169" +} + +method "aes_gcm" "bar" { + keys = key_provider.static.foo +} + +plan { + method = method.aes_gcm.bar +} +` + +// Example is a full end-to-end example of encrypting and decrypting a plan file. 
+func Example() { + registry := lockingencryptionregistry.New() + if err := registry.RegisterKeyProvider(static.New()); err != nil { + panic(err) + } + if err := registry.RegisterMethod(aesgcm.New()); err != nil { + panic(err) + } + + cfg, diags := config.LoadConfigFromString("test.hcl", hclConfig) + if diags.HasErrors() { + panic(diags) + } + + staticEvaluator := configs.NewStaticEvaluator(nil, configs.RootModuleCallForTesting()) + + enc, diags := encryption.New(registry, cfg, staticEvaluator) + if diags.HasErrors() { + panic(diags) + } + + encryptor := enc.Plan() + + encryptedPlan, err := encryptor.EncryptPlan([]byte("Hello world!")) + if err != nil { + panic(err) + } + if strings.Contains(string(encryptedPlan), "Hello world!") { + panic("The plan was not encrypted!") + } + decryptedPlan, err := encryptor.DecryptPlan(encryptedPlan) + if err != nil { + panic(err) + } + fmt.Printf("%s", decryptedPlan) + // Output: Hello world! +} diff --git a/pkg/encryption/keyprovider/static/meta.go b/pkg/encryption/keyprovider/static/meta.go new file mode 100644 index 00000000000..678da36f740 --- /dev/null +++ b/pkg/encryption/keyprovider/static/meta.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package static + +type Metadata struct { + Magic string `json:"magic"` +} diff --git a/pkg/encryption/keyprovider/static/provider.go b/pkg/encryption/keyprovider/static/provider.go new file mode 100644 index 00000000000..34e38d234f8 --- /dev/null +++ b/pkg/encryption/keyprovider/static/provider.go @@ -0,0 +1,52 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package static contains a key provider that emits a static key. 
+package static + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +type staticKeyProvider struct { + key []byte +} + +const magic = "Hello world!" + +func (p staticKeyProvider) Provide(meta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) { + // Note: this is a demonstration how you can handle metadata. Using a magic string does not make any sense, + // but it illustrates well how you can store and retrieve metadata. We wish we could use generics to + // save you the trouble of doing a type assertion, but Go does not have sufficiently advanced enough generics + // to do that. + if meta == nil { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: "bug: nil provided as metadata", + } + } + typedMeta, ok := meta.(*Metadata) + if !ok { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: fmt.Sprintf("bug: invalid metadata type received: %T", meta), + } + } + // Note: the Magic may be empty if OpenTofu isn't decrypting anything, make sure to account for that possibility. + var decryptionKey []byte + if typedMeta.Magic != "" { + decryptionKey = p.key + if typedMeta.Magic != magic { + return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{ + Message: fmt.Sprintf("corrupted data received, no or invalid magic string: %s", typedMeta.Magic), + } + } + } + + return keyprovider.Output{ + EncryptionKey: p.key, + DecryptionKey: decryptionKey, + }, &Metadata{Magic: magic}, nil +} diff --git a/pkg/encryption/keyprovider/static/provider_test.go b/pkg/encryption/keyprovider/static/provider_test.go new file mode 100644 index 00000000000..ff97c7d3c70 --- /dev/null +++ b/pkg/encryption/keyprovider/static/provider_test.go @@ -0,0 +1,117 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package static + +import ( + "bytes" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider/compliancetest" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" +) + +func TestKeyProvider(t *testing.T) { + compliancetest.ComplianceTest( + t, + compliancetest.TestConfiguration[*descriptor, *Config, *Metadata, *staticKeyProvider]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *staticKeyProvider]{ + "success": { + HCL: `key_provider "static" "foo" { + key = "48656c6c6f20776f726c6421" +}`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, keyProvider *staticKeyProvider) error { + if config.Key != "48656c6c6f20776f726c6421" { + return fmt.Errorf("incorrect key returned") + } + if !bytes.Equal(keyProvider.key, []byte("Hello world!")) { + return fmt.Errorf("key provider contains invalid key") + } + return nil + }, + }, + "empty": { + HCL: `key_provider "static" "foo" {}`, + ValidHCL: false, + ValidBuild: false, + }, + "bad-hex": { + HCL: `key_provider "static" "foo" { + key = "G" +}`, + ValidHCL: true, + ValidBuild: false, + }, + "bad-argument": { + HCL: `key_provider "static" "foo" { + keys = "48656c6c6f20776f726c6421" # Note the incorrect key name +}`, + ValidHCL: false, + ValidBuild: false, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *staticKeyProvider]{ + "empty": { + Config: &Config{ + Key: "", + }, + ValidBuild: false, + Validate: nil, + }, + }, + MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, *Metadata]{ + "empty": { + ValidConfig: &Config{ + Key: "48656c6c6f20776f726c6421", + }, + Meta: &Metadata{}, + IsPresent: false, + IsValid: false, + }, + "invalid": { + ValidConfig: &Config{ + Key: "48656c6c6f20776f726c6421", + }, + Meta: &Metadata{ + Magic: "Invalid", + }, + IsPresent: true, + IsValid: false, + }, + "valid": { + 
ValidConfig: &Config{ + Key: "48656c6c6f20776f726c6421", + }, + Meta: &Metadata{ + Magic: "Hello world!", + }, + IsPresent: true, + IsValid: true, + }, + }, + ProvideTestCase: compliancetest.ProvideTestCase[*Config, *Metadata]{ + ValidConfig: &Config{ + Key: "48656c6c6f20776f726c6421", + }, + ExpectedOutput: &keyprovider.Output{ + EncryptionKey: []byte("Hello world!"), // "48656c6c6f20776f726c6421" in hex is "Hello world!" + DecryptionKey: []byte("Hello world!"), + }, + ValidateKeys: nil, + ValidateMetadata: func(meta *Metadata) error { + if meta.Magic != "Hello world!" { + return fmt.Errorf("incorrect output magic: %s", meta.Magic) + } + return nil + }, + }, + }, + ) +} diff --git a/pkg/encryption/keyprovider/validation.go b/pkg/encryption/keyprovider/validation.go new file mode 100644 index 00000000000..62e80794674 --- /dev/null +++ b/pkg/encryption/keyprovider/validation.go @@ -0,0 +1,13 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keyprovider + +import "regexp" + +// TODO is there a generalized way to regexp-check names? +var addrRe = regexp.MustCompile(`^key_provider\.([a-zA-Z_0-9-]+)\.([a-zA-Z_0-9-]+)$`) +var nameRe = regexp.MustCompile("^([a-zA-Z_0-9-]+)$") +var idRe = regexp.MustCompile("^([a-zA-Z_0-9-]+)$") diff --git a/pkg/encryption/method/README.md b/pkg/encryption/method/README.md new file mode 100644 index 00000000000..9033ef0e755 --- /dev/null +++ b/pkg/encryption/method/README.md @@ -0,0 +1,30 @@ +# Encryption methods + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +This folder contains the implementations for the encryption methods used in OpenTofu. Encryption methods determine how exactly data is encrypted, but they do not determine what exactly is encrypted. 
+
+## Implementing a method
+
+When you implement a method, take a look at the [aesgcm](aesgcm) method as a template.
+
+### Testing your method (do this first!)
+
+Before you even go about writing a method, please set up the compliance tests. You can create a single test case that calls `compliancetest.ComplianceTest`. This test suite will run your method through all important compliance tests and will make sure that you are not missing anything during the implementation.
+
+### Implementing the descriptor
+
+The descriptor is very simple: you need to implement the [`Descriptor`](descriptor.go) interface in a type. (It does not have to be a struct.) However, make sure that the `ConfigStruct` always returns a struct with `hcl` tags on it. For more information on the `hcl` tags, see the [gohcl documentation](https://godocs.io/github.com/hashicorp/hcl/v2/gohcl).
+
+### The config struct
+
+Next, you need to create a config structure. This structure should hold all the fields you expect a user to fill out. **This must be a struct, and you must add `hcl` tags to each field you expect the user to fill out.**
+
+If the config structure needs input from key providers, it should declare one HCL-tagged field with the type of [`keyprovider.Output`](../keyprovider/output.go) to receive the encryption and decryption key. Note that the decryption key is not always available.
+
+Additionally, you must implement the `Build` function described in the [`Config` interface](config.go). You can take a look at [aesgcm/config.go](aesgcm/config.go) for an example on implementing this.
+
+### The method
+
+The heart of your method is... well, your method. It has the `Encrypt()` and `Decrypt()` methods, which should perform the named tasks. If no decryption key is available, the method should refuse to decrypt data. The method should under no circumstances pass through unencrypted data if it fails to decrypt the data.
diff --git a/pkg/encryption/method/addr.go b/pkg/encryption/method/addr.go new file mode 100644 index 00000000000..b755e1f0088 --- /dev/null +++ b/pkg/encryption/method/addr.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/hcl/v2" +) + +// TODO is there a generalized way to regexp-check names? +var addrRe = regexp.MustCompile(`^method\.([a-zA-Z_0-9-]+)\.([a-zA-Z_0-9-]+)$`) +var nameRe = regexp.MustCompile("^([a-zA-Z_0-9-]+)$") +var idRe = regexp.MustCompile("^([a-zA-Z_0-9-]+)$") + +// Addr is a type-alias for method address strings that identify a specific encryption method configuration. +// The Addr is an opaque value. Do not perform string manipulation on it outside the functions supplied by the +// method package. +type Addr string + +// Validate validates the Addr for formal naming conformance, but does not check if the referenced method actually +// exists in the configuration. +func (a Addr) Validate() hcl.Diagnostics { + if !addrRe.MatchString(string(a)) { + return hcl.Diagnostics{ + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid encryption method address", + Detail: fmt.Sprintf( + "The supplied encryption method address does not match the required form of %s", + addrRe.String(), + ), + }, + } + } + return nil +} + +// NewAddr creates a new Addr type from the provider and name supplied. The Addr is a type-alias for encryption method +// address strings that identify a specific encryption method configuration. You should treat the value as opaque and +// not perform string manipulation on it outside the functions supplied by the method package. 
+func NewAddr(method string, name string) (addr Addr, err hcl.Diagnostics) { + if !nameRe.MatchString(method) { + err = err.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "The provided encryption method type is invalid", + Detail: fmt.Sprintf( + "The supplied encryption method type (%s) does not match the required form of %s.", + method, + nameRe.String(), + ), + }, + ) + } + if !nameRe.MatchString(name) { + err = err.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "The provided encryption method name is invalid", + Detail: fmt.Sprintf( + "The supplied encryption method name (%s) does not match the required form of %s.", + name, + nameRe.String(), + ), + }, + ) + } + return Addr(fmt.Sprintf("method.%s.%s", method, name)), err +} diff --git a/pkg/encryption/method/aesgcm/README.md b/pkg/encryption/method/aesgcm/README.md new file mode 100644 index 00000000000..474ee5aa6c0 --- /dev/null +++ b/pkg/encryption/method/aesgcm/README.md @@ -0,0 +1,47 @@ +# AES-GCM encryption method + +> [!WARNING] +> This file is not an end-user documentation, it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code. + +This folder contains the state encryption implementation of the AES-GCM encryption method. This is implemented following the guidance of the following document: ([NIST SP 800-38D](https://csrc.nist.gov/pubs/sp/800/38/d/final)). + +## Configuration + +You can configure the encryption by specifying the following method block: + +```hcl2 +terraform { + encryption { + method "aes_gcm" "mymethod" { + # Pass the key provider with a 16, 24, or 32 byte encryption key here: + keys = key_provider.someprovider.somename + + # Leave the AAD empty unless needed. Pass as a list of bytes if needed: + aad = [1,2,3,4,...] 
+ } + } +} +``` + +| Field | Description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `keys` (*required*) | Encryption and decryption key in the standard output structure of the key providers (`{"encryption_key":[]byte, "decryption_key":[]byte}`). | +| `aad` | Additional Authenticated Data. This data is stored along the encrypted form and authenticated. The AAD value of the encrypted form must match the configuration, otherwise the decryption fails. | + +## Key exhaustion + +AES-GCM keys have a limited lifetime of `2^32` blocks, equaling roughly 64 GB of data that can be encrypted before the keys should be considered compromised. The end-user documentation of this method should guide users to use either a key-derivation function, such as PBKDF2 or Argon2 with a sufficiently long passphrase, or a key management system that can automatically rotate the keys. + +## Encryption vs. Authentication + +The AES-GCM implementation protects data at rest from being accessed. It does not, however, protect against malicious actors reusing old data (replay attacks) to compromise the integrity of the system. Users with the need for payload authentication should rotate their key and/or AAD frequently to ensure that old data cannot be used in this manner. + +## Implementation notes + +### Additional Authenticated Data (AAD) + +The AAD in AES-GCM is a general-purpose authenticated, but not encrypted field in the encrypted payload. The Go implementation only supports using this field as a canary value, rejecting decryption if the value mismatches. AES-GCM would support using this field as a means to store data. Since Go does not support it, neither do we. + +### Panics + +The current Go implementation of AES-GCM uses `panic()` to handle some input errors. 
\ No newline at end of file diff --git a/pkg/encryption/method/aesgcm/aesgcm.go b/pkg/encryption/method/aesgcm/aesgcm.go new file mode 100644 index 00000000000..133d78452eb --- /dev/null +++ b/pkg/encryption/method/aesgcm/aesgcm.go @@ -0,0 +1,130 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// aesgcm contains the encryption/decryption methods according to AES-GCM (NIST SP 800-38D). +type aesgcm struct { + encryptionKey []byte + decryptionKey []byte + aad []byte +} + +// Encrypt encrypts the passed data with AES-GCM. If the data the encryption fails, it returns an error. +func (a aesgcm) Encrypt(data []byte) ([]byte, error) { + result, err := handlePanic( + func() ([]byte, error) { + gcm, err := a.getGCM(a.encryptionKey) + if err != nil { + return nil, &method.ErrEncryptionFailed{Cause: err} + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := rand.Read(nonce); err != nil { + return nil, &method.ErrEncryptionFailed{Cause: &method.ErrCryptoFailure{ + Message: "could not generate nonce", + Cause: err, + }} + } + + encrypted := gcm.Seal(nil, nonce, data, a.aad) + + return append(nonce, encrypted...), nil + }, + ) + if err != nil { + var encryptionFailed *method.ErrEncryptionFailed + if errors.As(err, &encryptionFailed) { + return nil, err + } + return nil, &method.ErrEncryptionFailed{Cause: &method.ErrCryptoFailure{Message: "unexpected error", Cause: err}} + } + return result, nil +} + +// Decrypt decrypts an AES-GCM-encrypted data set. If the data set fails decryption, it returns an error. 
+func (a aesgcm) Decrypt(data []byte) ([]byte, error) { + if len(a.decryptionKey) == 0 { + return nil, &method.ErrDecryptionKeyUnavailable{} + } + result, err := handlePanic( + func() ([]byte, error) { + if len(data) == 0 { + return nil, &method.ErrDecryptionFailed{ + Cause: method.ErrCryptoFailure{ + Message: "cannot decrypt empty data", + Cause: nil, + }, + } + } + + gcm, err := a.getGCM(a.decryptionKey) + if err != nil { + return nil, &method.ErrDecryptionFailed{Cause: err} + } + + if len(data) < gcm.NonceSize() { + return nil, &method.ErrDecryptionFailed{ + Cause: method.ErrCryptoFailure{ + Message: "cannot decrypt data because it is too small (likely data corruption)", + Cause: nil, + }, + } + } + + nonce := data[:gcm.NonceSize()] + data = data[gcm.NonceSize():] + + decrypted, err := gcm.Open(nil, nonce, data, a.aad) + if err != nil { + return nil, &method.ErrDecryptionFailed{Cause: err} + } + return decrypted, nil + }, + ) + if err != nil { + var decryptionFailed *method.ErrDecryptionFailed + if errors.As(err, &decryptionFailed) { + return nil, err + } + return nil, &method.ErrDecryptionFailed{ + Cause: &method.ErrCryptoFailure{Message: "unexpected error", Cause: err}, + } + } + return result, nil +} + +func (a aesgcm) getGCM(key []byte) (cipher.AEAD, error) { + cipherBlock, err := aes.NewCipher(key) + if err != nil { + return nil, &method.ErrCryptoFailure{ + Message: "failed to create AES cypher block", + Cause: err, + } + } + + gcm, err := cipher.NewGCM(cipherBlock) + if err != nil { + return nil, &method.ErrCryptoFailure{ + Message: "failed to create AES GCM", + Cause: err, + } + } + return gcm, nil +} + +func Is(m method.Method) bool { + _, ok := m.(*aesgcm) + return ok +} diff --git a/pkg/encryption/method/aesgcm/aesgcm_internal_test.go b/pkg/encryption/method/aesgcm/aesgcm_internal_test.go new file mode 100644 index 00000000000..686bd326cef --- /dev/null +++ b/pkg/encryption/method/aesgcm/aesgcm_internal_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The 
OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "testing" +) + +type testCase struct { + aes *aesgcm + error bool +} + +func TestInternalErrorHandling(t *testing.T) { + testCases := map[string]testCase{ + "ok": { + &aesgcm{ + encryptionKey: []byte("aeshi1quahb2Rua0ooquaiwahbonedoh"), + decryptionKey: []byte("aeshi1quahb2Rua0ooquaiwahbonedoh"), + }, + false, + }, + "no-key": { + &aesgcm{}, + true, + }, + "bad-key-length": { + &aesgcm{ + encryptionKey: []byte("Hello world!"), + decryptionKey: []byte("Hello world!"), + }, + true, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + encrypted, err := tc.aes.Encrypt([]byte("Hello world!")) + if tc.error && err == nil { + t.Fatalf("Expected error, none returned.") + } else if !tc.error && err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !tc.error { + decrypted, err := tc.aes.Decrypt(encrypted) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if string(decrypted) != "Hello world!" { + t.Fatalf("Incorrect decrypted string: %s", decrypted) + } + } else { + // Test error handling on the decrypt side as best as we can: + _, err := tc.aes.Decrypt([]byte("Hello world!")) + if err == nil { + t.Fatalf("Expected error, none returned.") + } + } + }) + } +} diff --git a/pkg/encryption/method/aesgcm/aesgcm_test.go b/pkg/encryption/method/aesgcm/aesgcm_test.go new file mode 100644 index 00000000000..5a4ab70a413 --- /dev/null +++ b/pkg/encryption/method/aesgcm/aesgcm_test.go @@ -0,0 +1,98 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aesgcm_test + +import ( + "errors" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + + "github.com/kubegems/opentofu/pkg/encryption/method" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" +) + +var config = &aesgcm.Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("aeshi1quahb2Rua0ooquaiwahbonedoh"), + DecryptionKey: []byte("aeshi1quahb2Rua0ooquaiwahbonedoh"), + }, +} + +func TestDecryptEmptyData(t *testing.T) { + m, err := config.Build() + if err != nil { + t.Fatalf("unexpected error (%v)", err) + } + + _, err = m.Decrypt(nil) + if err == nil { + t.Fatalf("Expected error, none returned.") + } + + var e *method.ErrDecryptionFailed + if !errors.As(err, &e) { + t.Fatalf("Incorrect error type returned: %T (%v)", err, err) + } +} + +func TestDecryptShortData(t *testing.T) { + m, err := config.Build() + if err != nil { + t.Fatalf("unexpected error (%v)", err) + } + + // Passing a non-empty, but shorter-than-nonce data + _, err = m.Decrypt([]byte("1")) + if err == nil { + t.Fatalf("Expected error, none returned.") + } + + var e *method.ErrDecryptionFailed + if !errors.As(err, &e) { + t.Fatalf("Incorrect error type returned: %T (%v)", err, err) + } +} + +func TestDecryptInvalidData(t *testing.T) { + m, err := config.Build() + if err != nil { + t.Fatalf("unexpected error (%v)", err) + } + + // Passing a non-empty, but shorter-than-nonce data + _, err = m.Decrypt([]byte("abcdefghijklmnopqrstuvwxyz")) + if err == nil { + t.Fatalf("Expected error, none returned.") + } + + var e *method.ErrDecryptionFailed + if !errors.As(err, &e) { + t.Fatalf("Incorrect error type returned: %T (%v)", err, err) + } +} + +func TestDecryptCorruptData(t *testing.T) { + m, err := config.Build() + if err != nil { + t.Fatalf("unexpected error (%v)", err) + } + + encrypted, err := m.Encrypt([]byte("Hello world!")) + if err != nil { + t.Fatalf("unexpected error (%v)", err) + } + + encrypted = 
encrypted[:len(encrypted)-1] + decrypted, err := m.Decrypt(encrypted) + if err == nil { + t.Fatalf("Expected error, got: %v", decrypted) + } + var e *method.ErrDecryptionFailed + if !errors.As(err, &e) { + t.Fatalf("Incorrect error type returned: %T (%v)", err, err) + } +} diff --git a/pkg/encryption/method/aesgcm/compliance_test.go b/pkg/encryption/method/aesgcm/compliance_test.go new file mode 100644 index 00000000000..0c9cef464a3 --- /dev/null +++ b/pkg/encryption/method/aesgcm/compliance_test.go @@ -0,0 +1,197 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "bytes" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method/compliancetest" +) + +func TestCompliance(t *testing.T) { + compliancetest.ComplianceTest(t, compliancetest.TestConfiguration[*descriptor, *Config, *aesgcm]{ + Descriptor: New().(*descriptor), + HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*descriptor, *Config, *aesgcm]{ + "empty": { + HCL: `method "aes_gcm" "foo" {}`, + ValidHCL: false, + ValidBuild: false, + Validate: nil, + }, + "empty_keys": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [] + decryption_key = [] + } + }`, + ValidHCL: true, + ValidBuild: false, + Validate: nil, + }, + "short-keys": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + } + }`, + ValidHCL: true, + ValidBuild: false, + Validate: nil, + }, + "short-decryption-key": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + } + }`, + ValidHCL: true, + ValidBuild: false, + Validate: nil, + }, + "short-encryption-key": { + HCL: `method "aes_gcm" "foo" { + 
keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + } + }`, + ValidHCL: true, + ValidBuild: false, + Validate: nil, + }, + "only-decryption-key": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + } + }`, + ValidHCL: true, + ValidBuild: false, + }, + "only-encryption-key": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + decryption_key = [] + } + }`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, method *aesgcm) error { + if len(config.Keys.DecryptionKey) > 0 { + return fmt.Errorf("decryption key found in config despite no decryption key being provided") + } + if len(method.decryptionKey) > 0 { + return fmt.Errorf("decryption key found in method despite no decryption key being provided") + } + if !bytes.Equal(config.Keys.EncryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect encryption key found after HCL parsing in config") + } + if !bytes.Equal(method.encryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect encryption key found after HCL parsing in config") + } + return nil + }, + }, + "encryption-decryption-key": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + } + }`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, method *aesgcm) error { + if !bytes.Equal(config.Keys.DecryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect decryption key found after HCL parsing in config") + } + if !bytes.Equal(method.decryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect decryption key 
found after HCL parsing in config") + } + + if !bytes.Equal(config.Keys.EncryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect encryption key found after HCL parsing in config") + } + if !bytes.Equal(method.encryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) { + return fmt.Errorf("incorrect encryption key found after HCL parsing in config") + } + return nil + }, + }, + "no-aad": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + } + }`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, method *aesgcm) error { + if len(config.AAD) != 0 { + return fmt.Errorf("invalid AAD in config after HCL parsing") + } + if len(method.aad) != 0 { + return fmt.Errorf("invalid AAD in method after Build()") + } + return nil + }, + }, + "aad": { + HCL: `method "aes_gcm" "foo" { + keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] + } + aad = [1,2,3,4] + }`, + ValidHCL: true, + ValidBuild: true, + Validate: func(config *Config, method *aesgcm) error { + if !bytes.Equal(config.AAD, []byte{1, 2, 3, 4}) { + return fmt.Errorf("invalid AAD in config after HCL parsing") + } + if !bytes.Equal(method.aad, []byte{1, 2, 3, 4}) { + return fmt.Errorf("invalid AAD in method after Build()") + } + return nil + }, + }, + }, + ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *aesgcm]{ + "empty": { + Config: &Config{ + Keys: keyprovider.Output{}, + AAD: nil, + }, + ValidBuild: false, + Validate: nil, + }, + }, + EncryptDecryptTestCase: compliancetest.EncryptDecryptTestCase[*Config, *aesgcm]{ + ValidEncryptOnlyConfig: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, + DecryptionKey: nil, + }, + }, + ValidFullConfig: 
&Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte{17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, + DecryptionKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, + }, + }, + }, + }) +} diff --git a/pkg/encryption/method/aesgcm/config.go b/pkg/encryption/method/aesgcm/config.go new file mode 100644 index 00000000000..7600f6accdb --- /dev/null +++ b/pkg/encryption/method/aesgcm/config.go @@ -0,0 +1,66 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + + "github.com/kubegems/opentofu/pkg/collections" + + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// validKeyLengths holds the valid key lengths supported by this method. +var validKeyLengths = collections.NewSet[int](16, 24, 32) + +// Config is the configuration for the AES-GCM method. +type Config struct { + // Key is the encryption key for the AES-GCM encryption. It has to be 16, 24, or 32 bytes long for AES-128, 192, or + // 256, respectively. + Keys keyprovider.Output `hcl:"keys" json:"keys" yaml:"keys"` + + // AAD is the Additional Authenticated Data that is authenticated, but not encrypted. In the Go implementation, this + // data serves as a canary value against replay attacks. The AAD value on decryption must match this setting, + // otherwise the decryption will fail. (Note: this is Go-specific and differs from the NIST SP 800-38D description + // of the AAD.) + AAD []byte `hcl:"aad,optional" json:"aad,omitempty" yaml:"aad,omitempty"` +} + +// Build checks the validity of the configuration and returns a ready-to-use AES-GCM implementation. 
+func (c *Config) Build() (method.Method, error) { + encryptionKey := c.Keys.EncryptionKey + decryptionKey := c.Keys.DecryptionKey + + if !validKeyLengths.Has(len(encryptionKey)) { + return nil, &method.ErrInvalidConfiguration{ + Cause: fmt.Errorf( + "AES-GCM requires the key length to be one of: %s, received %d bytes in the encryption key", + validKeyLengths.String(), + len(encryptionKey), + ), + } + } + + if len(decryptionKey) > 0 { + if !validKeyLengths.Has(len(decryptionKey)) { + return nil, &method.ErrInvalidConfiguration{ + Cause: fmt.Errorf( + "AES-GCM requires the key length to be one of: %s, received %d bytes in the decryption key", + validKeyLengths.String(), + len(decryptionKey), + ), + } + } + } + + return &aesgcm{ + encryptionKey, + decryptionKey, + c.AAD, + }, nil +} diff --git a/pkg/encryption/method/aesgcm/config_test.go b/pkg/encryption/method/aesgcm/config_test.go new file mode 100644 index 00000000000..e68235a2aac --- /dev/null +++ b/pkg/encryption/method/aesgcm/config_test.go @@ -0,0 +1,141 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "bytes" + "errors" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +func TestConfig_Build(t *testing.T) { + var testCases = []struct { + name string + config *Config + errorType any + expected aesgcm + }{ + { + name: "key-32-bytes", + config: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathe"), + DecryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathd"), + }, + }, + errorType: nil, + expected: aesgcm{ + encryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathe"), + decryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathd"), + }, + }, + { + name: "key-24-bytes", + config: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Zool5olaileefe"), + DecryptionKey: []byte("bohwu9zoo7Zool5olaileefd"), + }, + }, + errorType: nil, + expected: aesgcm{ + encryptionKey: []byte("bohwu9zoo7Zool5olaileefe"), + decryptionKey: []byte("bohwu9zoo7Zool5olaileefd"), + }, + }, + { + name: "key-16-bytes", + config: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Zool5e"), + DecryptionKey: []byte("bohwu9zoo7Zool5d"), + }, + }, + errorType: nil, + expected: aesgcm{ + encryptionKey: []byte("bohwu9zoo7Zool5e"), + decryptionKey: []byte("bohwu9zoo7Zool5d"), + }, + }, + { + name: "no-key", + config: &Config{}, + errorType: &method.ErrInvalidConfiguration{}, + }, + { + name: "encryption-key-15-bytes", + config: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Ze15"), + DecryptionKey: []byte("bohwu9zoo7Zod16"), + }, + }, + errorType: &method.ErrInvalidConfiguration{}, + }, + { + name: "decryption-key-15-bytes", + config: &Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Zooe16"), + DecryptionKey: []byte("bohwu9zoo7Zod15"), + }, + }, + errorType: &method.ErrInvalidConfiguration{}, + }, + { + name: "aad", + config: 
&Config{ + Keys: keyprovider.Output{ + EncryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathe"), + DecryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathd"), + }, + AAD: []byte("foobar"), + }, + expected: aesgcm{ + encryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathe"), + decryptionKey: []byte("bohwu9zoo7Zool5olaileef1eibeathd"), + aad: []byte("foobar"), + }, + errorType: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + built, err := tc.config.Build() + if tc.errorType == nil { + if err != nil { + t.Fatalf("Unexpected error returned: %v", err) + } + + built := built.(*aesgcm) + + if !bytes.Equal(tc.expected.encryptionKey, built.encryptionKey) { + t.Fatalf("Incorrect encryption key built: %v != %v", tc.expected.encryptionKey, built.encryptionKey) + } + if !bytes.Equal(tc.expected.decryptionKey, built.decryptionKey) { + t.Fatalf("Incorrect decryption key built: %v != %v", tc.expected.decryptionKey, built.decryptionKey) + } + if !bytes.Equal(tc.expected.aad, built.aad) { + t.Fatalf("Incorrect aad built: %v != %v", tc.expected.aad, built.aad) + } + + } else if tc.errorType != nil { + if err == nil { + t.Fatal("Expected error, none received") + } + if !errors.As(err, &tc.errorType) { + t.Fatalf("Incorrect error type received: %T", err) + } + t.Logf("Correct error of type %T received: %v", err, err) + } + + }) + } +} diff --git a/pkg/encryption/method/aesgcm/descriptor.go b/pkg/encryption/method/aesgcm/descriptor.go new file mode 100644 index 00000000000..d3533997797 --- /dev/null +++ b/pkg/encryption/method/aesgcm/descriptor.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// Descriptor integrates the method.Descriptor and provides a TypedConfig for easier configuration. +type Descriptor interface { + method.Descriptor + + // TypedConfig returns a config typed for this method. + TypedConfig() *Config +} + +// New creates a new descriptor for the AES-GCM encryption method, which requires a 32-byte key. +func New() Descriptor { + return &descriptor{} +} + +type descriptor struct { +} + +func (f *descriptor) TypedConfig() *Config { + return &Config{ + Keys: keyprovider.Output{}, + AAD: nil, + } +} + +func (f *descriptor) ID() method.ID { + return "aes_gcm" +} + +func (f *descriptor) ConfigStruct() method.Config { + return f.TypedConfig() +} diff --git a/pkg/encryption/method/aesgcm/descriptor_test.go b/pkg/encryption/method/aesgcm/descriptor_test.go new file mode 100644 index 00000000000..dfd4cf8e3f5 --- /dev/null +++ b/pkg/encryption/method/aesgcm/descriptor_test.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm_test + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" +) + +func TestDescriptor(t *testing.T) { + if id := aesgcm.New().ID(); id != "aes_gcm" { + t.Fatalf("Incorrect descriptor ID returned: %s", id) + } +} diff --git a/pkg/encryption/method/aesgcm/example_test.go b/pkg/encryption/method/aesgcm/example_test.go new file mode 100644 index 00000000000..16e560ba37d --- /dev/null +++ b/pkg/encryption/method/aesgcm/example_test.go @@ -0,0 +1,180 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aesgcm_test + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method/aesgcm" +) + +func Example() { + descriptor := aesgcm.New() + + // Get the config struct. You can fill it manually by type-asserting it to aesgcm.Config, but you could also use + // it as JSON. + config := descriptor.ConfigStruct() + + if err := json.Unmarshal( + // Set up a randomly generated 32-byte key. In JSON, you can base64-encode the value. + []byte(`{ + "keys": { + "encryption_key": "Y29veTRhaXZ1NWFpeW9vMWlhMG9vR29vVGFlM1BhaTQ=", + "decryption_key": "Y29veTRhaXZ1NWFpeW9vMWlhMG9vR29vVGFlM1BhaTQ=" + } +}`), &config); err != nil { + panic(err) + } + + method, err := config.Build() + if err != nil { + panic(err) + } + + // Encrypt some data: + encrypted, err := method.Encrypt([]byte("Hello world!")) + if err != nil { + panic(err) + } + + // Now decrypt it: + decrypted, err := method.Decrypt(encrypted) + if err != nil { + panic(err) + } + + fmt.Printf("%s", decrypted) + // Output: Hello world! +} + +func Example_config() { + // First, get the descriptor to make sure we always have the default values. + descriptor := aesgcm.New() + + // Obtain a modifiable, buildable config. Alternatively, you can also use ConfigStruct() method to obtain a + // struct you can fill with HCL or JSON tags. 
+ config := descriptor.TypedConfig() + + // Set up an encryption key: + config.Keys = keyprovider.Output{ + EncryptionKey: []byte("AiphoogheuwohShal8Aefohy7ooLeeyu"), + DecryptionKey: []byte("AiphoogheuwohShal8Aefohy7ooLeeyu"), + } + + // Now you can build a method: + method, err := config.Build() + if err != nil { + panic(err) + } + + // Encrypt something: + encrypted, err := method.Encrypt([]byte("Hello world!")) + if err != nil { + panic(err) + } + + // Decrypt it: + decrypted, err := method.Decrypt(encrypted) + if err != nil { + panic(err) + } + + fmt.Printf("%s", decrypted) + // Output: Hello world! +} + +func Example_config_json() { + // First, get the descriptor to make sure we always have the default values. + descriptor := aesgcm.New() + + // Get an untyped config struct you can use for JSON unmarshalling: + config := descriptor.ConfigStruct() + + // Unmarshal JSON into the config struct: + if err := json.Unmarshal( + // Set up a randomly generated 32-byte key. In JSON, you can base64-encode the value. + []byte(`{ + "keys": { + "encryption_key": "Y29veTRhaXZ1NWFpeW9vMWlhMG9vR29vVGFlM1BhaTQ=", + "decryption_key": "Y29veTRhaXZ1NWFpeW9vMWlhMG9vR29vVGFlM1BhaTQ=" + } +}`), &config); err != nil { + panic(err) + } + + // Now you can build a method: + method, err := config.Build() + if err != nil { + panic(err) + } + + // Encrypt something: + encrypted, err := method.Encrypt([]byte("Hello world!")) + if err != nil { + panic(err) + } + + // Decrypt it: + decrypted, err := method.Decrypt(encrypted) + if err != nil { + panic(err) + } + + fmt.Printf("%s", decrypted) + // Output: Hello world! +} + +func Example_config_hcl() { + // First, get the descriptor to make sure we always have the default values. + descriptor := aesgcm.New() + + // Get an untyped config struct you can use for HCL unmarshalling: + config := descriptor.ConfigStruct() + + // Unmarshal HCL code into the config struct. 
The input must be a list of bytes, so in a real world scenario + // you may want to put in a hex-decoding function: + rawHCLInput := `keys = { + encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32], + decryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32] +}` + file, diags := hclsyntax.ParseConfig( + []byte(rawHCLInput), + "example.hcl", + hcl.Pos{Byte: 0, Line: 1, Column: 1}, + ) + if diags.HasErrors() { + panic(diags) + } + if diags := gohcl.DecodeBody(file.Body, nil, config); diags.HasErrors() { + panic(diags) + } + + // Now you can build a method: + method, err := config.Build() + if err != nil { + panic(err) + } + + // Encrypt something: + encrypted, err := method.Encrypt([]byte("Hello world!")) + if err != nil { + panic(err) + } + + // Decrypt it: + decrypted, err := method.Decrypt(encrypted) + if err != nil { + panic(err) + } + + fmt.Printf("%s", decrypted) + // Output: Hello world! +} diff --git a/pkg/encryption/method/aesgcm/panic.go b/pkg/encryption/method/aesgcm/panic.go new file mode 100644 index 00000000000..03de0b7b9e0 --- /dev/null +++ b/pkg/encryption/method/aesgcm/panic.go @@ -0,0 +1,31 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import "fmt" + +// handlePanic runs the specified function and returns its result value or returned error. If a panic occurs, it returns the +// panic as an error. 
+func handlePanic(f func() ([]byte, error)) (result []byte, err error) { + result, e := func() ([]byte, error) { + defer func() { + var ok bool + e := recover() + if e == nil { + return + } + if err, ok = e.(error); !ok { + // In case the panic is not an error + err = fmt.Errorf("%v", e) + } + }() + return f() + }() + if err != nil { + return nil, err + } + return result, e +} diff --git a/pkg/encryption/method/aesgcm/panic_test.go b/pkg/encryption/method/aesgcm/panic_test.go new file mode 100644 index 00000000000..944b7b1a61e --- /dev/null +++ b/pkg/encryption/method/aesgcm/panic_test.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aesgcm + +import ( + "fmt" +) + +func Example_handlePanic() { + _, err := handlePanic(func() ([]byte, error) { + panic("Hello world!") + }) + fmt.Printf("%v", err) + // Output: Hello world! +} diff --git a/pkg/encryption/method/compliancetest/compliance.go b/pkg/encryption/method/compliancetest/compliance.go new file mode 100644 index 00000000000..ccfdd05efaf --- /dev/null +++ b/pkg/encryption/method/compliancetest/compliance.go @@ -0,0 +1,321 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "bytes" + "errors" + "reflect" + "testing" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/compliancetest" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// ComplianceTest tests the functionality of a method to make sure it conforms to the expectations of the method +// interface. 
+func ComplianceTest[TDescriptor method.Descriptor, TConfig method.Config, TMethod method.Method]( + t *testing.T, + testConfig TestConfiguration[TDescriptor, TConfig, TMethod], +) { + testConfig.execute(t) +} + +type TestConfiguration[TDescriptor method.Descriptor, TConfig method.Config, TMethod method.Method] struct { + Descriptor TDescriptor + // HCLParseTestCases contains the test cases of parsing HCL configuration and then validating it using the Build() + // function. + HCLParseTestCases map[string]HCLParseTestCase[TDescriptor, TConfig, TMethod] + + // ConfigStructT validates that a certain config results or does not result in a valid Build() call. + ConfigStructTestCases map[string]ConfigStructTestCase[TConfig, TMethod] + + // ProvideTestCase exercises the entire chain and generates two keys. + EncryptDecryptTestCase EncryptDecryptTestCase[TConfig, TMethod] +} + +func (cfg *TestConfiguration[TDescriptor, TConfig, TMethod]) execute(t *testing.T) { + t.Run("id", func(t *testing.T) { + cfg.testID(t) + }) + t.Run("hcl", func(t *testing.T) { + cfg.testHCL(t) + }) + t.Run("config-struct", func(t *testing.T) { + cfg.testConfigStruct(t) + }) + t.Run("encrypt-decrypt", func(t *testing.T) { + cfg.EncryptDecryptTestCase.execute(t) + }) +} + +func (cfg *TestConfiguration[TDescriptor, TConfig, TMethod]) testID(t *testing.T) { + id := cfg.Descriptor.ID() + if err := id.Validate(); err != nil { + compliancetest.Fail(t, "Invalid ID returned from method descriptor: %s (%v)", id, err) + } else { + compliancetest.Log(t, "The ID provided by the method descriptor is valid: %s", id) + } +} + +func (cfg *TestConfiguration[TDescriptor, TConfig, TMethod]) testHCL(t *testing.T) { + if cfg.HCLParseTestCases == nil { + compliancetest.Fail(t, "Please provide a map to HCLParseTestCases.") + } + hasInvalidHCL := false + hasValidHCLInvalidBuild := false + hasValidBuild := false + for name, tc := range cfg.HCLParseTestCases { + tc := tc + if !tc.ValidHCL { + hasInvalidHCL = true + } else { 
+ if tc.ValidBuild { + hasValidBuild = true + } else { + hasValidHCLInvalidBuild = true + } + } + t.Run(name, func(t *testing.T) { + tc.execute(t, cfg.Descriptor) + }) + } + t.Run("completeness", func(t *testing.T) { + if !hasInvalidHCL { + compliancetest.Fail(t, "Please provide at least one test case with an invalid HCL.") + } + if !hasValidHCLInvalidBuild { + compliancetest.Fail(t, "Please provide at least one test case with a valid HCL that fails on Build()") + } + if !hasValidBuild { + compliancetest.Fail( + t, + "Please provide at least one test case with a valid HCL that succeeds on Build()", + ) + } + }) +} + +func (cfg *TestConfiguration[TDescriptor, TConfig, TMethod]) testConfigStruct(t *testing.T) { + compliancetest.ConfigStruct[TConfig](t, cfg.Descriptor.ConfigStruct()) + + if cfg.ConfigStructTestCases == nil { + compliancetest.Fail(t, "Please provide a map to ConfigStructTestCases.") + } + + for name, tc := range cfg.ConfigStructTestCases { + tc := tc + t.Run(name, func(t *testing.T) { + tc.execute(t) + }) + } +} + +// HCLParseTestCase contains a test case that parses HCL into a configuration. +type HCLParseTestCase[TDescriptor method.Descriptor, TConfig method.Config, TMethod method.Method] struct { + // HCL contains the code that should be parsed into the configuration structure. + HCL string + // ValidHCL indicates that the HCL block should be parsable into the configuration structure, but not necessarily + // result in a valid Build() call. + ValidHCL bool + // ValidBuild indicates that calling the Build() function should not result in an error. + ValidBuild bool + // Validate is an extra optional validation function that can check if the configuration contains the correct + // values parsed from HCL. If ValidBuild is true, the method will be passed as well. 
+ Validate func(config TConfig, method TMethod) error +} + +func (h *HCLParseTestCase[TDescriptor, TConfig, TMethod]) execute(t *testing.T, descriptor TDescriptor) { + parseError := false + parsedConfig, diags := config.LoadConfigFromString("config.hcl", h.HCL) + if h.ValidHCL { + if diags.HasErrors() { + compliancetest.Fail(t, "Unexpected HCL error (%v).", diags) + } else { + compliancetest.Log(t, "HCL successfully parsed.") + } + } else { + if diags.HasErrors() { + parseError = true + } + } + + configStruct := descriptor.ConfigStruct() + diags = gohcl.DecodeBody( + parsedConfig.MethodConfigs[0].Body, + nil, + configStruct, + ) + var m TMethod + if h.ValidHCL { + if diags.HasErrors() { + compliancetest.Fail(t, "Failed to parse empty HCL block into config struct (%v).", diags) + } else { + compliancetest.Log(t, "HCL successfully loaded into config struct.") + } + + m = buildConfigAndValidate[TMethod](t, configStruct, h.ValidBuild) + } else { + if !parseError && !diags.HasErrors() { + compliancetest.Fail(t, "Expected error during HCL parsing, but no error was returned.") + } else { + compliancetest.Log(t, "HCL loading errored correctly (%v).", diags) + } + } + + if h.Validate != nil { + if err := h.Validate(configStruct.(TConfig), m); err != nil { + compliancetest.Fail(t, "Error during validation and configuration (%v).", err) + } else { + compliancetest.Log(t, "Successfully validated parsed HCL config and applied modifications.") + } + } else { + compliancetest.Log(t, "No ValidateAndConfigure provided, skipping HCL parse validation.") + } +} + +// ConfigStructTestCase validates that the config struct is behaving correctly when Build() is called. 
+type ConfigStructTestCase[TConfig method.Config, TMethod method.Method] struct { + Config TConfig + ValidBuild bool + Validate func(method TMethod) error +} + +func (m ConfigStructTestCase[TConfig, TMethod]) execute(t *testing.T) { + newMethod := buildConfigAndValidate[TMethod, TConfig](t, m.Config, m.ValidBuild) + if m.Validate != nil { + if err := m.Validate(newMethod); err != nil { + compliancetest.Fail(t, "method validation failed (%v)", err) + } + } +} + +// EncryptDecryptTestCase handles a full encryption-decryption cycle. +type EncryptDecryptTestCase[TConfig method.Config, TMethod method.Method] struct { + // ValidEncryptOnlyConfig is a configuration that has no decryption key and can only be used for encryption. The + // key must match ValidFullConfig. + ValidEncryptOnlyConfig TConfig + // ValidFullConfig is a configuration that contains both an encryption and decryption key. + ValidFullConfig TConfig +} + +func (m EncryptDecryptTestCase[TConfig, TMethod]) execute(t *testing.T) { + if reflect.ValueOf(m.ValidEncryptOnlyConfig).IsNil() { + compliancetest.Fail(t, "Please provide a ValidEncryptOnlyConfig to EncryptDecryptTestCase.") + } + if reflect.ValueOf(m.ValidFullConfig).IsNil() { + compliancetest.Fail(t, "Please provide a ValidFullConfig to EncryptDecryptTestCase.") + } + + encryptMethod := buildConfigAndValidate[TMethod, TConfig](t, m.ValidEncryptOnlyConfig, true) + decryptMethod := buildConfigAndValidate[TMethod, TConfig](t, m.ValidFullConfig, true) + + plainData := []byte("Hello world!") + encryptedData, err := encryptMethod.Encrypt(plainData) + if err != nil { + compliancetest.Fail(t, "Unexpected error after Encrypt() on the encrypt-only method (%v).", err) + } + + _, err = encryptMethod.Decrypt(encryptedData) + if err == nil { + compliancetest.Fail(t, "Decrypt() did not fail without a decryption key.") + } else { + compliancetest.Log(t, "Decrypt() returned an error with a decryption key.") + } + var noDecryptionKeyError 
*method.ErrDecryptionKeyUnavailable + if !errors.As(err, &noDecryptionKeyError) { + compliancetest.Fail(t, "Decrypt() returned a %T instead of a %T without a decryption key. Please use the correct typed errors.", err, noDecryptionKeyError) + } else { + compliancetest.Log(t, "Decrypt() returned the correct error type of %T without a decryption key.", noDecryptionKeyError) + } + + _, err = decryptMethod.Decrypt([]byte{}) + if err == nil { + compliancetest.Fail(t, "Decrypt() must return an error when decrypting empty data, no error returned.") + } else { + compliancetest.Log(t, "Decrypt() correctly returned an error when decrypting empty data.") + } + var typedDecryptError *method.ErrDecryptionFailed + if !errors.As(err, &typedDecryptError) { + compliancetest.Fail(t, "Decrypt() returned a %T instead of a %T when decrypting empty data. Please use the correct typed errors.", err, typedDecryptError) + } else { + compliancetest.Log(t, "Decrypt() returned the correct error type of %T when decrypting empty data.", typedDecryptError) + } + typedDecryptError = nil + + _, err = decryptMethod.Decrypt(plainData) + if err == nil { + compliancetest.Fail(t, "Decrypt() must return an error when decrypting unencrypted data, no error returned.") + } else { + compliancetest.Log(t, "Decrypt() correctly returned an error when decrypting unencrypted data.") + } + if !errors.As(err, &typedDecryptError) { + compliancetest.Fail(t, "Decrypt() returned a %T instead of a %T when decrypting unencrypted data. 
Please use the correct typed errors.", err, typedDecryptError) + } else { + compliancetest.Log(t, "Decrypt() returned the correct error type of %T when decrypting unencrypted data.", typedDecryptError) + } + + decryptedData, err := decryptMethod.Decrypt(encryptedData) + if err != nil { + compliancetest.Fail(t, "Decrypt() failed to decrypt previously-encrypted data (%v).", err) + } else { + compliancetest.Log(t, "Decrypt() succeeded.") + } + + if !bytes.Equal(decryptedData, plainData) { + compliancetest.Fail(t, "Decrypt() returned incorrect plain text data:\n%v\nexpected:\n%v", decryptedData, plainData) + } else { + compliancetest.Log(t, "Decrypt() returned the correct plain text data.") + } +} + +func buildConfigAndValidate[TMethod method.Method, TConfig method.Config]( + t *testing.T, + configStruct TConfig, + validBuild bool, +) TMethod { + if reflect.ValueOf(configStruct).IsNil() { + compliancetest.Fail(t, "Nil struct passed!") + } + + var typedMethod TMethod + var ok bool + kp, err := configStruct.Build() + if validBuild { + if err != nil { + compliancetest.Fail(t, "Build() returned an unexpected error: %v.", err) + } else { + compliancetest.Log(t, "Build() did not return an error.") + } + typedMethod, ok = kp.(TMethod) + if !ok { + compliancetest.Fail(t, "Build() returned an invalid method type of %T, expected %T", kp, typedMethod) + } else { + compliancetest.Log(t, "Build() returned the correct method type of %T.", typedMethod) + } + } else { + if err == nil { + compliancetest.Fail(t, "Build() did not return an error.") + } else { + compliancetest.Log(t, "Build() correctly returned an error: %v", err) + } + + var typedError *method.ErrInvalidConfiguration + if !errors.As(err, &typedError) { + compliancetest.Fail( + t, + "Build() did not return the correct error type, got %T but expected %T", + err, + typedError, + ) + } else { + compliancetest.Log(t, "Build() returned the correct error type of %T", typedError) + } + } + return typedMethod +} diff --git 
a/pkg/encryption/method/config.go b/pkg/encryption/method/config.go new file mode 100644 index 00000000000..859502ba1ab --- /dev/null +++ b/pkg/encryption/method/config.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +// Config describes a configuration struct for setting up an encryption Method. You should always implement this +// interface with a struct, and you should tag the fields with HCL tags so the encryption implementation can read +// the .tf code into it. For example: +// +// type MyConfig struct { +// Key string `hcl:"key"` +// } +// +// func (m MyConfig) Build() (Method, error) { ... } +type Config interface { + // Build takes the configuration and builds an encryption method. + // TODO this may be better changed to return hcl.Diagnostics so warnings can be issued? + Build() (Method, error) +} diff --git a/pkg/encryption/method/descriptor.go b/pkg/encryption/method/descriptor.go new file mode 100644 index 00000000000..ba119ceb68f --- /dev/null +++ b/pkg/encryption/method/descriptor.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +// Descriptor contains the details on an encryption method and produces a configuration structure with default values. +type Descriptor interface { + // ID returns the unique identifier used when parsing HCL or JSON configs. + ID() ID + + // ConfigStruct creates a new configuration struct annotated with hcl tags. The Build() receiver on + // this struct must be able to build a Method from the configuration. 
+ // + // Common errors: + // - Returning a struct without a pointer + // - Returning a non-struct + ConfigStruct() Config +} diff --git a/pkg/encryption/method/errors.go b/pkg/encryption/method/errors.go new file mode 100644 index 00000000000..64a4c11c08e --- /dev/null +++ b/pkg/encryption/method/errors.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +import "fmt" + +// ErrCryptoFailure indicates a generic cryptographic failure. This error should be embedded into +// ErrEncryptionFailed, ErrDecryptionFailed, or ErrInvalidConfiguration. +type ErrCryptoFailure struct { + Message string + Cause error +} + +func (e ErrCryptoFailure) Error() string { + if e.Cause != nil { + return fmt.Sprintf("%s: %v", e.Message, e.Cause) + } + return e.Message +} + +func (e ErrCryptoFailure) Unwrap() error { + return e.Cause +} + +// ErrEncryptionFailed indicates that encrypting a set of data failed. +type ErrEncryptionFailed struct { + Cause error +} + +func (e ErrEncryptionFailed) Error() string { + if e.Cause != nil { + return fmt.Sprintf("encryption failed: %v", e.Cause) + } + return "encryption failed" +} + +func (e ErrEncryptionFailed) Unwrap() error { + return e.Cause +} + +// ErrDecryptionFailed indicates that decrypting a set of data failed. +type ErrDecryptionFailed struct { + Cause error +} + +func (e ErrDecryptionFailed) Error() string { + if e.Cause != nil { + return fmt.Sprintf("decryption failed: %v", e.Cause) + } + return "decryption failed" +} + +func (e ErrDecryptionFailed) Unwrap() error { + return e.Cause +} + +// ErrDecryptionKeyUnavailable indicates that no decryption key is available. +type ErrDecryptionKeyUnavailable struct { +} + +func (e ErrDecryptionKeyUnavailable) Error() string { + return "no decryption key available" +} + +// ErrInvalidConfiguration indicates that the method configuration is incorrect. 
+type ErrInvalidConfiguration struct { + Cause error +} + +func (e ErrInvalidConfiguration) Error() string { + if e.Cause != nil { + return fmt.Sprintf("invalid method configuration: %v", e.Cause) + } + return "invalid method configuration" +} + +func (e ErrInvalidConfiguration) Unwrap() error { + return e.Cause +} diff --git a/pkg/encryption/method/id.go b/pkg/encryption/method/id.go new file mode 100644 index 00000000000..b79669826dd --- /dev/null +++ b/pkg/encryption/method/id.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +import ( + "fmt" +) + +// ID is a type alias to make passing the wrong ID into a method ID harder. +type ID string + +// Validate validates the key provider ID for correctness. +func (i ID) Validate() error { + if i == "" { + return fmt.Errorf("empty key provider ID (key provider IDs must match %s)", idRe.String()) + } + if !idRe.MatchString(string(i)) { + return fmt.Errorf("invalid key provider ID: %s (must match %s)", i, idRe.String()) + } + return nil +} diff --git a/pkg/encryption/method/method.go b/pkg/encryption/method/method.go new file mode 100644 index 00000000000..89d72a5c54b --- /dev/null +++ b/pkg/encryption/method/method.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package method + +// Method is a low-level encryption method interface that is responsible for encrypting a binary blob of data. It should +// not try to interpret what kind of data it is encrypting. +type Method interface { + // Encrypt encrypts the specified data with the set configuration. This method should treat any data passed as + // opaque and should not try to interpret its contents. The interpretation is the job of the encryption.Encryption + // interface. 
+ Encrypt(data []byte) ([]byte, error) + // Decrypt decrypts the specified data with the set configuration. This method should treat any data passed as + // opaque and should not try to interpret its contents. The interpretation is the job of the encryption.Encryption + // interface. + Decrypt(data []byte) ([]byte, error) +} diff --git a/pkg/encryption/method/unencrypted/method.go b/pkg/encryption/method/unencrypted/method.go new file mode 100644 index 00000000000..d80f07ed92e --- /dev/null +++ b/pkg/encryption/method/unencrypted/method.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package unencrypted + +import ( + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +func New() method.Descriptor { + return &descriptor{} +} + +type descriptor struct{} + +func (f *descriptor) ID() method.ID { + return "unencrypted" +} +func (f *descriptor) ConfigStruct() method.Config { + return new(config) +} + +type config struct{} + +func (c *config) Build() (method.Method, error) { + return new(unenc), nil +} + +type unenc struct{} + +func (a *unenc) Encrypt(data []byte) ([]byte, error) { + panic("Placeholder for type check! Should never be called!") +} +func (a *unenc) Decrypt(data []byte) ([]byte, error) { + panic("Placeholder for type check! Should never be called!") +} + +func Is(m method.Method) bool { + _, ok := m.(*unenc) + return ok +} diff --git a/pkg/encryption/methods.go b/pkg/encryption/methods.go new file mode 100644 index 00000000000..a49056d402b --- /dev/null +++ b/pkg/encryption/methods.go @@ -0,0 +1,104 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/method" + "github.com/kubegems/opentofu/pkg/encryption/method/unencrypted" + "github.com/kubegems/opentofu/pkg/encryption/registry" + "github.com/zclconf/go-cty/cty" +) + +func (e *targetBuilder) setupMethods() hcl.Diagnostics { + var diags hcl.Diagnostics + + e.methodValues = make(map[string]map[string]cty.Value) + e.methods = make(map[method.Addr]method.Method) + + for _, m := range e.cfg.MethodConfigs { + diags = append(diags, e.setupMethod(m)...) + } + + // Regenerate the context now that the method is loaded + mMap := make(map[string]cty.Value) + for name, ms := range e.methodValues { + mMap[name] = cty.ObjectVal(ms) + } + e.ctx.Variables["method"] = cty.ObjectVal(mMap) + + return diags +} + +// setupMethod sets up a single method for encryption. It returns a list of diagnostics if the method is invalid. 
+func (e *targetBuilder) setupMethod(cfg config.MethodConfig) hcl.Diagnostics {
+	addr, diags := cfg.Addr()
+	if diags.HasErrors() {
+		return diags
+	}
+
+	// Ensure cfg.Type is in methodValues
+	if _, ok := e.methodValues[cfg.Type]; !ok {
+		e.methodValues[cfg.Type] = make(map[string]cty.Value)
+	}
+
+	// Lookup the definition of the encryption method from the registry
+	encryptionMethod, err := e.reg.GetMethodDescriptor(method.ID(cfg.Type))
+	if err != nil {
+
+		// Handle if the method was not found. Note: errors.As (not errors.Is) is
+		// required here, since the registry returns a typed error value and we
+		// are matching on its type, not on a sentinel.
+		var notFoundError *registry.MethodNotFoundError
+		if errors.As(err, &notFoundError) {
+			return append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unknown encryption method type",
+				Detail:   fmt.Sprintf("Can not find %q", cfg.Type),
+			})
+		}
+
+		// Or, we don't know the error type, so we'll just return it as a generic error
+		return append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Error fetching encryption method %q", cfg.Type),
+			Detail:   err.Error(),
+		})
+	}
+
+	// TODO: we could use varhcl here to provide better error messages
+	methodConfig := encryptionMethod.ConfigStruct()
+	methodDiags := gohcl.DecodeBody(cfg.Body, e.ctx, methodConfig)
+	diags = append(diags, methodDiags...)
+	if diags.HasErrors() {
+		return diags
+	}
+
+	e.methodValues[cfg.Type][cfg.Name] = cty.StringVal(string(addr))
+	m, err := methodConfig.Build()
+	if err != nil {
+		// TODO this error handling could use some work
+		return append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Encryption method configuration failed",
+			Detail:   err.Error(),
+		})
+	}
+	e.methods[addr] = m
+
+	if unencrypted.Is(m) {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagWarning,
+			Summary:  "Unencrypted method configured",
+			Detail:   "Method unencrypted is present in configuration. 
This is a security risk and should only be enabled during migrations.", + }) + } + + return diags +} diff --git a/pkg/encryption/plan.go b/pkg/encryption/plan.go new file mode 100644 index 00000000000..b008846fafb --- /dev/null +++ b/pkg/encryption/plan.go @@ -0,0 +1,83 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" +) + +// PlanEncryption describes the methods that you can use for encrypting a plan file. Plan files are opaque values with +// no standardized format, so the encrypted form should be treated equally an opaque value. +type PlanEncryption interface { + // EncryptPlan encrypts a plan file and returns the encrypted form. + // + // When implementing this function: + // + // Plan files are opaque values. You may expect a valid plan file on the input, but you can produce binary data + // that is not necessarily a valid plan file. If no encryption is configured, this function should pass through + // any data it receives without modification, even if the plan file is invalid. + // + // When using this function: + // + // Make sure that you pass a valid plan file as an input. Failing to provide a valid plan file may result in an + // error. However, output values may not be valid plan files and you should not pass the encrypted plan file to any + // additional functions that normally work with plan files. + EncryptPlan([]byte) ([]byte, error) + + // DecryptPlan decrypts an encrypted plan file. + // + // When implementing this function: + // + // If the user has configured no encryption, pass through any input unmodified regardless if the input is a valid + // plan file. 
If the user configured encryption, decrypt the plan file and return the decrypted plan file as a
+	// binary without further evaluating its validity.
+	//
+	// When using this function:
+	//
+	// Pass a potentially encrypted plan file as an input, and you will receive the decrypted plan file or an error as
+	// a result.
+	DecryptPlan([]byte) ([]byte, error)
+}
+
+type planEncryption struct {
+	base *baseEncryption
+}
+
+func newPlanEncryption(enc *encryption, target *config.TargetConfig, enforced bool, name string, staticEval *configs.StaticEvaluator) (PlanEncryption, hcl.Diagnostics) {
+	base, diags := newBaseEncryption(enc, target, enforced, name, staticEval)
+	return &planEncryption{base}, diags
+}
+
+func (p planEncryption) EncryptPlan(data []byte) ([]byte, error) {
+	return p.base.encrypt(data, func(base basedata) interface{} { return base })
+}
+
+func (p planEncryption) DecryptPlan(data []byte) ([]byte, error) {
+	return p.base.decrypt(data, func(data []byte) error {
+		// Check magic bytes (plan files are ZIP archives, which start with "PK")
+		if len(data) < 2 || string(data[:2]) != "PK" {
+			return fmt.Errorf("invalid plan file %v", string(data[:2]))
+		}
+		return nil
+	})
+}
+
+func PlanEncryptionDisabled() PlanEncryption {
+	return &planDisabled{}
+}
+
+type planDisabled struct{}
+
+func (s *planDisabled) EncryptPlan(plainPlan []byte) ([]byte, error) {
+	return plainPlan, nil
+}
+func (s *planDisabled) DecryptPlan(encryptedPlan []byte) ([]byte, error) {
+	return encryptedPlan, nil
+}
diff --git a/pkg/encryption/registry/compliancetest/compliance.go b/pkg/encryption/registry/compliancetest/compliance.go
new file mode 100644
index 00000000000..52b92e20a04
--- /dev/null
+++ b/pkg/encryption/registry/compliancetest/compliance.go
@@ -0,0 +1,29 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/registry" +) + +func ComplianceTest(t *testing.T, factory func() registry.Registry) { + t.Run("returns-registry", func(t *testing.T) { + reg := factory() + if reg == nil { + t.Fatalf("Calling the factory method did not return a valid registry.") + } + }) + + t.Run("key_provider", func(t *testing.T) { + complianceTestKeyProviders(t, factory) + }) + + t.Run("method", func(t *testing.T) { + complianceTestMethods(t, factory) + }) +} diff --git a/pkg/encryption/registry/compliancetest/compliance_key_provider.go b/pkg/encryption/registry/compliancetest/compliance_key_provider.go new file mode 100644 index 00000000000..42b4adf22c4 --- /dev/null +++ b/pkg/encryption/registry/compliancetest/compliance_key_provider.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "errors" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/registry" +) + +func complianceTestKeyProviders(t *testing.T, factory func() registry.Registry) { + t.Run("registration-and-return", func(t *testing.T) { + complianceTestKeyProviderRegistrationAndReturn(t, factory) + }) + t.Run("register-invalid-id", func(t *testing.T) { + complianceTestKeyProviderInvalidID(t, factory) + }) + t.Run("duplicate-registration", func(t *testing.T) { + complianceTestKeyProviderDuplicateRegistration(t, factory) + }) +} + +func complianceTestKeyProviderRegistrationAndReturn(t *testing.T, factory func() registry.Registry) { + reg := factory() + testKeyProvider := &testKeyProviderDescriptor{ + "test", + } + if err := reg.RegisterKeyProvider(testKeyProvider); err != nil { + t.Fatalf("Failed to register test key provider with ID %s (%v)", testKeyProvider.id, err) + } + 
returnedKeyProvider, err := reg.GetKeyProviderDescriptor(testKeyProvider.id) + if err != nil { + t.Fatalf("The previously registered key provider with the ID %s couldn't be fetched from the registry (%v).", testKeyProvider.id, err) + } + returnedTypedKeyProvider, ok := returnedKeyProvider.(*testKeyProviderDescriptor) + if !ok { + t.Fatalf("The returned key provider was not of the expected type of %T, but instead it was %T.", testKeyProvider, returnedKeyProvider) + } + if returnedTypedKeyProvider.id != testKeyProvider.id { + t.Fatalf("The returned key provider contained the wrong ID %s instead of %s", returnedTypedKeyProvider.id, testKeyProvider.id) + } + + _, err = reg.GetKeyProviderDescriptor("nonexistent") + if err == nil { + t.Fatalf("Requesting a non-existent key provider from GetKeyProviderDescriptor did not return an error.") + } + var typedErr *registry.KeyProviderNotFoundError + if !errors.As(err, &typedErr) { + t.Fatalf( + "Requesting a non-existent key provider from GetKeyProviderDescriptor returned an incorrect error type of %T. This function should always return a *registry.KeyProviderNotFoundError if the key provider was not found.", + err, + ) + } +} + +func complianceTestKeyProviderInvalidID(t *testing.T, factory func() registry.Registry) { + reg := factory() + testKeyProvider := &testKeyProviderDescriptor{ + "Hello world!", + } + err := reg.RegisterKeyProvider(testKeyProvider) + if err == nil { + t.Fatalf("Registering a key provider with the invalid ID of %s did not result in an error.", testKeyProvider.id) + } + var typedErr *registry.InvalidKeyProviderError + if !errors.As(err, &typedErr) { + t.Fatalf( + "Registering a key provider with an invalid ID of %s resulted in an error of type %T instead of %T. 
Please make sure to use the correct typed errors.",
+			testKeyProvider.id,
+			err,
+			typedErr,
+		)
+	}
+}
+
+func complianceTestKeyProviderDuplicateRegistration(t *testing.T, factory func() registry.Registry) {
+	reg := factory()
+	testKeyProvider := &testKeyProviderDescriptor{
+		"test",
+	}
+	testKeyProvider2 := &testKeyProviderDescriptor{
+		"test",
+	}
+	if err := reg.RegisterKeyProvider(testKeyProvider); err != nil {
+		t.Fatalf("Failed to register test key provider with ID %s (%v)", testKeyProvider.id, err)
+	}
+	err := reg.RegisterKeyProvider(testKeyProvider)
+	if err == nil {
+		t.Fatalf("Re-registering the same key provider again did not result in an error.")
+	}
+	var typedErr *registry.KeyProviderAlreadyRegisteredError
+	if !errors.As(err, &typedErr) {
+		t.Fatalf(
+			"Re-registering the same key provider twice resulted in an error of the type %T instead of %T. Please make sure to use the correct typed errors.",
+			err,
+			typedErr,
+		)
+	}
+
+	err = reg.RegisterKeyProvider(testKeyProvider2)
+	if err == nil {
+		t.Fatalf("Re-registering a key provider with a duplicate ID did not result in an error.")
+	}
+	if !errors.As(err, &typedErr) {
+		t.Fatalf(
+			"Re-registering a key provider with a duplicate ID resulted in an error of the type %T instead of %T. 
Please make sure to use the correct typed errors.", + err, + typedErr, + ) + } +} + +type testKeyProviderDescriptor struct { + id keyprovider.ID +} + +func (t testKeyProviderDescriptor) ID() keyprovider.ID { + return t.id +} + +func (t testKeyProviderDescriptor) ConfigStruct() keyprovider.Config { + return &testKeyProviderConfigStruct{} +} + +type testKeyProviderConfigStruct struct { +} + +func (t testKeyProviderConfigStruct) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) { + return nil, nil, keyprovider.ErrInvalidConfiguration{ + Message: "The Build() function is not implemented on the testKeyProviderConfigStruct", + } +} diff --git a/pkg/encryption/registry/compliancetest/compliance_method.go b/pkg/encryption/registry/compliancetest/compliance_method.go new file mode 100644 index 00000000000..4a78dfa1679 --- /dev/null +++ b/pkg/encryption/registry/compliancetest/compliance_method.go @@ -0,0 +1,138 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package compliancetest + +import ( + "errors" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/method" + "github.com/kubegems/opentofu/pkg/encryption/registry" +) + +func complianceTestMethods(t *testing.T, factory func() registry.Registry) { + t.Run("registration-and-return", func(t *testing.T) { + complianceTestMethodRegistrationAndReturn(t, factory) + }) + t.Run("register-invalid-id", func(t *testing.T) { + complianceTestMethodInvalidID(t, factory) + }) + t.Run("duplicate-registration", func(t *testing.T) { + complianceTestMethodDuplicateRegistration(t, factory) + }) +} + +func complianceTestMethodRegistrationAndReturn(t *testing.T, factory func() registry.Registry) { + reg := factory() + testMethod := &testMethodDescriptor{ + "test", + } + if err := reg.RegisterMethod(testMethod); err != nil { + t.Fatalf("Failed to register test method with ID %s (%v)", testMethod.id, err) + } + returnedMethod, err := reg.GetMethodDescriptor(testMethod.id) + if err != nil { + t.Fatalf("The previously registered method with the ID %s couldn't be fetched from the registry (%v).", testMethod.id, err) + } + returnedTypedMethod, ok := returnedMethod.(*testMethodDescriptor) + if !ok { + t.Fatalf("The returned method was not of the expected type of %T, but instead it was %T.", testMethod, returnedMethod) + } + if returnedTypedMethod.id != testMethod.id { + t.Fatalf("The returned method contained the wrong ID %s instead of %s", returnedTypedMethod.id, testMethod.id) + } + + _, err = reg.GetMethodDescriptor("nonexistent") + if err == nil { + t.Fatalf("Requesting a non-existent method from GetMethodDescriptor did not return an error.") + } + var typedErr *registry.MethodNotFoundError + if !errors.As(err, &typedErr) { + t.Fatalf( + "Requesting a non-existent method from GetMethodDescriptor returned an incorrect error type of %T. 
This function should always return a *registry.MethodNotFoundError if the method was not found.",
+			err,
+		)
+	}
+}
+
+func complianceTestMethodInvalidID(t *testing.T, factory func() registry.Registry) {
+	reg := factory()
+	testMethod := &testMethodDescriptor{
+		"Hello world!",
+	}
+	err := reg.RegisterMethod(testMethod)
+	if err == nil {
+		t.Fatalf("Registering a method with the invalid ID of %s did not result in an error.", testMethod.id)
+	}
+	var typedErr *registry.InvalidMethodError
+	if !errors.As(err, &typedErr) {
+		t.Fatalf(
+			"Registering a method with an invalid ID of %s resulted in an error of type %T instead of %T. Please make sure to use the correct typed errors.",
+			testMethod.id,
+			err,
+			typedErr,
+		)
+	}
+}
+
+func complianceTestMethodDuplicateRegistration(t *testing.T, factory func() registry.Registry) {
+	reg := factory()
+	testMethod := &testMethodDescriptor{
+		"test",
+	}
+	testMethod2 := &testMethodDescriptor{
+		"test",
+	}
+	if err := reg.RegisterMethod(testMethod); err != nil {
+		t.Fatalf("Failed to register test method with ID %s (%v)", testMethod.id, err)
+	}
+	err := reg.RegisterMethod(testMethod)
+	if err == nil {
+		t.Fatalf("Re-registering the same method again did not result in an error.")
+	}
+	var typedErr *registry.MethodAlreadyRegisteredError
+	if !errors.As(err, &typedErr) {
+		t.Fatalf(
+			"Re-registering the same method twice resulted in an error of the type %T instead of %T. Please make sure to use the correct typed errors.",
+			err,
+			typedErr,
+		)
+	}
+
+	err = reg.RegisterMethod(testMethod2)
+	if err == nil {
+		t.Fatalf("Re-registering a method with a duplicate ID did not result in an error.")
+	}
+	if !errors.As(err, &typedErr) {
+		t.Fatalf(
+			"Re-registering a method with a duplicate ID resulted in an error of the type %T instead of %T. 
Please make sure to use the correct typed errors.", + err, + typedErr, + ) + } +} + +type testMethodDescriptor struct { + id method.ID +} + +func (t testMethodDescriptor) ID() method.ID { + return t.id +} + +func (t testMethodDescriptor) ConfigStruct() method.Config { + return &testMethodConfig{} +} + +type testMethodConfig struct { +} + +func (t testMethodConfig) Build() (method.Method, error) { + return nil, method.ErrInvalidConfiguration{ + Cause: fmt.Errorf("build not implemented for test method"), + } +} diff --git a/pkg/encryption/registry/errors.go b/pkg/encryption/registry/errors.go new file mode 100644 index 00000000000..9ab79b5f2ac --- /dev/null +++ b/pkg/encryption/registry/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package registry + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// InvalidKeyProviderError indicates that the supplied keyprovider.Descriptor is invalid/misbehaving. Check the error +// message for details. +type InvalidKeyProviderError struct { + KeyProvider keyprovider.Descriptor + Cause error +} + +func (k InvalidKeyProviderError) Error() string { + return fmt.Sprintf("the supplied key provider %T is invalid (%v)", k.KeyProvider, k.Cause) +} + +func (k InvalidKeyProviderError) Unwrap() error { + return k.Cause +} + +// KeyProviderNotFoundError indicates that the requested key provider was not found in the registry. +type KeyProviderNotFoundError struct { + ID keyprovider.ID +} + +func (k KeyProviderNotFoundError) Error() string { + return fmt.Sprintf("key provider with ID %s not found", k.ID) +} + +// KeyProviderAlreadyRegisteredError indicates that the requested key provider was already registered in the registry. 
+type KeyProviderAlreadyRegisteredError struct { + ID keyprovider.ID + CurrentProvider keyprovider.Descriptor + PreviousProvider keyprovider.Descriptor +} + +func (k KeyProviderAlreadyRegisteredError) Error() string { + return fmt.Sprintf( + "error while registering key provider ID %s to %T, this ID is already registered by %T", + k.ID, k.CurrentProvider, k.PreviousProvider, + ) +} + +// InvalidMethodError indicates that the supplied method.Descriptor is invalid/misbehaving. Check the error message for +// details. +type InvalidMethodError struct { + Method method.Descriptor + Cause error +} + +func (k InvalidMethodError) Error() string { + return fmt.Sprintf("the supplied encryption method %T is invalid (%v)", k.Method, k.Cause) +} + +func (k InvalidMethodError) Unwrap() error { + return k.Cause +} + +// MethodNotFoundError indicates that the requested encryption method was not found in the registry. +type MethodNotFoundError struct { + ID method.ID +} + +func (m MethodNotFoundError) Error() string { + return fmt.Sprintf("encryption method with ID %s not found", m.ID) +} + +// MethodAlreadyRegisteredError indicates that the requested encryption method was already registered in the registry. +type MethodAlreadyRegisteredError struct { + ID method.ID + CurrentMethod method.Descriptor + PreviousMethod method.Descriptor +} + +func (m MethodAlreadyRegisteredError) Error() string { + return fmt.Sprintf( + "error while registering encryption method ID %s to %T, this ID is already registered by %T", + m.ID, m.CurrentMethod, m.PreviousMethod, + ) +} diff --git a/pkg/encryption/registry/lockingencryptionregistry/registry.go b/pkg/encryption/registry/lockingencryptionregistry/registry.go new file mode 100644 index 00000000000..fe0a6dc0550 --- /dev/null +++ b/pkg/encryption/registry/lockingencryptionregistry/registry.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package lockingencryptionregistry
+
+import (
+	"sync"
+
+	"github.com/kubegems/opentofu/pkg/encryption/keyprovider"
+	"github.com/kubegems/opentofu/pkg/encryption/method"
+	"github.com/kubegems/opentofu/pkg/encryption/registry"
+)
+
+// New returns a new encryption registry that locks for parallel access.
+func New() registry.Registry {
+	return &lockingRegistry{
+		lock:      &sync.RWMutex{},
+		providers: map[keyprovider.ID]keyprovider.Descriptor{},
+		methods:   map[method.ID]method.Descriptor{},
+	}
+}
+
+type lockingRegistry struct {
+	lock      *sync.RWMutex
+	providers map[keyprovider.ID]keyprovider.Descriptor
+	methods   map[method.ID]method.Descriptor
+}
+
+func (l *lockingRegistry) RegisterKeyProvider(keyProvider keyprovider.Descriptor) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	id := keyProvider.ID()
+	if err := id.Validate(); err != nil {
+		return &registry.InvalidKeyProviderError{KeyProvider: keyProvider, Cause: err}
+	}
+	if previousProvider, ok := l.providers[id]; ok {
+		return &registry.KeyProviderAlreadyRegisteredError{ID: id, CurrentProvider: keyProvider, PreviousProvider: previousProvider}
+	}
+	l.providers[id] = keyProvider
+	return nil
+}
+
+func (l *lockingRegistry) RegisterMethod(method method.Descriptor) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	id := method.ID()
+	if err := id.Validate(); err != nil {
+		return &registry.InvalidMethodError{Method: method, Cause: err}
+	}
+	if previousMethod, ok := l.methods[id]; ok {
+		return &registry.MethodAlreadyRegisteredError{ID: id, CurrentMethod: method, PreviousMethod: previousMethod}
+	}
+	l.methods[id] = method
+	return nil
+}
+
+func (l *lockingRegistry) GetKeyProviderDescriptor(id keyprovider.ID) (keyprovider.Descriptor, error) {
+	l.lock.RLock()
+	defer l.lock.RUnlock()
+	provider, ok := l.providers[id]
+	if !ok {
+		return nil, &registry.KeyProviderNotFoundError{ID: id}
+	}
+	return provider, nil
+}
+
+func (l *lockingRegistry) GetMethodDescriptor(id method.ID) (method.Descriptor, error) {
+	l.lock.RLock()
+	defer l.lock.RUnlock()
+	foundMethod, 
ok := l.methods[id] + if !ok { + return nil, ®istry.MethodNotFoundError{ID: id} + } + return foundMethod, nil +} diff --git a/pkg/encryption/registry/lockingencryptionregistry/registry_test.go b/pkg/encryption/registry/lockingencryptionregistry/registry_test.go new file mode 100644 index 00000000000..d112f5a5d01 --- /dev/null +++ b/pkg/encryption/registry/lockingencryptionregistry/registry_test.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lockingencryptionregistry_test + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/encryption/registry/compliancetest" + "github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry" +) + +func TestCompliance(t *testing.T) { + compliancetest.ComplianceTest(t, lockingencryptionregistry.New) +} diff --git a/pkg/encryption/registry/registry.go b/pkg/encryption/registry/registry.go new file mode 100644 index 00000000000..e735caf5af6 --- /dev/null +++ b/pkg/encryption/registry/registry.go @@ -0,0 +1,31 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package registry + +import ( + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" +) + +// Registry collects all encryption methods and key providers +type Registry interface { + // RegisterKeyProvider registers a key provider. Use the keyprovider.Any(). + // This function returns a *KeyProviderAlreadyRegisteredError error if a key provider with the + // same ID is already registered. + RegisterKeyProvider(keyProvider keyprovider.Descriptor) error + // RegisterMethod registers an encryption method. Use the method.Any() function to convert your method into a + // suitable format. 
This function returns a *MethodAlreadyRegisteredError error if a key provider with the same ID is + // already registered. + RegisterMethod(method method.Descriptor) error + + // GetKeyProviderDescriptor returns the key provider with the specified ID. If the key provider is not registered, + // it will return a *KeyProviderNotFoundError error. + GetKeyProviderDescriptor(id keyprovider.ID) (keyprovider.Descriptor, error) + + // GetMethodDescriptor returns the method with the specified ID. + // If the method is not registered, it will return a *MethodNotFoundError. + GetMethodDescriptor(id method.ID) (method.Descriptor, error) +} diff --git a/pkg/encryption/state.go b/pkg/encryption/state.go new file mode 100644 index 00000000000..10e41cd8113 --- /dev/null +++ b/pkg/encryption/state.go @@ -0,0 +1,147 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" +) + +// StateEncryption describes the interface for encrypting state files. +type StateEncryption interface { + // DecryptState decrypts a potentially encrypted state file and returns a valid JSON-serialized state file. + // + // When implementing this function: + // + // If the user configured no encryption, also return the input as-is regardless if the state file is valid. If the + // user configured encryption unserialize the input as JSON and check for the presence of the field specified in the + // StateEncryptionMarkerField. If the field is not present, return the input as-is and return a warning that an + // unexpected unencrypted state file was encountered. Otherwise, decrypt the state file and return the decrypted + // state file as serialized JSON. 
If the state file cannot be decrypted, return an error in the diagnostics. + // + // When using this function: + // + // After reading the state file from its source (local file, remote backend, etc.), pass in the state file to this + // function. Do not attempt to determine if the state file is encrypted as this function will take care of any + // and all encryption-related matters. After the function returns, use the returned byte array as a normal state + // file. + DecryptState([]byte) ([]byte, error) + + // EncryptState encrypts a state file and returns the encrypted form. + // + // When implementing this function: + // + // The file should take a JSON-serialized state file as an input and encrypt it according to the configuration. + // The encrypted form should also return a JSON which contains, at least, the key specified in + // StateEncryptionMarkerField to identify the state file as encrypted. This is necessary because some backends + // expect a state file to always be a JSON file. + // + // If the user configured no encryption, this function should be a no-op and return the input unchanged. If the + // input is not a valid state file, this function should return an error in the diagnostics return. + // + // When using this function: + // + // Pass in a valid JSON-serialized state file as an input and store the output. Note that you should not pass the + // output to any additional functions that require a valid state file as it may not contain the fields typically + // present in a state file. 
+ EncryptState([]byte) ([]byte, error) +} + +type stateEncryption struct { + base *baseEncryption +} + +func newStateEncryption(enc *encryption, target *config.TargetConfig, enforced bool, name string, staticEval *configs.StaticEvaluator) (StateEncryption, hcl.Diagnostics) { + base, diags := newBaseEncryption(enc, target, enforced, name, staticEval) + return &stateEncryption{base}, diags +} + +type statedata struct { + Serial *int `json:"serial"` + Lineage string `json:"lineage"` +} + +func (s *stateEncryption) EncryptState(plainState []byte) ([]byte, error) { + var passthrough statedata + err := json.Unmarshal(plainState, &passthrough) + if err != nil { + return nil, err + } + + return s.base.encrypt(plainState, func(base basedata) interface{} { + // Merge together the base encryption data and the passthrough fields + return struct { + statedata + basedata + }{ + statedata: passthrough, + basedata: base, + } + }) +} + +func (s *stateEncryption) DecryptState(encryptedState []byte) ([]byte, error) { + decryptedState, err := s.base.decrypt(encryptedState, func(data []byte) error { + tmp := struct { + FormatVersion string `json:"terraform_version"` + }{} + err := json.Unmarshal(data, &tmp) + if err != nil { + return err + } + if len(tmp.FormatVersion) == 0 { + // Not a state file + return fmt.Errorf("Given payload is not a state file") + } + // Probably a state file + return nil + }) + + if err != nil { + return nil, err + } + + // Make sure that the state passthrough fields match + var encrypted statedata + err = json.Unmarshal(encryptedState, &encrypted) + if err != nil { + return nil, err + } + var state statedata + err = json.Unmarshal(decryptedState, &state) + if err != nil { + return nil, err + } + + // TODO make encrypted.Serial non-optional. This is only for supporting alpha1 states! 
+	if encrypted.Serial != nil && state.Serial != nil && *state.Serial != *encrypted.Serial {
+		return nil, fmt.Errorf("invalid state metadata, serial field mismatch %v vs %v", *encrypted.Serial, *state.Serial)
+	}
+
+	// TODO make encrypted.Lineage non-optional. This is only for supporting alpha1 states!
+	if encrypted.Lineage != "" && state.Lineage != encrypted.Lineage {
+		return nil, fmt.Errorf("invalid state metadata, lineage field mismatch %v vs %v", encrypted.Lineage, state.Lineage)
+	}
+
+	return decryptedState, nil
+}
+
+func StateEncryptionDisabled() StateEncryption {
+	return &stateDisabled{}
+}
+
+type stateDisabled struct{}
+
+func (s *stateDisabled) EncryptState(plainState []byte) ([]byte, error) {
+	return plainState, nil
+}
+func (s *stateDisabled) DecryptState(encryptedState []byte) ([]byte, error) {
+	return encryptedState, nil
+}
diff --git a/pkg/encryption/targets.go b/pkg/encryption/targets.go
new file mode 100644
index 00000000000..f48d91825c5
--- /dev/null
+++ b/pkg/encryption/targets.go
@@ -0,0 +1,128 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package encryption + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption/config" + "github.com/kubegems/opentofu/pkg/encryption/keyprovider" + "github.com/kubegems/opentofu/pkg/encryption/method" + "github.com/kubegems/opentofu/pkg/encryption/method/unencrypted" + "github.com/kubegems/opentofu/pkg/encryption/registry" + "github.com/zclconf/go-cty/cty" +) + +type targetBuilder struct { + cfg *config.EncryptionConfig + reg registry.Registry + + // Used to evaluate hcl expressions + ctx *hcl.EvalContext + + keyProviderMetadata map[keyprovider.Addr][]byte + + // Used to build EvalContext (and related mappings) + keyValues map[string]map[string]cty.Value + methodValues map[string]map[string]cty.Value + methods map[method.Addr]method.Method + staticEval *configs.StaticEvaluator +} + +func (base *baseEncryption) buildTargetMethods(meta map[keyprovider.Addr][]byte) ([]method.Method, hcl.Diagnostics) { + var diags hcl.Diagnostics + + builder := &targetBuilder{ + cfg: base.enc.cfg, + reg: base.enc.reg, + + staticEval: base.staticEval, + ctx: &hcl.EvalContext{ + Variables: map[string]cty.Value{}, + }, + + keyProviderMetadata: meta, + } + + keyDiags := append(diags, builder.setupKeyProviders()...) + diags = append(diags, keyDiags...) + if diags.HasErrors() { + return nil, diags + } + methodDiags := append(diags, builder.setupMethods()...) + diags = append(diags, methodDiags...) + if diags.HasErrors() { + return nil, diags + } + + methods, targetDiags := builder.build(base.target, base.name) + diags = append(diags, targetDiags...) 
+ + if base.enforced { + for _, m := range methods { + if unencrypted.Is(m) { + return nil, append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unencrypted method is forbidden", + Detail: "Unable to use `unencrypted` method since the `enforced` flag is used.", + }) + } + } + } + + return methods, diags +} + +// build sets up a single target for encryption. It returns the primary and fallback methods for the target, as well +// as a list of diagnostics if the target is invalid. +// The targetName parameter is used for error messages only. +func (e *targetBuilder) build(target *config.TargetConfig, targetName string) (methods []method.Method, diags hcl.Diagnostics) { + + // gohcl has some weirdness around attributes that are not provided, but are hcl.Expressions + // They will set the attribute field to a static null expression + // https://github.com/hashicorp/hcl/blob/main/gohcl/decode.go#L112-L118 + + // Descriptor referenced by this target + var methodIdent *string + decodeDiags := gohcl.DecodeExpression(target.Method, e.ctx, &methodIdent) + diags = append(diags, decodeDiags...) 
+ + // Only attempt to fetch the method if the decoding was successful + if !decodeDiags.HasErrors() { + if methodIdent != nil { + if method, ok := e.methods[method.Addr(*methodIdent)]; ok { + methods = append(methods, method) + } else { + // We can't continue if the method is not found + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Undefined encryption method", + Detail: fmt.Sprintf("Can not find %q for %q", *methodIdent, targetName), + Subject: target.Method.Range().Ptr(), + }) + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing encryption method", + Detail: fmt.Sprintf("undefined or null method used for %q", targetName), + Subject: target.Method.Range().Ptr(), + }) + } + } + + // Attempt to fetch the fallback method if it's been configured + if target.Fallback != nil { + fallback, fallbackDiags := e.build(target.Fallback, targetName+".fallback") + diags = append(diags, fallbackDiags...) + methods = append(methods, fallback...) + } + + return methods, diags +} diff --git a/pkg/encryption/targets_test.go b/pkg/encryption/targets_test.go new file mode 100644 index 00000000000..8d7764802f6 --- /dev/null +++ b/pkg/encryption/targets_test.go @@ -0,0 +1,242 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package encryption

import (
	"reflect"
	"testing"

	"github.com/hashicorp/hcl/v2"
	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/encryption/config"
	"github.com/kubegems/opentofu/pkg/encryption/keyprovider"
	"github.com/kubegems/opentofu/pkg/encryption/keyprovider/static"
	"github.com/kubegems/opentofu/pkg/encryption/method"
	"github.com/kubegems/opentofu/pkg/encryption/method/aesgcm"
	"github.com/kubegems/opentofu/pkg/encryption/method/unencrypted"
	"github.com/kubegems/opentofu/pkg/encryption/registry"
	"github.com/kubegems/opentofu/pkg/encryption/registry/lockingencryptionregistry"
	"github.com/zclconf/go-cty/cty"
)

// TestBaseEncryption_buildTargetMethods exercises buildTargetMethods against
// a table of raw HCL encryption configurations, covering: a valid single
// method, a missing key provider, fallback chains, the enforced flag (with
// and without an unencrypted fallback), and keys supplied via variables.
func TestBaseEncryption_buildTargetMethods(t *testing.T) {
	t.Parallel()

	tests := map[string]btmTestCase{
		"simple": {
			rawConfig: `
				key_provider "static" "basic" {
					key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169"
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				state {
					method = method.aes_gcm.example
				}
			`,
			wantMethods: []func(method.Method) bool{
				aesgcm.Is,
			},
		},
		// References an undeclared key provider; the expected error message
		// includes the exact source position within the raw config above.
		"no-key-provider": {
			rawConfig: `
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				state {
					method = method.aes_gcm.example
				}
			`,
			wantErr: `Test Config Source:3,25-32: Unsupported attribute; This object does not have an attribute named "static".`,
		},
		// A fallback block should yield both methods, primary first.
		"fallback": {
			rawConfig: `
				key_provider "static" "basic" {
					key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169"
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				method "unencrypted" "example" {
				}
				state {
					method = method.aes_gcm.example
					fallback {
						method = method.unencrypted.example
					}
				}
			`,
			wantMethods: []func(method.Method) bool{
				aesgcm.Is,
				unencrypted.Is,
			},
		},
		// enforced = true is fine as long as no unencrypted method is used.
		"enforced": {
			rawConfig: `
				key_provider "static" "basic" {
					key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169"
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				method "unencrypted" "example" {
				}
				state {
					enforced = true
					method = method.aes_gcm.example
				}
			`,
			wantMethods: []func(method.Method) bool{
				aesgcm.Is,
			},
		},
		// enforced = true must reject an unencrypted method even when it is
		// only used as a fallback.
		"enforced-with-unencrypted": {
			rawConfig: `
				key_provider "static" "basic" {
					key = "6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169"
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				method "unencrypted" "example" {
				}
				state {
					enforced = true
					method = method.aes_gcm.example
					fallback {
						method = method.unencrypted.example
					}
				}
			`,
			wantErr: ": Unencrypted method is forbidden; Unable to use `unencrypted` method since the `enforced` flag is used.",
		},
		// The key may come from a module variable resolved via the static
		// evaluator constructed below.
		"key-from-vars": {
			rawConfig: `
				key_provider "static" "basic" {
					key = var.key
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				state {
					method = method.aes_gcm.example
				}
			`,
			wantMethods: []func(method.Method) bool{
				aesgcm.Is,
			},
		},
		"undefined-key-from-vars": {
			rawConfig: `
				key_provider "static" "basic" {
					key = var.undefinedkey
				}
				method "aes_gcm" "example" {
					keys = key_provider.static.basic
				}
				state {
					method = method.aes_gcm.example
				}
			`,
			wantErr: "Test Config Source:3,12-28: Undefined variable; Undefined variable var.undefinedkey",
		},
	}

	// Shared registry with the static key provider plus both methods
	// referenced by the test configurations.
	reg := lockingencryptionregistry.New()
	if err := reg.RegisterKeyProvider(static.New()); err != nil {
		panic(err)
	}
	if err := reg.RegisterMethod(aesgcm.New()); err != nil {
		panic(err)
	}
	if err := reg.RegisterMethod(unencrypted.New()); err != nil {
		panic(err)
	}

	// Minimal module declaring the "key" variable used by the
	// "key-from-vars" case; "undefinedkey" is deliberately absent.
	mod := &configs.Module{
		Variables: map[string]*configs.Variable{
			"key": {
				Name:    "key",
				Default: cty.StringVal("6f6f706830656f67686f6834616872756f3751756165686565796f6f72653169"),
				Type:    cty.String,
			},
		},
	}

	// Variables resolve to their declared defaults; no diagnostics.
	getVars := func(v *configs.Variable) (cty.Value, hcl.Diagnostics) {
		return v.Default, nil
	}

	modCall := configs.NewStaticModuleCall(addrs.RootModule, getVars, "", "")

	staticEval := configs.NewStaticEvaluator(mod, modCall)

	for name, test := range tests {
		t.Run(name, test.newTestRun(reg, staticEval))
	}
}

// btmTestCase is one table entry for TestBaseEncryption_buildTargetMethods.
type btmTestCase struct {
	rawConfig   string                     // must contain state target
	wantMethods []func(method.Method) bool // per-position method predicates, in chain order
	wantErr     string                     // exact diagnostic string expected, empty when success is expected
}

// newTestRun builds the subtest closure for this case: it parses rawConfig,
// constructs a baseEncryption for the state target and asserts that
// buildTargetMethods produces either the expected error or the expected
// method chain.
func (testCase btmTestCase) newTestRun(reg registry.Registry, staticEval *configs.StaticEvaluator) func(t *testing.T) {
	return func(t *testing.T) {
		t.Parallel()

		cfg, diags := config.LoadConfigFromString("Test Config Source", testCase.rawConfig)
		if diags.HasErrors() {
			// A parse failure here is a broken fixture, not a test failure.
			panic(diags.Error())
		}

		base := &baseEncryption{
			enc: &encryption{
				cfg: cfg,
				reg: reg,
			},
			target:     cfg.State.AsTargetConfig(),
			enforced:   cfg.State.Enforced,
			name:       "test",
			encMeta:    make(map[keyprovider.Addr][]byte),
			staticEval: staticEval,
		}

		methods, diags := base.buildTargetMethods(base.encMeta)

		if diags.HasErrors() {
			if !hasDiagWithMsg(diags, testCase.wantErr) {
				t.Fatalf("Got unexpected error: %v", diags.Error())
			}
		}

		if !diags.HasErrors() && testCase.wantErr != "" {
			t.Fatalf("Expected error (got none): %v", testCase.wantErr)
		}

		// For error cases wantMethods is nil, so this also asserts that no
		// methods were returned alongside a matched error.
		if len(methods) != len(testCase.wantMethods) {
			t.Fatalf("Expected %d method(s), got %d", len(testCase.wantMethods), len(methods))
		}

		for i, m := range methods {
			if !testCase.wantMethods[i](m) {
				t.Fatalf("Got unexpected method: %v", reflect.TypeOf(m).String())
			}
		}
	}
}

// hasDiagWithMsg reports whether any diagnostic renders exactly as msg.
func hasDiagWithMsg(diags hcl.Diagnostics, msg string) bool {
	for _, d := range diags {
		if d.Error() == msg {
			return true
		}
	}
	return false
}
// UnavailableError is returned by GetCurrent when the requested experiment
// name does not correspond to any known experiment, current or concluded.
type UnavailableError struct {
	ExperimentName string
}

// Error implements the error interface.
func (e UnavailableError) Error() string {
	return fmt.Sprintf("no current experiment is named %q", e.ExperimentName)
}

// ConcludedError is returned by GetCurrent when the requested experiment
// exists but has already concluded. Message describes the outcome and, where
// applicable, how to use the stable feature that replaced it.
type ConcludedError struct {
	ExperimentName string
	Message        string
}

// Error implements the error interface.
func (e ConcludedError) Error() string {
	return fmt.Sprintf("experiment %q has concluded: %s", e.ExperimentName, e.Message)
}

// Experiment represents a particular opt-in experiment that can be activated
// independently of all other experiments. Each value is a valid HCL
// identifier so it can be named directly in configuration.
type Experiment string

// Every constant declared here must have a unique string value and must be
// registered in init below as either a current or a concluded experiment.
const (
	VariableValidation             = Experiment("variable_validation")
	ModuleVariableOptionalAttrs    = Experiment("module_variable_optional_attrs")
	SuppressProviderSensitiveAttrs = Experiment("provider_sensitive_attrs")
	ConfigDrivenMove               = Experiment("config_driven_move")
	PreconditionsPostconditions    = Experiment("preconditions_postconditions")
)

func init() {
	// Registration of every experiment constant declared above. All are
	// currently concluded; there are no active experiments.
	registerConcludedExperiment(VariableValidation, "Custom variable validation can now be used by default, without enabling an experiment.")
	registerConcludedExperiment(SuppressProviderSensitiveAttrs, "Provider-defined sensitive attributes are now redacted by default, without enabling an experiment.")
	registerConcludedExperiment(ConfigDrivenMove, "Declarations of moved resource instances using \"moved\" blocks can now be used by default, without enabling an experiment.")
	registerConcludedExperiment(PreconditionsPostconditions, "Condition blocks can now be used by default, without enabling an experiment.")
	registerConcludedExperiment(ModuleVariableOptionalAttrs, "The final feature corresponding to this experiment differs from the experimental form and is available in the OpenTofu language from OpenTofu v1.3.0 onwards.")
}

// GetCurrent maps a user-supplied experiment name to its Experiment value,
// succeeding only for current experiments. Concluded experiments produce a
// ConcludedError carrying migration guidance; unknown names produce an
// UnavailableError.
func GetCurrent(name string) (Experiment, error) {
	exp := Experiment(name)
	switch {
	case exp.IsCurrent():
		return exp, nil
	case exp.IsConcluded():
		return Experiment(""), ConcludedError{ExperimentName: name, Message: concludedExperiments[exp]}
	default:
		return Experiment(""), UnavailableError{ExperimentName: name}
	}
}

// Keyword returns the keyword that would activate this experiment in
// configuration.
func (e Experiment) Keyword() string {
	return string(e)
}

// IsCurrent reports whether this experiment can still be activated.
func (e Experiment) IsCurrent() bool {
	return currentExperiments.Has(e)
}

// IsConcluded reports whether this experiment has already concluded.
func (e Experiment) IsConcluded() bool {
	_, exists := concludedExperiments[e]
	return exists
}

// currentExperiments holds the experiments available for activation in this
// build. Populated from init.
var currentExperiments = make(Set)

// concludedExperiments maps each finished experiment to a message describing
// its outcome, so modules still referencing it get actionable feedback. An
// entry may be deleted once it has been concluded for a full major release,
// since users are expected to upgrade one major version at a time. Populated
// from init.
var concludedExperiments = make(map[Experiment]string)

//lint:ignore U1000 No experiments are active
func registerCurrentExperiment(exp Experiment) {
	currentExperiments.Add(exp)
}

func registerConcludedExperiment(exp Experiment, message string) {
	concludedExperiments[exp] = message
}

// Set is an unordered collection of experiments; membership is all that
// matters.
type Set map[Experiment]struct{}

// NewSet constructs a Set containing the given experiments.
func NewSet(exps ...Experiment) Set {
	s := Set{}
	for _, exp := range exps {
		s[exp] = struct{}{}
	}
	return s
}

// SetUnion returns a fresh Set holding every member of all the given sets.
func SetUnion(sets ...Set) Set {
	union := Set{}
	for _, set := range sets {
		for exp := range set {
			union[exp] = struct{}{}
		}
	}
	return union
}

// Add inserts exp into the set; a no-op if it is already a member.
func (s Set) Add(exp Experiment) {
	s[exp] = struct{}{}
}

// Remove takes exp out of the set; a no-op if it is not a member.
func (s Set) Remove(exp Experiment) {
	delete(s, exp)
}

// Has reports whether exp is a member of the set.
func (s Set) Has(exp Experiment) bool {
	_, ok := s[exp]
	return ok
}
+// +// The correct way to use this function is to defer a call to its result so +// that the original tables can be restored at the conclusion of the calling +// test: +// +// defer experiments.OverrideForTesting(t, current, concluded)() +// +// This function modifies global variables that are normally fixed throughout +// our execution, so this function must not be called from non-test code and +// any test using it cannot safely run concurrently with other tests. +func OverrideForTesting(t *testing.T, current Set, concluded map[Experiment]string) func() { + // We're not currently using the given *testing.T in here, but we're + // requiring it anyway in case we might need it in future, and because + // it hopefully reinforces that only test code should be calling this. + + realCurrents := currentExperiments + realConcludeds := concludedExperiments + currentExperiments = current + concludedExperiments = concluded + return func() { + currentExperiments = realCurrents + concludedExperiments = realConcludeds + } +} diff --git a/pkg/genconfig/doc.go b/pkg/genconfig/doc.go new file mode 100644 index 00000000000..10c9b6d389d --- /dev/null +++ b/pkg/genconfig/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package genconfig implements config generation from provided state values. +package genconfig diff --git a/pkg/genconfig/generate_config.go b/pkg/genconfig/generate_config.go new file mode 100644 index 00000000000..e2d80bd5311 --- /dev/null +++ b/pkg/genconfig/generate_config.go @@ -0,0 +1,633 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package genconfig + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// GenerateResourceContents generates HCL configuration code for the provided +// resource and state value. +// +// If you want to generate actual valid OpenTofu code you should follow this +// call up with a call to WrapResourceContents, which will place an OpenTofu +// resource header around the attributes and blocks returned by this function. +func GenerateResourceContents(addr addrs.AbsResourceInstance, + schema *configschema.Block, + pc addrs.LocalProviderConfig, + stateVal cty.Value) (string, tfdiags.Diagnostics) { + var buf strings.Builder + + var diags tfdiags.Diagnostics + + if pc.LocalName != addr.Resource.Resource.ImpliedProvider() || pc.Alias != "" { + buf.WriteString(strings.Repeat(" ", 2)) + buf.WriteString(fmt.Sprintf("provider = %s\n", pc.StringCompact())) + } + + stateVal = omitUnknowns(stateVal) + if stateVal.RawEquals(cty.NilVal) { + diags = diags.Append(writeConfigAttributes(addr, &buf, schema.Attributes, 2)) + diags = diags.Append(writeConfigBlocks(addr, &buf, schema.BlockTypes, 2)) + } else { + diags = diags.Append(writeConfigAttributesFromExisting(addr, &buf, stateVal, schema.Attributes, 2)) + diags = diags.Append(writeConfigBlocksFromExisting(addr, &buf, stateVal, schema.BlockTypes, 2)) + } + + // The output better be valid HCL which can be parsed and formatted. 
+ formatted := hclwrite.Format([]byte(buf.String())) + return string(formatted), diags +} + +func WrapResourceContents(addr addrs.AbsResourceInstance, config string) string { + var buf strings.Builder + + buf.WriteString(fmt.Sprintf("resource %q %q {\n", addr.Resource.Resource.Type, addr.Resource.Resource.Name)) + buf.WriteString(config) + buf.WriteString("}") + + // The output better be valid HCL which can be parsed and formatted. + formatted := hclwrite.Format([]byte(buf.String())) + return string(formatted) +} + +func writeConfigAttributes(addr addrs.AbsResourceInstance, buf *strings.Builder, attrs map[string]*configschema.Attribute, indent int) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(attrs) == 0 { + return diags + } + + // Get a list of sorted attribute names so the output will be consistent between runs. + keys := make([]string, 0, len(attrs)) + for k := range attrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for i := range keys { + name := keys[i] + attrS := attrs[name] + if attrS.NestedType != nil { + diags = diags.Append(writeConfigNestedTypeAttribute(addr, buf, name, attrS, indent)) + continue + } + if attrS.Required { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s = ", name)) + tok := hclwrite.TokensForValue(attrS.EmptyValue()) + if _, err := tok.WriteTo(buf); err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Skipped part of config generation", + Detail: fmt.Sprintf("Could not create attribute %s in %s when generating import configuration. 
The plan will likely report the missing attribute as being deleted.", name, addr), + Extra: err, + }) + continue + } + writeAttrTypeConstraint(buf, attrS) + } else if attrS.Optional { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s = ", name)) + tok := hclwrite.TokensForValue(attrS.EmptyValue()) + if _, err := tok.WriteTo(buf); err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Skipped part of config generation", + Detail: fmt.Sprintf("Could not create attribute %s in %s when generating import configuration. The plan will likely report the missing attribute as being deleted.", name, addr), + Extra: err, + }) + continue + } + writeAttrTypeConstraint(buf, attrS) + } + } + return diags +} + +func writeConfigAttributesFromExisting(addr addrs.AbsResourceInstance, buf *strings.Builder, stateVal cty.Value, attrs map[string]*configschema.Attribute, indent int) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if len(attrs) == 0 { + return diags + } + + // Get a list of sorted attribute names so the output will be consistent between runs. + keys := make([]string, 0, len(attrs)) + for k := range attrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for i := range keys { + name := keys[i] + attrS := attrs[name] + if attrS.NestedType != nil { + writeConfigNestedTypeAttributeFromExisting(addr, buf, name, attrS, stateVal, indent) + continue + } + + // Exclude computed-only attributes + if attrS.Required || attrS.Optional { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s = ", name)) + + var val cty.Value + if !stateVal.IsNull() && stateVal.Type().HasAttribute(name) { + val = stateVal.GetAttr(name) + } else { + val = attrS.EmptyValue() + } + if val.Type() == cty.String { + // SHAMELESS HACK: If we have "" for an optional value, assume + // it is actually null, due to the legacy SDK. 
+ if !val.IsNull() && attrS.Optional && len(val.AsString()) == 0 { + val = attrS.EmptyValue() + } + } + if attrS.Sensitive || val.IsMarked() { + buf.WriteString("null # sensitive") + } else { + tok := tryWrapAsJsonEncodeFunctionCall(val) + if _, err := tok.WriteTo(buf); err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Skipped part of config generation", + Detail: fmt.Sprintf("Could not create attribute %s in %s when generating import configuration. The plan will likely report the missing attribute as being deleted.", name, addr), + Extra: err, + }) + continue + } + } + + buf.WriteString("\n") + } + } + return diags +} + +func writeConfigBlocks(addr addrs.AbsResourceInstance, buf *strings.Builder, blocks map[string]*configschema.NestedBlock, indent int) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(blocks) == 0 { + return diags + } + + // Get a list of sorted block names so the output will be consistent between runs. + names := make([]string, 0, len(blocks)) + for k := range blocks { + names = append(names, k) + } + sort.Strings(names) + + for i := range names { + name := names[i] + blockS := blocks[name] + diags = diags.Append(writeConfigNestedBlock(addr, buf, name, blockS, indent)) + } + return diags +} + +func writeConfigNestedBlock(addr addrs.AbsResourceInstance, buf *strings.Builder, name string, schema *configschema.NestedBlock, indent int) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + switch schema.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s {", name)) + writeBlockTypeConstraint(buf, schema) + diags = diags.Append(writeConfigAttributes(addr, buf, schema.Attributes, indent+2)) + diags = diags.Append(writeConfigBlocks(addr, buf, schema.BlockTypes, indent+2)) + buf.WriteString("}\n") + return diags + case configschema.NestingList, configschema.NestingSet: + 
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString(fmt.Sprintf("%s {", name))
		writeBlockTypeConstraint(buf, schema)
		diags = diags.Append(writeConfigAttributes(addr, buf, schema.Attributes, indent+2))
		diags = diags.Append(writeConfigBlocks(addr, buf, schema.BlockTypes, indent+2))
		buf.WriteString("}\n")
		return diags
	case configschema.NestingMap:
		buf.WriteString(strings.Repeat(" ", indent))
		// we use an arbitrary placeholder key (block label) "key"
		buf.WriteString(fmt.Sprintf("%s \"key\" {", name))
		writeBlockTypeConstraint(buf, schema)
		diags = diags.Append(writeConfigAttributes(addr, buf, schema.Attributes, indent+2))
		diags = diags.Append(writeConfigBlocks(addr, buf, schema.BlockTypes, indent+2))
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("}\n")
		return diags
	default:
		// This should not happen, the above should be exhaustive.
		panic(fmt.Errorf("unsupported NestingMode %s", schema.Nesting.String()))
	}
}

// writeConfigNestedTypeAttribute emits a schema-derived placeholder value for
// an attribute with a nested object type, using "{...}" for single nesting,
// "[{...}]" for list/set nesting, and a map with an arbitrary "key" label
// for map nesting.
func writeConfigNestedTypeAttribute(addr addrs.AbsResourceInstance, buf *strings.Builder, name string, schema *configschema.Attribute, indent int) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	buf.WriteString(strings.Repeat(" ", indent))
	buf.WriteString(fmt.Sprintf("%s = ", name))

	switch schema.NestedType.Nesting {
	case configschema.NestingSingle:
		buf.WriteString("{")
		writeAttrTypeConstraint(buf, schema)
		diags = diags.Append(writeConfigAttributes(addr, buf, schema.NestedType.Attributes, indent+2))
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("}\n")
		return diags
	case configschema.NestingList, configschema.NestingSet:
		buf.WriteString("[{")
		writeAttrTypeConstraint(buf, schema)
		diags = diags.Append(writeConfigAttributes(addr, buf, schema.NestedType.Attributes, indent+2))
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("}]\n")
		return diags
	case configschema.NestingMap:
		buf.WriteString("{")
		writeAttrTypeConstraint(buf, schema)
		buf.WriteString(strings.Repeat(" ", indent+2))
		// we use an arbitrary placeholder key "key"
		buf.WriteString("key = {\n")
		diags = diags.Append(writeConfigAttributes(addr, buf, schema.NestedType.Attributes, indent+4))
		buf.WriteString(strings.Repeat(" ", indent+2))
		buf.WriteString("}\n")
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("}\n")
		return diags
	default:
		// This should not happen, the above should be exhaustive.
		panic(fmt.Errorf("unsupported NestingMode %s", schema.NestedType.Nesting.String()))
	}
}

// writeConfigBlocksFromExisting renders each nested block type from the given
// state value, sorted by name for deterministic output.
func writeConfigBlocksFromExisting(addr addrs.AbsResourceInstance, buf *strings.Builder, stateVal cty.Value, blocks map[string]*configschema.NestedBlock, indent int) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	if len(blocks) == 0 {
		return diags
	}

	// Get a list of sorted block names so the output will be consistent between runs.
	names := make([]string, 0, len(blocks))
	for k := range blocks {
		names = append(names, k)
	}
	sort.Strings(names)

	for _, name := range names {
		blockS := blocks[name]
		// This shouldn't happen in real usage; state always has all values (set
		// to null as needed), but it protects against panics in tests (and any
		// really weird and unlikely cases).
		if !stateVal.Type().HasAttribute(name) {
			continue
		}
		blockVal := stateVal.GetAttr(name)
		diags = diags.Append(writeConfigNestedBlockFromExisting(addr, buf, name, blockS, blockVal, indent))
	}

	return diags
}

// writeConfigNestedTypeAttributeFromExisting renders an attribute with a
// nested object type from the given state value, replacing sensitive or
// marked values with empty placeholders annotated "# sensitive" and
// distinguishing null containers from empty ones.
func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance, buf *strings.Builder, name string, schema *configschema.Attribute, stateVal cty.Value, indent int) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	switch schema.NestedType.Nesting {
	case configschema.NestingSingle:
		if schema.Sensitive || stateVal.IsMarked() {
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = {} # sensitive\n", name))
			return diags
		}

		// This shouldn't happen in real usage; state always has all values (set
		// to null as needed), but it protects against panics in tests (and any
		// really weird and unlikely cases).
		if !stateVal.Type().HasAttribute(name) {
			return diags
		}
		nestedVal := stateVal.GetAttr(name)

		if nestedVal.IsNull() {
			// There is a difference between a null object, and an object with
			// no attributes.
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = null\n", name))
			return diags
		}

		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString(fmt.Sprintf("%s = {\n", name))
		diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, nestedVal, schema.NestedType.Attributes, indent+2))
		buf.WriteString("}\n")
		return diags

	case configschema.NestingList, configschema.NestingSet:

		if schema.Sensitive || stateVal.IsMarked() {
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = [] # sensitive\n", name))
			return diags
		}

		listVals := ctyCollectionValues(stateVal.GetAttr(name))
		if listVals == nil {
			// There is a difference between an empty list and a null list
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = null\n", name))
			return diags
		}

		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString(fmt.Sprintf("%s = [\n", name))
		for i := range listVals {
			buf.WriteString(strings.Repeat(" ", indent+2))

			// The entire element is marked.
			if listVals[i].IsMarked() {
				buf.WriteString("{}, # sensitive\n")
				continue
			}

			buf.WriteString("{\n")
			diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, listVals[i], schema.NestedType.Attributes, indent+4))
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString("},\n")
		}
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("]\n")
		return diags

	case configschema.NestingMap:
		if schema.Sensitive || stateVal.IsMarked() {
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = {} # sensitive\n", name))
			return diags
		}

		attr := stateVal.GetAttr(name)
		if attr.IsNull() {
			// There is a difference between an empty map and a null map.
			buf.WriteString(strings.Repeat(" ", indent))
			buf.WriteString(fmt.Sprintf("%s = null\n", name))
			return diags
		}

		// Sort map keys for deterministic output.
		vals := attr.AsValueMap()
		keys := make([]string, 0, len(vals))
		for key := range vals {
			keys = append(keys, key)
		}
		sort.Strings(keys)

		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString(fmt.Sprintf("%s = {\n", name))
		for _, key := range keys {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(fmt.Sprintf("%s = {", key))

			// This entire value is marked
			if vals[key].IsMarked() {
				buf.WriteString("} # sensitive\n")
				continue
			}

			buf.WriteString("\n")
			diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, vals[key], schema.NestedType.Attributes, indent+4))
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString("}\n")
		}
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString("}\n")
		return diags

	default:
		// This should not happen, the above should be exhaustive.
		panic(fmt.Errorf("unsupported NestingMode %s", schema.NestedType.Nesting.String()))
	}
}

// writeConfigNestedBlockFromExisting renders a nested block from the given
// state value. NOTE(review): this function continues beyond the visible
// chunk; only the portion shown here is documented.
func writeConfigNestedBlockFromExisting(addr addrs.AbsResourceInstance, buf *strings.Builder, name string, schema *configschema.NestedBlock, stateVal cty.Value, indent int) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	switch schema.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		if stateVal.IsNull() {
			return diags
		}
		buf.WriteString(strings.Repeat(" ", indent))
		buf.WriteString(fmt.Sprintf("%s {", name))

		// If the entire value is marked, don't print any nested attributes
		if stateVal.IsMarked() {
			buf.WriteString("} # sensitive\n")
			return diags
		}
		buf.WriteString("\n")
		diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, stateVal, schema.Attributes, indent+2))
		diags = diags.Append(writeConfigBlocksFromExisting(addr, buf, stateVal, schema.BlockTypes, indent+2))
		buf.WriteString("}\n")
		return diags
	case configschema.NestingList,
configschema.NestingSet: + if stateVal.IsMarked() { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s {} # sensitive\n", name)) + return diags + } + listVals := ctyCollectionValues(stateVal) + for i := range listVals { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s {\n", name)) + diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, listVals[i], schema.Attributes, indent+2)) + diags = diags.Append(writeConfigBlocksFromExisting(addr, buf, listVals[i], schema.BlockTypes, indent+2)) + buf.WriteString("}\n") + } + return diags + case configschema.NestingMap: + // If the entire value is marked, don't print any nested attributes + if stateVal.IsMarked() { + buf.WriteString(fmt.Sprintf("%s {} # sensitive\n", name)) + return diags + } + + vals := stateVal.AsValueMap() + keys := make([]string, 0, len(vals)) + for key := range vals { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(fmt.Sprintf("%s %q {", name, key)) + // This entire map element is marked + if vals[key].IsMarked() { + buf.WriteString("} # sensitive\n") + return diags + } + buf.WriteString("\n") + diags = diags.Append(writeConfigAttributesFromExisting(addr, buf, vals[key], schema.Attributes, indent+2)) + diags = diags.Append(writeConfigBlocksFromExisting(addr, buf, vals[key], schema.BlockTypes, indent+2)) + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString("}\n") + } + return diags + default: + // This should not happen, the above should be exhaustive. 
+ panic(fmt.Errorf("unsupported NestingMode %s", schema.Nesting.String())) + } +} + +func writeAttrTypeConstraint(buf *strings.Builder, schema *configschema.Attribute) { + if schema.Required { + buf.WriteString(" # REQUIRED ") + } else { + buf.WriteString(" # OPTIONAL ") + } + + if schema.NestedType != nil { + buf.WriteString(fmt.Sprintf("%s\n", schema.NestedType.ImpliedType().FriendlyName())) + } else { + buf.WriteString(fmt.Sprintf("%s\n", schema.Type.FriendlyName())) + } +} + +func writeBlockTypeConstraint(buf *strings.Builder, schema *configschema.NestedBlock) { + if schema.MinItems > 0 { + buf.WriteString(" # REQUIRED block\n") + } else { + buf.WriteString(" # OPTIONAL block\n") + } +} + +// copied from command/format/diff +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + var len int + if val.IsMarked() { + val, _ = val.Unmark() + len = val.LengthInt() + } else { + len = val.LengthInt() + } + + ret := make([]cty.Value, 0, len) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + + return ret +} + +// omitUnknowns recursively walks the src cty.Value and returns a new cty.Value, +// omitting any unknowns. +// +// The result also normalizes some types: all sequence types are turned into +// tuple types and all mapping types are converted to object types, since we +// assume the result of this is just going to be serialized as JSON (and thus +// lose those distinctions) anyway. 
+func omitUnknowns(val cty.Value) cty.Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return cty.NilVal + case ty.IsPrimitiveType(): + return val + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + var vals []cty.Value + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals = append(vals, newVal) + } else if newVal == cty.NilVal { + // element order is how we correlate unknownness, so we must + // replace unknowns with nulls + vals = append(vals, cty.NullVal(v.Type())) + } + } + // We use tuple types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents all of these sequence types as an array. + return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals[k.AsString()] = newVal + } + } + // We use object types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents both of these mapping types as an object. 
+ return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("omitUnknowns cannot handle %#v", val)) + } +} + +func tryWrapAsJsonEncodeFunctionCall(v cty.Value) hclwrite.Tokens { + tokens, err := wrapAsJSONEncodeFunctionCall(v) + if err != nil { + return hclwrite.TokensForValue(v) + } + return tokens +} + +func wrapAsJSONEncodeFunctionCall(v cty.Value) (hclwrite.Tokens, error) { + if v.IsNull() || v.Type() != cty.String || !v.IsKnown() { + return nil, errors.New("value cannot be treated as JSON string") + } + + s := []byte(strings.TrimSpace(v.AsString())) + if len(s) == 0 { + return nil, errors.New("empty value") + } + + if s[0] != '{' && s[0] != '[' { + return nil, errors.New("value is not a JSON object, nor a JSON array") + } + + t, err := json.ImpliedType(s) + if err != nil { + return nil, fmt.Errorf("cannot define implied cty type (possibly not a JSON string): %w", err) + } + + v, err = json.Unmarshal(s, t) + if err != nil { + return nil, fmt.Errorf("cannot unmarshal using implied type (possible not a JSON string): %w", err) + } + + tokens := hclwrite.TokensForFunctionCall("jsonencode", hclwrite.TokensForValue(v)) + + return tokens, nil +} diff --git a/pkg/genconfig/generate_config_test.go b/pkg/genconfig/generate_config_test.go new file mode 100644 index 00000000000..1dd9d43f7aa --- /dev/null +++ b/pkg/genconfig/generate_config_test.go @@ -0,0 +1,514 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package genconfig

import (
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
)

// TestConfigGeneration exercises GenerateResourceContents/WrapResourceContents
// over a table of schemas and state values, comparing the rendered HCL against
// an exact expected string. Cases cover nil state (skeleton output with
// REQUIRED/OPTIONAL comments), full and partial state, provider overrides and
// aliases, null/empty nested values, and jsonencode() wrapping of JSON-shaped
// string attributes.
func TestConfigGeneration(t *testing.T) {
	tcs := map[string]struct {
		schema   *configschema.Block             // resource schema to generate against
		addr     addrs.AbsResourceInstance       // address used in the wrapping resource header
		provider addrs.LocalProviderConfig       // local provider; non-default names/aliases emit a provider argument
		value    cty.Value                       // state value; cty.NilVal generates a commented skeleton
		expected string                          // exact expected rendered config (leading newline trimmed)
	}{
		// With no state value, every attribute/block is emitted as a null
		// placeholder annotated with REQUIRED/OPTIONAL type comments.
		"simple_resource": {
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list_block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_value": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
						Nesting: configschema.NestingSingle,
					},
				},
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"value": {
						Type:     cty.String,
						Optional: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
			},
			value: cty.NilVal,
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  value = null # OPTIONAL string
  list_block { # OPTIONAL block
    nested_value = null # OPTIONAL string
  }
}`,
		},
		// Fully-populated state: values are copied straight through and the
		// computed "id" attribute is omitted.
		"simple_resource_with_state": {
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list_block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_value": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
						Nesting: configschema.NestingSingle,
					},
				},
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"value": {
						Type:     cty.String,
						Optional: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"id":    cty.StringVal("D2320658"),
				"value": cty.StringVal("Hello, world!"),
				"list_block": cty.ObjectVal(map[string]cty.Value{
					"nested_value": cty.StringVal("Hello, solar system!"),
				}),
			}),
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  value = "Hello, world!"
  list_block {
    nested_value = "Hello, solar system!"
  }
}`,
		},
		// State missing the "value" attribute entirely: the generator should
		// fall back to null rather than panicking.
		"simple_resource_with_partial_state": {
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list_block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_value": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
						Nesting: configschema.NestingSingle,
					},
				},
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"value": {
						Type:     cty.String,
						Optional: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"id": cty.StringVal("D2320658"),
				"list_block": cty.ObjectVal(map[string]cty.Value{
					"nested_value": cty.StringVal("Hello, solar system!"),
				}),
			}),
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  value = null
  list_block {
    nested_value = "Hello, solar system!"
  }
}`,
		},
		// A provider whose local name differs from the resource type's implied
		// provider produces an explicit provider argument.
		"simple_resource_with_alternate_provider": {
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list_block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_value": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
						Nesting: configschema.NestingSingle,
					},
				},
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"value": {
						Type:     cty.String,
						Optional: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "mock",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"id":    cty.StringVal("D2320658"),
				"value": cty.StringVal("Hello, world!"),
				"list_block": cty.ObjectVal(map[string]cty.Value{
					"nested_value": cty.StringVal("Hello, solar system!"),
				}),
			}),
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  provider = mock
  value = "Hello, world!"
  list_block {
    nested_value = "Hello, solar system!"
  }
}`,
		},
		// An aliased provider produces a dotted provider reference.
		"simple_resource_with_aliased_provider": {
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list_block": {
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_value": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
						Nesting: configschema.NestingSingle,
					},
				},
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"value": {
						Type:     cty.String,
						Optional: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
				Alias:     "alternate",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"id":    cty.StringVal("D2320658"),
				"value": cty.StringVal("Hello, world!"),
				"list_block": cty.ObjectVal(map[string]cty.Value{
					"nested_value": cty.StringVal("Hello, solar system!"),
				}),
			}),
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  provider = tfcoremock.alternate
  value = "Hello, world!"
  list_block {
    nested_value = "Hello, solar system!"
  }
}`,
		},
		// Null nested attributes render as "= null"; null or empty nested
		// blocks are omitted entirely.
		"resource_with_nulls": {
			schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"id": {
						Type:     cty.String,
						Computed: true,
					},
					"single": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{},
							Nesting:    configschema.NestingSingle,
						},
						Required: true,
					},
					"list": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
							Nesting: configschema.NestingList,
						},
						Required: true,
					},
					"map": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
							Nesting: configschema.NestingMap,
						},
						Required: true,
					},
				},
				BlockTypes: map[string]*configschema.NestedBlock{
					"nested_single": {
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
					},
					// No configschema.NestingGroup example for this test, because this block type can never be null in state.
					"nested_list": {
						Nesting: configschema.NestingList,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
					},
					"nested_set": {
						Nesting: configschema.NestingSet,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
					},
					"nested_map": {
						Nesting: configschema.NestingMap,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"nested_id": {
									Type:     cty.String,
									Optional: true,
								},
							},
						},
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "empty",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"id":     cty.StringVal("D2320658"),
				"single": cty.NullVal(cty.Object(map[string]cty.Type{})),
				"list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				}))),
				"map": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				}))),
				"nested_single": cty.NullVal(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				})),
				"nested_list": cty.ListValEmpty(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				})),
				"nested_set": cty.SetValEmpty(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				})),
				"nested_map": cty.MapValEmpty(cty.Object(map[string]cty.Type{
					"nested_id": cty.String,
				})),
			}),
			expected: `
resource "tfcoremock_simple_resource" "empty" {
  list = null
  map = null
  single = null
}`,
		},
		// String attributes holding JSON objects/arrays are re-emitted as
		// jsonencode(...) calls; sensitive attributes stay redacted.
		"jsonencode_wrapping": {
			schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"juststr": {
						Type:     cty.String,
						Optional: true,
					},
					"jsonobj": {
						Type:     cty.String,
						Optional: true,
					},
					"jsonarr": {
						Type:     cty.String,
						Optional: true,
					},
					"sensitivejsonobj": {
						Type:      cty.String,
						Optional:  true,
						Sensitive: true,
					},
					"secrets": {
						Type: cty.Object(map[string]cty.Type{
							"main":      cty.String,
							"secondary": cty.String,
						}),
						Optional:  true,
						Sensitive: true,
					},
				},
			},
			addr: addrs.AbsResourceInstance{
				Module: nil,
				Resource: addrs.ResourceInstance{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "tfcoremock_simple_resource",
						Name: "example",
					},
					Key: nil,
				},
			},
			provider: addrs.LocalProviderConfig{
				LocalName: "tfcoremock",
			},
			value: cty.ObjectVal(map[string]cty.Value{
				"juststr":          cty.StringVal("{a=b}"),
				"jsonobj":          cty.StringVal(`{"SomeDate":"2012-10-17"}`),
				"jsonarr":          cty.StringVal(`[{"a": 1}]`),
				"sensitivejsonobj": cty.StringVal(`{"SomePassword":"dontstealplease"}`),
				"secrets": cty.ObjectVal(map[string]cty.Value{
					"main":      cty.StringVal(`{"v":"mypass"}`),
					"secondary": cty.StringVal(`{"v":"mybackup"}`),
				}),
			}),
			expected: `
resource "tfcoremock_simple_resource" "example" {
  jsonarr = jsonencode([{
    a = 1
  }])
  jsonobj = jsonencode({
    SomeDate = "2012-10-17"
  })
  juststr = "{a=b}"
  secrets = null # sensitive
  sensitivejsonobj = null # sensitive
}`,
		},
	}
	for name, tc := range tcs {
		t.Run(name, func(t *testing.T) {
			// Guard against invalid fixtures: generation behavior is only
			// defined for schemas that pass internal validation.
			err := tc.schema.InternalValidate()
			if err != nil {
				t.Fatalf("schema failed InternalValidate: %s", err)
			}
			contents, diags := GenerateResourceContents(tc.addr, tc.schema, tc.provider, tc.value)
			if len(diags) > 0 {
				t.Errorf("expected no diagnostics but found %s", diags)
			}

			got := WrapResourceContents(tc.addr, contents)
			want := strings.TrimSpace(tc.expected)
			// NOTE(review): cmp.Diff is conventionally (want, got); the order
			// here is (got, want) — harmless for equality, but the diff's
			// +/- legend reads inverted.
			if diff := cmp.Diff(got, want); len(diff) > 0 {
				t.Errorf("got:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff)
			}
		})
	}
}
diff --git a/pkg/genconfig/generate_config_write.go b/pkg/genconfig/generate_config_write.go
new file mode 100644
index
00000000000..8d8922a315e --- /dev/null +++ b/pkg/genconfig/generate_config_write.go @@ -0,0 +1,84 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package genconfig + +import ( + "fmt" + "io" + "os" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func ShouldWriteConfig(out string) bool { + // No specified out file, so don't write anything. + return len(out) != 0 +} + +func ValidateTargetFile(out string) (diags tfdiags.Diagnostics) { + if _, err := os.Stat(out); !os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target generated file already exists", + "OpenTofu can only write generated config into a new file. Either choose a different target location or move all existing configuration out of the target file, delete it and try again.")) + + } + return diags +} + +type Change struct { + Addr string + ImportID string + GeneratedConfig string +} + +func (c *Change) MaybeWriteConfig(writer io.Writer, out string) (io.Writer, bool, tfdiags.Diagnostics) { + var wroteConfig bool + var diags tfdiags.Diagnostics + if len(c.GeneratedConfig) > 0 { + if writer == nil { + // Lazily create the generated file, in case we have no + // generated config to create. + if w, err := os.Create(out); err != nil { + if os.IsPermission(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create target generated file", + fmt.Sprintf("OpenTofu did not have permission to create the generated file (%s) in the target directory. Please modify permissions over the target directory, and try again.", out))) + return nil, false, diags + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create target generated file", + fmt.Sprintf("OpenTofu could not create the generated file (%s) in the target directory: %v. Depending on the error message, this may be a bug in OpenTofu itself. 
If so, please report it!", out, err))) + return nil, false, diags + } else { + writer = w + } + + header := "# __generated__ by OpenTofu\n# Please review these resources and move them into your main configuration files.\n" + // Missing the header from the file, isn't the end of the world + // so if this did return an error, then we will just ignore it. + _, _ = writer.Write([]byte(header)) + } + + header := "\n# __generated__ by OpenTofu" + if len(c.ImportID) > 0 { + header += fmt.Sprintf(" from %q", c.ImportID) + } + header += "\n" + if _, err := writer.Write([]byte(fmt.Sprintf("%s%s\n", header, c.GeneratedConfig))); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Failed to save generated config", + fmt.Sprintf("OpenTofu encountered an error while writing generated config: %v. The config for %s must be created manually before applying. Depending on the error message, this may be a bug in OpenTofu itself. If so, please report it!", err, c.Addr))) + } + wroteConfig = true + } + + return writer, wroteConfig, diags +} diff --git a/pkg/getmodules/doc.go b/pkg/getmodules/doc.go new file mode 100644 index 00000000000..dc1cfbac3a0 --- /dev/null +++ b/pkg/getmodules/doc.go @@ -0,0 +1,13 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package getmodules contains the low-level functionality for fetching +// remote module packages. It's essentially just a thin wrapper around +// go-getter. +// +// This package is only for remote module source addresses, not for local +// or registry source addresses. The other address types are handled +// elsewhere. 
+package getmodules diff --git a/pkg/getmodules/file_detector.go b/pkg/getmodules/file_detector.go new file mode 100644 index 00000000000..53120b6c937 --- /dev/null +++ b/pkg/getmodules/file_detector.go @@ -0,0 +1,70 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getmodules + +import ( + "fmt" + "path/filepath" + "runtime" +) + +// fileDetector is a replacement for go-getter's own file detector which +// better meets OpenTofu's needs: specifically, it rejects relative filesystem +// paths with a somewhat-decent error message. +// +// This is a replacement for some historical hackery we did where we tried to +// avoid calling into go-getter altogether in this situation. This is, +// therefore, a copy of getter.FileDetector but with the "not absolute path" +// case replaced with a similar result as Terraform's old heuristic would've +// returned: a custom error type that the caller can react to in order to +// produce a hint error message if desired. +type fileDetector struct{} + +func (d *fileDetector) Detect(src, pwd string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if !filepath.IsAbs(src) { + return "", true, &MaybeRelativePathErr{src} + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} + +// MaybeRelativePathErr is the error type returned by NormalizePackageAddress +// if the source address looks like it might be intended to be a relative +// filesystem path but without the required "./" or "../" prefix. 
+// +// Specifically, NormalizePackageAddress will return a pointer to this type, +// so the error type will be *MaybeRelativePathErr. +// +// It has a name starting with "Maybe" because in practice we can get here +// with any string that isn't recognized as one of the supported schemes: +// treating the address as a local filesystem path is our fallback for when +// everything else fails, but it could just as easily be a typo in an attempt +// to use one of the other schemes and thus not a filesystem path at all. +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("OpenTofu cannot detect a supported external module source type for %s", e.Addr) +} diff --git a/pkg/getmodules/getter.go b/pkg/getmodules/getter.go new file mode 100644 index 00000000000..fe29e1578ed --- /dev/null +++ b/pkg/getmodules/getter.go @@ -0,0 +1,169 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getmodules + +import ( + "context" + "fmt" + "log" + "os" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" + "github.com/kubegems/opentofu/pkg/copy" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of OpenTofu's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable. 
//
// Note that over time we've found go-getter's design to be not wholly fit
// for OpenTofu's purposes in various ways, and so we're continuing to use
// it here because our backward compatibility with earlier versions depends
// on it, but we use go-getter very carefully and always only indirectly via
// the public API of this package so that we can get the subset of the
// go-getter functionality we need while working around some of the less
// helpful parts of its design. See the comments in various other functions
// in this package which call into go-getter for more information on what
// tradeoffs we're making here.

// goGetterDetectors is the fixed set of source-address detectors we permit,
// applied in order during address normalization.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.GitDetector),

	// Because historically BitBucket supported both Git and Mercurial
	// repositories but used the same repository URL syntax for both,
	// this detector takes the unusual step of actually reaching out
	// to the BitBucket API to recognize the repository type. That
	// means there's the possibility of an outgoing network request
	// inside what is otherwise normally just a local string manipulation
	// operation, but we continue to accept this for now.
	//
	// Perhaps a future version of go-getter will remove the check now
	// that BitBucket only supports Git anyway. Aside from this historical
	// exception, we should avoid adding any new detectors that make network
	// requests in here, and limit ourselves only to ones that can operate
	// entirely through local string manipulation.
	new(getter.BitBucketDetector),

	new(getter.GCSDetector),
	new(getter.S3Detector),
	new(fileDetector),
}

// goGetterNoDetectors disables detection entirely, for use when the caller
// has already normalized the address itself.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps the archive filename suffixes we support to
// their decompressor implementations.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz":  new(getter.GzipDecompressor),
	"xz":  new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2":  new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz":    new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz":    new(getter.TarXzDecompressor),
}

// goGetterGetters maps the URL schemes we support to their fetcher
// implementations.
var goGetterGetters = map[string]getter.Getter{
	"file":  new(getter.FileGetter),
	"gcs":   new(getter.GCSGetter),
	"git":   new(getter.GitGetter),
	"hg":    new(getter.HgGetter),
	"s3":    new(getter.S3Getter),
	"http":  getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is the single shared HTTP client used for http/https
// module fetches.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter handles http/https sources. Netrc enables ~/.netrc
// credentials; XTerraformGetLimit bounds X-Terraform-Get redirect chains.
var getterHTTPGetter = &getter.HttpGetter{
	Client:             getterHTTPClient,
	Netrc:              true,
	XTerraformGetLimit: 10,
}

// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are the normalized (post-detection) package
// addresses, and the values are the paths where each source was previously
// installed. (Users of this map should treat the keys as addrs.ModulePackage
// values, but we can't type them that way because the addrs package
// imports getmodules in order to indirectly access our go-getter
// configuration.)
type reusingGetter map[string]string

// getWithGoGetter fetches the package at the given address into the given
// target directory.
The given address must already be in normalized form +// (using NormalizePackageAddress) or else the behavior is undefined. +// +// This function deals only in entire packages, so it's always the caller's +// responsibility to handle any subdirectory specification and select a +// suitable subdirectory of the given installation directory after installation +// has succeeded. +// +// This function would ideally accept packageAddr as a value of type +// addrs.ModulePackage, but we can't do that because the addrs package +// depends on this package for package address parsing. Therefore we just +// use a string here but assume that the caller got that value by calling +// the String method on a valid addrs.ModulePackage value. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. 
+func (g reusingGetter) getWithGoGetter(ctx context.Context, instPath, packageAddr string) error {
+	var err error
+
+	if prevDir, exists := g[packageAddr]; exists {
+		log.Printf("[TRACE] getmodules: copying previous install of %q from %s to %s", packageAddr, prevDir, instPath)
+		// NOTE(review): ":=" here shadows the outer err declared above; harmless
+		// today because both failure paths below return immediately, but worth
+		// keeping in mind if this branch ever grows.
+		// os.ModePerm is 0777, further restricted by the process umask.
+		err := os.Mkdir(instPath, os.ModePerm)
+		if err != nil {
+			return fmt.Errorf("failed to create directory %s: %w", instPath, err)
+		}
+		// copy.CopyDir takes (dst, src): copies the prior install at prevDir
+		// into the freshly-created instPath (see the error message below).
+		err = copy.CopyDir(instPath, prevDir)
+		if err != nil {
+			return fmt.Errorf("failed to copy from %s to %s: %w", prevDir, instPath, err)
+		}
+	} else {
+		log.Printf("[TRACE] getmodules: fetching %q to %q", packageAddr, instPath)
+		client := getter.Client{
+			Src: packageAddr,
+			Dst: instPath,
+			Pwd: instPath,
+
+			Mode: getter.ClientModeDir,
+
+			Detectors:     goGetterNoDetectors, // our caller should've already done detection
+			Decompressors: goGetterDecompressors,
+			Getters:       goGetterGetters,
+			Ctx:           ctx,
+		}
+		err = client.Get()
+		if err != nil {
+			return err
+		}
+		// Remember where we installed this so we might reuse this directory
+		// on subsequent calls to avoid re-downloading.
+		g[packageAddr] = instPath
+	}
+
+	// If we get down here then we've either downloaded the package or
+	// copied a previous tree we downloaded, and so either way we should
+	// have got the full module package structure written into instPath.
+	return nil
+}
diff --git a/pkg/getmodules/installer.go b/pkg/getmodules/installer.go
new file mode 100644
index 00000000000..181b1261a8e
--- /dev/null
+++ b/pkg/getmodules/installer.go
@@ -0,0 +1,49 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package getmodules
+
+import (
+	"context"
+)
+
+// PackageFetcher is a low-level utility for fetching remote module packages
+// into local filesystem directories in preparation for use by higher-level
+// module installer functionality implemented elsewhere.
+// +// A PackageFetcher works only with entire module packages and never with +// the individual modules within a package. +// +// A particular PackageFetcher instance remembers the target directory of +// any successfully-installed package so that it can optimize future calls +// that have the same package address by copying the local directory tree, +// rather than fetching the package from its origin repeatedly. There is +// no way to reset this cache, so a particular PackageFetcher instance should +// live only for the duration of a single initialization process. +type PackageFetcher struct { + getter reusingGetter +} + +func NewPackageFetcher() *PackageFetcher { + return &PackageFetcher{ + getter: reusingGetter{}, + } +} + +// FetchPackage downloads or otherwise retrieves the filesystem inside the +// package at the given address into the given local installation directory. +// +// packageAddr must be formatted as if it were the result of an +// addrs.ModulePackage.String() call. It's only defined as a raw string here +// because the getmodules package can't import the addrs package due to +// that creating a package dependency cycle. +// +// PackageFetcher only works with entire packages. If the caller is processing +// a module source address which includes a subdirectory portion then the +// caller must resolve that itself, possibly with the help of the +// getmodules.SplitPackageSubdir and getmodules.ExpandSubdirGlobs functions. +func (f *PackageFetcher) FetchPackage(ctx context.Context, instDir string, packageAddr string) error { + return f.getter.getWithGoGetter(ctx, instDir, packageAddr) +} diff --git a/pkg/getmodules/package.go b/pkg/getmodules/package.go new file mode 100644 index 00000000000..4ea8558a192 --- /dev/null +++ b/pkg/getmodules/package.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package getmodules
+
+import (
+	getter "github.com/hashicorp/go-getter"
+)
+
+// NormalizePackageAddress uses the go-getter "detector" functionality in
+// order to turn a user-supplied source address into a normalized address
+// which always includes a prefix naming a protocol to fetch with and may
+// also include a transformed/normalized version of the protocol-specific
+// source address included afterward.
+//
+// This is part of the implementation of addrs.ParseModulePackage and of
+// addrs.ParseModuleSource, so for most callers it'd be better to call
+// one of those other functions instead. The addrs package can potentially
+// perform other processing in addition to just the go-getter detection.
+//
+// Note that this function expects to receive only a package address, not
+// a full source address that might also include a subdirectory portion.
+// The caller must trim off any subdirectory portion using
+// getmodules.SplitPackageSubdir before calling this function, passing in
+// just the packageAddr return value, or the result will be incorrect.
+//
+// The detectors in go-getter can potentially introduce their own
+// package subdirectory portions. If that happens then this function will
+// return the subdirectory portion as a non-empty subDir return value,
+// which the caller must then use as a prefix for any subDir it already
+// extracted from the user's given package address.
+//
+// Some of go-getter's detectors make outgoing HTTP requests, and so
+// the behavior of this function may depend on the network connectivity
+// of the system where OpenTofu is running. However, most of the getters
+// we use are local-only, and so HTTP requests are only for some ambiguous
+// edge-cases, such as the BitBucket detector which has a mechanism to
+// detect whether to use Git or Mercurial, because earlier versions of
+// BitBucket used to support both.
+func NormalizePackageAddress(given string) (packageAddr, subDir string, err error) { + // Because we're passing go-getter no base directory here, the file + // detector will return an error if the user entered a relative filesystem + // path without a "../" or "./" prefix and thus ended up in here. + // + // go-getter's error message for that case is very poor, and so we'll + // try to heuristically detect that situation and return a better error + // message. + + // NOTE: We're passing an empty string to the "current working directory" + // here because that's only relevant for relative filesystem paths, + // but OpenTofu handles relative filesystem paths itself outside of + // go-getter and so it'd always be an error to pass one into here. + // go-getter's "file" detector returns an error if it encounters a + // relative path when the pwd argument is empty. + // + // (Absolute filesystem paths _are_ valid though, for annoying historical + // reasons, and we treat them as remote packages even though "downloading" + // them just means a recursive copy of the source directory tree.) + + result, err := getter.Detect(given, "", goGetterDetectors) + if err != nil { + // NOTE: go-getter's error messages are of very inconsistent quality + // and many are not suitable for an end-user audience, but they are all + // just strings and so we can't really do any sort of post-processing + // to improve them and thus we just accept some bad error messages for + // now. + return "", "", err + } + + packageAddr, subDir = SplitPackageSubdir(result) + return packageAddr, subDir, nil +} diff --git a/pkg/getmodules/subdir.go b/pkg/getmodules/subdir.go new file mode 100644 index 00000000000..e71e60c2d71 --- /dev/null +++ b/pkg/getmodules/subdir.go @@ -0,0 +1,62 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getmodules + +import ( + "path" + + getter "github.com/hashicorp/go-getter" +) + +// SplitPackageSubdir detects whether the given address string has a +// subdirectory portion, and if so returns a non-empty subDir string +// along with the trimmed package address. +// +// If the given string doesn't have a subdirectory portion then it'll +// just be returned verbatim in packageAddr, with an empty subDir value. +// +// Although the rest of this package is focused only on direct remote +// module packages, this particular function and its companion +// ExpandSubdirGlobs are both also relevant for registry-based module +// addresses, because a registry translates such an address into a +// remote module package address and thus can contribute its own +// additions to the final subdirectory selection. +func SplitPackageSubdir(given string) (packageAddr, subDir string) { + // We delegate this mostly to go-getter, because older Terraform + // versions just used go-getter directly and so we need to preserve + // its various quirks for compatibility reasons. + // + // However, note that in OpenTofu we _always_ split off the subdirectory + // portion and handle it within OpenTofu-level code, _never_ passing + // a subdirectory portion down into go-getter's own Get function, because + // OpenTofu's ability to refer between local paths inside the same + // package depends on OpenTofu itself always being aware of where the + // package's root directory ended up on disk, and always needs the + // package installed wholesale. 
+ packageAddr, subDir = getter.SourceDirSubdir(given) + if subDir != "" { + subDir = path.Clean(subDir) + } + return packageAddr, subDir +} + +// ExpandSubdirGlobs handles a subdir string that might contain glob syntax, +// turning it into a concrete subdirectory path by referring to the actual +// files on disk in the given directory which we assume contains the content +// of whichever package this is a subdirectory glob for. +// +// Subdir globs are used, for example, when a module registry wants to specify +// to select the contents of the single directory at the root of a conventional +// tar archive but it doesn't actually know the exact name of that directory. +// In that case it might specify a subdir of just "*", which this function +// will then expand into the single subdirectory found inside instDir, or +// return an error if the result would be ambiguous. +func ExpandSubdirGlobs(instDir string, subDir string) (string, error) { + // We just delegate this entirely to go-getter, because older Terraform + // versions just used go-getter directly and so we need to preserve + // its various quirks for compatibility reasons. + return getter.SubdirGlob(instDir, subDir) +} diff --git a/pkg/getproviders/didyoumean.go b/pkg/getproviders/didyoumean.go new file mode 100644 index 00000000000..e96ad74ccdd --- /dev/null +++ b/pkg/getproviders/didyoumean.go @@ -0,0 +1,267 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/hashicorp/go-retryablehttp" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/addrs" +) + +// MissingProviderSuggestion takes a provider address that failed installation +// due to the remote registry reporting that it didn't exist, and attempts +// to find another provider that the user might have meant to select. +// +// If the result is equal to the given address then that indicates that there +// is no suggested alternative to offer, either because the function +// successfully determined there is no recorded alternative or because the +// lookup failed somehow. We don't consider a failure to find a suggestion +// as an installation failure, because the caller should already be reporting +// that the provider didn't exist anyway and this is only extra context for +// that error message. +// +// The result of this is a best effort, so any UI presenting it should be +// careful to give it only as a possibility and not necessarily a suitable +// replacement for the given provider. +// +// In practice today this function only knows how to suggest alternatives for +// "default" providers, which is to say ones that are in the hashicorp +// namespace in the OpenTofu registry. It will always return no result for +// any other provider. That might change in future if we introduce other ways +// to discover provider suggestions. +// +// If the given context is cancelled then this function might not return a +// renaming suggestion even if one would've been available for a completed +// request. 
+func MissingProviderSuggestion(ctx context.Context, addr addrs.Provider, source Source, reqs Requirements) addrs.Provider { + if !addrs.IsDefaultProvider(addr) { + return addr + } + + // Before possibly looking up legacy naming, see if the user has another provider + // named in their requirements that is of the same type, and offer that + // as a suggestion + for req := range reqs { + if req != addr && req.Type == addr.Type { + return req + } + } + + // Our strategy here, for a default provider, is to use the default + // registry's special API for looking up "legacy" providers and try looking + // for a legacy provider whose type name matches the type of the given + // provider. This should then find a suitable answer for any provider + // that was originally auto-installable in v0.12 and earlier but moved + // into a non-default namespace as part of introducing the hierarchical + // provider namespace. + // + // To achieve that, we need to find the direct registry client in + // particular from the given source, because that is the only Source + // implementation that can actually handle a legacy provider lookup. + regSource := findLegacyProviderLookupSource(addr.Hostname, source) + if regSource == nil { + // If there's no direct registry source in the installation config + // then we can't provide a renaming suggestion. + return addr + } + + defaultNS, redirectNS, err := regSource.lookupLegacyProviderNamespace(ctx, addr.Hostname, addr.Type) + if err != nil { + return addr + } + + switch { + case redirectNS != "": + return addrs.Provider{ + Hostname: addr.Hostname, + Namespace: redirectNS, + Type: addr.Type, + } + default: + return addrs.Provider{ + Hostname: addr.Hostname, + Namespace: defaultNS, + Type: addr.Type, + } + } +} + +// findLegacyProviderLookupSource tries to find a *RegistrySource that can talk +// to the given registry host in the given Source. 
It might be given directly, +// or it might be given indirectly via a MultiSource where the selector +// includes a wildcard for registry.opentofu.org. +// +// Returns nil if the given source does not have any configured way to talk +// directly to the given host. +// +// If the given source contains multiple sources that can talk to the given +// host directly, the first one in the sequence takes preference. In practice +// it's pointless to have two direct installation sources that match the same +// hostname anyway, so this shouldn't arise in normal use. +func findLegacyProviderLookupSource(host svchost.Hostname, source Source) *RegistrySource { + switch source := source.(type) { + + case *RegistrySource: + // Easy case: the source is a registry source directly, and so we'll + // just use it. + return source + + case *MemoizeSource: + // Also easy: the source is a memoize wrapper, so defer to its + // underlying source. + return findLegacyProviderLookupSource(host, source.underlying) + + case MultiSource: + // Trickier case: if it's a multisource then we need to scan over + // its selectors until we find one that is a *RegistrySource _and_ + // that is configured to accept arbitrary providers from the + // given hostname. + + // For our matching purposes we'll use an address that would not be + // valid as a real provider FQN and thus can only match a selector + // that has no filters at all or a selector that wildcards everything + // except the hostname, like "registry.opentofu.org/*/*" + matchAddr := addrs.Provider{ + Hostname: host, + // Other fields are intentionally left empty, to make this invalid + // as a specific provider address. + } + + for _, selector := range source { + // If this source has suitable matching patterns to install from + // the given hostname then we'll recursively search inside it + // for *RegistrySource objects. 
+ if selector.CanHandleProvider(matchAddr) { + ret := findLegacyProviderLookupSource(host, selector.Source) + if ret != nil { + return ret + } + } + } + + // If we get here then there were no selectors that are both configured + // to handle modules from the given hostname and that are registry + // sources, so we fail. + return nil + + default: + // This source cannot be and cannot contain a *RegistrySource, so + // we fail. + return nil + } +} + +// lookupLegacyProviderNamespace is a special method available only on +// RegistrySource which can deal with legacy provider addresses that contain +// only a type and leave the namespace implied. +// +// It asks the registry at the given hostname to provide a default namespace +// for the given provider type, which can be combined with the given hostname +// and type name to produce a fully-qualified provider address. +// +// Not all unqualified type names can be resolved to a default namespace. If +// the request fails, this method returns an error describing the failure. +// +// This method exists only to allow compatibility with unqualified names +// in older configurations. New configurations should be written so as not to +// depend on it, and this fallback mechanism will likely be removed altogether +// in a future OpenTofu version. +func (s *RegistrySource) lookupLegacyProviderNamespace(ctx context.Context, hostname svchost.Hostname, typeName string) (string, string, error) { + client, err := s.registryClient(hostname) + if err != nil { + return "", "", err + } + return client.legacyProviderDefaultNamespace(ctx, typeName) +} + +// legacyProviderDefaultNamespace returns the raw address strings produced by +// the registry when asked about the given unqualified provider type name. +// The returned namespace string is taken verbatim from the registry's response. +// +// This method exists only to allow compatibility with unqualified names +// in older configurations. 
New configurations should be written so as not to +// depend on it. +func (c *registryClient) legacyProviderDefaultNamespace(ctx context.Context, typeName string) (string, string, error) { + endpointPath, err := url.Parse(path.Join("-", typeName, "versions")) + if err != nil { + // Should never happen because we're constructing this from + // already-validated components. + return "", "", err + } + endpointURL := c.baseURL.ResolveReference(endpointPath) + + req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil) + if err != nil { + return "", "", err + } + req = req.WithContext(ctx) + c.addHeadersToRequest(req.Request) + + // This is just to give us something to return in error messages. It's + // not a proper provider address. + placeholderProviderAddr := addrs.NewLegacyProvider(typeName) + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", "", c.errQueryFailed(placeholderProviderAddr, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // Great! 
+ case http.StatusNotFound: + return "", "", ErrProviderNotFound{ + Provider: placeholderProviderAddr, + } + case http.StatusUnauthorized, http.StatusForbidden: + return "", "", c.errUnauthorized(placeholderProviderAddr.Hostname) + default: + return "", "", c.errQueryFailed(placeholderProviderAddr, errors.New(resp.Status)) + } + + type ResponseBody struct { + Id string `json:"id"` + MovedTo string `json:"moved_to"` + } + var body ResponseBody + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&body); err != nil { + return "", "", c.errQueryFailed(placeholderProviderAddr, err) + } + + provider, diags := addrs.ParseProviderSourceString(body.Id) + if diags.HasErrors() { + return "", "", fmt.Errorf("Error parsing provider ID from Registry: %s", diags.Err()) + } + + if provider.Type != typeName { + return "", "", fmt.Errorf("Registry returned provider with type %q, expected %q", provider.Type, typeName) + } + + var movedTo addrs.Provider + if body.MovedTo != "" { + movedTo, diags = addrs.ParseProviderSourceString(body.MovedTo) + if diags.HasErrors() { + return "", "", fmt.Errorf("Error parsing provider ID from Registry: %s", diags.Err()) + } + + if movedTo.Type != typeName { + return "", "", fmt.Errorf("Registry returned provider with type %q, expected %q", movedTo.Type, typeName) + } + } + + return provider.Namespace, movedTo.Namespace, nil +} diff --git a/pkg/getproviders/didyoumean_test.go b/pkg/getproviders/didyoumean_test.go new file mode 100644 index 00000000000..bcd4a8b729e --- /dev/null +++ b/pkg/getproviders/didyoumean_test.go @@ -0,0 +1,200 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "testing" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestMissingProviderSuggestion(t *testing.T) { + // Most of these test cases rely on specific "magic" provider addresses + // that are implemented by the fake registry source returned by + // testRegistrySource. Refer to that function for more details on how + // they work. + + t.Run("happy path", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/legacy as a valid legacy provider + // lookup mapping to legacycorp/legacy. + legacyAddr := addrs.NewDefaultProvider("legacy") + got := MissingProviderSuggestion( + ctx, + addrs.NewDefaultProvider("legacy"), + source, + Requirements{ + legacyAddr: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "legacycorp", + Type: "legacy", + } + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("provider moved", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/moved as a valid legacy provider + // lookup mapping to hashicorp/moved but with an additional "redirect" + // to acme/moved. This mimics how for some providers there is both + // a copy under terraform-providers for v0.12 compatibility _and_ a + // copy in some other namespace for v0.13 or later to use. Our naming + // suggestions ignore the v0.12-compatible one and suggest the + // other one. 
+ moved := addrs.NewDefaultProvider("moved") + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "acme", + Type: "moved", + } + + got := MissingProviderSuggestion( + ctx, + moved, + source, + Requirements{ + moved: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + + // If a provider has moved, but there's provider requirements + // for something of the same type, we'll return that one + // and skip the legacy lookup process. In practice, + // hopefully this is also "acme" but it's "zcme" here to + // exercise the codepath + want2 := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "zcme", + Type: "moved", + } + got2 := MissingProviderSuggestion( + ctx, + moved, + source, + Requirements{ + moved: MustParseVersionConstraints(">= 1.0.0"), + want2: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + + if got2 != want2 { + t.Errorf("wrong result\ngot: %s\nwant: %s", got2, want2) + } + }) + t.Run("invalid response", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/invalid by returning an invalid + // provider address, which MissingProviderSuggestion should reject + // and behave as if there was no suggestion available. + want := addrs.NewDefaultProvider("invalid") + got := MissingProviderSuggestion( + ctx, + want, + source, + Requirements{ + want: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("another registry", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // Because this provider address isn't on registry.opentofu.org, + // MissingProviderSuggestion won't even attempt to make a suggestion + // for it. 
+ want := addrs.Provider{ + Hostname: svchost.Hostname("example.com"), + Namespace: "whatever", + Type: "foo", + } + got := MissingProviderSuggestion( + ctx, + want, + source, + Requirements{ + want: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("another namespace", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // Because this provider address isn't in + // registry.opentofu.org/hashicorp/..., MissingProviderSuggestion + // will provide the same addr since there's no alternative in Requirements + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "whatever", + Type: "foo", + } + got := MissingProviderSuggestion( + ctx, + want, + source, + Requirements{ + want: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + + // If there is a provider required that has the same type, + // but different namespace, we can suggest that + foo := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "hashicorp", + Type: "foo", + } + realFoo := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "acme", + Type: "foo", + } + got2 := MissingProviderSuggestion( + ctx, + foo, + source, + Requirements{ + foo: MustParseVersionConstraints(">= 1.0.0"), + realFoo: MustParseVersionConstraints(">= 1.0.0"), + }, + ) + if got2 != realFoo { + t.Errorf("wrong result\ngot: %s\nwant: %s", got2, realFoo) + } + }) +} diff --git a/pkg/getproviders/doc.go b/pkg/getproviders/doc.go new file mode 100644 index 00000000000..71c18eafeb6 --- /dev/null +++ b/pkg/getproviders/doc.go @@ -0,0 +1,16 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+// Package getproviders is the lowest-level provider automatic installation
+// functionality. It can answer questions about what providers and provider
+// versions are available in a registry, and it can retrieve the URL for
+// the distribution archive for a specific version of a specific provider
+// targeting a particular platform.
+//
+// This package is not responsible for choosing the best version to install
+// from a set of available versions, or for any signature verification of the
+// archives it fetches. Callers will use this package in conjunction with other
+// logic elsewhere in order to construct a full provider installer.
+package getproviders
diff --git a/pkg/getproviders/errors.go b/pkg/getproviders/errors.go
new file mode 100644
index 00000000000..3e78b36f595
--- /dev/null
+++ b/pkg/getproviders/errors.go
@@ -0,0 +1,251 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package getproviders
+
+import (
+	"fmt"
+	"net/url"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+)
+
+// ErrHostNoProviders is an error type used to indicate that a hostname given
+// in a provider address does not support the provider registry protocol.
+type ErrHostNoProviders struct {
+	Hostname svchost.Hostname
+
+	// HasOtherVersion is set to true if the discovery process detected
+	// declarations of services named "providers" whose version numbers did not
+	// match any version supported by the current version of OpenTofu.
+	//
+	// If this is set, it's helpful to hint to the user in an error message
+	// that the provider host may be expecting an older or a newer version
+	// of OpenTofu, rather than that it isn't a provider registry host at all.
+ HasOtherVersion bool +} + +func (err ErrHostNoProviders) Error() string { + switch { + case err.HasOtherVersion: + return fmt.Sprintf("host %s does not support the provider registry protocol required by this OpenTofu version, but may be compatible with a different OpenTofu version", err.Hostname.ForDisplay()) + default: + return fmt.Sprintf("host %s does not offer a OpenTofu provider registry", err.Hostname.ForDisplay()) + } +} + +// ErrHostUnreachable is an error type used to indicate that a hostname +// given in a provider address did not resolve in DNS, did not respond to an +// HTTPS request for service discovery, or otherwise failed to correctly speak +// the service discovery protocol. +type ErrHostUnreachable struct { + Hostname svchost.Hostname + Wrapped error +} + +func (err ErrHostUnreachable) Error() string { + return fmt.Sprintf("could not connect to %s: %s", err.Hostname.ForDisplay(), err.Wrapped.Error()) +} + +// Unwrap returns the underlying error that occurred when trying to reach the +// indicated host. +func (err ErrHostUnreachable) Unwrap() error { + return err.Wrapped +} + +// ErrUnauthorized is an error type used to indicate that a hostname +// given in a provider address returned a "401 Unauthorized" or "403 Forbidden" +// error response when we tried to access it. +type ErrUnauthorized struct { + Hostname svchost.Hostname + + // HaveCredentials is true when the request that failed included some + // credentials, and thus it seems that those credentials were invalid. + // Conversely, HaveCredentials is false if the request did not include + // credentials at all, in which case it seems that credentials must be + // provided. 
+ HaveCredentials bool +} + +func (err ErrUnauthorized) Error() string { + switch { + case err.HaveCredentials: + return fmt.Sprintf("host %s rejected the given authentication credentials", err.Hostname) + default: + return fmt.Sprintf("host %s requires authentication credentials", err.Hostname) + } +} + +// ErrProviderNotFound is an error type used to indicate that requested provider +// was not found in the source(s) included in the Description field. This can be +// used to produce user-friendly error messages. +type ErrProviderNotFound struct { + Provider addrs.Provider + Sources []string +} + +func (err ErrProviderNotFound) Error() string { + return fmt.Sprintf( + "provider %s was not found in any of the search locations", + err.Provider, + ) +} + +// ErrRegistryProviderNotKnown is an error type used to indicate that the hostname +// given in a provider address does appear to be a provider registry but that +// registry does not know about the given provider namespace or type. +// +// A caller serving requests from an end-user should recognize this error type +// and use it to produce user-friendly hints for common errors such as failing +// to specify an explicit source for a provider not in the default namespace +// (one not under registry.opentofu.org/hashicorp/). The default error message +// for this type is a direct description of the problem with no such hints, +// because we expect that the caller will have better context to decide what +// hints are appropriate, e.g. by looking at the configuration given by the +// user. +type ErrRegistryProviderNotKnown struct { + Provider addrs.Provider +} + +func (err ErrRegistryProviderNotKnown) Error() string { + return fmt.Sprintf( + "provider registry %s does not have a provider named %s", + err.Provider.Hostname.ForDisplay(), + err.Provider, + ) +} + +// ErrPlatformNotSupported is an error type used to indicate that a particular +// version of a provider isn't available for a particular target platform. 
+// +// This is returned when DownloadLocation encounters a 404 Not Found response +// from the underlying registry, because it presumes that a caller will only +// ask for the DownloadLocation for a version it already found the existence +// of via AvailableVersions. +type ErrPlatformNotSupported struct { + Provider addrs.Provider + Version Version + Platform Platform + + // MirrorURL, if non-nil, is the base URL of the mirror that serviced + // the request in place of the provider's origin registry. MirrorURL + // is nil for a direct query. + MirrorURL *url.URL +} + +func (err ErrPlatformNotSupported) Error() string { + if err.MirrorURL != nil { + return fmt.Sprintf( + "provider mirror %s does not have a package of %s %s for %s", + err.MirrorURL.String(), + err.Provider, + err.Version, + err.Platform, + ) + } + return fmt.Sprintf( + "provider %s %s is not available for %s", + err.Provider, + err.Version, + err.Platform, + ) +} + +// ErrProtocolNotSupported is an error type used to indicate that a particular +// version of a provider is not supported by the current version of OpenTofu. +// +// Specifically, this is returned when the version's plugin protocol is not supported. +// +// When available, the error will include a suggested version that can be displayed to +// the user. Otherwise it will return UnspecifiedVersion. +type ErrProtocolNotSupported struct { + Provider addrs.Provider + Version Version + Suggestion Version +} + +func (err ErrProtocolNotSupported) Error() string { + return fmt.Sprintf( + "provider %s %s is not supported by this version of OpenTofu", + err.Provider, + err.Version, + ) +} + +// ErrQueryFailed is an error type used to indicate that the hostname given +// in a provider address does appear to be a provider registry but that when +// we queried it for metadata for the given provider the server returned an +// unexpected error. 
+// +// This is used for any error responses other than "Not Found", which would +// indicate the absence of a provider and is thus reported using +// ErrRegistryProviderNotKnown instead. +type ErrQueryFailed struct { + Provider addrs.Provider + Wrapped error + + // MirrorURL, if non-nil, is the base URL of the mirror that serviced + // the request in place of the provider's origin registry. MirrorURL + // is nil for a direct query. + MirrorURL *url.URL +} + +func (err ErrQueryFailed) Error() string { + if err.MirrorURL != nil { + return fmt.Sprintf( + "failed to query provider mirror %s for %s: %s", + err.MirrorURL.String(), + err.Provider.String(), + err.Wrapped.Error(), + ) + } + return fmt.Sprintf( + "could not query provider registry for %s: %s", + err.Provider.String(), + err.Wrapped.Error(), + ) +} + +// Unwrap returns the underlying error that occurred when trying to reach the +// indicated host. +func (err ErrQueryFailed) Unwrap() error { + return err.Wrapped +} + +// ErrRequestCanceled is an error type used to indicate that an operation +// failed due to being cancelled via the given context.Context object. +// +// This error type doesn't include information about what was cancelled, +// because the expected treatment of this error type is to quickly abort and +// exit with minimal ceremony. +type ErrRequestCanceled struct { +} + +func (err ErrRequestCanceled) Error() string { + return "request canceled" +} + +// ErrIsNotExist returns true if and only if the given error is one of the +// errors from this package that represents an affirmative response that a +// requested object does not exist. +// +// This is as opposed to errors indicating that the source is unavailable +// or misconfigured in some way, where we therefore cannot say for certain +// whether the requested object exists. 
+// +// If a caller needs to take a special action based on something not existing, +// such as falling back on some other source, use this function rather than +// direct type assertions so that the set of possible "not exist" errors can +// grow in future. +func ErrIsNotExist(err error) bool { + switch err.(type) { + case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported: + return true + default: + return false + } +} diff --git a/pkg/getproviders/filesystem_mirror_source.go b/pkg/getproviders/filesystem_mirror_source.go new file mode 100644 index 00000000000..1446dbebc58 --- /dev/null +++ b/pkg/getproviders/filesystem_mirror_source.go @@ -0,0 +1,133 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// FilesystemMirrorSource is a source that reads providers and their metadata +// from a directory prefix in the local filesystem. +type FilesystemMirrorSource struct { + baseDir string + + // allPackages caches the result of scanning the baseDir for all available + // packages on the first call that needs package availability information, + // to avoid re-scanning the filesystem on subsequent operations. + allPackages map[addrs.Provider]PackageMetaList +} + +var _ Source = (*FilesystemMirrorSource)(nil) + +// NewFilesystemMirrorSource constructs and returns a new filesystem-based +// mirror source with the given base directory. +func NewFilesystemMirrorSource(baseDir string) *FilesystemMirrorSource { + return &FilesystemMirrorSource{ + baseDir: baseDir, + } +} + +// AvailableVersions scans the directory structure under the source's base +// directory for locally-mirrored packages for the given provider, returning +// a list of version numbers for the providers it found. 
+func (s *FilesystemMirrorSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) { + // s.allPackages is populated if scanAllVersions succeeds + err := s.scanAllVersions() + if err != nil { + return nil, nil, err + } + + // There might be multiple packages for a given version in the filesystem, + // but the contract here is to return distinct versions so we'll dedupe + // them first, then sort them, and then return them. + versionsMap := make(map[Version]struct{}) + for _, m := range s.allPackages[provider] { + versionsMap[m.Version] = struct{}{} + } + ret := make(VersionList, 0, len(versionsMap)) + for v := range versionsMap { + ret = append(ret, v) + } + ret.Sort() + return ret, nil, nil +} + +// PackageMeta checks to see if the source's base directory contains a +// local copy of the distribution package for the given provider version on +// the given target, and returns the metadata about it if so. +func (s *FilesystemMirrorSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + // s.allPackages is populated if scanAllVersions succeeds + err := s.scanAllVersions() + if err != nil { + return PackageMeta{}, err + } + + relevantPkgs := s.allPackages[provider].FilterProviderPlatformExactVersion(provider, target, version) + if len(relevantPkgs) == 0 { + // This is the local equivalent of a "404 Not Found" when retrieving + // a particular version from a registry or network mirror. Because + // the caller should've selected a version already found by + // AvailableVersions, the only discriminator that should fail here + // is the target platform, and so our error result assumes that, + // causing the caller to return an error like "This provider version is + // not compatible with aros_riscv". 
+ return PackageMeta{}, ErrPlatformNotSupported{ + Provider: provider, + Version: version, + Platform: target, + } + } + + // It's possible that there could be multiple copies of the same package + // available in the filesystem, if e.g. there's both a packed and an + // unpacked variant. For now we assume that the decision between them + // is arbitrary and just take the first one in the result. + return relevantPkgs[0], nil +} + +// AllAvailablePackages scans the directory structure under the source's base +// directory for locally-mirrored packages for all providers, returning a map +// of the discovered packages with the fully-qualified provider names as +// keys. +// +// This is not an operation generally supported by all Source implementations, +// but the filesystem implementation offers it because we also use the +// filesystem mirror source directly to scan our auto-install plugin directory +// and in other automatic discovery situations. +func (s *FilesystemMirrorSource) AllAvailablePackages() (map[addrs.Provider]PackageMetaList, error) { + // s.allPackages is populated if scanAllVersions succeeds + err := s.scanAllVersions() + return s.allPackages, err +} + +func (s *FilesystemMirrorSource) scanAllVersions() error { + if s.allPackages != nil { + // we're distinguishing nil-ness from emptiness here so we can + // recognize when we've scanned the directory without errors, even + // if we found nothing during the scan. + return nil + } + + ret, err := SearchLocalDirectory(s.baseDir) + if err != nil { + return err + } + + // As noted above, we use an explicit empty map so we can distinguish a + // successful-but-empty result from a failure on future calls, so we'll + // make sure that's what we have before we assign it here. 
+ if ret == nil { + ret = make(map[addrs.Provider]PackageMetaList) + } + s.allPackages = ret + return nil +} + +func (s *FilesystemMirrorSource) ForDisplay(provider addrs.Provider) string { + return s.baseDir +} diff --git a/pkg/getproviders/filesystem_mirror_source_test.go b/pkg/getproviders/filesystem_mirror_source_test.go new file mode 100644 index 00000000000..8adfb6f0367 --- /dev/null +++ b/pkg/getproviders/filesystem_mirror_source_test.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "testing" + + "github.com/apparentlymart/go-versions/versions" + "github.com/google/go-cmp/cmp" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestFilesystemMirrorSourceAllAvailablePackages(t *testing.T) { + source := NewFilesystemMirrorSource("testdata/filesystem-mirror") + got, err := source.AllAvailablePackages() + if err != nil { + t.Fatal(err) + } + + want := map[addrs.Provider]PackageMetaList{ + nullProvider: { + { + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + TargetPlatform: Platform{"darwin", "amd64"}, + Filename: "terraform-provider-null_2.0.0_darwin_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64"), + }, + { + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-null_2.0.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64"), + }, + { + Provider: nullProvider, + Version: versions.MustParseVersion("2.1.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-null_2.1.0_linux_amd64.zip", + Location: 
PackageLocalArchive("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip"), + }, + { + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + TargetPlatform: Platform{"windows", "amd64"}, + Filename: "terraform-provider-null_2.0.0_windows_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64"), + }, + }, + randomBetaProvider: { + { + Provider: randomBetaProvider, + Version: versions.MustParseVersion("1.2.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-random-beta_1.2.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64"), + }, + }, + randomProvider: { + { + Provider: randomProvider, + Version: versions.MustParseVersion("1.2.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-random_1.2.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64"), + }, + }, + + happycloudProvider: { + { + Provider: happycloudProvider, + Version: versions.MustParseVersion("0.1.0-alpha.2"), + TargetPlatform: Platform{"darwin", "amd64"}, + Filename: "terraform-provider-happycloud_0.1.0-alpha.2_darwin_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64"), + }, + }, + legacyProvider: { + { + Provider: legacyProvider, + Version: versions.MustParseVersion("1.0.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-legacy_1.0.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/-/legacy/1.0.0/linux_amd64"), + }, + }, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("incorrect result\n%s", diff) + } +} + +// In this test the directory layout is invalid 
(missing the hostname +// subdirectory). The provider installer should ignore the invalid directory. +func TestFilesystemMirrorSourceAllAvailablePackages_invalid(t *testing.T) { + source := NewFilesystemMirrorSource("testdata/filesystem-mirror-invalid") + _, err := source.AllAvailablePackages() + if err != nil { + t.Fatal(err) + } +} + +func TestFilesystemMirrorSourceAvailableVersions(t *testing.T) { + source := NewFilesystemMirrorSource("testdata/filesystem-mirror") + got, _, err := source.AvailableVersions(context.Background(), nullProvider) + if err != nil { + t.Fatal(err) + } + + want := VersionList{ + versions.MustParseVersion("2.0.0"), + versions.MustParseVersion("2.1.0"), + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("incorrect result\n%s", diff) + } +} + +func TestFilesystemMirrorSourcePackageMeta(t *testing.T) { + t.Run("available platform", func(t *testing.T) { + source := NewFilesystemMirrorSource("testdata/filesystem-mirror") + got, err := source.PackageMeta( + context.Background(), + nullProvider, + versions.MustParseVersion("2.0.0"), + Platform{"linux", "amd64"}, + ) + if err != nil { + t.Fatal(err) + } + + want := PackageMeta{ + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "terraform-provider-null_2.0.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64"), + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("incorrect result\n%s", diff) + } + + if gotHashes := got.AcceptableHashes(); len(gotHashes) != 0 { + t.Errorf("wrong acceptable hashes\ngot: %#v\nwant: none", gotHashes) + } + }) + t.Run("unavailable platform", func(t *testing.T) { + source := NewFilesystemMirrorSource("testdata/filesystem-mirror") + // We'll request a version that does exist in the fixture directory, + // but for a platform that isn't supported. 
+ _, err := source.PackageMeta( + context.Background(), + nullProvider, + versions.MustParseVersion("2.0.0"), + Platform{"nonexist", "nonexist"}, + ) + + if err == nil { + t.Fatalf("succeeded; want error") + } + + // This specific error type is important so callers can use it to + // generate an actionable error message e.g. by checking to see if + // _any_ versions of this provider support the given platform, or + // similar helpful hints. + wantErr := ErrPlatformNotSupported{ + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + Platform: Platform{"nonexist", "nonexist"}, + } + if diff := cmp.Diff(wantErr, err); diff != "" { + t.Errorf("incorrect error\n%s", diff) + } + }) +} + +var nullProvider = addrs.Provider{ + Hostname: svchost.Hostname("registry.opentofu.org"), + Namespace: "hashicorp", + Type: "null", +} +var randomProvider = addrs.Provider{ + Hostname: svchost.Hostname("registry.opentofu.org"), + Namespace: "hashicorp", + Type: "random", +} +var randomBetaProvider = addrs.Provider{ + Hostname: svchost.Hostname("registry.opentofu.org"), + Namespace: "hashicorp", + Type: "random-beta", +} +var happycloudProvider = addrs.Provider{ + Hostname: svchost.Hostname("tfe.example.com"), + Namespace: "awesomecorp", + Type: "happycloud", +} +var legacyProvider = addrs.NewLegacyProvider("legacy") diff --git a/pkg/getproviders/filesystem_search.go b/pkg/getproviders/filesystem_search.go new file mode 100644 index 00000000000..2d30c021fd6 --- /dev/null +++ b/pkg/getproviders/filesystem_search.go @@ -0,0 +1,294 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// SearchLocalDirectory performs an immediate, one-off scan of the given base +// directory for provider plugins using the directory structure defined for +// FilesystemMirrorSource. +// +// This is separated to allow other callers, such as the provider plugin cache +// management in the "internal/providercache" package, to use the same +// directory structure conventions. +func SearchLocalDirectory(baseDir string) (map[addrs.Provider]PackageMetaList, error) { + ret := make(map[addrs.Provider]PackageMetaList) + + // We don't support symlinks at intermediate points inside the directory + // hierarchy because that could potentially cause our walk to get into + // an infinite loop, but as a measure of pragmatism we'll allow the + // top-level location itself to be a symlink, so that a user can + // potentially keep their plugins in a non-standard location but use a + // symlink to help OpenTofu find them anyway. + originalBaseDir := baseDir + if finalDir, err := filepath.EvalSymlinks(baseDir); err == nil { + if finalDir != filepath.Clean(baseDir) { + log.Printf("[TRACE] getproviders.SearchLocalDirectory: using %s instead of %s", finalDir, baseDir) + } + baseDir = finalDir + } else { + // We'll eat this particular error because if we're somehow able to + // find plugins via baseDir below anyway then we'd rather do that than + // hard fail, but we'll log it in case it's useful for diagnosing why + // discovery didn't produce the expected outcome. 
+ log.Printf("[TRACE] getproviders.SearchLocalDirectory: failed to resolve symlinks for %s: %s", baseDir, err) + } + + err := filepath.Walk(baseDir, func(fullPath string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("cannot search %s: %w", fullPath, err) + } + + // There are two valid directory structures that we support here... + // Unpacked: registry.opentofu.org/hashicorp/aws/2.0.0/linux_amd64 (a directory) + // Packed: registry.opentofu.org/hashicorp/aws/terraform-provider-aws_2.0.0_linux_amd64.zip (a file) + // + // Both of these give us enough information to identify the package + // metadata. + fsPath, err := filepath.Rel(baseDir, fullPath) + if err != nil { + // This should never happen because the filepath.Walk contract is + // for the paths to include the base path. + log.Printf("[TRACE] getproviders.SearchLocalDirectory: ignoring malformed path %q during walk: %s", fullPath, err) + return nil + } + relPath := filepath.ToSlash(fsPath) + parts := strings.Split(relPath, "/") + + if len(parts) < 3 { + // Likely a prefix of a valid path, so we'll ignore it and visit + // the full valid path on a later call. + + if (info.Mode() & os.ModeSymlink) != 0 { + // We don't allow symlinks for intermediate steps in the + // hierarchy because otherwise this walk would risk getting + // itself into an infinite loop, but if we do find one then + // we'll warn about it to help with debugging. + log.Printf("[WARN] Provider plugin search ignored symlink %s: only the base directory %s may be a symlink", fullPath, originalBaseDir) + } + + return nil + } + + hostnameGiven := parts[0] + namespace := parts[1] + typeName := parts[2] + + // validate each part + // The legacy provider namespace is a special case. 
+ if namespace != addrs.LegacyProviderNamespace { + _, err = addrs.ParseProviderPart(namespace) + if err != nil { + log.Printf("[WARN] local provider path %q contains invalid namespace %q; ignoring", fullPath, namespace) + return nil + } + } + + _, err = addrs.ParseProviderPart(typeName) + if err != nil { + log.Printf("[WARN] local provider path %q contains invalid type %q; ignoring", fullPath, typeName) + return nil + } + + hostname, err := svchost.ForComparison(hostnameGiven) + if err != nil { + log.Printf("[WARN] local provider path %q contains invalid hostname %q; ignoring", fullPath, hostnameGiven) + return nil + } + var providerAddr addrs.Provider + if namespace == addrs.LegacyProviderNamespace { + if hostname != addrs.DefaultProviderRegistryHost { + log.Printf("[WARN] local provider path %q indicates a legacy provider not on the default registry host; ignoring", fullPath) + return nil + } + providerAddr = addrs.NewLegacyProvider(typeName) + } else { + providerAddr = addrs.NewProvider(hostname, namespace, typeName) + } + + // The "info" passed to our function is an Lstat result, so it might + // be referring to a symbolic link. We'll do a full "Stat" on it + // now to make sure we're making tests against the real underlying + // filesystem object below. 
+ info, err = os.Stat(fullPath) + if err != nil { + log.Printf("[WARN] failed to read metadata about %s: %s", fullPath, err) + return nil + } + + switch len(parts) { + case 5: // Might be unpacked layout + if !info.IsDir() { + return nil // unpacked layout requires a directory + } + + versionStr := parts[3] + version, err := ParseVersion(versionStr) + if err != nil { + log.Printf("[WARN] ignoring local provider path %q with invalid version %q: %s", fullPath, versionStr, err) + return nil + } + + platformStr := parts[4] + platform, err := ParsePlatform(platformStr) + if err != nil { + log.Printf("[WARN] ignoring local provider path %q with invalid platform %q: %s", fullPath, platformStr, err) + return nil + } + + log.Printf("[TRACE] getproviders.SearchLocalDirectory: found %s v%s for %s at %s", providerAddr, version, platform, fullPath) + + meta := PackageMeta{ + Provider: providerAddr, + Version: version, + + // FIXME: How do we populate this? + ProtocolVersions: nil, + TargetPlatform: platform, + + // Because this is already unpacked, the filename is synthetic + // based on the standard naming scheme. + Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", providerAddr.Type, version, platform), + Location: PackageLocalDir(fullPath), + + // FIXME: What about the SHA256Sum field? As currently specified + // it's a hash of the zip file, but this thing is already + // unpacked and so we don't have the zip file to hash. + } + ret[providerAddr] = append(ret[providerAddr], meta) + + case 4: // Might be packed layout + if info.IsDir() { + return nil // packed layout requires a file + } + + filename := filepath.Base(fsPath) + // the filename components are matched case-insensitively, and + // the normalized form of them is in lowercase so we'll convert + // to lowercase for comparison here. 
(This normalizes only for case, + // because that is the primary constraint affecting compatibility + // between filesystem implementations on different platforms; + // filenames are expected to be pre-normalized and valid in other + // regards.) + normFilename := strings.ToLower(filename) + + // In the packed layout, the version number and target platform + // are derived from the package filename, but only if the + // filename has the expected prefix identifying it as a package + // for the provider in question, and the suffix identifying it + // as a zip file. + prefix := "terraform-provider-" + providerAddr.Type + "_" + const suffix = ".zip" + if !strings.HasPrefix(normFilename, prefix) { + log.Printf("[WARN] ignoring file %q as possible package for %s: filename lacks expected prefix %q", fsPath, providerAddr, prefix) + return nil + } + if !strings.HasSuffix(normFilename, suffix) { + log.Printf("[WARN] ignoring file %q as possible package for %s: filename lacks expected suffix %q", fsPath, providerAddr, suffix) + return nil + } + + // Extract the version and target part of the filename, which + // will look like "2.1.0_linux_amd64" + infoSlice := normFilename[len(prefix) : len(normFilename)-len(suffix)] + infoParts := strings.Split(infoSlice, "_") + if len(infoParts) < 3 { + log.Printf("[WARN] ignoring file %q as possible package for %s: filename does not include version number, target OS, and target architecture", fsPath, providerAddr) + return nil + } + + versionStr := infoParts[0] + version, err := ParseVersion(versionStr) + if err != nil { + log.Printf("[WARN] ignoring local provider path %q with invalid version %q: %s", fullPath, versionStr, err) + return nil + } + + // We'll reassemble this back into a single string just so we can + // easily re-use our existing parser and its normalization rules. 
+ platformStr := infoParts[1] + "_" + infoParts[2] + platform, err := ParsePlatform(platformStr) + if err != nil { + log.Printf("[WARN] ignoring local provider path %q with invalid platform %q: %s", fullPath, platformStr, err) + return nil + } + + log.Printf("[TRACE] getproviders.SearchLocalDirectory: found %s v%s for %s at %s", providerAddr, version, platform, fullPath) + + meta := PackageMeta{ + Provider: providerAddr, + Version: version, + + // FIXME: How do we populate this? + ProtocolVersions: nil, + TargetPlatform: platform, + + // Because this is already unpacked, the filename is synthetic + // based on the standard naming scheme. + Filename: normFilename, // normalized filename, because this field says what it _should_ be called, not what it _is_ called + Location: PackageLocalArchive(fullPath), // non-normalized here, because this is the actual physical location + + // TODO: Also populate the SHA256Sum field. Skipping that + // for now because our initial uses of this result -- + // scanning already-installed providers in local directories, + // rather than explicit filesystem mirrors -- doesn't do + // any hash verification anyway, and this is consistent with + // the FIXME in the unpacked case above even though technically + // we _could_ populate SHA256Sum here right now. + } + ret[providerAddr] = append(ret[providerAddr], meta) + + } + + return nil + }) + if err != nil { + return nil, err + } + // Sort the results to be deterministic (aside from semver build metadata) + // and consistent with ordering from other functions. + for _, l := range ret { + l.Sort() + } + return ret, nil +} + +// UnpackedDirectoryPathForPackage is similar to +// PackageMeta.UnpackedDirectoryPath but makes its decision based on +// individually-passed provider address, version, and target platform so that +// it can be used by callers outside this package that may have other +// types that represent package identifiers. 
+func UnpackedDirectoryPathForPackage(baseDir string, provider addrs.Provider, version Version, platform Platform) string { + return filepath.ToSlash(filepath.Join( + baseDir, + provider.Hostname.ForDisplay(), provider.Namespace, provider.Type, + version.String(), + platform.String(), + )) +} + +// PackedFilePathForPackage is similar to +// PackageMeta.PackedFilePath but makes its decision based on +// individually-passed provider address, version, and target platform so that +// it can be used by callers outside this package that may have other +// types that represent package identifiers. +func PackedFilePathForPackage(baseDir string, provider addrs.Provider, version Version, platform Platform) string { + return filepath.ToSlash(filepath.Join( + baseDir, + provider.Hostname.ForDisplay(), provider.Namespace, provider.Type, + fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), platform.String()), + )) +} diff --git a/pkg/getproviders/filesystem_search_test.go b/pkg/getproviders/filesystem_search_test.go new file mode 100644 index 00000000000..eb666db3db2 --- /dev/null +++ b/pkg/getproviders/filesystem_search_test.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestSearchLocalDirectory(t *testing.T) { + tests := []struct { + Fixture string + Subdir string + Want map[addrs.Provider]PackageMetaList + }{ + { + "symlinks", + "symlink", + map[addrs.Provider]PackageMetaList{ + addrs.MustParseProviderSourceString("example.com/foo/bar"): { + { + Provider: addrs.MustParseProviderSourceString("example.com/foo/bar"), + Version: MustParseVersion("1.0.0"), + TargetPlatform: Platform{OS: "linux", Arch: "amd64"}, + Filename: "terraform-provider-bar_1.0.0_linux_amd64.zip", + Location: PackageLocalDir("testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64"), + }, + }, + // This search doesn't find example.net/foo/bar because only + // the top-level search directory is supported as being a + // symlink, and so we ignore the example.net symlink to + // example.com that is one level deeper. + }, + }, + } + + for _, test := range tests { + t.Run(test.Fixture, func(t *testing.T) { + fullDir := filepath.Join("testdata/search-local-directory", test.Fixture, test.Subdir) + got, err := SearchLocalDirectory(fullDir) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + want := test.Want + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} diff --git a/pkg/getproviders/hanging_source.go b/pkg/getproviders/hanging_source.go new file mode 100644 index 00000000000..86eb4075a55 --- /dev/null +++ b/pkg/getproviders/hanging_source.go @@ -0,0 +1,34 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// HangingSource is an implementation of Source which hangs until the given +// context is cancelled. This is useful only for unit tests of user-controlled +// cancels. +type HangingSource struct { +} + +var _ Source = (*HangingSource)(nil) + +func (s *HangingSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) { + <-ctx.Done() + return nil, nil, nil +} + +func (s *HangingSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + <-ctx.Done() + return PackageMeta{}, nil +} + +func (s *HangingSource) ForDisplay(provider addrs.Provider) string { + return "hanging source" +} diff --git a/pkg/getproviders/hash.go b/pkg/getproviders/hash.go new file mode 100644 index 00000000000..b401774fbe1 --- /dev/null +++ b/pkg/getproviders/hash.go @@ -0,0 +1,456 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "crypto/sha256" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/mod/sumdb/dirhash" +) + +// Hash is a specially-formatted string representing a checksum of a package +// or the contents of the package. +// +// A Hash string always starts with a scheme, which is a short series of +// alphanumeric characters followed by a colon, and then the remainder of the +// string has a different meaning depending on the scheme prefix. +// +// The currently-valid schemes are defined as the constants of type HashScheme +// in this package. +// +// Callers outside of this package must not create Hash values via direct +// conversion. 
// Instead, use either the HashScheme.New method on one of the
// HashScheme constants (for a hash of a particular scheme) or the ParseHash
// function (if hashes of any scheme are acceptable).
type Hash string

// NilHash is the zero value of Hash. It isn't a valid hash, so all of its
// methods will panic.
const NilHash = Hash("")

// ParseHash parses the string representation of a Hash into a Hash value.
//
// A particular version of OpenTofu only supports a fixed set of hash schemes,
// but this function intentionally allows unrecognized schemes so that we can
// silently ignore other schemes that may be introduced in the future. For
// that reason, the Scheme method of the returned Hash may return a value that
// isn't in one of the HashScheme constants in this package.
//
// This function doesn't verify that the value portion of the given hash makes
// sense for the given scheme. Invalid values are just considered to not match
// any packages.
//
// If this function returns an error then the returned Hash is invalid and
// must not be used.
func ParseHash(s string) (Hash, error) {
	colon := strings.Index(s, ":")
	if colon < 1 { // 1 because a zero-length scheme is not allowed
		return NilHash, fmt.Errorf("hash string must start with a scheme keyword followed by a colon")
	}
	return Hash(s), nil
}

// MustParseHash is a wrapper around ParseHash that panics if it returns an
// error.
func MustParseHash(s string) Hash {
	hash, err := ParseHash(s)
	if err != nil {
		panic(err.Error())
	}
	return hash
}

// Scheme returns the scheme of the receiving hash. If the receiver is not
// using valid syntax then this method will panic.
func (h Hash) Scheme() HashScheme {
	colon := strings.Index(string(h), ":")
	if colon < 0 {
		panic(fmt.Sprintf("invalid hash string %q", h))
	}
	// The returned scheme includes the trailing colon, matching the
	// HashScheme constant values.
	return HashScheme(h[:colon+1])
}

// HasScheme returns true if the given scheme matches the receiver's scheme,
// or false otherwise.
+// +// If the receiver is not using valid syntax then this method will panic. +func (h Hash) HasScheme(want HashScheme) bool { + return h.Scheme() == want +} + +// Value returns the scheme-specific value from the recieving hash. The +// meaning of this value depends on the scheme. +// +// If the receiver is not using valid syntax then this method will panic. +func (h Hash) Value() string { + colon := strings.Index(string(h), ":") + if colon < 0 { + panic(fmt.Sprintf("invalid hash string %q", h)) + } + return string(h[colon+1:]) +} + +// String returns a string representation of the receiving hash. +func (h Hash) String() string { + return string(h) +} + +// GoString returns a Go syntax representation of the receiving hash. +// +// This is here primarily to help with producing descriptive test failure +// output; these results are not particularly useful at runtime. +func (h Hash) GoString() string { + if h == NilHash { + return "getproviders.NilHash" + } + switch scheme := h.Scheme(); scheme { + case HashScheme1: + return fmt.Sprintf("getproviders.HashScheme1.New(%q)", h.Value()) + case HashSchemeZip: + return fmt.Sprintf("getproviders.HashSchemeZip.New(%q)", h.Value()) + default: + // This fallback is for when we encounter lock files or API responses + // with hash schemes that the current version of OpenTofu isn't + // familiar with. They were presumably introduced in a later version. + return fmt.Sprintf("getproviders.HashScheme(%q).New(%q)", scheme, h.Value()) + } +} + +// HashScheme is an enumeration of schemes that are allowed for values of type +// Hash. +type HashScheme string + +const ( + // HashScheme1 is the scheme identifier for the first hash scheme. + // + // Use HashV1 (or one of its wrapper functions) to calculate hashes with + // this scheme. 
+ HashScheme1 HashScheme = HashScheme("h1:") + + // HashSchemeZip is the scheme identifier for the legacy hash scheme that + // applies to distribution archives (.zip files) rather than package + // contents, and can therefore only be verified against the original + // distribution .zip file, not an extracted directory. + // + // Use PackageHashLegacyZipSHA to calculate hashes with this scheme. + HashSchemeZip HashScheme = HashScheme("zh:") +) + +// New creates a new Hash value with the receiver as its scheme and the given +// raw string as its value. +// +// It's the caller's responsibility to make sure that the given value makes +// sense for the selected scheme. +func (hs HashScheme) New(value string) Hash { + return Hash(string(hs) + value) +} + +// PackageHash computes a hash of the contents of the package at the given +// location, using whichever hash algorithm is the current default. +// +// Currently, this method returns version 1 hashes as produced by the +// function PackageHashV1, but this function may switch to other versions in +// later releases. Call PackageHashV1 directly if you specifically need a V1 +// hash. +// +// PackageHash can be used only with the two local package location types +// PackageLocalDir and PackageLocalArchive, because it needs to access the +// contents of the indicated package in order to compute the hash. If given +// a non-local location this function will always return an error. +func PackageHash(loc PackageLocation) (Hash, error) { + return PackageHashV1(loc) +} + +// PackageMatchesHash returns true if the package at the given location matches +// the given hash, or false otherwise. +// +// If it cannot read from the given location, or if the given hash is in an +// unsupported format, PackageMatchesHash returns an error. +// +// There is currently only one hash format, as implemented by HashV1. 
However, +// if others are introduced in future PackageMatchesHash may accept multiple +// formats, and may generate errors for any formats that become obsolete. +// +// PackageMatchesHash can be used only with the two local package location types +// PackageLocalDir and PackageLocalArchive, because it needs to access the +// contents of the indicated package in order to compute the hash. If given +// a non-local location this function will always return an error. +func PackageMatchesHash(loc PackageLocation, want Hash) (bool, error) { + switch want.Scheme() { + case HashScheme1: + got, err := PackageHashV1(loc) + if err != nil { + return false, err + } + return got == want, nil + case HashSchemeZip: + archiveLoc, ok := loc.(PackageLocalArchive) + if !ok { + return false, fmt.Errorf(`ziphash scheme ("zh:" prefix) is not supported for unpacked provider packages`) + } + got, err := PackageHashLegacyZipSHA(archiveLoc) + if err != nil { + return false, err + } + return got == want, nil + default: + return false, fmt.Errorf("unsupported hash format (this may require a newer version of OpenTofu)") + } +} + +// PackageMatchesAnyHash returns true if the package at the given location +// matches at least one of the given hashes, or false otherwise. +// +// If it cannot read from the given location, PackageMatchesAnyHash returns an +// error. Unlike the singular PackageMatchesHash, PackageMatchesAnyHash +// considers unsupported hash formats as successfully non-matching, rather +// than returning an error. +// +// PackageMatchesAnyHash can be used only with the two local package location +// types PackageLocalDir and PackageLocalArchive, because it needs to access the +// contents of the indicated package in order to compute the hash. If given +// a non-local location this function will always return an error. 
+func PackageMatchesAnyHash(loc PackageLocation, allowed []Hash) (bool, error) { + // It's likely that we'll have multiple hashes of the same scheme in + // the "allowed" set, in which case we'll avoid repeatedly re-reading the + // given package by caching its result for each of the two + // currently-supported hash formats. These will be NilHash until we + // encounter the first hash of the corresponding scheme. + var v1Hash, zipHash Hash + for _, want := range allowed { + switch want.Scheme() { + case HashScheme1: + if v1Hash == NilHash { + got, err := PackageHashV1(loc) + if err != nil { + return false, err + } + v1Hash = got + } + if v1Hash == want { + return true, nil + } + case HashSchemeZip: + archiveLoc, ok := loc.(PackageLocalArchive) + if !ok { + // A zip hash can never match an unpacked directory + continue + } + if zipHash == NilHash { + got, err := PackageHashLegacyZipSHA(archiveLoc) + if err != nil { + return false, err + } + zipHash = got + } + if zipHash == want { + return true, nil + } + default: + // If it's not a supported format then it can't match. + continue + } + } + return false, nil +} + +// PreferredHashes examines all of the given hash strings and returns the one +// that the current version of OpenTofu considers to provide the strongest +// verification. +// +// Returns an empty string if none of the given hashes are of a supported +// format. If PreferredHash returns a non-empty string then it will be one +// of the hash strings in "given", and that hash is the one that must pass +// verification in order for a package to be considered valid. +func PreferredHashes(given []Hash) []Hash { + // For now this is just filtering for the two hash formats we support, + // both of which are considered equally "preferred". 
If we introduce + // a new scheme like "h2:" in future then, depending on the characteristics + // of that new version, it might make sense to rework this function so + // that it only returns "h1:" hashes if the input has no "h2:" hashes, + // so that h2: is preferred when possible and h1: is only a fallback for + // interacting with older systems that haven't been updated with the new + // scheme yet. + + var ret []Hash + for _, hash := range given { + switch hash.Scheme() { + case HashScheme1, HashSchemeZip: + ret = append(ret, hash) + } + } + return ret +} + +// PackageHashLegacyZipSHA implements the old provider package hashing scheme +// of taking a SHA256 hash of the containing .zip archive itself, rather than +// of the contents of the archive. +// +// The result is a hash string with the "zh:" prefix, which is intended to +// represent "zip hash". After the prefix is a lowercase-hex encoded SHA256 +// checksum, intended to exactly match the formatting used in the registry +// API (apart from the prefix) so that checksums can be more conveniently +// compared by humans. +// +// Because this hashing scheme uses the official provider .zip file as its +// input, it accepts only PackageLocalArchive locations. +func PackageHashLegacyZipSHA(loc PackageLocalArchive) (Hash, error) { + archivePath, err := filepath.EvalSymlinks(string(loc)) + if err != nil { + return "", err + } + + f, err := os.Open(archivePath) + if err != nil { + return "", err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return "", err + } + + gotHash := h.Sum(nil) + return HashSchemeZip.New(fmt.Sprintf("%x", gotHash)), nil +} + +// HashLegacyZipSHAFromSHA is a convenience method to produce the schemed-string +// hash format from an already-calculated hash of a provider .zip archive. +// +// This just adds the "zh:" prefix and encodes the string in hex, so that the +// result is in the same format as PackageHashLegacyZipSHA. 
func HashLegacyZipSHAFromSHA(sum [sha256.Size]byte) Hash {
	return HashSchemeZip.New(fmt.Sprintf("%x", sum[:]))
}

// PackageHashV1 computes a hash of the contents of the package at the given
// location using hash algorithm 1. The resulting Hash is guaranteed to have
// the scheme HashScheme1.
//
// The hash covers the paths to files in the directory and the contents of
// those files. It does not cover other metadata about the files, such as
// permissions.
//
// This function is named "PackageHashV1" in anticipation of other hashing
// algorithms being added in a backward-compatible way in future. The result
// from PackageHashV1 always begins with the prefix "h1:" so that callers can
// distinguish the results of potentially multiple different hash algorithms in
// future.
//
// PackageHashV1 can be used only with the two local package location types
// PackageLocalDir and PackageLocalArchive, because it needs to access the
// contents of the indicated package in order to compute the hash. If given
// a non-local location this function will always return an error.
func PackageHashV1(loc PackageLocation) (Hash, error) {
	// Our HashV1 is really just the Go Modules hash version 1, which is
	// sufficient for our needs and already well-used for identity of
	// Go Modules distribution packages. It is also blocked from incompatible
	// changes by being used in a wide array of go.sum files already.
	//
	// In particular, it also supports computing an equivalent hash from
	// an unpacked zip file, which is not important for OpenTofu workflow
	// today but is likely to become so in future if we adopt a top-level
	// lockfile mechanism that is intended to be checked in to version control,
	// rather than just a transient lock for a particular local cache directory.
	// (In that case we'd need to check hashes of _packed_ packages, too.)
	//
	// Internally, dirhash.Hash1 produces a string containing a sequence of
	// newline-separated path+filehash pairs for all of the files in the
	// directory, and then finally produces a hash of that string to return.
	// In both cases, the hash algorithm is SHA256.

	switch loc := loc.(type) {

	case PackageLocalDir:
		// We'll first dereference a possible symlink at our PackageDir location,
		// as would be created if this package were linked in from another cache.
		packageDir, err := filepath.EvalSymlinks(string(loc))
		if err != nil {
			return "", err
		}

		// The dirhash.HashDir result is already in our expected h1:...
		// format, so we can just convert directly to Hash.
		s, err := dirhash.HashDir(packageDir, "", dirhash.Hash1)
		return Hash(s), err

	case PackageLocalArchive:
		archivePath, err := filepath.EvalSymlinks(string(loc))
		if err != nil {
			return "", err
		}

		// The dirhash.HashZip result is already in our expected h1:...
		// format, so we can just convert directly to Hash.
		s, err := dirhash.HashZip(archivePath, dirhash.Hash1)
		return Hash(s), err

	default:
		return "", fmt.Errorf("cannot hash package at %s", loc.String())
	}
}

// Hash computes a hash of the contents of the package at the location
// associated with the receiver, using whichever hash algorithm is the current
// default.
//
// This method will change to use new hash versions as they are introduced
// in future. If you need a specific hash version, call the method for that
// version directly instead, such as HashV1.
//
// Hash can be used only with the two local package location types
// PackageLocalDir and PackageLocalArchive, because it needs to access the
// contents of the indicated package in order to compute the hash. If given
// a non-local location this function will always return an error.
func (m PackageMeta) Hash() (Hash, error) {
	return PackageHash(m.Location)
}

// MatchesHash returns true if the package at the location associated with
// the receiver matches the given hash, or false otherwise.
//
// If it cannot read from the given location, or if the given hash is in an
// unsupported format, MatchesHash returns an error.
//
// MatchesHash can be used only with the two local package location types
// PackageLocalDir and PackageLocalArchive, because it needs to access the
// contents of the indicated package in order to compute the hash. If given
// a non-local location this function will always return an error.
func (m PackageMeta) MatchesHash(want Hash) (bool, error) {
	return PackageMatchesHash(m.Location, want)
}

// MatchesAnyHash returns true if the package at the location associated with
// the receiver matches at least one of the given hashes, or false otherwise.
//
// If it cannot read from the given location, MatchesAnyHash returns an error.
// Unlike the singular MatchesHash, MatchesAnyHash considers an unsupported
// hash format to be a successful non-match.
func (m PackageMeta) MatchesAnyHash(acceptable []Hash) (bool, error) {
	return PackageMatchesAnyHash(m.Location, acceptable)
}

// HashV1 computes a hash of the contents of the package at the location
// associated with the receiver using hash algorithm 1.
//
// The hash covers the paths to files in the directory and the contents of
// those files. It does not cover other metadata about the files, such as
// permissions.
//
// HashV1 can be used only with the two local package location types
// PackageLocalDir and PackageLocalArchive, because it needs to access the
// contents of the indicated package in order to compute the hash. If given
// a non-local location this function will always return an error.
+func (m PackageMeta) HashV1() (Hash, error) { + return PackageHashV1(m.Location) +} diff --git a/pkg/getproviders/hash_test.go b/pkg/getproviders/hash_test.go new file mode 100644 index 00000000000..640417137b8 --- /dev/null +++ b/pkg/getproviders/hash_test.go @@ -0,0 +1,75 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "testing" +) + +func TestParseHash(t *testing.T) { + tests := []struct { + Input string + Want Hash + WantErr string + }{ + { + Input: "h1:foo", + Want: HashScheme1.New("foo"), + }, + { + Input: "zh:bar", + Want: HashSchemeZip.New("bar"), + }, + { + // A scheme we don't know is considered valid syntax, it just won't match anything. + Input: "unknown:baz", + Want: HashScheme("unknown:").New("baz"), + }, + { + // A scheme with an empty value is weird, but allowed. + Input: "unknown:", + Want: HashScheme("unknown:").New(""), + }, + { + Input: "", + WantErr: "hash string must start with a scheme keyword followed by a colon", + }, + { + // A naked SHA256 hash in hex format is not sufficient + Input: "1e5f7a5f3ade7b8b1d1d59c5cea2e1a2f8d2f8c3f41962dbbe8647e222be8239", + WantErr: "hash string must start with a scheme keyword followed by a colon", + }, + { + // An empty scheme is not allowed + Input: ":blah", + WantErr: "hash string must start with a scheme keyword followed by a colon", + }, + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + got, err := ParseHash(test.Input) + + if test.WantErr != "" { + if err == nil { + t.Fatalf("want error: %s", test.WantErr) + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if got != test.Want { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git 
a/pkg/getproviders/http_mirror_source.go b/pkg/getproviders/http_mirror_source.go
new file mode 100644
index 00000000000..0c9b1742022
--- /dev/null
+++ b/pkg/getproviders/http_mirror_source.go
@@ -0,0 +1,433 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"mime"
	"net/http"
	"net/url"
	"path"
	"strings"

	"github.com/hashicorp/go-retryablehttp"
	svchost "github.com/hashicorp/terraform-svchost"
	svcauth "github.com/hashicorp/terraform-svchost/auth"
	"golang.org/x/net/idna"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/httpclient"
	"github.com/kubegems/opentofu/pkg/logging"
	"github.com/kubegems/opentofu/version"
)

// HTTPMirrorSource is a source that reads provider metadata from a provider
// mirror that is accessible over the HTTP provider mirror protocol.
type HTTPMirrorSource struct {
	// baseURL is the root of the mirror; protocol paths are resolved
	// relative to it. Must be an https URL.
	baseURL *url.URL
	// creds is an optional credentials lookup; may be nil.
	creds svcauth.CredentialsSource
	// httpClient is the retrying client shared by all requests from this
	// source.
	httpClient *retryablehttp.Client
}

var _ Source = (*HTTPMirrorSource)(nil)

// NewHTTPMirrorSource constructs and returns a new network mirror source with
// the given base URL. The relative URL offsets defined by the HTTP mirror
// protocol will be resolved relative to the given URL.
//
// The given URL must use the "https" scheme, or this function will panic.
// (When the URL comes from user input, such as in the CLI config, it's the
// UI/config layer's responsibility to validate this and return a suitable
// error message for the end-user audience.)
func NewHTTPMirrorSource(baseURL *url.URL, creds svcauth.CredentialsSource) *HTTPMirrorSource {
	httpClient := httpclient.New()
	httpClient.Timeout = requestTimeout
	httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		// If we get redirected more than five times we'll assume we're
		// in a redirect loop and bail out, rather than hanging forever.
		if len(via) > 5 {
			return fmt.Errorf("too many redirects")
		}
		return nil
	}
	return newHTTPMirrorSourceWithHTTPClient(baseURL, creds, httpClient)
}

// newHTTPMirrorSourceWithHTTPClient is the real constructor, split out from
// NewHTTPMirrorSource so that tests can inject an http.Client configured to
// trust a test server. It panics if baseURL is not an https URL; see
// NewHTTPMirrorSource for the rationale.
func newHTTPMirrorSourceWithHTTPClient(baseURL *url.URL, creds svcauth.CredentialsSource, httpClient *http.Client) *HTTPMirrorSource {
	if baseURL.Scheme != "https" {
		panic("non-https URL for HTTP mirror")
	}

	// We borrow the retry settings and behaviors from the registry client,
	// because our needs here are very similar to those of the registry client.
	retryableClient := retryablehttp.NewClient()
	retryableClient.HTTPClient = httpClient
	retryableClient.RetryMax = discoveryRetry
	retryableClient.RequestLogHook = requestLogHook
	retryableClient.ErrorHandler = maxRetryErrorHandler

	retryableClient.Logger = log.New(logging.LogOutput(), "", log.Flags())

	return &HTTPMirrorSource{
		baseURL:    baseURL,
		creds:      creds,
		httpClient: retryableClient,
	}
}

// AvailableVersions retrieves the available versions for the given provider
// from the object's underlying HTTP mirror service.
func (s *HTTPMirrorSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) {
	log.Printf("[DEBUG] Querying available versions of provider %s at network mirror %s", provider.String(), s.baseURL.String())

	// Mirror protocol path: <hostname>/<namespace>/<type>/index.json
	endpointPath := path.Join(
		provider.Hostname.String(),
		provider.Namespace,
		provider.Type,
		"index.json",
	)

	statusCode, body, finalURL, err := s.get(ctx, endpointPath)
	defer func() {
		// body is non-nil only when get returned 200 OK with a JSON payload.
		if body != nil {
			body.Close()
		}
	}()
	if err != nil {
		return nil, nil, s.errQueryFailed(provider, err)
	}

	switch statusCode {
	case http.StatusOK:
		// Great!
	case http.StatusNotFound:
		// The mirror simply doesn't have this provider.
		return nil, nil, ErrProviderNotFound{
			Provider: provider,
		}
	case http.StatusUnauthorized, http.StatusForbidden:
		return nil, nil, s.errUnauthorized(finalURL)
	default:
		return nil, nil, s.errQueryFailed(provider, fmt.Errorf("server returned unsuccessful status %d", statusCode))
	}

	// If we got here then the response had status OK and so our body
	// will be non-nil and should contain some JSON for us to parse.
	type ResponseBody struct {
		// Only the keys (version strings) matter here; the per-version
		// objects in the response are ignored.
		Versions map[string]struct{} `json:"versions"`
	}
	var bodyContent ResponseBody

	dec := json.NewDecoder(body)
	if err := dec.Decode(&bodyContent); err != nil {
		return nil, nil, s.errQueryFailed(provider, fmt.Errorf("invalid response content from mirror server: %w", err))
	}

	if len(bodyContent.Versions) == 0 {
		return nil, nil, nil
	}
	ret := make(VersionList, 0, len(bodyContent.Versions))
	for versionStr := range bodyContent.Versions {
		version, err := ParseVersion(versionStr)
		if err != nil {
			// Tolerate unparseable entries rather than failing outright,
			// since other entries may still be usable.
			log.Printf("[WARN] Ignoring invalid %s version string %q in provider mirror response", provider, versionStr)
			continue
		}
		ret = append(ret, version)
	}

	ret.Sort()
	return ret, nil, nil
}

// PackageMeta retrieves metadata for the requested provider package
// from the object's underlying HTTP mirror service.
func (s *HTTPMirrorSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
	log.Printf("[DEBUG] Finding package URL for %s v%s on %s via network mirror %s", provider.String(), version.String(), target.String(), s.baseURL.String())

	// Mirror protocol path: <hostname>/<namespace>/<type>/<version>.json
	endpointPath := path.Join(
		provider.Hostname.String(),
		provider.Namespace,
		provider.Type,
		version.String()+".json",
	)

	statusCode, body, finalURL, err := s.get(ctx, endpointPath)
	defer func() {
		// body is non-nil only when get returned 200 OK with a JSON payload.
		if body != nil {
			body.Close()
		}
	}()
	if err != nil {
		return PackageMeta{}, s.errQueryFailed(provider, err)
	}

	switch statusCode {
	case http.StatusOK:
		// Great!
	case http.StatusNotFound:
		// A 404 Not Found for a version we previously saw in index.json is
		// a protocol error, so we'll report this as "query failed".
		return PackageMeta{}, s.errQueryFailed(provider, fmt.Errorf("provider mirror does not have archive index for previously-reported %s version %s", provider, version))
	case http.StatusUnauthorized, http.StatusForbidden:
		return PackageMeta{}, s.errUnauthorized(finalURL)
	default:
		return PackageMeta{}, s.errQueryFailed(provider, fmt.Errorf("server returned unsuccessful status %d", statusCode))
	}

	// If we got here then the response had status OK and so our body
	// will be non-nil and should contain some JSON for us to parse.
	type ResponseArchiveMeta struct {
		RelativeURL string `json:"url"`
		Hashes      []string
	}
	type ResponseBody struct {
		Archives map[string]*ResponseArchiveMeta `json:"archives"`
	}
	var bodyContent ResponseBody

	dec := json.NewDecoder(body)
	if err := dec.Decode(&bodyContent); err != nil {
		return PackageMeta{}, s.errQueryFailed(provider, fmt.Errorf("invalid response content from mirror server: %w", err))
	}

	archiveMeta, ok := bodyContent.Archives[target.String()]
	if !ok {
		return PackageMeta{}, ErrPlatformNotSupported{
			Provider:  provider,
			Version:   version,
			Platform:  target,
			MirrorURL: s.baseURL,
		}
	}

	relURL, err := url.Parse(archiveMeta.RelativeURL)
	if err != nil {
		return PackageMeta{}, s.errQueryFailed(
			provider,
			fmt.Errorf("provider mirror returned invalid URL %q: %w", archiveMeta.RelativeURL, err),
		)
	}
	// Resolve against the final (post-redirect) URL so a relative archive
	// path is interpreted from wherever the index was actually served.
	absURL := finalURL.ResolveReference(relURL)

	ret := PackageMeta{
		Provider:       provider,
		Version:        version,
		TargetPlatform: target,

		Location: PackageHTTPURL(absURL.String()),
		Filename: path.Base(absURL.Path),
	}
	// A network mirror might not provide any hashes at all, in which case
	// the package has no source-defined authentication whatsoever.
	if len(archiveMeta.Hashes) > 0 {
		hashes := make([]Hash, 0, len(archiveMeta.Hashes))
		for _, hashStr := range archiveMeta.Hashes {
			hash, err := ParseHash(hashStr)
			if err != nil {
				return PackageMeta{}, s.errQueryFailed(
					provider,
					fmt.Errorf("provider mirror returned invalid provider hash %q: %w", hashStr, err),
				)
			}
			hashes = append(hashes, hash)
		}
		ret.Authentication = NewPackageHashAuthentication(target, hashes)
	}

	return ret, nil
}

// ForDisplay returns a string description of the source for user-facing output.
+func (s *HTTPMirrorSource) ForDisplay(provider addrs.Provider) string { + return "provider mirror at " + s.baseURL.String() +} + +// mirrorHost extracts the hostname portion of the configured base URL and +// returns it as a svchost.Hostname, normalized in the usual ways. +// +// If the returned error is non-nil then the given hostname doesn't comply +// with the IETF RFC 5891 section 5.3 and 5.4 validation rules, and thus cannot +// be interpreted as a valid OpenTofu service host. The IDNA validation errors +// are unfortunately usually not very user-friendly, but they are also +// relatively rare because the IDNA normalization rules are quite tolerant. +func (s *HTTPMirrorSource) mirrorHost() (svchost.Hostname, error) { + return svchostFromURL(s.baseURL) +} + +// mirrorHostCredentials returns the HostCredentials, if any, for the hostname +// included in the mirror base URL. +// +// It might return an error if the mirror base URL is invalid, or if the +// credentials lookup itself fails. +func (s *HTTPMirrorSource) mirrorHostCredentials() (svcauth.HostCredentials, error) { + hostname, err := s.mirrorHost() + if err != nil { + return nil, fmt.Errorf("invalid provider mirror base URL %s: %w", s.baseURL.String(), err) + } + + if s.creds == nil { + // No host-specific credentials, then. + return nil, nil + } + + return s.creds.ForHost(hostname) +} + +// get is the shared functionality for querying a JSON index from a mirror. +// +// It only handles the raw HTTP request. The "body" return value is the +// reader from the response if and only if the response status code is 200 OK +// and the Content-Type is application/json. In all other cases it's nil. +// If body is non-nil then the caller must close it after reading it. +// +// If the "finalURL" return value is not empty then it's the URL that actually +// produced the returned response, possibly after following some redirects. 
+func (s *HTTPMirrorSource) get(ctx context.Context, relativePath string) (statusCode int, body io.ReadCloser, finalURL *url.URL, error error) { + endpointPath, err := url.Parse(relativePath) + if err != nil { + // Should never happen because the caller should validate all of the + // components it's including in the path. + return 0, nil, nil, err + } + endpointURL := s.baseURL.ResolveReference(endpointPath) + + req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil) + if err != nil { + return 0, nil, endpointURL, err + } + req = req.WithContext(ctx) + req.Request.Header.Set(terraformVersionHeader, version.String()) + creds, err := s.mirrorHostCredentials() + if err != nil { + return 0, nil, endpointURL, fmt.Errorf("failed to determine request credentials: %w", err) + } + if creds != nil { + // Note that if the initial requests gets redirected elsewhere + // then the credentials will still be included in the new request, + // even if they are on a different hostname. This is intentional + // and consistent with how we handle credentials for other + // OpenTofu-native services, because the user model is to configure + // credentials for the "friendly hostname" they configured, not for + // whatever hostname ends up ultimately serving the request as an + // implementation detail. + creds.PrepareRequest(req.Request) + } + + resp, err := s.httpClient.Do(req) + if err != nil { + return 0, nil, endpointURL, err + } + defer func() { + // If we're not returning the body then we'll close it + // before we return. + if body == nil { + resp.Body.Close() + } + }() + // After this point, our final URL return value should always be the + // one from resp.Request, because that takes into account any redirects + // we followed along the way. + finalURL = resp.Request.URL + + if resp.StatusCode == http.StatusOK { + // If and only if we get an OK response, we'll check that the response + // type is JSON and return the body reader. 
+ ct := resp.Header.Get("Content-Type") + mt, params, err := mime.ParseMediaType(ct) + if err != nil { + return 0, nil, finalURL, fmt.Errorf("response has invalid Content-Type: %w", err) + } + if mt != "application/json" { + return 0, nil, finalURL, fmt.Errorf("response has invalid Content-Type: must be application/json") + } + for name := range params { + // The application/json content-type has no defined parameters, + // but some servers are configured to include a redundant "charset" + // parameter anyway, presumably out of a sense of completeness. + // We'll ignore them but warn that we're ignoring them in case the + // subsequent parsing fails due to the server trying to use an + // unsupported character encoding. (RFC 7159 defines its own + // JSON-specific character encoding rules.) + log.Printf("[WARN] Network mirror returned %q as part of its JSON content type, which is not defined. Ignoring.", name) + } + body = resp.Body + } + + return resp.StatusCode, body, finalURL, nil +} + +func (s *HTTPMirrorSource) errQueryFailed(provider addrs.Provider, err error) error { + if err == context.Canceled { + // This one has a special error type so that callers can + // handle it in a different way. + return ErrRequestCanceled{} + } + return ErrQueryFailed{ + Provider: provider, + Wrapped: err, + MirrorURL: s.baseURL, + } +} + +func (s *HTTPMirrorSource) errUnauthorized(finalURL *url.URL) error { + hostname, err := svchostFromURL(finalURL) + if err != nil { + // Again, weird but we'll tolerate it. + return fmt.Errorf("invalid credentials for %s", finalURL) + } + + return ErrUnauthorized{ + Hostname: hostname, + + // We can't easily tell from here whether we had credentials or + // not, so for now we'll just assume we did because "host rejected + // the given credentials" is, hopefully, still understandable in + // the event that there were none. 
(If this ends up being confusing + // in practice then we'll need to do some refactoring of how + // we handle credentials in this source.) + HaveCredentials: true, + } +} + +func svchostFromURL(u *url.URL) (svchost.Hostname, error) { + raw := u.Host + + // When "friendly hostnames" appear in OpenTofu-specific identifiers we + // typically constrain their syntax more strictly than the + // Internationalized Domain Name specifications call for, such as + // forbidding direct use of punycode, but in this case we're just + // working with a standard http: or https: URL and so we'll first use the + // IDNA "lookup" rules directly, with no additional notational constraints, + // to effectively normalize away the differences that would normally + // produce an error. + var portPortion string + if colonPos := strings.Index(raw, ":"); colonPos != -1 { + raw, portPortion = raw[:colonPos], raw[colonPos:] + } + // HTTPMirrorSource requires all URLs to be https URLs, because running + // a network mirror over HTTP would potentially transmit any configured + // credentials in cleartext. Therefore we don't need to do any special + // handling of default ports here, because svchost.Hostname already + // considers the absense of a port to represent the standard HTTPS port + // 443, and will normalize away an explicit specification of port 443 + // in svchost.ForComparison below. + + normalized, err := idna.Display.ToUnicode(raw) + if err != nil { + return svchost.Hostname(""), err + } + + // If ToUnicode succeeded above then "normalized" is now a hostname in the + // normalized IDNA form, with any direct punycode already interpreted and + // the case folding and other normalization rules applied. It should + // therefore now be accepted by svchost.ForComparison with no additional + // errors, but the port portion can still potentially be invalid. 
+ return svchost.ForComparison(normalized + portPortion) +} diff --git a/pkg/getproviders/http_mirror_source_test.go b/pkg/getproviders/http_mirror_source_test.go new file mode 100644 index 00000000000..8d8cd7ed0a8 --- /dev/null +++ b/pkg/getproviders/http_mirror_source_test.go @@ -0,0 +1,329 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" + svcauth "github.com/hashicorp/terraform-svchost/auth" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestHTTPMirrorSource(t *testing.T) { + // For mirrors we require a HTTPS server, so we'll use httptest to create + // one. However, that means we need to instantiate the source in an unusual + // way to force it to use the test client that is configured to trust the + // test server. 
+ httpServer := httptest.NewTLSServer(http.HandlerFunc(testHTTPMirrorSourceHandler)) + defer httpServer.Close() + httpClient := httpServer.Client() + baseURL, err := url.Parse(httpServer.URL) + if err != nil { + t.Fatalf("httptest.NewTLSServer returned a server with an invalid URL") + } + creds := svcauth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + svchost.Hostname(baseURL.Host): { + "token": "placeholder-token", + }, + }) + source := newHTTPMirrorSourceWithHTTPClient(baseURL, creds, httpClient) + + existingProvider := addrs.MustParseProviderSourceString("terraform.io/test/exists") + missingProvider := addrs.MustParseProviderSourceString("terraform.io/test/missing") + failingProvider := addrs.MustParseProviderSourceString("terraform.io/test/fails") + redirectingProvider := addrs.MustParseProviderSourceString("terraform.io/test/redirects") + redirectLoopProvider := addrs.MustParseProviderSourceString("terraform.io/test/redirect-loop") + tosPlatform := Platform{OS: "tos", Arch: "m68k"} + + t.Run("AvailableVersions for provider that exists", func(t *testing.T) { + got, _, err := source.AvailableVersions(context.Background(), existingProvider) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + want := VersionList{ + MustParseVersion("1.0.0"), + MustParseVersion("1.0.1"), + MustParseVersion("1.0.2-beta.1"), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("AvailableVersions for provider that doesn't exist", func(t *testing.T) { + _, _, err := source.AvailableVersions(context.Background(), missingProvider) + switch err := err.(type) { + case ErrProviderNotFound: + if got, want := err.Provider, missingProvider; got != want { + t.Errorf("wrong provider in error\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatalf("wrong error type %T; want ErrProviderNotFound", err) + } + }) + t.Run("AvailableVersions without required credentials", func(t *testing.T) { + unauthSource := 
newHTTPMirrorSourceWithHTTPClient(baseURL, nil, httpClient) + _, _, err := unauthSource.AvailableVersions(context.Background(), existingProvider) + switch err := err.(type) { + case ErrUnauthorized: + if got, want := string(err.Hostname), baseURL.Host; got != want { + t.Errorf("wrong hostname in error\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatalf("wrong error type %T; want ErrUnauthorized", err) + } + }) + t.Run("AvailableVersions when the response is a server error", func(t *testing.T) { + _, _, err := source.AvailableVersions(context.Background(), failingProvider) + switch err := err.(type) { + case ErrQueryFailed: + if got, want := err.Provider, failingProvider; got != want { + t.Errorf("wrong provider in error\ngot: %s\nwant: %s", got, want) + } + if err.MirrorURL != source.baseURL { + t.Errorf("error does not refer to the mirror URL") + } + default: + t.Fatalf("wrong error type %T; want ErrQueryFailed", err) + } + }) + t.Run("AvailableVersions for provider that redirects", func(t *testing.T) { + got, _, err := source.AvailableVersions(context.Background(), redirectingProvider) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + want := VersionList{ + MustParseVersion("1.0.0"), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("AvailableVersions for provider that redirects too much", func(t *testing.T) { + _, _, err := source.AvailableVersions(context.Background(), redirectLoopProvider) + if err == nil { + t.Fatalf("succeeded; expected error") + } + }) + t.Run("PackageMeta for a version that exists and has a hash", func(t *testing.T) { + version := MustParseVersion("1.0.0") + got, err := source.PackageMeta(context.Background(), existingProvider, version, tosPlatform) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + want := PackageMeta{ + Provider: existingProvider, + Version: version, + TargetPlatform: tosPlatform, + Filename: 
"terraform-provider-test_v1.0.0_tos_m68k.zip", + Location: PackageHTTPURL(httpServer.URL + "/terraform.io/test/exists/terraform-provider-test_v1.0.0_tos_m68k.zip"), + Authentication: packageHashAuthentication{ + RequiredHashes: []Hash{"h1:placeholder-hash"}, + AllHashes: []Hash{"h1:placeholder-hash", "h0:unacceptable-hash"}, + Platform: Platform{"tos", "m68k"}, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + + gotHashes := got.AcceptableHashes() + wantHashes := []Hash{"h1:placeholder-hash", "h0:unacceptable-hash"} + if diff := cmp.Diff(wantHashes, gotHashes); diff != "" { + t.Errorf("wrong acceptable hashes\n%s", diff) + } + }) + t.Run("PackageMeta for a version that exists and has no hash", func(t *testing.T) { + version := MustParseVersion("1.0.1") + got, err := source.PackageMeta(context.Background(), existingProvider, version, tosPlatform) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + want := PackageMeta{ + Provider: existingProvider, + Version: version, + TargetPlatform: tosPlatform, + Filename: "terraform-provider-test_v1.0.1_tos_m68k.zip", + Location: PackageHTTPURL(httpServer.URL + "/terraform.io/test/exists/terraform-provider-test_v1.0.1_tos_m68k.zip"), + Authentication: nil, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("PackageMeta for a version that exists but has no archives", func(t *testing.T) { + version := MustParseVersion("1.0.2-beta.1") + _, err := source.PackageMeta(context.Background(), existingProvider, version, tosPlatform) + switch err := err.(type) { + case ErrPlatformNotSupported: + if got, want := err.Provider, existingProvider; got != want { + t.Errorf("wrong provider in error\ngot: %s\nwant: %s", got, want) + } + if got, want := err.Platform, tosPlatform; got != want { + t.Errorf("wrong platform in error\ngot: %s\nwant: %s", got, want) + } + if err.MirrorURL != source.baseURL { + t.Errorf("error does not 
contain the mirror URL") + } + default: + t.Fatalf("wrong error type %T; want ErrPlatformNotSupported", err) + } + }) + t.Run("PackageMeta with redirect to a version that exists", func(t *testing.T) { + version := MustParseVersion("1.0.0") + got, err := source.PackageMeta(context.Background(), redirectingProvider, version, tosPlatform) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + want := PackageMeta{ + Provider: redirectingProvider, + Version: version, + TargetPlatform: tosPlatform, + Filename: "terraform-provider-test.zip", + + // NOTE: The final URL is interpreted relative to the redirect + // target, not relative to what we originally requested. + Location: PackageHTTPURL(httpServer.URL + "/redirect-target/terraform-provider-test.zip"), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("PackageMeta when the response is a server error", func(t *testing.T) { + version := MustParseVersion("1.0.0") + _, err := source.PackageMeta(context.Background(), failingProvider, version, tosPlatform) + switch err := err.(type) { + case ErrQueryFailed: + if got, want := err.Provider, failingProvider; got != want { + t.Errorf("wrong provider in error\ngot: %s\nwant: %s", got, want) + } + if err.MirrorURL != source.baseURL { + t.Errorf("error does not contain the mirror URL") + } + default: + t.Fatalf("wrong error type %T; want ErrQueryFailed", err) + } + }) +} + +func testHTTPMirrorSourceHandler(resp http.ResponseWriter, req *http.Request) { + if auth := req.Header.Get("authorization"); auth != "Bearer placeholder-token" { + resp.WriteHeader(401) + fmt.Fprintln(resp, "incorrect auth token") + } + + switch req.URL.Path { + case "/terraform.io/test/exists/index.json": + resp.Header().Add("Content-Type", "application/json; ignored=yes") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "versions": { + "1.0.0": {}, + "1.0.1": {}, + "1.0.2-beta.1": {} + } + } + `) + + case 
"/terraform.io/test/fails/index.json", "/terraform.io/test/fails/1.0.0.json": + resp.WriteHeader(500) + fmt.Fprint(resp, "server error") + + case "/terraform.io/test/exists/1.0.0.json": + resp.Header().Add("Content-Type", "application/json; ignored=yes") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "archives": { + "tos_m68k": { + "url": "terraform-provider-test_v1.0.0_tos_m68k.zip", + "hashes": [ + "h1:placeholder-hash", + "h0:unacceptable-hash" + ] + } + } + } + `) + + case "/terraform.io/test/exists/1.0.1.json": + resp.Header().Add("Content-Type", "application/json; ignored=yes") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "archives": { + "tos_m68k": { + "url": "terraform-provider-test_v1.0.1_tos_m68k.zip" + } + } + } + `) + + case "/terraform.io/test/exists/1.0.2-beta.1.json": + resp.Header().Add("Content-Type", "application/json; ignored=yes") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "archives": {} + } + `) + + case "/terraform.io/test/redirects/index.json": + resp.Header().Add("location", "/redirect-target/index.json") + resp.WriteHeader(301) + fmt.Fprint(resp, "redirect") + + case "/redirect-target/index.json": + resp.Header().Add("Content-Type", "application/json") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "versions": { + "1.0.0": {} + } + } + `) + + case "/terraform.io/test/redirects/1.0.0.json": + resp.Header().Add("location", "/redirect-target/1.0.0.json") + resp.WriteHeader(301) + fmt.Fprint(resp, "redirect") + + case "/redirect-target/1.0.0.json": + resp.Header().Add("Content-Type", "application/json") + resp.WriteHeader(200) + fmt.Fprint(resp, ` + { + "archives": { + "tos_m68k": { + "url": "terraform-provider-test.zip" + } + } + } + `) + + case "/terraform.io/test/redirect-loop/index.json": + // This is intentionally redirecting to itself, to create a loop. 
+ resp.Header().Add("location", req.URL.Path) + resp.WriteHeader(301) + fmt.Fprint(resp, "redirect loop") + + default: + resp.WriteHeader(404) + fmt.Fprintln(resp, "not found") + } +} diff --git a/pkg/getproviders/memoize_source.go b/pkg/getproviders/memoize_source.go new file mode 100644 index 00000000000..535f88c66db --- /dev/null +++ b/pkg/getproviders/memoize_source.go @@ -0,0 +1,108 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// MemoizeSource is a Source that wraps another Source and remembers its +// results so that they can be returned more quickly on future calls to the +// same object. +// +// Each MemoizeSource maintains a cache of response it has seen as part of its +// body. All responses are retained for the remaining lifetime of the object. +// Errors from the underlying source are also cached, and so subsequent calls +// with the same arguments will always produce the same errors. +// +// A MemoizeSource can be called concurrently, with incoming requests processed +// sequentially. +type MemoizeSource struct { + underlying Source + mu sync.Mutex + availableVersions map[addrs.Provider]memoizeAvailableVersionsRet + packageMetas map[memoizePackageMetaCall]memoizePackageMetaRet +} + +type memoizeAvailableVersionsRet struct { + VersionList VersionList + Warnings Warnings + Err error +} + +type memoizePackageMetaCall struct { + Provider addrs.Provider + Version Version + Target Platform +} + +type memoizePackageMetaRet struct { + PackageMeta PackageMeta + Err error +} + +var _ Source = (*MemoizeSource)(nil) + +// NewMemoizeSource constructs and returns a new MemoizeSource that wraps +// the given underlying source and memoizes its results. 
+func NewMemoizeSource(underlying Source) *MemoizeSource { + return &MemoizeSource{ + underlying: underlying, + availableVersions: make(map[addrs.Provider]memoizeAvailableVersionsRet), + packageMetas: make(map[memoizePackageMetaCall]memoizePackageMetaRet), + } +} + +// AvailableVersions requests the available versions from the underlying source +// and caches them before returning them, or on subsequent calls returns the +// result directly from the cache. +func (s *MemoizeSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if existing, exists := s.availableVersions[provider]; exists { + return existing.VersionList, nil, existing.Err + } + + ret, warnings, err := s.underlying.AvailableVersions(ctx, provider) + s.availableVersions[provider] = memoizeAvailableVersionsRet{ + VersionList: ret, + Err: err, + Warnings: warnings, + } + return ret, warnings, err +} + +// PackageMeta requests package metadata from the underlying source and caches +// the result before returning it, or on subsequent calls returns the result +// directly from the cache. 
+func (s *MemoizeSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + s.mu.Lock() + defer s.mu.Unlock() + + key := memoizePackageMetaCall{ + Provider: provider, + Version: version, + Target: target, + } + if existing, exists := s.packageMetas[key]; exists { + return existing.PackageMeta, existing.Err + } + + ret, err := s.underlying.PackageMeta(ctx, provider, version, target) + s.packageMetas[key] = memoizePackageMetaRet{ + PackageMeta: ret, + Err: err, + } + return ret, err +} + +func (s *MemoizeSource) ForDisplay(provider addrs.Provider) string { + return s.underlying.ForDisplay(provider) +} diff --git a/pkg/getproviders/memoize_source_test.go b/pkg/getproviders/memoize_source_test.go new file mode 100644 index 00000000000..5978d92d740 --- /dev/null +++ b/pkg/getproviders/memoize_source_test.go @@ -0,0 +1,192 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"context"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/kubegems/opentofu/pkg/addrs"
)

// TestMemoizeSource exercises MemoizeSource against a MockSource, using the
// mock's call log to prove that repeated calls are answered from the cache
// (only one underlying call per distinct argument set), and that errors are
// cached just like successful results.
func TestMemoizeSource(t *testing.T) {
	provider := addrs.NewDefaultProvider("foo")
	version := MustParseVersion("1.0.0")
	protocols := VersionList{MustParseVersion("5.0")}
	platform := Platform{OS: "gameboy", Arch: "lr35902"}
	meta := FakePackageMeta(provider, version, protocols, platform)
	nonexistProvider := addrs.NewDefaultProvider("nonexist")
	nonexistPlatform := Platform{OS: "gamegear", Arch: "z80"}

	t.Run("AvailableVersions for existing provider", func(t *testing.T) {
		mock := NewMockSource([]PackageMeta{meta}, nil)
		source := NewMemoizeSource(mock)

		got, _, err := source.AvailableVersions(context.Background(), provider)
		want := VersionList{version}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from first call to AvailableVersions\n%s", diff)
		}

		got, _, err = source.AvailableVersions(context.Background(), provider)
		want = VersionList{version}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from second call to AvailableVersions\n%s", diff)
		}

		_, _, err = source.AvailableVersions(context.Background(), nonexistProvider)
		if want, ok := err.(ErrRegistryProviderNotKnown); !ok {
			t.Fatalf("wrong error type from nonexist call:\ngot:  %T\nwant: %T", err, want)
		}

		got, _, err = source.AvailableVersions(context.Background(), provider)
		want = VersionList{version}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from third call to AvailableVersions\n%s", diff)
		}

		gotLog := mock.CallLog()
		wantLog := [][]interface{}{
			// Only one call for the main provider, because the others were returned from the cache.
			{"AvailableVersions", provider},

			// The call for nonexist also shows through, because it didn't match the cache.
			{"AvailableVersions", nonexistProvider},
		}
		if diff := cmp.Diff(wantLog, gotLog); diff != "" {
			t.Fatalf("unexpected call log\n%s", diff)
		}
	})
	t.Run("AvailableVersions with warnings", func(t *testing.T) {
		warnProvider := addrs.NewDefaultProvider("warning")
		meta := FakePackageMeta(warnProvider, version, protocols, platform)
		mock := NewMockSource([]PackageMeta{meta}, map[addrs.Provider]Warnings{warnProvider: {"WARNING!"}})
		source := NewMemoizeSource(mock)

		// Note: only the first (uncached) call is expected to carry warnings.
		got, warns, err := source.AvailableVersions(context.Background(), warnProvider)
		want := VersionList{version}
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from first call to AvailableVersions\n%s", diff)
		}
		if len(warns) != 1 {
			t.Fatalf("wrong number of warnings. Got %d, expected 1", len(warns))
		}
		if warns[0] != "WARNING!" {
			t.Fatalf("wrong result! Got %s, expected \"WARNING!\"", warns[0])
		}

	})
	t.Run("PackageMeta for existing provider", func(t *testing.T) {
		mock := NewMockSource([]PackageMeta{meta}, nil)
		source := NewMemoizeSource(mock)

		got, err := source.PackageMeta(context.Background(), provider, version, platform)
		want := meta
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from first call to PackageMeta\n%s", diff)
		}

		got, err = source.PackageMeta(context.Background(), provider, version, platform)
		want = meta
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from second call to PackageMeta\n%s", diff)
		}

		_, err = source.PackageMeta(context.Background(), nonexistProvider, version, platform)
		if want, ok := err.(ErrPlatformNotSupported); !ok {
			t.Fatalf("wrong error type from nonexist provider call:\ngot:  %T\nwant: %T", err, want)
		}
		_, err = source.PackageMeta(context.Background(), provider, version, nonexistPlatform)
		if want, ok := err.(ErrPlatformNotSupported); !ok {
			t.Fatalf("wrong error type from nonexist platform call:\ngot:  %T\nwant: %T", err, want)
		}

		got, err = source.PackageMeta(context.Background(), provider, version, platform)
		want = meta
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Fatalf("wrong result from third call to PackageMeta\n%s", diff)
		}

		gotLog := mock.CallLog()
		wantLog := [][]interface{}{
			// Only one call for the main provider, because the others were returned from the cache.
			{"PackageMeta", provider, version, platform},

			// The other calls for non-exist things also show through, because they missed the cache.
			{"PackageMeta", nonexistProvider, version, platform},
			{"PackageMeta", provider, version, nonexistPlatform},
		}
		if diff := cmp.Diff(wantLog, gotLog); diff != "" {
			t.Fatalf("unexpected call log\n%s", diff)
		}
	})
	t.Run("AvailableVersions for non-existing provider", func(t *testing.T) {
		mock := NewMockSource([]PackageMeta{meta}, nil)
		source := NewMemoizeSource(mock)

		_, _, err := source.AvailableVersions(context.Background(), nonexistProvider)
		if want, ok := err.(ErrRegistryProviderNotKnown); !ok {
			t.Fatalf("wrong error type from first call:\ngot:  %T\nwant: %T", err, want)
		}
		_, _, err = source.AvailableVersions(context.Background(), nonexistProvider)
		if want, ok := err.(ErrRegistryProviderNotKnown); !ok {
			t.Fatalf("wrong error type from second call:\ngot:  %T\nwant: %T", err, want)
		}

		gotLog := mock.CallLog()
		wantLog := [][]interface{}{
			// Only one call, because the other was returned from the cache.
			{"AvailableVersions", nonexistProvider},
		}
		if diff := cmp.Diff(wantLog, gotLog); diff != "" {
			t.Fatalf("unexpected call log\n%s", diff)
		}
	})
	t.Run("PackageMeta for non-existing provider", func(t *testing.T) {
		mock := NewMockSource([]PackageMeta{meta}, nil)
		source := NewMemoizeSource(mock)

		_, err := source.PackageMeta(context.Background(), nonexistProvider, version, platform)
		if want, ok := err.(ErrPlatformNotSupported); !ok {
			t.Fatalf("wrong error type from first call:\ngot:  %T\nwant: %T", err, want)
		}
		_, err = source.PackageMeta(context.Background(), nonexistProvider, version, platform)
		if want, ok := err.(ErrPlatformNotSupported); !ok {
			t.Fatalf("wrong error type from second call:\ngot:  %T\nwant: %T", err, want)
		}

		gotLog := mock.CallLog()
		wantLog := [][]interface{}{
			// Only one call, because the other was returned from the cache.
			{"PackageMeta", nonexistProvider, version, platform},
		}
		if diff := cmp.Diff(wantLog, gotLog); diff != "" {
			t.Fatalf("unexpected call log\n%s", diff)
		}
	})
}
diff --git a/pkg/getproviders/mock_source.go b/pkg/getproviders/mock_source.go
new file mode 100644
index 00000000000..43f1ce20c5c
--- /dev/null
+++ b/pkg/getproviders/mock_source.go
@@ -0,0 +1,216 @@
package getproviders

import (
	"archive/zip"
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"os"

	"github.com/kubegems/opentofu/pkg/addrs"
)

// MockSource is an in-memory-only, statically-configured source intended for
// use only in unit tests of other subsystems that consume provider sources.
//
// The MockSource also tracks calls to it in case a calling test wishes to
// assert that particular calls were made.
//
// This should not be used outside of unit test code.
type MockSource struct {
	// packages is the fixed set of package metadata served by this source.
	packages []PackageMeta
	// warnings maps providers to warnings returned from AvailableVersions.
	warnings map[addrs.Provider]Warnings
	// calls records each method invocation; see CallLog.
	calls [][]interface{}
}

var _ Source = (*MockSource)(nil)

// NewMockSource creates and returns a MockSource with the given packages.
//
// The given packages don't necessarily need to refer to objects that actually
// exist on disk or over the network, unless the calling test is planning to
// use (directly or indirectly) the results for further provider installation
// actions.
func NewMockSource(packages []PackageMeta, warns map[addrs.Provider]Warnings) *MockSource {
	return &MockSource{
		packages: packages,
		warnings: warns,
	}
}

// AvailableVersions returns all of the versions of the given provider that
// are available in the fixed set of packages that were passed to
// NewMockSource when creating the receiving source.
func (s *MockSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) {
	s.calls = append(s.calls, []interface{}{"AvailableVersions", provider})
	var ret VersionList
	for _, pkg := range s.packages {
		if pkg.Provider == provider {
			ret = append(ret, pkg.Version)
		}
	}
	// Surface any warnings configured for this provider, even when the
	// provider itself is unknown.
	var warns []string
	if s.warnings != nil {
		if warnings, ok := s.warnings[provider]; ok {
			warns = warnings
		}
	}
	if len(ret) == 0 {
		// In this case, we'll behave like a registry that doesn't know about
		// this provider at all, rather than just returning an empty result.
		return nil, warns, ErrRegistryProviderNotKnown{provider}
	}
	ret.Sort()
	return ret, warns, nil
}

// PackageMeta returns the first package from the list given to NewMockSource
// when creating the receiver that has the given provider, version, and
// target platform.
//
// If none of the packages match, it returns ErrPlatformNotSupported to
// simulate the situation where a provider release isn't available for a
// particular platform.
//
// Note that if the list of packages passed to NewMockSource contains more
// than one with the same provider, version, and target this function will
// always return the first one in the list, which may not match the behavior
// of other sources in an equivalent situation because it's a degenerate case
// with undefined results.
func (s *MockSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
	s.calls = append(s.calls, []interface{}{"PackageMeta", provider, version, target})

	for _, pkg := range s.packages {
		if pkg.Provider != provider {
			continue
		}
		if pkg.Version != version {
			// (We're using strict equality rather than precedence here,
			// because this is an exact version specification. The caller
			// should consider precedence when selecting a version in the
			// AvailableVersions response, and pass the exact selected
			// version here.)
			continue
		}
		if pkg.TargetPlatform != target {
			continue
		}
		return pkg, nil
	}

	// If we fall out here then nothing matched at all, so we'll treat that
	// as "platform not supported" for consistency with RegistrySource.
	return PackageMeta{}, ErrPlatformNotSupported{
		Provider: provider,
		Version:  version,
		Platform: target,
	}
}

// CallLog returns a list of calls to other methods of the receiever that have
// been called since it was created, in case a calling test wishes to verify
// a particular sequence of operations.
//
// The result is a slice of slices where the first element of each inner slice
// is the name of the method that was called, and then any subsequent elements
// are positional arguments passed to that method.
//
// Callers are forbidden from modifying any objects accessible via the returned
// value.
func (s *MockSource) CallLog() [][]interface{} {
	return s.calls
}

// FakePackageMeta constructs and returns a PackageMeta that carries the given
// metadata but has fake location information that is likely to fail if
// attempting to install from it.
func FakePackageMeta(provider addrs.Provider, version Version, protocols VersionList, target Platform) PackageMeta {
	return PackageMeta{
		Provider:         provider,
		Version:          version,
		ProtocolVersions: protocols,
		TargetPlatform:   target,

		// Some fake but somewhat-realistic-looking other metadata. This
		// points nowhere, so will fail if attempting to actually use it.
		Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), target.String()),
		Location: PackageHTTPURL(fmt.Sprintf("https://fake.invalid/terraform-provider-%s_%s.zip", provider.Type, version.String())),
	}
}

// FakeInstallablePackageMeta constructs and returns a PackageMeta that points
// to a temporary archive file that could actually be installed in principle.
//
// Installing it will not produce a working provider though: just a fake file
// posing as an executable. The filename for the executable defaults to the
// standard terraform-provider-NAME_X.Y.Z format, but can be overridden with
// the execFilename argument.
//
// It's the caller's responsibility to call the close callback returned
// alongside the result in order to clean up the temporary file. The caller
// should call the callback even if this function returns an error, because
// some error conditions leave a partially-created file on disk.
func FakeInstallablePackageMeta(provider addrs.Provider, version Version, protocols VersionList, target Platform, execFilename string) (PackageMeta, func(), error) {
	f, err := os.CreateTemp("", "tofu-getproviders-fake-package-")
	if err != nil {
		return PackageMeta{}, func() {}, err
	}

	// After this point, all of our return paths should include this as the
	// close callback.
	// NOTE(review): this local shadows the predeclared "close" identifier;
	// harmless here (the builtin is not used in this function) but a rename
	// to e.g. "cleanup" would be clearer.
	close := func() {
		f.Close()
		os.Remove(f.Name())
	}

	if execFilename == "" {
		execFilename = fmt.Sprintf("terraform-provider-%s_%s", provider.Type, version.String())
		if target.OS == "windows" {
			// For a little more (technically unnecessary) realism...
			execFilename += ".exe"
		}
	}

	zw := zip.NewWriter(f)
	fw, err := zw.Create(execFilename)
	if err != nil {
		return PackageMeta{}, close, fmt.Errorf("failed to add %s to mock zip file: %w", execFilename, err)
	}
	fmt.Fprintf(fw, "This is a fake provider package for %s %s, not a real provider.\n", provider, version)
	err = zw.Close()
	if err != nil {
		return PackageMeta{}, close, fmt.Errorf("failed to close the mock zip file: %w", err)
	}

	// Compute the SHA256 checksum of the generated file, to allow package
	// authentication code to be exercised.
	// NOTE(review): the Seek and Copy errors are deliberately ignored here,
	// test-helper best-effort style; a failure would surface later as an
	// authentication mismatch.
	f.Seek(0, io.SeekStart)
	h := sha256.New()
	io.Copy(h, f)
	checksum := [32]byte{}
	h.Sum(checksum[:0])

	meta := PackageMeta{
		Provider:         provider,
		Version:          version,
		ProtocolVersions: protocols,
		TargetPlatform:   target,

		Location: PackageLocalArchive(f.Name()),

		// This is a fake filename that mimics what a real registry might
		// indicate as a good filename for this package, in case some caller
		// intends to use it to name a local copy of the temporary file.
		// (At the time of writing, no caller actually does that, but who
		// knows what the future holds?)
		Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", provider.Type, version.String(), target.String()),

		Authentication: NewArchiveChecksumAuthentication(target, checksum),
	}
	return meta, close, nil
}

// ForDisplay returns a fixed display name for this mock source.
func (s *MockSource) ForDisplay(provider addrs.Provider) string {
	return "mock source"
}
diff --git a/pkg/getproviders/multi_source.go b/pkg/getproviders/multi_source.go
new file mode 100644
index 00000000000..f234f6e0070
--- /dev/null
+++ b/pkg/getproviders/multi_source.go
@@ -0,0 +1,261 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"context"
	"fmt"
	"strings"

	svchost "github.com/hashicorp/terraform-svchost"

	"github.com/kubegems/opentofu/pkg/addrs"
)

// MultiSource is a Source that wraps a series of other sources and combines
// their sets of available providers and provider versions.
//
// A MultiSource consists of a sequence of selectors that each specify an
// underlying source to query and a set of matching patterns to decide which
// providers can be retrieved from which sources. If multiple selectors find
// a given provider version then the earliest one in the sequence takes
// priority for deciding the package metadata for the provider.
//
// For underlying sources that make network requests, consider wrapping each
// one in a MemoizeSource so that availability information retrieved in
// AvailableVersions can be reused in PackageMeta.
type MultiSource []MultiSourceSelector

var _ Source = MultiSource(nil)

// AvailableVersions retrieves all of the versions of the given provider
// that are available across all of the underlying selectors, while respecting
// each selector's matching patterns.
func (s MultiSource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) {
	if len(s) == 0 { // Easy case: there can be no available versions
		return nil, nil, nil
	}

	// We will return the union of all versions reported by the nested
	// sources that have matching patterns that accept the given provider.
	vs := make(map[Version]struct{})
	var registryError bool
	var warnings []string
	for _, selector := range s {
		if !selector.CanHandleProvider(provider) {
			continue // doesn't match the given patterns
		}
		thisSourceVersions, warningsResp, err := selector.Source.AvailableVersions(ctx, provider)
		switch err.(type) {
		case nil:
			// okay
		case ErrRegistryProviderNotKnown:
			// Remember that at least one registry didn't know this provider,
			// so the final "not found" error can be registry-flavored below.
			registryError = true
			continue // ignore, then
		case ErrProviderNotFound:
			continue // ignore, then
		default:
			// Any other error is fatal for the whole query.
			return nil, nil, err
		}
		for _, v := range thisSourceVersions {
			vs[v] = struct{}{}
		}
		if len(warningsResp) > 0 {
			warnings = append(warnings, warningsResp...)
		}
	}

	if len(vs) == 0 {
		// No source had the provider at all. Prefer the registry-specific
		// error if any registry source reported one.
		if registryError {
			return nil, nil, ErrRegistryProviderNotKnown{provider}
		} else {
			return nil, nil, ErrProviderNotFound{provider, s.sourcesForProvider(provider)}
		}
	}
	ret := make(VersionList, 0, len(vs))
	for v := range vs {
		ret = append(ret, v)
	}
	ret.Sort()

	return ret, warnings, nil
}

// PackageMeta retrieves the package metadata for the requested provider package
// from the first selector that indicates availability of it.
func (s MultiSource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) {
	if len(s) == 0 { // Easy case: no providers exist at all
		return PackageMeta{}, ErrProviderNotFound{provider, s.sourcesForProvider(provider)}
	}

	for _, selector := range s {
		if !selector.CanHandleProvider(provider) {
			continue // doesn't match the given patterns
		}
		meta, err := selector.Source.PackageMeta(ctx, provider, version, target)
		switch err.(type) {
		case nil:
			// First source that has the package wins.
			return meta, nil
		case ErrProviderNotFound, ErrRegistryProviderNotKnown, ErrPlatformNotSupported:
			continue // ignore, then
		default:
			return PackageMeta{}, err
		}
	}

	// If we fall out here then none of the sources have the requested
	// package. We assume the caller already validated provider/version via
	// AvailableVersions, so the missing piece must be platform support.
	return PackageMeta{}, ErrPlatformNotSupported{
		Provider: provider,
		Version:  version,
		Platform: target,
	}
}

// MultiSourceSelector is an element of the source selection configuration on
// MultiSource. A MultiSource has zero or more of these to configure which
// underlying sources it should consult for a given provider.
type MultiSourceSelector struct {
	// Source is the underlying source that this selector applies to.
	Source Source

	// Include and Exclude are sets of provider matching patterns that
	// together define which providers are eligible to be potentially
	// installed from the corresponding Source.
	Include, Exclude MultiSourceMatchingPatterns
}

// MultiSourceMatchingPatterns is a set of patterns that together define a
// set of providers by matching on the segments of the provider FQNs.
//
// The Provider address values in a MultiSourceMatchingPatterns are special in
// that any of Hostname, Namespace, or Type can be getproviders.Wildcard
// to indicate that any concrete value is permitted for that segment.
type MultiSourceMatchingPatterns []addrs.Provider

// ParseMultiSourceMatchingPatterns parses a slice of strings containing the
// string form of provider matching patterns and, if all the given strings are
// valid, returns the corresponding, normalized, MultiSourceMatchingPatterns
// value.
+func ParseMultiSourceMatchingPatterns(strs []string) (MultiSourceMatchingPatterns, error) { + if len(strs) == 0 { + return nil, nil + } + + ret := make(MultiSourceMatchingPatterns, len(strs)) + for i, str := range strs { + parts := strings.Split(str, "/") + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("invalid provider matching pattern %q: must have either two or three slash-separated segments", str) + } + host := defaultRegistryHost + explicitHost := len(parts) == 3 + if explicitHost { + givenHost := parts[0] + if givenHost == "*" { + host = svchost.Hostname(Wildcard) + } else { + normalHost, err := svchost.ForComparison(givenHost) + if err != nil { + return nil, fmt.Errorf("invalid hostname in provider matching pattern %q: %w", str, err) + } + + // The remaining code below deals only with the namespace/type portions. + host = normalHost + } + + parts = parts[1:] + } + + pType, err := normalizeProviderNameOrWildcard(parts[1]) + if err != nil { + return nil, fmt.Errorf("invalid provider type %q in provider matching pattern %q: must either be the wildcard * or a provider type name", parts[1], str) + } + namespace, err := normalizeProviderNameOrWildcard(parts[0]) + if err != nil { + return nil, fmt.Errorf("invalid registry namespace %q in provider matching pattern %q: must either be the wildcard * or a literal namespace", parts[1], str) + } + + ret[i] = addrs.Provider{ + Hostname: host, + Namespace: namespace, + Type: pType, + } + + if ret[i].Hostname == svchost.Hostname(Wildcard) && !(ret[i].Namespace == Wildcard && ret[i].Type == Wildcard) { + return nil, fmt.Errorf("invalid provider matching pattern %q: hostname can be a wildcard only if both namespace and provider type are also wildcards", str) + } + if ret[i].Namespace == Wildcard && ret[i].Type != Wildcard { + return nil, fmt.Errorf("invalid provider matching pattern %q: namespace can be a wildcard only if the provider type is also a wildcard", str) + } + } + return ret, nil +} + +// 
CanHandleProvider returns true if and only if the given provider address +// is both included by the selector's include patterns and _not_ excluded +// by its exclude patterns. +// +// The absense of any include patterns is treated the same as a pattern +// that matches all addresses. Exclusions take priority over inclusions. +func (s MultiSourceSelector) CanHandleProvider(addr addrs.Provider) bool { + switch { + case s.Exclude.MatchesProvider(addr): + return false + case len(s.Include) > 0: + return s.Include.MatchesProvider(addr) + default: + return true + } +} + +// MatchesProvider tests whether the receiving matching patterns match with +// the given concrete provider address. +func (ps MultiSourceMatchingPatterns) MatchesProvider(addr addrs.Provider) bool { + for _, pattern := range ps { + hostMatch := (pattern.Hostname == svchost.Hostname(Wildcard) || pattern.Hostname == addr.Hostname) + namespaceMatch := (pattern.Namespace == Wildcard || pattern.Namespace == addr.Namespace) + typeMatch := (pattern.Type == Wildcard || pattern.Type == addr.Type) + if hostMatch && namespaceMatch && typeMatch { + return true + } + } + return false +} + +// Wildcard is a string value representing a wildcard element in the Include +// and Exclude patterns used with MultiSource. It is not valid to use Wildcard +// anywhere else. +const Wildcard string = "*" + +// We'll read the default registry host from over in the addrs package, to +// avoid duplicating it. A "default" provider uses the default registry host +// by definition. 
+var defaultRegistryHost = addrs.DefaultProviderRegistryHost + +func normalizeProviderNameOrWildcard(s string) (string, error) { + if s == Wildcard { + return s, nil + } + return addrs.ParseProviderPart(s) +} + +func (s MultiSource) ForDisplay(provider addrs.Provider) string { + return strings.Join(s.sourcesForProvider(provider), "\n") +} + +// sourcesForProvider returns a list of source display strings configured for a +// given provider, taking into account any `Exclude` statements. +func (s MultiSource) sourcesForProvider(provider addrs.Provider) []string { + ret := make([]string, 0) + for _, selector := range s { + if !selector.CanHandleProvider(provider) { + continue // doesn't match the given patterns + } + ret = append(ret, selector.Source.ForDisplay(provider)) + } + return ret +} diff --git a/pkg/getproviders/multi_source_test.go b/pkg/getproviders/multi_source_test.go new file mode 100644 index 00000000000..c6119df3868 --- /dev/null +++ b/pkg/getproviders/multi_source_test.go @@ -0,0 +1,554 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"context"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/kubegems/opentofu/pkg/addrs"
)

// TestMultiSourceAvailableVersions exercises MultiSource.AvailableVersions:
// union-merging across sources, include-pattern filtering, the "not found"
// error path, and warning propagation.
func TestMultiSourceAvailableVersions(t *testing.T) {
	platform1 := Platform{OS: "amigaos", Arch: "m68k"}
	platform2 := Platform{OS: "aros", Arch: "arm"}

	t.Run("unfiltered merging", func(t *testing.T) {
		s1 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform2,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform2,
			),
		},
			nil,
		)
		s2 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.2.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
		},
			nil,
		)
		multi := MultiSource{
			{Source: s1},
			{Source: s2},
		}

		// AvailableVersions produces the union of all versions available
		// across all of the sources.
		got, _, err := multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("foo"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		want := VersionList{
			MustParseVersion("1.0.0"),
			MustParseVersion("1.2.0"),
		}

		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}

		_, _, err = multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("baz"))
		if want, ok := err.(ErrRegistryProviderNotKnown); !ok {
			t.Fatalf("wrong error type:\ngot:  %T\nwant: %T", err, want)
		}
	})

	t.Run("merging with filters", func(t *testing.T) {
		// This is just testing that filters are being honored at all, using a
		// specific pair of filters. The different filter combinations
		// themselves are tested in TestMultiSourceSelector.

		s1 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
		},
			nil,
		)
		s2 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("foo"),
				MustParseVersion("1.2.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.2.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
		},
			nil,
		)
		multi := MultiSource{
			{
				Source:  s1,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			{
				Source:  s2,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/bar"),
			},
		}

		got, _, err := multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("foo"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		want := VersionList{
			MustParseVersion("1.0.0"),
			// 1.2.0 isn't present because s2's include pattern doesn't
			// accept "foo".
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}

		got, _, err = multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("bar"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		want = VersionList{
			MustParseVersion("1.0.0"),
			MustParseVersion("1.2.0"), // included because s2 matches "bar"
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}

		_, _, err = multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("baz"))
		if want, ok := err.(ErrRegistryProviderNotKnown); !ok {
			t.Fatalf("wrong error type:\ngot:  %T\nwant: %T", err, want)
		}
	})

	t.Run("provider not found", func(t *testing.T) {
		s1 := NewMockSource(nil, nil)
		s2 := NewMockSource(nil, nil)
		multi := MultiSource{
			{Source: s1},
			{Source: s2},
		}

		_, _, err := multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("foo"))
		if err == nil {
			t.Fatal("expected error, got success")
		}

		wantErr := `provider registry registry.opentofu.org does not have a provider named registry.opentofu.org/hashicorp/foo`

		if err.Error() != wantErr {
			t.Fatalf("wrong error.\ngot:  %s\nwant: %s\n", err, wantErr)
		}

	})

	t.Run("merging with warnings", func(t *testing.T) {
		platform1 := Platform{OS: "amigaos", Arch: "m68k"}
		platform2 := Platform{OS: "aros", Arch: "arm"}
		s1 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform2,
			),
		},
			map[addrs.Provider]Warnings{
				addrs.NewDefaultProvider("bar"): {"WARNING!"},
			},
		)
		s2 := NewMockSource([]PackageMeta{
			FakePackageMeta(
				addrs.NewDefaultProvider("bar"),
				MustParseVersion("1.0.0"),
				VersionList{MustParseVersion("5.0")},
				platform1,
			),
		},
			nil,
		)
		multi := MultiSource{
			{Source: s1},
			{Source: s2},
		}

		// AvailableVersions produces the union of all versions available
		// across all of the sources.
		got, warns, err := multi.AvailableVersions(context.Background(), addrs.NewDefaultProvider("bar"))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		want := VersionList{
			MustParseVersion("1.0.0"),
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}

		if len(warns) != 1 {
			t.Fatalf("wrong number of warnings. Got %d, wanted 1", len(warns))
		}
		if warns[0] != "WARNING!" {
			t.Fatalf("wrong warnings. Got %s, wanted \"WARNING!\"", warns[0])
		}
	})
}

// TestMultiSourcePackageMeta exercises MultiSource.PackageMeta: selection
// from a single source, earliest-source priority when a package exists in
// several sources, and the fallback error when no source has it.
func TestMultiSourcePackageMeta(t *testing.T) {
	platform1 := Platform{OS: "amigaos", Arch: "m68k"}
	platform2 := Platform{OS: "aros", Arch: "arm"}

	// We'll use the Filename field of the fake PackageMetas we created above
	// to create a difference between the packages in s1 and the ones in s2,
	// so we can test where individual packages came from below.
	fakeFilename := func(fn string, meta PackageMeta) PackageMeta {
		meta.Filename = fn
		return meta
	}

	onlyInS1 := fakeFilename("s1", FakePackageMeta(
		addrs.NewDefaultProvider("foo"),
		MustParseVersion("1.0.0"),
		VersionList{MustParseVersion("5.0")},
		platform2,
	))
	onlyInS2 := fakeFilename("s2", FakePackageMeta(
		addrs.NewDefaultProvider("foo"),
		MustParseVersion("1.2.0"),
		VersionList{MustParseVersion("5.0")},
		platform1,
	))
	inBothS1 := fakeFilename("s1", FakePackageMeta(
		addrs.NewDefaultProvider("foo"),
		MustParseVersion("1.0.0"),
		VersionList{MustParseVersion("5.0")},
		platform1,
	))
	inBothS2 := fakeFilename("s2", inBothS1)
	s1 := NewMockSource([]PackageMeta{
		inBothS1,
		onlyInS1,
		fakeFilename("s1", FakePackageMeta(
			addrs.NewDefaultProvider("bar"),
			MustParseVersion("1.0.0"),
			VersionList{MustParseVersion("5.0")},
			platform2,
		)),
	},
		nil,
	)
	s2 := NewMockSource([]PackageMeta{
		inBothS2,
		onlyInS2,
		fakeFilename("s2", FakePackageMeta(
			addrs.NewDefaultProvider("bar"),
			MustParseVersion("1.0.0"),
			VersionList{MustParseVersion("5.0")},
			platform1,
		)),
	}, nil)
	multi := MultiSource{
		{Source: s1},
		{Source: s2},
	}

	t.Run("only in s1", func(t *testing.T) {
		got, err := multi.PackageMeta(
			context.Background(),
			addrs.NewDefaultProvider("foo"),
			MustParseVersion("1.0.0"),
			platform2,
		)
		want := onlyInS1
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}
	})
	t.Run("only in s2", func(t *testing.T) {
		got, err := multi.PackageMeta(
			context.Background(),
			addrs.NewDefaultProvider("foo"),
			MustParseVersion("1.2.0"),
			platform1,
		)
		want := onlyInS2
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}
	})
	t.Run("in both", func(t *testing.T) {
		got, err := multi.PackageMeta(
			context.Background(),
			addrs.NewDefaultProvider("foo"),
			MustParseVersion("1.0.0"),
			platform1,
		)
		want := inBothS1 // S1 "wins" because it's earlier in the MultiSource
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("wrong result\n%s", diff)
		}

		// Make sure inBothS1 and inBothS2 really are different; if not then
		// that's a test bug which we'd rather catch than have this test
		// accidentally passing without actually checking anything.
		if diff := cmp.Diff(inBothS1, inBothS2); diff == "" {
			t.Fatalf("test bug: inBothS1 and inBothS2 are indistinguishable")
		}
	})
	t.Run("in neither", func(t *testing.T) {
		_, err := multi.PackageMeta(
			context.Background(),
			addrs.NewDefaultProvider("nonexist"),
			MustParseVersion("1.0.0"),
			platform1,
		)
		// This case reports "platform not supported" because it assumes that
		// a caller would only pass to it package versions that were returned
		// by a previous call to AvailableVersions, and therefore a missing
		// object ought to be valid provider/version but an unsupported
		// platform.
		if want, ok := err.(ErrPlatformNotSupported); !ok {
			t.Fatalf("wrong error type:\ngot:  %T\nwant: %T", err, want)
		}
	})
}

// TestMultiSourceSelector is a table-driven test of the include/exclude
// pattern matching performed by MultiSourceSelector.CanHandleProvider.
func TestMultiSourceSelector(t *testing.T) {
	emptySource := NewMockSource(nil, nil)

	tests := map[string]struct {
		Selector  MultiSourceSelector
		Provider  addrs.Provider
		WantMatch bool
	}{
		"default provider with no constraints": {
			MultiSourceSelector{
				Source: emptySource,
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"built-in provider with no constraints": {
			MultiSourceSelector{
				Source: emptySource,
			},
			addrs.NewBuiltInProvider("bar"),
			true,
		},

		// Include constraints
		"default provider with include constraint that matches it exactly": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/foo"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"default provider with include constraint that matches it via type wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"default provider with include constraint that matches it via namespace wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("*/*"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"default provider with non-normalized include constraint that matches it via type wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("HashiCorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"built-in provider with exact include constraint that does not match it": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/foo"),
			},
			addrs.NewBuiltInProvider("bar"),
			false,
		},
		"built-in provider with type-wild include constraint that does not match it": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			addrs.NewBuiltInProvider("bar"),
			false,
		},
		"built-in provider with namespace-wild include constraint that does not match it": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("*/*"),
			},
			// Doesn't match because builtin providers are in "terraform.io",
			// but a pattern with no hostname is for registry.opentofu.org.
			addrs.NewBuiltInProvider("bar"),
			false,
		},
		"built-in provider with include constraint that matches it via type wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("terraform.io/builtin/*"),
			},
			addrs.NewBuiltInProvider("bar"),
			true,
		},

		// Exclude constraints
		"default provider with exclude constraint that matches it exactly": {
			MultiSourceSelector{
				Source:  emptySource,
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/foo"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},
		"default provider with exclude constraint that matches it via type wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},
		"default provider with exact exclude constraint that doesn't match it": {
			MultiSourceSelector{
				Source:  emptySource,
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/bar"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
		"default provider with non-normalized exclude constraint that matches it via type wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Exclude: mustParseMultiSourceMatchingPatterns("HashiCorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},

		// Both include and exclude in a single selector
		"default provider with exclude wildcard overriding include exact": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/foo"),
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},
		"default provider with exclude wildcard overriding irrelevant include exact": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/bar"),
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},
		"default provider with exclude exact overriding include wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/foo"),
			},
			addrs.NewDefaultProvider("foo"),
			false,
		},
		"default provider with irrelevant exclude exact overriding include wildcard": {
			MultiSourceSelector{
				Source:  emptySource,
				Include: mustParseMultiSourceMatchingPatterns("hashicorp/*"),
				Exclude: mustParseMultiSourceMatchingPatterns("hashicorp/bar"),
			},
			addrs.NewDefaultProvider("foo"),
			true,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			t.Logf("include: %s", test.Selector.Include)
			t.Logf("exclude: %s", test.Selector.Exclude)
			t.Logf("provider: %s", test.Provider)
			got := test.Selector.CanHandleProvider(test.Provider)
			want := test.WantMatch
			if got != want {
				t.Errorf("wrong result %t; want %t", got, want)
			}
		})
	}
}

// mustParseMultiSourceMatchingPatterns is a test helper that panics on
// invalid patterns, for use with known-good literals in the tests above.
func mustParseMultiSourceMatchingPatterns(strs ...string) MultiSourceMatchingPatterns {
	ret, err := ParseMultiSourceMatchingPatterns(strs)
	if err != nil {
		panic(err)
	}
	return ret
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"bufio"
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"

	"log"
	"os"
	"strings"

	"github.com/ProtonMail/go-crypto/openpgp"
	openpgpErrors "github.com/ProtonMail/go-crypto/openpgp/errors"
	tfaddr "github.com/opentofu/registry-address"
)

// packageAuthenticationResult enumerates the possible outcomes of a
// successful package authentication.
type packageAuthenticationResult int

const (
	verifiedChecksum packageAuthenticationResult = iota
	signed
	signingSkipped
)

// Environment variable names controlling GPG validation behavior.
const (
	enforceGPGValidationEnvName = "OPENTOFU_ENFORCE_GPG_VALIDATION"
	enforceGPGExpirationEnvName = "OPENTOFU_ENFORCE_GPG_EXPIRATION"
)

// PackageAuthenticationResult is returned from a PackageAuthentication
// implementation. It is a mostly-opaque type intended for use in UI, which
// implements Stringer.
//
// A failed PackageAuthentication attempt will return an "unauthenticated"
// result, which is represented by nil.
type PackageAuthenticationResult struct {
	result packageAuthenticationResult
	KeyID  string
}

// String renders the result for UI display; a nil receiver reads as
// "unauthenticated".
func (t *PackageAuthenticationResult) String() string {
	if t == nil {
		return "unauthenticated"
	}
	// Indexed by the packageAuthenticationResult constant values above.
	return []string{
		"verified checksum",
		"signed",
		"signing skipped",
	}[t.result]
}

// Signed returns whether the package was authenticated as signed by anyone.
func (t *PackageAuthenticationResult) Signed() bool {
	if t == nil {
		return false
	}
	return t.result == signed
}

// SigningSkipped returns whether the package was authenticated but the key
// validation was skipped.
func (t *PackageAuthenticationResult) SigningSkipped() bool {
	if t == nil {
		return false
	}
	return t.result == signingSkipped
}

// SigningKey represents a key used to sign packages from a registry. These are
// both in ASCII armored OpenPGP format.
//
// The JSON struct tags represent the field names used by the Registry API.
type SigningKey struct {
	ASCIIArmor string `json:"ascii_armor"`
}

// PackageAuthentication is an interface implemented by the optional package
// authentication implementations a source may include on its PackageMeta
// objects.
//
// A PackageAuthentication implementation is responsible for authenticating
// that a package is what its distributor intended to distribute and that it
// has not been tampered with.
type PackageAuthentication interface {
	// AuthenticatePackage takes the local location of a package (which may or
	// may not be the same as the original source location), and returns a
	// PackageAuthenticationResult, or an error if the authentication checks
	// fail.
	//
	// The local location is guaranteed not to be a PackageHTTPURL: a remote
	// package will always be staged locally for inspection first.
	AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error)
}

// PackageAuthenticationHashes is an optional interface implemented by
// PackageAuthentication implementations that are able to return a set of
// hashes they would consider valid if a given PackageLocation referred to
// a package that matched that hash string.
//
// This can be used to record a set of acceptable hashes for a particular
// package in a lock file so that future install operations can determine
// whether the package has changed since its initial installation.
type PackageAuthenticationHashes interface {
	PackageAuthentication

	// AcceptableHashes returns a set of hashes that this authenticator
	// considers to be valid for the current package or, where possible,
	// equivalent packages on other platforms. The order of the items in
	// the result is not significant, and it may contain duplicates
	// that are also not significant.
	//
	// This method's result should only be used to create a "lock" for a
	// particular provider if an earlier call to AuthenticatePackage for
	// the corresponding package succeeded. A caller might choose to apply
	// differing levels of trust for the acceptable hashes depending on
	// the authentication result: a "verified checksum" result only checked
	// that the downloaded package matched what the source claimed, which
	// could be considered to be less trustworthy than a check that includes
	// verifying a signature from the origin registry, depending on what the
	// hashes are going to be used for.
	//
	// Implementations of PackageAuthenticationHashes may return multiple
	// hashes with different schemes, which means that all of them are equally
	// acceptable. Implementors may also return hashes that use schemes the
	// current version of the authenticator would not allow but that could be
	// accepted by other versions of OpenTofu, e.g. if a particular hash
	// scheme has been deprecated.
	//
	// Authenticators that don't use hashes as their authentication procedure
	// will either not implement this interface or will have an implementation
	// that returns an empty result.
	AcceptableHashes() []Hash
}

type packageAuthenticationAll []PackageAuthentication

// PackageAuthenticationAll combines several authentications together into a
// single check value, which passes only if all of the given ones pass.
//
// The checks are processed in the order given, so a failure of an earlier
// check will prevent execution of a later one.
//
// The returned result is from the last authentication, so callers should
// take care to order the authentications such that the strongest is last.
//
// The returned object also implements the AcceptableHashes method from
// interface PackageAuthenticationHashes, returning the hashes from the
// last of the given checks that indicates at least one acceptable hash,
// or no hashes at all if none of the constituents indicate any. The result
// may therefore be incomplete if there is more than one check that can provide
// hashes and they disagree about which hashes are acceptable.
func PackageAuthenticationAll(checks ...PackageAuthentication) PackageAuthentication {
	return packageAuthenticationAll(checks)
}

// AuthenticatePackage runs each constituent check in order, stopping at the
// first failure; on success it returns the last check's result.
func (checks packageAuthenticationAll) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) {
	var authResult *PackageAuthenticationResult
	for _, check := range checks {
		var err error
		authResult, err = check.AuthenticatePackage(localLocation)
		if err != nil {
			return authResult, err
		}
	}
	return authResult, nil
}

// AcceptableHashes implements PackageAuthenticationHashes for the combined
// checks, preferring the strongest (last-listed) constituent's hashes.
func (checks packageAuthenticationAll) AcceptableHashes() []Hash {
	// The elements of checks are expected to be ordered so that the strongest
	// one is later in the list, so we'll visit them in reverse order and
	// take the first one that implements the interface and returns a non-empty
	// result.
	for i := len(checks) - 1; i >= 0; i-- {
		check, ok := checks[i].(PackageAuthenticationHashes)
		if !ok {
			continue
		}
		allHashes := check.AcceptableHashes()
		if len(allHashes) > 0 {
			return allHashes
		}
	}
	return nil
}

// packageHashAuthentication authenticates a package against a set of known
// acceptable hashes.
type packageHashAuthentication struct {
	RequiredHashes []Hash
	AllHashes      []Hash
	Platform       Platform
}

// NewPackageHashAuthentication returns a PackageAuthentication implementation
// that checks whether the contents of the package match whatever subset of the
// given hashes are considered acceptable by the current version of OpenTofu.
//
// This uses the hash algorithms implemented by functions PackageHash and
+// The PreferredHashes function will select which of the given
+// hashes are considered by OpenTofu to be the strongest verification, and
+// authentication succeeds as long as one of those matches.
+func NewPackageHashAuthentication(platform Platform, validHashes []Hash) PackageAuthentication {
+	requiredHashes := PreferredHashes(validHashes)
+	return packageHashAuthentication{
+		RequiredHashes: requiredHashes,
+		AllHashes:      validHashes,
+		Platform:       platform,
+	}
+}
+
+// AuthenticatePackage verifies that the package at localLocation matches at
+// least one of the hashes this version of OpenTofu prefers from the
+// configured set.
+func (a packageHashAuthentication) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) {
+	if len(a.RequiredHashes) == 0 {
+		// Indicates that none of the hashes given to
+		// NewPackageHashAuthentication were considered to be usable by this
+		// version of OpenTofu.
+		return nil, fmt.Errorf("this version of OpenTofu does not support any of the checksum formats given for this provider")
+	}
+
+	matches, err := PackageMatchesAnyHash(localLocation, a.RequiredHashes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify provider package checksums: %w", err)
+	}
+
+	if matches {
+		return &PackageAuthenticationResult{result: verifiedChecksum}, nil
+	}
+	if len(a.RequiredHashes) == 1 {
+		return nil, fmt.Errorf("provider package doesn't match the expected checksum %q", a.RequiredHashes[0].String())
+	}
+	// It's non-ideal that this doesn't actually list the expected checksums,
+	// but in the many-checksum case the message would get pretty unwieldy.
+	// In practice today we typically use this authenticator only with a
+	// single hash returned from a network mirror, so the better message
+	// above will prevail in that case. Maybe we'll improve on this somehow
+	// if the future introduction of a new hash scheme causes there to more
+	// commonly be multiple hashes.
+	return nil, fmt.Errorf("provider package doesn't match any of the expected checksums")
+}
+
+// AcceptableHashes returns all of the configured hashes, not only the
+// preferred subset.
+func (a packageHashAuthentication) AcceptableHashes() []Hash {
+	// In this case we include even hashes the current version of OpenTofu
+	// doesn't prefer, because this result is used for building a lock file
+	// and so it's helpful to include older hash formats that other OpenTofu
+	// versions might need in order to do authentication successfully.
+	return a.AllHashes
+}
+
+type archiveHashAuthentication struct {
+	Platform      Platform
+	WantSHA256Sum [sha256.Size]byte
+}
+
+// NewArchiveChecksumAuthentication returns a PackageAuthentication
+// implementation that checks that the original distribution archive matches
+// the given hash.
+//
+// This authentication is suitable only for PackageHTTPURL and
+// PackageLocalArchive source locations, because the unpacked layout
+// (represented by PackageLocalDir) does not retain access to the original
+// source archive. Therefore this authenticator will return an error if its
+// given localLocation is not PackageLocalArchive.
+//
+// NewPackageHashAuthentication is preferable to use when possible because
+// it uses the newer hashing scheme (implemented by function PackageHash) that
+// can work with both packed and unpacked provider packages.
+func NewArchiveChecksumAuthentication(platform Platform, wantSHA256Sum [sha256.Size]byte) PackageAuthentication {
+	return archiveHashAuthentication{platform, wantSHA256Sum}
+}
+
+// AuthenticatePackage hashes the archive at localLocation with the legacy
+// zip-SHA256 scheme and compares the result against the expected sum.
+func (a archiveHashAuthentication) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) {
+	archiveLocation, ok := localLocation.(PackageLocalArchive)
+	if !ok {
+		// A source should not use this authentication type for non-archive
+		// locations.
+		return nil, fmt.Errorf("cannot check archive hash for non-archive location %s", localLocation)
+	}
+
+	gotHash, err := PackageHashLegacyZipSHA(archiveLocation)
+	if err != nil {
+		return nil, fmt.Errorf("failed to compute checksum for %s: %w", archiveLocation, err)
+	}
+	wantHash := HashLegacyZipSHAFromSHA(a.WantSHA256Sum)
+	if gotHash != wantHash {
+		return nil, fmt.Errorf("archive has incorrect checksum %s (expected %s)", gotHash, wantHash)
+	}
+	return &PackageAuthenticationResult{result: verifiedChecksum}, nil
+}
+
+// AcceptableHashes returns the single legacy zip-SHA hash this authenticator
+// was configured with.
+func (a archiveHashAuthentication) AcceptableHashes() []Hash {
+	return []Hash{HashLegacyZipSHAFromSHA(a.WantSHA256Sum)}
+}
+
+type matchingChecksumAuthentication struct {
+	Document      []byte
+	Filename      string
+	WantSHA256Sum [sha256.Size]byte
+}
+
+// NewMatchingChecksumAuthentication returns a PackageAuthentication
+// implementation that scans a registry-provided SHA256SUMS document for a
+// specified filename, and compares the SHA256 hash against the expected hash.
+// This is necessary to ensure that the signed SHA256SUMS document matches the
+// declared SHA256 hash for the package, and therefore that a valid signature
+// of this document authenticates the package.
+//
+// This authentication always returns a nil result, since it alone cannot offer
+// any assertions about package integrity. It should be combined with other
+// authentications to be useful.
+func NewMatchingChecksumAuthentication(document []byte, filename string, wantSHA256Sum [sha256.Size]byte) PackageAuthentication {
+	return matchingChecksumAuthentication{
+		Document:      document,
+		Filename:      filename,
+		WantSHA256Sum: wantSHA256Sum,
+	}
+}
+
+// AuthenticatePackage looks up the configured filename in the SHA256SUMS
+// document and verifies the recorded hash equals the expected one. The
+// location argument is unused.
+func (m matchingChecksumAuthentication) AuthenticatePackage(location PackageLocation) (*PackageAuthenticationResult, error) {
+	// Find the checksum in the list with matching filename. The document is
+	// in the form "0123456789abcdef filename.zip".
+	filename := []byte(m.Filename)
+	var checksum []byte
+	for _, line := range bytes.Split(m.Document, []byte("\n")) {
+		parts := bytes.Fields(line)
+		if len(parts) > 1 && bytes.Equal(parts[1], filename) {
+			checksum = parts[0]
+			break
+		}
+	}
+	if checksum == nil {
+		return nil, fmt.Errorf("checksum list has no SHA-256 hash for %q", m.Filename)
+	}
+
+	// Decode the ASCII checksum into a byte array for comparison.
+	var gotSHA256Sum [sha256.Size]byte
+	if _, err := hex.Decode(gotSHA256Sum[:], checksum); err != nil {
+		return nil, fmt.Errorf("checksum list has invalid SHA256 hash %q: %w", string(checksum), err)
+	}
+
+	// If the checksums don't match, authentication fails.
+	if !bytes.Equal(gotSHA256Sum[:], m.WantSHA256Sum[:]) {
+		return nil, fmt.Errorf("checksum list has unexpected SHA-256 hash %x (expected %x)", gotSHA256Sum, m.WantSHA256Sum[:])
+	}
+
+	// Success! But this doesn't result in any real authentication, only a
+	// lack of authentication errors, so we return a nil result.
+	return nil, nil
+}
+
+type signatureAuthentication struct {
+	Document       []byte
+	Signature      []byte
+	Keys           []SigningKey
+	ProviderSource *tfaddr.Provider
+	Meta           PackageMeta
+}
+
+// NewSignatureAuthentication returns a PackageAuthentication implementation
+// that verifies the cryptographic signature for a package against any of the
+// provided keys.
+//
+// The signing key for a package will be auto detected by attempting each key
+// in turn until one is successful. If such a key is found, there are three
+// possible successful authentication results:
+//
+// 1. If the signing key is the HashiCorp official key, it is an official
+// provider;
+// 2. Otherwise, if the signing key has a trust signature from the HashiCorp
+// Partners key, it is a partner provider;
+// 3. If neither of the above is true, it is a community provider.
+//
+// Any failure in the process of validating the signature will result in an
+// unauthenticated result.
+func NewSignatureAuthentication(meta PackageMeta, document, signature []byte, keys []SigningKey, source *tfaddr.Provider) PackageAuthentication {
+	return signatureAuthentication{
+		Document:       document,
+		Signature:      signature,
+		Keys:           keys,
+		ProviderSource: source,
+		Meta:           meta,
+	}
+}
+
+// ErrUnknownIssuer indicates an error when no valid signature for a provider could be found.
+var ErrUnknownIssuer = fmt.Errorf("authentication signature from unknown issuer")
+
+// shouldEnforceGPGValidation reports whether a missing/empty key set must be
+// treated as an authentication failure rather than silently skipped.
+func (s signatureAuthentication) shouldEnforceGPGValidation() bool {
+	// we should enforce validation for all provider sources that are not the default provider registry
+	if s.ProviderSource != nil && s.ProviderSource.Hostname != tfaddr.DefaultProviderRegistryHost {
+		return true
+	}
+
+	// if we have been provided keys, we should enforce GPG validation
+	if len(s.Keys) > 0 {
+		return true
+	}
+
+	// otherwise if the environment variable is set to true, we should enforce GPG validation
+	enforceEnvVar, exists := os.LookupEnv(enforceGPGValidationEnvName)
+	return exists && enforceEnvVar == "true"
+}
+
+// shouldEnforceGPGExpiration reports whether expired-key errors are treated
+// as fatal, controlled by an opt-in environment variable.
+func (s signatureAuthentication) shouldEnforceGPGExpiration() bool {
+	// if the environment variable is set to true, we should enforce GPG expiration
+	enforceEnvVar, exists := os.LookupEnv(enforceGPGExpirationEnvName)
+	return exists && enforceEnvVar == "true"
+}
+
+// AuthenticatePackage verifies the detached signature over the checksums
+// document against the provided keys, unless GPG validation is being skipped
+// entirely (see shouldEnforceGPGValidation).
+func (s signatureAuthentication) AuthenticatePackage(location PackageLocation) (*PackageAuthenticationResult, error) {
+	if !s.shouldEnforceGPGValidation() {
+		// As this is a temporary measure, we will log a warning to the user making it very clear what is happening
+		// and why. This will be removed in a future release.
+		log.Printf("[WARN] Skipping GPG validation of provider package %s as no keys were provided by the registry. See https://github.com/kubegems/opentofu/pull/309 for more information.", location)
+
+		// construct an empty keyID to indicate that we are not validating and return no errors
+		// this is to force a successful authentication
+		// TODO: discuss if this key should be hardcoded to a value such as "UNKNOWN"?
+		return &PackageAuthenticationResult{result: signingSkipped, KeyID: ""}, nil
+	}
+	log.Printf("[DEBUG] Validating GPG signature of provider package %s", location)
+
+	// Find the key that signed the checksum file. This can fail if there is no
+	// valid signature for any of the provided keys.
+	_, keyID, err := s.findSigningKey()
+	if err != nil {
+		return nil, fmt.Errorf("the provider is not signed with a valid signing key; please contact the provider author (%w)", err)
+	}
+
+	// We have a valid signature.
+	return &PackageAuthenticationResult{result: signed, KeyID: keyID}, nil
+}
+
+// AcceptableHashes parses the signed document as a SHA256SUMS file and
+// returns every checksum it lists, or nil if the document doesn't look like
+// a checksums file.
+func (s signatureAuthentication) AcceptableHashes() []Hash {
+	// This is a bit of an abstraction leak because signatureAuthentication
+	// otherwise just treats the document as an opaque blob that's been
+	// signed, but here we're making assumptions about its format because
+	// we only want to trust that _all_ of the checksums are valid (rather
+	// than just the current platform's one) if we've also verified that the
+	// bag of checksums is signed.
+	//
+	// In recognition of that layering quirk this implementation is intended to
+	// be somewhat resilient to potentially using this authenticator with
+	// non-checksums files in future (in which case it'll return nothing at all)
+	// but it might be better in the long run to instead combine
+	// signatureAuthentication and matchingChecksumAuthentication together and
+	// be explicit that the resulting merged authenticator is exclusively for
+	// checksums files.
+
+	var ret []Hash
+	sc := bufio.NewScanner(bytes.NewReader(s.Document))
+	for sc.Scan() {
+		parts := bytes.Fields(sc.Bytes())
+		if len(parts) == 0 {
+			// Tolerate blank lines; previously indexing parts[0] below
+			// would have panicked on them.
+			continue
+		}
+		if len(parts) < 2 {
+			// Doesn't look like a valid sums file line, so we'll assume
+			// this whole thing isn't a checksums file.
+			return nil
+		}
+
+		// If this is a checksums file then the first part should be a
+		// hex-encoded SHA256 hash, so it should be 64 characters long
+		// and contain only hex digits.
+		hashStr := parts[0]
+		if len(hashStr) != 64 {
+			return nil // doesn't look like a checksums file
+		}
+
+		var gotSHA256Sum [sha256.Size]byte
+		if _, err := hex.Decode(gotSHA256Sum[:], hashStr); err != nil {
+			return nil // doesn't look like a checksums file
+		}
+
+		ret = append(ret, HashLegacyZipSHAFromSHA(gotSHA256Sum))
+	}
+
+	return ret
+}
+
+// findSigningKey attempts to verify the signature using each of the keys
+// returned by the registry. If a valid signature is found, it returns the
+// signing key.
+//
+// Note: currently the registry only returns one key, but this may change in
+// the future.
+func (s signatureAuthentication) findSigningKey() (*SigningKey, string, error) {
+	for _, key := range s.Keys {
+		keyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key.ASCIIArmor))
+		if err != nil {
+			return nil, "", fmt.Errorf("error decoding signing key: %w", err)
+		}
+
+		entity, err := openpgp.CheckDetachedSignature(keyring, bytes.NewReader(s.Document), bytes.NewReader(s.Signature), nil)
+		if !s.shouldEnforceGPGExpiration() && (errors.Is(err, openpgpErrors.ErrKeyExpired) || errors.Is(err, openpgpErrors.ErrSignatureExpired)) {
+			// Internally openpgp will *only* return the Expired errors if all other checks have succeeded
+			// This is currently the best way to work around expired provider keys
+			// NOTE(review): this warning goes to stdout via fmt.Printf rather
+			// than the log filter used elsewhere — presumably so users always
+			// see it; confirm this is intentional.
+			fmt.Printf("[WARN] Provider %s/%s (%v) gpg key expired, this will fail in future versions of OpenTofu\n", s.Meta.Provider.Namespace, s.Meta.Provider.Type, s.Meta.Provider.Hostname)
+			err = nil
+		}
+
+		// If the signature issuer does not match the key, keep trying the
+		// rest of the provided keys.
+		if errors.Is(err, openpgpErrors.ErrUnknownIssuer) {
+			continue
+		}
+
+		// Any other signature error is terminal.
+		if err != nil {
+			return nil, "", fmt.Errorf("error checking signature: %w", err)
+		}
+
+		keyID := "n/a"
+		if entity.PrimaryKey != nil {
+			keyID = entity.PrimaryKey.KeyIdString()
+		}
+
+		log.Printf("[DEBUG] Provider signed by %s", entityString(entity))
+		// Returning &key immediately is safe even pre-Go 1.22: the loop
+		// variable is not mutated again before the return.
+		return &key, keyID, nil
+	}
+
+	// If none of the provided keys issued the signature, this package is
+	// unsigned. This is currently a terminal authentication error.
+	return nil, "", ErrUnknownIssuer
+}
+
+// entityString extracts the key ID and identity name(s) from an openpgp.Entity
+// for logging.
+func entityString(entity *openpgp.Entity) string {
+	if entity == nil {
+		return ""
+	}
+
+	// Fall back to "n/a" for entities with no primary key.
+	keyID := "n/a"
+	if entity.PrimaryKey != nil {
+		keyID = entity.PrimaryKey.KeyIdString()
+	}
+
+	var names []string
+	for _, identity := range entity.Identities {
+		names = append(names, identity.Name)
+	}
+
+	return fmt.Sprintf("%s %s", keyID, strings.Join(names, ", "))
+}
diff --git a/pkg/getproviders/package_authentication_test.go b/pkg/getproviders/package_authentication_test.go
new file mode 100644
index 00000000000..472c49ccc9e
--- /dev/null
+++ b/pkg/getproviders/package_authentication_test.go
@@ -0,0 +1,906 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package getproviders
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strings"
+	"testing"
+
+	openpgpErrors "github.com/ProtonMail/go-crypto/openpgp/errors"
+	tfaddr "github.com/opentofu/registry-address"
+
+	"github.com/google/go-cmp/cmp"
+
+	"github.com/ProtonMail/go-crypto/openpgp"
+)
+
+// TestPackageAuthenticationResult checks the String rendering of results,
+// including the nil receiver, which renders as "unauthenticated".
+func TestPackageAuthenticationResult(t *testing.T) {
+	tests := []struct {
+		result *PackageAuthenticationResult
+		want   string
+	}{
+		{
+			nil,
+			"unauthenticated",
+		},
+		{
+			&PackageAuthenticationResult{result: signed},
+			"signed",
+		},
+	}
+	for _, test := range tests {
+		if got := test.result.String(); got != test.want {
+			t.Errorf("wrong value: got %q, want %q", got, test.want)
+		}
+	}
+}
+
+// mockAuthentication is an implementation of the PackageAuthentication
+// interface which returns fixed values. This is used to test the combining
+// logic of PackageAuthenticationAll.
+type mockAuthentication struct {
+	result packageAuthenticationResult
+	err    error
+}
+
+// AuthenticatePackage returns the configured error if there is one, and
+// otherwise wraps the configured fixed result.
+func (m mockAuthentication) AuthenticatePackage(localLocation PackageLocation) (*PackageAuthenticationResult, error) {
+	if m.err != nil {
+		return nil, m.err
+	}
+	return &PackageAuthenticationResult{result: m.result}, nil
+}
+
+var _ PackageAuthentication = (*mockAuthentication)(nil)
+
+// If all authentications succeed, the returned result should come from the
+// last authentication.
+func TestPackageAuthenticationAll_success(t *testing.T) {
+	auth := PackageAuthenticationAll(
+		&mockAuthentication{result: verifiedChecksum},
+		&mockAuthentication{result: signed},
+	)
+	result, err := auth.AuthenticatePackage(nil)
+
+	want := PackageAuthenticationResult{result: signed}
+	if result == nil || *result != want {
+		t.Errorf("wrong result: want %#v, got %#v", want, result)
+	}
+	if err != nil {
+		t.Errorf("wrong err: got %#v, want nil", err)
+	}
+}
+
+// If an authentication fails, its error should be returned along with a nil
+// result.
+func TestPackageAuthenticationAll_failure(t *testing.T) {
+	someError := errors.New("some error")
+	auth := PackageAuthenticationAll(
+		&mockAuthentication{result: verifiedChecksum},
+		&mockAuthentication{err: someError},
+		&mockAuthentication{result: signed},
+	)
+	result, err := auth.AuthenticatePackage(nil)
+
+	if result != nil {
+		t.Errorf("wrong result: got %#v, want nil", result)
+	}
+	if err != someError {
+		t.Errorf("wrong err: got %#v, want %#v", err, someError)
+	}
+}
+
+// Package hash authentication requires a zip file or directory fixture and a
+// known-good set of hashes, of which the authenticator will pick one. The
+// result should be "verified checksum".
+func TestPackageHashAuthentication_success(t *testing.T) {
+	// Location is an unpacked package directory (PackageLocalDir); the
+	// new-style package hash works on both packed and unpacked layouts.
+	location := PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64")
+
+	wantHashes := []Hash{
+		// Known-good HashV1 result for this directory
+		Hash("h1:qjsREM4DqEWECD43FcPqddZ9oxCG+IaMTxvWPciS05g="),
+	}
+
+	auth := NewPackageHashAuthentication(Platform{"linux", "amd64"}, wantHashes)
+	result, err := auth.AuthenticatePackage(location)
+
+	wantResult := PackageAuthenticationResult{result: verifiedChecksum}
+	if result == nil || *result != wantResult {
+		t.Errorf("wrong result: got %#v, want %#v", result, wantResult)
+	}
+	if err != nil {
+		t.Errorf("wrong err: got %s, want nil", err)
+	}
+}
+
+// Package hash authentication can fail for various reasons.
+func TestPackageHashAuthentication_failure(t *testing.T) {
+	tests := map[string]struct {
+		location PackageLocation
+		err      string
+	}{
+		"missing file": {
+			PackageLocalArchive("testdata/no-package-here.zip"),
+			"failed to verify provider package checksums: lstat testdata/no-package-here.zip: no such file or directory",
+		},
+		"checksum mismatch": {
+			PackageLocalDir("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64"),
+			"provider package doesn't match the expected checksum \"h1:invalid\"",
+		},
+		"invalid zip file": {
+			PackageLocalArchive("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip"),
+			"failed to verify provider package checksums: zip: not a valid zip file",
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			// Invalid expected hash, either because we'll error before we
+			// reach it, or we want to force a checksum mismatch.
+			auth := NewPackageHashAuthentication(Platform{"linux", "amd64"}, []Hash{"h1:invalid"})
+			result, err := auth.AuthenticatePackage(test.location)
+
+			if result != nil {
+				t.Errorf("wrong result: got %#v, want nil", result)
+			}
+			if gotErr := err.Error(); gotErr != test.err {
+				t.Errorf("wrong err: got %q, want %q", gotErr, test.err)
+			}
+		})
+	}
+}
+
+// Archive checksum authentication requires a file fixture and a known-good
+// SHA256 hash. The result should be "verified checksum".
+func TestArchiveChecksumAuthentication_success(t *testing.T) {
+	// Location must be a PackageLocalArchive path
+	location := PackageLocalArchive("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip")
+
+	// Known-good SHA256 hash for this archive
+	wantSHA256Sum := [sha256.Size]byte{
+		0x4f, 0xb3, 0x98, 0x49, 0xf2, 0xe1, 0x38, 0xeb,
+		0x16, 0xa1, 0x8b, 0xa0, 0xc6, 0x82, 0x63, 0x5d,
+		0x78, 0x1c, 0xb8, 0xc3, 0xb2, 0x59, 0x01, 0xdd,
+		0x5a, 0x79, 0x2a, 0xde, 0x97, 0x11, 0xf5, 0x01,
+	}
+
+	auth := NewArchiveChecksumAuthentication(Platform{"linux", "amd64"}, wantSHA256Sum)
+	result, err := auth.AuthenticatePackage(location)
+
+	wantResult := PackageAuthenticationResult{result: verifiedChecksum}
+	if result == nil || *result != wantResult {
+		t.Errorf("wrong result: got %#v, want %#v", result, wantResult)
+	}
+	if err != nil {
+		t.Errorf("wrong err: got %s, want nil", err)
+	}
+}
+
+// Archive checksum authentication can fail for various reasons. These test
+// cases are almost exhaustive, missing only an io.Copy error which is
+// difficult to induce.
+func TestArchiveChecksumAuthentication_failure(t *testing.T) {
+	cases := map[string]struct {
+		location PackageLocation
+		err      string
+	}{
+		"missing file": {
+			PackageLocalArchive("testdata/no-package-here.zip"),
+			"failed to compute checksum for testdata/no-package-here.zip: lstat testdata/no-package-here.zip: no such file or directory",
+		},
+		"checksum mismatch": {
+			PackageLocalArchive("testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip"),
+			"archive has incorrect checksum zh:4fb39849f2e138eb16a18ba0c682635d781cb8c3b25901dd5a792ade9711f501 (expected zh:0000000000000000000000000000000000000000000000000000000000000000)",
+		},
+		"invalid location": {
+			PackageLocalDir("testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64"),
+			"cannot check archive hash for non-archive location testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64",
+		},
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			// A zeroed expected checksum: either the case errors before the
+			// comparison, or we force a deliberate mismatch.
+			auth := NewArchiveChecksumAuthentication(Platform{"linux", "amd64"}, [sha256.Size]byte{0})
+			result, err := auth.AuthenticatePackage(tc.location)
+
+			if result != nil {
+				t.Errorf("wrong result: got %#v, want nil", result)
+			}
+			if gotErr := err.Error(); gotErr != tc.err {
+				t.Errorf("wrong err: got %q, want %q", gotErr, tc.err)
+			}
+		})
+	}
+}
+
+// Matching checksum authentication takes a SHA256SUMS document, an archive
+// filename, and an expected SHA256 hash. On success both return values should
+// be nil.
+func TestMatchingChecksumAuthentication_success(t *testing.T) {
+	// Location is unused
+	location := PackageLocalArchive("testdata/my-package.zip")
+
+	// Two different checksums for other files
+	wantSHA256Sum := [sha256.Size]byte{0xde, 0xca, 0xde}
+	otherSHA256Sum := [sha256.Size]byte{0xc0, 0xff, 0xee}
+
+	document := []byte(
+		fmt.Sprintf(
+			"%x README.txt\n%x my-package.zip\n",
+			otherSHA256Sum,
+			wantSHA256Sum,
+		),
+	)
+	filename := "my-package.zip"
+
+	auth := NewMatchingChecksumAuthentication(document, filename, wantSHA256Sum)
+	result, err := auth.AuthenticatePackage(location)
+
+	// Matching checksum authentication alone can't assert anything about
+	// package integrity, so success is a nil result with a nil error.
+	if result != nil {
+		t.Errorf("wrong result: got %#v, want nil", result)
+	}
+	if err != nil {
+		t.Errorf("wrong err: got %s, want nil", err)
+	}
+}
+
+// Matching checksum authentication can fail for three reasons: no checksum
+// in the document for the filename, invalid checksum value, and non-matching
+// checksum value.
+func TestMatchingChecksumAuthentication_failure(t *testing.T) {
+	wantSHA256Sum := [sha256.Size]byte{0xde, 0xca, 0xde}
+	filename := "my-package.zip"
+
+	cases := map[string]struct {
+		document []byte
+		err      string
+	}{
+		"no checksum for filename": {
+			[]byte(
+				fmt.Sprintf(
+					"%x README.txt",
+					[sha256.Size]byte{0xbe, 0xef},
+				),
+			),
+			`checksum list has no SHA-256 hash for "my-package.zip"`,
+		},
+		"invalid checksum": {
+			[]byte(
+				fmt.Sprintf(
+					"%s README.txt\n%s my-package.zip",
+					"horses",
+					"chickens",
+				),
+			),
+			`checksum list has invalid SHA256 hash "chickens": encoding/hex: invalid byte: U+0068 'h'`,
+		},
+		"checksum mismatch": {
+			[]byte(
+				fmt.Sprintf(
+					"%x README.txt\n%x my-package.zip",
+					[sha256.Size]byte{0xbe, 0xef},
+					[sha256.Size]byte{0xc0, 0xff, 0xee},
+				),
+			),
+			"checksum list has unexpected SHA-256 hash c0ffee0000000000000000000000000000000000000000000000000000000000 (expected decade0000000000000000000000000000000000000000000000000000000000)",
+		},
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			// Location is unused
+			location := PackageLocalArchive("testdata/my-package.zip")
+
+			auth := NewMatchingChecksumAuthentication(tc.document, filename, wantSHA256Sum)
+			result, err := auth.AuthenticatePackage(location)
+
+			if result != nil {
+				t.Errorf("wrong result: got %#v, want nil", result)
+			}
+			if gotErr := err.Error(); gotErr != tc.err {
+				t.Errorf("wrong err: got %q, want %q", gotErr, tc.err)
+			}
+		})
+	}
+}
+
+// Signature authentication takes a checksum document, a signature, and a list
+// of signing keys. If the document is signed by one of the given keys, the
+// authentication is successful. The value of the result depends on the signing
+// key.
+func TestSignatureAuthentication_success(t *testing.T) {
+	tests := map[string]struct {
+		signature string
+		keys      []SigningKey
+		result    PackageAuthenticationResult
+	}{
+		"community provider": {
+			testAuthorSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: testAuthorKeyArmor,
+				},
+			},
+			PackageAuthenticationResult{
+				result: signed,
+				KeyID:  testAuthorKeyID,
+			},
+		},
+		"multiple signing keys": {
+			testAuthorSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: anotherPublicKey,
+				},
+				{
+					ASCIIArmor: testAuthorKeyArmor,
+				},
+			},
+			PackageAuthenticationResult{
+				result: signed,
+				KeyID:  testAuthorKeyID,
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			// Location is unused
+			location := PackageLocalArchive("testdata/my-package.zip")
+
+			signature, err := base64.StdEncoding.DecodeString(test.signature)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// A nil provider source means GPG validation is only performed
+			// because explicit keys are supplied.
+			auth := NewSignatureAuthentication(PackageMeta{Location: location}, []byte(testShaSumsPlaceholder), signature, test.keys, nil)
+			result, err := auth.AuthenticatePackage(location)
+
+			if result == nil || *result != test.result {
+				t.Errorf("wrong result: got %#v, want %#v", result, test.result)
+			}
+			if err != nil {
+				t.Errorf("wrong err: got %s, want nil", err)
+			}
+		})
+	}
+}
+
+func TestNewSignatureAuthentication_success(t *testing.T) {
+	tests := map[string]struct {
+		signature string
+		keys      []SigningKey
+		result    PackageAuthenticationResult
+	}{
+		"official provider": {
+			testHashicorpSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: TestingPublicKey,
+				},
+			},
+			PackageAuthenticationResult{
+				result: signed,
+				KeyID:  testHashiCorpPublicKeyID,
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			// Location is unused
+			location := PackageLocalArchive("testdata/my-package.zip")
+
+			signature, err := base64.StdEncoding.DecodeString(test.signature)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// NOTE: the key used here is expired, so this also exercises the
+			// expired-key ignore logic (expiration enforcement is off here).
+			auth := NewSignatureAuthentication(PackageMeta{Location: location}, []byte(testProviderShaSums), signature, test.keys, nil)
+			result, err := auth.AuthenticatePackage(location)
+
+			if result == nil || *result != test.result {
+				t.Errorf("wrong result: got %#v, want %#v", result, test.result)
+			}
+			if err != nil {
+				t.Errorf("wrong err: got %s, want nil", err)
+			}
+		})
+	}
+}
+
+func TestNewSignatureAuthentication_expired(t *testing.T) {
+	tests := map[string]struct {
+		signature string
+		keys      []SigningKey
+	}{
+		"official provider": {
+			testHashicorpSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: TestingPublicKey,
+				},
+			},
+		},
+	}
+	// t.Setenv registers a cleanup that restores the previous value when the
+	// test finishes, so no manual reset is needed afterwards. (The previous
+	// trailing t.Setenv(..., "") was redundant and, being "set to empty"
+	// rather than "unset", misleading.)
+	t.Setenv(enforceGPGExpirationEnvName, "true")
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			// Location is unused
+			location := PackageLocalArchive("testdata/my-package.zip")
+
+			signature, err := base64.StdEncoding.DecodeString(test.signature)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			auth := NewSignatureAuthentication(PackageMeta{Location: location}, []byte(testProviderShaSums), signature, test.keys, nil)
+			_, err = auth.AuthenticatePackage(location)
+
+			if err == nil {
+				t.Errorf("wrong err: got %s, want %s", err, openpgpErrors.ErrKeyExpired)
+			}
+		})
+	}
+}
+
+// Signature authentication can fail for many reasons, most of which are due
+// to OpenPGP failures from malformed keys or signatures.
+func TestSignatureAuthentication_failure(t *testing.T) {
+	tests := map[string]struct {
+		signature    string
+		keys         []SigningKey
+		errorType    any
+		errorMessage string
+	}{
+		"invalid key": {
+			testHashicorpSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: "invalid PGP armor value",
+				},
+			},
+			openpgpErrors.InvalidArgumentError(""),
+			"no armored data found",
+		},
+		"invalid signature": {
+			testSignatureBadBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: testAuthorKeyArmor,
+				},
+			},
+			openpgpErrors.InvalidArgumentError(""),
+			"signature subpacket truncated",
+		},
+		"no keys match signature": {
+			testAuthorSignatureGoodBase64,
+			[]SigningKey{
+				{
+					ASCIIArmor: TestingPublicKey,
+				},
+			},
+			nil,
+			ErrUnknownIssuer.Error(),
+		},
+	}
+
+	for name, test := range tests {
+		// Shadow the loop variable so the subtest closure captures a stable
+		// copy (required before Go 1.22).
+		test := test
+		t.Run(name, func(t *testing.T) {
+			// Location is unused
+			location := PackageLocalArchive("testdata/my-package.zip")
+
+			signature, err := base64.StdEncoding.DecodeString(test.signature)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			auth := NewSignatureAuthentication(PackageMeta{Location: location}, []byte(testShaSumsPlaceholder), signature, test.keys, nil)
+			result, err := auth.AuthenticatePackage(location)
+
+			if result != nil {
+				t.Errorf("wrong result: got %#v, want nil", result)
+			}
+			if test.errorType != nil {
+				if err == nil {
+					t.Errorf("expected error of type %v, got nil", test.errorType)
+				}
+				if !errors.As(err, &test.errorType) {
+					t.Errorf("wrong error type: got %v, want %v", err, test.errorType)
+				}
+			}
+			if test.errorMessage != "" {
+				if err == nil {
+					t.Errorf("expected error of type %v, got nil", test.errorType)
+				}
+				if !strings.Contains(err.Error(), test.errorMessage) {
+					t.Errorf("wrong error message: %s (expected an error message containing %s)", err.Error(), test.errorMessage)
+				}
+			}
+		})
+	}
+}
+
+// TestSignatureAuthentication_acceptableHashes verifies that the signed
+// checksums document is parsed into the full list of lock-file hashes.
+func TestSignatureAuthentication_acceptableHashes(t *testing.T) {
+	auth := NewSignatureAuthentication(PackageMeta{}, []byte(testShaSumsRealistic), nil, nil, nil)
+	authWithHashes, ok := auth.(PackageAuthenticationHashes)
+	if !ok {
+		t.Fatalf("%T does not implement PackageAuthenticationHashes", auth)
+	}
+	got := authWithHashes.AcceptableHashes()
+	want := []Hash{
+		// These are the hashes encoded in constant testShaSumsRealistic
+		"zh:7d7e888fdd28abfe00894f9055209b9eec785153641de98e6852aa071008d4ee",
+		"zh:f8b6cf9ade087c17826d49d89cef21261cdc22bd27065bbc5b27d7dbf7fbbf6c",
+		"zh:a5ba9945606bb7bfb821ba303957eeb40dd9ee4e706ba8da1eaf7cbeb0356e63",
+		"zh:df3a5a8d6ffff7bacf19c92d10d0d500f98169ea17b3764b01a789f563d1aad7",
+		"zh:086119a26576d06b8281a97e8644380da89ce16197cd955f74ea5ee664e9358b",
+		"zh:1e5f7a5f3ade7b8b1d1d59c5cea2e1a2f8d2f8c3f41962dbbe8647e222be8239",
+		"zh:0e9fd0f3e2254b526a0e81e0cfdfc82583b0cd343778c53ead21aa7d52f776d7",
+		"zh:66a947e7de1c74caf9f584c3ed4e91d2cb1af6fe5ce8abaf1cf8f7ff626a09d1",
+		"zh:def1b73849bec0dc57a04405847921bf9206c75b52ae9de195476facb26bd85e",
+		"zh:48f1826ec31d6f104e46cc2022b41f30cd1019ef48eaec9697654ef9ec37a879",
+		"zh:17e0b496022bc4e4137be15e96d2b051c8acd6e14cb48d9b13b262330464f6cc",
+		"zh:2696c86228f491bc5425561c45904c9ce39b1c676b1e17734cb2ee6b578c4bcd",
+	}
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("wrong result\n%s", diff)
+	}
+}
+
+const testAuthorKeyID = `37A6AB3BCF2C170A`
+
+// testAuthorKeyArmor is test key ID 5BFEEC4317E746008621970637A6AB3BCF2C170A.
const testAuthorKeyArmor = `-----BEGIN PGP PUBLIC KEY BLOCK-----

mQENBF5vhgYBCAC40OcC2hEx3yGiLhHMbt7DAVEQ0nZwAWy6oL98niknLumBa1VO
nMYshP+o/FKOFatBl8aXhmDo606P6pD9d4Pg/WNehqT7hGNHcAFlm+8qjQAvE5uX
Z/na/Np7dmWasCiL5hYyHEnKU/XFpc9KyicbkS7n8igP1LEb8xDD1pMLULQsQHA4
258asvtwjoYTZIij1I6bUE178bGFPNCfj+FzQM8nKzPpDVxZ7njN9c2sB9FEdJ1+
S9mZQNK5PbJuEAOpD5Jp9BnGE16jsLUhDmvGHBjFZAXMBkNSloEMHhs2ty9lEzoF
eJmJx7XCGw+ds1SWp4MsHQPWzXxAlrfa4GMlABEBAAG0R1RlcnJhZm9ybSBUZXN0
aW5nIChwbHVnaW4vZGlzY292ZXJ5LykgPHRlcnJhZm9ybSt0ZXN0aW5nQGhhc2hp
Y29ycC5jb20+iQFOBBMBCAA4FiEEW/7sQxfnRgCGIZcGN6arO88sFwoFAl5vhgYC
GwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQN6arO88sFwpWvQf/apaMu4Bm
ea8AGjdl9acQhHBpWsyiHLIfZvN11xxN/f3+YITvPXIe2PMgveqNfXxu6PIeZGDb
0DBvnBQy/vqmA+sCQ8t8+kIWdfZ1EeM2YcXdmAEtriooLvc85JFYjafLIKSj9N7o
V/R/e1BCW/v1/7Je47c+6FSt3HHhwyT5AZ3BCq1zpw6PeCDSQ/gZr3Mvq4CjeLA/
K+8TM3KyOF4qBGDvzGzp/t9umQSS2L0ozd90lxJtf5Q8ozqDaBiDo+f/osXT2EvN
VwPP/xh/gABkXiNrPylFbeD+XPAC4N7NmYK5aPDzRYXXknP8e9PDMykoJKZ+bSdz
F3IZ4q5RDHmmNbkBDQReb4YGAQgAt15e1F8TPQQm1jK8+scypHgfmPHbp7Qsulo1
GTcUd8QmhbR4kayuLDEpJYzq6+IoTM4TPqsdVuq/1Nwey9oyK0wXk/SUR29nRIQh
3GBg7JVg1YsObsfVTvEflYOdjk8T/Udqs4I6HnmSbtzsaohzybutpWXPUkW8OzFI
ATwfVTrrz70Yxs+ly0nSEH2Yf+kg2uYZvv5KsJ3MNENhXnHnlaTy2IfhsxAX0xOG
pa9fXV3NzdEbl0mYaEzMi77qRAyIQ9VrIL5F0yY/LlbpLSl6xk2+BB2v3a1Ey6SJ
w4/le6AM0wlH2hKPCTlkvM0IvUWjlzrPzCkeu027iVc+fqdyiQARAQABiQE2BBgB
CAAgFiEEW/7sQxfnRgCGIZcGN6arO88sFwoFAl5vhgYCGwwACgkQN6arO88sFwqz
nAf/eF4oZG9F8sJX01mVdDm/L7Uthe4xjTdl7jwV4ygNX+pCyWrww3qc3qbd3QKg
CFqIt/TAPE/OxHxCFuxalQefpOqfxjKzvcktxzWmpgxaWsvHaXiS4bKBPz78N/Ke
MUtcjGHyLeSzYPUfjquqDzQxqXidRYhyHGSy9c0NKZ6wCElLZ6KcmCQb4sZxVwfu
ssjwAFbPMp1nr0f5SWCJfhTh7QF7lO2ldJaKMlcBM8aebmqFQ52P7ZWOFcgeerng
G7Zdrci1KEd943HhzDCsUFz4gJwbvUyiAYb2ddndpUBkYwCB/XrHWPOSnGxHgZoo
1gIqed9OV/+s5wKxZPjL0pCStQ==
=mYqJ
-----END PGP PUBLIC KEY BLOCK-----`

// testAuthorEccKeyArmor uses Curve 25519 and has test key ID D01ED5C4BB1ED36A014B0D376540DDA046E5E135
const testAuthorEccKeyArmor = `-----BEGIN PGP PUBLIC KEY BLOCK-----

mDMEY1B7+hYJKwYBBAHaRw8BAQdAFRDpASP+iDY+QotOBP9DF5CfuhSBD8Dl0hSG
D7plEsO0M1RlcnJhZm9ybSBUZXN0aW5nIDx0ZXJyYWZvcm0rdGVzdGluZ0BoYXNo
aWNvcnAuY29tPoiTBBMWCgA7FiEE0B7VxLse02oBSw03ZUDdoEbl4TUFAmNQe/oC
GwMFCwkIBwICIgIGFQoJCAsCBBYCAwECHgcCF4AACgkQZUDdoEbl4TWhwwD+N/BR
pR9NhRFDm+JRhA3saKmpTSRo9yJnr6tRlumE4KQA/A2cOCDeezf6t3SXltoYUKIt
EYmbLxgMDlffVkFyC8IMuDgEY1B7+hIKKwYBBAGXVQEFAQEHQJ7frE76Le1qI1Go
dfrVIzEgAcYjDW6T01/V95wgqPIuAwEIB4h4BBgWCgAgFiEE0B7VxLse02oBSw03
ZUDdoEbl4TUFAmNQe/oCGwwACgkQZUDdoEbl4TWvsAD/YSQAigAH5hq4OmK4gs0J
O74RFokGZzbPtoIvutb8eYoA/1QxxyqE/8A4Z21azYEO0j563LRa8SkZcB5UPDy3
7ngJ
=Xb0o
-----END PGP PUBLIC KEY BLOCK-----`

// testShaSumsPlaceholder is a string that represents a signed document that
// the signature authenticator will check. Some of the signature values in
// other constants in this file are signing this string.
const testShaSumsPlaceholder = "example shasums data"

// testShaSumsRealistic is a more realistic SHA256SUMS document that we can use
// to test the AcceptableHashes method. The signature values in other constants
// in this file do not sign this string.
//
// NOTE(review): the column separator below is reconstructed as two spaces
// (the conventional sha256sum output format); the exact whitespace was lost
// in extraction — confirm against the original file.
const testShaSumsRealistic = `7d7e888fdd28abfe00894f9055209b9eec785153641de98e6852aa071008d4ee  terraform_0.14.0-alpha20200923_darwin_amd64.zip
f8b6cf9ade087c17826d49d89cef21261cdc22bd27065bbc5b27d7dbf7fbbf6c  terraform_0.14.0-alpha20200923_freebsd_386.zip
a5ba9945606bb7bfb821ba303957eeb40dd9ee4e706ba8da1eaf7cbeb0356e63  terraform_0.14.0-alpha20200923_freebsd_amd64.zip
df3a5a8d6ffff7bacf19c92d10d0d500f98169ea17b3764b01a789f563d1aad7  terraform_0.14.0-alpha20200923_freebsd_arm.zip
086119a26576d06b8281a97e8644380da89ce16197cd955f74ea5ee664e9358b  terraform_0.14.0-alpha20200923_linux_386.zip
1e5f7a5f3ade7b8b1d1d59c5cea2e1a2f8d2f8c3f41962dbbe8647e222be8239  terraform_0.14.0-alpha20200923_linux_amd64.zip
0e9fd0f3e2254b526a0e81e0cfdfc82583b0cd343778c53ead21aa7d52f776d7  terraform_0.14.0-alpha20200923_linux_arm.zip
66a947e7de1c74caf9f584c3ed4e91d2cb1af6fe5ce8abaf1cf8f7ff626a09d1  terraform_0.14.0-alpha20200923_openbsd_386.zip
def1b73849bec0dc57a04405847921bf9206c75b52ae9de195476facb26bd85e  terraform_0.14.0-alpha20200923_openbsd_amd64.zip
48f1826ec31d6f104e46cc2022b41f30cd1019ef48eaec9697654ef9ec37a879  terraform_0.14.0-alpha20200923_solaris_amd64.zip
17e0b496022bc4e4137be15e96d2b051c8acd6e14cb48d9b13b262330464f6cc  terraform_0.14.0-alpha20200923_windows_386.zip
2696c86228f491bc5425561c45904c9ce39b1c676b1e17734cb2ee6b578c4bcd  terraform_0.14.0-alpha20200923_windows_amd64.zip`

// testAuthorSignatureGoodBase64 is a signature signed with testAuthorKeyArmor,
// which represents the SHA256SUMS.sig file downloaded for a release.
// NOTE(review): the original comment said "a signature of testShaSums", but no
// constant of that name exists in this file; presumably it signs
// testShaSumsPlaceholder (which is what the tests pair it with) — confirm.
const testAuthorSignatureGoodBase64 = `iQEzBAABCAAdFiEEW/7sQxfnRgCGIZcGN6arO88s` +
	`FwoFAl5vh7gACgkQN6arO88sFwrAlQf6Al77qzjxNIj+NQNJfBGYUE5jHIgcuWOs1IPRTYUI` +
	`rHQIUU2RVrdHoAefKTKNzGde653JK/pYTflSV+6ini3/aZZnXlF6t001w3wswmakdwTr0hXx` +
	`Ez/hHYio72Gpn7+T/L+nl6dKkjeGqd/Kor5x2TY9uYB737ESmAe5T8ZlPaGMFHh0mYlNTeRq` +
	`4qIKqL6DwddBF4Ju2svn2MeNMGfE358H31mxAl2k4PPrwBTR1sFUCUOzAXVA/g9Ov5Y9ni2G` +
	`rkTahBtV9yuUUd1D+oRTTTdP0bj3A+3xxXmKTBhRuvurydPTicKuWzeILIJkcwp7Kl5UbI2N` +
	`n1ayZdaCIw/r4w==`

// testSignatureBadBase64 is an invalid signature. It decodes as base64 but
// contains a truncated/garbled OpenPGP packet, so signature parsing fails.
const testSignatureBadBase64 = `iQEzBAABCAAdFiEEW/7sQxfnRgCGIZcGN6arO88s` +
	`4qIKqL6DwddBF4Ju2svn2MeNMGfE358H31mxAl2k4PPrwBTR1sFUCUOzAXVA/g9Ov5Y9ni2G` +
	`rkTahBtV9yuUUd1D+oRTTTdP0bj3A+3xxXmKTBhRuvurydPTicKuWzeILIJkcwp7Kl5UbI2N` +
	`n1ayZdaCIw/r4w==`

// testHashiCorpPublicKeyID is the Key ID of the HashiCorpPublicKey.
const testHashiCorpPublicKeyID = `34365D9472D7468F`

// testProviderShaSums is the SHA256SUMS document signed by
// testHashicorpSignatureGoodBase64 below.
// NOTE(review): separator reconstructed as two spaces (standard sha256sum
// format); exact whitespace was lost in extraction — confirm.
const testProviderShaSums = `fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e  terraform-provider-null_3.1.0_darwin_amd64.zip
9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2  terraform-provider-null_3.1.0_darwin_arm64.zip
a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e  terraform-provider-null_3.1.0_freebsd_386.zip
5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521  terraform-provider-null_3.1.0_freebsd_amd64.zip
fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b  terraform-provider-null_3.1.0_freebsd_arm.zip
c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d  terraform-provider-null_3.1.0_linux_386.zip
53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515  terraform-provider-null_3.1.0_linux_amd64.zip
cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8  terraform-provider-null_3.1.0_linux_arm64.zip
e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70  terraform-provider-null_3.1.0_linux_arm.zip
a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53  terraform-provider-null_3.1.0_windows_386.zip
02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2  terraform-provider-null_3.1.0_windows_amd64.zip
`

// testHashicorpSignatureGoodBase64 is a signature of testProviderShaSums signed with
// the HashiCorp release key (held in the TestingPublicKey constant in this
// package — presumably the "HashicorpPublicKey" the original comment named),
// which represents the SHA256SUMS.sig file downloaded for an official release.
const testHashicorpSignatureGoodBase64 = `wsFcBAABCAAQBQJgga+GCRCwtEEJdoW2dgAA` +
	`o0YQAAW911BGDr2WHLo5NwcZenwHyxL5DX9g+4BknKbc/WxRC1hD8Afi3eygZk1yR6eT4Gp2H` +
	`yNOwCjGL1PTONBumMfj9udIeuX8onrJMMvjFHh+bORGxBi4FKr4V3b2ZV1IYOjWMEyyTGRDvw` +
	`SCdxBkp3apH3s2xZLmRoAj84JZ4KaxGF7hlT0j4IkNyQKd2T5cCByN9DV80+x+HtzaOieFwJL` +
	`97iyGj6aznXfKfslK6S4oIrVTwyLTrQbxSxA0LsdUjRPHnJamL3sFOG77qUEUoXG3r61yi5vW` +
	`V4P5gCH/+C+VkfGHqaB1s0jHYLxoTEXtwthe66MydDBPe2Hd0J12u9ppOIeK3leeb4uiixWIi` +
	`rNdpWyjr/LU1KKWPxsDqMGYJ9TexyWkXjEpYmIEiY1Rxar8jrLh+FqVAhxRJajjgSRu5pZj50` +
	`CNeKmmbyolLhPCmICjYYU/xKPGXSyDFqonVVyMWCSpO+8F38OmwDQHIk5AWyc8hPOAZ+g5N95` +
	`cfUAzEqlvmNvVHQIU40Y6/Ip2HZzzFCLKQkMP1aDakYHq5w4ZO/ucjhKuoh1HDQMuMnZSu4eo` +
	`2nMTBzYZnUxwtROrJZF1t103avbmP2QE/GaPvLIQn7o5WMV3ZcPCJ+szzzby7H2e33WIynrY/` +
	`95ensBxh7mGFbcQ1C59b5o7viwIaaY2`

// entityString (used for logging the signing key) is exercised by the test below.
+func TestEntityString(t *testing.T) { + var tests = []struct { + name string + entity *openpgp.Entity + expected string + }{ + { + "nil", + nil, + "", + }, + { + "testAuthorEccKeyArmor", + testReadArmoredEntity(t, testAuthorEccKeyArmor), + "6540DDA046E5E135 Terraform Testing ", + }, + { + "testAuthorKeyArmor", + testReadArmoredEntity(t, testAuthorKeyArmor), + "37A6AB3BCF2C170A Terraform Testing (plugin/discovery/) ", + }, + { + "HashicorpPublicKey", + testReadArmoredEntity(t, TestingPublicKey), + "34365D9472D7468F HashiCorp Security (hashicorp.com/security) ", + }, + { + "HashicorpPartnersKey", + testReadArmoredEntity(t, anotherPublicKey), + "7D72D4268E4660FC HashiCorp Security (Terraform Partner Signing) ", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := entityString(tt.entity) + if actual != tt.expected { + t.Errorf("expected %s, actual %s", tt.expected, actual) + } + }) + } +} + +func testReadArmoredEntity(t *testing.T, armor string) *openpgp.Entity { + data := strings.NewReader(armor) + + el, err := openpgp.ReadArmoredKeyRing(data) + if err != nil { + t.Fatal(err) + } + + if count := len(el); count != 1 { + t.Fatalf("expected 1 entity, got %d", count) + } + + return el[0] +} + +func TestShouldEnforceGPGValidation(t *testing.T) { + tests := []struct { + name string + providerSource *tfaddr.Provider + keys []SigningKey + envVarValue string + expected bool + }{ + { + name: "default provider registry, no keys", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{}, + envVarValue: "", + expected: false, + }, + { + name: "default provider registry, some keys", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "", + expected: true, + }, + { + name: "non-default provider registry, no keys", + providerSource: &tfaddr.Provider{ + Hostname: 
"my-registry.com", + }, + keys: []SigningKey{}, + envVarValue: "", + expected: true, + }, + { + name: "non-default provider registry, some keys", + providerSource: &tfaddr.Provider{ + Hostname: "my-registry.com", + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "", + expected: true, + }, + // env var "true" + { + name: "default provider registry, no keys, env var true", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{}, + envVarValue: "true", + expected: true, + }, + { + name: "default provider registry, some keys, env var true", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "true", + expected: true, + }, { + name: "non-default provider registry, no keys, env var true", + providerSource: &tfaddr.Provider{ + Hostname: "my-registry.com", + }, + keys: []SigningKey{}, + envVarValue: "true", + expected: true, + }, + { + name: "non-default provider registry, some keys, env var true", + providerSource: &tfaddr.Provider{ + Hostname: "my-registry.com", + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "true", + expected: true, + }, + // env var "false" + { + name: "default provider registry, no keys, env var false", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{}, + envVarValue: "false", + expected: false, + }, + { + name: "default provider registry, some keys, env var false", + providerSource: &tfaddr.Provider{ + Hostname: tfaddr.DefaultProviderRegistryHost, + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "false", + expected: true, + }, { + name: "non-default provider registry, no keys, env var false", + providerSource: &tfaddr.Provider{ + Hostname: "my-registry.com", + }, + keys: []SigningKey{}, + 
envVarValue: "false", + expected: true, + }, + { + name: "non-default provider registry, some keys, env var false", + providerSource: &tfaddr.Provider{ + Hostname: "my-registry.com", + }, + keys: []SigningKey{ + { + ASCIIArmor: testAuthorKeyArmor, + }, + }, + envVarValue: "false", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + sigAuth := signatureAuthentication{ + ProviderSource: tt.providerSource, + Keys: tt.keys, + } + + if tt.envVarValue != "" { + t.Setenv(enforceGPGValidationEnvName, tt.envVarValue) + } + + actual := sigAuth.shouldEnforceGPGValidation() + if actual != tt.expected { + t.Errorf("expected %t, actual %t", tt.expected, actual) + } + }) + } +} diff --git a/pkg/getproviders/public_keys.go b/pkg/getproviders/public_keys.go new file mode 100644 index 00000000000..1e7b56de3a6 --- /dev/null +++ b/pkg/getproviders/public_keys.go @@ -0,0 +1,183 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +// TestingPublicKey is some public key used for testing. 
const TestingPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBGB9+xkBEACabYZOWKmgZsHTdRDiyPJxhbuUiKX65GUWkyRMJKi/1dviVxOX
PG6hBPtF48IFnVgxKpIb7G6NjBousAV+CuLlv5yqFKpOZEGC6sBV+Gx8Vu1CICpl
Zm+HpQPcIzwBpN+Ar4l/exCG/f/MZq/oxGgH+TyRF3XcYDjG8dbJCpHO5nQ5Cy9h
QIp3/Bh09kET6lk+4QlofNgHKVT2epV8iK1cXlbQe2tZtfCUtxk+pxvU0UHXp+AB
0xc3/gIhjZp/dePmCOyQyGPJbp5bpO4UeAJ6frqhexmNlaw9Z897ltZmRLGq1p4a
RnWL8FPkBz9SCSKXS8uNyV5oMNVn4G1obCkc106iWuKBTibffYQzq5TG8FYVJKrh
RwWB6piacEB8hl20IIWSxIM3J9tT7CPSnk5RYYCTRHgA5OOrqZhC7JefudrP8n+M
pxkDgNORDu7GCfAuisrf7dXYjLsxG4tu22DBJJC0c/IpRpXDnOuJN1Q5e/3VUKKW
mypNumuQpP5lc1ZFG64TRzb1HR6oIdHfbrVQfdiQXpvdcFx+Fl57WuUraXRV6qfb
4ZmKHX1JEwM/7tu21QE4F1dz0jroLSricZxfaCTHHWNfvGJoZ30/MZUrpSC0IfB3
iQutxbZrwIlTBt+fGLtm3vDtwMFNWM+Rb1lrOxEQd2eijdxhvBOHtlIcswARAQAB
tERIYXNoaUNvcnAgU2VjdXJpdHkgKGhhc2hpY29ycC5jb20vc2VjdXJpdHkpIDxz
ZWN1cml0eUBoYXNoaWNvcnAuY29tPokCVAQTAQoAPhYhBMh0AR8KtAURDQIQVTQ2
XZRy10aPBQJgffsZAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJ
EDQ2XZRy10aPtpcP/0PhJKiHtC1zREpRTrjGizoyk4Sl2SXpBZYhkdrG++abo6zs
buaAG7kgWWChVXBo5E20L7dbstFK7OjVs7vAg/OLgO9dPD8n2M19rpqSbbvKYWvp
0NSgvFTT7lbyDhtPj0/bzpkZEhmvQaDWGBsbDdb2dBHGitCXhGMpdP0BuuPWEix+
QnUMaPwU51q9GM2guL45Tgks9EKNnpDR6ZdCeWcqo1IDmklloidxT8aKL21UOb8t
cD+Bg8iPaAr73bW7Jh8TdcV6s6DBFub+xPJEB/0bVPmq3ZHs5B4NItroZ3r+h3ke
VDoSOSIZLl6JtVooOJ2la9ZuMqxchO3mrXLlXxVCo6cGcSuOmOdQSz4OhQE5zBxx
LuzA5ASIjASSeNZaRnffLIHmht17BPslgNPtm6ufyOk02P5XXwa69UCjA3RYrA2P
QNNC+OWZ8qQLnzGldqE4MnRNAxRxV6cFNzv14ooKf7+k686LdZrP/3fQu2p3k5rY
0xQUXKh1uwMUMtGR867ZBYaxYvwqDrg9XB7xi3N6aNyNQ+r7zI2lt65lzwG1v9hg
FG2AHrDlBkQi/t3wiTS3JOo/GCT8BjN0nJh0lGaRFtQv2cXOQGVRW8+V/9IpqEJ1
qQreftdBFWxvH7VJq2mSOXUJyRsoUrjkUuIivaA9Ocdipk2CkP8bpuGz7ZF4uQIN
BGB9+xkBEACoklYsfvWRCjOwS8TOKBTfl8myuP9V9uBNbyHufzNETbhYeT33Cj0M
GCNd9GdoaknzBQLbQVSQogA+spqVvQPz1MND18GIdtmr0BXENiZE7SRvu76jNqLp
KxYALoK2Pc3yK0JGD30HcIIgx+lOofrVPA2dfVPTj1wXvm0rbSGA4Wd4Ng3d2AoR
G/wZDAQ7sdZi1A9hhfugTFZwfqR3XAYCk+PUeoFrkJ0O7wngaon+6x2GJVedVPOs
2x/XOR4l9ytFP3o+5ILhVnsK+ESVD9AQz2fhDEU6RhvzaqtHe+sQccR3oVLoGcat
ma5rbfzH0Fhj0JtkbP7WreQf9udYgXxVJKXLQFQgel34egEGG+NlbGSPG+qHOZtY
4uWdlDSvmo+1P95P4VG/EBteqyBbDDGDGiMs6lAMg2cULrwOsbxWjsWka8y2IN3z
1stlIJFvW2kggU+bKnQ+sNQnclq3wzCJjeDBfucR3a5WRojDtGoJP6Fc3luUtS7V
5TAdOx4dhaMFU9+01OoH8ZdTRiHZ1K7RFeAIslSyd4iA/xkhOhHq89F4ECQf3Bt4
ZhGsXDTaA/VgHmf3AULbrC94O7HNqOvTWzwGiWHLfcxXQsr+ijIEQvh6rHKmJK8R
9NMHqc3L18eMO6bqrzEHW0Xoiu9W8Yj+WuB3IKdhclT3w0pO4Pj8gQARAQABiQI8
BBgBCgAmFiEEyHQBHwq0BRENAhBVNDZdlHLXRo8FAmB9+xkCGwwFCQlmAYAACgkQ
NDZdlHLXRo9ZnA/7BmdpQLeTjEiXEJyW46efxlV1f6THn9U50GWcE9tebxCXgmQf
u+Uju4hreltx6GDi/zbVVV3HCa0yaJ4JVvA4LBULJVe3ym6tXXSYaOfMdkiK6P1v
JgfpBQ/b/mWB0yuWTUtWx18BQQwlNEQWcGe8n1lBbYsH9g7QkacRNb8tKUrUbWlQ
QsU8wuFgly22m+Va1nO2N5C/eE/ZEHyN15jEQ+QwgQgPrK2wThcOMyNMQX/VNEr1
Y3bI2wHfZFjotmek3d7ZfP2VjyDudnmCPQ5xjezWpKbN1kvjO3as2yhcVKfnvQI5
P5Frj19NgMIGAp7X6pF5Csr4FX/Vw316+AFJd9Ibhfud79HAylvFydpcYbvZpScl
7zgtgaXMCVtthe3GsG4gO7IdxxEBZ/Fm4NLnmbzCIWOsPMx/FxH06a539xFq/1E2
1nYFjiKg8a5JFmYU/4mV9MQs4bP/3ip9byi10V+fEIfp5cEEmfNeVeW5E7J8PqG9
t4rLJ8FR4yJgQUa2gs2SNYsjWQuwS/MJvAv4fDKlkQjQmYRAOp1SszAnyaplvri4
ncmfDsf0r65/sd6S40g5lHH8LIbGxcOIN6kwthSTPWX89r42CbY8GzjTkaeejNKx
v1aCrO58wAtursO1DiXCvBY7+NdafMRnoHwBk50iPqrVkNA8fv+auRyB2/G5Ag0E
YH3+JQEQALivllTjMolxUW2OxrXb+a2Pt6vjCBsiJzrUj0Pa63U+lT9jldbCCfgP
wDpcDuO1O05Q8k1MoYZ6HddjWnqKG7S3eqkV5c3ct3amAXp513QDKZUfIDylOmhU
qvxjEgvGjdRjz6kECFGYr6Vnj/p6AwWv4/FBRFlrq7cnQgPynbIH4hrWvewp3Tqw
GVgqm5RRofuAugi8iZQVlAiQZJo88yaztAQ/7VsXBiHTn61ugQ8bKdAsr8w/ZZU5
HScHLqRolcYg0cKN91c0EbJq9k1LUC//CakPB9mhi5+aUVUGusIM8ECShUEgSTCi
KQiJUPZ2CFbbPE9L5o9xoPCxjXoX+r7L/WyoCPTeoS3YRUMEnWKvc42Yxz3meRb+
BmaqgbheNmzOah5nMwPupJYmHrjWPkX7oyyHxLSFw4dtoP2j6Z7GdRXKa2dUYdk2
x3JYKocrDoPHh3Q0TAZujtpdjFi1BS8pbxYFb3hHmGSdvz7T7KcqP7ChC7k2RAKO
GiG7QQe4NX3sSMgweYpl4OwvQOn73t5CVWYp/gIBNZGsU3Pto8g27vHeWyH9mKr4
cSepDhw+/X8FGRNdxNfpLKm7Vc0Sm9Sof8TRFrBTqX+vIQupYHRi5QQCuYaV6OVr
ITeegNK3So4m39d6ajCR9QxRbmjnx9UcnSYYDmIB6fpBuwT0ogNtABEBAAGJBHIE
GAEKACYCGwIWIQTIdAEfCrQFEQ0CEFU0Nl2UctdGjwUCYH4bgAUJAeFQ2wJAwXQg
BBkBCgAdFiEEs2y6kaLAcwxDX8KAsLRBCXaFtnYFAmB9/iUACgkQsLRBCXaFtnYX
BhAAlxejyFXoQwyGo9U+2g9N6LUb/tNtH29RHYxy4A3/ZUY7d/FMkArmh4+dfjf0
p9MJz98Zkps20kaYP+2YzYmaizO6OA6RIddcEXQDRCPHmLts3097mJ/skx9qLAf6
rh9J7jWeSqWO6VW6Mlx8j9m7sm3Ae1OsjOx/m7lGZOhY4UYfY627+Jf7WQ5103Qs
lgQ09es/vhTCx0g34SYEmMW15Tc3eCjQ21b1MeJD/V26npeakV8iCZ1kHZHawPq/
aCCuYEcCeQOOteTWvl7HXaHMhHIx7jjOd8XX9V+UxsGz2WCIxX/j7EEEc7CAxwAN
nWp9jXeLfxYfjrUB7XQZsGCd4EHHzUyCf7iRJL7OJ3tz5Z+rOlNjSgci+ycHEccL
YeFAEV+Fz+sj7q4cFAferkr7imY1XEI0Ji5P8p/uRYw/n8uUf7LrLw5TzHmZsTSC
UaiL4llRzkDC6cVhYfqQWUXDd/r385OkE4oalNNE+n+txNRx92rpvXWZ5qFYfv7E
95fltvpXc0iOugPMzyof3lwo3Xi4WZKc1CC/jEviKTQhfn3WZukuF5lbz3V1PQfI
xFsYe9WYQmp25XGgezjXzp89C/OIcYsVB1KJAKihgbYdHyUN4fRCmOszmOUwEAKR
3k5j4X8V5bk08sA69NVXPn2ofxyk3YYOMYWW8ouObnXoS8QJEDQ2XZRy10aPMpsQ
AIbwX21erVqUDMPn1uONP6o4NBEq4MwG7d+fT85rc1U0RfeKBwjucAE/iStZDQoM
ZKWvGhFR+uoyg1LrXNKuSPB82unh2bpvj4zEnJsJadiwtShTKDsikhrfFEK3aCK8
Zuhpiu3jxMFDhpFzlxsSwaCcGJqcdwGhWUx0ZAVD2X71UCFoOXPjF9fNnpy80YNp
flPjj2RnOZbJyBIM0sWIVMd8F44qkTASf8K5Qb47WFN5tSpePq7OCm7s8u+lYZGK
wR18K7VliundR+5a8XAOyUXOL5UsDaQCK4Lj4lRaeFXunXl3DJ4E+7BKzZhReJL6
EugV5eaGonA52TWtFdB8p+79wPUeI3KcdPmQ9Ll5Zi/jBemY4bzasmgKzNeMtwWP
fk6WgrvBwptqohw71HDymGxFUnUP7XYYjic2sVKhv9AevMGycVgwWBiWroDCQ9Ja
btKfxHhI2p+g+rcywmBobWJbZsujTNjhtme+kNn1mhJsD3bKPjKQfAxaTskBLb0V
wgV21891TS1Dq9kdPLwoS4XNpYg2LLB4p9hmeG3fu9+OmqwY5oKXsHiWc43dei9Y
yxZ1AAUOIaIdPkq+YG/PhlGE4YcQZ4RPpltAr0HfGgZhmXWigbGS+66pUj+Ojysc
j0K5tCVxVu0fhhFpOlHv0LWaxCbnkgkQH9jfMEJkAWMOuQINBGCAXCYBEADW6RNr
ZVGNXvHVBqSiOWaxl1XOiEoiHPt50Aijt25yXbG+0kHIFSoR+1g6Lh20JTCChgfQ
kGGjzQvEuG1HTw07YhsvLc0pkjNMfu6gJqFox/ogc53mz69OxXauzUQ/TZ27GDVp
UBu+EhDKt1s3OtA6Bjz/csop/Um7gT0+ivHyvJ/jGdnPEZv8tNuSE/Uo+hn/Q9hg
8SbveZzo3C+U4KcabCESEFl8Gq6aRi9vAfa65oxD5jKaIz7cy+pwb0lizqlW7H9t
Qlr3dBfdIcdzgR55hTFC5/XrcwJ6/nHVH/xGskEasnfCQX8RYKMuy0UADJy72TkZ
bYaCx+XXIcVB8GTOmJVoAhrTSSVLAZspfCnjwnSxisDn3ZzsYrq3cV6sU8b+QlIX
7VAjurE+5cZiVlaxgCjyhKqlGgmonnReWOBacCgL/UvuwMmMp5TTLmiLXLT7uxeG
ojEyoCk4sMrqrU1jevHyGlDJH9Taux15GILDwnYFfAvPF9WCid4UZ4Ouwjcaxfys
3LxNiZIlUsXNKwS3mhiMRL4TRsbs4k4QE+LIMOsauIvcvm8/frydvQ/kUwIhVTH8
0XGOH909bYtJvY3fudK7ShIwm7ZFTduBJUG473E/Fn3VkhTmBX6+PjOC50HR/Hyb
waRCzfDruMe3TAcE/tSP5CUOb9C7+P+hPzQcDwARAQABiQRyBBgBCgAmFiEEyHQB
Hwq0BRENAhBVNDZdlHLXRo8FAmCAXCYCGwIFCQlmAYACQAkQNDZdlHLXRo/BdCAE
GQEKAB0WIQQ3TsdbSFkTYEqDHMfIIMbVzSerhwUCYIBcJgAKCRDIIMbVzSerh0Xw
D/9ghnUsoNCu1OulcoJdHboMazJvDt/znttdQSnULBVElgM5zk0Uyv87zFBzuCyQ
JWL3bWesQ2uFx5fRWEPDEfWVdDrjpQGb1OCCQyz1QlNPV/1M1/xhKGS9EeXrL8Dw
F6KTGkRwn1yXiP4BGgfeFIQHmJcKXEZ9HkrpNb8mcexkROv4aIPAwn+IaE+NHVtt
IBnufMXLyfpkWJQtJa9elh9PMLlHHnuvnYLvuAoOkhuvs7fXDMpfFZ01C+QSv1dz
Hm52GSStERQzZ51w4c0rYDneYDniC/sQT1x3dP5Xf6wzO+EhRMabkvoTbMqPsTEP
xyWr2pNtTBYp7pfQjsHxhJpQF0xjGN9C39z7f3gJG8IJhnPeulUqEZjhRFyVZQ6/
siUeq7vu4+dM/JQL+i7KKe7Lp9UMrG6NLMH+ltaoD3+lVm8fdTUxS5MNPoA/I8cK
1OWTJHkrp7V/XaY7mUtvQn5V1yET5b4bogz4nME6WLiFMd+7x73gB+YJ6MGYNuO8
e/NFK67MfHbk1/AiPTAJ6s5uHRQIkZcBPG7y5PpfcHpIlwPYCDGYlTajZXblyKrw
BttVnYKvKsnlysv11glSg0DphGxQJbXzWpvBNyhMNH5dffcfvd3eXJAxnD81GD2z
ZAriMJ4Av2TfeqQ2nxd2ddn0jX4WVHtAvLXfCgLM2Gveho4jD/9sZ6PZz/rEeTvt
h88t50qPcBa4bb25X0B5FO3TeK2LL3VKLuEp5lgdcHVonrcdqZFobN1CgGJua8TW
SprIkh+8ATZ/FXQTi01NzLhHXT1IQzSpFaZw0gb2f5ruXwvTPpfXzQrs2omY+7s7
fkCwGPesvpSXPKn9v8uhUwD7NGW/Dm+jUM+QtC/FqzX7+/Q+OuEPjClUh1cqopCZ
EvAI3HjnavGrYuU6DgQdjyGT/UDbuwbCXqHxHojVVkISGzCTGpmBcQYQqhcFRedJ
yJlu6PSXlA7+8Ajh52oiMJ3ez4xSssFgUQAyOB16432tm4erpGmCyakkoRmMUn3p
wx+QIppxRlsHznhcCQKR3tcblUqH3vq5i4/ZAihusMCa0YrShtxfdSb13oKX+pFr
aZXvxyZlCa5qoQQBV1sowmPL1N2j3dR9TVpdTyCFQSv4KeiExmowtLIjeCppRBEK
eeYHJnlfkyKXPhxTVVO6H+dU4nVu0ASQZ07KiQjbI+zTpPKFLPp3/0sPRJM57r1+
aTS71iR7nZNZ1f8LZV2OvGE6fJVtgJ1J4Nu02K54uuIhU3tg1+7Xt+IqwRc9rbVr
pHH/hFCYBPW2D2dxB+k2pQlg5NI+TpsXj5Zun8kRw5RtVb+dLuiH/xmxArIee8Jq
ZF5q4h4I33PSGDdSvGXn9UMY5Isjpg==
=7pIB
-----END PGP PUBLIC KEY BLOCK-----`

// anotherPublicKey is a second testing key (its armored UID decodes to
// "HashiCorp Security (Terraform Partner Signing)"); it is referenced by
// TestEntityString in the signature authentication tests.
const anotherPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBF5vdGkBEADKi3Nm83oqMcar+YSDFKBup7+/Ty7m+SldtDH4/RWT0vgVHuQ1
0joA+TrjITR5/aBVQ1/i2pOiBiImnaWsykccjFw9f9AuJqHo520YrAbNCeA6LuGH
Gvz4u0ReL/Cjbb9xCb34tejmrVOX+tmyiYBQd+oTae3DiyffOI9HxF6v+IKhOFKz
Grs3/R5MDwU1ZQIXTO2bdBOM67XBwvTUC+dy6Nem5UmmwuCI0Qz/JWTGndG8aGDC
EO9+DJ59/IwzBYlbs11iqdfqiGALNr+4FXTwftsxZOGpyxhjyAK00U2PP+gQ/wOK
aeIOL7qpF94GdyVrZzDeMKVLUDmhXxDhyatG4UueRJVAoqNVvAFfEwavpYUrVpYl
se/ZugCcTc9VeDodA4r4VI8yQQW805C+uZ/Q+Ym4r+xTsKcTyC4er4ogXgrMT73B
9sgA2M1B4oGbMN5IuG/L2C9JZ1Tob0h0fX+UGMOvrpWeJkZEKTU8hm4mZwhxeRdL
rrcqs6sewNPRnSiUlxz9ynJuf8vFNAD79Z6H9lULe6FnPuLImzH78FKH9QMQsoAW
z1GlYDrxNs3rHDTkSmvglwmWKpsfCxUnfq4ecsYtroCDjAwhLsf2qO1WlXD8B53h
6LU5DwPo7jJDpOv4B0YbjGuAJCf0oXmhXqdu9te6ybXb84ArtHlVO4EBRQARAQAB
tFFIYXNoaUNvcnAgU2VjdXJpdHkgKFRlcnJhZm9ybSBQYXJ0bmVyIFNpZ25pbmcp
IDxzZWN1cml0eSt0ZXJyYWZvcm1AaGFzaGljb3JwLmNvbT6JAk4EEwEIADgWIQRR
iQZXxazbS4IwhlZ9ctQmjkZg/AUCXm90aQIbAwULCQgHAgYVCgkICwIEFgIDAQIe
AQIXgAAKCRB9ctQmjkZg/LxFEACACTHlqULv38VCteo8UR4sRFcaSK4kwzXyRLI2
oi3tnGdzc9AJ5Brp6/GwcERz0za3NU6LJ5kI7umHhuSb+FOjzQKLbttfKL+bTiNH
HY9NyJPhr6wKJs4Mh8HJ7/FdU7Tsg0cpayNvO5ilU3Mf7H1zaWOVut8BFRYqXGKi
K5/GGmw9C6QwaVSxR4i2kcZYUk4mnTikug53/4sQGnD3zScpDjipEqGTBMLk4r+E
0792MZFRAYRIMmZ0NfaMoIGE7bnmtMrbqtNiw+VaPILk6EyDVK3XJxNDBY/4kwHW
4pDa/qjD7nCL7LapP6NN8sDE++l2MSveorzjtR2yV+goqK1yV0VL2X8zwk1jANX7
HatY6eKJwkx72BpL5N3ps915Od7kc/k7HdDgyoFQCOkuz9nHr7ix1ioltDcaEXwQ
qTv33M21uG7muNlFsEav2yInPGmIRRqBaGg/5AjF8v1mnGOjzJKNMCIEXIpkYoPS
fY9wud2s9DvHHvVuF+pT8YtmJDqKdGVAgv+VAH8z6zeIRaQXRRrbzFaCIozmz3qF
RLPixaPhcw5EHB7MhWBVDnsPXJG811KjMxCrW57ldeBsbR+cEKydEpYFnSjwksGy
FrCFPA4Vol/ks/ldotS7P9FDmYs7VfB0fco4fdyvwnxksRCfY1kg0dJA3Q0uj/uD
MoBzF7kCDQReb3RpARAAr1uZ2iRuoFRTBiI2Ao9Mn2Nk0B+WEWT+4S6oDSuryf+6
sKI9Z+wgSvp7DOKyNARoqv+hnjA5Z+t7y/2K7fZP4TYpqOKw8NRKIUoNH0U2/YED
LN0FlXKuVdXtqfijoRZF/W/UyEMVRpub0yKwQDgsijoUDXIG1INVO/NSMGh5UJxE
I+KoU+oIahNPSTgHPizqhJ5OEYkMMfvIr5eHErtB9uylqifVDlvojeHyzU46XmGw
QLxYzufzLYoeBx9uZjZWIlxpxD2mVPmAYVJtDE0uKRZ29+fnlcxWzhx7Ow+wSVRp
XLwDLxZh1YJseY/cGj6yzjA8NolG1fx94PRD1iF7VukHJ3LkukK3+Iw2o4JKmrFx
FpVVcEoldb4bNRMnbY0KDOXn0/9LM+lhEnCRAo8y5zDO6kmjA56emy4iPHRBlngJ
Egms8wnuKsgNkYG8uRaa6zC9FOY/4MbXtNPg8j3pPlWr5jQVdy053uB9UqGs7y3a
C1z9bII58Otp8p4Hf5W97MNuXTxPgPDNmWXA6xu7k2+aut8dgvgz1msHTs31bTeG
X4iRt23/XWlIy56Jar6NkV74rdiKevAbJRHp/sj9AIR4h0pm4yCjZSEKmMqELj7L
nVSj0s9VSL0algqK5yXLoj6gYUWFfcuHcypnRGvjrpDzGgD9AKrDsmQ3pxFflZ8A
EQEAAYkCNgQYAQgAIBYhBFGJBlfFrNtLgjCGVn1y1CaORmD8BQJeb3RpAhsMAAoJ
EH1y1CaORmD89rUP/0gszqvnU3oXo1lMiwz44EfHDGWeY6sh1pJS0FfyjefIMEzE
rAJvyWXbzRj+Dd2g7m7p5JUf/UEMO6EFdxe1l6IihHJBs+pC6hliFwlGosfJwVc2
wtPg6okAfFI35RBedvrV3uzq01dqFlb+d85Gl24du6nOv6eBXiZ8Pr9F3zPDHLPw
DTP/RtNDxnw8KOC0Z0TE9iQIY1rJCI2mekJ4btHRQ2q9eZQjGFp5HcHBXs/D2ZXC
H/vwB0UskHrtduEUSeTgKkKuPuxbCU5rhE8RGprS41KLYozveD0r5BPa9kBx7qYZ
iOHgWfwlJ4yRjgjtoZl4E9/7aGioYycHNG26UZ+ZHgwTwtDrTU+LP89WrhzoOQmq
H0oU4P/oMe2YKnG6FgCWt8h+31Q08G5VJeXNUoOn+RG02M7HOMHYGeP5wkzAy2HY
I4iehn+A3Cwudv8Gh6WaRqPjLGbk9GWr5fAUG3KLUgJ8iEqnt0/waP7KD78TVId8
DgHymHMvAU+tAxi5wUcC3iQYrBEc1X0vcsRcW6aAi2Cxc/KEkVCz+PJ+HmFVZakS
V+fniKpSnhUlDkwlG5dMGhkGp/THU3u8oDb3rSydRPcRXVe1D0AReUFE2rDOeRoT
VYF2OtVmpc4ntcRyrItyhSkR/m7BQeBFIT8GQvbTmrCDQgrZCsFsIwxd4Cb4
=5/s+
-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/pkg/getproviders/registry_client.go b/pkg/getproviders/registry_client.go
new file mode 100644
index 00000000000..bf432a1a8cf
--- /dev/null
+++ b/pkg/getproviders/registry_client.go
@@ -0,0 +1,527 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package getproviders

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"time"

	"github.com/hashicorp/go-retryablehttp"
	svchost "github.com/hashicorp/terraform-svchost"
	svcauth "github.com/hashicorp/terraform-svchost/auth"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/httpclient"
	"github.com/kubegems/opentofu/pkg/logging"
	"github.com/kubegems/opentofu/version"
)

const (
	terraformVersionHeader = "X-Terraform-Version"

	// registryDiscoveryRetryEnvName is the name of the environment variable that
	// can be configured to customize number of retries for module and provider
	// discovery requests with the remote registry.
	registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY"
	defaultRetry                  = 1

	// registryClientTimeoutEnvName is the name of the environment variable that
	// can be configured to customize the timeout duration (seconds) for module
	// and provider discovery with the remote registry.
	registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT"

	// defaultRequestTimeout is the default timeout duration for requests to the
	// remote registry.
	defaultRequestTimeout = 10 * time.Second
)

// Package-level settings shared by every registryClient; populated once in
// init below and read by newRegistryClient.
var (
	discoveryRetry int
	requestTimeout time.Duration
)

// init populates discoveryRetry and requestTimeout before any client is
// constructed. The two configure* helpers are defined elsewhere in this
// file; presumably they read the env vars named in the constants above,
// falling back to defaultRetry / defaultRequestTimeout.
func init() {
	configureDiscoveryRetry()
	configureRequestTimeout()
}

// SupportedPluginProtocols is the range of provider plugin protocol versions
// this binary can speak; PackageMeta rejects provider versions whose
// advertised protocols fall entirely outside it.
var SupportedPluginProtocols = MustParseVersionConstraints(">= 5, <7")

// registryClient is a client for the provider registry protocol that is
// specialized only for the needs of this package. It's not intended as a
// general registry API client.
type registryClient struct {
	// baseURL is the discovered providers.v1 service base; request paths are
	// resolved relative to it.
	baseURL *url.URL
	// creds supplies host-specific credentials attached to each request.
	creds svcauth.HostCredentials

	httpClient *retryablehttp.Client
}

// newRegistryClient wraps the shared HTTP client in a retrying client
// configured from the package-level discoveryRetry and requestTimeout
// settings. requestLogHook and maxRetryErrorHandler are defined elsewhere
// in this file.
func newRegistryClient(baseURL *url.URL, creds svcauth.HostCredentials) *registryClient {
	httpClient := httpclient.New()
	httpClient.Timeout = requestTimeout

	retryableClient := retryablehttp.NewClient()
	retryableClient.HTTPClient = httpClient
	retryableClient.RetryMax = discoveryRetry
	retryableClient.RequestLogHook = requestLogHook
	retryableClient.ErrorHandler = maxRetryErrorHandler

	// Route the retry library's own logging through the shared log output.
	retryableClient.Logger = log.New(logging.LogOutput(), "", log.Flags())

	return &registryClient{
		baseURL:    baseURL,
		creds:      creds,
		httpClient: retryableClient,
	}
}

// ProviderVersions returns the raw version and protocol strings produced by the
// registry for the given provider.
//
// The returned error will be ErrRegistryProviderNotKnown if the registry responds with
// 404 Not Found to indicate that the namespace or provider type are not known,
// ErrUnauthorized if the registry responds with 401 or 403 status codes, or
// ErrQueryFailed for any other protocol or operational problem.
func (c *registryClient) ProviderVersions(ctx context.Context, addr addrs.Provider) (map[string][]string, []string, error) {
	endpointPath, err := url.Parse(path.Join(addr.Namespace, addr.Type, "versions"))
	if err != nil {
		// Should never happen because we're constructing this from
		// already-validated components.
		return nil, nil, err
	}
	endpointURL := c.baseURL.ResolveReference(endpointPath)
	req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil)
	if err != nil {
		return nil, nil, err
	}
	req = req.WithContext(ctx)
	c.addHeadersToRequest(req.Request)

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, nil, c.errQueryFailed(addr, err)
	}
	defer resp.Body.Close()

	// Map non-success statuses onto the package's typed errors before
	// attempting to decode a body.
	switch resp.StatusCode {
	case http.StatusOK:
		// Great!
	case http.StatusNotFound:
		return nil, nil, ErrRegistryProviderNotKnown{
			Provider: addr,
		}
	case http.StatusUnauthorized, http.StatusForbidden:
		return nil, nil, c.errUnauthorized(addr.Hostname)
	default:
		return nil, nil, c.errQueryFailed(addr, errors.New(resp.Status))
	}

	// We ignore the platforms portion of the response body, because the
	// installer verifies the platform compatibility after pulling a provider
	// versions' metadata.
	type ResponseBody struct {
		Versions []struct {
			Version   string   `json:"version"`
			Protocols []string `json:"protocols"`
		} `json:"versions"`
		Warnings []string `json:"warnings"`
	}
	var body ResponseBody

	dec := json.NewDecoder(resp.Body)
	if err := dec.Decode(&body); err != nil {
		return nil, nil, c.errQueryFailed(addr, err)
	}

	// No versions is not an error: return any warnings so the caller can
	// surface them.
	if len(body.Versions) == 0 {
		return nil, body.Warnings, nil
	}

	ret := make(map[string][]string, len(body.Versions))
	for _, v := range body.Versions {
		ret[v.Version] = v.Protocols
	}

	return ret, body.Warnings, nil
}

// PackageMeta returns metadata about a distribution package for a provider.
//
// The returned error will be one of the following:
//
//   - ErrPlatformNotSupported if the registry responds with 404 Not Found,
//     under the assumption that the caller previously checked that the provider
//     and version are valid.
//   - ErrProtocolNotSupported if the requested provider version's protocols are not
//     supported by this version of tofu.
//   - ErrUnauthorized if the registry responds with 401 or 403 status codes
//   - ErrQueryFailed for any other operational problem.
+func (c *registryClient) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + endpointPath, err := url.Parse(path.Join( + provider.Namespace, + provider.Type, + version.String(), + "download", + target.OS, + target.Arch, + )) + if err != nil { + // Should never happen because we're constructing this from + // already-validated components. + return PackageMeta{}, err + } + endpointURL := c.baseURL.ResolveReference(endpointPath) + + req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil) + if err != nil { + return PackageMeta{}, err + } + req = req.WithContext(ctx) + c.addHeadersToRequest(req.Request) + + resp, err := c.httpClient.Do(req) + if err != nil { + return PackageMeta{}, c.errQueryFailed(provider, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // Great! + case http.StatusNotFound: + return PackageMeta{}, ErrPlatformNotSupported{ + Provider: provider, + Version: version, + Platform: target, + } + case http.StatusUnauthorized, http.StatusForbidden: + return PackageMeta{}, c.errUnauthorized(provider.Hostname) + default: + return PackageMeta{}, c.errQueryFailed(provider, errors.New(resp.Status)) + } + + type SigningKeyList struct { + GPGPublicKeys []*SigningKey `json:"gpg_public_keys"` + } + type ResponseBody struct { + Protocols []string `json:"protocols"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + DownloadURL string `json:"download_url"` + SHA256Sum string `json:"shasum"` + + SHA256SumsURL string `json:"shasums_url"` + SHA256SumsSignatureURL string `json:"shasums_signature_url"` + + SigningKeys SigningKeyList `json:"signing_keys"` + } + var body ResponseBody + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&body); err != nil { + return PackageMeta{}, c.errQueryFailed(provider, err) + } + + var protoVersions VersionList + for _, versionStr := range body.Protocols { + v, err := 
ParseVersion(versionStr) + if err != nil { + return PackageMeta{}, c.errQueryFailed( + provider, + fmt.Errorf("registry response includes invalid version string %q: %w", versionStr, err), + ) + } + protoVersions = append(protoVersions, v) + } + protoVersions.Sort() + + // Verify that this version of tofu supports the providers' protocol + // version(s) + if len(protoVersions) > 0 { + supportedProtos := MeetingConstraints(SupportedPluginProtocols) + protoErr := ErrProtocolNotSupported{ + Provider: provider, + Version: version, + } + match := false + for _, version := range protoVersions { + if supportedProtos.Has(version) { + match = true + } + } + if !match { + // If the protocol version is not supported, try to find the closest + // matching version. + closest, err := c.findClosestProtocolCompatibleVersion(ctx, provider, version) + if err != nil { + return PackageMeta{}, err + } + protoErr.Suggestion = closest + return PackageMeta{}, protoErr + } + } + + if body.OS != target.OS || body.Arch != target.Arch { + return PackageMeta{}, fmt.Errorf("registry response to request for %s archive has incorrect target %s", target, Platform{body.OS, body.Arch}) + } + + downloadURL, err := url.Parse(body.DownloadURL) + if err != nil { + return PackageMeta{}, fmt.Errorf("registry response includes invalid download URL: %w", err) + } + downloadURL = resp.Request.URL.ResolveReference(downloadURL) + if downloadURL.Scheme != "http" && downloadURL.Scheme != "https" { + return PackageMeta{}, fmt.Errorf("registry response includes invalid download URL: must use http or https scheme") + } + + ret := PackageMeta{ + Provider: provider, + Version: version, + ProtocolVersions: protoVersions, + TargetPlatform: Platform{ + OS: body.OS, + Arch: body.Arch, + }, + Filename: body.Filename, + Location: PackageHTTPURL(downloadURL.String()), + // "Authentication" is populated below + } + + if len(body.SHA256Sum) != sha256.Size*2 { // *2 because it's hex-encoded + return PackageMeta{}, 
c.errQueryFailed( + provider, + fmt.Errorf("registry response includes invalid SHA256 hash %q: %w", body.SHA256Sum, err), + ) + } + + var checksum [sha256.Size]byte + _, err = hex.Decode(checksum[:], []byte(body.SHA256Sum)) + if err != nil { + return PackageMeta{}, c.errQueryFailed( + provider, + fmt.Errorf("registry response includes invalid SHA256 hash %q: %w", body.SHA256Sum, err), + ) + } + + shasumsURL, err := url.Parse(body.SHA256SumsURL) + if err != nil { + return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS URL: %w", err) + } + shasumsURL = resp.Request.URL.ResolveReference(shasumsURL) + if shasumsURL.Scheme != "http" && shasumsURL.Scheme != "https" { + return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS URL: must use http or https scheme") + } + document, err := c.getFile(shasumsURL) + if err != nil { + return PackageMeta{}, c.errQueryFailed( + provider, + fmt.Errorf("failed to retrieve authentication checksums for provider: %w", err), + ) + } + signatureURL, err := url.Parse(body.SHA256SumsSignatureURL) + if err != nil { + return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS signature URL: %w", err) + } + signatureURL = resp.Request.URL.ResolveReference(signatureURL) + if signatureURL.Scheme != "http" && signatureURL.Scheme != "https" { + return PackageMeta{}, fmt.Errorf("registry response includes invalid SHASUMS signature URL: must use http or https scheme") + } + signature, err := c.getFile(signatureURL) + if err != nil { + return PackageMeta{}, c.errQueryFailed( + provider, + fmt.Errorf("failed to retrieve cryptographic signature for provider: %w", err), + ) + } + + keys := make([]SigningKey, len(body.SigningKeys.GPGPublicKeys)) + for i, key := range body.SigningKeys.GPGPublicKeys { + keys[i] = *key + } + + ret.Authentication = PackageAuthenticationAll( + NewMatchingChecksumAuthentication(document, body.Filename, checksum), + NewArchiveChecksumAuthentication(ret.TargetPlatform, 
checksum), + NewSignatureAuthentication(ret, document, signature, keys, &provider), + ) + + return ret, nil +} + +// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. +func (c *registryClient) findClosestProtocolCompatibleVersion(ctx context.Context, provider addrs.Provider, version Version) (Version, error) { + var match Version + available, _, err := c.ProviderVersions(ctx, provider) + if err != nil { + return UnspecifiedVersion, err + } + + // extract the maps keys so we can make a sorted list of available versions. + versionList := make(VersionList, 0, len(available)) + for versionStr := range available { + v, err := ParseVersion(versionStr) + if err != nil { + return UnspecifiedVersion, ErrQueryFailed{ + Provider: provider, + Wrapped: fmt.Errorf("registry response includes invalid version string %q: %w", versionStr, err), + } + } + versionList = append(versionList, v) + } + versionList.Sort() // lowest precedence first, preserving order when equal precedence + + protoVersions := MeetingConstraints(SupportedPluginProtocols) +FindMatch: + // put the versions in increasing order of precedence + for index := len(versionList) - 1; index >= 0; index-- { // walk backwards to consider newer versions first + for _, protoStr := range available[versionList[index].String()] { + p, err := ParseVersion(protoStr) + if err != nil { + return UnspecifiedVersion, ErrQueryFailed{ + Provider: provider, + Wrapped: fmt.Errorf("registry response includes invalid protocol string %q: %w", protoStr, err), + } + } + if protoVersions.Has(p) { + match = versionList[index] + break FindMatch + } + } + } + return match, nil +} + +func (c *registryClient) addHeadersToRequest(req *http.Request) { + if c.creds != nil { + c.creds.PrepareRequest(req) + } + req.Header.Set(terraformVersionHeader, version.String()) +} + +func (c *registryClient) errQueryFailed(provider addrs.Provider, err error) error { + if err == context.Canceled { + // This one 
has a special error type so that callers can + // handle it in a different way. + return ErrRequestCanceled{} + } + return ErrQueryFailed{ + Provider: provider, + Wrapped: err, + } +} + +func (c *registryClient) errUnauthorized(hostname svchost.Hostname) error { + return ErrUnauthorized{ + Hostname: hostname, + HaveCredentials: c.creds != nil, + } +} + +func (c *registryClient) getFile(url *url.URL) ([]byte, error) { + resp, err := c.httpClient.Get(url.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned from %s", resp.Status, HostFromRequest(resp.Request)) + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return data, err + } + + return data, nil +} + +// configureDiscoveryRetry configures the number of retries the registry client +// will attempt for requests with retryable errors, like 502 status codes +func configureDiscoveryRetry() { + discoveryRetry = defaultRetry + + if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { + retry, err := strconv.Atoi(v) + if err == nil && retry > 0 { + discoveryRetry = retry + } + } +} + +func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { + if i > 0 { + logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") + } +} + +func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { + // Close the body per library instructions + if resp != nil { + resp.Body.Close() + } + + // Additional error detail: if we have a response, use the status code; + // if we have an error, use that; otherwise nothing. We will never have + // both response and error. + var errMsg string + if resp != nil { + errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, HostFromRequest(resp.Request)) + } else if err != nil { + errMsg = fmt.Sprintf(": %s", err) + } + + // This function is always called with numTries=RetryMax+1. 
If we made any + // retry attempts, include that in the error message. + if numTries > 1 { + return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s", + numTries, errMsg) + } + return resp, fmt.Errorf("the request failed, please try again later%s", errMsg) +} + +// HostFromRequest extracts host the same way net/http Request.Write would, +// accounting for empty Request.Host +func HostFromRequest(req *http.Request) string { + if req.Host != "" { + return req.Host + } + if req.URL != nil { + return req.URL.Host + } + + // this should never happen and if it does + // it will be handled as part of Request.Write() + // https://cs.opensource.google/go/go/+/refs/tags/go1.18.4:src/net/http/request.go;l=574 + return "" +} + +// configureRequestTimeout configures the registry client request timeout from +// environment variables +func configureRequestTimeout() { + requestTimeout = defaultRequestTimeout + + if v := os.Getenv(registryClientTimeoutEnvName); v != "" { + timeout, err := strconv.Atoi(v) + if err == nil && timeout > 0 { + requestTimeout = time.Duration(timeout) * time.Second + } + } +} diff --git a/pkg/getproviders/registry_client_test.go b/pkg/getproviders/registry_client_test.go new file mode 100644 index 00000000000..1b0682cca58 --- /dev/null +++ b/pkg/getproviders/registry_client_test.go @@ -0,0 +1,458 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/apparentlymart/go-versions/versions" + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" + disco "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestConfigureDiscoveryRetry(t *testing.T) { + t.Run("default retry", func(t *testing.T) { + if discoveryRetry != defaultRetry { + t.Fatalf("expected retry %q, got %q", defaultRetry, discoveryRetry) + } + + rc := newRegistryClient(nil, nil) + if rc.httpClient.RetryMax != defaultRetry { + t.Fatalf("expected client retry %q, got %q", + defaultRetry, rc.httpClient.RetryMax) + } + }) + + t.Run("configured retry", func(t *testing.T) { + defer func() { + discoveryRetry = defaultRetry + }() + t.Setenv(registryDiscoveryRetryEnvName, "2") + + configureDiscoveryRetry() + expected := 2 + if discoveryRetry != expected { + t.Fatalf("expected retry %q, got %q", + expected, discoveryRetry) + } + + rc := newRegistryClient(nil, nil) + if rc.httpClient.RetryMax != expected { + t.Fatalf("expected client retry %q, got %q", + expected, rc.httpClient.RetryMax) + } + }) +} + +func TestConfigureRegistryClientTimeout(t *testing.T) { + t.Run("default timeout", func(t *testing.T) { + if requestTimeout != defaultRequestTimeout { + t.Fatalf("expected timeout %q, got %q", + defaultRequestTimeout.String(), requestTimeout.String()) + } + + rc := newRegistryClient(nil, nil) + if rc.httpClient.HTTPClient.Timeout != defaultRequestTimeout { + t.Fatalf("expected client timeout %q, got %q", + defaultRequestTimeout.String(), rc.httpClient.HTTPClient.Timeout.String()) + } + }) + + t.Run("configured timeout", func(t *testing.T) { + defer func() { + requestTimeout = defaultRequestTimeout + }() + t.Setenv(registryClientTimeoutEnvName, "20") + + configureRequestTimeout() + expected := 
20 * time.Second + if requestTimeout != expected { + t.Fatalf("expected timeout %q, got %q", + expected, requestTimeout.String()) + } + + rc := newRegistryClient(nil, nil) + if rc.httpClient.HTTPClient.Timeout != expected { + t.Fatalf("expected client timeout %q, got %q", + expected, rc.httpClient.HTTPClient.Timeout.String()) + } + }) +} + +// testRegistryServices starts up a local HTTP server running a fake provider registry +// service and returns a service discovery object pre-configured to consider +// the host "example.com" to be served by the fake registry service. +// +// The returned discovery object also knows the hostname "not.example.com" +// which does not have a provider registry at all and "too-new.example.com" +// which has a "providers.v99" service that is inoperable but could be useful +// to test the error reporting for detecting an unsupported protocol version. +// It also knows fails.example.com but it refers to an endpoint that doesn't +// correctly speak HTTP, to simulate a protocol error. +// +// The second return value is a function to call at the end of a test function +// to shut down the test server. After you call that function, the discovery +// object becomes useless. +func testRegistryServices(t *testing.T) (services *disco.Disco, baseURL string, cleanup func()) { + server := httptest.NewServer(http.HandlerFunc(fakeRegistryHandler)) + + services = disco.New() + services.ForceHostServices(svchost.Hostname("example.com"), map[string]interface{}{ + "providers.v1": server.URL + "/providers/v1/", + }) + services.ForceHostServices(svchost.Hostname("not.example.com"), map[string]interface{}{}) + services.ForceHostServices(svchost.Hostname("too-new.example.com"), map[string]interface{}{ + // This service doesn't actually work; it's here only to be + // detected as "too new" by the discovery logic. 
+ "providers.v99": server.URL + "/providers/v99/", + }) + services.ForceHostServices(svchost.Hostname("fails.example.com"), map[string]interface{}{ + "providers.v1": server.URL + "/fails-immediately/", + }) + + // We'll also permit registry.opentofu.org here just because it's our + // default and has some unique features that are not allowed on any other + // hostname. It behaves the same as example.com, which should be preferred + // if you're not testing something specific to the default registry in order + // to ensure that most things are hostname-agnostic. + services.ForceHostServices(svchost.Hostname("registry.opentofu.org"), map[string]interface{}{ + "providers.v1": server.URL + "/providers/v1/", + }) + + return services, server.URL, func() { + server.Close() + } +} + +// testRegistrySource is a wrapper around testServices that uses the created +// discovery object to produce a Source instance that is ready to use with the +// fake registry services. +// +// As with testServices, the second return value is a function to call at the end +// of your test in order to shut down the test server. +func testRegistrySource(t *testing.T) (source *RegistrySource, baseURL string, cleanup func()) { + services, baseURL, close := testRegistryServices(t) + source = NewRegistrySource(services) + return source, baseURL, close +} + +func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { + path := req.URL.EscapedPath() + if strings.HasPrefix(path, "/fails-immediately/") { + // Here we take over the socket and just close it immediately, to + // simulate one possible way a server might not be an HTTP server. + hijacker, ok := resp.(http.Hijacker) + if !ok { + // Not hijackable, so we'll just fail normally. + // If this happens, tests relying on this will fail. 
+ resp.WriteHeader(500) + resp.Write([]byte(`cannot hijack`)) + return + } + conn, _, err := hijacker.Hijack() + if err != nil { + resp.WriteHeader(500) + resp.Write([]byte(`hijack failed`)) + return + } + conn.Close() + return + } + + if strings.HasPrefix(path, "/pkg/") { + switch path { + case "/pkg/awesomesauce/happycloud_1.2.0.zip": + resp.Write([]byte("some zip file")) + case "/pkg/awesomesauce/happycloud_1.2.0_SHA256SUMS": + resp.Write([]byte("000000000000000000000000000000000000000000000000000000000000f00d happycloud_1.2.0.zip\n000000000000000000000000000000000000000000000000000000000000face happycloud_1.2.0_face.zip\n")) + case "/pkg/awesomesauce/happycloud_1.2.0_SHA256SUMS.sig": + resp.Write([]byte("GPG signature")) + default: + resp.WriteHeader(404) + resp.Write([]byte("unknown package file download")) + } + return + } + + if !strings.HasPrefix(path, "/providers/v1/") { + resp.WriteHeader(404) + resp.Write([]byte(`not a provider registry endpoint`)) + return + } + + pathParts := strings.Split(path, "/")[3:] + if len(pathParts) < 3 { + resp.WriteHeader(404) + resp.Write([]byte(`unexpected number of path parts`)) + return + } + log.Printf("[TRACE] fake provider registry request for %#v", pathParts) + + if pathParts[2] == "versions" { + if len(pathParts) != 3 { + resp.WriteHeader(404) + resp.Write([]byte(`extraneous path parts`)) + return + } + + switch pathParts[0] + "/" + pathParts[1] { + case "awesomesauce/happycloud": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // Note that these version numbers are intentionally misordered + // so we can test that the client-side code places them in the + // correct order (lowest precedence first). 
+ resp.Write([]byte(`{"versions":[{"version":"0.1.0","protocols":["1.0"]},{"version":"2.0.0","protocols":["99.0"]},{"version":"1.2.0","protocols":["5.0"]}, {"version":"1.0.0","protocols":["5.0"]}]}`)) + case "weaksauce/unsupported-protocol": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[{"version":"1.0.0","protocols":["0.1"]}]}`)) + case "weaksauce/protocol-six": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[{"version":"1.0.0","protocols":["6.0"]}]}`)) + case "weaksauce/no-versions": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[],"warnings":["this provider is weaksauce"]}`)) + case "-/legacy": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // This response is used for testing LookupLegacyProvider + resp.Write([]byte(`{"id":"legacycorp/legacy"}`)) + case "-/moved": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // This response is used for testing LookupLegacyProvider + resp.Write([]byte(`{"id":"hashicorp/moved","moved_to":"acme/moved"}`)) + case "-/changetype": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // This (unrealistic) response is used for error handling code coverage + resp.Write([]byte(`{"id":"legacycorp/newtype"}`)) + case "-/invalid": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // This (unrealistic) response is used for error handling code coverage + resp.Write([]byte(`{"id":"some/invalid/id/string"}`)) + default: + resp.WriteHeader(404) + resp.Write([]byte(`unknown namespace or provider type`)) + } + return + } + + if len(pathParts) == 6 && pathParts[3] == "download" { + switch pathParts[0] + "/" + pathParts[1] { + case "awesomesauce/happycloud": + if pathParts[4] == "nonexist" { + resp.WriteHeader(404) + 
resp.Write([]byte(`unsupported OS`)) + return + } + var protocols []string + version := pathParts[2] + switch version { + case "0.1.0": + protocols = []string{"1.0"} + case "2.0.0": + protocols = []string{"99.0"} + default: + protocols = []string{"5.0"} + } + + body := map[string]interface{}{ + "protocols": protocols, + "os": pathParts[4], + "arch": pathParts[5], + "filename": "happycloud_" + version + ".zip", + "shasum": "000000000000000000000000000000000000000000000000000000000000f00d", + "download_url": "/pkg/awesomesauce/happycloud_" + version + ".zip", + "shasums_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS", + "shasums_signature_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS.sig", + "signing_keys": map[string]interface{}{ + "gpg_public_keys": []map[string]interface{}{ + { + "ascii_armor": TestingPublicKey, + }, + }, + }, + } + enc, err := json.Marshal(body) + if err != nil { + resp.WriteHeader(500) + resp.Write([]byte("failed to encode body")) + } + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write(enc) + default: + resp.WriteHeader(404) + resp.Write([]byte(`unknown namespace/provider/version/architecture`)) + } + return + } + + resp.WriteHeader(404) + resp.Write([]byte(`unrecognized path scheme`)) +} + +func TestProviderVersions(t *testing.T) { + source, _, close := testRegistrySource(t) + defer close() + + tests := []struct { + provider addrs.Provider + wantVersions map[string][]string + wantErr string + }{ + { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + map[string][]string{ + "0.1.0": {"1.0"}, + "1.0.0": {"5.0"}, + "1.2.0": {"5.0"}, + "2.0.0": {"99.0"}, + }, + ``, + }, + { + addrs.MustParseProviderSourceString("example.com/weaksauce/no-versions"), + nil, + ``, + }, + { + addrs.MustParseProviderSourceString("example.com/nonexist/nonexist"), + nil, + `provider registry example.com does not have a provider named example.com/nonexist/nonexist`, + }, + 
} + for _, test := range tests { + t.Run(test.provider.String(), func(t *testing.T) { + client, err := source.registryClient(test.provider.Hostname) + if err != nil { + t.Fatal(err) + } + + gotVersions, _, err := client.ProviderVersions(context.Background(), test.provider) + + if err != nil { + if test.wantErr == "" { + t.Fatalf("wrong error\ngot: %s\nwant: ", err.Error()) + } + if got, want := err.Error(), test.wantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } + + if test.wantErr != "" { + t.Fatalf("wrong error\ngot: \nwant: %s", test.wantErr) + } + + if diff := cmp.Diff(test.wantVersions, gotVersions); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestFindClosestProtocolCompatibleVersion(t *testing.T) { + source, _, close := testRegistrySource(t) + defer close() + + tests := map[string]struct { + provider addrs.Provider + version Version + wantSuggestion Version + wantErr string + }{ + "pinned version too old": { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + MustParseVersion("0.1.0"), + MustParseVersion("1.2.0"), + ``, + }, + "pinned version too new": { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + MustParseVersion("2.0.0"), + MustParseVersion("1.2.0"), + ``, + }, + // This should not actually happen, the function is only meant to be + // called when the requested provider version is not supported + "pinned version just right": { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + MustParseVersion("1.2.0"), + MustParseVersion("1.2.0"), + ``, + }, + "nonexisting provider": { + addrs.MustParseProviderSourceString("example.com/nonexist/nonexist"), + MustParseVersion("1.2.0"), + versions.Unspecified, + `provider registry example.com does not have a provider named example.com/nonexist/nonexist`, + }, + "versionless provider": { + 
addrs.MustParseProviderSourceString("example.com/weaksauce/no-versions"), + MustParseVersion("1.2.0"), + versions.Unspecified, + ``, + }, + "unsupported provider protocol": { + addrs.MustParseProviderSourceString("example.com/weaksauce/unsupported-protocol"), + MustParseVersion("1.0.0"), + versions.Unspecified, + ``, + }, + "provider protocol six": { + addrs.MustParseProviderSourceString("example.com/weaksauce/protocol-six"), + MustParseVersion("1.0.0"), + MustParseVersion("1.0.0"), + ``, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + client, err := source.registryClient(test.provider.Hostname) + if err != nil { + t.Fatal(err) + } + + got, err := client.findClosestProtocolCompatibleVersion(context.Background(), test.provider, test.version) + + if err != nil { + if test.wantErr == "" { + t.Fatalf("wrong error\ngot: %s\nwant: ", err.Error()) + } + if got, want := err.Error(), test.wantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } + + if test.wantErr != "" { + t.Fatalf("wrong error\ngot: \nwant: %s", test.wantErr) + } + + fmt.Printf("Got: %s, Want: %s\n", got, test.wantSuggestion) + + if !got.Same(test.wantSuggestion) { + t.Fatalf("wrong result\ngot: %s\nwant: %s", got.String(), test.wantSuggestion.String()) + } + }) + } +} diff --git a/pkg/getproviders/registry_source.go b/pkg/getproviders/registry_source.go new file mode 100644 index 00000000000..e18e3a8bcf6 --- /dev/null +++ b/pkg/getproviders/registry_source.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "fmt" + + svchost "github.com/hashicorp/terraform-svchost" + disco "github.com/hashicorp/terraform-svchost/disco" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// RegistrySource is a Source that knows how to find and install providers from +// their originating provider registries. +type RegistrySource struct { + services *disco.Disco +} + +var _ Source = (*RegistrySource)(nil) + +// NewRegistrySource creates and returns a new source that will install +// providers from their originating provider registries. +func NewRegistrySource(services *disco.Disco) *RegistrySource { + return &RegistrySource{ + services: services, + } +} + +// AvailableVersions returns all of the versions available for the provider +// with the given address, or an error if that result cannot be determined. +// +// If the request fails, the returned error might be an value of +// ErrHostNoProviders, ErrHostUnreachable, ErrUnauthenticated, +// ErrProviderNotKnown, or ErrQueryFailed. Callers must be defensive and +// expect errors of other types too, to allow for future expansion. +func (s *RegistrySource) AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) { + client, err := s.registryClient(provider.Hostname) + if err != nil { + return nil, nil, err + } + + versionsResponse, warnings, err := client.ProviderVersions(ctx, provider) + if err != nil { + return nil, nil, err + } + + if len(versionsResponse) == 0 { + return nil, warnings, nil + } + + // We ignore protocols here because our goal is to find out which versions + // are available _at all_. Which ones are compatible with the current + // OpenTofu becomes relevant only once we've selected one, at which point + // we'll return an error if the selected one is incompatible. 
+ // + // We intentionally produce an error on incompatibility, rather than + // silently ignoring an incompatible version, in order to give the user + // explicit feedback about why their selection wasn't valid and allow them + // to decide whether to fix that by changing the selection or by some other + // action such as upgrading OpenTofu, using a different OS to run + // OpenTofu, etc. Changes that affect compatibility are considered breaking + // changes from a provider API standpoint, so provider teams should change + // compatibility only in new major versions. + ret := make(VersionList, 0, len(versionsResponse)) + for str := range versionsResponse { + v, err := ParseVersion(str) + if err != nil { + return nil, nil, ErrQueryFailed{ + Provider: provider, + Wrapped: fmt.Errorf("registry response includes invalid version string %q: %w", str, err), + } + } + ret = append(ret, v) + } + ret.Sort() // lowest precedence first, preserving order when equal precedence + return ret, warnings, nil +} + +// PackageMeta returns metadata about the location and capabilities of +// a distribution package for a particular provider at a particular version +// targeting a particular platform. +// +// Callers of PackageMeta should first call AvailableVersions and pass +// one of the resulting versions to this function. This function cannot +// distinguish between a version that is not available and an unsupported +// target platform, so if it encounters either case it will return an error +// suggesting that the target platform isn't supported under the assumption +// that the caller already checked that the version is available at all. +// +// To find a package suitable for the platform where the provider installation +// process is running, set the "target" argument to +// getproviders.CurrentPlatform. 
+// +// If the request fails, the returned error might be an value of +// ErrHostNoProviders, ErrHostUnreachable, ErrUnauthenticated, +// ErrPlatformNotSupported, or ErrQueryFailed. Callers must be defensive and +// expect errors of other types too, to allow for future expansion. +func (s *RegistrySource) PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) { + client, err := s.registryClient(provider.Hostname) + if err != nil { + return PackageMeta{}, err + } + + return client.PackageMeta(ctx, provider, version, target) +} + +func (s *RegistrySource) registryClient(hostname svchost.Hostname) (*registryClient, error) { + host, err := s.services.Discover(hostname) + if err != nil { + return nil, ErrHostUnreachable{ + Hostname: hostname, + Wrapped: err, + } + } + + url, err := host.ServiceURL("providers.v1") + switch err := err.(type) { + case nil: + // okay! We'll fall through and return below. + case *disco.ErrServiceNotProvided: + return nil, ErrHostNoProviders{ + Hostname: hostname, + } + case *disco.ErrVersionNotSupported: + return nil, ErrHostNoProviders{ + Hostname: hostname, + HasOtherVersion: true, + } + default: + return nil, ErrHostUnreachable{ + Hostname: hostname, + Wrapped: err, + } + } + + // Check if we have credentials configured for this hostname. + creds, err := s.services.CredentialsForHost(hostname) + if err != nil { + // This indicates that a credentials helper failed, which means we + // can't do anything better than just pass through the helper's + // own error message. 
+ return nil, fmt.Errorf("failed to retrieve credentials for %s: %w", hostname, err) + } + + return newRegistryClient(url, creds), nil +} + +func (s *RegistrySource) ForDisplay(provider addrs.Provider) string { + return fmt.Sprintf("registry %s", provider.Hostname.ForDisplay()) +} diff --git a/pkg/getproviders/registry_source_test.go b/pkg/getproviders/registry_source_test.go new file mode 100644 index 00000000000..dff036f0a3c --- /dev/null +++ b/pkg/getproviders/registry_source_test.go @@ -0,0 +1,250 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + "fmt" + "regexp" + "strings" + "testing" + + tfaddr "github.com/opentofu/registry-address" + + "github.com/apparentlymart/go-versions/versions" + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestSourceAvailableVersions(t *testing.T) { + source, baseURL, close := testRegistrySource(t) + defer close() + + tests := []struct { + provider string + wantVersions []string + wantErr string + }{ + // These test cases are relying on behaviors of the fake provider + // registry server implemented in registry_client_test.go. 
+ { + "example.com/awesomesauce/happycloud", + []string{"0.1.0", "1.0.0", "1.2.0", "2.0.0"}, + ``, + }, + { + "example.com/weaksauce/no-versions", + nil, + ``, // having no versions is not an error, it's just odd + }, + { + "example.com/nonexist/nonexist", + nil, + `provider registry example.com does not have a provider named example.com/nonexist/nonexist`, + }, + { + "not.example.com/foo/bar", + nil, + `host not.example.com does not offer a OpenTofu provider registry`, + }, + { + "too-new.example.com/foo/bar", + nil, + `host too-new.example.com does not support the provider registry protocol required by this OpenTofu version, but may be compatible with a different OpenTofu version`, + }, + { + "fails.example.com/foo/bar", + nil, + `could not query provider registry for fails.example.com/foo/bar: the request failed after 2 attempts, please try again later: Get "` + baseURL + `/fails-immediately/foo/bar/versions": EOF`, + }, + } + + for _, test := range tests { + t.Run(test.provider, func(t *testing.T) { + provider := addrs.MustParseProviderSourceString(test.provider) + gotVersions, _, err := source.AvailableVersions(context.Background(), provider) + + if err != nil { + if test.wantErr == "" { + t.Fatalf("wrong error\ngot: %s\nwant: ", err.Error()) + } + if got, want := err.Error(), test.wantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } + + if test.wantErr != "" { + t.Fatalf("wrong error\ngot: \nwant: %s", test.wantErr) + } + + var gotVersionsStr []string + if gotVersions != nil { + gotVersionsStr = make([]string, len(gotVersions)) + for i, v := range gotVersions { + gotVersionsStr[i] = v.String() + } + } + + if diff := cmp.Diff(test.wantVersions, gotVersionsStr); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + } +} + +func TestSourceAvailableVersions_warnings(t *testing.T) { + source, _, close := testRegistrySource(t) + defer close() + + provider := 
addrs.MustParseProviderSourceString("example.com/weaksauce/no-versions") + _, warnings, err := source.AvailableVersions(context.Background(), provider) + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(warnings) != 1 { + t.Fatalf("wrong number of warnings. Expected 1, got %d", len(warnings)) + } + +} + +func TestSourcePackageMeta(t *testing.T) { + source, baseURL, close := testRegistrySource(t) + defer close() + + validMeta := PackageMeta{ + Provider: addrs.NewProvider( + svchost.Hostname("example.com"), "awesomesauce", "happycloud", + ), + Version: versions.MustParseVersion("1.2.0"), + ProtocolVersions: VersionList{versions.MustParseVersion("5.0.0")}, + TargetPlatform: Platform{"linux", "amd64"}, + Filename: "happycloud_1.2.0.zip", + Location: PackageHTTPURL(baseURL + "/pkg/awesomesauce/happycloud_1.2.0.zip"), + } + validMeta.Authentication = PackageAuthenticationAll( + NewMatchingChecksumAuthentication( + []byte("000000000000000000000000000000000000000000000000000000000000f00d happycloud_1.2.0.zip\n000000000000000000000000000000000000000000000000000000000000face happycloud_1.2.0_face.zip\n"), + "happycloud_1.2.0.zip", + [32]byte{30: 0xf0, 31: 0x0d}, + ), + NewArchiveChecksumAuthentication(Platform{"linux", "amd64"}, [32]byte{30: 0xf0, 31: 0x0d}), + NewSignatureAuthentication( + validMeta, + []byte("000000000000000000000000000000000000000000000000000000000000f00d happycloud_1.2.0.zip\n000000000000000000000000000000000000000000000000000000000000face happycloud_1.2.0_face.zip\n"), + []byte("GPG signature"), + []SigningKey{ + {ASCIIArmor: TestingPublicKey}, + }, + &tfaddr.Provider{Hostname: "example.com", Namespace: "awesomesauce", Type: "happycloud"}, + ), + ) + + tests := []struct { + provider string + version string + os, arch string + want PackageMeta + wantHashes []Hash + wantErr string + }{ + // These test cases are relying on behaviors of the fake provider + // registry server implemented in registry_client_test.go. 
+ { + "example.com/awesomesauce/happycloud", + "1.2.0", + "linux", "amd64", + validMeta, + []Hash{ + "zh:000000000000000000000000000000000000000000000000000000000000f00d", + "zh:000000000000000000000000000000000000000000000000000000000000face", + }, + ``, + }, + { + "example.com/awesomesauce/happycloud", + "1.2.0", + "nonexist", "amd64", + PackageMeta{}, + nil, + `provider example.com/awesomesauce/happycloud 1.2.0 is not available for nonexist_amd64`, + }, + { + "not.example.com/awesomesauce/happycloud", + "1.2.0", + "linux", "amd64", + PackageMeta{}, + nil, + `host not.example.com does not offer a OpenTofu provider registry`, + }, + { + "too-new.example.com/awesomesauce/happycloud", + "1.2.0", + "linux", "amd64", + PackageMeta{}, + nil, + `host too-new.example.com does not support the provider registry protocol required by this OpenTofu version, but may be compatible with a different OpenTofu version`, + }, + { + "fails.example.com/awesomesauce/happycloud", + "1.2.0", + "linux", "amd64", + PackageMeta{}, + nil, + `could not query provider registry for fails.example.com/awesomesauce/happycloud: the request failed after 2 attempts, please try again later: Get "http://placeholder-origin/fails-immediately/awesomesauce/happycloud/1.2.0/download/linux/amd64": EOF`, + }, + } + + // Sometimes error messages contain specific HTTP endpoint URLs, but + // since our test server is on a random port we'd not be able to + // consistently match those. Instead, we'll normalize the URLs. + urlPattern := regexp.MustCompile(`http://[^/]+/`) + + cmpOpts := cmp.Comparer(Version.Same) + + for _, test := range tests { + t.Run(fmt.Sprintf("%s for %s_%s", test.provider, test.os, test.arch), func(t *testing.T) { + // TEMP: We don't yet have a function for parsing provider + // source addresses so we'll just fake it in here for now. 
+ parts := strings.Split(test.provider, "/") + providerAddr := addrs.Provider{ + Hostname: svchost.Hostname(parts[0]), + Namespace: parts[1], + Type: parts[2], + } + + version := versions.MustParseVersion(test.version) + + got, err := source.PackageMeta(context.Background(), providerAddr, version, Platform{test.os, test.arch}) + + if err != nil { + if test.wantErr == "" { + t.Fatalf("wrong error\ngot: %s\nwant: ", err.Error()) + } + gotErr := urlPattern.ReplaceAllLiteralString(err.Error(), "http://placeholder-origin/") + if got, want := gotErr, test.wantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } + + if test.wantErr != "" { + t.Fatalf("wrong error\ngot: \nwant: %s", test.wantErr) + } + + if diff := cmp.Diff(got, test.want, cmpOpts); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + if diff := cmp.Diff(test.wantHashes, got.AcceptableHashes()); diff != "" { + t.Errorf("wrong AcceptableHashes result\n%s", diff) + } + }) + } + +} diff --git a/pkg/getproviders/source.go b/pkg/getproviders/source.go new file mode 100644 index 00000000000..e58991424d0 --- /dev/null +++ b/pkg/getproviders/source.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// A Source can query a particular source for information about providers +// that are available to install. 
+type Source interface { + AvailableVersions(ctx context.Context, provider addrs.Provider) (VersionList, Warnings, error) + PackageMeta(ctx context.Context, provider addrs.Provider, version Version, target Platform) (PackageMeta, error) + ForDisplay(provider addrs.Provider) string +} diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/invalid @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip new file mode 100644 index 00000000000..68a5502719d --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip @@ -0,0 +1,5 @@ +This is just a placeholder file for discovery testing, not a real provider package. + +This file is what we'd find for mirrors using the "packed" mirror layout, +where the mirror maintainer can just download the packages from upstream and +have Terraform unpack them automatically when installing. diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror-invalid/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/invalid b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/invalid new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/invalid @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip new file mode 100644 index 00000000000..68a5502719d --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip @@ -0,0 +1,5 @@ +This is just a placeholder file for discovery testing, not a real provider package. + +This file is what we'd find for mirrors using the "packed" mirror layout, +where the mirror maintainer can just download the packages from upstream and +have Terraform unpack them automatically when installing. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt b/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt new file mode 100644 index 00000000000..8a1c7c32741 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt @@ -0,0 +1,6 @@ +Provider plugin packages are allowed to include other files such as any static +data they need to operate, or possibly source files if the provider is written +in an interpreted programming language. + +This extra file is here just to make sure that extra files don't cause any +misbehavior during local discovery. 
diff --git a/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud b/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/getproviders/testdata/filesystem-mirror/tfe.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar b/pkg/getproviders/testdata/search-local-directory/symlinks/real/example.com/foo/bar/1.0.0/linux_amd64/terraform-provider-bar new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/getproviders/testdata/search-local-directory/symlinks/real/example.net b/pkg/getproviders/testdata/search-local-directory/symlinks/real/example.net new file mode 120000 index 00000000000..caa12a8fb3e --- /dev/null +++ b/pkg/getproviders/testdata/search-local-directory/symlinks/real/example.net @@ -0,0 +1 @@ +example.com \ No newline at end of file diff --git a/pkg/getproviders/testdata/search-local-directory/symlinks/symlink b/pkg/getproviders/testdata/search-local-directory/symlinks/symlink new file mode 120000 index 00000000000..ac558a3e1bf --- /dev/null +++ b/pkg/getproviders/testdata/search-local-directory/symlinks/symlink @@ -0,0 +1 @@ +real \ No newline at end of file diff --git a/pkg/getproviders/types.go b/pkg/getproviders/types.go new file mode 100644 index 00000000000..4759b30aa8c --- /dev/null +++ b/pkg/getproviders/types.go @@ -0,0 +1,563 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "fmt" + "runtime" + "sort" + "strings" + + "github.com/apparentlymart/go-versions/versions" + "github.com/apparentlymart/go-versions/versions/constraints" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Version represents a particular single version of a provider. +type Version = versions.Version + +// UnspecifiedVersion is the zero value of Version, representing the absence +// of a version number. +var UnspecifiedVersion Version = versions.Unspecified + +// VersionList represents a list of versions. It is a []Version with some +// extra methods for convenient filtering. +type VersionList = versions.List + +// VersionSet represents a set of versions, usually describing the acceptable +// versions that can be selected under a particular version constraint provided +// by the end-user. +type VersionSet = versions.Set + +// VersionConstraints represents a set of version constraints, which can +// define the membership of a VersionSet by exclusion. +type VersionConstraints = constraints.IntersectionSpec + +// Warnings represents a list of warnings returned by a Registry source. +type Warnings = []string + +// Requirements gathers together requirements for many different providers +// into a single data structure, as a convenient way to represent the full +// set of requirements for a particular configuration or state or both. +// +// If an entry in a Requirements has a zero-length VersionConstraints then +// that indicates that the provider is required but that any version is +// acceptable. That's different than a provider being absent from the map +// altogether, which means that it is not required at all. +type Requirements map[addrs.Provider]VersionConstraints + +// Merge takes the requirements in the receiver and the requirements in the +// other given value and produces a new set of requirements that combines +// all of the requirements of both. 
+// +// The resulting requirements will permit only selections that both of the +// source requirements would've allowed. +func (r Requirements) Merge(other Requirements) Requirements { + ret := make(Requirements) + for addr, constraints := range r { + ret[addr] = constraints + } + for addr, constraints := range other { + ret[addr] = append(ret[addr], constraints...) + } + return ret +} + +// Selections gathers together version selections for many different providers. +// +// This is the result of provider installation: a specific version selected +// for each provider given in the requested Requirements, selected based on +// the given version constraints. +type Selections map[addrs.Provider]Version + +// ParseVersion parses a "semver"-style version string into a Version value, +// which is the version syntax we use for provider versions. +func ParseVersion(str string) (Version, error) { + return versions.ParseVersion(str) +} + +// MustParseVersion is a variant of ParseVersion that panics if it encounters +// an error while parsing. +func MustParseVersion(str string) Version { + ret, err := ParseVersion(str) + if err != nil { + panic(err) + } + return ret +} + +// ParseVersionConstraints parses a "Ruby-like" version constraint string +// into a VersionConstraints value. +func ParseVersionConstraints(str string) (VersionConstraints, error) { + return constraints.ParseRubyStyleMulti(str) +} + +// MustParseVersionConstraints is a variant of ParseVersionConstraints that +// panics if it encounters an error while parsing. +func MustParseVersionConstraints(str string) VersionConstraints { + ret, err := ParseVersionConstraints(str) + if err != nil { + panic(err) + } + return ret +} + +// MeetingConstraints returns a version set that contains all of the versions +// that meet the given constraints, specified using the Spec type from the +// constraints package. 
+func MeetingConstraints(vc VersionConstraints) VersionSet { + return versions.MeetingConstraints(vc) +} + +// Platform represents a target platform that a provider is or might be +// available for. +type Platform struct { + OS, Arch string +} + +func (p Platform) String() string { + return p.OS + "_" + p.Arch +} + +// LessThan returns true if the receiver should sort before the other given +// Platform in an ordered list of platforms. +// +// The ordering is lexical first by OS and then by Architecture. +// This ordering is primarily just to ensure that results of +// functions in this package will be deterministic. The ordering is not +// intended to have any semantic meaning and is subject to change in future. +func (p Platform) LessThan(other Platform) bool { + switch { + case p.OS != other.OS: + return p.OS < other.OS + default: + return p.Arch < other.Arch + } +} + +// ParsePlatform parses a string representation of a platform, like +// "linux_amd64", or returns an error if the string is not valid. +func ParsePlatform(str string) (Platform, error) { + parts := strings.Split(str, "_") + if len(parts) != 2 { + return Platform{}, fmt.Errorf("must be two words separated by an underscore") + } + + os, arch := parts[0], parts[1] + if strings.ContainsAny(os, " \t\n\r") { + return Platform{}, fmt.Errorf("OS portion must not contain whitespace") + } + if strings.ContainsAny(arch, " \t\n\r") { + return Platform{}, fmt.Errorf("architecture portion must not contain whitespace") + } + + return Platform{ + OS: os, + Arch: arch, + }, nil +} + +// CurrentPlatform is the platform where the current program is running. +// +// If attempting to install providers for use on the same system where the +// installation process is running, this is the right platform to use. +var CurrentPlatform = Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, +} + +// PackageMeta represents the metadata related to a particular downloadable +// provider package targeting a single platform. 
+// +// Package getproviders does no signature verification or protocol version +// compatibility checking of its own. A caller receiving a PackageMeta must +// verify that it has a correct signature and supports a protocol version +// accepted by the current version of OpenTofu before trying to use the +// described package. +type PackageMeta struct { + Provider addrs.Provider + Version Version + + ProtocolVersions VersionList + TargetPlatform Platform + + Filename string + Location PackageLocation + + // Authentication, if non-nil, is a request from the source that produced + // this meta for verification of the target package after it has been + // retrieved from the indicated Location. + // + // Different sources will support different authentication strategies -- + // or possibly no strategies at all -- depending on what metadata they + // have available to them, such as checksums provided out-of-band by the + // original package author, expected signing keys, etc. + // + // If Authentication is nil then no authentication is requested. + // This is likely appropriate only for packages that are already available + // on the local system. + Authentication PackageAuthentication +} + +// LessThan returns true if the receiver should sort before the given other +// PackageMeta in a sorted list of PackageMeta. +// +// Sorting preference is given first to the provider address, then to the +// target platform, and then to the version number (using semver precedence). +// Packages that differ only in semver build metadata have no defined +// precedence and so will always return false. +// +// This ordering is primarily just to maximize the chance that results of +// functions in this package will be deterministic. The ordering is not +// intended to have any semantic meaning and is subject to change in future. 
+func (m PackageMeta) LessThan(other PackageMeta) bool { + switch { + case m.Provider != other.Provider: + return m.Provider.LessThan(other.Provider) + case m.TargetPlatform != other.TargetPlatform: + return m.TargetPlatform.LessThan(other.TargetPlatform) + case m.Version != other.Version: + return m.Version.LessThan(other.Version) + default: + return false + } +} + +// UnpackedDirectoryPath determines the path under the given base +// directory where SearchLocalDirectory or the FilesystemMirrorSource would +// expect to find an unpacked copy of the receiving PackageMeta. +// +// The result always uses forward slashes as path separator, even on Windows, +// to produce a consistent result on all platforms. Windows accepts both +// direction of slash as long as each individual path string is self-consistent. +func (m PackageMeta) UnpackedDirectoryPath(baseDir string) string { + return UnpackedDirectoryPathForPackage(baseDir, m.Provider, m.Version, m.TargetPlatform) +} + +// PackedFilePath determines the path under the given base +// directory where SearchLocalDirectory or the FilesystemMirrorSource would +// expect to find a packed copy (a .zip archive) of the receiving PackageMeta. +// +// The result always uses forward slashes as path separator, even on Windows, +// to produce a consistent result on all platforms. Windows accepts both +// direction of slash as long as each individual path string is self-consistent. +func (m PackageMeta) PackedFilePath(baseDir string) string { + return PackedFilePathForPackage(baseDir, m.Provider, m.Version, m.TargetPlatform) +} + +// AcceptableHashes returns a set of hashes that could be recorded for +// comparison to future results for the same provider version, to implement a +// "trust on first use" scheme. 
+// +// The AcceptableHashes result is a platform-agnostic set of hashes, with the +// intent that in most cases it will be used as an additional cross-check in +// addition to a platform-specific hash check made during installation. However, +// there are some situations (such as verifying an already-installed package +// that's on local disk) where OpenTofu would check only against the results +// of this function, meaning that it would in principle accept another +// platform's package as a substitute for the correct platform. That's a +// pragmatic compromise to allow lock files derived from the result of this +// method to be portable across platforms. +// +// Callers of this method should typically also verify the package using the +// object in the Authentication field, and consider how much trust to give +// the result of this method depending on the authentication result: an +// unauthenticated result or one that only verified a checksum could be +// considered less trustworthy than one that checked the package against +// a signature provided by the origin registry. +// +// The AcceptableHashes result is actually provided by the object in the +// Authentication field. AcceptableHashes therefore returns an empty result +// for a PackageMeta that has no authentication object, or has one that does +// not make use of hashes. +func (m PackageMeta) AcceptableHashes() []Hash { + auth, ok := m.Authentication.(PackageAuthenticationHashes) + if !ok { + return nil + } + return auth.AcceptableHashes() +} + +// PackageLocation represents a location where a provider distribution package +// can be obtained. A value of this type contains one of the following +// concrete types: PackageLocalArchive, PackageLocalDir, or PackageHTTPURL. +type PackageLocation interface { + packageLocation() + String() string +} + +// PackageLocalArchive is the location of a provider distribution archive file +// in the local filesystem. 
Its value is a local filesystem path using the +// syntax understood by Go's standard path/filepath package on the operating +// system where OpenTofu is running. +type PackageLocalArchive string + +func (p PackageLocalArchive) packageLocation() {} +func (p PackageLocalArchive) String() string { return string(p) } + +// PackageLocalDir is the location of a directory containing an unpacked +// provider distribution archive in the local filesystem. Its value is a local +// filesystem path using the syntax understood by Go's standard path/filepath +// package on the operating system where OpenTofu is running. +type PackageLocalDir string + +func (p PackageLocalDir) packageLocation() {} +func (p PackageLocalDir) String() string { return string(p) } + +// PackageHTTPURL is a provider package location accessible via HTTP. +// Its value is a URL string using either the http: scheme or the https: scheme. +type PackageHTTPURL string + +func (p PackageHTTPURL) packageLocation() {} +func (p PackageHTTPURL) String() string { return string(p) } + +// PackageMetaList is a list of PackageMeta. It's just []PackageMeta with +// some methods for convenient sorting and filtering. +type PackageMetaList []PackageMeta + +func (l PackageMetaList) Len() int { + return len(l) +} + +func (l PackageMetaList) Less(i, j int) bool { + return l[i].LessThan(l[j]) +} + +func (l PackageMetaList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// Sort performs an in-place, stable sort on the contents of the list, using +// the ordering given by method Less. This ordering is primarily to help +// encourage deterministic results from functions and does not have any +// semantic meaning. +func (l PackageMetaList) Sort() { + sort.Stable(l) +} + +// FilterPlatform constructs a new PackageMetaList that contains only the +// elements of the receiver that are for the given target platform. +// +// Pass CurrentPlatform to filter only for packages targeting the platform +// where this code is running. 
+func (l PackageMetaList) FilterPlatform(target Platform) PackageMetaList { + var ret PackageMetaList + for _, m := range l { + if m.TargetPlatform == target { + ret = append(ret, m) + } + } + return ret +} + +// FilterProviderExactVersion constructs a new PackageMetaList that contains +// only the elements of the receiver that relate to the given provider address +// and exact version. +// +// The version matching for this function is exact, including matching on +// semver build metadata, because it's intended for handling a single exact +// version selected by the caller from a set of available versions. +func (l PackageMetaList) FilterProviderExactVersion(provider addrs.Provider, version Version) PackageMetaList { + var ret PackageMetaList + for _, m := range l { + if m.Provider == provider && m.Version == version { + ret = append(ret, m) + } + } + return ret +} + +// FilterProviderPlatformExactVersion is a combination of both +// FilterPlatform and FilterProviderExactVersion that filters by all three +// criteria at once. +func (l PackageMetaList) FilterProviderPlatformExactVersion(provider addrs.Provider, platform Platform, version Version) PackageMetaList { + var ret PackageMetaList + for _, m := range l { + if m.Provider == provider && m.Version == version && m.TargetPlatform == platform { + ret = append(ret, m) + } + } + return ret +} + +// VersionConstraintsString returns a canonical string representation of +// a VersionConstraints value. +func VersionConstraintsString(spec VersionConstraints) string { + // (we have our own function for this because the upstream versions + // library prefers to use npm/cargo-style constraint syntax, but + // OpenTofu prefers Ruby-like. Maybe we can upstream a "RubyLikeString") + // function to do this later, but having this in here avoids blocking on + // that and this is the sort of thing that is unlikely to need ongoing + // maintenance because the version constraint syntax is unlikely to change.) 
	//
	// ParseVersionConstraints allows some variations for convenience, but the
	// return value from this function serves as the normalized form of a
	// particular version constraint, which is the form we require in dependency
	// lock files. Therefore the canonical forms produced here are a compatibility
	// constraint for the dependency lock file parser.

	if len(spec) == 0 {
		return ""
	}

	// VersionConstraints values are typically assembled by combining together
	// the version constraints from many separate declarations throughout
	// a configuration, across many modules. As a consequence, they typically
	// contain duplicates and the terms inside are in no particular order.
	// For our canonical representation we'll both deduplicate the items
	// and sort them into a consistent order.
	sels := make(map[constraints.SelectionSpec]struct{})
	for _, sel := range spec {
		// The parser allows writing abbreviated version (such as 2) which
		// end up being represented in memory with trailing unconstrained parts
		// (for example 2.*.*). For the purpose of serialization with Ruby
		// style syntax, these unconstrained parts can all be represented as 0
		// with no loss of meaning, so we make that conversion here. Doing so
		// allows us to deduplicate equivalent constraints, such as >= 2.0 and
		// >= 2.0.0.
		normalizedSel := constraints.SelectionSpec{
			Operator: sel.Operator,
			Boundary: sel.Boundary.ConstrainToZero(),
		}
		sels[normalizedSel] = struct{}{}
	}
	selsOrder := make([]constraints.SelectionSpec, 0, len(sels))
	for sel := range sels {
		selsOrder = append(selsOrder, sel)
	}
	sort.Slice(selsOrder, func(i, j int) bool {
		is, js := selsOrder[i], selsOrder[j]
		boundaryCmp := versionSelectionBoundaryCompare(is.Boundary, js.Boundary)
		if boundaryCmp == 0 {
			// The operator is the decider, then.
			return versionSelectionOperatorLess(is.Operator, js.Operator)
		}
		return boundaryCmp < 0
	})

	// Render each selection in canonical "<op> <version>" form, separated
	// by ", ".
	var b strings.Builder
	for i, sel := range selsOrder {
		if i > 0 {
			b.WriteString(", ")
		}
		switch sel.Operator {
		case constraints.OpGreaterThan:
			b.WriteString("> ")
		case constraints.OpLessThan:
			b.WriteString("< ")
		case constraints.OpGreaterThanOrEqual:
			b.WriteString(">= ")
		case constraints.OpGreaterThanOrEqualPatchOnly, constraints.OpGreaterThanOrEqualMinorOnly:
			// These two differ in how the version is written, not in the symbol.
			b.WriteString("~> ")
		case constraints.OpLessThanOrEqual:
			b.WriteString("<= ")
		case constraints.OpEqual:
			// Exact-version constraints render with no operator prefix.
			b.WriteString("")
		case constraints.OpNotEqual:
			b.WriteString("!= ")
		default:
			// The above covers all of the operators we support during
			// parsing, so we should not get here.
			b.WriteString("??? ")
		}

		// We use a different constraint operator to distinguish between the
		// two types of pessimistic constraint: minor-only and patch-only. For
		// minor-only constraints, we always want to display only the major and
		// minor version components, so we special-case that operator below.
		//
		// One final edge case is a minor-only constraint specified with only
		// the major version, such as ~> 2. We treat this the same as ~> 2.0,
		// because a major-only pessimistic constraint does not exist: it is
		// logically identical to >= 2.0.0.
		if sel.Operator == constraints.OpGreaterThanOrEqualMinorOnly {
			// The minor-pessimistic syntax uses only two version components.
			fmt.Fprintf(&b, "%s.%s", sel.Boundary.Major, sel.Boundary.Minor)
		} else {
			fmt.Fprintf(&b, "%s.%s.%s", sel.Boundary.Major, sel.Boundary.Minor, sel.Boundary.Patch)
		}
		if sel.Boundary.Prerelease != "" {
			b.WriteString("-" + sel.Boundary.Prerelease)
		}
		if sel.Boundary.Metadata != "" {
			b.WriteString("+" + sel.Boundary.Metadata)
		}
	}
	return b.String()
}

// Our sort for selection operators is somewhat arbitrary and mainly motivated
// by consistency rather than meaning, but this ordering does at least try
// to make it so "simple" constraint sets will appear how a human might
// typically write them, with the lower bounds first and the upper bounds
// last. Weird mixtures of different sorts of constraints will likely seem
// less intuitive, but they'd be unintuitive no matter the ordering.
var versionSelectionsBoundaryPriority = map[constraints.SelectionOp]int{
	// We skip zero here so that if we end up seeing an invalid
	// operator (which the string function would render as "???")
	// then it will have index zero and thus appear first.
	constraints.OpGreaterThan:                 1,
	constraints.OpGreaterThanOrEqual:          2,
	constraints.OpEqual:                       3,
	constraints.OpGreaterThanOrEqualPatchOnly: 4,
	constraints.OpGreaterThanOrEqualMinorOnly: 5,
	constraints.OpLessThanOrEqual:             6,
	constraints.OpLessThan:                    7,
	constraints.OpNotEqual:                    8,
}

// versionSelectionOperatorLess reports whether operator i sorts before
// operator j, per the priority table above.
func versionSelectionOperatorLess(i, j constraints.SelectionOp) bool {
	iPrio := versionSelectionsBoundaryPriority[i]
	jPrio := versionSelectionsBoundaryPriority[j]
	return iPrio < jPrio
}

// versionSelectionBoundaryCompare compares two selection boundaries,
// returning a negative, zero, or positive result in the usual comparator
// convention.
func versionSelectionBoundaryCompare(i, j constraints.VersionSpec) int {
	// In the Ruby-style constraint syntax, unconstrained parts appear
	// only for omitted portions of a version string, like writing
	// "2" instead of "2.0.0". For sorting purposes we'll just
	// consider those as zero, which also matches how we serialize them
	// to strings.
	i, j = i.ConstrainToZero(), j.ConstrainToZero()

	// Once we've removed any unconstrained parts, we can safely
	// convert to our main Version type so we can use its ordering.
	iv := Version{
		Major:      i.Major.Num,
		Minor:      i.Minor.Num,
		Patch:      i.Patch.Num,
		Prerelease: versions.VersionExtra(i.Prerelease),
		Metadata:   versions.VersionExtra(i.Metadata),
	}
	jv := Version{
		Major:      j.Major.Num,
		Minor:      j.Minor.Num,
		Patch:      j.Patch.Num,
		Prerelease: versions.VersionExtra(j.Prerelease),
		Metadata:   versions.VersionExtra(j.Metadata),
	}
	if iv.Same(jv) {
		// Although build metadata doesn't normally weigh in to
		// precedence choices, we'll use it for our visual
		// ordering just because we need to pick _some_ order.
		switch {
		case iv.Metadata.Raw() == jv.Metadata.Raw():
			return 0
		case iv.Metadata.LessThan(jv.Metadata):
			return -1
		default:
			return 1 // greater, by elimination
		}
	}
	// Different precedence: fall back to normal version ordering.
	switch {
	case iv.LessThan(jv):
		return -1
	default:
		return 1 // greater, by elimination
	}
}
diff --git a/pkg/getproviders/types_test.go b/pkg/getproviders/types_test.go
new file mode 100644
index 00000000000..aad82030846
--- /dev/null
+++ b/pkg/getproviders/types_test.go
@@ -0,0 +1,176 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package getproviders + +import ( + "testing" +) + +func TestVersionConstraintsString(t *testing.T) { + testCases := map[string]struct { + spec VersionConstraints + want string + }{ + "exact": { + MustParseVersionConstraints("1.2.3"), + "1.2.3", + }, + "prerelease": { + MustParseVersionConstraints("1.2.3-beta"), + "1.2.3-beta", + }, + "metadata": { + MustParseVersionConstraints("1.2.3+foo.bar"), + "1.2.3+foo.bar", + }, + "prerelease and metadata": { + MustParseVersionConstraints("1.2.3-beta+foo.bar"), + "1.2.3-beta+foo.bar", + }, + "major only": { + MustParseVersionConstraints(">= 3"), + ">= 3.0.0", + }, + "major only with pessimistic operator": { + MustParseVersionConstraints("~> 3"), + "~> 3.0", + }, + "pessimistic minor": { + MustParseVersionConstraints("~> 3.0"), + "~> 3.0", + }, + "pessimistic patch": { + MustParseVersionConstraints("~> 3.0.0"), + "~> 3.0.0", + }, + "other operators": { + MustParseVersionConstraints("> 1.0.0, < 1.0.0, >= 1.0.0, <= 1.0.0, != 1.0.0"), + "> 1.0.0, >= 1.0.0, <= 1.0.0, < 1.0.0, != 1.0.0", + }, + "multiple": { + MustParseVersionConstraints(">= 3.0, < 4.0"), + ">= 3.0.0, < 4.0.0", + }, + "duplicates removed": { + MustParseVersionConstraints(">= 1.2.3, 1.2.3, ~> 1.2, 1.2.3"), + "~> 1.2, >= 1.2.3, 1.2.3", + }, + "equivalent duplicates removed": { + MustParseVersionConstraints(">= 2.68, >= 2.68.0"), + ">= 2.68.0", + }, + "consistent ordering, exhaustive": { + // This weird jumble is just to exercise the different sort + // ordering codepaths. Hopefully nothing quite this horrific + // shows up often in practice. 
+ MustParseVersionConstraints("< 1.2.3, <= 1.2.3, != 1.2.3, 1.2.3+local.2, 1.2.3+local.1, = 1.2.4, = 1.2.3, > 2, > 1.2.3, >= 1.2.3, ~> 1.2.3, ~> 1.2"), + "~> 1.2, > 1.2.3, >= 1.2.3, 1.2.3, ~> 1.2.3, <= 1.2.3, < 1.2.3, != 1.2.3, 1.2.3+local.1, 1.2.3+local.2, 1.2.4, > 2.0.0", + }, + "consistent ordering, more typical": { + // This one is aiming to simulate a common situation where + // various different modules express compatible constraints + // but some modules are more constrained than others. The + // combined results here can be kinda confusing, but hopefully + // ordering them consistently makes them a _little_ easier to read. + MustParseVersionConstraints("~> 1.2, >= 1.2, 1.2.4"), + ">= 1.2.0, ~> 1.2, 1.2.4", + }, + "consistent ordering, disjoint": { + // One situation where our presentation of version constraints is + // particularly important is when a user accidentally ends up with + // disjoint constraints that can therefore never match. In that + // case, our ordering should hopefully make it easier to determine + // that the constraints are disjoint, as a first step to debugging, + // by showing > or >= constrains sorted after < or <= constraints. 
+ MustParseVersionConstraints(">= 2, >= 1.2, < 1.3"), + ">= 1.2.0, < 1.3.0, >= 2.0.0", + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := VersionConstraintsString(tc.spec) + + if got != tc.want { + t.Errorf("wrong\n got: %q\nwant: %q", got, tc.want) + } + }) + } +} + +func TestParsePlatform(t *testing.T) { + tests := []struct { + Input string + Want Platform + Err bool + }{ + { + "", + Platform{}, + true, + }, + { + "too_many_notes", + Platform{}, + true, + }, + { + "extra _ whitespaces ", + Platform{}, + true, + }, + { + "arbitrary_os", + Platform{OS: "arbitrary", Arch: "os"}, + false, + }, + } + + for _, test := range tests { + got, err := ParsePlatform(test.Input) + if err != nil { + if test.Err == false { + t.Errorf("unexpected error: %s", err.Error()) + } + } else { + if test.Err { + t.Errorf("wrong result: expected error, got none") + } + } + if got != test.Want { + t.Errorf("wrong\n got: %q\nwant: %q", got, test.Want) + } + } +} + +func TestMeetingConstraints(t *testing.T) { + tests := []struct { + constraintStr string + versionStr string + expected bool + }{ + // NOT PreRelease Version. 
This failed in apparentlymart/go-versions 1.0.1, and fixed in 1.0.2 + {"!= 2.0.0-beta1, 2.0.0-beta1", "2.0.0-beta1", false}, + } + + for _, test := range tests { + vc, err := ParseVersionConstraints(test.constraintStr) + if err != nil { + t.Fatalf("ParseVersionConstraints failed: %v", err) + } + + version, err := ParseVersion(test.versionStr) + if err != nil { + t.Fatalf("ParseVersion failed: %v", err) + } + + versionSet := MeetingConstraints(vc) + result := versionSet.Has(version) + + if result != test.expected { + t.Errorf("For constraint %s and version %s, expected %t, got %t", test.constraintStr, test.versionStr, test.expected, result) + } + } +} diff --git a/pkg/grpcwrap/provider.go b/pkg/grpcwrap/provider.go new file mode 100644 index 00000000000..fc8ccd66800 --- /dev/null +++ b/pkg/grpcwrap/provider.go @@ -0,0 +1,440 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcwrap + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/plugin/convert" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" +) + +// New wraps a providers.Interface to implement a grpc ProviderServer. +// This is useful for creating a test binary out of an internal provider +// implementation. 
func Provider(p providers.Interface) tfplugin5.ProviderServer {
	return &provider{
		provider: p,
		// Fetch the schema once up front; the request handlers below
		// decode/encode values against this cached copy.
		schema: p.GetProviderSchema(),
	}
}

// provider adapts a providers.Interface to the tfplugin5 gRPC protocol.
type provider struct {
	provider providers.Interface
	schema   providers.GetProviderSchemaResponse // cached result of GetProviderSchema
}

// GetMetadata is not implemented by this wrapper; it panics if called.
func (p *provider) GetMetadata(context.Context, *tfplugin5.GetMetadata_Request) (*tfplugin5.GetMetadata_Response, error) {
	panic("Not Implemented")
}

// GetSchema converts the cached provider schema into its protocol
// representation, carrying over any diagnostics from the original
// GetProviderSchema call.
func (p *provider) GetSchema(_ context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) {
	resp := &tfplugin5.GetProviderSchema_Response{
		ResourceSchemas:   make(map[string]*tfplugin5.Schema),
		DataSourceSchemas: make(map[string]*tfplugin5.Schema),
	}

	// Always send a non-nil (possibly empty) block for the provider and
	// provider_meta schemas.
	resp.Provider = &tfplugin5.Schema{
		Block: &tfplugin5.Schema_Block{},
	}
	if p.schema.Provider.Block != nil {
		resp.Provider.Block = convert.ConfigSchemaToProto(p.schema.Provider.Block)
	}

	resp.ProviderMeta = &tfplugin5.Schema{
		Block: &tfplugin5.Schema_Block{},
	}
	if p.schema.ProviderMeta.Block != nil {
		resp.ProviderMeta.Block = convert.ConfigSchemaToProto(p.schema.ProviderMeta.Block)
	}

	for typ, res := range p.schema.ResourceTypes {
		resp.ResourceSchemas[typ] = &tfplugin5.Schema{
			Version: res.Version,
			Block:   convert.ConfigSchemaToProto(res.Block),
		}
	}
	for typ, dat := range p.schema.DataSources {
		resp.DataSourceSchemas[typ] = &tfplugin5.Schema{
			Version: dat.Version,
			Block:   convert.ConfigSchemaToProto(dat.Block),
		}
	}

	resp.ServerCapabilities = &tfplugin5.ServerCapabilities{
		PlanDestroy: p.schema.ServerCapabilities.PlanDestroy,
	}

	// include any diagnostics from the original GetSchema call
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, p.schema.Diagnostics)

	return resp, nil
}

// PrepareProviderConfig decodes the incoming config and delegates to
// ValidateProviderConfig. Decode failures are reported as diagnostics in
// the response rather than as gRPC errors.
func (p *provider) PrepareProviderConfig(_ context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) {
	resp := &tfplugin5.PrepareProviderConfig_Response{}
	ty := p.schema.Provider.Block.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	prepareResp := p.provider.ValidateProviderConfig(providers.ValidateProviderConfigRequest{
		Config: configVal,
	})

	// the PreparedConfig value is no longer used
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, prepareResp.Diagnostics)
	return resp, nil
}

// ValidateResourceTypeConfig validates a resource config against the schema
// for its type.
func (p *provider) ValidateResourceTypeConfig(_ context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) {
	resp := &tfplugin5.ValidateResourceTypeConfig_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	validateResp := p.provider.ValidateResourceConfig(providers.ValidateResourceConfigRequest{
		TypeName: req.TypeName,
		Config:   configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics)
	return resp, nil
}

// ValidateDataSourceConfig validates a data source config against the schema
// for its type.
func (p *provider) ValidateDataSourceConfig(_ context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) {
	resp := &tfplugin5.ValidateDataSourceConfig_Response{}
	ty := p.schema.DataSources[req.TypeName].Block.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	validateResp := p.provider.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{
		TypeName: req.TypeName,
		Config:   configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics)
	return resp, nil
}

// UpgradeResourceState upgrades raw JSON state from an earlier schema version
// and re-encodes it against the current implied type.
func (p *provider) UpgradeResourceState(_ context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) {
	resp := &tfplugin5.UpgradeResourceState_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	upgradeResp := p.provider.UpgradeResourceState(providers.UpgradeResourceStateRequest{
		TypeName:     req.TypeName,
		Version:      req.Version,
		RawStateJSON: req.RawState.Json,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, upgradeResp.Diagnostics)
	if upgradeResp.Diagnostics.HasErrors() {
		return resp, nil
	}

	dv, err := encodeDynamicValue(upgradeResp.UpgradedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.UpgradedState = dv

	return resp, nil
}

// Configure decodes the provider config and passes it to ConfigureProvider.
func (p *provider) Configure(_ context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) {
	resp := &tfplugin5.Configure_Response{}
	ty := p.schema.Provider.Block.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configureResp := p.provider.ConfigureProvider(providers.ConfigureProviderRequest{
		TerraformVersion: req.TerraformVersion,
		Config:           configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, configureResp.Diagnostics)
	return resp, nil
}

// ReadResource refreshes a resource's state, passing through private data and
// provider_meta.
func (p *provider) ReadResource(_ context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) {
	resp := &tfplugin5.ReadResource_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	stateVal, err := decodeDynamicValue(req.CurrentState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	readResp := p.provider.ReadResource(providers.ReadResourceRequest{
		TypeName:     req.TypeName,
		PriorState:   stateVal,
		Private:      req.Private,
		ProviderMeta: metaVal,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics)
	if readResp.Diagnostics.HasErrors() {
		return resp, nil
	}
	resp.Private = readResp.Private

	dv, err := encodeDynamicValue(readResp.NewState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.NewState = dv

	return resp, nil
}

// PlanResourceChange decodes prior/proposed/config values, delegates planning
// to the wrapped provider, and encodes the planned result.
func (p *provider) PlanResourceChange(_ context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) {
	resp := &tfplugin5.PlanResourceChange_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	priorStateVal, err := decodeDynamicValue(req.PriorState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	proposedStateVal, err := decodeDynamicValue(req.ProposedNewState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	planResp := p.provider.PlanResourceChange(providers.PlanResourceChangeRequest{
		TypeName:         req.TypeName,
		PriorState:       priorStateVal,
		ProposedNewState: proposedStateVal,
		Config:           configVal,
		PriorPrivate:     req.PriorPrivate,
		ProviderMeta:     metaVal,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, planResp.Diagnostics)
	if planResp.Diagnostics.HasErrors() {
		return resp, nil
	}

	resp.PlannedPrivate = planResp.PlannedPrivate

	resp.PlannedState, err = encodeDynamicValue(planResp.PlannedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	for _, path := range planResp.RequiresReplace {
		resp.RequiresReplace = append(resp.RequiresReplace, convert.PathToAttributePath(path))
	}

	return resp, nil
}

// ApplyResourceChange applies a previously planned change and returns the
// resulting state.
func (p *provider) ApplyResourceChange(_ context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) {
	resp := &tfplugin5.ApplyResourceChange_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	priorStateVal, err := decodeDynamicValue(req.PriorState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err := decodeDynamicValue(req.PlannedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	applyResp := p.provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
		TypeName:       req.TypeName,
		PriorState:     priorStateVal,
		PlannedState:   plannedStateVal,
		Config:         configVal,
		PlannedPrivate: req.PlannedPrivate,
		ProviderMeta:   metaVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, applyResp.Diagnostics)
	if applyResp.Diagnostics.HasErrors() {
		return resp, nil
	}
	resp.Private = applyResp.Private

	resp.NewState, err = encodeDynamicValue(applyResp.NewState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	return resp, nil
}

// ImportResourceState imports by ID; resources whose state fails to encode
// are skipped with a diagnostic rather than aborting the whole import.
func (p *provider) ImportResourceState(_ context.Context, req *tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) {
	resp := &tfplugin5.ImportResourceState_Response{}

	importResp := p.provider.ImportResourceState(providers.ImportResourceStateRequest{
		TypeName: req.TypeName,
		ID:       req.Id,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, importResp.Diagnostics)

	for _, res := range importResp.ImportedResources {
		ty := p.schema.ResourceTypes[res.TypeName].Block.ImpliedType()
		state, err := encodeDynamicValue(res.State, ty)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			continue
		}

		resp.ImportedResources = append(resp.ImportedResources, &tfplugin5.ImportResourceState_ImportedResource{
			TypeName: res.TypeName,
			State:    state,
			Private:  res.Private,
		})
	}

	return resp, nil
}

// MoveResourceState is not implemented by this wrapper; it panics if called.
func (p *provider) MoveResourceState(context.Context, *tfplugin5.MoveResourceState_Request) (*tfplugin5.MoveResourceState_Response, error) {
	panic("Not Implemented")
}

// ReadDataSource reads a data source, passing through provider_meta.
func (p *provider) ReadDataSource(_ context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) {
	resp := &tfplugin5.ReadDataSource_Response{}
	ty := p.schema.DataSources[req.TypeName].Block.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	readResp := p.provider.ReadDataSource(providers.ReadDataSourceRequest{
		TypeName:     req.TypeName,
		Config:       configVal,
		ProviderMeta: metaVal,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics)
	if readResp.Diagnostics.HasErrors() {
		return resp, nil
	}

	resp.State, err = encodeDynamicValue(readResp.State, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	return resp, nil
}

// Stop forwards the stop request; a non-nil error is reported in the
// response's Error field rather than as a gRPC error.
func (p *provider) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) {
	resp := &tfplugin5.Stop_Response{}
	err := p.provider.Stop()
	if err != nil {
		resp.Error = err.Error()
	}
	return resp, nil
}

// GetFunctions is not implemented by this wrapper; it panics if called.
func (p *provider) GetFunctions(context.Context, *tfplugin5.GetFunctions_Request) (*tfplugin5.GetFunctions_Response, error) {
	panic("Not Implemented")
}

// CallFunction is not implemented by this wrapper; it panics if called.
func (p *provider) CallFunction(context.Context, *tfplugin5.CallFunction_Request) (*tfplugin5.CallFunction_Response, error) {
	panic("Not Implemented")
}

// decode a DynamicValue from either the JSON or MsgPack encoding.
func decodeDynamicValue(v *tfplugin5.DynamicValue, ty cty.Type) (cty.Value, error) {
	// always return a valid value
	var err error
	res := cty.NullVal(ty)
	if v == nil {
		return res, nil
	}

	switch {
	case len(v.Msgpack) > 0:
		res, err = msgpack.Unmarshal(v.Msgpack, ty)
	case len(v.Json) > 0:
		res, err = ctyjson.Unmarshal(v.Json, ty)
	}
	return res, err
}

// encode a cty.Value into a DynamicValue msgpack payload.
func encodeDynamicValue(v cty.Value, ty cty.Type) (*tfplugin5.DynamicValue, error) {
	mp, err := msgpack.Marshal(v, ty)
	return &tfplugin5.DynamicValue{
		Msgpack: mp,
	}, err
}
diff --git a/pkg/grpcwrap/provider6.go b/pkg/grpcwrap/provider6.go
new file mode 100644
index 00000000000..33f1a2c72a0
--- /dev/null
+++ b/pkg/grpcwrap/provider6.go
@@ -0,0 +1,440 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package grpcwrap

import (
	"context"

	"github.com/kubegems/opentofu/pkg/plugin6/convert"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/tfplugin6"
	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
	"github.com/zclconf/go-cty/cty/msgpack"
)

// Provider6 wraps a providers.Interface to implement a grpc ProviderServer
// using plugin protocol v6. This is useful for creating a test binary out of
// an internal provider implementation.
func Provider6(p providers.Interface) tfplugin6.ProviderServer {
	return &provider6{
		provider: p,
		// Fetch the schema once up front; the request handlers below
		// decode/encode values against this cached copy.
		schema: p.GetProviderSchema(),
	}
}

// provider6 adapts a providers.Interface to the tfplugin6 gRPC protocol,
// mirroring the tfplugin5 wrapper in provider.go.
type provider6 struct {
	provider providers.Interface
	schema   providers.GetProviderSchemaResponse // cached result of GetProviderSchema
}

// GetMetadata is not implemented by this wrapper; it panics if called.
func (p *provider6) GetMetadata(context.Context, *tfplugin6.GetMetadata_Request) (*tfplugin6.GetMetadata_Response, error) {
	panic("Not Implemented")
}

// GetProviderSchema converts the cached provider schema into its protocol
// representation, carrying over any diagnostics from the original
// GetProviderSchema call.
func (p *provider6) GetProviderSchema(_ context.Context, req *tfplugin6.GetProviderSchema_Request) (*tfplugin6.GetProviderSchema_Response, error) {
	resp := &tfplugin6.GetProviderSchema_Response{
		ResourceSchemas:   make(map[string]*tfplugin6.Schema),
		DataSourceSchemas: make(map[string]*tfplugin6.Schema),
	}

	// Always send a non-nil (possibly empty) block for the provider and
	// provider_meta schemas.
	resp.Provider = &tfplugin6.Schema{
		Block: &tfplugin6.Schema_Block{},
	}
	if p.schema.Provider.Block != nil {
		resp.Provider.Block = convert.ConfigSchemaToProto(p.schema.Provider.Block)
	}

	resp.ProviderMeta = &tfplugin6.Schema{
		Block: &tfplugin6.Schema_Block{},
	}
	if p.schema.ProviderMeta.Block != nil {
		resp.ProviderMeta.Block = convert.ConfigSchemaToProto(p.schema.ProviderMeta.Block)
	}

	for typ, res := range p.schema.ResourceTypes {
		resp.ResourceSchemas[typ] = &tfplugin6.Schema{
			Version: res.Version,
			Block:   convert.ConfigSchemaToProto(res.Block),
		}
	}
	for typ, dat := range p.schema.DataSources {
		resp.DataSourceSchemas[typ] = &tfplugin6.Schema{
			Version: dat.Version,
			Block:   convert.ConfigSchemaToProto(dat.Block),
		}
	}

	resp.ServerCapabilities = &tfplugin6.ServerCapabilities{
		PlanDestroy: p.schema.ServerCapabilities.PlanDestroy,
	}

	// include any diagnostics from the original GetSchema call
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, p.schema.Diagnostics)

	return resp, nil
}

// ValidateProviderConfig decodes the incoming config and delegates to the
// wrapped provider's ValidateProviderConfig. Decode failures are reported as
// diagnostics in the response rather than as gRPC errors.
func (p *provider6) ValidateProviderConfig(_ context.Context, req *tfplugin6.ValidateProviderConfig_Request) (*tfplugin6.ValidateProviderConfig_Response, error) {
	resp := &tfplugin6.ValidateProviderConfig_Response{}
	ty := p.schema.Provider.Block.ImpliedType()

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	prepareResp := p.provider.ValidateProviderConfig(providers.ValidateProviderConfigRequest{
		Config: configVal,
	})

	// the PreparedConfig value is no longer used
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, prepareResp.Diagnostics)
	return resp, nil
}

// ValidateResourceConfig validates a resource config against the schema for
// its type.
func (p *provider6) ValidateResourceConfig(_ context.Context, req *tfplugin6.ValidateResourceConfig_Request) (*tfplugin6.ValidateResourceConfig_Response, error) {
	resp := &tfplugin6.ValidateResourceConfig_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	validateResp := p.provider.ValidateResourceConfig(providers.ValidateResourceConfigRequest{
		TypeName: req.TypeName,
		Config:   configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics)
	return resp, nil
}

// ValidateDataResourceConfig validates a data source config against the
// schema for its type.
func (p *provider6) ValidateDataResourceConfig(_ context.Context, req *tfplugin6.ValidateDataResourceConfig_Request) (*tfplugin6.ValidateDataResourceConfig_Response, error) {
	resp := &tfplugin6.ValidateDataResourceConfig_Response{}
	ty := p.schema.DataSources[req.TypeName].Block.ImpliedType()

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	validateResp := p.provider.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{
		TypeName: req.TypeName,
		Config:   configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics)
	return resp, nil
}

// UpgradeResourceState upgrades raw JSON state from an earlier schema version
// and re-encodes it against the current implied type.
func (p *provider6) UpgradeResourceState(_ context.Context, req *tfplugin6.UpgradeResourceState_Request) (*tfplugin6.UpgradeResourceState_Response, error) {
	resp := &tfplugin6.UpgradeResourceState_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	upgradeResp := p.provider.UpgradeResourceState(providers.UpgradeResourceStateRequest{
		TypeName:     req.TypeName,
		Version:      req.Version,
		RawStateJSON: req.RawState.Json,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, upgradeResp.Diagnostics)
	if upgradeResp.Diagnostics.HasErrors() {
		return resp, nil
	}

	dv, err := encodeDynamicValue6(upgradeResp.UpgradedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.UpgradedState = dv

	return resp, nil
}

// ConfigureProvider decodes the provider config and passes it to the wrapped
// provider's ConfigureProvider.
func (p *provider6) ConfigureProvider(_ context.Context, req *tfplugin6.ConfigureProvider_Request) (*tfplugin6.ConfigureProvider_Response, error) {
	resp := &tfplugin6.ConfigureProvider_Response{}
	ty := p.schema.Provider.Block.ImpliedType()

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configureResp := p.provider.ConfigureProvider(providers.ConfigureProviderRequest{
		TerraformVersion: req.TerraformVersion,
		Config:           configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, configureResp.Diagnostics)
	return resp, nil
}

// ReadResource refreshes a resource's state, passing through private data and
// provider_meta.
func (p *provider6) ReadResource(_ context.Context, req *tfplugin6.ReadResource_Request) (*tfplugin6.ReadResource_Response, error) {
	resp := &tfplugin6.ReadResource_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	stateVal, err := decodeDynamicValue6(req.CurrentState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue6(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	readResp := p.provider.ReadResource(providers.ReadResourceRequest{
		TypeName:     req.TypeName,
		PriorState:   stateVal,
		Private:      req.Private,
		ProviderMeta: metaVal,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics)
	if readResp.Diagnostics.HasErrors() {
		return resp, nil
	}
	resp.Private = readResp.Private

	dv, err := encodeDynamicValue6(readResp.NewState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.NewState = dv

	return resp, nil
}

// PlanResourceChange decodes prior/proposed/config values, delegates planning
// to the wrapped provider, and encodes the planned result.
func (p *provider6) PlanResourceChange(_ context.Context, req *tfplugin6.PlanResourceChange_Request) (*tfplugin6.PlanResourceChange_Response, error) {
	resp := &tfplugin6.PlanResourceChange_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	priorStateVal, err := decodeDynamicValue6(req.PriorState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	proposedStateVal, err := decodeDynamicValue6(req.ProposedNewState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
	metaVal, err := decodeDynamicValue6(req.ProviderMeta, metaTy)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	planResp := p.provider.PlanResourceChange(providers.PlanResourceChangeRequest{
		TypeName:         req.TypeName,
		PriorState:       priorStateVal,
		ProposedNewState: proposedStateVal,
		Config:           configVal,
		PriorPrivate:     req.PriorPrivate,
		ProviderMeta:     metaVal,
	})
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, planResp.Diagnostics)
	if planResp.Diagnostics.HasErrors() {
		return resp, nil
	}

	resp.PlannedPrivate = planResp.PlannedPrivate

	resp.PlannedState, err = encodeDynamicValue6(planResp.PlannedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	for _, path := range planResp.RequiresReplace {
		resp.RequiresReplace = append(resp.RequiresReplace, convert.PathToAttributePath(path))
	}

	return resp, nil
}

// ApplyResourceChange applies a previously planned change and returns the
// resulting state.
func (p *provider6) ApplyResourceChange(_ context.Context, req *tfplugin6.ApplyResourceChange_Request) (*tfplugin6.ApplyResourceChange_Response, error) {
	resp := &tfplugin6.ApplyResourceChange_Response{}
	ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType()

	priorStateVal, err := decodeDynamicValue6(req.PriorState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err := decodeDynamicValue6(req.PlannedState, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	configVal, err := decodeDynamicValue6(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	metaTy := p.schema.ProviderMeta.Block.ImpliedType()
metaVal, err := decodeDynamicValue6(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + applyResp := p.provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: req.TypeName, + PriorState: priorStateVal, + PlannedState: plannedStateVal, + Config: configVal, + PlannedPrivate: req.PlannedPrivate, + ProviderMeta: metaVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, applyResp.Diagnostics) + if applyResp.Diagnostics.HasErrors() { + return resp, nil + } + resp.Private = applyResp.Private + + resp.NewState, err = encodeDynamicValue6(applyResp.NewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p *provider6) ImportResourceState(_ context.Context, req *tfplugin6.ImportResourceState_Request) (*tfplugin6.ImportResourceState_Response, error) { + resp := &tfplugin6.ImportResourceState_Response{} + + importResp := p.provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: req.TypeName, + ID: req.Id, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, importResp.Diagnostics) + + for _, res := range importResp.ImportedResources { + ty := p.schema.ResourceTypes[res.TypeName].Block.ImpliedType() + state, err := encodeDynamicValue6(res.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + continue + } + + resp.ImportedResources = append(resp.ImportedResources, &tfplugin6.ImportResourceState_ImportedResource{ + TypeName: res.TypeName, + State: state, + Private: res.Private, + }) + } + + return resp, nil +} + +func (p *provider6) MoveResourceState(context.Context, *tfplugin6.MoveResourceState_Request) (*tfplugin6.MoveResourceState_Response, error) { + panic("Not Implemented") +} + +func (p *provider6) ReadDataSource(_ context.Context, req 
*tfplugin6.ReadDataSource_Request) (*tfplugin6.ReadDataSource_Response, error) { + resp := &tfplugin6.ReadDataSource_Response{} + ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue6(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue6(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + readResp := p.provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: req.TypeName, + Config: configVal, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) + if readResp.Diagnostics.HasErrors() { + return resp, nil + } + + resp.State, err = encodeDynamicValue6(readResp.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p *provider6) StopProvider(context.Context, *tfplugin6.StopProvider_Request) (*tfplugin6.StopProvider_Response, error) { + resp := &tfplugin6.StopProvider_Response{} + err := p.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +func (p *provider6) GetFunctions(context.Context, *tfplugin6.GetFunctions_Request) (*tfplugin6.GetFunctions_Response, error) { + panic("Not Implemented") +} + +func (p *provider6) CallFunction(context.Context, *tfplugin6.CallFunction_Request) (*tfplugin6.CallFunction_Response, error) { + panic("Not Implemented") +} + +// decode a DynamicValue from either the JSON or MsgPack encoding. 
func decodeDynamicValue6(v *tfplugin6.DynamicValue, ty cty.Type) (cty.Value, error) {
	// always return a valid value
	var err error
	res := cty.NullVal(ty)
	if v == nil {
		return res, nil
	}

	// Msgpack takes precedence when both encodings are populated; a
	// DynamicValue with neither encoding decodes to the null value of ty.
	switch {
	case len(v.Msgpack) > 0:
		res, err = msgpack.Unmarshal(v.Msgpack, ty)
	case len(v.Json) > 0:
		res, err = ctyjson.Unmarshal(v.Json, ty)
	}
	return res, err
}

// encode a cty.Value into a DynamicValue msgpack payload.
// Note that a non-nil DynamicValue is returned even when Marshal fails, so
// callers must check the error before trusting the payload.
func encodeDynamicValue6(v cty.Value, ty cty.Type) (*tfplugin6.DynamicValue, error) {
	mp, err := msgpack.Marshal(v, ty)
	return &tfplugin6.DynamicValue{
		Msgpack: mp,
	}, err
}
diff --git a/pkg/grpcwrap/provisioner.go b/pkg/grpcwrap/provisioner.go
new file mode 100644
index 00000000000..7b76a8a7f91
--- /dev/null
+++ b/pkg/grpcwrap/provisioner.go
@@ -0,0 +1,121 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package grpcwrap

import (
	"context"
	"log"
	"strings"
	"unicode/utf8"

	"github.com/kubegems/opentofu/pkg/communicator/shared"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/plugin/convert"
	"github.com/kubegems/opentofu/pkg/provisioners"
	"github.com/kubegems/opentofu/pkg/tfplugin5"
)

// New wraps a provisioners.Interface to implement a grpc ProviderServer.
// This is useful for creating a test binary out of an internal provider
// implementation.
func Provisioner(p provisioners.Interface) tfplugin5.ProvisionerServer {
	return &provisioner{
		provisioner: p,
		schema:      p.GetSchema().Provisioner,
	}
}

// provisioner adapts a provisioners.Interface to the tfplugin5 grpc
// ProvisionerServer interface.
type provisioner struct {
	provisioner provisioners.Interface
	schema      *configschema.Block
}

// GetSchema returns the provisioner's configuration schema converted to the
// protocol representation; an empty block is returned when no schema is set.
func (p *provisioner) GetSchema(_ context.Context, req *tfplugin5.GetProvisionerSchema_Request) (*tfplugin5.GetProvisionerSchema_Response, error) {
	resp := &tfplugin5.GetProvisionerSchema_Response{}

	resp.Provisioner = &tfplugin5.Schema{
		Block: &tfplugin5.Schema_Block{},
	}

	if p.schema != nil {
		resp.Provisioner.Block = convert.ConfigSchemaToProto(p.schema)
	}

	return resp, nil
}

// ValidateProvisionerConfig decodes the config and forwards it to the wrapped
// provisioner; problems are returned as in-band diagnostics.
func (p *provisioner) ValidateProvisionerConfig(_ context.Context, req *tfplugin5.ValidateProvisionerConfig_Request) (*tfplugin5.ValidateProvisionerConfig_Response, error) {
	resp := &tfplugin5.ValidateProvisionerConfig_Response{}
	ty := p.schema.ImpliedType()

	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	validateResp := p.provisioner.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{
		Config: configVal,
	})

	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics)
	return resp, nil
}

// ProvisionResource runs the provisioner, streaming its UI output back over
// srv and sending a final response carrying any diagnostics.
func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request, srv tfplugin5.Provisioner_ProvisionResourceServer) error {
	// We send back a diagnostics over the stream if there was a
	// provisioner-side problem.
	srvResp := &tfplugin5.ProvisionResource_Response{}

	ty := p.schema.ImpliedType()
	configVal, err := decodeDynamicValue(req.Config, ty)
	if err != nil {
		srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
		srv.Send(srvResp) // NOTE(review): Send errors are deliberately ignored throughout this method
		return nil
	}

	// The connection block is decoded against the superset schema shared by
	// all communicator types.
	connVal, err := decodeDynamicValue(req.Connection, shared.ConnectionBlockSupersetSchema.ImpliedType())
	if err != nil {
		srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
		srv.Send(srvResp)
		return nil
	}

	resp := p.provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
		Config:     configVal,
		Connection: connVal,
		UIOutput:   uiOutput{srv},
	})

	srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, resp.Diagnostics)
	srv.Send(srvResp)
	return nil
}

// Stop forwards the stop request; a failure is reported via the response's
// Error string rather than a Go error.
func (p *provisioner) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) {
	resp := &tfplugin5.Stop_Response{}
	err := p.provisioner.Stop()
	if err != nil {
		resp.Error = err.Error()
	}
	return resp, nil
}

// uiOutput implements the terraform.UIOutput interface to adapt the grpc
// stream to the legacy Provisioner.Apply method.
type uiOutput struct {
	srv tfplugin5.Provisioner_ProvisionResourceServer
}

// Output streams one chunk of provisioner output, replacing invalid UTF-8
// sequences so the protobuf string field stays valid; send failures are only
// logged.
func (o uiOutput) Output(s string) {
	err := o.srv.Send(&tfplugin5.ProvisionResource_Response{
		Output: strings.ToValidUTF8(s, string(utf8.RuneError)),
	})
	if err != nil {
		log.Printf("[ERROR] %s", err)
	}
}
diff --git a/pkg/helper/slowmessage/slowmessage.go b/pkg/helper/slowmessage/slowmessage.go
new file mode 100644
index 00000000000..c2f619e3af5
--- /dev/null
+++ b/pkg/helper/slowmessage/slowmessage.go
@@ -0,0 +1,39 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package slowmessage

import (
	"time"
)

// SlowFunc is the function that could be slow.
Usually, you'll have to
// wrap an existing function in a lambda to make it match this type signature.
type SlowFunc func() error

// CallbackFunc is the function that is triggered when the threshold is reached.
type CallbackFunc func()

// Do calls sf. If threshold time has passed, cb is called. Note that this
// call will be made concurrently to sf still running.
func Do(threshold time.Duration, sf SlowFunc, cb CallbackFunc) error {
	// Call the slow function
	// The buffer of 1 lets the goroutine deliver its result and exit even if
	// this function has already returned via the fast path, so it never leaks.
	errCh := make(chan error, 1)
	go func() {
		errCh <- sf()
	}()

	// Wait for it to complete or the threshold to pass
	select {
	case err := <-errCh:
		return err
	case <-time.After(threshold):
		// Threshold reached, call the callback
		cb()
	}

	// Wait an indefinite amount of time for it to finally complete
	return <-errCh
}
diff --git a/pkg/helper/slowmessage/slowmessage_test.go b/pkg/helper/slowmessage/slowmessage_test.go
new file mode 100644
index 00000000000..8d5ff14dc7c
--- /dev/null
+++ b/pkg/helper/slowmessage/slowmessage_test.go
@@ -0,0 +1,87 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package slowmessage

import (
	"errors"
	"testing"
	"time"
)

// TestDo covers the four combinations of {fast, slow} x {nil error, error}
// and verifies whether the callback fired in each case.
func TestDo(t *testing.T) {
	var sfErr error
	cbCalled := false
	sfCalled := false
	sfSleep := 0 * time.Second

	// reset clears the observation flags between cases; sfSleep is left as
	// configured by each case.
	reset := func() {
		cbCalled = false
		sfCalled = false
		sfErr = nil
	}
	sf := func() error {
		sfCalled = true
		time.Sleep(sfSleep)
		return sfErr
	}
	cb := func() { cbCalled = true }

	// SF is not slow
	reset()
	if err := Do(10*time.Millisecond, sf, cb); err != nil {
		t.Fatalf("err: %s", err)
	}

	if !sfCalled {
		t.Fatal("should call")
	}
	if cbCalled {
		t.Fatal("should not call")
	}

	// SF is not slow (with error)
	reset()
	sfErr = errors.New("error")
	if err := Do(10*time.Millisecond, sf, cb); err == nil {
		// NOTE(review): err is provably nil on this branch, so this message
		// renders a nil value; "expected error" would be clearer.
		t.Fatalf("err: %s", err)
	}

	if !sfCalled {
		t.Fatal("should call")
	}
	if cbCalled {
		t.Fatal("should not call")
	}

	// SF is slow
	reset()
	sfSleep = 50 * time.Millisecond
	if err := Do(10*time.Millisecond, sf, cb); err != nil {
		t.Fatalf("err: %s", err)
	}

	if !sfCalled {
		t.Fatal("should call")
	}
	if !cbCalled {
		t.Fatal("should call")
	}

	// SF is slow (with error)
	reset()
	sfErr = errors.New("error")
	sfSleep = 50 * time.Millisecond
	if err := Do(10*time.Millisecond, sf, cb); err == nil {
		// NOTE(review): err is nil here too; same misleading message as above.
		t.Fatalf("err: %s", err)
	}

	if !sfCalled {
		t.Fatal("should call")
	}
	if !cbCalled {
		t.Fatal("should call")
	}
}
diff --git a/pkg/httpclient/client.go b/pkg/httpclient/client.go
new file mode 100644
index 00000000000..80f651d5c9c
--- /dev/null
+++ b/pkg/httpclient/client.go
@@ -0,0 +1,24 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package httpclient

import (
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
	"github.com/kubegems/opentofu/version"
)

// New returns the DefaultPooledClient from the cleanhttp
// package that will also send a OpenTofu User-Agent string.
func New() *http.Client {
	cli := cleanhttp.DefaultPooledClient()
	// Wrap the pooled transport so requests without an explicit User-Agent
	// header get the OpenTofu one.
	cli.Transport = &userAgentRoundTripper{
		userAgent: OpenTofuUserAgent(version.Version),
		inner:     cli.Transport,
	}
	return cli
}
diff --git a/pkg/httpclient/client_test.go b/pkg/httpclient/client_test.go
new file mode 100644
index 00000000000..3ac632a9c13
--- /dev/null
+++ b/pkg/httpclient/client_test.go
@@ -0,0 +1,88 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package httpclient

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"testing"

	"github.com/kubegems/opentofu/version"
)

// TestNew_userAgent verifies the default User-Agent injection and that an
// explicitly-set (even empty) header is left untouched.
func TestNew_userAgent(t *testing.T) {

	// Save and clear any ambient append-UA env var so the test is hermetic,
	// restoring it on exit.
	appendUaVal := os.Getenv(appendUaEnvVar)
	os.Unsetenv(appendUaEnvVar)
	defer os.Setenv(appendUaEnvVar, appendUaVal)

	var actualUserAgent string
	ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		actualUserAgent = req.UserAgent()
	}))
	defer ts.Close()

	tsURL, err := url.Parse(ts.URL)
	if err != nil {
		t.Fatal(err)
	}

	for i, c := range []struct {
		expected string
		request  func(c *http.Client) error
	}{
		{
			fmt.Sprintf("OpenTofu/%s", version.Version),
			func(c *http.Client) error {
				_, err := c.Get(ts.URL)
				return err
			},
		},
		{
			"foo/1",
			func(c *http.Client) error {
				req := &http.Request{
					Method: "GET",
					URL:    tsURL,
					Header: http.Header{
						"User-Agent": []string{"foo/1"},
					},
				}
				_, err := c.Do(req)
				return err
			},
		},
		{
			"",
			func(c *http.Client) error {
				req := &http.Request{
					Method: "GET",
					URL:    tsURL,
					Header: http.Header{
						"User-Agent":
[]string{""},
					},
				}
				_, err := c.Do(req)
				return err
			},
		},
	} {
		t.Run(fmt.Sprintf("%d %s", i, c.expected), func(t *testing.T) {
			actualUserAgent = ""
			cli := New()
			err := c.request(cli)
			if err != nil {
				t.Fatal(err)
			}
			if actualUserAgent != c.expected {
				t.Fatalf("actual User-Agent '%s' is not '%s'", actualUserAgent, c.expected)
			}
		})
	}
}
diff --git a/pkg/httpclient/useragent.go b/pkg/httpclient/useragent.go
new file mode 100644
index 00000000000..33ab71c0846
--- /dev/null
+++ b/pkg/httpclient/useragent.go
@@ -0,0 +1,51 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package httpclient

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
)

const (
	// appendUaEnvVar names the env var whose value is appended to the
	// computed User-Agent.
	appendUaEnvVar = "TF_APPEND_USER_AGENT"
	// customUaEnvVar names the env var that replaces the default User-Agent
	// entirely.
	customUaEnvVar = "OPENTOFU_USER_AGENT"
	// DefaultApplicationName is the product token used in the default
	// User-Agent string.
	DefaultApplicationName = "OpenTofu"
)

// userAgentRoundTripper decorates an inner RoundTripper, injecting a default
// User-Agent header when the request does not already carry one.
type userAgentRoundTripper struct {
	inner     http.RoundTripper
	userAgent string
}

// RoundTrip sets the default User-Agent only when the caller supplied none;
// an explicitly-set header (including an empty value) is preserved.
func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if _, ok := req.Header["User-Agent"]; !ok {
		req.Header.Set("User-Agent", rt.userAgent)
	}
	log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String())
	return rt.inner.RoundTrip(req)
}

// OpenTofuUserAgent builds the User-Agent string for the given version,
// honoring OPENTOFU_USER_AGENT (full override) and TF_APPEND_USER_AGENT
// (trimmed suffix) environment variables.
func OpenTofuUserAgent(version string) string {
	ua := fmt.Sprintf("%s/%s", DefaultApplicationName, version)
	if customUa := os.Getenv(customUaEnvVar); customUa != "" {
		ua = customUa
		log.Printf("[DEBUG] Using Custom User-Agent: %s", ua)
	}

	if add := os.Getenv(appendUaEnvVar); add != "" {
		add = strings.TrimSpace(add)
		if len(add) > 0 {
			ua += " " + add
			log.Printf("[DEBUG] Using modified User-Agent: %s", ua)
		}
	}

	return ua
}
diff --git a/pkg/httpclient/useragent_test.go b/pkg/httpclient/useragent_test.go
new file mode 100644
index 00000000000..b2ba3a50331
--- /dev/null
+++ b/pkg/httpclient/useragent_test.go
@@ -0,0 +1,123 @@
// Copyright (c)
The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package httpclient

import (
	"fmt"
	"os"
	"testing"

	"github.com/kubegems/opentofu/version"
)

// TestUserAgentString_env checks the append env var against the real build
// version, including whitespace-only values that should be ignored.
func TestUserAgentString_env(t *testing.T) {

	// Save and clear any ambient append-UA env var so the test is hermetic,
	// restoring it on exit.
	appendUaVal := os.Getenv(appendUaEnvVar)
	os.Unsetenv(appendUaEnvVar)
	defer os.Setenv(appendUaEnvVar, appendUaVal)

	expectedBase := fmt.Sprintf("%s/%s", DefaultApplicationName, version.Version)

	for i, c := range []struct {
		expected   string
		additional string
	}{
		{expectedBase, ""},
		{expectedBase, " "},
		{expectedBase, " \n"},

		{fmt.Sprintf("%s test/1", expectedBase), "test/1"},
		{fmt.Sprintf("%s test/2", expectedBase), "test/2 "},
		{fmt.Sprintf("%s test/3", expectedBase), " test/3 "},
		{fmt.Sprintf("%s test/4", expectedBase), "test/4 \n"},
	} {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			if c.additional != "" {
				t.Setenv(appendUaEnvVar, c.additional)
			}

			actual := OpenTofuUserAgent(version.Version)

			if c.expected != actual {
				t.Fatalf("Expected User-Agent '%s' does not match '%s'", c.expected, actual)
			}
		})
	}
}

// TestUserAgentAppendViaEnvVar exercises the append env var against a fixed
// version string.
func TestUserAgentAppendViaEnvVar(t *testing.T) {
	expectedBase := "OpenTofu/0.0.0"

	testCases := []struct {
		envVarValue string
		expected    string
	}{
		{"", expectedBase},
		{" ", expectedBase},
		{" \n", expectedBase},
		{"test/1", expectedBase + " test/1"},
		{"test/1 (comment)", expectedBase + " test/1 (comment)"},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			t.Setenv(appendUaEnvVar, tc.envVarValue)
			givenUA := OpenTofuUserAgent("0.0.0")
			if givenUA != tc.expected {
				t.Fatalf("Expected User-Agent '%s' does not match '%s'", tc.expected, givenUA)
			}
		})
	}
}

// TestCustomUserAgentViaEnvVar verifies the full-override env var is used
// verbatim, with no append env var present.
func TestCustomUserAgentViaEnvVar(t *testing.T) {

	// Save and clear any ambient append-UA env var, restoring it on exit.
	appendUaVal := os.Getenv(appendUaEnvVar)
	os.Unsetenv(appendUaEnvVar)
	defer os.Setenv(appendUaEnvVar, appendUaVal)

	testCases := []struct {
		envVarValue string
	}{
		{" "},
		{" \n"},
		{"test/1"},
		{"test/1 (comment)"},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			t.Setenv(customUaEnvVar, tc.envVarValue)
			givenUA := OpenTofuUserAgent("0.0.0")
			if givenUA != tc.envVarValue {
				t.Fatalf("Expected User-Agent '%s' does not match '%s'", tc.envVarValue, givenUA)
			}
		})
	}
}

// TestCustomUserAgentAndAppendViaEnvVar verifies the interaction of the
// override and append env vars when both are set.
func TestCustomUserAgentAndAppendViaEnvVar(t *testing.T) {
	testCases := []struct {
		customUaValue string
		appendUaValue string
		expected      string
	}{
		{"", "", "OpenTofu/0.0.0"},
		{"", " ", "OpenTofu/0.0.0"},
		{"", " \n", "OpenTofu/0.0.0"},
		{"", "testy test", "OpenTofu/0.0.0 testy test"},
		{"opensource", "opentofu", "opensource opentofu"},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			t.Setenv(customUaEnvVar, tc.customUaValue)
			t.Setenv(appendUaEnvVar, tc.appendUaValue)
			givenUA := OpenTofuUserAgent("0.0.0")
			if givenUA != tc.expected {
				t.Fatalf("Expected User-Agent '%s' does not match '%s'", tc.expected, givenUA)
			}
		})
	}
}
diff --git a/pkg/initwd/doc.go b/pkg/initwd/doc.go
new file mode 100644
index 00000000000..7fb8fbb0011
--- /dev/null
+++ b/pkg/initwd/doc.go
@@ -0,0 +1,12 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package initwd contains various helper functions used by the "tofu init"
// command to initialize a working directory.
//
// These functions may also be used from testing code to simulate the behaviors
// of "tofu init" against test fixtures, but should not be used elsewhere
// in the main code.
+package initwd diff --git a/pkg/initwd/from_module.go b/pkg/initwd/from_module.go new file mode 100644 index 00000000000..c412db608ae --- /dev/null +++ b/pkg/initwd/from_module.go @@ -0,0 +1,423 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/getmodules" + "github.com/zclconf/go-cty/cty" + + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/modsdir" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const initFromModuleRootCallName = "root" +const initFromModuleRootFilename = "
" +const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." + +// DirFromModule populates the given directory (which must exist and be +// empty) with the contents of the module at the given source address. +// +// It does this by installing the given module and all of its descendent +// modules in a temporary root directory and then copying the installed +// files into suitable locations. As a consequence, any diagnostics it +// generates will reveal the location of this temporary directory to the +// user. +// +// This rather roundabout installation approach is taken to ensure that +// installation proceeds in a manner identical to normal module installation. +// +// If the given source address specifies a sub-directory of the given +// package then only the sub-directory and its descendents will be copied +// into the given root directory, which will cause any relative module +// references using ../ from that module to be unresolvable. Error diagnostics +// are produced in that case, to prompt the user to rewrite the source strings +// to be absolute references to the original remote module. +func DirFromModule(ctx context.Context, loader *configload.Loader, rootDir, modulesDir, sourceAddrStr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { + + var diags tfdiags.Diagnostics + + // The way this function works is pretty ugly, but we accept it because + // -from-module is a less important case than normal module installation + // and so it's better to keep this ugly complexity out here rather than + // adding even more complexity to the normal module installer. + + // The target directory must exist but be empty. 
+ { + entries, err := os.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target directory does not exist", + fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read target directory", + fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), + )) + } + return diags + } + haveEntries := false + for _, entry := range entries { + if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { + continue + } + haveEntries = true + } + if haveEntries { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't populate non-empty directory", + fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), + )) + return diags + } + } + + instDir := filepath.Join(rootDir, ".terraform/init-from-module") + inst := NewModuleInstaller(instDir, loader, reg) + log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddrStr) + os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too + err := os.MkdirAll(instDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create temporary directory", + fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), + )) + return diags + } + + instManifest := make(modsdir.Manifest) + retManifest := make(modsdir.Manifest) + + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. 
This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddrStr) + if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddrStr = filepath.Join(wd, sourceAddrStr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddrStr) + } + } + } + + // Now we need to create an artificial root module that will seed our + // installation process. + sourceAddr, err := addrs.ParseModuleSource(sourceAddrStr) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module source address", + fmt.Sprintf("Failed to parse module source address: %s", err), + )) + } + rng := hcl.Range{ + Filename: initFromModuleRootFilename, + Start: hcl.InitialPos, + End: hcl.InitialPos, + } + fakeRootModule := &configs.Module{ + ModuleCalls: map[string]*configs.ModuleCall{ + initFromModuleRootCallName: { + Name: initFromModuleRootCallName, + SourceAddr: sourceAddr, + Source: hcl.StaticExpr(cty.StringVal(sourceAddrStr), rng), + DeclRange: rng, + }, + }, + ProviderRequirements: &configs.RequiredProviders{}, + } + + // wrapHooks filters hook notifications to only include Download calls + // and to trim off the initFromModuleRootCallName prefix. We'll produce + // our own Install notifications directly below. + wrapHooks := installHooksInitDir{ + Wrapped: hooks, + } + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. 
+ instManifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + fetcher := getmodules.NewPackageFetcher() + + walker := inst.moduleInstallWalker(ctx, instManifest, true, wrapHooks, fetcher) + _, cDiags := inst.installDescendentModules(fakeRootModule, instManifest, walker, true) + if cDiags.HasErrors() { + return diags.Append(cDiags) + } + + // If all of that succeeded then we'll now migrate what was installed + // into the final directory structure. + err = os.MkdirAll(modulesDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local modules directory", + fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), + )) + return diags + } + + recordKeys := make([]string, 0, len(instManifest)) + for k := range instManifest { + recordKeys = append(recordKeys, k) + } + sort.Strings(recordKeys) + + for _, recordKey := range recordKeys { + record := instManifest[recordKey] + + if record.Key == initFromModuleRootCallName { + // We've found the module the user requested, which we must + // now copy into rootDir so it can be used directly. + log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) + err := copy.CopyDir(rootDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy root module", + fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddrStr, record.Dir, rootDir, err), + )) + continue + } + + // We'll try to load the newly-copied module here just so we can + // sniff for any module calls that ../ out of the root directory + // and must thus be rewritten to be absolute addresses again. + // For now we can't do this rewriting automatically, but we'll + // generate an error to help the user do it manually. 
+ mod, _ := loader.Parser().LoadConfigDir(rootDir, configs.NewStaticModuleCall(addrs.RootModule, nil, rootDir, "")) // ignore diagnostics since we're just doing value-add here anyway + if mod != nil { + for _, mc := range mod.ModuleCalls { + if pathTraversesUp(mc.SourceAddrRaw) { + packageAddr, givenSubdir := getmodules.SplitPackageSubdir(sourceAddrStr) + newSubdir := filepath.Join(givenSubdir, mc.SourceAddrRaw) + if pathTraversesUp(newSubdir) { + // This should never happen in any reasonable + // configuration since this suggests a path that + // traverses up out of the package root. We'll just + // ignore this, since we'll fail soon enough anyway + // trying to resolve this path when this module is + // loaded. + continue + } + + var newAddr = packageAddr + if newSubdir != "" { + newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Root module references parent directory", + fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddrStr, newAddr), + )) + continue + } + } + } + + retManifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + continue + } + + if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { + // Ignore the *real* root module, whose key is empty, since + // we're only interested in the module named "root" and its + // descendents. + continue + } + + newKey := record.Key[len(initFromModuleRootKeyPrefix):] + instPath := filepath.Join(modulesDir, newKey) + tempPath := filepath.Join(instDir, record.Key) + + // tempPath won't be present for a module that was installed from + // a relative path, so in that case we just record the installation + // directory and assume it was already copied into place as part + // of its parent. 
+ if _, err := os.Stat(tempPath); err != nil { + if !os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to stat temporary module install directory", + fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + var parentKey string + if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { + parentKey = newKey[:lastDot] + } + + var parentOld modsdir.Record + // "" is the root module; all other modules get `root.` added as a prefix + if parentKey == "" { + parentOld = instManifest[parentKey] + } else { + parentOld = instManifest[initFromModuleRootKeyPrefix+parentKey] + } + parentNew := retManifest[parentKey] + + // We need to figure out which portion of our directory is the + // parent package path and which portion is the subdirectory + // under that. + var baseDirRel string + baseDirRel, err = filepath.Rel(parentOld.Dir, record.Dir) + if err != nil { + // This error may occur when installing a local module with a + // relative path, for e.g. if the source is in a directory above + // the destination ("../") + if parentOld.Dir == "." 
{ + absDir, err := filepath.Abs(parentOld.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to determine module install directory", + fmt.Sprintf("Error determine relative source directory for module %s: %s.", newKey, err), + )) + continue + } + baseDirRel, err = filepath.Rel(absDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to determine relative module source location", + fmt.Sprintf("Error determining relative source for module %s: %s.", newKey, err), + )) + continue + } + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to determine relative module source location", + fmt.Sprintf("Error determining relative source for module %s: %s.", newKey, err), + )) + } + } + + newDir := filepath.Join(parentNew.Dir, baseDirRel) + log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) + newRecord := record // shallow copy + newRecord.Dir = newDir + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + continue + } + + err = os.MkdirAll(instPath, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create module install directory", + fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + // We copy rather than "rename" here because renaming between directories + // can be tricky in edge-cases like network filesystems, etc. 
+ log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) + err := copy.CopyDir(instPath, tempPath) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy descendent module", + fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), + )) + continue + } + + subDir, err := filepath.Rel(tempPath, record.Dir) + if err != nil { + // Should never happen, because we constructed both directories + // from the same base and so they must have a common prefix. + panic(err) + } + + newRecord := record // shallow copy + newRecord.Dir = filepath.Join(instPath, subDir) + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + } + + retManifest.WriteSnapshotToDir(modulesDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write module manifest", + fmt.Sprintf("Error writing module manifest: %s.", err), + )) + } + + if !diags.HasErrors() { + // Try to clean up our temporary directory, but don't worry if we don't + // succeed since it shouldn't hurt anything. + os.RemoveAll(instDir) + } + + return diags +} + +func pathTraversesUp(path string) bool { + return strings.HasPrefix(filepath.ToSlash(path), "../") +} + +// installHooksInitDir is an adapter wrapper for an InstallHooks that +// does some fakery to make downloads look like they are happening in their +// final locations, rather than in the temporary loader we use. +// +// It also suppresses "Install" calls entirely, since InitDirFromModule +// does its own installation steps after the initial installation pass +// has completed. 
+type installHooksInitDir struct { + Wrapped ModuleInstallHooks + ModuleInstallHooksImpl +} + +func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { + if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { + // We won't announce the root module, since hook implementations + // don't expect to see that and the caller will usually have produced + // its own user-facing notification about what it's doing anyway. + return + } + + trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] + h.Wrapped.Download(trimAddr, packageAddr, version) +} diff --git a/pkg/initwd/from_module_test.go b/pkg/initwd/from_module_test.go new file mode 100644 index 00000000000..3c3608204ad --- /dev/null +++ b/pkg/initwd/from_module_test.go @@ -0,0 +1,347 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + "context" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestDirFromModule_registry(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses registry.opentofu.org and github.com; set TF_ACC=1 to run it") + } + + fixtureDir := filepath.Clean("testdata/empty") + tmpDir, done := tempChdir(t, fixtureDir) + defer done() + + // the module installer runs filepath.EvalSymlinks() on the destination + // directory before copying files, and the resultant directory is what is + // returned by the install hooks. Without this, tests could fail on machines + // where the default temp dir was a symlink. 
+ dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + modsDir := filepath.Join(dir, ".terraform/modules") + + hooks := &testInstallHooks{} + + reg := registry.NewClient(nil, nil) + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + diags := DirFromModule(context.Background(), loader, dir, modsDir, "hashicorp/module-installer-acctest/aws//examples/main", reg, hooks) + assertNoDiagnostics(t, diags) + + v := version.Must(version.NewVersion("0.0.2")) + + wantCalls := []testInstallHookCall{ + // The module specified to populate the root directory is not mentioned + // here, because the hook mechanism is defined to talk about descendent + // modules only and so a caller to InitDirFromModule is expected to + // produce its own user-facing announcement about the root module being + // installed. + + // Note that "root" in the following examples is, confusingly, the + // label on the module block in the example we've installed here: + // module "root" { + + { + Name: "Download", + ModuleAddr: "root", + PackageAddr: "registry.opentofu.org/hashicorp/module-installer-acctest/aws", + Version: v, + }, + { + Name: "Install", + ModuleAddr: "root", + Version: v, + // NOTE: This local path and the other paths derived from it below + // can vary depending on how the registry is implemented. At the + // time of writing this test, registry.opentofu.org returns + // git repository source addresses and so this path refers to the + // root of the git clone, but historically the registry referred + // to GitHub-provided tar archives which meant that there was an + // extra level of subdirectory here for the typical directory + // nesting in tar archives, which would've been reflected as + // an extra segment on this path. If this test fails due to an + // additional path segment in future, then a change to the upstream + // registry might be the root cause. 
+ LocalPath: filepath.Join(dir, ".terraform/modules/root"), + }, + { + Name: "Install", + ModuleAddr: "root.child_a", + LocalPath: filepath.Join(dir, ".terraform/modules/root/modules/child_a"), + }, + { + Name: "Install", + ModuleAddr: "root.child_a.child_b", + LocalPath: filepath.Join(dir, ".terraform/modules/root/modules/child_b"), + }, + } + + if diff := cmp.Diff(wantCalls, hooks.Calls); diff != "" { + t.Fatalf("wrong installer calls\n%s", diff) + } + + loader, err = configload.NewLoader(&configload.Config{ + ModulesDir: modsDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) + config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + if assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) { + return + } + + wantTraces := map[string]string{ + "": "in example", + "root": "in root module", + "root.child_a": "in child_a module", + "root.child_a.child_b": "in child_b module", + } + gotTraces := map[string]string{} + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) +} + +func TestDirFromModule_submodules(t *testing.T) { + fixtureDir := filepath.Clean("testdata/empty") + fromModuleDir, err := filepath.Abs("./testdata/local-modules") + if err != nil { + t.Fatal(err) + } + + // DirFromModule will expand ("canonicalize") the pathnames, so we must do + // the same for our "wantCalls" comparison values. Otherwise this test + // will fail when building in a source tree with symlinks in $PWD. 
+ // + // See also: https://github.com/hashicorp/terraform/issues/26014 + // + fromModuleDirRealpath, err := filepath.EvalSymlinks(fromModuleDir) + if err != nil { + t.Error(err) + } + + tmpDir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + modInstallDir := filepath.Join(dir, ".terraform/modules") + + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + diags := DirFromModule(context.Background(), loader, dir, modInstallDir, fromModuleDir, nil, hooks) + assertNoDiagnostics(t, diags) + wantCalls := []testInstallHookCall{ + { + Name: "Install", + ModuleAddr: "child_a", + LocalPath: filepath.Join(fromModuleDirRealpath, "child_a"), + }, + { + Name: "Install", + ModuleAddr: "child_a.child_b", + LocalPath: filepath.Join(fromModuleDirRealpath, "child_a/child_b"), + }, + } + + if assertResultDeepEqual(t, hooks.Calls, wantCalls) { + return + } + + loader, err = configload.NewLoader(&configload.Config{ + ModulesDir: modInstallDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) + config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + if assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) { + return + } + wantTraces := map[string]string{ + "": "in root module", + "child_a": "in child_a module", + "child_a.child_b": "in child_b module", + } + gotTraces := map[string]string{} + + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) +} + +// submodulesWithProvider is identical to above, except that the configuration +// would fail to load for some reason. 
We still want the module to be installed +// for use cases like testing or CDKTF, and will only emit warnings for config +// errors. +func TestDirFromModule_submodulesWithProvider(t *testing.T) { + fixtureDir := filepath.Clean("testdata/empty") + fromModuleDir, err := filepath.Abs("./testdata/local-module-missing-provider") + if err != nil { + t.Fatal(err) + } + + tmpDir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + modInstallDir := filepath.Join(dir, ".terraform/modules") + + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + diags := DirFromModule(context.Background(), loader, dir, modInstallDir, fromModuleDir, nil, hooks) + + for _, d := range diags { + if d.Severity() != tfdiags.Warning { + t.Errorf("expected warning, got %v", diags.Err()) + } + } +} + +// TestDirFromModule_rel_submodules is similar to the test above, but the +// from-module is relative to the install dir ("../"): +// https://github.com/hashicorp/terraform/issues/23010 +func TestDirFromModule_rel_submodules(t *testing.T) { + // This test creates a tmpdir with the following directory structure: + // - tmpdir/local-modules (with contents of testdata/local-modules) + // - tmpdir/empty: the workDir we CD into for the test + // - tmpdir/empty/target (target, the destination for init -from-module) + tmpDir := t.TempDir() + fromModuleDir := filepath.Join(tmpDir, "local-modules") + workDir := filepath.Join(tmpDir, "empty") + if err := os.Mkdir(fromModuleDir, os.ModePerm); err != nil { + t.Fatal(err) + } + if err := copy.CopyDir(fromModuleDir, "testdata/local-modules"); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(workDir, os.ModePerm); err != nil { + t.Fatal(err) + } + + targetDir := filepath.Join(tmpDir, "target") + if err := os.Mkdir(targetDir, os.ModePerm); err != nil { + t.Fatal(err) + } + oldDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + 
err = os.Chdir(targetDir) + if err != nil { + t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err) + } + t.Cleanup(func() { + os.Chdir(oldDir) + // Trigger garbage collection to ensure that all open file handles are closed. + // This prevents TempDir RemoveAll cleanup errors on Windows. + if runtime.GOOS == "windows" { + runtime.GC() + } + }) + + hooks := &testInstallHooks{} + + modInstallDir := ".terraform/modules" + sourceDir := "../local-modules" + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + diags := DirFromModule(context.Background(), loader, ".", modInstallDir, sourceDir, nil, hooks) + assertNoDiagnostics(t, diags) + wantCalls := []testInstallHookCall{ + { + Name: "Install", + ModuleAddr: "child_a", + LocalPath: filepath.Join(sourceDir, "child_a"), + }, + { + Name: "Install", + ModuleAddr: "child_a.child_b", + LocalPath: filepath.Join(sourceDir, "child_a/child_b"), + }, + } + + if assertResultDeepEqual(t, hooks.Calls, wantCalls) { + return + } + + loader, err = configload.NewLoader(&configload.Config{ + ModulesDir: modInstallDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) 
+ config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + if assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) { + return + } + wantTraces := map[string]string{ + "": "in root module", + "child_a": "in child_a module", + "child_a.child_b": "in child_b module", + } + gotTraces := map[string]string{} + + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) +} diff --git a/pkg/initwd/module_install.go b/pkg/initwd/module_install.go new file mode 100644 index 00000000000..b510560bc6a --- /dev/null +++ b/pkg/initwd/module_install.go @@ -0,0 +1,944 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "path" + "path/filepath" + "strings" + + "github.com/apparentlymart/go-versions/versions" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/getmodules" + "github.com/kubegems/opentofu/pkg/modsdir" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/registry/regsrc" + "github.com/kubegems/opentofu/pkg/registry/response" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type ModuleInstaller struct { + modsDir string + loader *configload.Loader + reg *registry.Client + + // The keys in moduleVersions are resolved and trimmed registry source + // addresses and the values are the registry response. 
+ registryPackageVersions map[addrs.ModuleRegistryPackage]*response.ModuleVersions + + // The keys in moduleVersionsUrl are the moduleVersion struct below and + // addresses and the values are underlying remote source addresses. + registryPackageSources map[moduleVersion]addrs.ModuleSourceRemote +} + +type moduleVersion struct { + module addrs.ModuleRegistryPackage + version string +} + +func NewModuleInstaller(modsDir string, loader *configload.Loader, reg *registry.Client) *ModuleInstaller { + return &ModuleInstaller{ + modsDir: modsDir, + loader: loader, + reg: reg, + registryPackageVersions: make(map[addrs.ModuleRegistryPackage]*response.ModuleVersions), + registryPackageSources: make(map[moduleVersion]addrs.ModuleSourceRemote), + } +} + +// InstallModules analyses the root module in the given directory and installs +// all of its direct and transitive dependencies into the given modules +// directory, which must already exist. +// +// Since InstallModules makes possibly-time-consuming calls to remote services, +// a hook interface is supported to allow the caller to be notified when +// each module is installed and, for remote modules, when downloading begins. +// LoadConfig guarantees that two hook calls will not happen concurrently but +// it does not guarantee any particular ordering of hook calls. This mechanism +// is for UI feedback only and does not give the caller any control over the +// process. +// +// If modules are already installed in the target directory, they will be +// skipped unless their source address or version have changed or unless +// the upgrade flag is set. +// +// InstallModules never deletes any directory, except in the case where it +// needs to replace a directory that is already present with a newly-extracted +// package. +// +// installErrsOnly installs modules but converts validation errors from +// building the configuration after installation to warnings. 
This is used by +// commands like `get` or `init -from-module` where the established behavior +// was only to install the requested module, and extra validation can break +// compatibility. +// +// If the returned diagnostics contains errors then the module installation +// may have wholly or partially completed. Modules must be loaded in order +// to find their dependencies, so this function does many of the same checks +// as LoadConfig as a side-effect. +// +// If successful (the returned diagnostics contains no errors) then the +// first return value is the early configuration tree that was constructed by +// the installation process. +func (i *ModuleInstaller) InstallModules(ctx context.Context, rootDir, testsDir string, upgrade, installErrsOnly bool, hooks ModuleInstallHooks, call configs.StaticModuleCall) (*configs.Config, tfdiags.Diagnostics) { + log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) + var diags tfdiags.Diagnostics + + rootMod, mDiags := i.loader.Parser().LoadConfigDirWithTests(rootDir, testsDir, call) + if rootMod == nil { + // We drop the diagnostics here because we only want to report module + // loading errors after checking the core version constraints, which we + // can only do if the module can be at least partially loaded. + return nil, diags + } else if vDiags := rootMod.CheckCoreVersionRequirements(nil, nil); vDiags.HasErrors() { + // If the core version requirements are not met, we drop any other + // diagnostics, as they may reflect language changes from future + // OpenTofu versions. 
+ diags = diags.Append(vDiags) + } else { + diags = diags.Append(mDiags) + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read modules manifest file", + fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), + )) + return nil, diags + } + + fetcher := getmodules.NewPackageFetcher() + + if hooks == nil { + // Use our no-op implementation as a placeholder + hooks = ModuleInstallHooksImpl{} + } + + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. + manifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + walker := i.moduleInstallWalker(ctx, manifest, upgrade, hooks, fetcher) + + cfg, instDiags := i.installDescendentModules(rootMod, manifest, walker, installErrsOnly) + diags = append(diags, instDiags...) + + return cfg, diags +} + +func (i *ModuleInstaller) moduleInstallWalker(ctx context.Context, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, fetcher *getmodules.PackageFetcher) configs.ModuleWalker { + return configs.ModuleWalkerFunc( + func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if req.SourceAddr == nil { + // If the parent module failed to parse the module source + // address, we can't load it here. Return nothing as the parent + // module's diagnostics should explain this. + return nil, nil, diags + } + + if req.Name == "" { + // An empty string for a module instance name breaks our + // manifest map, which uses that to indicate the root module. + // Because we descend into modules which have errors, we need + // to look out for this case, but the config loader's + // diagnostics will report the error later. + return nil, nil, diags + } + + if !hclsyntax.ValidIdentifier(req.Name) { + // A module with an invalid name shouldn't be installed at all. 
This is + // mostly a concern for remote modules, since we need to be able to convert + // the name to a valid path. + return nil, nil, diags + } + + key := manifest.ModuleKey(req.Path) + instPath := i.packageInstallPath(req.Path) + + log.Printf("[DEBUG] Module installer: begin %s", key) + + // First we'll check if we need to upgrade/replace an existing + // installed module, and delete it out of the way if so. + replace := upgrade + if !replace { + record, recorded := manifest[key] + switch { + case !recorded: + log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) + replace = true + case record.SourceAddr != req.SourceAddr.String(): + log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) + replace = true + case record.Version != nil && !req.VersionConstraint.Required.Check(record.Version): + log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraint.Required) + replace = true + } + } + + // If we _are_ planning to replace this module, then we'll remove + // it now so our installation code below won't conflict with any + // existing remnants. + if replace { + if _, recorded := manifest[key]; recorded { + log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) + } + delete(manifest, key) + // Deleting a module invalidates all of its descendent modules too. + keyPrefix := key + "." + for subKey := range manifest { + if strings.HasPrefix(subKey, keyPrefix) { + if _, recorded := manifest[subKey]; recorded { + log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) + } + delete(manifest, subKey) + } + } + } + + record, recorded := manifest[key] + if !recorded { + // Clean up any stale cache directory that might be present. + // If this is a local (relative) source then the dir will + // not exist, but we'll ignore that. 
+ log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) + err := os.RemoveAll(instPath) + if err != nil && !os.IsNotExist(err) { + log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to remove local module cache", + Detail: fmt.Sprintf( + "OpenTofu tried to remove %s in order to reinstall this module, but encountered an error: %s", + instPath, err, + ), + }) + return nil, nil, diags + } + } else { + // If this module is already recorded and its root directory + // exists then we will just load what's already there and + // keep our existing record. + info, err := os.Stat(record.Dir) + if err == nil && info.IsDir() { + mod, mDiags := i.loader.Parser().LoadConfigDir(record.Dir, req.Call) + if mod == nil { + // nil indicates an unreadable module, which should never happen, + // so we return the full loader diagnostics here. + diags = diags.Extend(mDiags) + } else if vDiags := mod.CheckCoreVersionRequirements(req.Path, req.SourceAddr); vDiags.HasErrors() { + // If the core version requirements are not met, we drop any other + // diagnostics, as they may reflect language changes from future + // OpenTofu versions. + diags = diags.Extend(vDiags) + } else { + diags = diags.Extend(mDiags) + } + + log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) + return mod, record.Version, diags + } + } + + // If we get down here then it's finally time to actually install + // the module. There are some variants to this process depending + // on what type of module source address we have. 
+ + switch addr := req.SourceAddr.(type) { + + case addrs.ModuleSourceLocal: + log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, addr.String()) + mod, mDiags := i.installLocalModule(req, key, manifest, hooks) + mDiags = maybeImproveLocalInstallError(req, mDiags) + diags = append(diags, mDiags...) + return mod, nil, diags + + case addrs.ModuleSourceRegistry: + log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr.String()) + mod, v, mDiags := i.installRegistryModule(ctx, req, key, instPath, addr, manifest, hooks, fetcher) + diags = append(diags, mDiags...) + return mod, v, diags + + case addrs.ModuleSourceRemote: + log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, addr.String()) + mod, mDiags := i.installGoGetterModule(ctx, req, key, instPath, manifest, hooks, fetcher) + diags = append(diags, mDiags...) + return mod, nil, diags + + default: + // Shouldn't get here, because there are no other implementations + // of addrs.ModuleSource. + panic(fmt.Sprintf("unsupported module source address %#v", addr)) + } + }, + ) +} + +func (i *ModuleInstaller) installDescendentModules(rootMod *configs.Module, manifest modsdir.Manifest, installWalker configs.ModuleWalker, installErrsOnly bool) (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // When attempting to initialize the current directory with a module + // source, some use cases may want to ignore configuration errors from the + // building of the entire configuration structure, but we still need to + // capture installation errors. Because the actual module installation + // happens in the ModuleWalkFunc callback while building the config, we + // need to create a closure to capture the installation diagnostics + // separately. 
+ var instDiags hcl.Diagnostics + walker := installWalker + if installErrsOnly { + walker = configs.ModuleWalkerFunc(func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + mod, version, diags := installWalker.LoadModule(req) + instDiags = instDiags.Extend(diags) + return mod, version, diags + }) + } + + cfg, cDiags := configs.BuildConfig(rootMod, walker) + diags = diags.Append(cDiags) + if installErrsOnly { + // We can't continue if there was an error during installation, but + // return all diagnostics in case there happens to be anything else + // useful when debugging the problem. Any instDiags will be included in + // diags already. + if instDiags.HasErrors() { + return cfg, diags + } + + // If there are any errors here, they must be only from building the + // config structures. We don't want to block initialization at this + // point, so convert these into warnings. Any actual errors in the + // configuration will be raised as soon as the config is loaded again. + // We continue below because writing the manifest is required to finish + // module installation. 
+ diags = tfdiags.OverrideAll(diags, tfdiags.Warning, nil) + } + + err := manifest.WriteSnapshotToDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update module manifest", + fmt.Sprintf("Unable to write the module manifest file: %s", err), + )) + } + + return cfg, diags +} + +func (i *ModuleInstaller) installLocalModule(req *configs.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*configs.Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + + parentKey := manifest.ModuleKey(req.Parent.Path) + parentRecord, recorded := manifest[parentKey] + if !recorded { + // This is indicative of a bug rather than a user-actionable error + panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) + } + + if len(req.VersionConstraint.Required) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallRange.Filename, req.CallRange.Start.Line), + Subject: req.CallRange.Ptr(), + }) + } + + // For local sources we don't actually need to modify the + // filesystem at all because the parent already wrote + // the files we need, and so we just load up what's already here. + newDir := filepath.Join(parentRecord.Dir, req.SourceAddr.String()) + + log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) + // it is possible that the local directory is a symlink + newDir, err := filepath.EvalSymlinks(newDir) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unreadable module directory", + Detail: fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), + }) + } + + // Finally we are ready to try actually loading the module. 
+ mod, mDiags := i.loader.Parser().LoadConfigDir(newDir, req.Call) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unreadable module directory", + Detail: fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallRange.Filename, req.CallRange.Start.Line), + }) + } else if vDiags := mod.CheckCoreVersionRequirements(req.Path, req.SourceAddr); vDiags.HasErrors() { + // If the core version requirements are not met, we drop any other + // diagnostics, as they may reflect language changes from future + // OpenTofu versions. + diags = diags.Extend(vDiags) + } else { + diags = diags.Extend(mDiags) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Dir: newDir, + SourceAddr: req.SourceAddr.String(), + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) + hooks.Install(key, nil, newDir) + + return mod, diags +} + +func (i *ModuleInstaller) installRegistryModule(ctx context.Context, req *configs.ModuleRequest, key string, instPath string, addr addrs.ModuleSourceRegistry, manifest modsdir.Manifest, hooks ModuleInstallHooks, fetcher *getmodules.PackageFetcher) (*configs.Module, *version.Version, hcl.Diagnostics) { + var diags hcl.Diagnostics + + hostname := addr.Package.Host + reg := i.reg + var resp *response.ModuleVersions + var exists bool + + // A registry entry isn't _really_ a module package, but we'll pretend it's + // one for the sake of this reporting by just trimming off any source + // directory. + packageAddr := addr.Package + + // Our registry client is still using the legacy model of addresses, so + // we'll shim it here for now. 
+ regsrcAddr := regsrc.ModuleFromRegistryPackageAddr(packageAddr) + + // check if we've already looked up this module from the registry + if resp, exists = i.registryPackageVersions[packageAddr]; exists { + log.Printf("[TRACE] %s using already found available versions of %s at %s", key, addr, hostname) + } else { + var err error + log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) + resp, err = reg.ModuleVersions(ctx, regsrcAddr) + if err != nil { + if registry.IsModuleNotFound(err) { + suggestion := "" + if hostname == addrs.DefaultModuleRegistryHost { + suggestion = "\n\nIf you believe this module is missing from the registry, please submit a issue on the OpenTofu Registry https://github.com/opentofu/registry/issues/new/choose" + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module not found", + Detail: fmt.Sprintf("Module %s (%q from %s:%d) cannot be found in the module registry at %s.%s", addr.Package.ForRegistryProtocol(), req.Name, req.CallRange.Filename, req.CallRange.Start.Line, hostname, suggestion), + Subject: req.CallRange.Ptr(), + }) + } else if errors.Is(err, context.Canceled) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module installation was interrupted", + Detail: fmt.Sprintf("Received interrupt signal while retrieving available versions for module %q.", req.Name), + }) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error accessing remote module registry", + Detail: fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallRange.Filename, req.CallRange.Start.Line, hostname, err), + Subject: req.CallRange.Ptr(), + }) + } + return nil, nil, diags + } + i.registryPackageVersions[packageAddr] = resp + } + + // The response might contain information about dependencies to allow us + // to potentially optimize future requests, but we don't currently do that + 
// and so for now we'll just take the first item which is guaranteed to + // be the address we requested. + if len(resp.Modules) < 1 { + // Should never happen, but since this is a remote service that may + // be implemented by third-parties we will handle it gracefully. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid response from remote module registry", + Detail: fmt.Sprintf("The registry at %s returned an invalid response when OpenTofu requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallRange.Filename, req.CallRange.Start.Line), + Subject: req.CallRange.Ptr(), + }) + return nil, nil, diags + } + + modMeta := resp.Modules[0] + + var latestMatch *version.Version + var latestVersion *version.Version + for _, mv := range modMeta.Versions { + v, err := version.NewVersion(mv.Version) + if err != nil { + // Should never happen if the registry server is compliant with + // the protocol, but we'll warn if not to assist someone who + // might be developing a module registry server. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Invalid response from remote module registry", + Detail: fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which OpenTofu ignored.", hostname, mv.Version, req.Name, req.CallRange.Filename, req.CallRange.Start.Line), + Subject: req.CallRange.Ptr(), + }) + continue + } + + // If we've found a pre-release version then we'll ignore it unless + // it was exactly requested. + // + // The prerelease checking will be handled by a different library for + // 2 reasons. First, this other library automatically includes the + // "prerelease versions must be exactly requested" behaviour that we are + // looking for. Second, this other library is used to handle all version + // constraints for the provider logic and this is the first step to + // making the module and provider version logic match. 
+		if v.Prerelease() != "" {
+			// At this point all versions published by the module with
+			// prerelease metadata will be checked. Users may not have even
+			// requested this prerelease so don't print lots of unnecessary
+			// warnings.
+			acceptableVersions, err := versions.MeetingConstraintsString(req.VersionConstraint.Required.String())
+			if err != nil {
+				log.Printf("[WARN] ModuleInstaller: %s ignoring %s because the version constraints (%s) could not be parsed: %s", key, v, req.VersionConstraint.Required.String(), err.Error())
+				continue
+			}
+
+			// Validate the version is also readable by the other versions
+			// library.
+			version, err := versions.ParseVersion(v.String())
+			if err != nil {
+				log.Printf("[WARN] ModuleInstaller: %s ignoring %s because the version (%s) reported by the module could not be parsed: %s", key, v, v.String(), err.Error())
+				continue
+			}
+
+			// Finally, check if the prerelease is acceptable to version. As
+			// highlighted previously, we go through all of this because the
+			// apparentlymart/go-versions library handles prerelease constraints
+			// in the approach we want.
+			if !acceptableVersions.Has(version) {
+				log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v)
+				continue
+			}
+
+			// If we reach here, it means this prerelease version was exactly
+			// requested according to the extra constraints of this library.
+			// We fall through and allow the other library to also validate it
+			// for consistency.
+ } + + if latestVersion == nil || v.GreaterThan(latestVersion) { + latestVersion = v + } + + if req.VersionConstraint.Required.Check(v) { + if latestMatch == nil || v.GreaterThan(latestMatch) { + latestMatch = v + } + } + } + + if latestVersion == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module has no versions", + Detail: fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallRange.Filename, req.CallRange.Start.Line, hostname), + Subject: req.CallRange.Ptr(), + }) + return nil, nil, diags + } + + if latestMatch == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unresolvable module version constraint", + Detail: fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallRange.Filename, req.CallRange.Start.Line, latestVersion), + Subject: req.CallRange.Ptr(), + }) + return nil, nil, diags + } + + // Report up to the caller that we're about to start downloading. + hooks.Download(key, packageAddr.String(), latestMatch) + + // If we manage to get down here then we've found a suitable version to + // install, so we need to ask the registry where we should download it from. + // The response to this is a go-getter-style address string. 
+
+	// first check the cache for the download URL
+	moduleAddr := moduleVersion{module: packageAddr, version: latestMatch.String()}
+	if _, exists := i.registryPackageSources[moduleAddr]; !exists {
+		realAddrRaw, err := reg.ModuleLocation(ctx, regsrcAddr, latestMatch.String())
+		if err != nil {
+			log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err)
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Error accessing remote module registry",
+				Detail:   fmt.Sprintf("Failed to retrieve a download URL for %s %s from %s: %s", addr, latestMatch, hostname, err),
+			})
+			return nil, nil, diags
+		}
+		realAddr, err := addrs.ParseModuleSource(realAddrRaw)
+		if err != nil {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid package location from module registry",
+				Detail:   fmt.Sprintf("Module registry %s returned invalid source location %q for %s %s: %s.", hostname, realAddrRaw, addr, latestMatch, err),
+			})
+			return nil, nil, diags
+		}
+		switch realAddr := realAddr.(type) {
+		// Only a remote source address is allowed here: a registry isn't
+		// allowed to return a local path (because it doesn't know what
+		// it's being called from) and we also don't allow recursively pointing
+		// at another registry source for simplicity's sake.
+ case addrs.ModuleSourceRemote: + i.registryPackageSources[moduleAddr] = realAddr + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid package location from module registry", + Detail: fmt.Sprintf("Module registry %s returned invalid source location %q for %s %s: must be a direct remote package address.", hostname, realAddrRaw, addr, latestMatch), + }) + return nil, nil, diags + } + } + + dlAddr := i.registryPackageSources[moduleAddr] + + log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, packageAddr, latestMatch, dlAddr.Package) + + err := fetcher.FetchPackage(ctx, instPath, dlAddr.Package.String()) + if errors.Is(err, context.Canceled) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module download was interrupted", + Detail: fmt.Sprintf("Interrupt signal received when downloading module %s.", addr), + }) + return nil, nil, diags + } + if err != nil { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to download module", + Detail: fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallRange.Filename, req.CallRange.Start.Line, dlAddr, err), + Subject: req.CallRange.Ptr(), + }) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr.Package, instPath) + + // Incorporate any subdir information from the original path into the + // address returned by the registry in order to find the final directory + // of the target module. 
+ finalAddr := dlAddr.FromRegistry(addr) + subDir := filepath.FromSlash(finalAddr.Subdir) + modDir := filepath.Join(instPath, subDir) + + log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) + + // Finally we are ready to try actually loading the module. + mod, mDiags := i.loader.Parser().LoadConfigDir(modDir, req.Call) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For registry modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unreadable module directory", + Detail: fmt.Sprintf("The directory %s could not be read. This is a bug in OpenTofu and should be reported.", modDir), + }) + } else if vDiags := mod.CheckCoreVersionRequirements(req.Path, req.SourceAddr); vDiags.HasErrors() { + // If the core version requirements are not met, we drop any other + // diagnostics, as they may reflect language changes from future + // OpenTofu versions. + diags = diags.Extend(vDiags) + } else { + diags = diags.Extend(mDiags) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Version: latestMatch, + Dir: modDir, + SourceAddr: req.SourceAddr.String(), + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, latestMatch, modDir) + + return mod, latestMatch, diags +} + +func (i *ModuleInstaller) installGoGetterModule(ctx context.Context, req *configs.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, fetcher *getmodules.PackageFetcher) (*configs.Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + + // Report up to the caller that we're about to start downloading. 
+ addr := req.SourceAddr.(addrs.ModuleSourceRemote) + packageAddr := addr.Package + hooks.Download(key, packageAddr.String(), nil) + + if len(req.VersionConstraint.Required) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it doesn't come from a module registry.", req.Name, req.CallRange.Filename, req.CallRange.Start.Line), + Subject: req.CallRange.Ptr(), + }) + return nil, diags + } + + err := fetcher.FetchPackage(ctx, instPath, packageAddr.String()) + if err != nil { + // go-getter generates a poor error for an invalid relative path, so + // we'll detect that case and generate a better one. + if _, ok := err.(*getmodules.MaybeRelativePathErr); ok { + log.Printf( + "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", + req.SourceAddr, + ) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module not found", + Detail: fmt.Sprintf( + "The module address %q could not be resolved.\n\n"+ + "If you intended this as a path relative to the current "+ + "module, use \"./%s\" instead. The \"./\" prefix "+ + "indicates that the address is a relative filesystem path.", + req.SourceAddr, req.SourceAddr, + ), + }) + } else { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to download module", + Detail: fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallRange.Filename, req.CallRange.Start.Line, packageAddr, err), + Subject: req.CallRange.Ptr(), + }) + } + return nil, diags + } + + modDir, err := getmodules.ExpandSubdirGlobs(instPath, addr.Subdir) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to expand subdir globs", + Detail: err.Error(), + }) + return nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, addr, modDir) + + // Finally we are ready to try actually loading the module. + mod, mDiags := i.loader.Parser().LoadConfigDir(modDir, req.Call) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For go-getter modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unreadable module directory", + Detail: fmt.Sprintf("The directory %s could not be read. This is a bug in OpenTofu and should be reported.", modDir), + }) + } else if vDiags := mod.CheckCoreVersionRequirements(req.Path, req.SourceAddr); vDiags.HasErrors() { + // If the core version requirements are not met, we drop any other + // diagnostics, as they may reflect language changes from future + // OpenTofu versions. + diags = diags.Extend(vDiags) + } else { + diags = diags.Extend(mDiags) + } + + // Note the local location in our manifest. 
+ manifest[key] = modsdir.Record{ + Key: key, + Dir: modDir, + SourceAddr: req.SourceAddr.String(), + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, nil, modDir) + + return mod, diags +} + +func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { + return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) +} + +// maybeImproveLocalInstallError is a helper function which can recognize +// some specific situations where it can return a more helpful error message +// and thus replace the given errors with those if so. +// +// If this function can't do anything about a particular situation then it +// will just return the given diags verbatim. +// +// This function's behavior is only reasonable for errors returned from the +// ModuleInstaller.installLocalModule function. +func maybeImproveLocalInstallError(req *configs.ModuleRequest, diags hcl.Diagnostics) hcl.Diagnostics { + if !diags.HasErrors() { + return diags + } + + // The main situation we're interested in detecting here is whether the + // current module or any of its ancestors use relative paths that reach + // outside of the "package" established by the nearest non-local ancestor. + // That's never really valid, but unfortunately we historically didn't + // have any explicit checking for it and so now for compatibility in + // situations where things just happened to "work" we treat this as an + // error only in situations where installation would've failed anyway, + // so we can give a better error about it than just a generic + // "directory not found" or whatever. + // + // Since it's never actually valid to relative out of the containing + // package, we just assume that any failed local package install which + // does so was caused by that, because to stop doing it should always + // improve the situation, even if it leads to another error describing + // a different problem. 
+ + // To decide this we need to find the subset of our ancestors that + // belong to the same "package" as our request, along with the closest + // ancestor that defined that package, and then we can work forwards + // to see if any of the local paths "escaped" the package. + type Step struct { + Path addrs.Module + SourceAddr addrs.ModuleSource + } + var packageDefiner Step + var localRefs []Step + localRefs = append(localRefs, Step{ + Path: req.Path, + SourceAddr: req.SourceAddr, + }) + current := req.Parent // a configs.Config where Children isn't populated yet + for { + if current == nil || current.SourceAddr == nil { + // We've reached the root module, in which case we aren't + // in an external "package" at all and so our special case + // can't apply. + return diags + } + if _, ok := current.SourceAddr.(addrs.ModuleSourceLocal); !ok { + // We've found the package definer, then! + packageDefiner = Step{ + Path: current.Path, + SourceAddr: current.SourceAddr, + } + break + } + + localRefs = append(localRefs, Step{ + Path: current.Path, + SourceAddr: current.SourceAddr, + }) + current = current.Parent + } + // Our localRefs list is reversed because we were traversing up the tree, + // so we'll flip it the other way and thus walk "downwards" through it. + for i, j := 0, len(localRefs)-1; i < j; i, j = i+1, j-1 { + localRefs[i], localRefs[j] = localRefs[j], localRefs[i] + } + + // Our method here is to start with a known base path prefix and + // then apply each of the local refs to it in sequence until one of + // them causes us to "lose" the prefix. If that happens, we've found + // an escape to report. This is not an exact science but good enough + // heuristic for choosing a better error message. 
+ const prefix = "*/" // NOTE: this can find a false negative if the user chooses "*" as a directory name, but we consider that unlikely + packageAddr, startPath := splitAddrSubdir(packageDefiner.SourceAddr) + currentPath := path.Join(prefix, startPath) + for _, step := range localRefs { + rel := step.SourceAddr.String() + + nextPath := path.Join(currentPath, rel) + if !strings.HasPrefix(nextPath, prefix) { // ESCAPED! + escapeeAddr := step.Path.String() + + var newDiags hcl.Diagnostics + + // First we'll copy over any non-error diagnostics from the source diags + for _, diag := range diags { + if diag.Severity != hcl.DiagError { + newDiags = newDiags.Append(diag) + } + } + + // ...but we'll replace any errors with this more precise error. + var suggestion string + if strings.HasPrefix(packageAddr, "/") || filepath.VolumeName(packageAddr) != "" { + // It might be somewhat surprising that OpenTofu treats + // absolute filesystem paths as "external" even though it + // treats relative paths as local, so if it seems like that's + // what the user was doing then we'll add an additional note + // about it. + suggestion = "\n\nOpenTofu treats absolute filesystem paths as external modules which establish a new module package. To treat this directory as part of the same package as its caller, use a local path starting with either \"./\" or \"../\"." + } + newDiags = newDiags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Local module path escapes module package", + Detail: fmt.Sprintf( + "The given source directory for %s would be outside of its containing package %q. Local source addresses starting with \"../\" must stay within the same package that the calling module belongs to.%s", + escapeeAddr, packageAddr, suggestion, + ), + }) + + return newDiags + } + + currentPath = nextPath + } + + // If we get down here then we have nothing useful to do, so we'll just + // echo back what we were given. 
+ return diags +} + +func splitAddrSubdir(addr addrs.ModuleSource) (string, string) { + switch addr := addr.(type) { + case addrs.ModuleSourceRegistry: + subDir := addr.Subdir + addr.Subdir = "" + return addr.String(), subDir + case addrs.ModuleSourceRemote: + return addr.Package.String(), addr.Subdir + case nil: + panic("splitAddrSubdir on nil addrs.ModuleSource") + default: + return addr.String(), "" + } +} diff --git a/pkg/initwd/module_install_hooks.go b/pkg/initwd/module_install_hooks.go new file mode 100644 index 00000000000..0d8c8e8bdf2 --- /dev/null +++ b/pkg/initwd/module_install_hooks.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + version "github.com/hashicorp/go-version" +) + +// ModuleInstallHooks is an interface used to provide notifications about the +// installation process being orchestrated by InstallModules. +// +// This interface may have new methods added in future, so implementers should +// embed InstallHooksImpl to get no-op implementations of any unimplemented +// methods. +type ModuleInstallHooks interface { + // Download is called for modules that are retrieved from a remote source + // before that download begins, to allow a caller to give feedback + // on progress through a possibly-long sequence of downloads. + Download(moduleAddr, packageAddr string, version *version.Version) + + // Install is called for each module that is installed, even if it did + // not need to be downloaded from a remote source. + Install(moduleAddr string, version *version.Version, localPath string) +} + +// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that +// can be embedded in another implementation struct to allow only partial +// implementation of the interface. 
+type ModuleInstallHooksImpl struct { +} + +func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { +} + +func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { +} + +var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/pkg/initwd/module_install_test.go b/pkg/initwd/module_install_test.go new file mode 100644 index 00000000000..340ff831764 --- /dev/null +++ b/pkg/initwd/module_install_test.go @@ -0,0 +1,1003 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + version "github.com/hashicorp/go-version" + svchost "github.com/hashicorp/terraform-svchost" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func TestModuleInstaller(t *testing.T) { + fixtureDir := filepath.Clean("testdata/local-modules") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + wantCalls := []testInstallHookCall{ + { + Name: "Install", + ModuleAddr: 
"child_a", + PackageAddr: "", + LocalPath: "child_a", + }, + { + Name: "Install", + ModuleAddr: "child_a.child_b", + PackageAddr: "", + LocalPath: filepath.Join("child_a", "child_b"), + }, + } + + if assertResultDeepEqual(t, hooks.Calls, wantCalls) { + return + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) + config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + wantTraces := map[string]string{ + "": "in root module", + "child_a": "in child_a module", + "child_a.child_b": "in child_b module", + } + gotTraces := map[string]string{} + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) +} + +func TestModuleInstaller_error(t *testing.T) { + fixtureDir := filepath.Clean("testdata/local-module-error") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + assertDiagnosticSummary(t, diags, "Invalid module source address") + } +} + +func TestModuleInstaller_emptyModuleName(t *testing.T) { + fixtureDir := filepath.Clean("testdata/empty-module-name") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + 
modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + assertDiagnosticSummary(t, diags, "Invalid module instance name") + } +} + +func TestModuleInstaller_invalidModuleName(t *testing.T) { + fixtureDir := filepath.Clean("testdata/invalid-module-name") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + _, diags := inst.InstallModules(context.Background(), dir, "tests", false, false, hooks, configs.RootModuleCallForTesting()) + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + assertDiagnosticSummary(t, diags, "Invalid module instance name") + } +} + +func TestModuleInstaller_packageEscapeError(t *testing.T) { + fixtureDir := filepath.Clean("testdata/load-module-package-escape") + dir, done := tempChdir(t, fixtureDir) + defer done() + + // For this particular test we need an absolute path in the root module + // that must actually resolve to our temporary directory in "dir", so + // we need to do a little rewriting. We replace the arbitrary placeholder + // %%BASE%% with the temporary directory path. 
+ { + rootFilename := filepath.Join(dir, "package-escape.tf") + template, err := os.ReadFile(rootFilename) + if err != nil { + t.Fatal(err) + } + final := bytes.ReplaceAll(template, []byte("%%BASE%%"), []byte(filepath.ToSlash(dir))) + err = os.WriteFile(rootFilename, final, 0644) + if err != nil { + t.Fatal(err) + } + } + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + assertDiagnosticSummary(t, diags, "Local module path escapes module package") + } +} + +func TestModuleInstaller_explicitPackageBoundary(t *testing.T) { + fixtureDir := filepath.Clean("testdata/load-module-package-prefix") + dir, done := tempChdir(t, fixtureDir) + defer done() + + // For this particular test we need an absolute path in the root module + // that must actually resolve to our temporary directory in "dir", so + // we need to do a little rewriting. We replace the arbitrary placeholder + // %%BASE%% with the temporary directory path. 
+ { + rootFilename := filepath.Join(dir, "package-prefix.tf") + template, err := os.ReadFile(rootFilename) + if err != nil { + t.Fatal(err) + } + final := bytes.ReplaceAll(template, []byte("%%BASE%%"), []byte(filepath.ToSlash(dir))) + err = os.WriteFile(rootFilename, final, 0644) + if err != nil { + t.Fatal(err) + } + } + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } +} + +func TestModuleInstaller_ExactMatchPrerelease(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses registry.opentofu.org and github.com; set TF_ACC=1 to run it") + } + + fixtureDir := filepath.Clean("testdata/prerelease-version-constraint-match") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + cfg, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if diags.HasErrors() { + t.Fatalf("found unexpected errors: %s", diags.Err()) + } + + if !cfg.Children["acctest_exact"].Version.Equal(version.Must(version.NewVersion("v0.0.3-alpha.1"))) { + t.Fatalf("expected version %s but found version %s", "v0.0.3-alpha.1", cfg.Version.String()) + } +} + +func TestModuleInstaller_PartialMatchPrerelease(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses registry.opentofu.org and github.com; set TF_ACC=1 to run it") + } + + fixtureDir := 
filepath.Clean("testdata/prerelease-version-constraint") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + cfg, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if diags.HasErrors() { + t.Fatalf("found unexpected errors: %s", diags.Err()) + } + + if !cfg.Children["acctest_partial"].Version.Equal(version.Must(version.NewVersion("v0.0.2"))) { + t.Fatalf("expected version %s but found version %s", "v0.0.2", cfg.Version.String()) + } +} + +func TestModuleInstaller_invalid_version_constraint_error(t *testing.T) { + fixtureDir := filepath.Clean("testdata/invalid-version-constraint") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + // We use the presence of the "version" argument as a heuristic for + // user intent to use a registry module, and so we intentionally catch + // this as an invalid registry module address rather than an invalid + // version constraint, so we can surface the specific address parsing + // error instead of a generic version constraint error. 
+ assertDiagnosticSummary(t, diags, "Invalid registry module source address") + } +} + +func TestModuleInstaller_invalidVersionConstraintGetter(t *testing.T) { + fixtureDir := filepath.Clean("testdata/invalid-version-constraint") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + // We use the presence of the "version" argument as a heuristic for + // user intent to use a registry module, and so we intentionally catch + // this as an invalid registry module address rather than an invalid + // version constraint, so we can surface the specific address parsing + // error instead of a generic version constraint error. 
+ assertDiagnosticSummary(t, diags, "Invalid registry module source address") + } +} + +func TestModuleInstaller_invalidVersionConstraintLocal(t *testing.T) { + fixtureDir := filepath.Clean("testdata/invalid-version-constraint-local") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + + if !diags.HasErrors() { + t.Fatal("expected error") + } else { + // We use the presence of the "version" argument as a heuristic for + // user intent to use a registry module, and so we intentionally catch + // this as an invalid registry module address rather than an invalid + // version constraint, so we can surface the specific address parsing + // error instead of a generic version constraint error. 
+ assertDiagnosticSummary(t, diags, "Invalid registry module source address") + } +} + +func TestModuleInstaller_symlink(t *testing.T) { + fixtureDir := filepath.Clean("testdata/local-module-symlink") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + wantCalls := []testInstallHookCall{ + { + Name: "Install", + ModuleAddr: "child_a", + PackageAddr: "", + LocalPath: "child_a", + }, + { + Name: "Install", + ModuleAddr: "child_a.child_b", + PackageAddr: "", + LocalPath: filepath.Join("child_a", "child_b"), + }, + } + + if assertResultDeepEqual(t, hooks.Calls, wantCalls) { + return + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) 
+ config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + wantTraces := map[string]string{ + "": "in root module", + "child_a": "in child_a module", + "child_a.child_b": "in child_b module", + } + gotTraces := map[string]string{} + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) +} + +func TestLoaderInstallModules_registry(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses registry.opentofu.org and github.com; set TF_ACC=1 to run it") + } + + fixtureDir := filepath.Clean("testdata/registry-modules") + tmpDir, done := tempChdir(t, fixtureDir) + // the module installer runs filepath.EvalSymlinks() on the destination + // directory before copying files, and the resultant directory is what is + // returned by the install hooks. Without this, tests could fail on machines + // where the default temp dir was a symlink. + dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + + defer done() + + hooks := &testInstallHooks{} + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + _, diags := inst.InstallModules(context.Background(), dir, "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + v := version.Must(version.NewVersion("0.0.1")) + + wantCalls := []testInstallHookCall{ + // the configuration builder visits each level of calls in lexicographical + // order by name, so the following list is kept in the same order. 
+ + // acctest_child_a accesses //modules/child_a directly + { + Name: "Download", + ModuleAddr: "acctest_child_a", + PackageAddr: "registry.opentofu.org/hashicorp/module-installer-acctest/aws", // intentionally excludes the subdir because we're downloading the whole package here + Version: v, + }, + { + Name: "Install", + ModuleAddr: "acctest_child_a", + Version: v, + // NOTE: This local path and the other paths derived from it below + // can vary depending on how the registry is implemented. At the + // time of writing this test, registry.opentofu.org returns + // git repository source addresses and so this path refers to the + // root of the git clone, but historically the registry referred + // to GitHub-provided tar archives which meant that there was an + // extra level of subdirectory here for the typical directory + // nesting in tar archives, which would've been reflected as + // an extra segment on this path. If this test fails due to an + // additional path segment in future, then a change to the upstream + // registry might be the root cause. 
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_a/modules/child_a"),
+ },
+
+ // acctest_child_a.child_b
+ // (no download because it's a relative path inside acctest_child_a)
+ {
+ Name: "Install",
+ ModuleAddr: "acctest_child_a.child_b",
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_a/modules/child_b"),
+ },
+
+ // acctest_child_b accesses //modules/child_b directly
+ {
+ Name: "Download",
+ ModuleAddr: "acctest_child_b",
+ PackageAddr: "registry.opentofu.org/hashicorp/module-installer-acctest/aws", // intentionally excludes the subdir because we're downloading the whole package here
+ Version: v,
+ },
+ {
+ Name: "Install",
+ ModuleAddr: "acctest_child_b",
+ Version: v,
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_b/modules/child_b"),
+ },
+
+ // acctest_root
+ {
+ Name: "Download",
+ ModuleAddr: "acctest_root",
+ PackageAddr: "registry.opentofu.org/hashicorp/module-installer-acctest/aws",
+ Version: v,
+ },
+ {
+ Name: "Install",
+ ModuleAddr: "acctest_root",
+ Version: v,
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root"),
+ },
+
+ // acctest_root.child_a
+ // (no download because it's a relative path inside acctest_root)
+ {
+ Name: "Install",
+ ModuleAddr: "acctest_root.child_a",
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root/modules/child_a"),
+ },
+
+ // acctest_root.child_a.child_b
+ // (no download because it's a relative path inside acctest_root, via acctest_root.child_a)
+ {
+ Name: "Install",
+ ModuleAddr: "acctest_root.child_a.child_b",
+ LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root/modules/child_b"),
+ },
+ }
+
+ if diff := cmp.Diff(wantCalls, hooks.Calls); diff != "" {
+ t.Fatalf("wrong installer calls\n%s", diff)
+ }
+
+ // check that the registry responses were cached
+ packageAddr := addrs.ModuleRegistryPackage{
+ Host: svchost.Hostname("registry.opentofu.org"),
+ Namespace: "hashicorp",
+ Name: "module-installer-acctest",
+ TargetSystem:
"aws", + } + if _, ok := inst.registryPackageVersions[packageAddr]; !ok { + t.Errorf("module versions cache was not populated\ngot: %s\nwant: key hashicorp/module-installer-acctest/aws", spew.Sdump(inst.registryPackageVersions)) + } + if _, ok := inst.registryPackageSources[moduleVersion{module: packageAddr, version: "0.0.1"}]; !ok { + t.Errorf("module download url cache was not populated\ngot: %s", spew.Sdump(inst.registryPackageSources)) + } + + loader, err = configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) + config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + wantTraces := map[string]string{ + "": "in local caller for registry-modules", + "acctest_root": "in root module", + "acctest_root.child_a": "in child_a module", + "acctest_root.child_a.child_b": "in child_b module", + "acctest_child_a": "in child_a module", + "acctest_child_a.child_b": "in child_b module", + "acctest_child_b": "in child_b module", + } + gotTraces := map[string]string{} + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) + +} + +func TestLoaderInstallModules_goGetter(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses github.com; set TF_ACC=1 to run it") + } + + fixtureDir := filepath.Clean("testdata/go-getter-modules") + tmpDir, done := tempChdir(t, fixtureDir) + // the module installer runs filepath.EvalSymlinks() on the destination + // directory before copying files, and the resultant directory is what is + // returned by the install hooks. 
Without this, tests could fail on machines + // where the default temp dir was a symlink. + dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + defer done() + + hooks := &testInstallHooks{} + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + _, diags := inst.InstallModules(context.Background(), dir, "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + wantCalls := []testInstallHookCall{ + // the configuration builder visits each level of calls in lexicographical + // order by name, so the following list is kept in the same order. + + // acctest_child_a accesses //modules/child_a directly + { + Name: "Download", + ModuleAddr: "acctest_child_a", + PackageAddr: "git::https://github.com/hashicorp/terraform-aws-module-installer-acctest.git?ref=v0.0.1", // intentionally excludes the subdir because we're downloading the whole repo here + }, + { + Name: "Install", + ModuleAddr: "acctest_child_a", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_a/modules/child_a"), + }, + + // acctest_child_a.child_b + // (no download because it's a relative path inside acctest_child_a) + { + Name: "Install", + ModuleAddr: "acctest_child_a.child_b", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_a/modules/child_b"), + }, + + // acctest_child_b accesses //modules/child_b directly + { + Name: "Download", + ModuleAddr: "acctest_child_b", + PackageAddr: "git::https://github.com/hashicorp/terraform-aws-module-installer-acctest.git?ref=v0.0.1", // intentionally excludes the subdir because we're downloading the whole package here + }, + { + Name: "Install", + ModuleAddr: "acctest_child_b", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_child_b/modules/child_b"), + }, + + // acctest_root + { + Name: "Download", + 
ModuleAddr: "acctest_root", + PackageAddr: "git::https://github.com/hashicorp/terraform-aws-module-installer-acctest.git?ref=v0.0.1", + }, + { + Name: "Install", + ModuleAddr: "acctest_root", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root"), + }, + + // acctest_root.child_a + // (no download because it's a relative path inside acctest_root) + { + Name: "Install", + ModuleAddr: "acctest_root.child_a", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root/modules/child_a"), + }, + + // acctest_root.child_a.child_b + // (no download because it's a relative path inside acctest_root, via acctest_root.child_a) + { + Name: "Install", + ModuleAddr: "acctest_root.child_a.child_b", + LocalPath: filepath.Join(dir, ".terraform/modules/acctest_root/modules/child_b"), + }, + } + + if diff := cmp.Diff(wantCalls, hooks.Calls); diff != "" { + t.Fatalf("wrong installer calls\n%s", diff) + } + + loader, err = configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) 
+ config, loadDiags := loader.LoadConfig(".", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + wantTraces := map[string]string{ + "": "in local caller for go-getter-modules", + "acctest_root": "in root module", + "acctest_root.child_a": "in child_a module", + "acctest_root.child_a.child_b": "in child_b module", + "acctest_child_a": "in child_a module", + "acctest_child_a.child_b": "in child_b module", + "acctest_child_b": "in child_b module", + } + gotTraces := map[string]string{} + config.DeepEach(func(c *configs.Config) { + path := strings.Join(c.Path, ".") + if c.Module.Variables["v"] == nil { + gotTraces[path] = "" + return + } + varDesc := c.Module.Variables["v"].Description + gotTraces[path] = varDesc + }) + assertResultDeepEqual(t, gotTraces, wantTraces) + +} + +func TestModuleInstaller_fromTests(t *testing.T) { + fixtureDir := filepath.Clean("testdata/local-module-from-test") + dir, done := tempChdir(t, fixtureDir) + defer done() + + hooks := &testInstallHooks{} + + modulesDir := filepath.Join(dir, ".terraform/modules") + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, nil) + _, diags := inst.InstallModules(context.Background(), ".", "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + wantCalls := []testInstallHookCall{ + { + Name: "Install", + ModuleAddr: "test.tests.main.setup", + PackageAddr: "", + LocalPath: "setup", + }, + } + + if assertResultDeepEqual(t, hooks.Calls, wantCalls) { + return + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + t.Fatal(err) + } + + // Make sure the configuration is loadable now. + // (This ensures that correct information is recorded in the manifest.) 
+ config, loadDiags := loader.LoadConfigWithTests(".", "tests", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + if config.Module.Tests[filepath.Join("tests", "main.tftest.hcl")].Runs[0].ConfigUnderTest == nil { + t.Fatalf("should have loaded config into the relevant run block but did not") + } +} + +func TestLoadInstallModules_registryFromTest(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("this test accesses registry.opentofu.org and github.com; set TF_ACC=1 to run it") + } + + fixtureDir := filepath.Clean("testdata/registry-module-from-test") + tmpDir, done := tempChdir(t, fixtureDir) + // the module installer runs filepath.EvalSymlinks() on the destination + // directory before copying files, and the resultant directory is what is + // returned by the install hooks. Without this, tests could fail on machines + // where the default temp dir was a symlink. + dir, err := filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Error(err) + } + + defer done() + + hooks := &testInstallHooks{} + modulesDir := filepath.Join(dir, ".terraform/modules") + + loader, close := configload.NewLoaderForTests(t) + defer close() + inst := NewModuleInstaller(modulesDir, loader, registry.NewClient(nil, nil)) + _, diags := inst.InstallModules(context.Background(), dir, "tests", false, false, hooks, configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, diags) + + v := version.Must(version.NewVersion("0.0.1")) + wantCalls := []testInstallHookCall{ + // the configuration builder visits each level of calls in lexicographical + // order by name, so the following list is kept in the same order. + + // setup access acctest directly. 
+ { + Name: "Download", + ModuleAddr: "test.main.setup", + PackageAddr: "registry.opentofu.org/hashicorp/module-installer-acctest/aws", // intentionally excludes the subdir because we're downloading the whole package here + Version: v, + }, + { + Name: "Install", + ModuleAddr: "test.main.setup", + Version: v, + // NOTE: This local path and the other paths derived from it below + // can vary depending on how the registry is implemented. At the + // time of writing this test, registry.opentofu.org returns + // git repository source addresses and so this path refers to the + // root of the git clone, but historically the registry referred + // to GitHub-provided tar archives which meant that there was an + // extra level of subdirectory here for the typical directory + // nesting in tar archives, which would've been reflected as + // an extra segment on this path. If this test fails due to an + // additional path segment in future, then a change to the upstream + // registry might be the root cause. 
+ LocalPath: filepath.Join(dir, ".terraform/modules/test.main.setup"),
+ },
+
+ // main.tftest.hcl.setup.child_a
+ // (no download because it's a relative path inside acctest_child_a)
+ {
+ Name: "Install",
+ ModuleAddr: "test.main.setup.child_a",
+ LocalPath: filepath.Join(dir, ".terraform/modules/test.main.setup/modules/child_a"),
+ },
+
+ // main.tftest.hcl.setup.child_a.child_b
+ // (no download because it's a relative path inside main.tftest.hcl.setup.child_a)
+ {
+ Name: "Install",
+ ModuleAddr: "test.main.setup.child_a.child_b",
+ LocalPath: filepath.Join(dir, ".terraform/modules/test.main.setup/modules/child_b"),
+ },
+ }
+
+ if diff := cmp.Diff(wantCalls, hooks.Calls); diff != "" {
+ t.Fatalf("wrong installer calls\n%s", diff)
+ }
+
+ // check that the registry responses were cached
+ packageAddr := addrs.ModuleRegistryPackage{
+ Host: svchost.Hostname("registry.opentofu.org"),
+ Namespace: "hashicorp",
+ Name: "module-installer-acctest",
+ TargetSystem: "aws",
+ }
+ if _, ok := inst.registryPackageVersions[packageAddr]; !ok {
+ t.Errorf("module versions cache was not populated\ngot: %s\nwant: key hashicorp/module-installer-acctest/aws", spew.Sdump(inst.registryPackageVersions))
+ }
+ if _, ok := inst.registryPackageSources[moduleVersion{module: packageAddr, version: "0.0.1"}]; !ok {
+ t.Errorf("module download url cache was not populated\ngot: %s", spew.Sdump(inst.registryPackageSources))
+ }
+
+ loader, err = configload.NewLoader(&configload.Config{
+ ModulesDir: modulesDir,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure the configuration is loadable now.
+ // (This ensures that correct information is recorded in the manifest.)
+ config, loadDiags := loader.LoadConfigWithTests(".", "tests", configs.RootModuleCallForTesting()) + assertNoDiagnostics(t, tfdiags.Diagnostics{}.Append(loadDiags)) + + if config.Module.Tests["main.tftest.hcl"].Runs[0].ConfigUnderTest == nil { + t.Fatalf("should have loaded config into the relevant run block but did not") + } +} + +type testInstallHooks struct { + Calls []testInstallHookCall +} + +type testInstallHookCall struct { + Name string + ModuleAddr string + PackageAddr string + Version *version.Version + LocalPath string +} + +func (h *testInstallHooks) Download(moduleAddr, packageAddr string, version *version.Version) { + h.Calls = append(h.Calls, testInstallHookCall{ + Name: "Download", + ModuleAddr: moduleAddr, + PackageAddr: packageAddr, + Version: version, + }) +} + +func (h *testInstallHooks) Install(moduleAddr string, version *version.Version, localPath string) { + h.Calls = append(h.Calls, testInstallHookCall{ + Name: "Install", + ModuleAddr: moduleAddr, + Version: version, + LocalPath: localPath, + }) +} + +// tempChdir copies the contents of the given directory to a temporary +// directory and changes the test process's current working directory to +// point to that directory. Also returned is a function that should be +// called at the end of the test (e.g. via "defer") to restore the previous +// working directory. +// +// Tests using this helper cannot safely be run in parallel with other tests. 
+func tempChdir(t *testing.T, sourceDir string) (string, func()) { + t.Helper() + + tmpDir, err := os.MkdirTemp("", "terraform-configload") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", err) + return "", nil + } + + if err := copy.CopyDir(tmpDir, sourceDir); err != nil { + t.Fatalf("failed to copy fixture to temporary directory: %s", err) + return "", nil + } + + oldDir, err := os.Getwd() + if err != nil { + t.Fatalf("failed to determine current working directory: %s", err) + return "", nil + } + + err = os.Chdir(tmpDir) + if err != nil { + t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err) + return "", nil + } + + // Most of the tests need this, so we'll make it just in case. + os.MkdirAll(filepath.Join(tmpDir, ".terraform/modules"), os.ModePerm) + + t.Logf("tempChdir switched to %s after copying from %s", tmpDir, sourceDir) + + return tmpDir, func() { + err := os.Chdir(oldDir) + if err != nil { + panic(fmt.Errorf("failed to restore previous working directory %s: %w", oldDir, err)) + } + + if os.Getenv("TF_CONFIGLOAD_TEST_KEEP_TMP") == "" { + os.RemoveAll(tmpDir) + } + } +} + +func assertNoDiagnostics(t *testing.T, diags tfdiags.Diagnostics) bool { + t.Helper() + return assertDiagnosticCount(t, diags, 0) +} + +func assertDiagnosticCount(t *testing.T, diags tfdiags.Diagnostics, want int) bool { + t.Helper() + if len(diags) != want { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), want) + for _, diag := range diags { + t.Logf("- %#v", diag) + } + return true + } + return false +} + +func assertDiagnosticSummary(t *testing.T, diags tfdiags.Diagnostics, want string) bool { + t.Helper() + + for _, diag := range diags { + if diag.Description().Summary == want { + return false + } + } + + t.Errorf("missing diagnostic summary %q", want) + for _, diag := range diags { + t.Logf("- %#v", diag) + } + return true +} + +func assertResultDeepEqual(t *testing.T, got, want interface{}) bool { + t.Helper() + if diff := 
deep.Equal(got, want); diff != nil { + for _, problem := range diff { + t.Errorf("%s", problem) + } + return true + } + return false +} diff --git a/pkg/initwd/testdata/already-installed/root.tf b/pkg/initwd/testdata/already-installed/root.tf new file mode 100644 index 00000000000..8a4473942da --- /dev/null +++ b/pkg/initwd/testdata/already-installed/root.tf @@ -0,0 +1,10 @@ + +module "child_a" { + source = "example.com/foo/bar_a/baz" + version = ">= 1.0.0" +} + +module "child_b" { + source = "example.com/foo/bar_b/baz" + version = ">= 1.0.0" +} diff --git a/pkg/initwd/testdata/empty-module-name/child/main.tf b/pkg/initwd/testdata/empty-module-name/child/main.tf new file mode 100644 index 00000000000..6187fa659d2 --- /dev/null +++ b/pkg/initwd/testdata/empty-module-name/child/main.tf @@ -0,0 +1,3 @@ +output "boop" { + value = "beep" +} diff --git a/pkg/initwd/testdata/empty-module-name/main.tf b/pkg/initwd/testdata/empty-module-name/main.tf new file mode 100644 index 00000000000..45add55b6fd --- /dev/null +++ b/pkg/initwd/testdata/empty-module-name/main.tf @@ -0,0 +1,3 @@ +module "" { + source = "./child" +} diff --git a/pkg/initwd/testdata/empty/.gitignore b/pkg/initwd/testdata/empty/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/initwd/testdata/go-getter-modules/.gitignore b/pkg/initwd/testdata/go-getter-modules/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/initwd/testdata/go-getter-modules/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/initwd/testdata/go-getter-modules/root.tf b/pkg/initwd/testdata/go-getter-modules/root.tf new file mode 100644 index 00000000000..9b174a7a501 --- /dev/null +++ b/pkg/initwd/testdata/go-getter-modules/root.tf @@ -0,0 +1,21 @@ +# This fixture depends on a github repo at: +# https://github.com/hashicorp/terraform-aws-module-installer-acctest +# ...and expects its v0.0.1 tag to be pointing at the following commit: +# 
d676ab2559d4e0621d59e3c3c4cbb33958ac4608 + +variable "v" { + description = "in local caller for go-getter-modules" + default = "" +} + +module "acctest_root" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest?ref=v0.0.1" +} + +module "acctest_child_a" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_a?ref=v0.0.1" +} + +module "acctest_child_b" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest//modules/child_b?ref=v0.0.1" +} diff --git a/pkg/initwd/testdata/invalid-module-name/child/main.tf b/pkg/initwd/testdata/invalid-module-name/child/main.tf new file mode 100644 index 00000000000..347fbbc13b7 --- /dev/null +++ b/pkg/initwd/testdata/invalid-module-name/child/main.tf @@ -0,0 +1,3 @@ +output "boop" { + value = "beep" +} \ No newline at end of file diff --git a/pkg/initwd/testdata/invalid-module-name/main.tf b/pkg/initwd/testdata/invalid-module-name/main.tf new file mode 100644 index 00000000000..fcc5b8d1c9d --- /dev/null +++ b/pkg/initwd/testdata/invalid-module-name/main.tf @@ -0,0 +1,3 @@ +module "../invalid" { + source = "./child" +} \ No newline at end of file diff --git a/pkg/initwd/testdata/invalid-version-constraint-local/.gitignore b/pkg/initwd/testdata/invalid-version-constraint-local/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/initwd/testdata/invalid-version-constraint-local/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/initwd/testdata/invalid-version-constraint-local/root.tf b/pkg/initwd/testdata/invalid-version-constraint-local/root.tf new file mode 100644 index 00000000000..e7dbaa323bd --- /dev/null +++ b/pkg/initwd/testdata/invalid-version-constraint-local/root.tf @@ -0,0 +1,9 @@ +# This fixture references the github repo at: +# https://github.com/hashicorp/terraform-aws-module-installer-acctest +# However, due to the nature of this test (verifying early error), the URL will not be contacted, +# and the test is 
safe to execute as part of the normal test suite. + +module "acctest_root" { + source = "github.com/hashicorp/terraform-aws-module-installer-acctest" + version = "0.0.1" +} \ No newline at end of file diff --git a/pkg/initwd/testdata/invalid-version-constraint/.gitignore b/pkg/initwd/testdata/invalid-version-constraint/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/initwd/testdata/invalid-version-constraint/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/initwd/testdata/invalid-version-constraint/root.tf b/pkg/initwd/testdata/invalid-version-constraint/root.tf new file mode 100644 index 00000000000..a973dd02ff8 --- /dev/null +++ b/pkg/initwd/testdata/invalid-version-constraint/root.tf @@ -0,0 +1,4 @@ +module "local" { + source = "./local" + version = "0.0.1" # Version constraint not allowed for a local module +} diff --git a/pkg/initwd/testdata/load-module-package-escape/child/package-escape-child.tf b/pkg/initwd/testdata/load-module-package-escape/child/package-escape-child.tf new file mode 100644 index 00000000000..935a02de1ea --- /dev/null +++ b/pkg/initwd/testdata/load-module-package-escape/child/package-escape-child.tf @@ -0,0 +1,8 @@ +module "grandchild" { + # NOTE: This seems like it ought to work because there is indeed a + # ../grandchild directory, but our caller loaded us as an external + # module using an absolute path and so we're actually isolated from + # the parent directory in a separate "module package", and so we + # can't traverse out to find the grandchild module. 
+ source = "../grandchild"
+}
diff --git a/pkg/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf b/pkg/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf
new file mode 100644
index 00000000000..734d0d6387c
--- /dev/null
+++ b/pkg/initwd/testdata/load-module-package-escape/grandchild/package-escape-grandchild.tf
@@ -0,0 +1,2 @@
+# This is intentionally empty, just here to be referred to by the "child"
+# module in ../child .
diff --git a/pkg/initwd/testdata/load-module-package-escape/package-escape.tf b/pkg/initwd/testdata/load-module-package-escape/package-escape.tf
new file mode 100644
index 00000000000..7a665a4e61e
--- /dev/null
+++ b/pkg/initwd/testdata/load-module-package-escape/package-escape.tf
@@ -0,0 +1,9 @@
+module "child" {
+ # NOTE: For this test we need a working absolute path so that Terraform
+ # will see this as an "external" module and thus establish a separate
+ # package for it, but we won't know which temporary directory this
+ # will be in at runtime, so we'll rewrite this file inside the test
+ # code to replace %%BASE%% with the actual path. %%BASE%% is not normal
+ # Terraform syntax and won't work outside of this test.
+ source = "%%BASE%%/child"
+}
diff --git a/pkg/initwd/testdata/load-module-package-prefix/package-prefix.tf b/pkg/initwd/testdata/load-module-package-prefix/package-prefix.tf
new file mode 100644
index 00000000000..08d5ced6022
--- /dev/null
+++ b/pkg/initwd/testdata/load-module-package-prefix/package-prefix.tf
@@ -0,0 +1,15 @@
+module "child" {
+ # NOTE: For this test we need a working absolute path so that Terraform
+ # will see this as an "external" module and thus establish a separate
+ # package for it, but we won't know which temporary directory this
+ # will be in at runtime, so we'll rewrite this file inside the test
+ # code to replace %%BASE%% with the actual path. %%BASE%% is not normal
+ # Terraform syntax and won't work outside of this test.
+ # + # Note that we're intentionally using the special // delimiter to + # tell Terraform that it should treat the "package" directory as a + # whole as a module package, with all of its descendents "downloaded" + # (copied) together into ./.terraform/modules/child so that child + # can refer to ../grandchild successfully. + source = "%%BASE%%/package//child" +} diff --git a/pkg/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf b/pkg/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf new file mode 100644 index 00000000000..84b67acdfc5 --- /dev/null +++ b/pkg/initwd/testdata/load-module-package-prefix/package/child/package-prefix-child.tf @@ -0,0 +1,9 @@ +module "grandchild" { + # NOTE: This only works because our caller told Terraform to treat + # the parent directory as a whole as a module package, and so + # the "./terraform/modules/child" directory should contain both + # "child" and "grandchild" sub directories that we can traverse between. + # This is the same as local paths between different directories inside + # a single git repository or distribution archive. + source = "../grandchild" +} diff --git a/pkg/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf b/pkg/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf new file mode 100644 index 00000000000..734d0d6387c --- /dev/null +++ b/pkg/initwd/testdata/load-module-package-prefix/package/grandchild/package-prefix-grandchild.tf @@ -0,0 +1,2 @@ +# This is intentionally empty, just here to be referred to by the "child" +# module in ../child . 
diff --git a/pkg/initwd/testdata/local-module-error/child_a/main.tf b/pkg/initwd/testdata/local-module-error/child_a/main.tf new file mode 100644 index 00000000000..03ebd8e476e --- /dev/null +++ b/pkg/initwd/testdata/local-module-error/child_a/main.tf @@ -0,0 +1,8 @@ +variable "v" { + description = "in child_ba module" + default = "" +} + +output "hello" { + value = "Hello from child_a!" +} diff --git a/pkg/initwd/testdata/local-module-error/main.tf b/pkg/initwd/testdata/local-module-error/main.tf new file mode 100644 index 00000000000..a31f9133875 --- /dev/null +++ b/pkg/initwd/testdata/local-module-error/main.tf @@ -0,0 +1,8 @@ +variable "v" { + description = "in root module" + default = "" +} + +module "child_a" { + source = "child_a" +} diff --git a/pkg/initwd/testdata/local-module-from-test/main.tf b/pkg/initwd/testdata/local-module-from-test/main.tf new file mode 100644 index 00000000000..4263e1f1211 --- /dev/null +++ b/pkg/initwd/testdata/local-module-from-test/main.tf @@ -0,0 +1,2 @@ +# Keep this empty, we just want to make sure the test file loads the setup +# module. 
\ No newline at end of file diff --git a/pkg/initwd/testdata/local-module-from-test/setup/main.tf b/pkg/initwd/testdata/local-module-from-test/setup/main.tf new file mode 100644 index 00000000000..6eac7e720ac --- /dev/null +++ b/pkg/initwd/testdata/local-module-from-test/setup/main.tf @@ -0,0 +1,4 @@ +variable "v" { + description = "in setup module" + default = "" +} diff --git a/pkg/initwd/testdata/local-module-from-test/tests/main.tftest.hcl b/pkg/initwd/testdata/local-module-from-test/tests/main.tftest.hcl new file mode 100644 index 00000000000..e9a479f24fd --- /dev/null +++ b/pkg/initwd/testdata/local-module-from-test/tests/main.tftest.hcl @@ -0,0 +1,5 @@ +run "setup" { + module { + source = "./setup" + } +} diff --git a/pkg/initwd/testdata/local-module-missing-provider/main.tf b/pkg/initwd/testdata/local-module-missing-provider/main.tf new file mode 100644 index 00000000000..0ef917dc7c2 --- /dev/null +++ b/pkg/initwd/testdata/local-module-missing-provider/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + // since this module declares an alias with no config, it is not valid as + // a root module. 
+ configuration_aliases = [ foo.alternate ] + } + } +} + +resource "foo_instance" "bam" { + provider = foo.alternate +} diff --git a/pkg/initwd/testdata/local-module-symlink/child_a/child_a.tf b/pkg/initwd/testdata/local-module-symlink/child_a/child_a.tf new file mode 100644 index 00000000000..68ebb8e4048 --- /dev/null +++ b/pkg/initwd/testdata/local-module-symlink/child_a/child_a.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_a module" + default = "" +} + +module "child_b" { + source = "./child_b" +} diff --git a/pkg/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf b/pkg/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf new file mode 100644 index 00000000000..e2e2209164c --- /dev/null +++ b/pkg/initwd/testdata/local-module-symlink/child_a/child_b/child_b.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_b module" + default = "" +} + +output "hello" { + value = "Hello from child_b!" +} diff --git a/pkg/initwd/testdata/local-module-symlink/modules/child_a b/pkg/initwd/testdata/local-module-symlink/modules/child_a new file mode 120000 index 00000000000..0d568b14371 --- /dev/null +++ b/pkg/initwd/testdata/local-module-symlink/modules/child_a @@ -0,0 +1 @@ +../child_a/ \ No newline at end of file diff --git a/pkg/initwd/testdata/local-module-symlink/root.tf b/pkg/initwd/testdata/local-module-symlink/root.tf new file mode 100644 index 00000000000..1ca7ca32c0d --- /dev/null +++ b/pkg/initwd/testdata/local-module-symlink/root.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in root module" + default = "" +} + +module "child_a" { + source = "./modules/child_a" +} diff --git a/pkg/initwd/testdata/local-modules/child_a/child_a.tf b/pkg/initwd/testdata/local-modules/child_a/child_a.tf new file mode 100644 index 00000000000..68ebb8e4048 --- /dev/null +++ b/pkg/initwd/testdata/local-modules/child_a/child_a.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_a module" + default = "" +} + +module "child_b" 
{ + source = "./child_b" +} diff --git a/pkg/initwd/testdata/local-modules/child_a/child_b/child_b.tf b/pkg/initwd/testdata/local-modules/child_a/child_b/child_b.tf new file mode 100644 index 00000000000..e2e2209164c --- /dev/null +++ b/pkg/initwd/testdata/local-modules/child_a/child_b/child_b.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in child_b module" + default = "" +} + +output "hello" { + value = "Hello from child_b!" +} diff --git a/pkg/initwd/testdata/local-modules/root.tf b/pkg/initwd/testdata/local-modules/root.tf new file mode 100644 index 00000000000..3b4c6416d7d --- /dev/null +++ b/pkg/initwd/testdata/local-modules/root.tf @@ -0,0 +1,9 @@ + +variable "v" { + description = "in root module" + default = "" +} + +module "child_a" { + source = "./child_a" +} diff --git a/pkg/initwd/testdata/prerelease-version-constraint-match/root.tf b/pkg/initwd/testdata/prerelease-version-constraint-match/root.tf new file mode 100644 index 00000000000..b68baf770eb --- /dev/null +++ b/pkg/initwd/testdata/prerelease-version-constraint-match/root.tf @@ -0,0 +1,7 @@ +# We expect this test to download the requested version because it is an exact +# match for a prerelease version. + +module "acctest_exact" { + source = "hashicorp/module-installer-acctest/aws" + version = "=0.0.3-alpha.1" +} diff --git a/pkg/initwd/testdata/prerelease-version-constraint/root.tf b/pkg/initwd/testdata/prerelease-version-constraint/root.tf new file mode 100644 index 00000000000..8ff3dd68da5 --- /dev/null +++ b/pkg/initwd/testdata/prerelease-version-constraint/root.tf @@ -0,0 +1,8 @@ +# We expect this test to download the version 0.0.2, the one before the +# specified version even with the equality because the specified version is a +# prerelease. 
+ +module "acctest_partial" { + source = "hashicorp/module-installer-acctest/aws" + version = "<=0.0.3-alpha.1" +} diff --git a/pkg/initwd/testdata/registry-module-from-test/main.tf b/pkg/initwd/testdata/registry-module-from-test/main.tf new file mode 100644 index 00000000000..69d0720bc05 --- /dev/null +++ b/pkg/initwd/testdata/registry-module-from-test/main.tf @@ -0,0 +1,2 @@ +# Deliberately empty, we just want to make sure the module is loaded from the +# tests. \ No newline at end of file diff --git a/pkg/initwd/testdata/registry-module-from-test/main.tftest.hcl b/pkg/initwd/testdata/registry-module-from-test/main.tftest.hcl new file mode 100644 index 00000000000..da19708133e --- /dev/null +++ b/pkg/initwd/testdata/registry-module-from-test/main.tftest.hcl @@ -0,0 +1,8 @@ +run "setup" { + # We have a dedicated repo for this test module. + # See ../registry-modules/root.tf for more info. + module { + source = "hashicorp/module-installer-acctest/aws" + version = "0.0.1" + } +} diff --git a/pkg/initwd/testdata/registry-modules/.gitignore b/pkg/initwd/testdata/registry-modules/.gitignore new file mode 100644 index 00000000000..6e0db03a8b7 --- /dev/null +++ b/pkg/initwd/testdata/registry-modules/.gitignore @@ -0,0 +1 @@ +.terraform/* diff --git a/pkg/initwd/testdata/registry-modules/root.tf b/pkg/initwd/testdata/registry-modules/root.tf new file mode 100644 index 00000000000..4b5ad1f1edd --- /dev/null +++ b/pkg/initwd/testdata/registry-modules/root.tf @@ -0,0 +1,33 @@ +# This fixture indirectly depends on a github repo at: +# https://github.com/hashicorp/terraform-aws-module-installer-acctest +# ...and expects its v0.0.1 tag to be pointing at the following commit: +# d676ab2559d4e0621d59e3c3c4cbb33958ac4608 +# +# This repository is accessed indirectly via: +# https://registry.terraform.io/modules/hashicorp/module-installer-acctest/aws/0.0.1 +# +# Since the tag's id is included in a downloaded archive, it is expected to +# have the following id: +# 
853d03855b3290a3ca491d4c3a7684572dd42237 +# (this particular assumption is encoded in the tests that use this fixture) + + +variable "v" { + description = "in local caller for registry-modules" + default = "" +} + +module "acctest_root" { + source = "hashicorp/module-installer-acctest/aws" + version = "0.0.1" +} + +module "acctest_child_a" { + source = "hashicorp/module-installer-acctest/aws//modules/child_a" + version = "0.0.1" +} + +module "acctest_child_b" { + source = "hashicorp/module-installer-acctest/aws//modules/child_b" + version = "0.0.1" +} diff --git a/pkg/initwd/testing.go b/pkg/initwd/testing.go new file mode 100644 index 00000000000..836fe349eb7 --- /dev/null +++ b/pkg/initwd/testing.go @@ -0,0 +1,80 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package initwd + +import ( + "context" + "testing" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests, +// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows +// a test configuration to be loaded in a single step. +// +// If module installation fails, t.Fatal (or similar) is called to halt +// execution of the test, under the assumption that installation failures are +// not expected. If installation failures _are_ expected then use +// NewLoaderForTests and work with the loader object directly. If module +// installation succeeds but generates warnings, these warnings are discarded. +// +// If installation succeeds but errors are detected during loading then a +// possibly-incomplete config is returned along with error diagnostics. 
The +// test run is not aborted in this case, so that the caller can make assertions +// against the returned diagnostics. +// +// As with NewLoaderForTests, a cleanup function is returned which must be +// called before the test completes in order to remove the temporary +// modules directory. +func LoadConfigForTests(t *testing.T, rootDir string, testsDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) { + t.Helper() + + var diags tfdiags.Diagnostics + + loader, cleanup := configload.NewLoaderForTests(t) + inst := NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + + call := configs.RootModuleCallForTesting() + _, moreDiags := inst.InstallModules(context.Background(), rootDir, testsDir, true, false, ModuleInstallHooksImpl{}, call) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + return nil, nil, func() {}, diags + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + config, hclDiags := loader.LoadConfig(rootDir, call) + diags = diags.Append(hclDiags) + return config, loader, cleanup, diags +} + +// MustLoadConfigForTests is a variant of LoadConfigForTests which calls +// t.Fatal (or similar) if there are any errors during loading, and thus +// does not return diagnostics at all. +// +// This is useful for concisely writing tests that don't expect errors at +// all. For tests that expect errors and need to assert against them, use +// LoadConfigForTests instead. 
+func MustLoadConfigForTests(t *testing.T, rootDir, testsDir string) (*configs.Config, *configload.Loader, func()) { + t.Helper() + + config, loader, cleanup, diags := LoadConfigForTests(t, rootDir, testsDir) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + } + return config, loader, cleanup +} diff --git a/pkg/instances/expander.go b/pkg/instances/expander.go new file mode 100644 index 00000000000..d62fecaec75 --- /dev/null +++ b/pkg/instances/expander.go @@ -0,0 +1,532 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package instances + +import ( + "fmt" + "sort" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +// Expander instances serve as a coordination point for gathering object +// repetition values (count and for_each in configuration) and then later +// making use of them to fully enumerate all of the instances of an object. +// +// The two repeatable object types in OpenTofu are modules and resources. +// Because resources belong to modules and modules can nest inside other +// modules, module expansion in particular has a recursive effect that can +// cause deep objects to expand exponentially. Expander assumes that all +// instances of a module have the same static objects inside, and that they +// differ only in the repetition count for some of those objects. +// +// Expander is a synchronized object whose methods can be safely called +// from concurrent threads of execution. However, it does expect a certain +// sequence of operations which is normally obtained by the caller traversing +// a dependency graph: each object must have its repetition mode set exactly +// once, and this must be done before any calls that depend on the repetition +// mode. 
In other words, the count or for_each expression value for a module +// must be provided before any object nested directly or indirectly inside +// that module can be expanded. If this ordering is violated, the methods +// will panic to enforce internal consistency. +// +// The Expand* methods of Expander only work directly with modules and with +// resources. Addresses for other objects that nest within modules but +// do not themselves support repetition can be obtained by calling ExpandModule +// with the containing module path and then producing one absolute instance +// address per module instance address returned. +type Expander struct { + mu sync.RWMutex + exps *expanderModule +} + +// NewExpander initializes and returns a new Expander, empty and ready to use. +func NewExpander() *Expander { + return &Expander{ + exps: newExpanderModule(), + } +} + +// SetModuleSingle records that the given module call inside the given parent +// module does not use any repetition arguments and is therefore a singleton. +func (e *Expander) SetModuleSingle(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall) { + e.setModuleExpansion(parentAddr, callAddr, expansionSingleVal) +} + +// SetModuleCount records that the given module call inside the given parent +// module instance uses the "count" repetition argument, with the given value. +func (e *Expander) SetModuleCount(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, count int) { + e.setModuleExpansion(parentAddr, callAddr, expansionCount(count)) +} + +// SetModuleForEach records that the given module call inside the given parent +// module instance uses the "for_each" repetition argument, with the given +// map value. +// +// In the configuration language the for_each argument can also accept a set. +// It's the caller's responsibility to convert that into an identity map before +// calling this method. 
+func (e *Expander) SetModuleForEach(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, mapping map[string]cty.Value) { + e.setModuleExpansion(parentAddr, callAddr, expansionForEach(mapping)) +} + +// SetResourceSingle records that the given resource inside the given module +// does not use any repetition arguments and is therefore a singleton. +func (e *Expander) SetResourceSingle(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionSingleVal) +} + +// SetResourceCount records that the given resource inside the given module +// uses the "count" repetition argument, with the given value. +func (e *Expander) SetResourceCount(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, count int) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionCount(count)) +} + +// SetResourceForEach records that the given resource inside the given module +// uses the "for_each" repetition argument, with the given map value. +// +// In the configuration language the for_each argument can also accept a set. +// It's the caller's responsibility to convert that into an identity map before +// calling this method. +func (e *Expander) SetResourceForEach(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, mapping map[string]cty.Value) { + e.setResourceExpansion(moduleAddr, resourceAddr, expansionForEach(mapping)) +} + +// ExpandModule finds the exhaustive set of module instances resulting from +// the expansion of the given module and all of its ancestor modules. +// +// All of the modules on the path to the identified module must already have +// had their expansion registered using one of the SetModule* methods before +// calling, or this method will panic. +func (e *Expander) ExpandModule(addr addrs.Module) []addrs.ModuleInstance { + return e.expandModule(addr, false) +} + +// expandModule allows skipping unexpanded module addresses by setting skipUnknown to true. 
+// This is used by instances.Set, which is only concerned with the expanded +// instances, and should not panic when looking up unknown addresses. +func (e *Expander) expandModule(addr addrs.Module, skipUnknown bool) []addrs.ModuleInstance { + if len(addr) == 0 { + // Root module is always a singleton. + return singletonRootModule + } + + e.mu.RLock() + defer e.mu.RUnlock() + + // We're going to be dynamically growing ModuleInstance addresses, so + // we'll preallocate some space to do it so that for typical shallow + // module trees we won't need to reallocate this. + // (moduleInstances does plenty of allocations itself, so the benefit of + // pre-allocating this is marginal but it's not hard to do.) + parentAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.moduleInstances(addr, parentAddr, skipUnknown) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// GetDeepestExistingModuleInstance is a funny specialized function for +// determining how many steps we can traverse through the given module instance +// address before encountering an undeclared instance of a declared module. +// +// The result is the longest prefix of the given address which steps only +// through module instances that exist. +// +// All of the modules on the given path must already have had their +// expansion registered using one of the SetModule* methods before calling, +// or this method will panic. +func (e *Expander) GetDeepestExistingModuleInstance(given addrs.ModuleInstance) addrs.ModuleInstance { + exps := e.exps // start with the root module expansions + for i := 0; i < len(given); i++ { + step := given[i] + callName := step.Name + if _, ok := exps.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. 
+ panic(fmt.Sprintf("no expansion has been registered for %s", given[:i].Child(callName, addrs.NoKey))) + } + + var ok bool + exps, ok = exps.childInstances[step] + if !ok { + // We've found a non-existing instance, so we're done. + return given[:i] + } + } + + // If we complete the loop above without returning early then the entire + // given address refers to a declared module instance. + return given +} + +// ExpandModuleResource finds the exhaustive set of resource instances resulting from +// the expansion of the given resource and all of its containing modules. +// +// All of the modules on the path to the identified resource and the resource +// itself must already have had their expansion registered using one of the +// SetModule*/SetResource* methods before calling, or this method will panic. +func (e *Expander) ExpandModuleResource(moduleAddr addrs.Module, resourceAddr addrs.Resource) []addrs.AbsResourceInstance { + e.mu.RLock() + defer e.mu.RUnlock() + + // We're going to be dynamically growing ModuleInstance addresses, so + // we'll preallocate some space to do it so that for typical shallow + // module trees we won't need to reallocate this. + // (moduleInstances does plenty of allocations itself, so the benefit of + // pre-allocating this is marginal but it's not hard to do.) + moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.moduleResourceInstances(moduleAddr, resourceAddr, moduleInstanceAddr) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// ExpandResource finds the set of resource instances resulting from +// the expansion of the given resource within its module instance. +// +// All of the modules on the path to the identified resource and the resource +// itself must already have had their expansion registered using one of the +// SetModule*/SetResource* methods before calling, or this method will panic. 
+// +// ExpandModuleResource returns all instances of a resource across all +// instances of its containing module, whereas this ExpandResource function +// is more specific and only expands within a single module instance. If +// any of the module instances selected in the module path of the given address +// aren't valid for that module's expansion then ExpandResource returns an +// empty result, reflecting that a non-existing module instance can never +// contain any existing resource instances. +func (e *Expander) ExpandResource(resourceAddr addrs.AbsResource) []addrs.AbsResourceInstance { + e.mu.RLock() + defer e.mu.RUnlock() + + moduleInstanceAddr := make(addrs.ModuleInstance, 0, 4) + ret := e.exps.resourceInstances(resourceAddr.Module, resourceAddr.Resource, moduleInstanceAddr) + sort.SliceStable(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret +} + +// GetModuleInstanceRepetitionData returns an object describing the values +// that should be available for each.key, each.value, and count.index within +// the call block for the given module instance. +func (e *Expander) GetModuleInstanceRepetitionData(addr addrs.ModuleInstance) RepetitionData { + if len(addr) == 0 { + // The root module is always a singleton, so it has no repetition data. + return RepetitionData{} + } + + e.mu.RLock() + defer e.mu.RUnlock() + + parentMod := e.findModule(addr[:len(addr)-1]) + lastStep := addr[len(addr)-1] + exp, ok := parentMod.moduleCalls[addrs.ModuleCall{Name: lastStep.Name}] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", addr)) + } + return exp.repetitionData(lastStep.InstanceKey) +} + +// GetResourceInstanceRepetitionData returns an object describing the values +// that should be available for each.key, each.value, and count.index within +// the definition block for the given resource instance. 
+func (e *Expander) GetResourceInstanceRepetitionData(addr addrs.AbsResourceInstance) RepetitionData { + e.mu.RLock() + defer e.mu.RUnlock() + + parentMod := e.findModule(addr.Module) + exp, ok := parentMod.resources[addr.Resource.Resource] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", addr.ContainingResource())) + } + return exp.repetitionData(addr.Resource.Key) +} + +// AllInstances returns a set of all of the module and resource instances known +// to the expander. +// +// It generally doesn't make sense to call this until everything has already +// been fully expanded by calling the SetModule* and SetResource* functions. +// After that, the returned set is a convenient small API only for querying +// whether particular instance addresses appeared as a result of those +// expansions. +func (e *Expander) AllInstances() Set { + return Set{e} +} + +func (e *Expander) findModule(moduleInstAddr addrs.ModuleInstance) *expanderModule { + // We expect that all of the modules on the path to our module instance + // should already have expansions registered. + mod := e.exps + for i, step := range moduleInstAddr { + next, ok := mod.childInstances[step] + if !ok { + // Top-down ordering of registration is part of the contract of + // Expander, so this is always indicative of a bug in the caller. + panic(fmt.Sprintf("no expansion has been registered for ancestor module %s", moduleInstAddr[:i+1])) + } + mod = next + } + return mod +} + +func (e *Expander) setModuleExpansion(parentAddr addrs.ModuleInstance, callAddr addrs.ModuleCall, exp expansion) { + e.mu.Lock() + defer e.mu.Unlock() + + mod := e.findModule(parentAddr) + if _, exists := mod.moduleCalls[callAddr]; exists { + panic(fmt.Sprintf("expansion already registered for %s", parentAddr.Child(callAddr.Name, addrs.NoKey))) + } + // We'll also pre-register the child instances so that later calls can + // populate them as the caller traverses the configuration tree. 
+ for _, key := range exp.instanceKeys() { + step := addrs.ModuleInstanceStep{Name: callAddr.Name, InstanceKey: key} + mod.childInstances[step] = newExpanderModule() + } + mod.moduleCalls[callAddr] = exp +} + +func (e *Expander) setResourceExpansion(parentAddr addrs.ModuleInstance, resourceAddr addrs.Resource, exp expansion) { + e.mu.Lock() + defer e.mu.Unlock() + + mod := e.findModule(parentAddr) + if _, exists := mod.resources[resourceAddr]; exists { + panic(fmt.Sprintf("expansion already registered for %s", resourceAddr.Absolute(parentAddr))) + } + mod.resources[resourceAddr] = exp +} + +func (e *Expander) knowsModuleInstance(want addrs.ModuleInstance) bool { + if want.IsRoot() { + return true // root module instance is always present + } + + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsModuleInstance(want) +} + +func (e *Expander) knowsModuleCall(want addrs.AbsModuleCall) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsModuleCall(want) +} + +func (e *Expander) knowsResourceInstance(want addrs.AbsResourceInstance) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsResourceInstance(want) +} + +func (e *Expander) knowsResource(want addrs.AbsResource) bool { + e.mu.Lock() + defer e.mu.Unlock() + + return e.exps.knowsResource(want) +} + +type expanderModule struct { + moduleCalls map[addrs.ModuleCall]expansion + resources map[addrs.Resource]expansion + childInstances map[addrs.ModuleInstanceStep]*expanderModule +} + +func newExpanderModule() *expanderModule { + return &expanderModule{ + moduleCalls: make(map[addrs.ModuleCall]expansion), + resources: make(map[addrs.Resource]expansion), + childInstances: make(map[addrs.ModuleInstanceStep]*expanderModule), + } +} + +var singletonRootModule = []addrs.ModuleInstance{addrs.RootModuleInstance} + +// if moduleInstances is being used to lookup known instances after all +// expansions have been done, set skipUnknown to true which allows addrs which +// may not have been seen to 
return with no instances rather than panicking. +func (m *expanderModule) moduleInstances(addr addrs.Module, parentAddr addrs.ModuleInstance, skipUnknown bool) []addrs.ModuleInstance { + callName := addr[0] + exp, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}] + if !ok { + if skipUnknown { + return nil + } + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + var ret []addrs.ModuleInstance + + // If there's more than one step remaining then we need to traverse deeper. + if len(addr) > 1 { + for step, inst := range m.childInstances { + if step.Name != callName { + continue + } + instAddr := append(parentAddr, step) + ret = append(ret, inst.moduleInstances(addr[1:], instAddr, skipUnknown)...) + } + return ret + } + + // Otherwise, we'll use the expansion from the final step to produce + // a sequence of addresses under this prefix. + for _, k := range exp.instanceKeys() { + // We're reusing the buffer under parentAddr as we recurse through + // the structure, so we need to copy it here to produce a final + // immutable slice to return. + full := make(addrs.ModuleInstance, 0, len(parentAddr)+1) + full = append(full, parentAddr...) + full = full.Child(callName, k) + ret = append(ret, full) + } + return ret +} + +func (m *expanderModule) moduleResourceInstances(moduleAddr addrs.Module, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + if len(moduleAddr) > 0 { + var ret []addrs.AbsResourceInstance + // We need to traverse through the module levels first, so we can + // then iterate resource expansions in the context of each module + // path leading to them. 
+ callName := moduleAddr[0] + if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + for step, inst := range m.childInstances { + if step.Name != callName { + continue + } + moduleInstAddr := append(parentAddr, step) + ret = append(ret, inst.moduleResourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr)...) + } + return ret + } + + return m.onlyResourceInstances(resourceAddr, parentAddr) +} + +func (m *expanderModule) resourceInstances(moduleAddr addrs.ModuleInstance, resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + if len(moduleAddr) > 0 { + // We need to traverse through the module levels first, using only the + // module instances for our specific resource, as the resource may not + // yet be expanded in all module instances. + step := moduleAddr[0] + callName := step.Name + if _, ok := m.moduleCalls[addrs.ModuleCall{Name: callName}]; !ok { + // This is a bug in the caller, because it should always register + // expansions for an object and all of its ancestors before requesting + // expansion of it. + panic(fmt.Sprintf("no expansion has been registered for %s", parentAddr.Child(callName, addrs.NoKey))) + } + + if inst, ok := m.childInstances[step]; ok { + moduleInstAddr := append(parentAddr, step) + return inst.resourceInstances(moduleAddr[1:], resourceAddr, moduleInstAddr) + } else { + // If we have the module _call_ registered (as we checked above) + // but we don't have the given module _instance_ registered, that + // suggests that the module instance key in "step" is not declared + // by the current definition of this module call. 
That means the + // module instance doesn't exist at all, and therefore it can't + // possibly declare any resource instances either. + // + // For example, if we were asked about module.foo[0].aws_instance.bar + // but module.foo doesn't currently have count set, then there is no + // module.foo[0] at all, and therefore no aws_instance.bar + // instances inside it. + return nil + } + } + return m.onlyResourceInstances(resourceAddr, parentAddr) +} + +func (m *expanderModule) onlyResourceInstances(resourceAddr addrs.Resource, parentAddr addrs.ModuleInstance) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + exp, ok := m.resources[resourceAddr] + if !ok { + panic(fmt.Sprintf("no expansion has been registered for %s", resourceAddr.Absolute(parentAddr))) + } + + for _, k := range exp.instanceKeys() { + // We're reusing the buffer under parentAddr as we recurse through + // the structure, so we need to copy it here to produce a final + // immutable slice to return. + moduleAddr := make(addrs.ModuleInstance, len(parentAddr)) + copy(moduleAddr, parentAddr) + ret = append(ret, resourceAddr.Instance(k).Absolute(moduleAddr)) + } + return ret +} + +func (m *expanderModule) getModuleInstance(want addrs.ModuleInstance) *expanderModule { + current := m + for _, step := range want { + next := current.childInstances[step] + if next == nil { + return nil + } + current = next + } + return current +} + +func (m *expanderModule) knowsModuleInstance(want addrs.ModuleInstance) bool { + return m.getModuleInstance(want) != nil +} + +func (m *expanderModule) knowsModuleCall(want addrs.AbsModuleCall) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + _, ret := modInst.moduleCalls[want.Call] + return ret +} + +func (m *expanderModule) knowsResourceInstance(want addrs.AbsResourceInstance) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + resourceExp := 
modInst.resources[want.Resource.Resource] + if resourceExp == nil { + return false + } + for _, key := range resourceExp.instanceKeys() { + if key == want.Resource.Key { + return true + } + } + return false +} + +func (m *expanderModule) knowsResource(want addrs.AbsResource) bool { + modInst := m.getModuleInstance(want.Module) + if modInst == nil { + return false + } + _, ret := modInst.resources[want.Resource] + return ret +} diff --git a/pkg/instances/expander_test.go b/pkg/instances/expander_test.go new file mode 100644 index 00000000000..6aca10df152 --- /dev/null +++ b/pkg/instances/expander_test.go @@ -0,0 +1,539 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package instances + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestExpander(t *testing.T) { + // Some module and resource addresses and values we'll use repeatedly below. 
+ singleModuleAddr := addrs.ModuleCall{Name: "single"} + count2ModuleAddr := addrs.ModuleCall{Name: "count2"} + count0ModuleAddr := addrs.ModuleCall{Name: "count0"} + forEachModuleAddr := addrs.ModuleCall{Name: "for_each"} + singleResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "single", + } + count2ResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "count2", + } + count0ResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "count0", + } + forEachResourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "for_each", + } + eachMap := map[string]cty.Value{ + "a": cty.NumberIntVal(1), + "b": cty.NumberIntVal(2), + } + + // In normal use, Expander would be called in the context of a graph + // traversal to ensure that information is registered/requested in the + // correct sequence, but to keep this test self-contained we'll just + // manually write out the steps here. 
+ // + // The steps below are assuming a configuration tree like the following: + // - root module + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - resource test.count0 with count = 0 + // - resource test.for_each with for_each = { a = 1, b = 2 } + // - child module "single" with no count or for_each + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - child module "count2" with count = 2 + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + // - child module "count2" with count = 2 + // - resource test.count2 with count = 2 + // - child module "count0" with count = 0 + // - resource test.single with no count or for_each + // - child module for_each with for_each = { a = 1, b = 2 } + // - resource test.single with no count or for_each + // - resource test.count2 with count = 2 + + ex := NewExpander() + + // We don't register the root module, because it's always implied to exist. + // + // Below we're going to use braces and indentation just to help visually + // reflect the tree structure from the tree in the above comment, in the + // hope that the following is easier to follow. + // + // The Expander API requires that we register containing modules before + // registering anything inside them, so we'll work through the above + // in a depth-first order in the registration steps that follow. 
+ { + ex.SetResourceSingle(addrs.RootModuleInstance, singleResourceAddr) + ex.SetResourceCount(addrs.RootModuleInstance, count2ResourceAddr, 2) + ex.SetResourceCount(addrs.RootModuleInstance, count0ResourceAddr, 0) + ex.SetResourceForEach(addrs.RootModuleInstance, forEachResourceAddr, eachMap) + + ex.SetModuleSingle(addrs.RootModuleInstance, singleModuleAddr) + { + // The single instance of the module + moduleInstanceAddr := addrs.RootModuleInstance.Child("single", addrs.NoKey) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + + ex.SetModuleCount(addrs.RootModuleInstance, count2ModuleAddr, 2) + for i1 := 0; i1 < 2; i1++ { + moduleInstanceAddr := addrs.RootModuleInstance.Child("count2", addrs.IntKey(i1)) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + ex.SetModuleCount(moduleInstanceAddr, count2ModuleAddr, 2) + for i2 := 0; i2 < 2; i2++ { + moduleInstanceAddr := moduleInstanceAddr.Child("count2", addrs.IntKey(i2)) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + } + + ex.SetModuleCount(addrs.RootModuleInstance, count0ModuleAddr, 0) + { + // There are no instances of module "count0", so our nested module + // would never actually get registered here: the expansion node + // for the resource would see that its containing module has no + // instances and so do nothing. + } + + ex.SetModuleForEach(addrs.RootModuleInstance, forEachModuleAddr, eachMap) + for k := range eachMap { + moduleInstanceAddr := addrs.RootModuleInstance.Child("for_each", addrs.StringKey(k)) + ex.SetResourceSingle(moduleInstanceAddr, singleResourceAddr) + ex.SetResourceCount(moduleInstanceAddr, count2ResourceAddr, 2) + } + } + + t.Run("root module", func(t *testing.T) { + // Requesting expansion of the root module doesn't really mean anything + // since it's always a singleton, but for consistency it should work. 
+ got := ex.ExpandModule(addrs.RootModule) + want := []addrs.ModuleInstance{addrs.RootModuleInstance} + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.count2[0]`), + mustAbsResourceInstanceAddr(`test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource count0", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + count0ResourceAddr, + ) + want := []addrs.AbsResourceInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("resource for_each", func(t *testing.T) { + got := ex.ExpandModuleResource( + addrs.RootModule, + forEachResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`test.for_each["a"]`), + mustAbsResourceInstanceAddr(`test.for_each["b"]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single", func(t *testing.T) { + got := ex.ExpandModule(addrs.RootModule.Child("single")) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr("single"), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + 
mustAbsResourceInstanceAddr("module.single.test.single"), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module single resource count2", func(t *testing.T) { + // Two different ways of asking the same question, which should + // both produce the same result. + // First: nested expansion of all instances of the resource across + // all instances of the module, but it's a single-instance module + // so the first level is a singleton. + got1 := ex.ExpandModuleResource( + mustModuleAddr(`single`), + count2ResourceAddr, + ) + // Second: expansion of only instances belonging to a specific + // instance of the module, but again it's a single-instance module + // so there's only one to ask about. + got2 := ex.ExpandResource( + count2ResourceAddr.Absolute( + addrs.RootModuleInstance.Child("single", addrs.NoKey), + ), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.single.test.count2[0]`), + mustAbsResourceInstanceAddr(`module.single.test.count2[1]`), + } + if diff := cmp.Diff(want, got1); diff != "" { + t.Errorf("wrong ExpandModuleResource result\n%s", diff) + } + if diff := cmp.Diff(want, got2); diff != "" { + t.Errorf("wrong ExpandResource result\n%s", diff) + } + }) + t.Run("module single resource count2 with non-existing module instance", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute( + // Note: This is intentionally an invalid instance key, + // so we're asking about module.single[1].test.count2 + // even though module.single doesn't have count set and + // therefore there is no module.single[1]. + addrs.RootModuleInstance.Child("single", addrs.IntKey(1)), + ), + ) + // If the containing module instance doesn't exist then it can't + // possibly have any resource instances inside it. 
+ want := ([]addrs.AbsResourceInstance)(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count2`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.count2[0]`), + mustModuleInstanceAddr(`module.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2`), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].test.single`), + mustAbsResourceInstanceAddr(`module.count2[1].test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 module count2", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count2.count2`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.count2[0].module.count2[0]`), + mustModuleInstanceAddr(`module.count2[0].module.count2[1]`), + mustModuleInstanceAddr(`module.count2[1].module.count2[0]`), + mustModuleInstanceAddr(`module.count2[1].module.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 module count2 GetDeepestExistingModuleInstance", func(t 
*testing.T) { + t.Run("first step invalid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2["nope"].module.count2[0]`)) + want := addrs.RootModuleInstance + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("second step invalid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2[1].module.count2`)) + want := mustModuleInstanceAddr(`module.count2[1]`) + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("neither step valid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2.module.count2["nope"]`)) + want := addrs.RootModuleInstance + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("both steps valid", func(t *testing.T) { + got := ex.GetDeepestExistingModuleInstance(mustModuleInstanceAddr(`module.count2[1].module.count2[0]`)) + want := mustModuleInstanceAddr(`module.count2[1].module.count2[0]`) + if !want.Equal(got) { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + }) + t.Run("module count2 resource count2 resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count2.count2`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[0].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[0].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.count2[1].module.count2[1].test.count2[0]`), + 
mustAbsResourceInstanceAddr(`module.count2[1].module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count2 resource count2 resource count2", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute(mustModuleInstanceAddr(`module.count2[0].module.count2[1]`)), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.count2[0].module.count2[1].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count0", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`count0`)) + want := []addrs.ModuleInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module count0 resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`count0`), + singleResourceAddr, + ) + // The containing module has zero instances, so therefore there + // are zero instances of this resource even though it doesn't have + // count = 0 set itself. 
+ want := []addrs.AbsResourceInstance(nil) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each", func(t *testing.T) { + got := ex.ExpandModule(mustModuleAddr(`for_each`)) + want := []addrs.ModuleInstance{ + mustModuleInstanceAddr(`module.for_each["a"]`), + mustModuleInstanceAddr(`module.for_each["b"]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource single", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`for_each`), + singleResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.single`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.single`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource count2", func(t *testing.T) { + got := ex.ExpandModuleResource( + mustModuleAddr(`for_each`), + count2ResourceAddr, + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["b"].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("module for_each resource count2", func(t *testing.T) { + got := ex.ExpandResource( + count2ResourceAddr.Absolute(mustModuleInstanceAddr(`module.for_each["a"]`)), + ) + want := []addrs.AbsResourceInstance{ + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[0]`), + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + + t.Run(`module.for_each["b"] repetitiondata`, func(t *testing.T) { + got := 
ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.for_each["b"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("b"), + EachValue: cty.NumberIntVal(2), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.count2[0].module.count2[1] repetitiondata`, func(t *testing.T) { + got := ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.count2[0].module.count2[1]`), + ) + want := RepetitionData{ + CountIndex: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"] repetitiondata`, func(t *testing.T) { + got := ex.GetModuleInstanceRepetitionData( + mustModuleInstanceAddr(`module.for_each["a"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("a"), + EachValue: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + + t.Run(`test.for_each["a"] repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`test.for_each["a"]`), + ) + want := RepetitionData{ + EachKey: cty.StringVal("a"), + EachValue: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"].test.single repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`module.for_each["a"].test.single`), + ) + want := RepetitionData{} + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run(`module.for_each["a"].test.count2[1] repetitiondata`, func(t *testing.T) { + got := ex.GetResourceInstanceRepetitionData( + mustAbsResourceInstanceAddr(`module.for_each["a"].test.count2[1]`), + 
) + want := RepetitionData{ + CountIndex: cty.NumberIntVal(1), + } + if diff := cmp.Diff(want, got, cmp.Comparer(valueEquals)); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) +} + +func mustAbsResourceInstanceAddr(str string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(str) + if diags.HasErrors() { + panic(fmt.Sprintf("invalid absolute resource instance address: %s", diags.Err())) + } + return addr +} + +func mustModuleAddr(str string) addrs.Module { + if len(str) == 0 { + return addrs.RootModule + } + // We don't have a real parser for these because they don't appear in the + // language anywhere, but this interpretation mimics the format we + // produce from the String method on addrs.Module. + parts := strings.Split(str, ".") + return addrs.Module(parts) +} + +func mustModuleInstanceAddr(str string) addrs.ModuleInstance { + if len(str) == 0 { + return addrs.RootModuleInstance + } + addr, diags := addrs.ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(fmt.Sprintf("invalid module instance address: %s", diags.Err())) + } + return addr +} + +func valueEquals(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + return a == b + } + return a.RawEquals(b) +} diff --git a/pkg/instances/expansion_mode.go b/pkg/instances/expansion_mode.go new file mode 100644 index 00000000000..98d98e4cd07 --- /dev/null +++ b/pkg/instances/expansion_mode.go @@ -0,0 +1,90 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package instances + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// expansion is an internal interface used to represent the different +// ways expansion can operate depending on how repetition is configured for +// an object. 
+type expansion interface { + instanceKeys() []addrs.InstanceKey + repetitionData(addrs.InstanceKey) RepetitionData +} + +// expansionSingle is the expansion corresponding to no repetition arguments +// at all, producing a single object with no key. +// +// expansionSingleVal is the only valid value of this type. +type expansionSingle uintptr + +var singleKeys = []addrs.InstanceKey{addrs.NoKey} +var expansionSingleVal expansionSingle + +func (e expansionSingle) instanceKeys() []addrs.InstanceKey { + return singleKeys +} + +func (e expansionSingle) repetitionData(key addrs.InstanceKey) RepetitionData { + if key != addrs.NoKey { + panic("cannot use instance key with non-repeating object") + } + return RepetitionData{} +} + +// expansionCount is the expansion corresponding to the "count" argument. +type expansionCount int + +func (e expansionCount) instanceKeys() []addrs.InstanceKey { + ret := make([]addrs.InstanceKey, int(e)) + for i := range ret { + ret[i] = addrs.IntKey(i) + } + return ret +} + +func (e expansionCount) repetitionData(key addrs.InstanceKey) RepetitionData { + i := int(key.(addrs.IntKey)) + if i < 0 || i >= int(e) { + panic(fmt.Sprintf("instance key %d out of range for count %d", i, e)) + } + return RepetitionData{ + CountIndex: cty.NumberIntVal(int64(i)), + } +} + +// expansionForEach is the expansion corresponding to the "for_each" argument. 
+type expansionForEach map[string]cty.Value + +func (e expansionForEach) instanceKeys() []addrs.InstanceKey { + ret := make([]addrs.InstanceKey, 0, len(e)) + for k := range e { + ret = append(ret, addrs.StringKey(k)) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].(addrs.StringKey) < ret[j].(addrs.StringKey) + }) + return ret +} + +func (e expansionForEach) repetitionData(key addrs.InstanceKey) RepetitionData { + k := string(key.(addrs.StringKey)) + v, ok := e[k] + if !ok { + panic(fmt.Sprintf("instance key %q does not match any instance", k)) + } + return RepetitionData{ + EachKey: cty.StringVal(k), + EachValue: v, + } +} diff --git a/pkg/instances/instance_key_data.go b/pkg/instances/instance_key_data.go new file mode 100644 index 00000000000..1fa89120370 --- /dev/null +++ b/pkg/instances/instance_key_data.go @@ -0,0 +1,33 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package instances + +import ( + "github.com/zclconf/go-cty/cty" +) + +// RepetitionData represents the values available to identify individual +// repetitions of a particular object. +// +// This corresponds to the each.key, each.value, and count.index symbols in +// the configuration language. +type RepetitionData struct { + // CountIndex is the value for count.index, or cty.NilVal if evaluating + // in a context where the "count" argument is not active. + // + // For correct operation, this should always be of type cty.Number if not + // nil. + CountIndex cty.Value + + // EachKey and EachValue are the values for each.key and each.value + // respectively, or cty.NilVal if evaluating in a context where the + // "for_each" argument is not active. These must either both be set + // or neither set. + // + // For correct operation, EachKey must always be either of type cty.String + // or cty.Number if not nil. 
+	EachKey, EachValue cty.Value
+}
diff --git a/pkg/instances/set.go b/pkg/instances/set.go
new file mode 100644
index 00000000000..4fa84471a8b
--- /dev/null
+++ b/pkg/instances/set.go
@@ -0,0 +1,56 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package instances
+
+import (
+	"github.com/kubegems/opentofu/pkg/addrs"
+)
+
+// Set is a set of instances, intended mainly for the return value of
+// Expander.AllInstances, where it therefore represents all of the module
+// and resource instances known to the expander.
+type Set struct {
+	// Set currently really just wraps Expander with a reduced API that
+	// only supports lookups, to make it clear that a holder of a Set should
+	// not be modifying the expander any further.
+	exp *Expander
+}
+
+// HasModuleInstance returns true if and only if the set contains the module
+// instance with the given address.
+func (s Set) HasModuleInstance(want addrs.ModuleInstance) bool {
+	return s.exp.knowsModuleInstance(want)
+}
+
+// HasModuleCall returns true if and only if the set contains the module
+// call with the given address, even if that module call has no instances.
+func (s Set) HasModuleCall(want addrs.AbsModuleCall) bool {
+	return s.exp.knowsModuleCall(want)
+}
+
+// HasResourceInstance returns true if and only if the set contains the resource
+// instance with the given address.
+// The instance key must exactly match one registered with the expander.
+func (s Set) HasResourceInstance(want addrs.AbsResourceInstance) bool {
+	return s.exp.knowsResourceInstance(want)
+}
+
+// HasResource returns true if and only if the set contains the resource with
+// the given address, even if that resource has no instances.
+// This reports registration of the resource itself, not of any instance keys.
+func (s Set) HasResource(want addrs.AbsResource) bool {
+	return s.exp.knowsResource(want)
+}
+
+// InstancesForModule returns all of the module instances that correspond with
+// the given static module path.
+// +// If there are multiple module calls in the path that have repetition enabled +// then the result is the full expansion of all combinations of all of their +// declared instance keys. +func (s Set) InstancesForModule(modAddr addrs.Module) []addrs.ModuleInstance { + return s.exp.expandModule(modAddr, true) +} diff --git a/pkg/instances/set_test.go b/pkg/instances/set_test.go new file mode 100644 index 00000000000..e8dd7e47f6b --- /dev/null +++ b/pkg/instances/set_test.go @@ -0,0 +1,216 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package instances + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +func TestSet(t *testing.T) { + exp := NewExpander() + + // The following constructs the following imaginary module/resource tree: + // - root module + // - test_thing.single: no repetition + // - test_thing.count: count = 1 + // - test_thing.for_each: for_each = { c = "C" } + // - module.single: no repetition + // - test_thing.single: no repetition + // - module.nested_single: no repetition + // - module.zero_count: count = 0 + // - module.count: count = 2 + // - module.nested_for_each: [0] for_each = {}, [1] for_each = { e = "E" } + // - module.for_each: for_each = { a = "A", b = "B" } + // - test_thing.count: ["a"] count = 0, ["b"] count = 1 + exp.SetModuleSingle(addrs.RootModuleInstance, addrs.ModuleCall{Name: "single"}) + exp.SetModuleCount(addrs.RootModuleInstance, addrs.ModuleCall{Name: "count"}, 2) + exp.SetModuleForEach(addrs.RootModuleInstance, addrs.ModuleCall{Name: "for_each"}, map[string]cty.Value{ + "a": cty.StringVal("A"), + "b": cty.StringVal("B"), + }) + exp.SetModuleSingle(addrs.RootModuleInstance.Child("single", addrs.NoKey), addrs.ModuleCall{Name: "nested_single"}) + exp.SetModuleForEach(addrs.RootModuleInstance.Child("count", addrs.IntKey(0)), addrs.ModuleCall{Name: 
"nested_for_each"}, nil) + exp.SetModuleForEach(addrs.RootModuleInstance.Child("count", addrs.IntKey(1)), addrs.ModuleCall{Name: "nested_for_each"}, map[string]cty.Value{ + "e": cty.StringVal("E"), + }) + exp.SetModuleCount( + addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey), + addrs.ModuleCall{Name: "zero_count"}, + 0, + ) + + rAddr := func(name string) addrs.Resource { + return addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: name, + } + } + exp.SetResourceSingle(addrs.RootModuleInstance, rAddr("single")) + exp.SetResourceCount(addrs.RootModuleInstance, rAddr("count"), 1) + exp.SetResourceForEach(addrs.RootModuleInstance, rAddr("for_each"), map[string]cty.Value{ + "c": cty.StringVal("C"), + }) + exp.SetResourceSingle(addrs.RootModuleInstance.Child("single", addrs.NoKey), rAddr("single")) + exp.SetResourceCount(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a")), rAddr("count"), 0) + exp.SetResourceCount(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b")), rAddr("count"), 1) + + set := exp.AllInstances() + + // HasModuleInstance tests + if input := addrs.RootModuleInstance; !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(1)); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", 
addrs.IntKey(1)).Child("nested_for_each", addrs.StringKey("e")); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a")); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b")); !set.HasModuleInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.IntKey(0)); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.StringKey("a")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nonexist", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(2)); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.StringKey("a")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)).Child("nested_for_each", addrs.StringKey("e")); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey).Child("zero_count", addrs.NoKey); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", 
addrs.NoKey).Child("nested_single", addrs.NoKey).Child("zero_count", addrs.IntKey(0)); set.HasModuleInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasModuleCall tests + if input := addrs.RootModuleInstance.ChildCall("single"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).ChildCall("nested_single"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("count"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(0)).ChildCall("nested_for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("count", addrs.IntKey(1)).ChildCall("nested_for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("for_each"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).Child("nested_single", addrs.NoKey).ChildCall("zero_count"); !set.HasModuleCall(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.ChildCall("nonexist"); set.HasModuleCall(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := addrs.RootModuleInstance.Child("single", addrs.NoKey).ChildCall("nonexist"); set.HasModuleCall(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasResourceInstance tests + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := 
rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("for_each").Instance(addrs.StringKey("c")).Absolute(addrs.RootModuleInstance); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("single", addrs.NoKey)); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b"))); !set.HasResourceInstance(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.StringKey("")).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.StringKey("")).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("single").Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("single", addrs.IntKey(0))); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := 
rAddr("count").Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a"))); set.HasResourceInstance(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // HasResource tests + if input := rAddr("single").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("for_each").Absolute(addrs.RootModuleInstance); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("single").Absolute(addrs.RootModuleInstance.Child("single", addrs.NoKey)); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("a"))); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("b"))); !set.HasResource(input) { + t.Errorf("missing %T %s", input, input.String()) + } + if input := rAddr("nonexist").Absolute(addrs.RootModuleInstance); set.HasResource(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + if input := rAddr("count").Absolute(addrs.RootModuleInstance.Child("for_each", addrs.StringKey("nonexist"))); set.HasResource(input) { + t.Errorf("unexpected %T %s", input, input.String()) + } + + // ensure we can lookup non-existent addrs in a set without panic + if set.InstancesForModule(addrs.RootModule.Child("missing")) != nil { + t.Error("unexpected instances from missing module") + } +} diff --git a/pkg/ipaddr/LICENSE b/pkg/ipaddr/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/pkg/ipaddr/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/ipaddr/PATENTS b/pkg/ipaddr/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/pkg/ipaddr/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/pkg/ipaddr/README.md b/pkg/ipaddr/README.md new file mode 100644 index 00000000000..9699ba83245 --- /dev/null +++ b/pkg/ipaddr/README.md @@ -0,0 +1,33 @@ +# Forked IP address parsing functions + +This directory contains a subset of code from the Go project's `net` package +as of Go 1.16, used under the Go project license which we've included here +in [`LICENSE`](LICENSE) and [`PATENTS`](PATENTS), which are also copied from +the Go project. + +OpenTofu has its own fork of these functions because Go 1.17 included a +breaking change to reject IPv4 address octets written with leading zeros. 
+ +The Go project rationale for that change was that Go historically interpreted +leading-zero octets inconsistently with many other implementations, trimming +off the zeros and still treating the rest as decimal rather than treating the +octet as octal. + +The Go team made the reasonable observation that having a function that +interprets a non-normalized form in a manner inconsistent with other +implementations may cause naive validation or policy checks to produce +incorrect results, and thus it's a potential security concern. For more +information, see [Go issue #30999](https://golang.org/issue/30999). + +After careful consideration, it was concluded that OpenTofu's +use of these functions as part of the implementation of the `cidrhost`, +`cidrsubnet`, `cidrsubnets`, and `cidrnetmask` functions has a more limited +impact than the general availability of these functions in the Go standard +library, and so we can't justify a similar exception to our compatibility +promises as the Go team made to their Go 1.0 compatibility promises. + +If you're considering using this package for new functionality _other than_ the +built-in functions mentioned above, please do so only if consistency with the +behavior of those functions is important. Otherwise, new features are not +burdened by the same compatibility constraints and so should typically prefer +to use the stricter interpretation of the upstream parsing functions. diff --git a/pkg/ipaddr/doc.go b/pkg/ipaddr/doc.go new file mode 100644 index 00000000000..7104c185d00 --- /dev/null +++ b/pkg/ipaddr/doc.go @@ -0,0 +1,11 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package ipaddr is a fork of a subset of the Go standard "net" package which +// retains parsing behaviors from Go 1.16 or earlier. +// +// Don't use this for any new code without careful consideration. 
See the +// README.md in the package directory for more information. +package ipaddr diff --git a/pkg/ipaddr/ip.go b/pkg/ipaddr/ip.go new file mode 100644 index 00000000000..304584e7455 --- /dev/null +++ b/pkg/ipaddr/ip.go @@ -0,0 +1,226 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IP address manipulations +// +// IPv4 addresses are 4 bytes; IPv6 addresses are 16 bytes. +// An IPv4 address can be converted to an IPv6 address by +// adding a canonical prefix (10 zeros, 2 0xFFs). +// This library accepts either size of byte slice but always +// returns 16-byte addresses. + +package ipaddr + +import ( + stdnet "net" +) + +// +// Lean on the standard net lib as much as possible. +// + +type IP = stdnet.IP +type IPNet = stdnet.IPNet +type ParseError = stdnet.ParseError + +const IPv4len = stdnet.IPv4len +const IPv6len = stdnet.IPv6len + +var CIDRMask = stdnet.CIDRMask +var IPv4 = stdnet.IPv4 + +// Parse IPv4 address (d.d.d.d). +func parseIPv4(s string) IP { + var p [IPv4len]byte + for i := 0; i < IPv4len; i++ { + if len(s) == 0 { + // Missing octets. + return nil + } + if i > 0 { + if s[0] != '.' { + return nil + } + s = s[1:] + } + n, c, ok := dtoi(s) + if !ok || n > 0xFF { + return nil + } + // + // NOTE: This correct check was added for go-1.17, but is a + // backwards-incompatible change for OpenTofu users, who might have + // already written modules with leading zeroes. + // + //if c > 1 && s[0] == '0' { + // // Reject non-zero components with leading zeroes. + // return nil + //} + s = s[c:] + p[i] = byte(n) + } + if len(s) != 0 { + return nil + } + return IPv4(p[0], p[1], p[2], p[3]) +} + +// parseIPv6 parses s as a literal IPv6 address described in RFC 4291 +// and RFC 5952. 
+func parseIPv6(s string) (ip IP) { + ip = make(IP, IPv6len) + ellipsis := -1 // position of ellipsis in ip + + // Might have leading ellipsis + if len(s) >= 2 && s[0] == ':' && s[1] == ':' { + ellipsis = 0 + s = s[2:] + // Might be only ellipsis + if len(s) == 0 { + return ip + } + } + + // Loop, parsing hex numbers followed by colon. + i := 0 + for i < IPv6len { + // Hex number. + n, c, ok := xtoi(s) + if !ok || n > 0xFFFF { + return nil + } + + // If followed by dot, might be in trailing IPv4. + if c < len(s) && s[c] == '.' { + if ellipsis < 0 && i != IPv6len-IPv4len { + // Not the right place. + return nil + } + if i+IPv4len > IPv6len { + // Not enough room. + return nil + } + ip4 := parseIPv4(s) + if ip4 == nil { + return nil + } + ip[i] = ip4[12] + ip[i+1] = ip4[13] + ip[i+2] = ip4[14] + ip[i+3] = ip4[15] + s = "" + i += IPv4len + break + } + + // Save this 16-bit chunk. + ip[i] = byte(n >> 8) + ip[i+1] = byte(n) + i += 2 + + // Stop at end of string. + s = s[c:] + if len(s) == 0 { + break + } + + // Otherwise must be followed by colon and more. + if s[0] != ':' || len(s) == 1 { + return nil + } + s = s[1:] + + // Look for ellipsis. + if s[0] == ':' { + if ellipsis >= 0 { // already have one + return nil + } + ellipsis = i + s = s[1:] + if len(s) == 0 { // can be at end + break + } + } + } + + // Must have used entire string. + if len(s) != 0 { + return nil + } + + // If didn't parse enough, expand ellipsis. + if i < IPv6len { + if ellipsis < 0 { + return nil + } + n := IPv6len - i + for j := i - 1; j >= ellipsis; j-- { + ip[j+n] = ip[j] + } + for j := ellipsis + n - 1; j >= ellipsis; j-- { + ip[j] = 0 + } + } else if ellipsis >= 0 { + // Ellipsis must represent at least one 0 group. + return nil + } + return ip +} + +// ParseIP parses s as an IP address, returning the result. +// The string s can be in IPv4 dotted decimal ("192.0.2.1"), IPv6 +// ("2001:db8::68"), or IPv4-mapped IPv6 ("::ffff:192.0.2.1") form. 
+// If s is not a valid textual representation of an IP address, +// ParseIP returns nil. +func ParseIP(s string) IP { + for i := 0; i < len(s); i++ { + switch s[i] { + case '.': + return parseIPv4(s) + case ':': + return parseIPv6(s) + } + } + return nil +} + +// ParseCIDR parses s as a CIDR notation IP address and prefix length, +// like "192.0.2.0/24" or "2001:db8::/32", as defined in +// RFC 4632 and RFC 4291. +// +// It returns the IP address and the network implied by the IP and +// prefix length. +// For example, ParseCIDR("192.0.2.1/24") returns the IP address +// 192.0.2.1 and the network 192.0.2.0/24. +func ParseCIDR(s string) (IP, *IPNet, error) { + i := indexByteString(s, '/') + if i < 0 { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + addr, mask := s[:i], s[i+1:] + iplen := IPv4len + ip := parseIPv4(addr) + if ip == nil { + iplen = IPv6len + ip = parseIPv6(addr) + } + n, i, ok := dtoi(mask) + if ip == nil || !ok || i != len(mask) || n < 0 || n > 8*iplen { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + m := CIDRMask(n, 8*iplen) + return ip, &IPNet{IP: ip.Mask(m), Mask: m}, nil +} + +// This is copied from go/src/internal/bytealg, which includes versions +// optimized for various platforms. Those optimizations are elided here so we +// don't have to maintain them. +func indexByteString(s string, c byte) int { + for i := 0; i < len(s); i++ { + if s[i] == c { + return i + } + } + return -1 +} diff --git a/pkg/ipaddr/ip_test.go b/pkg/ipaddr/ip_test.go new file mode 100644 index 00000000000..9565b7a0a74 --- /dev/null +++ b/pkg/ipaddr/ip_test.go @@ -0,0 +1,124 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipaddr + +import ( + stdnet "net" + "reflect" + "testing" +) + +// Lean on the standard net lib as much as possible. 
+type IPMask = stdnet.IPMask + +var IPv4Mask = stdnet.IPv4Mask + +var parseIPTests = []struct { + in string + out IP +}{ + {"127.0.1.2", IPv4(127, 0, 1, 2)}, + {"127.0.0.1", IPv4(127, 0, 0, 1)}, + {"127.001.002.003", IPv4(127, 1, 2, 3)}, + {"127.007.008.009", IPv4(127, 7, 8, 9)}, + {"127.010.020.030", IPv4(127, 10, 20, 30)}, + {"::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"::ffff:127.001.002.003", IPv4(127, 1, 2, 3)}, + {"::ffff:127.007.008.009", IPv4(127, 7, 8, 9)}, + {"::ffff:127.010.020.030", IPv4(127, 10, 20, 30)}, + {"::ffff:7f01:0203", IPv4(127, 1, 2, 3)}, + {"0:0:0:0:0000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"0:0:0:0:000000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + {"0:0:0:0::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, + + {"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}}, + {"2001:4860:0000:2001:0000:0000:0000:0068", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}}, + + {"-0.0.0.0", nil}, + {"0.-1.0.0", nil}, + {"0.0.-2.0", nil}, + {"0.0.0.-3", nil}, + {"127.0.0.256", nil}, + {"abc", nil}, + {"123:", nil}, + {"fe80::1%lo0", nil}, + {"fe80::1%911", nil}, + {"", nil}, + {"a1:a2:a3:a4::b1:b2:b3:b4", nil}, // Issue 6628 + // + // NOTE: These correct failures were added for go-1.17, but are a + // backwards-incompatible change for OpenTofu users, who might have + // already written modules using leading zeroes. 
+ // + //{"127.001.002.003", nil}, + //{"::ffff:127.001.002.003", nil}, + //{"123.000.000.000", nil}, + //{"1.2..4", nil}, + //{"0123.0.0.1", nil}, +} + +func TestParseIP(t *testing.T) { + for _, tt := range parseIPTests { + if out := ParseIP(tt.in); !reflect.DeepEqual(out, tt.out) { + t.Errorf("ParseIP(%q) = %v, want %v", tt.in, out, tt.out) + } + } +} + +var parseCIDRTests = []struct { + in string + ip IP + net *IPNet + err error +}{ + {"135.104.0.0/32", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"0.0.0.0/24", IPv4(0, 0, 0, 0), &IPNet{IP: IPv4(0, 0, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"135.104.0.0/24", IPv4(135, 104, 0, 0), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"135.104.0.1/32", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"135.104.0.1/24", IPv4(135, 104, 0, 1), &IPNet{IP: IPv4(135, 104, 0, 0), Mask: IPv4Mask(255, 255, 255, 0)}, nil}, + {"127.000.000.001/32", IPv4(127, 0, 0, 1), &IPNet{IP: IPv4(127, 0, 0, 1), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"127.007.008.009/32", IPv4(127, 7, 8, 9), &IPNet{IP: IPv4(127, 7, 8, 9), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"127.010.020.030/32", IPv4(127, 10, 20, 30), &IPNet{IP: IPv4(127, 10, 20, 30), Mask: IPv4Mask(255, 255, 255, 255)}, nil}, + {"::1/128", ParseIP("::1"), &IPNet{IP: ParseIP("::1"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"))}, nil}, + {"abcd:2345::/127", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"))}, nil}, + {"abcd:2345::/65", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff:8000::"))}, nil}, + {"abcd:2345::/64", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:ffff::"))}, nil}, + {"abcd:2345::/63", ParseIP("abcd:2345::"), &IPNet{IP: 
ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:ffff:fffe::"))}, nil}, + {"abcd:2345::/33", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff:8000::"))}, nil}, + {"abcd:2345::/32", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2345::"), Mask: IPMask(ParseIP("ffff:ffff::"))}, nil}, + {"abcd:2344::/31", ParseIP("abcd:2344::"), &IPNet{IP: ParseIP("abcd:2344::"), Mask: IPMask(ParseIP("ffff:fffe::"))}, nil}, + {"abcd:2300::/24", ParseIP("abcd:2300::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil}, + {"abcd:2345::/24", ParseIP("abcd:2345::"), &IPNet{IP: ParseIP("abcd:2300::"), Mask: IPMask(ParseIP("ffff:ff00::"))}, nil}, + {"2001:DB8::/48", ParseIP("2001:DB8::"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil}, + {"2001:DB8::1/48", ParseIP("2001:DB8::1"), &IPNet{IP: ParseIP("2001:DB8::"), Mask: IPMask(ParseIP("ffff:ffff:ffff::"))}, nil}, + {"192.168.1.1/255.255.255.0", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/255.255.255.0"}}, + {"192.168.1.1/35", nil, nil, &ParseError{Type: "CIDR address", Text: "192.168.1.1/35"}}, + {"2001:db8::1/-1", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-1"}}, + {"2001:db8::1/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "2001:db8::1/-0"}}, + {"-0.0.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "-0.0.0.0/32"}}, + {"0.-1.0.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.-1.0.0/32"}}, + {"0.0.-2.0/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.-2.0/32"}}, + {"0.0.0.-3/32", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.-3/32"}}, + {"0.0.0.0/-0", nil, nil, &ParseError{Type: "CIDR address", Text: "0.0.0.0/-0"}}, + // + // NOTE: This correct failure was added for go-1.17, but is a + // backwards-incompatible change for OpenTofu users, who might have + // already written modules using leading zeroes. 
+ // + //{"127.000.000.001/32", nil, nil, &ParseError{Type: "CIDR address", Text: "127.000.000.001/32"}}, + {"", nil, nil, &ParseError{Type: "CIDR address", Text: ""}}, +} + +func TestParseCIDR(t *testing.T) { + for _, tt := range parseCIDRTests { + ip, net, err := ParseCIDR(tt.in) + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("ParseCIDR(%q) = %v, %v; want %v, %v", tt.in, ip, net, tt.ip, tt.net) + } + if err == nil && (!tt.ip.Equal(ip) || !tt.net.IP.Equal(net.IP) || !reflect.DeepEqual(net.Mask, tt.net.Mask)) { + t.Errorf("ParseCIDR(%q) = %v, {%v, %v}; want %v, {%v, %v}", tt.in, ip, net.IP, net.Mask, tt.ip, tt.net.IP, tt.net.Mask) + } + } +} diff --git a/pkg/ipaddr/parse.go b/pkg/ipaddr/parse.go new file mode 100644 index 00000000000..07d6eece4a3 --- /dev/null +++ b/pkg/ipaddr/parse.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simple file i/o and string manipulation, to avoid +// depending on strconv and bufio and strings. + +package ipaddr + +// Bigger than we need, not too big to worry about overflow +const big = 0xFFFFFF + +// Decimal to integer. +// Returns number, characters consumed, success. +func dtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + n = n*10 + int(s[i]-'0') + if n >= big { + return big, i, false + } + } + if i == 0 { + return 0, 0, false + } + return n, i, true +} + +// Hexadecimal to integer. +// Returns number, characters consumed, success. 
+func xtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' { + n *= 16 + n += int(s[i] - '0') + } else if 'a' <= s[i] && s[i] <= 'f' { + n *= 16 + n += int(s[i]-'a') + 10 + } else if 'A' <= s[i] && s[i] <= 'F' { + n *= 16 + n += int(s[i]-'A') + 10 + } else { + break + } + if n >= big { + return 0, i, false + } + } + if i == 0 { + return 0, i, false + } + return n, i, true +} diff --git a/pkg/lang/blocktoattr/doc.go b/pkg/lang/blocktoattr/doc.go new file mode 100644 index 00000000000..2fdd933d374 --- /dev/null +++ b/pkg/lang/blocktoattr/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package blocktoattr includes some helper functions that can perform +// preprocessing on a HCL body where a configschema.Block schema is available +// in order to allow list and set attributes defined in the schema to be +// optionally written by the user as block syntax. +package blocktoattr diff --git a/pkg/lang/blocktoattr/fixup.go b/pkg/lang/blocktoattr/fixup.go new file mode 100644 index 00000000000..afa2859cf42 --- /dev/null +++ b/pkg/lang/blocktoattr/fixup.go @@ -0,0 +1,270 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization +// functionality to allow attributes that are specified as having list or set +// type in the schema to be written with HCL block syntax as multiple nested +// blocks with the attribute name as the block type. 
+// +// The fixup is only applied in the absence of structural attribute types. The +// presence of these types indicate the use of a provider which does not +// support mapping blocks to attributes. +// +// This partially restores some of the block/attribute confusion from HCL 1 +// so that existing patterns that depended on that confusion can continue to +// be used in the short term while we settle on a longer-term strategy. +// +// Most of the fixup work is actually done when the returned body is +// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual +// decode of the body might not, if the content of the body is so ambiguous +// that there's no safe way to map it to the schema. +func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { + // The schema should never be nil, but in practice it seems to be sometimes + // in the presence of poorly-configured test mocks, so we'll be robust + // by synthesizing an empty one. + if schema == nil { + schema = &configschema.Block{} + } + + if skipFixup(schema) { + // we don't have any context for the resource name or type, but + // hopefully this could help locate the evaluation in the logs if there + // were a problem + log.Println("[DEBUG] skipping FixUpBlockAttrs") + return body + } + + return &fixupBody{ + original: body, + schema: schema, + names: ambiguousNames(schema), + } +} + +// skipFixup detects any use of Attribute.NestedType, or Types which could not +// be generate by the legacy SDK when taking SchemaConfigModeAttr into account. +func skipFixup(schema *configschema.Block) bool { + for _, attr := range schema.Attributes { + if attr.NestedType != nil { + return true + } + ty := attr.Type + + // Lists and sets of objects could be generated by + // SchemaConfigModeAttr, but some other combinations can be ruled out. + + // Tuples and objects could not be generated at all. 
+ if ty.IsTupleType() || ty.IsObjectType() { + return true + } + + // A map of objects was not possible. + if ty.IsMapType() && ty.ElementType().IsObjectType() { + return true + } + + // Nested collections were not really supported, but could be generated + // with string types (though we conservatively limit this to primitive types) + if ty.IsCollectionType() { + ety := ty.ElementType() + if ety.IsCollectionType() && !ety.ElementType().IsPrimitiveType() { + return true + } + } + } + + for _, block := range schema.BlockTypes { + if skipFixup(&block.Block) { + return true + } + } + + return false +} + +type fixupBody struct { + original hcl.Body + schema *configschema.Block + names map[string]struct{} +} + +type unknownBlock interface { + Unknown() bool +} + +func (b *fixupBody) Unknown() bool { + if u, ok := b.original.(unknownBlock); ok { + return u.Unknown() + } + return false +} + +// Content decodes content from the body. The given schema must be the lower-level +// representation of the same schema that was previously passed to FixUpBlockAttrs, +// or else the result is undefined. +func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, diags := b.original.Content(schema) + return b.fixupContent(content), diags +} + +func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, remain, diags := b.original.PartialContent(schema) + remain = &fixupBody{ + original: remain, + schema: b.schema, + names: b.names, + } + return b.fixupContent(content), remain, diags +} + +func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // FixUpBlockAttrs is not intended to be used in situations where we'd use + // JustAttributes, so we just pass this through verbatim to complete our + // implementation of hcl.Body. 
+ return b.original.JustAttributes() +} + +func (b *fixupBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} + +// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's +// content to determine whether the author has used attribute or block syntax +// for each of the ambiguous attributes where both are permitted. +// +// The resulting schema will always contain all of the same names that are +// in the given schema, but some attribute schemas may instead be replaced by +// block header schemas. +func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema { + return effectiveSchema(given, b.original, b.names, true) +} + +func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent { + var ret hcl.BodyContent + ret.Attributes = make(hcl.Attributes) + for name, attr := range content.Attributes { + ret.Attributes[name] = attr + } + blockAttrVals := make(map[string][]*hcl.Block) + for _, block := range content.Blocks { + if _, exists := b.names[block.Type]; exists { + // If we get here then we've found a block type whose instances need + // to be re-interpreted as a list-of-objects attribute. We'll gather + // those up and fix them up below. + blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block) + continue + } + + // We need to now re-wrap our inner body so it will be subject to the + // same attribute-as-block fixup when recursively decoded. + retBlock := *block // shallow copy + if blockS, ok := b.schema.BlockTypes[block.Type]; ok { + // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then + retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block) + } + + ret.Blocks = append(ret.Blocks, &retBlock) + } + // Now we'll install synthetic attributes for each of our fixups. We can't + // do this exactly because HCL's information model expects an attribute + // to be a single decl but we have multiple separate blocks. 
We'll + // approximate things, then, by using only our first block for the source + // location information. (We are guaranteed at least one by the above logic.) + for name, blocks := range blockAttrVals { + ret.Attributes[name] = &hcl.Attribute{ + Name: name, + Expr: &fixupBlocksExpr{ + blocks: blocks, + ety: b.schema.Attributes[name].Type.ElementType(), + }, + + Range: blocks[0].DefRange, + NameRange: blocks[0].TypeRange, + } + } + + ret.MissingItemRange = b.MissingItemRange() + return &ret +} + +type fixupBlocksExpr struct { + blocks hcl.Blocks + ety cty.Type +} + +func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // In order to produce a suitable value for our expression we need to + // now decode the whole descendent block structure under each of our block + // bodies. + // + // That requires us to do something rather strange: we must construct a + // synthetic block type schema derived from the element type of the + // attribute, thus inverting our usual direction of lowering a schema + // into an implied type. Because a type is less detailed than a schema, + // the result is imprecise and in particular will just consider all + // the attributes to be optional and let the provider eventually decide + // whether to return errors if they turn out to be null when required. + schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety + spec := schema.DecoderSpec() + + vals := make([]cty.Value, len(e.blocks)) + var diags hcl.Diagnostics + for i, block := range e.blocks { + body := FixUpBlockAttrs(block.Body, schema) + val, blockDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, blockDiags...) 
+ if val == cty.NilVal { + val = cty.UnknownVal(e.ety) + } + vals[i] = val + } + if len(vals) == 0 { + return cty.ListValEmpty(e.ety), diags + } + return cty.ListVal(vals), diags +} + +func (e *fixupBlocksExpr) Variables() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Variables(block.Body, spec)...) + } + return ret +} + +func (e *fixupBlocksExpr) Functions() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Functions(block.Body, spec)...) + } + return ret +} + +func (e *fixupBlocksExpr) Range() hcl.Range { + // This is not really an appropriate range for the expression but it's + // the best we can do from here. + return e.blocks[0].DefRange +} + +func (e *fixupBlocksExpr) StartRange() hcl.Range { + return e.blocks[0].DefRange +} diff --git a/pkg/lang/blocktoattr/fixup_bench_test.go b/pkg/lang/blocktoattr/fixup_bench_test.go new file mode 100644 index 00000000000..ec2eafffcdd --- /dev/null +++ b/pkg/lang/blocktoattr/fixup_bench_test.go @@ -0,0 +1,102 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNestedBlock(nesting int) *configschema.NestedBlock { + ret := &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": {Type: cty.String, Required: true}, + "b": {Type: cty.String, Optional: true}, + }, + }, + } + if nesting > 0 { + ret.BlockTypes = map[string]*configschema.NestedBlock{ + "nested0": ambiguousNestedBlock(nesting - 1), + "nested1": ambiguousNestedBlock(nesting - 1), + "nested2": ambiguousNestedBlock(nesting - 1), + "nested3": ambiguousNestedBlock(nesting - 1), + "nested4": ambiguousNestedBlock(nesting - 1), + "nested5": ambiguousNestedBlock(nesting - 1), + "nested6": ambiguousNestedBlock(nesting - 1), + "nested7": ambiguousNestedBlock(nesting - 1), + "nested8": ambiguousNestedBlock(nesting - 1), + "nested9": ambiguousNestedBlock(nesting - 1), + } + } + return ret +} + +func schemaWithAmbiguousNestedBlock(nesting int) *configschema.Block { + return &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "maybe_block": ambiguousNestedBlock(nesting), + }, + } +} + +const configForFixupBlockAttrsBenchmark = ` +maybe_block { + a = "hello" + b = "world" + nested0 { + a = "the" + nested1 { + a = "deeper" + nested2 { + a = "we" + nested3 { + a = "go" + b = "inside" + } + } + } + } +} +` + +func configBodyForFixupBlockAttrsBenchmark() hcl.Body { + f, diags := hclsyntax.ParseConfig([]byte(configForFixupBlockAttrsBenchmark), "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + panic("test configuration is invalid") + } + return f.Body +} + +func BenchmarkFixUpBlockAttrs(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + body := 
configBodyForFixupBlockAttrsBenchmark() + schema := schemaWithAmbiguousNestedBlock(5) + b.StartTimer() + + spec := schema.DecoderSpec() + fixedBody := FixUpBlockAttrs(body, schema) + val, diags := hcldec.Decode(fixedBody, spec, nil) + if diags.HasErrors() { + b.Fatal("diagnostics during decoding", diags) + } + if !val.Type().IsObjectType() { + b.Fatal("result is not an object") + } + blockVal := val.GetAttr("maybe_block") + if !blockVal.Type().IsListType() || blockVal.LengthInt() != 1 { + b.Fatal("result has wrong value for 'maybe_block'") + } + } +} diff --git a/pkg/lang/blocktoattr/fixup_test.go b/pkg/lang/blocktoattr/fixup_test.go new file mode 100644 index 00000000000..aadf9c7b877 --- /dev/null +++ b/pkg/lang/blocktoattr/fixup_test.go @@ -0,0 +1,526 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestFixUpBlockAttrs(t *testing.T) { + fooSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + }, + } + + tests := map[string]struct { + src string + json bool + schema *configschema.Block + want cty.Value + wantErrs bool + }{ + "empty": { + src: ``, + schema: &configschema.Block{}, + want: cty.EmptyObjectVal, + }, + "empty JSON": { + src: `{}`, + json: true, + schema: &configschema.Block{}, + want: cty.EmptyObjectVal, + }, + "unset": { + src: ``, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + 
"unset JSON": { + src: `{}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + "no fixup required, with one value": { + src: ` +foo = [ + { + bar = "baz" + }, +] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }, + "no fixup required, with two values": { + src: ` +foo = [ + { + bar = "baz" + }, + { + bar = "boop" + }, +] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "no fixup required, with values, JSON": { + src: `{"foo": [{"bar": "baz"}]}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }, + "no fixup required, empty": { + src: ` +foo = [] +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()), + }), + }, + "no fixup required, empty, JSON": { + src: `{"foo":[]}`, + json: true, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()), + }), + }, + "fixup one block": { + src: ` +foo { + bar = "baz" +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }, + "fixup one block omitting attribute": { + src: ` +foo {} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.String), + }), + }), + }), + }, + "fixup two blocks": { + src: ` +foo { + bar = baz +} +foo { + bar = "boop" +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "interaction with dynamic block generation": { + src: ` +dynamic "foo" { + for_each = ["baz", beep] + content { + bar = foo.value + } +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }, + "dynamic block with empty iterator": { + src: ` +dynamic "foo" { + for_each = [] + content { + bar = foo.value + } +} +`, + schema: fooSchema, + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(fooSchema.Attributes["foo"].Type), + }), + }, + "both attribute and block syntax": { + src: ` +foo = [] +foo { + bar = "baz" +} +`, + schema: fooSchema, + wantErrs: true, // Unsupported block type (user must be consistent about whether they consider foo to be a block type or an attribute) + want: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + }, + "fixup inside block": { + src: ` +container { + foo { + bar = "baz" + } + foo { + bar = "boop" + } +} +container { + foo { + bar = beep + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + Block: *fooSchema, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": 
cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + "fixup inside attribute-as-block": { + src: ` +container { + foo { + bar = "baz" + } + foo { + bar = "boop" + } +} +container { + foo { + bar = beep + } +} +`, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "container": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + })), + Optional: true, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("boop"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + "nested fixup with dynamic block generation": { + src: ` +container { + dynamic "foo" { + for_each = ["baz", beep] + content { + bar = foo.value + } + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + Block: *fooSchema, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": 
cty.StringVal("beep value"), + }), + }), + }), + }), + }), + }, + + "missing nested block items": { + src: ` +container { + foo { + bar = "one" + } +} +`, + schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "container": { + Nesting: configschema.NestingList, + MinItems: 2, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + }, + }, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("baz"), + }), + }), + }), + }), + }), + wantErrs: true, + }, + "no fixup allowed with NestedType": { + src: ` + container { + foo = "one" + } + `, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "container": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + }, + }, + }, + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + )), + }), + wantErrs: true, + }, + "no fixup allowed new types": { + src: ` + container { + foo = "one" + } + `, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // This could be a ConfigModeAttr fixup + "container": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }, + // But the presence of this type means it must have been + // declared by a new SDK + "new_type": { + Type: cty.Object(map[string]cty.Type{ + "boo": cty.String, + }), + }, + }, + }, + want: cty.ObjectVal(map[string]cty.Value{ + "container": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + )), + }), + wantErrs: 
true, + }, + } + + ctx := &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "bar": cty.StringVal("bar value"), + "baz": cty.StringVal("baz value"), + "beep": cty.StringVal("beep value"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var f *hcl.File + var diags hcl.Diagnostics + if test.json { + f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") + } else { + f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) + } + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + t.FailNow() + } + + // We'll expand dynamic blocks in the body first, to mimic how + // we process this fixup when using the main "lang" package API. + spec := test.schema.DecoderSpec() + body := dynblock.Expand(f.Body, ctx) + + body = FixUpBlockAttrs(body, test.schema) + got, diags := hcldec.Decode(body, spec, ctx) + + if test.wantErrs { + if !diags.HasErrors() { + t.Errorf("succeeded, but want error\ngot: %#v", got) + } + + // check that our wrapped body returns the correct context by + // verifying the Subject is valid. + for _, d := range diags { + if d.Subject.Filename == "" { + t.Errorf("empty diagnostic subject: %#v", d.Subject) + } + } + return + } + + if !test.want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want) + } + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + }) + } +} diff --git a/pkg/lang/blocktoattr/functions.go b/pkg/lang/blocktoattr/functions.go new file mode 100644 index 00000000000..ac763aa5aae --- /dev/null +++ b/pkg/lang/blocktoattr/functions.go @@ -0,0 +1,50 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// ExpandedFunctions finds all of the global functions referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow functions to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.FunctionsHCLDec, +// which is itself a drop-in replacement for hcldec.Functions. +func ExpandedFunctions(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkFunctions(body) + return walkFunctions(rootNode, body, schema) +} + +func walkFunctions(node dynblock.WalkFunctionsNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkFunctions(child.Node, child.Body(), &blockS.Block)...) 
+ } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkFunctions(child.Node, child.Body(), synthSchema)...) + } + } + + return vars +} diff --git a/pkg/lang/blocktoattr/schema.go b/pkg/lang/blocktoattr/schema.go new file mode 100644 index 00000000000..d173081ff83 --- /dev/null +++ b/pkg/lang/blocktoattr/schema.go @@ -0,0 +1,151 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. 
We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. 
+ ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. +func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. +func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/pkg/lang/blocktoattr/variables.go b/pkg/lang/blocktoattr/variables.go new file mode 100644 index 00000000000..e696d6d7eb1 --- /dev/null +++ b/pkg/lang/blocktoattr/variables.go @@ -0,0 +1,50 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. +func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) 
+ } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) + } + } + + return vars +} diff --git a/pkg/lang/blocktoattr/variables_test.go b/pkg/lang/blocktoattr/variables_test.go new file mode 100644 index 00000000000..a8b1c70e984 --- /dev/null +++ b/pkg/lang/blocktoattr/variables_test.go @@ -0,0 +1,205 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package blocktoattr + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestExpandedVariables(t *testing.T) { + fooSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.String, + })), + Optional: true, + }, + "bar": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + } + + tests := map[string]struct { + src string + json bool + schema *configschema.Block + want []hcl.Traversal + }{ + "empty": { + src: ``, + schema: &configschema.Block{}, + want: nil, + }, + "attribute syntax": { + src: ` +foo = [ + { + bar = baz + }, +] +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 11, Byte: 23}, + End: 
hcl.Pos{Line: 4, Column: 14, Byte: 26}, + }, + }, + }, + }, + }, + "block syntax": { + src: ` +foo { + bar = baz +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 9, Byte: 15}, + End: hcl.Pos{Line: 3, Column: 12, Byte: 18}, + }, + }, + }, + }, + }, + "block syntax with nested blocks": { + src: ` +foo { + bar { + boop = baz + } +} +`, + schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "bar": cty.List(cty.Object(map[string]cty.Type{ + "boop": cty.String, + })), + })), + Optional: true, + }, + }, + }, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 4, Column: 12, Byte: 26}, + End: hcl.Pos{Line: 4, Column: 15, Byte: 29}, + }, + }, + }, + }, + }, + "dynamic block syntax": { + src: ` +dynamic "foo" { + for_each = beep + content { + bar = baz + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + { + hcl.TraverseRoot{ + Name: "baz", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 5, Column: 11, Byte: 57}, + End: hcl.Pos{Line: 5, Column: 14, Byte: 60}, + }, + }, + }, + }, + }, + "misplaced dynamic block": { + src: ` +dynamic "bar" { + for_each = beep + content { + key = val + } +} +`, + schema: fooSchema, + want: []hcl.Traversal{ + { + hcl.TraverseRoot{ + Name: "beep", + SrcRange: hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 3, Column: 14, Byte: 30}, + End: hcl.Pos{Line: 3, Column: 18, Byte: 34}, + }, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var f *hcl.File + var diags hcl.Diagnostics + if 
test.json { + f, diags = hcljson.Parse([]byte(test.src), "test.tf.json") + } else { + f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1}) + } + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("unexpected diagnostic: %s", diag) + } + t.FailNow() + } + + got := ExpandedVariables(f.Body, test.schema) + + co := cmpopts.IgnoreUnexported(hcl.TraverseRoot{}) + if !cmp.Equal(got, test.want, co) { + t.Errorf("wrong result\n%s", cmp.Diff(test.want, got, co)) + } + }) + } + +} diff --git a/pkg/lang/data.go b/pkg/lang/data.go new file mode 100644 index 00000000000..17b5bf24841 --- /dev/null +++ b/pkg/lang/data.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the OpenTofu codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. 
+type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable, source addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModule(addrs.ModuleCall, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetOutput(addrs.OutputValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetCheckBlock(addrs.Check, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/pkg/lang/data_test.go b/pkg/lang/data_test.go new file mode 100644 index 00000000000..7f5fac5c01d --- /dev/null +++ b/pkg/lang/data_test.go @@ -0,0 +1,78 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type dataForTests struct { + CountAttrs map[string]cty.Value + ForEachAttrs map[string]cty.Value + Resources map[string]cty.Value + LocalValues map[string]cty.Value + OutputValues map[string]cty.Value + Modules map[string]cty.Value + PathAttrs map[string]cty.Value + TerraformAttrs map[string]cty.Value + InputVariables map[string]cty.Value + CheckBlocks map[string]cty.Value +} + +var _ Data = &dataForTests{} + +func (d *dataForTests) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable, source addrs.Referenceable) tfdiags.Diagnostics { + return nil // does nothing in this stub implementation +} + +func (d *dataForTests) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.CountAttrs[addr.Name], nil +} + +func (d *dataForTests) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.ForEachAttrs[addr.Name], nil +} + +func (d *dataForTests) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.Resources[addr.String()], nil +} + +func (d *dataForTests) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.InputVariables[addr.Name], nil +} + +func (d *dataForTests) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.LocalValues[addr.Name], nil +} + +func (d *dataForTests) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.Modules[addr.String()], nil +} + +func (d *dataForTests) GetModuleInstanceOutput(addr addrs.ModuleCallInstanceOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // This will panic if the module object 
does not have the requested attribute + obj := d.Modules[addr.Call.String()] + return obj.GetAttr(addr.Name), nil +} + +func (d *dataForTests) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.PathAttrs[addr.Name], nil +} + +func (d *dataForTests) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.TerraformAttrs[addr.Name], nil +} + +func (d *dataForTests) GetOutput(addr addrs.OutputValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.OutputValues[addr.Name], nil +} + +func (d *dataForTests) GetCheckBlock(addr addrs.Check, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + return d.CheckBlocks[addr.Name], nil +} diff --git a/pkg/lang/doc.go b/pkg/lang/doc.go new file mode 100644 index 00000000000..bf91bf15ea6 --- /dev/null +++ b/pkg/lang/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lang deals with the runtime aspects of OpenTofu's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. +package lang diff --git a/pkg/lang/eval.go b/pkg/lang/eval.go new file mode 100644 index 00000000000..8e293b2c3db --- /dev/null +++ b/pkg/lang/eval.go @@ -0,0 +1,597 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang/blocktoattr" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(s.ParseRef, traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. 
+func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(s.ParseRef, body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(s.enhanceFunctionDiags(evalDiags)) + + return val, diags +} + +// EvalSelfBlock evaluates the given body only within the scope of the provided +// object and instance key data. References to the object must use self, and the +// key data will only contain count.index or each.key. The static values for +// terraform and path will also be available in this context. 
+func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschema.Block, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + spec := schema.DecoderSpec() + + vals := make(map[string]cty.Value) + vals["self"] = self + + if !keyData.CountIndex.IsNull() { + vals["count"] = cty.ObjectVal(map[string]cty.Value{ + "index": keyData.CountIndex, + }) + } + if !keyData.EachKey.IsNull() { + vals["each"] = cty.ObjectVal(map[string]cty.Value{ + "key": keyData.EachKey, + }) + } + + refs, refDiags := References(s.ParseRef, hcldec.Variables(body, spec)) + diags = diags.Append(refDiags) + + terraformAttrs := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + + // We could always load the static values for Path and Terraform values, + // but we want to parse the references so that we can get source ranges for + // user diagnostics. + for _, ref := range refs { + // we already loaded the self value + if ref.Subject == addrs.Self { + continue + } + + switch subj := ref.Subject.(type) { + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + + case addrs.CountAttr, addrs.ForEachAttr: + // each and count have already been handled. + + default: + // This should have been caught in validation, but point the user + // to the correct location in case something slipped through. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid reference`, + Detail: fmt.Sprintf("The reference to %q is not valid in this context", ref.Subject), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + vals["tofu"] = cty.ObjectVal(terraformAttrs) + + ctx := &hcl.EvalContext{ + Variables: vals, + // TODO consider if any provider functions make sense here + Functions: s.Functions(), + } + + val, decDiags := hcldec.Decode(body, schema.DecoderSpec(), ctx) + diags = diags.Append(s.enhanceFunctionDiags(decDiags)) + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(s.ParseRef, expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. 
+ return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(s.enhanceFunctionDiags(evalDiags)) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: ctx, + }) + } + } + + return val, diags +} + +// Identify and enhance any function related dialogs produced by a hcl.EvalContext +func (s *Scope) enhanceFunctionDiags(diags hcl.Diagnostics) hcl.Diagnostics { + out := make(hcl.Diagnostics, len(diags)) + for i, diag := range diags { + out[i] = diag + + if funcExtra, ok := diag.Extra.(hclsyntax.FunctionCallUnknownDiagExtra); ok { + funcName := funcExtra.CalledFunctionName() + // prefix::stuff:: + fullNamespace := funcExtra.CalledFunctionNamespace() + + if len(fullNamespace) == 0 { + // Not a namespaced function, no enhancements nessesary + continue + } + + // Insert the enhanced copy of diag into diags + enhanced := *diag + out[i] = &enhanced + + // Update enhanced with additional details + + fn := addrs.ParseFunction(fullNamespace + funcName) + + if fn.IsNamespace(addrs.FunctionNamespaceCore) { + // Error is in core namespace, mirror non-core equivalent + enhanced.Summary = "Call to unknown function" + enhanced.Detail = fmt.Sprintf("There is no builtin (%s::) function named %q.", addrs.FunctionNamespaceCore, funcName) + } else if fn.IsNamespace(addrs.FunctionNamespaceProvider) { + if _, err := fn.AsProviderFunction(); err != nil { + // complete mismatch or invalid prefix + enhanced.Summary = "Invalid function format" + enhanced.Detail = err.Error() + } + } else { + enhanced.Summary = "Unknown function namespace" + enhanced.Detail = fmt.Sprintf("Function %q does not 
exist within a valid namespace (%s)", fn, strings.Join(addrs.FunctionNamespaces, ",")) + } + // Function / Provider not found handled by eval_context_builtin.go + } + } + return out +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. + ctx, ctxDiags := s.evalContext(nil, []*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. 
+func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(nil, refs, s.SelfAddr) +} + +// EvalContextWithParent is exactly the same as EvalContext except the resulting hcl.EvalContext +// will be derived from the given parental hcl.EvalContext. It will enable different hcl mechanisms +// to iteratively lookup target functions and variables in EvalContext's parent. +// See Traversal.TraverseAbs (hcl) or FunctionCallExpr.Value (hcl/hclsyntax) for more details. +func (s *Scope) EvalContextWithParent(p *hcl.EvalContext, refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(p, refs, s.SelfAddr) +} + +func (s *Scope) evalContext(parent *hcl.EvalContext, refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + + // Calling NewChild() on a nil parent will + // produce an EvalContext with no parent. + ctx := parent.NewChild() + ctx.Functions = make(map[string]function.Function) + ctx.Variables = make(map[string]cty.Value) + + for name, fn := range s.Functions() { + ctx.Functions[name] = fn + } + + // Easy path for common case where there are no references at all. + if len(refs) == 0 { + return ctx, diags + } + + // First we'll do static validation of the references. This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. + staticDiags := s.Data.StaticValidateReferences(refs, selfAddr, s.SourceAddr) + diags = diags.Append(staticDiags) + if staticDiags.HasErrors() { + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. 
+ // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. + varBuilder := s.newEvalVarBuilder() + + for _, ref := range refs { + if ref.Subject == addrs.Self { + diags.Append(varBuilder.putSelfValue(selfAddr, ref)) + continue + } + + if subj, ok := ref.Subject.(addrs.ProviderFunction); ok { + // Inject function directly into context + if _, ok := ctx.Functions[subj.String()]; !ok { + fn, fnDiags := s.ProviderFunctions(subj, ref.SourceRange) + diags = diags.Append(fnDiags) + + if !fnDiags.HasErrors() { + ctx.Functions[subj.String()] = *fn + } + } + + continue + } + + diags = diags.Append(varBuilder.putValueBySubject(ref)) + } + + varBuilder.buildAllVariablesInto(ctx.Variables) + + return ctx, diags +} + +type evalVarBuilder struct { + s *Scope + + dataResources map[string]map[string]cty.Value + managedResources map[string]map[string]cty.Value + wholeModules map[string]cty.Value + inputVariables map[string]cty.Value + localValues map[string]cty.Value + outputValues map[string]cty.Value + pathAttrs map[string]cty.Value + terraformAttrs map[string]cty.Value + countAttrs map[string]cty.Value + forEachAttrs map[string]cty.Value + checkBlocks map[string]cty.Value + self cty.Value +} + +func (s *Scope) newEvalVarBuilder() *evalVarBuilder { + return &evalVarBuilder{ + s: s, + + dataResources: map[string]map[string]cty.Value{}, + managedResources: map[string]map[string]cty.Value{}, + wholeModules: map[string]cty.Value{}, + inputVariables: map[string]cty.Value{}, + localValues: map[string]cty.Value{}, + outputValues: map[string]cty.Value{}, + pathAttrs: map[string]cty.Value{}, + terraformAttrs: map[string]cty.Value{}, + countAttrs: map[string]cty.Value{}, + forEachAttrs: map[string]cty.Value{}, + checkBlocks: map[string]cty.Value{}, + } +} + +func (b 
*evalVarBuilder) putSelfValue(selfAddr addrs.Referenceable, ref *addrs.Reference) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if selfAddr == nil { + return diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner, connection, and postcondition blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + if selfAddr == addrs.Self { + // Programming error: the self address cannot alias itself. + panic("scope SelfAddr attempting to alias itself") + } + + // self can only be used within a resource instance + subj, ok := selfAddr.(addrs.ResourceInstance) + if !ok { + panic("BUG: self addr must be a resource instance, got " + reflect.TypeOf(selfAddr).String()) + } + + val, valDiags := normalizeRefValue(b.s.Data.GetResource(subj.ContainingResource(), ref.SourceRange)) + + diags = diags.Append(valDiags) + + // Self is an exception in that it must always resolve to a + // particular instance. We will still insert the full resource into + // the context below. + var hclDiags hcl.Diagnostics + // We should always have a valid self index by this point, but in + // the case of an error, self may end up as a cty.DynamicValue. 
+ switch k := subj.Key.(type) { + case addrs.IntKey: + b.self, hclDiags = hcl.Index(val, cty.NumberIntVal(int64(k)), ref.SourceRange.ToHCL().Ptr()) + case addrs.StringKey: + b.self, hclDiags = hcl.Index(val, cty.StringVal(string(k)), ref.SourceRange.ToHCL().Ptr()) + default: + b.self = val + } + diags = diags.Append(hclDiags) + + return diags +} + +func (b *evalVarBuilder) putValueBySubject(ref *addrs.Reference) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + rawSubj := ref.Subject + rng := ref.SourceRange + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs, however we are removing the possibility of + // Instances beforehand. + switch addr := rawSubj.(type) { + case addrs.ResourceInstance: + rawSubj = addr.ContainingResource() + case addrs.ModuleCallInstance: + rawSubj = addr.Call + case addrs.ModuleCallInstanceOutput: + rawSubj = addr.Call.Call + } + + var normDiags tfdiags.Diagnostics + + switch subj := rawSubj.(type) { + case addrs.Resource: + diags = diags.Append(b.putResourceValue(subj, rng)) + + case addrs.ModuleCall: + b.wholeModules[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetModule(subj, rng)) + + case addrs.InputVariable: + b.inputVariables[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetInputVariable(subj, rng)) + + case addrs.LocalValue: + b.localValues[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetLocalValue(subj, rng)) + + case addrs.PathAttr: + b.pathAttrs[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetPathAttr(subj, rng)) + + case addrs.TerraformAttr: + b.terraformAttrs[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetTerraformAttr(subj, rng)) + + case addrs.CountAttr: + b.countAttrs[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetCountAttr(subj, rng)) + + case addrs.ForEachAttr: + b.forEachAttrs[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetForEachAttr(subj, rng)) + + case addrs.OutputValue: + b.outputValues[subj.Name], normDiags = 
normalizeRefValue(b.s.Data.GetOutput(subj, rng)) + + case addrs.Check: + b.outputValues[subj.Name], normDiags = normalizeRefValue(b.s.Data.GetCheckBlock(subj, rng)) + + default: + // Should never happen + panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) + } + + diags = diags.Append(normDiags) + + return diags +} + +func (b *evalVarBuilder) putResourceValue(res addrs.Resource, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var into map[string]map[string]cty.Value + + switch res.Mode { + case addrs.ManagedResourceMode: + into = b.managedResources + case addrs.DataResourceMode: + into = b.dataResources + case addrs.InvalidResourceMode: + panic("BUG: got invalid resource mode") + default: + panic(fmt.Errorf("BUG: got undefined ResourceMode %s", res.Mode)) + } + + val, diags := normalizeRefValue(b.s.Data.GetResource(res, rng)) + + if into[res.Type] == nil { + into[res.Type] = make(map[string]cty.Value) + } + into[res.Type][res.Name] = val + + return diags +} + +func (b *evalVarBuilder) buildAllVariablesInto(vals map[string]cty.Value) { + // Managed resources are exposed in two different locations. The primary + // is at the top level where the resource type name is the root of the + // traversal, but we also expose them under "resource" as an escaping + // technique if we add a reserved name in a future language edition which + // conflicts with someone's existing provider. 
+ for k, v := range buildResourceObjects(b.managedResources) { + vals[k] = v + } + vals["resource"] = cty.ObjectVal(buildResourceObjects(b.managedResources)) + + vals["data"] = cty.ObjectVal(buildResourceObjects(b.dataResources)) + vals["module"] = cty.ObjectVal(b.wholeModules) + vals["var"] = cty.ObjectVal(b.inputVariables) + vals["local"] = cty.ObjectVal(b.localValues) + vals["path"] = cty.ObjectVal(b.pathAttrs) + vals["terraform"] = cty.ObjectVal(b.terraformAttrs) + vals["tofu"] = cty.ObjectVal(b.terraformAttrs) + vals["count"] = cty.ObjectVal(b.countAttrs) + vals["each"] = cty.ObjectVal(b.forEachAttrs) + + // Checks and outputs are conditionally included in the available scope, so + // we'll only write out their values if we actually have something for them. + if len(b.checkBlocks) > 0 { + vals["check"] = cty.ObjectVal(b.checkBlocks) + } + + if len(b.outputValues) > 0 { + vals["output"] = cty.ObjectVal(b.outputValues) + } + + if b.self != cty.NilVal { + vals["self"] = b.self + } +} + +func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + for typeName, nameVals := range resources { + vals[typeName] = cty.ObjectVal(nameVals) + } + return vals +} + +func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { + if diags.HasErrors() { + // If there are errors then we will force an unknown result so that + // we can still evaluate and catch type errors but we'll avoid + // producing redundant re-statements of the same errors we've already + // dealt with here. + return cty.UnknownVal(val.Type()), diags + } + return val, diags +} diff --git a/pkg/lang/eval_test.go b/pkg/lang/eval_test.go new file mode 100644 index 00000000000..182ecb7557c --- /dev/null +++ b/pkg/lang/eval_test.go @@ -0,0 +1,1025 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/instances" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestScopeEvalContext(t *testing.T) { + data := &dataForTests{ + CountAttrs: map[string]cty.Value{ + "index": cty.NumberIntVal(0), + }, + ForEachAttrs: map[string]cty.Value{ + "key": cty.StringVal("a"), + "value": cty.NumberIntVal(1), + }, + Resources: map[string]cty.Value{ + "null_resource.foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + "data.null_data_source.foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + "null_resource.multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + "null_resource.each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + "null_resource.multi[1]": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }, + LocalValues: map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + Modules: map[string]cty.Value{ + "module.foo": cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), + }, + PathAttrs: map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + }, + TerraformAttrs: map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }, + InputVariables: 
map[string]cty.Value{ + "baz": cty.StringVal("boop"), + }, + } + + tests := []struct { + Expr string + Want map[string]cty.Value + }{ + { + `12`, + map[string]cty.Value{}, + }, + { + `count.index`, + map[string]cty.Value{ + "count": cty.ObjectVal(map[string]cty.Value{ + "index": cty.NumberIntVal(0), + }), + }, + }, + { + `each.key`, + map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "key": cty.StringVal("a"), + }), + }, + }, + { + `each.value`, + map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "value": cty.NumberIntVal(1), + }), + }, + }, + { + `local.foo`, + map[string]cty.Value{ + "local": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + }, + }, + { + `null_resource.foo`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `null_resource.foo.attr`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `null_resource.multi`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.multi[1]`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.each["each1"]`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + }), + }, + }, + { + // at this level, all instance references return the entire resource + `null_resource.each["each1"].attr`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + 
"each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "each": cty.ObjectVal(map[string]cty.Value{ + "each0": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each0"), + }), + "each1": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("each1"), + }), + }), + }), + }), + }, + }, + { + `foo(null_resource.multi, null_resource.multi[1])`, + map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + "resource": cty.ObjectVal(map[string]cty.Value{ + "null_resource": cty.ObjectVal(map[string]cty.Value{ + "multi": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi0"), + }), + cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }), + }), + }), + }, + }, + { + `data.null_data_source.foo`, + map[string]cty.Value{ + "data": cty.ObjectVal(map[string]cty.Value{ + "null_data_source": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }), + }), + }, + }, + { + `module.foo`, + map[string]cty.Value{ + "module": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), + }), + }, + }, + // any module reference returns the entire module + { + `module.foo.output1`, + map[string]cty.Value{ + "module": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "output0": cty.StringVal("bar0"), + "output1": cty.StringVal("bar1"), + }), + }), + }, + }, + { + `path.module`, + map[string]cty.Value{ + "path": 
cty.ObjectVal(map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + }), + }, + }, + { + `self.baz`, + map[string]cty.Value{ + "self": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("multi1"), + }), + }, + }, + { + `terraform.workspace`, + map[string]cty.Value{ + "terraform": cty.ObjectVal(map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }), + "tofu": cty.ObjectVal(map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }), + }, + }, + { + `tofu.workspace`, + map[string]cty.Value{ + "terraform": cty.ObjectVal(map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }), + "tofu": cty.ObjectVal(map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }), + }, + }, + { + `var.baz`, + map[string]cty.Value{ + "var": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("boop"), + }), + }, + }, + } + + for _, test := range tests { + t.Run(test.Expr, func(t *testing.T) { + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.Expr), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + refs, refsDiags := ReferencesInExpr(addrs.ParseRef, expr) + if refsDiags.HasErrors() { + t.Fatal(refsDiags.Err()) + } + + scope := &Scope{ + Data: data, + ParseRef: addrs.ParseRef, + + // "self" will just be an arbitrary one of the several resource + // instances we have in our test dataset. + SelfAddr: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "multi", + }, + Key: addrs.IntKey(1), + }, + } + ctx, ctxDiags := scope.EvalContext(refs) + if ctxDiags.HasErrors() { + t.Fatal(ctxDiags.Err()) + } + + // For easier test assertions we'll just remove any top-level + // empty objects from our variables map. 
+ for k, v := range ctx.Variables { + if v.RawEquals(cty.EmptyObjectVal) { + delete(ctx.Variables, k) + } + } + + gotVal := cty.ObjectVal(ctx.Variables) + wantVal := cty.ObjectVal(test.Want) + + if !gotVal.RawEquals(wantVal) { + // We'll JSON-ize our values here just so it's easier to + // read them in the assertion output. + gotJSON := formattedJSONValue(gotVal) + wantJSON := formattedJSONValue(wantVal) + + t.Errorf( + "wrong result\nexpr: %s\ngot: %s\nwant: %s", + test.Expr, gotJSON, wantJSON, + ) + } + }) + } +} + +// TestScopeEvalContextWithParent tests if the resulting EvalCtx has correct parent. +func TestScopeEvalContextWithParent(t *testing.T) { + t.Run("with-parent", func(t *testing.T) { + barStr, barFunc := cty.StringVal("bar"), function.New(&function.Spec{ + Impl: func(_ []cty.Value, _ cty.Type) (cty.Value, error) { + return cty.NilVal, nil + }, + }) + + scope, parent := &Scope{}, &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "foo": barStr, + }, + Functions: map[string]function.Function{ + "foo": barFunc, + }, + } + + child, diags := scope.EvalContextWithParent(parent, nil) + if len(diags) != 0 { + t.Errorf("Unexpected diagnostics:") + for _, diag := range diags { + t.Errorf("- %s", diag) + } + return + } + + if child.Parent() == nil { + t.Fatalf("Child EvalCtx has no parent") + } + + if child.Parent() != parent { + t.Fatalf("Child EvalCtx has different parent:\n GOT:%v\nWANT:%v", child.Parent(), parent) + } + + if ln := len(child.Parent().Variables); ln != 1 { + t.Fatalf("EvalContextWithParent modified parent's variables: incorrent length: %d", ln) + } + + if v := child.Parent().Variables["foo"]; !v.RawEquals(barStr) { + t.Fatalf("EvalContextWithParent modified parent's variables:\n GOT:%v\nWANT:%v", v, barStr) + } + + if ln := len(child.Parent().Functions); ln != 1 { + t.Fatalf("EvalContextWithParent modified parent's functions: incorrent length: %d", ln) + } + + if v := child.Parent().Functions["foo"]; !reflect.DeepEqual(v, barFunc) { + 
t.Fatalf("EvalContextWithParent modified parent's functions:\n GOT:%v\nWANT:%v", v, barFunc) + } + }) + + t.Run("zero-parent", func(t *testing.T) { + scope := &Scope{} + + root, diags := scope.EvalContextWithParent(nil, nil) + if len(diags) != 0 { + t.Errorf("Unexpected diagnostics:") + for _, diag := range diags { + t.Errorf("- %s", diag) + } + return + } + + if root.Parent() != nil { + t.Fatalf("Resulting EvalCtx has unexpected parent: %v", root.Parent()) + } + }) +} + +func TestScopeExpandEvalBlock(t *testing.T) { + nestedObjTy := cty.Object(map[string]cty.Type{ + "boop": cty.String, + }) + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "list_of_obj": {Type: cty.List(nestedObjTy), Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + data := &dataForTests{ + LocalValues: map[string]cty.Value{ + "greeting": cty.StringVal("howdy"), + "list": cty.ListVal([]cty.Value{ + cty.StringVal("elem0"), + cty.StringVal("elem1"), + }), + "map": cty.MapVal(map[string]cty.Value{ + "key1": cty.StringVal("val1"), + "key2": cty.StringVal("val2"), + }), + }, + } + + tests := map[string]struct { + Config string + Want cty.Value + }{ + "empty": { + ` + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "literal attribute": { + ` + foo = "hello" + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "variable attribute": { + ` + foo = local.greeting + 
`, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("howdy"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "one static block": { + ` + bar "static" {} + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "static": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.NullVal(cty.String), + }), + }), + }), + }, + "two static blocks": { + ` + bar "static0" { + baz = 0 + } + bar "static1" { + baz = 1 + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "static0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "static1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + }), + }), + }, + "dynamic blocks from list": { + ` + dynamic "bar" { + for_each = local.list + labels = [bar.value] + content { + baz = bar.key + } + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "elem0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "elem1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + }), + }), + }, + "dynamic blocks from map": { + ` + dynamic "bar" { + for_each = local.map + labels = [bar.key] + content { + baz = bar.value + } + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "key1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val1"), + }), + "key2": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val2"), + }), + }), + }), + }, + "list-of-object 
attribute": { + ` + list_of_obj = [ + { + boop = local.greeting + }, + ] + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("howdy"), + }), + }), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "list-of-object attribute as blocks": { + ` + list_of_obj { + boop = local.greeting + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + "list_of_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("howdy"), + }), + }), + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }, + "lots of things at once": { + ` + foo = "whoop" + bar "static0" { + baz = "s0" + } + dynamic "bar" { + for_each = local.list + labels = [bar.value] + content { + baz = bar.key + } + } + bar "static1" { + baz = "s1" + } + dynamic "bar" { + for_each = local.map + labels = [bar.key] + content { + baz = bar.value + } + } + bar "static2" { + baz = "s2" + } + `, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("whoop"), + "list_of_obj": cty.NullVal(cty.List(nestedObjTy)), + "bar": cty.MapVal(map[string]cty.Value{ + "key1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val1"), + }), + "key2": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("val2"), + }), + "elem0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("0"), + }), + "elem1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("1"), + }), + "static0": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s0"), + }), + "static1": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s1"), + }), + "static2": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("s2"), + }), + }), + }), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + file, parseDiags := 
hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + body := file.Body + scope := &Scope{ + Data: data, + ParseRef: addrs.ParseRef, + } + + body, expandDiags := scope.ExpandBlock(body, schema) + if expandDiags.HasErrors() { + t.Fatal(expandDiags.Err()) + } + + got, valDiags := scope.EvalBlock(body, schema) + if valDiags.HasErrors() { + t.Fatal(valDiags.Err()) + } + + if !got.RawEquals(test.Want) { + // We'll JSON-ize our values here just so it's easier to + // read them in the assertion output. + gotJSON := formattedJSONValue(got) + wantJSON := formattedJSONValue(test.Want) + + t.Errorf( + "wrong result\nconfig: %s\ngot: %s\nwant: %s", + test.Config, gotJSON, wantJSON, + ) + } + + }) + } + +} + +func formattedJSONValue(val cty.Value) string { + val = cty.UnknownAsNull(val) // since JSON can't represent unknowns + j, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + panic(err) + } + var buf bytes.Buffer + json.Indent(&buf, j, "", " ") + return buf.String() +} + +func TestScopeEvalSelfBlock(t *testing.T) { + data := &dataForTests{ + PathAttrs: map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + "cwd": cty.StringVal("/home/foo/bar"), + "root": cty.StringVal("/home/foo"), + }, + TerraformAttrs: map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }, + } + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + }, + "num": { + Type: cty.Number, + }, + }, + } + + tests := []struct { + Config string + Self cty.Value + KeyData instances.RepetitionData + Want map[string]cty.Value + }{ + { + Config: `attr = self.foo`, + Self: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + Want: map[string]cty.Value{ + 
"attr": cty.StringVal("bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `num = count.index`, + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + Want: map[string]cty.Value{ + "attr": cty.NullVal(cty.String), + "num": cty.NumberIntVal(0), + }, + }, + { + Config: `attr = each.key`, + KeyData: instances.RepetitionData{ + EachKey: cty.StringVal("a"), + }, + Want: map[string]cty.Value{ + "attr": cty.StringVal("a"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.cwd`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.module`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.root`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = terraform.workspace`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("default"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = tofu.workspace`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("default"), + "num": cty.NullVal(cty.Number), + }, + }, + } + + for _, test := range tests { + t.Run(test.Config, func(t *testing.T) { + file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + body := file.Body + + scope := &Scope{ + Data: data, + ParseRef: addrs.ParseRef, + } + + gotVal, ctxDiags := scope.EvalSelfBlock(body, test.Self, schema, test.KeyData) + if ctxDiags.HasErrors() { + t.Fatal(ctxDiags.Err()) + } + + wantVal := cty.ObjectVal(test.Want) + + if !gotVal.RawEquals(wantVal) { + t.Errorf( + "wrong result\nexpr: %s\ngot: %#v\nwant: %#v", + test.Config, gotVal, wantVal, + ) + } + }) + } +} + 
+func Test_enhanceFunctionDiags(t *testing.T) {
+	tests := []struct {
+		Name    string
+		Config  string
+		Summary string
+		Detail  string
+	}{
+		{
+			"Missing builtin function",
+			"attr = missing_function(54)",
+			"Call to unknown function",
+			"There is no function named \"missing_function\".",
+		},
+		{
+			"Missing core function",
+			"attr = core::missing_function(54)",
+			"Call to unknown function",
+			"There is no builtin (core::) function named \"missing_function\".",
+		},
+		{
+			"Invalid prefix",
+			"attr = magic::missing_function(54)",
+			"Unknown function namespace",
+			"Function \"magic::missing_function\" does not exist within a valid namespace (provider,core)",
+		},
+		{
+			"Too many namespaces",
+			"attr = provider::foo::bar::extra::extra2::missing_function(54)",
+			"Invalid function format",
+			"invalid provider function \"provider::foo::bar::extra::extra2::missing_function\": expected provider::<name>::<function> or provider::<name>::<alias>::<function>",
+		},
+	}
+
+	schema := &configschema.Block{
+		Attributes: map[string]*configschema.Attribute{
+			"attr": {
+				Type: cty.String,
+			},
+		},
+	}
+	spec := schema.DecoderSpec()
+
+	for _, test := range tests {
+		t.Run(test.Name, func(t *testing.T) {
+			file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1})
+			if len(parseDiags) != 0 {
+				t.Errorf("unexpected diagnostics during parse")
+				for _, diag := range parseDiags {
+					t.Errorf("- %s", diag)
+				}
+				return
+			}
+
+			body := file.Body
+
+			scope := &Scope{}
+
+			ctx, ctxDiags := scope.EvalContext(nil)
+			if ctxDiags.HasErrors() {
+				t.Fatalf("Unexpected ctxDiags, %#v", ctxDiags)
+			}
+
+			_, evalDiags := hcldec.Decode(body, spec, ctx)
+			diags := scope.enhanceFunctionDiags(evalDiags)
+			if len(diags) != 1 {
+				t.Fatalf("Expected 1 diag, got %d", len(diags))
+			}
+			diag := diags[0]
+			if diag.Summary != test.Summary {
+				t.Fatalf("Expected Summary %q, got %q", test.Summary, diag.Summary)
+			}
+			if diag.Detail != test.Detail {
+				t.Fatalf("Expected Detail %q, got %q", test.Detail, diag.Detail)
} + + }) + } +} diff --git a/pkg/lang/funcs/cidr.go b/pkg/lang/funcs/cidr.go new file mode 100644 index 00000000000..8fb07d31dca --- /dev/null +++ b/pkg/lang/funcs/cidr.go @@ -0,0 +1,300 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "math/big" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/kubegems/opentofu/pkg/ipaddr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CidrHostFunc contructs a function that calculates a full host IP address +// within a given IP network address prefix. +var CidrHostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum *big.Int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %w", err) + } + + ip, err := cidr.HostBig(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// CidrNetmaskFunc contructs a function that converts an IPv4 address prefix given +// in CIDR notation into a subnet mask address. 
+var CidrNetmaskFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "prefix",
+			Type: cty.String,
+		},
+	},
+	Type:         function.StaticReturnType(cty.String),
+	RefineResult: refineNotNull,
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		_, network, err := ipaddr.ParseCIDR(args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %w", err)
+		}
+
+		if network.IP.To4() == nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("IPv6 addresses cannot have a netmask: %s", args[0].AsString())
+		}
+
+		return cty.StringVal(ipaddr.IP(network.Mask).String()), nil
+	},
+})
+
+// CidrSubnetFunc constructs a function that calculates a subnet address within
+// a given IP network address prefix.
+var CidrSubnetFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "prefix",
+			Type: cty.String,
+		},
+		{
+			Name: "newbits",
+			Type: cty.Number,
+		},
+		{
+			Name: "netnum",
+			Type: cty.Number,
+		},
+	},
+	Type:         function.StaticReturnType(cty.String),
+	RefineResult: refineNotNull,
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var newbits int
+		if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		var netnum *big.Int
+		if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		_, network, err := ipaddr.ParseCIDR(args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %w", err)
+		}
+
+		newNetwork, err := cidr.SubnetBig(network, newbits, netnum)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(newNetwork.String()), nil
+	},
+})
+
+// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive
+// subnet addresses at once, rather than just a single subnet extension.
+var CidrSubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := ipaddr.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) 
+ if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// CidrContainsFunc constructs a function that checks whether a given IP address +// is within a given IP network address prefix. +var CidrContainsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "containing_prefix", + Type: cty.String, + }, + { + Name: "contained_ip_or_prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + prefix := args[0].AsString() + addr := args[1].AsString() + + // The first argument must be a CIDR prefix. + _, containing, err := ipaddr.ParseCIDR(prefix) + if err != nil { + return cty.UnknownVal(cty.Bool), err + } + + // The second argument can be either an IP address or a CIDR prefix. + // We will try parsing it as an IP address first. 
+		startIP := ipaddr.ParseIP(addr)
+		var endIP ipaddr.IP
+
+		// If the second argument did not parse as an IP, we will try parsing it
+		// as a CIDR prefix.
+		if startIP == nil {
+			_, contained, err := ipaddr.ParseCIDR(addr)
+
+			// If that also fails, we'll return an error.
+			if err != nil {
+				return cty.UnknownVal(cty.Bool), fmt.Errorf("invalid IP address or prefix: %s", addr)
+			}
+
+			// Otherwise, we will want to know the start and the end IP of the
+			// prefix, so that we can check whether both are contained in the
+			// containing prefix.
+			startIP, endIP = cidr.AddressRange(contained)
+		}
+
+		// We require that both addresses are of the same type, so that
+		// we can't accidentally compare an IPv4 address to an IPv6 prefix.
+		// The underlying Go function will always return false if this happens,
+		// but we want to return an error instead so that the caller can
+		// distinguish between a "legitimate" false result and an erroneous
+		// check.
+		if (startIP.To4() == nil) != (containing.IP.To4() == nil) {
+			return cty.UnknownVal(cty.Bool), fmt.Errorf("address family mismatch: %s vs. %s", prefix, addr)
+		}
+
+		// If the second argument was an IP address, we will check whether it
+		// is contained in the containing prefix, and that's our result.
+		result := containing.Contains(startIP)
+
+		// If the second argument was a CIDR prefix, we will also check whether
+		// the end IP of the prefix is contained in the containing prefix.
+		// One CIDR is contained in another CIDR iff both the start and the
+		// end IP of the contained CIDR are contained in the containing CIDR.
+		if endIP != nil {
+			result = result && containing.Contains(endIP)
+		}
+
+		return cty.BoolVal(result), nil
+	},
+})
+
+// CidrHost calculates a full host IP address within a given IP network address prefix.
+func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} + +// CidrSubnets calculates a sequence of consecutive subnet prefixes that may +// be of different prefix lengths under a common base prefix. +func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(newbits)+1) + args[0] = prefix + copy(args[1:], newbits) + return CidrSubnetsFunc.Call(args) +} + +// CidrContains checks whether a given IP address is within a given IP network address prefix. +func CidrContains(prefix, address cty.Value) (cty.Value, error) { + return CidrContainsFunc.Call([]cty.Value{prefix, address}) +} diff --git a/pkg/lang/funcs/cidr_test.go b/pkg/lang/funcs/cidr_test.go new file mode 100644 index 00000000000..a184cdfc2e3 --- /dev/null +++ b/pkg/lang/funcs/cidr_test.go @@ -0,0 +1,522 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestCidrHost(t *testing.T) { + tests := []struct { + Prefix cty.Value + Hostnum cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(5), + cty.StringVal("192.168.1.5"), + false, + }, + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(-5), + cty.StringVal("192.168.1.251"), + false, + }, + { + cty.StringVal("192.168.1.0/24"), + cty.NumberIntVal(-256), + cty.StringVal("192.168.1.0"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. We do always generate + // a canonical result regardless of the input, though. 
+ cty.StringVal("010.001.0.0/24"), + cty.NumberIntVal(6), + cty.StringVal("10.1.0.6"), + false, + }, + { + cty.StringVal("192.168.1.0/30"), + cty.NumberIntVal(255), + cty.UnknownVal(cty.String), + true, // 255 doesn't fit in two bits + }, + { + cty.StringVal("192.168.1.0/30"), + cty.NumberIntVal(-255), + cty.UnknownVal(cty.String), + true, // 255 doesn't fit in two bits + }, + { + cty.StringVal("not-a-cidr"), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, // not a valid CIDR mask + }, + { + cty.StringVal("10.256.0.0/8"), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, // can't have an octet >255 + }, + { // fractions are Not Ok + cty.StringVal("10.256.0.0/8"), + cty.NumberFloatVal(.75), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrhost(%#v, %#v)", test.Prefix, test.Hostnum), func(t *testing.T) { + got, err := CidrHost(test.Prefix, test.Hostnum) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCidrNetmask(t *testing.T) { + tests := []struct { + Prefix cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.1.0/24"), + cty.StringVal("255.255.255.0"), + false, + }, + { + cty.StringVal("192.168.1.0/32"), + cty.StringVal("255.255.255.255"), + false, + }, + { + cty.StringVal("0.0.0.0/0"), + cty.StringVal("0.0.0.0"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. 
+ cty.StringVal("010.001.0.0/24"), + cty.StringVal("255.255.255.0"), + false, + }, + { + cty.StringVal("not-a-cidr"), + cty.UnknownVal(cty.String), + true, // not a valid CIDR mask + }, + { + cty.StringVal("110.256.0.0/8"), + cty.UnknownVal(cty.String), + true, // can't have an octet >255 + }, + { + cty.StringVal("1::/64"), + cty.UnknownVal(cty.String), + true, // IPv6 is invalid + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrnetmask(%#v)", test.Prefix), func(t *testing.T) { + got, err := CidrNetmask(test.Prefix) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCidrSubnet(t *testing.T) { + tests := []struct { + Prefix cty.Value + Newbits cty.Value + Netnum cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("192.168.2.0/20"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.StringVal("192.168.6.0/24"), + false, + }, + { + cty.StringVal("fe80::/48"), + cty.NumberIntVal(16), + cty.NumberIntVal(6), + cty.StringVal("fe80:0:0:6::/64"), + false, + }, + { // IPv4 address encoded in IPv6 syntax gets normalized + cty.StringVal("::ffff:192.168.0.0/112"), + cty.NumberIntVal(8), + cty.NumberIntVal(6), + cty.StringVal("192.168.6.0/24"), + false, + }, + { + cty.StringVal("fe80::/48"), + cty.NumberIntVal(33), + cty.NumberIntVal(6), + cty.StringVal("fe80::3:0:0:0/81"), + false, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. 
We do always generate + // a canonical result regardless of the input, though. + cty.StringVal("010.001.0.0/24"), + cty.NumberIntVal(4), + cty.NumberIntVal(1), + cty.StringVal("10.1.0.16/28"), + false, + }, + { // not enough bits left + cty.StringVal("192.168.0.0/30"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // can't encode 16 in 2 bits + cty.StringVal("192.168.0.0/168"), + cty.NumberIntVal(2), + cty.NumberIntVal(16), + cty.UnknownVal(cty.String), + true, + }, + { // not a valid CIDR mask + cty.StringVal("not-a-cidr"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // can't have an octet >255 + cty.StringVal("10.256.0.0/8"), + cty.NumberIntVal(4), + cty.NumberIntVal(6), + cty.UnknownVal(cty.String), + true, + }, + { // fractions are Not Ok + cty.StringVal("10.256.0.0/8"), + cty.NumberFloatVal(2.0 / 3.0), + cty.NumberFloatVal(.75), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrsubnet(%#v, %#v, %#v)", test.Prefix, test.Newbits, test.Netnum), func(t *testing.T) { + got, err := CidrSubnet(test.Prefix, test.Newbits, test.Netnum) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} +func TestCidrSubnets(t *testing.T) { + tests := []struct { + Prefix cty.Value + Newbits []cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("10.0.0.0/21"), + []cty.Value{ + cty.NumberIntVal(3), + cty.NumberIntVal(3), + cty.NumberIntVal(3), + cty.NumberIntVal(4), + cty.NumberIntVal(4), + cty.NumberIntVal(4), + cty.NumberIntVal(7), + cty.NumberIntVal(7), + cty.NumberIntVal(7), + }, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/24"), + cty.StringVal("10.0.1.0/24"), + cty.StringVal("10.0.2.0/24"), + 
cty.StringVal("10.0.3.0/25"), + cty.StringVal("10.0.3.128/25"), + cty.StringVal("10.0.4.0/25"), + cty.StringVal("10.0.4.128/28"), + cty.StringVal("10.0.4.144/28"), + cty.StringVal("10.0.4.160/28"), + }), + ``, + }, + { + // We inadvertently inherited a pre-Go1.17 standard library quirk + // if parsing zero-prefix parts as decimal rather than octal. + // Go 1.17 resolved that quirk by making zero-prefix invalid, but + // we've preserved our existing behavior for backward compatibility, + // on the grounds that these functions are for generating addresses + // rather than validating or processing them. We do always generate + // a canonical result regardless of the input, though. + cty.StringVal("010.0.0.0/21"), + []cty.Value{ + cty.NumberIntVal(3), + }, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/24"), + }), + ``, + }, + { + cty.StringVal("10.0.0.0/30"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + }, + cty.UnknownVal(cty.List(cty.String)), + `would extend prefix to 33 bits, which is too long for an IPv4 address`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(1), + cty.NumberIntVal(1), + }, + cty.UnknownVal(cty.List(cty.String)), + `not enough remaining address space for a subnet with a prefix of 9 bits after 10.128.0.0/9`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(0), + }, + cty.UnknownVal(cty.List(cty.String)), + `must extend prefix by at least one bit`, + }, + { + cty.StringVal("10.0.0.0/8"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(-1), + }, + cty.UnknownVal(cty.List(cty.String)), + `must extend prefix by at least one bit`, + }, + { + cty.StringVal("fe80::/48"), + []cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(33), + }, + cty.UnknownVal(cty.List(cty.String)), + `may not extend prefix by more than 32 bits`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrsubnets(%#v, %#v)", test.Prefix, 
test.Newbits), func(t *testing.T) { + got, err := CidrSubnets(test.Prefix, test.Newbits...) + wantErr := test.Err != "" + + if wantErr { + if err == nil { + t.Fatal("succeeded; want error") + } + if err.Error() != test.Err { + t.Fatalf("wrong error\ngot: %s\nwant: %s", err.Error(), test.Err) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCidrContains(t *testing.T) { + noError := func(err error) bool { return err == nil } + + tests := []struct { + Prefix cty.Value + Address cty.Value + Want cty.Value + ErrFn func(error) bool + }{ + { + // IPv4, contained (IP). + cty.StringVal("192.168.2.0/20"), + cty.StringVal("192.168.2.1"), + cty.True, + noError, + }, + { + // IPv4, contained (CIDR). + cty.StringVal("192.168.2.0/20"), + cty.StringVal("192.168.2.0/22"), + cty.True, + noError, + }, + { + // IPv4, not contained. + cty.StringVal("192.168.2.0/20"), + cty.StringVal("192.126.2.1"), + cty.False, + noError, + }, + { + // IPv4, not contained (CIDR). + cty.StringVal("192.168.2.0/20"), + cty.StringVal("192.126.2.0/18"), + cty.False, + noError, + }, + { + // IPv6, contained. + cty.StringVal("fe80::/48"), + cty.StringVal("fe80::1"), + cty.True, + noError, + }, + { + // IPv6, not contained. + cty.StringVal("fe80::/48"), + cty.StringVal("fe81::1"), + cty.False, + noError, + }, + { + // Address family mismatch: IPv4 containing_prefix, IPv6 contained_ip_or_prefix (IP). + cty.StringVal("192.168.2.0/20"), + cty.StringVal("fe80::1"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "address family mismatch: 192.168.2.0/20 vs. fe80::1" + }, + }, + { + // Address family mismatch: IPv4 containing_prefix, IPv6 contained_ip_or_prefix (prefix). 
+ cty.StringVal("192.168.2.0/20"), + cty.StringVal("fe80::/24"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "address family mismatch: 192.168.2.0/20 vs. fe80::/24" + }, + }, + { + // Address family mismatch: IPv6 containing_prefix, IPv4 contained_ip_or_prefix (IP). + cty.StringVal("fe80::/48"), + cty.StringVal("192.168.2.1"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "address family mismatch: fe80::/48 vs. 192.168.2.1" + }, + }, + { + // Address family mismatch: IPv6 containing_prefix, IPv4 contained_ip_or_prefix (prefix). + cty.StringVal("fe80::/48"), + cty.StringVal("192.168.2.0/20"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "address family mismatch: fe80::/48 vs. 192.168.2.0/20" + }, + }, + { + // Input error: invalid CIDR address. + cty.StringVal("not-a-cidr"), + cty.StringVal("192.168.2.1"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "invalid CIDR address: not-a-cidr" + }, + }, + { + // Input error: invalid IP address. + cty.StringVal("192.168.2.0/20"), + cty.StringVal("not-an-address"), + cty.NilVal, + func(err error) bool { + return err != nil && err.Error() == "invalid IP address or prefix: not-an-address" + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("cidrcontains(%#v, %#v)", test.Prefix, test.Address), func(t *testing.T) { + got, err := CidrContains(test.Prefix, test.Address) + + if !test.ErrFn(err) { + t.Errorf("unexpected error: %v", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/collection.go b/pkg/lang/funcs/collection.go new file mode 100644 index 00000000000..e1a8ecef0da --- /dev/null +++ b/pkg/lang/funcs/collection.go @@ -0,0 +1,728 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package funcs

import (
	"errors"
	"fmt"
	"math/big"
	"sort"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
	"github.com/zclconf/go-cty/cty/gocty"
)

// LengthFunc constructs a function that returns the number of elements in a
// collection (list, set, map), the number of attributes/elements in a
// structural value (object, tuple), or the number of grapheme clusters in a
// string.
var LengthFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name:             "value",
			Type:             cty.DynamicPseudoType,
			AllowDynamicType: true,
			AllowUnknown:     true,
			AllowMarked:      true,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		collTy := args[0].Type()
		switch {
		case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType:
			return cty.Number, nil
		default:
			return cty.Number, errors.New("argument must be a string, a collection type, or a structural type")
		}
	},
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		coll := args[0]
		collTy := args[0].Type()
		// Marks on the whole value (not on individual elements) propagate
		// to the result via WithMarks below.
		marks := coll.Marks()
		switch {
		case collTy == cty.DynamicPseudoType:
			return cty.UnknownVal(cty.Number).WithMarks(marks), nil
		case collTy.IsTupleType():
			// Tuple and object lengths are determined entirely by the type,
			// so they are known even when element values are unknown.
			l := len(collTy.TupleElementTypes())
			return cty.NumberIntVal(int64(l)).WithMarks(marks), nil
		case collTy.IsObjectType():
			l := len(collTy.AttributeTypes())
			return cty.NumberIntVal(int64(l)).WithMarks(marks), nil
		case collTy == cty.String:
			// We'll delegate to the cty stdlib strlen function here, because
			// it deals with all of the complexities of tokenizing unicode
			// grapheme clusters.
			return stdlib.Strlen(coll)
		case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType():
			return coll.Length(), nil
		default:
			// Should never happen, because of the checks in our Type func above
			return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)")
		}
	},
})

// AllTrueFunc constructs a function that returns true if all elements of the
// list are true. If the list is empty, return true.
var AllTrueFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.Bool),
		},
	},
	Type:         function.StaticReturnType(cty.Bool),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		result := cty.True
		for it := args[0].ElementIterator(); it.Next(); {
			_, v := it.Element()
			if !v.IsKnown() {
				// Any unknown element makes the whole result unknown.
				return cty.UnknownVal(cty.Bool), nil
			}
			if v.IsNull() {
				// A null element is treated as not-true, so the result is false.
				return cty.False, nil
			}
			result = result.And(v)
			if result.False() {
				return cty.False, nil
			}
		}
		return result, nil
	},
})

// AnyTrueFunc constructs a function that returns true if any element of the
// list is true. If the list is empty, return false.
var AnyTrueFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.List(cty.Bool),
		},
	},
	Type:         function.StaticReturnType(cty.Bool),
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		result := cty.False
		var hasUnknown bool
		for it := args[0].ElementIterator(); it.Next(); {
			_, v := it.Element()
			if !v.IsKnown() {
				// Remember the unknown but keep scanning: a later known true
				// element still makes the result definitively true.
				hasUnknown = true
				continue
			}
			if v.IsNull() {
				// Null elements are ignored; they can't make the result true.
				continue
			}
			result = result.Or(v)
			if result.True() {
				return cty.True, nil
			}
		}
		if hasUnknown {
			return cty.UnknownVal(cty.Bool), nil
		}
		return result, nil
	},
})

// CoalesceFunc constructs a function that takes any number of arguments and
// returns the first one that isn't empty.
This function was copied from go-cty
// stdlib and modified so that it returns the first *non-empty* non-null element
// from a sequence, instead of merely the first non-null.
var CoalesceFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name:             "vals",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		// The result type is the unification of all argument types; if no
		// single type can represent every argument, that's an error.
		argTypes := make([]cty.Type, 0, len(args))
		for _, arg := range args {
			argTypes = append(argTypes, arg.Type())
		}
		unified, _ := convert.UnifyUnsafe(argTypes)
		if unified == cty.NilType {
			return cty.NilType, errors.New("all arguments must have the same type")
		}
		return unified, nil
	},
	RefineResult: refineNotNull,
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		for _, raw := range args {
			// This conversion is guaranteed to succeed because the Type
			// callback above already unified all the argument types.
			candidate, _ := convert.Convert(raw, retType)
			switch {
			case !candidate.IsKnown():
				// An unknown argument might turn out to be the winner, so
				// the overall result must be unknown too.
				return cty.UnknownVal(retType), nil
			case candidate.IsNull():
				continue // null never wins
			case retType == cty.String && candidate.RawEquals(cty.StringVal("")):
				continue // the empty string counts as "empty" for coalescing
			}
			return candidate, nil
		}
		return cty.NilVal, errors.New("no non-null, non-empty-string arguments")
	},
})

// IndexFunc constructs a function that finds the element index for a given value in a list.
+var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. 
var LookupFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name:        "inputMap",
			Type:        cty.DynamicPseudoType,
			AllowMarked: true,
		},
		{
			Name:        "key",
			Type:        cty.String,
			AllowMarked: true,
		},
	},
	VarParam: &function.Parameter{
		Name:             "default",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
		AllowMarked:      true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		// VarParam means the arg count is open-ended, so enforce 2..3 here.
		// (The message says "two or three" even though the check permits one;
		// a one-argument call would fail at args[1] below anyway.)
		if len(args) < 1 || len(args) > 3 {
			return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args))
		}

		ty := args[0].Type()

		switch {
		case ty.IsObjectType():
			// For objects the result type depends on which attribute the key
			// names, so the key must be known to resolve it.
			if !args[1].IsKnown() {
				return cty.DynamicPseudoType, nil
			}

			keyVal, _ := args[1].Unmark()
			key := keyVal.AsString()
			if ty.HasAttribute(key) {
				return args[0].GetAttr(key).Type(), nil
			} else if len(args) == 3 {
				// if the key isn't found but a default is provided,
				// return the default type
				return args[2].Type(), nil
			}
			return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key)
		case ty.IsMapType():
			if len(args) == 3 {
				// The default must be convertible to the map's element type,
				// which is also the result type.
				_, err = convert.Convert(args[2], ty.ElementType())
				if err != nil {
					return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements")
				}
			}
			return ty.ElementType(), nil
		default:
			return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var defaultVal cty.Value
		defaultValueSet := false

		if len(args) == 3 {
			// intentionally leave default value marked
			defaultVal = args[2]
			defaultValueSet = true
		}

		// keep track of marks from the collection and key
		var markses []cty.ValueMarks

		// unmark collection, retain marks to reapply later
		mapVar, mapMarks := args[0].Unmark()
		markses = append(markses, mapMarks)

		// include marks on the key in the result
		keyVal, keyMarks := args[1].Unmark()
		if len(keyMarks) > 0 {
			markses = append(markses, keyMarks)
		}
		lookupKey := keyVal.AsString()

		if !mapVar.IsKnown() {
			return cty.UnknownVal(retType).WithMarks(markses...), nil
		}

		if mapVar.Type().IsObjectType() {
			if mapVar.Type().HasAttribute(lookupKey) {
				return mapVar.GetAttr(lookupKey).WithMarks(markses...), nil
			}
		} else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True {
			return mapVar.Index(cty.StringVal(lookupKey)).WithMarks(markses...), nil
		}

		// Key not present: fall back to the default if one was given,
		// converting it to the result type resolved by the Type callback.
		if defaultValueSet {
			defaultVal, err = convert.Convert(defaultVal, retType)
			if err != nil {
				return cty.NilVal, err
			}
			return defaultVal.WithMarks(markses...), nil
		}

		// No default: report the missing key, hiding it if it was marked
		// sensitive.
		return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf(
			"lookup failed to find key %s", redactIfSensitive(lookupKey, keyMarks))
	},
})

// MatchkeysFunc constructs a function that constructs a new list by taking a
// subset of elements from one list whose indexes match the corresponding
// indexes of values in another list.
+var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. + // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. 
+ if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// OneFunc returns either the first element of a one-element list, or null +// if given a zero-element list. +var OneFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsListType() || ty.IsSetType(): + return ty.ElementType(), nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + // No specific type information, so we'll ultimately return + // a null value of unknown type. + return cty.DynamicPseudoType, nil + case 1: + return etys[0], nil + } + } + return cty.NilType, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val := args[0] + ty := val.Type() + + // Our parameter spec above doesn't set AllowUnknown or AllowNull, + // so we can assume our top-level collection is both known and non-null + // in here. 
+ + switch { + case ty.IsListType() || ty.IsSetType(): + lenVal := val.Length() + if !lenVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + var l int + err := gocty.FromCtyValue(lenVal, &l) + if err != nil { + // It would be very strange to get here, because that would + // suggest that the length is either not a number or isn't + // an integer, which would suggest a bug in cty. + return cty.NilVal, fmt.Errorf("invalid collection length: %w", err) + } + switch l { + case 0: + return cty.NullVal(retType), nil + case 1: + var ret cty.Value + // We'll use an iterator here because that works for both lists + // and sets, whereas indexing directly would only work for lists. + // Since we've just checked the length, we should only actually + // run this loop body once. + for it := val.ElementIterator(); it.Next(); { + _, ret = it.Element() + } + return ret, nil + } + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + return cty.NullVal(retType), nil + case 1: + ret := val.Index(cty.NumberIntVal(0)) + return ret, nil + } + } + return cty.NilVal, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, +}) + +// SumFunc constructs a function that returns the sum of all +// numbers provided in a list +var SumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + + if !args[0].CanIterateElements() { + return cty.NilVal, function.NewArgErrorf(0, "cannot sum noniterable") + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, function.NewArgErrorf(0, "cannot sum an empty list") + } + + arg := args[0].AsValueSlice() + ty := args[0].Type() + + if !ty.IsListType() && !ty.IsSetType() && !ty.IsTupleType() { + return cty.NilVal, 
function.NewArgErrorf(0, fmt.Sprintf("argument must be list, set, or tuple. Received %s", ty.FriendlyName())) + } + + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(cty.Number), nil + } + + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + s := arg[0] + if s.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s, err = convert.Convert(s, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + for _, v := range arg[1:] { + if v.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + v, err = convert.Convert(v, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s = s.Add(v) + } + + return s, nil + }, +}) + +// TransposeFunc constructs a function that takes a map of lists of strings and +// swaps the keys and values to produce a new map of lists of strings. 
+var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + if len(outputMap) == 0 { + return cty.MapValEmpty(cty.List(cty.String)), nil + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. 
+// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is not available in OpenTofu; use tolist([ ... ]) syntax to write a literal list") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is not available in OpenTofu; use tolist([ ... ]) syntax to write a literal list") + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is not available in OpenTofu; use tomap({ ... }) syntax to write a literal map") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is not available in OpenTofu; use tomap({ ... }) syntax to write a literal map") + }, +}) + +// Length returns the number of elements in the given collection or number of +// Unicode characters in the given string. 
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} + +// AllTrue returns true if all elements of the list are true. If the list is empty, +// return true. +func AllTrue(collection cty.Value) (cty.Value, error) { + return AllTrueFunc.Call([]cty.Value{collection}) +} + +// AnyTrue returns true if any element of the list is true. If the list is empty, +// return false. +func AnyTrue(collection cty.Value) (cty.Value, error) { + return AnyTrueFunc.Call([]cty.Value{collection}) +} + +// Coalesce takes any number of arguments and returns the first one that isn't empty. +func Coalesce(args ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(args) +} + +// Index finds the element index for a given value in a list. +func Index(list, value cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{list, value}) +} + +// List takes any number of arguments of types that can unify into a single +// type and returns a list containing those values in the same order, or +// returns an error if there is no single element type that all values can +// convert to. +func List(args ...cty.Value) (cty.Value, error) { + return ListFunc.Call(args) +} + +// Lookup performs a dynamic lookup into a map. +// There are two required arguments, map and key, plus an optional default, +// which is a value to return if no key is found in map. +func Lookup(args ...cty.Value) (cty.Value, error) { + return LookupFunc.Call(args) +} + +// Map takes an even number of arguments and returns a map whose elements are constructed +// from consecutive pairs of arguments. +func Map(args ...cty.Value) (cty.Value, error) { + return MapFunc.Call(args) +} + +// Matchkeys constructs a new list by taking a subset of elements from one list +// whose indexes match the corresponding indexes of values in another list. 
func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
	return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
}

// One returns either the first element of a one-element list, or null
// if given a zero-element list.
func One(list cty.Value) (cty.Value, error) {
	return OneFunc.Call([]cty.Value{list})
}

// Sum adds numbers in a list, set, or tuple.
func Sum(list cty.Value) (cty.Value, error) {
	return SumFunc.Call([]cty.Value{list})
}

// Transpose takes a map of lists of strings and swaps the keys and values to
// produce a new map of lists of strings.
func Transpose(values cty.Value) (cty.Value, error) {
	return TransposeFunc.Call([]cty.Value{values})
}
diff --git a/pkg/lang/funcs/collection_test.go b/pkg/lang/funcs/collection_test.go
new file mode 100644
index 00000000000..65201fcb077
--- /dev/null
+++ b/pkg/lang/funcs/collection_test.go
@@ -0,0 +1,1845 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "math" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestLength(t *testing.T) { + tests := []struct { + Value cty.Value + Want cty.Value + }{ + { + cty.ListValEmpty(cty.Number), + cty.NumberIntVal(0), + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.NumberIntVal(1), + }, + { + cty.SetValEmpty(cty.Number), + cty.NumberIntVal(0), + }, + { + cty.SetVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.MapValEmpty(cty.Bool), + cty.NumberIntVal(0), + }, + { + cty.MapVal(map[string]cty.Value{"hello": cty.True}), + cty.NumberIntVal(1), + }, + { + cty.EmptyTupleVal, + cty.NumberIntVal(0), + }, + { + cty.UnknownVal(cty.EmptyTuple), + cty.NumberIntVal(0), + }, + { + cty.TupleVal([]cty.Value{cty.True}), + cty.NumberIntVal(1), + }, + { + cty.EmptyObjectVal, + cty.NumberIntVal(0), + }, + { + cty.UnknownVal(cty.EmptyObject), + cty.NumberIntVal(0), + }, + { + cty.ObjectVal(map[string]cty.Value{"true": cty.True}), + cty.NumberIntVal(1), + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Number).Refine(). + NotNull(). + NumberRangeLowerBound(cty.Zero, true). + NumberRangeUpperBound(cty.NumberIntVal(math.MaxInt), true). + NewValue(), + }, + { + cty.DynamicVal, + cty.UnknownVal(cty.Number).RefineNotNull(), + }, + { + cty.StringVal("hello"), + cty.NumberIntVal(5), + }, + { + cty.StringVal(""), + cty.NumberIntVal(0), + }, + { + cty.StringVal("1"), + cty.NumberIntVal(1), + }, + { + cty.StringVal("Живой Журнал"), + cty.NumberIntVal(12), + }, + { + // note that the dieresis here is intentionally a combining + // ligature. + cty.StringVal("noël"), + cty.NumberIntVal(4), + }, + { + // The Es in this string has three combining acute accents. 
+ // This tests something that NFC-normalization cannot collapse + // into a single precombined codepoint, since otherwise we might + // be cheating and relying on the single-codepoint forms. + cty.StringVal("wé́́é́́é́́!"), + cty.NumberIntVal(5), + }, + { + // Go's normalization forms don't handle this ligature, so we + // will produce the wrong result but this is now a compatibility + // constraint and so we'll test it. + cty.StringVal("baffle"), + cty.NumberIntVal(4), + }, + { + cty.StringVal("😸😾"), + cty.NumberIntVal(2), + }, + { + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.Number).Refine(). + NotNull(). + NumberRangeLowerBound(cty.Zero, true). + NewValue(), + }, + { // Marked collections return a marked length + cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }).Mark("secret"), + cty.NumberIntVal(2).Mark("secret"), + }, + { // Marks on values in unmarked collections do not propagate + cty.ListVal([]cty.Value{ + cty.StringVal("hello").Mark("a"), + cty.StringVal("world").Mark("b"), + }), + cty.NumberIntVal(2), + }, + { // Marked strings return a marked length + cty.StringVal("hello world").Mark("secret"), + cty.NumberIntVal(11).Mark("secret"), + }, + { // Marked tuples return a marked length + cty.TupleVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("world"), + }).Mark("secret"), + cty.NumberIntVal(2).Mark("secret"), + }, + { // Marks on values in unmarked tuples do not propagate + cty.TupleVal([]cty.Value{ + cty.StringVal("hello").Mark("a"), + cty.StringVal("world").Mark("b"), + }), + cty.NumberIntVal(2), + }, + { // Marked objects return a marked length + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + "b": cty.StringVal("world"), + "c": cty.StringVal("nice to meet you"), + }).Mark("secret"), + cty.NumberIntVal(3).Mark("secret"), + }, + { // Marks on object attribute values do not propagate + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello").Mark("a"), + "b": 
cty.StringVal("world").Mark("b"), + "c": cty.StringVal("nice to meet you").Mark("c"), + }), + cty.NumberIntVal(3), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Length(%#v)", test.Value), func(t *testing.T) { + got, err := Length(test.Value) + + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestAllTrue(t *testing.T) { + tests := []struct { + Collection cty.Value + Want cty.Value + Err bool + }{ + { + cty.ListValEmpty(cty.Bool), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.False, cty.True}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.NullVal(cty.Bool)}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.UnknownVal(cty.Bool), + }), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.NullVal(cty.List(cty.Bool)), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("alltrue(%#v)", test.Collection), func(t *testing.T) { + got, err := AllTrue(test.Collection) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestAnyTrue(t *testing.T) { + tests := []struct { + Collection cty.Value + Want cty.Value + Err bool + }{ + { + 
cty.ListValEmpty(cty.Bool), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False}), + cty.False, + false, + }, + { + cty.ListVal([]cty.Value{cty.True, cty.False}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.False, cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.Bool), cty.True}), + cty.True, + false, + }, + { + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.False, + }), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.True, + }), + cty.True, + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + cty.NullVal(cty.List(cty.Bool)), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("anytrue(%#v)", test.Collection), func(t *testing.T) { + got, err := AnyTrue(test.Collection) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestCoalesce(t *testing.T) { + tests := []struct { + Values []cty.Value + Want cty.Value + Err bool + }{ + { + []cty.Value{cty.StringVal("first"), cty.StringVal("second"), cty.StringVal("third")}, + cty.StringVal("first"), + false, + }, + { + []cty.Value{cty.StringVal(""), cty.StringVal("second"), cty.StringVal("third")}, + cty.StringVal("second"), + false, + }, + { + []cty.Value{cty.StringVal(""), cty.StringVal("")}, + cty.NilVal, + true, + }, + { + []cty.Value{cty.True}, + cty.True, + false, + }, + { + []cty.Value{cty.NullVal(cty.Bool), cty.True}, + cty.True, + false, + }, + { + 
[]cty.Value{cty.NullVal(cty.Bool), cty.False}, + cty.False, + false, + }, + { + []cty.Value{cty.NullVal(cty.Bool), cty.False, cty.StringVal("hello")}, + cty.StringVal("false"), + false, + }, + { + []cty.Value{cty.True, cty.UnknownVal(cty.Bool)}, + cty.True, + false, + }, + { + []cty.Value{cty.UnknownVal(cty.Bool), cty.True}, + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + []cty.Value{cty.UnknownVal(cty.Bool), cty.StringVal("hello")}, + cty.UnknownVal(cty.String).RefineNotNull(), + false, + }, + { + []cty.Value{cty.DynamicVal, cty.True}, + cty.UnknownVal(cty.Bool).RefineNotNull(), + false, + }, + { + []cty.Value{cty.DynamicVal}, + cty.DynamicVal, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Coalesce(%#v...)", test.Values), func(t *testing.T) { + got, err := Coalesce(test.Values...) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestIndex(t *testing.T) { + tests := []struct { + List cty.Value + Value cty.Value + Want cty.Value + Err bool + }{ + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("a"), + cty.NumberIntVal(0), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.UnknownVal(cty.String), + }), + cty.StringVal("a"), + cty.NumberIntVal(0), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("b"), + cty.NumberIntVal(1), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.StringVal("z"), + cty.NilVal, + true, + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + cty.StringVal("3"), + }), + cty.NumberIntVal(1), 
+ cty.NumberIntVal(0), + true, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(2), + cty.NumberIntVal(1), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(4), + cty.NilVal, + true, + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.StringVal("1"), + cty.NumberIntVal(0), + true, + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(1), + cty.NumberIntVal(0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("index(%#v, %#v)", test.List, test.Value), func(t *testing.T) { + got, err := Index(test.List, test.Value) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestLookup(t *testing.T) { + simpleMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }) + intsMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.NumberIntVal(42), + }) + mapOfLists := cty.MapVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.StringVal("bar"), + cty.StringVal("baz"), + }), + }) + mapOfMaps := cty.MapVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + "baz": cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bat"), + }), + }) + mapOfTuples := cty.MapVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{cty.StringVal("bar")}), + "baz": cty.TupleVal([]cty.Value{cty.StringVal("bat")}), + }) + objectOfMaps := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + "baz": 
cty.MapVal(map[string]cty.Value{ + "b": cty.StringVal("bat"), + }), + }) + mapWithUnknowns := cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "baz": cty.UnknownVal(cty.String), + }) + mapWithObjects := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "baz": cty.NumberIntVal(42), + }) + + tests := []struct { + Values []cty.Value + Want cty.Value + Err bool + }{ + { + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + mapWithObjects, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + intsMap, + cty.StringVal("foo"), + }, + cty.NumberIntVal(42), + false, + }, + { + []cty.Value{ + mapOfMaps, + cty.StringVal("foo"), + }, + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + false, + }, + { + []cty.Value{ + objectOfMaps, + cty.StringVal("foo"), + }, + cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("bar"), + }), + false, + }, + { + []cty.Value{ + mapOfTuples, + cty.StringVal("foo"), + }, + cty.TupleVal([]cty.Value{cty.StringVal("bar")}), + false, + }, + { // Invalid key + []cty.Value{ + simpleMap, + cty.StringVal("bar"), + }, + cty.NilVal, + true, + }, + { // Invalid key + []cty.Value{ + mapWithObjects, + cty.StringVal("bar"), + }, + cty.NilVal, + true, + }, + { // Supplied default with valid key + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.StringVal(""), + }, + cty.StringVal("bar"), + false, + }, + { // Supplied default with valid (int) key + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.NumberIntVal(-1), + }, + cty.StringVal("bar"), + false, + }, + { // Supplied default with valid (int) key + []cty.Value{ + simpleMap, + cty.StringVal("foobar"), + cty.NumberIntVal(-1), + }, + cty.StringVal("-1"), + false, + }, + { // Supplied default with valid key + []cty.Value{ + mapWithObjects, + cty.StringVal("foobar"), + cty.StringVal(""), + }, + cty.StringVal(""), + false, + }, + { // Supplied 
default with invalid key + []cty.Value{ + simpleMap, + cty.StringVal("baz"), + cty.StringVal(""), + }, + cty.StringVal(""), + false, + }, + { // Supplied default with type mismatch: expects a map return + []cty.Value{ + mapOfMaps, + cty.StringVal("foo"), + cty.StringVal(""), + }, + cty.NilVal, + true, + }, + { // Supplied non-empty default with invalid key + []cty.Value{ + simpleMap, + cty.StringVal("bar"), + cty.StringVal("xyz"), + }, + cty.StringVal("xyz"), + false, + }, + { // too many args + []cty.Value{ + simpleMap, + cty.StringVal("foo"), + cty.StringVal("bar"), + cty.StringVal("baz"), + }, + cty.NilVal, + true, + }, + { // cannot search a map of lists + []cty.Value{ + mapOfLists, + cty.StringVal("baz"), + }, + cty.NilVal, + true, + }, + { + []cty.Value{ + mapWithUnknowns, + cty.StringVal("baz"), + }, + cty.UnknownVal(cty.String), + false, + }, + { + []cty.Value{ + mapWithUnknowns, + cty.StringVal("foo"), + }, + cty.StringVal("bar"), + false, + }, + { + []cty.Value{ + simpleMap, + cty.UnknownVal(cty.String), + }, + cty.UnknownVal(cty.String), + false, + }, + { + []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + "bar": cty.StringVal("b"), + }), + cty.UnknownVal(cty.String), + }, + cty.DynamicVal, // if the key is unknown then we don't know which object attribute and thus can't know the type + false, + }, + { // successful marked collection lookup returns marked value + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }).Mark("a"), + cty.StringVal("boop"), + cty.StringVal("nope"), + }, + cty.StringVal("beep").Mark("a"), + false, + }, + { // apply collection marks to unknown return vaue + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + "frob": cty.UnknownVal(cty.String), + }).Mark("a"), + cty.StringVal("frob"), + cty.StringVal("nope"), + }, + cty.UnknownVal(cty.String).Mark("a"), + false, + }, + { // propagate collection marks to default when returning + 
[]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + }).Mark("a"), + cty.StringVal("frob"), + cty.StringVal("nope").Mark("b"), + }, + cty.StringVal("nope").WithMarks(cty.NewValueMarks("a", "b")), + false, + }, + { // on unmarked collection, return only marks from found value + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("frob"), + cty.StringVal("nope").Mark("c"), + }, + cty.StringVal("honk").Mark("b"), + false, + }, + { // on unmarked collection, return default exactly on missing + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("squish"), + cty.StringVal("nope").Mark("c"), + }, + cty.StringVal("nope").Mark("c"), + false, + }, + { // retain marks on default if converted + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep").Mark("a"), + "frob": cty.StringVal("honk").Mark("b"), + }), + cty.StringVal("squish"), + cty.NumberIntVal(5).Mark("c"), + }, + cty.StringVal("5").Mark("c"), + false, + }, + { // propagate marks from key + []cty.Value{ + cty.MapVal(map[string]cty.Value{ + "boop": cty.StringVal("beep"), + "frob": cty.StringVal("honk"), + }), + cty.StringVal("boop").Mark("a"), + cty.StringVal("nope"), + }, + cty.StringVal("beep").Mark("a"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("lookup(%#v)", test.Values), func(t *testing.T) { + got, err := Lookup(test.Values...) 
+ + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestLookup_error(t *testing.T) { + simpleMap := cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }) + + tests := map[string]struct { + Values []cty.Value + WantErr string + }{ + "failed to find non-sensitive key": { + []cty.Value{ + simpleMap, + cty.StringVal("boop"), + }, + `lookup failed to find key "boop"`, + }, + "failed to find sensitive key": { + []cty.Value{ + simpleMap, + cty.StringVal("boop").Mark(marks.Sensitive), + }, + "lookup failed to find key (sensitive value)", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + _, err := Lookup(test.Values...) + + if err == nil { + t.Fatal("succeeded; want error") + } + + if err.Error() != test.WantErr { + t.Errorf("wrong error\ngot: %#v\nwant: %#v", err, test.WantErr) + } + }) + } +} + +func TestMatchkeys(t *testing.T) { + tests := []struct { + Keys cty.Value + Values cty.Value + Searchset cty.Value + Want cty.Value + Err bool + }{ + { // normal usage + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + false, + }, + { // normal usage 2, check the order + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref2"), + cty.StringVal("ref1"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + false, + }, + { // 
no matches + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref4"), + }), + cty.ListValEmpty(cty.String), + false, + }, + { // no matches 2 + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.StringVal("ref3"), + }), + cty.ListValEmpty(cty.String), + cty.ListValEmpty(cty.String), + false, + }, + { // zero case + cty.ListValEmpty(cty.String), + cty.ListValEmpty(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("nope")}), + cty.ListValEmpty(cty.String), + false, + }, + { // complex values + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("a"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("a"), + }), + }), + false, + }, + { // unknowns + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.UnknownVal(cty.String), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + cty.StringVal("ref2"), + cty.UnknownVal(cty.String), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("ref1"), + }), + cty.UnknownVal(cty.List(cty.String)).RefineNotNull(), + false, + }, + { // different types that can be unified + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListValEmpty(cty.String), + false, + }, + { // complex values: values is a different type from keys and searchset + cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.MapVal(map[string]cty.Value{ + 
"foo": cty.StringVal("baz"), + }), + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("beep"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("c"), + }), + cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.MapVal(map[string]cty.Value{ + "foo": cty.StringVal("beep"), + }), + }), + false, + }, + // errors + { // different types + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + { // lists of different length + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("matchkeys(%#v, %#v, %#v)", test.Keys, test.Values, test.Searchset), func(t *testing.T) { + got, err := Matchkeys(test.Keys, test.Values, test.Searchset) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestOne(t *testing.T) { + tests := []struct { + List cty.Value + Want cty.Value + Err string + }{ + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.ListValEmpty(cty.Number), + cty.NullVal(cty.Number), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { 
+ cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.List(cty.String)), + cty.UnknownVal(cty.String), + "", + }, + { + cty.NullVal(cty.List(cty.String)), + cty.NilVal, + "argument must not be null", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.ListValEmpty(cty.Bool).Mark("boop"), + cty.NullVal(cty.Bool).Mark("boop"), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.SetValEmpty(cty.Number), + cty.NullVal(cty.Number), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.SetVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + // The above would be valid if those two unknown values were + // equal known values, so this returns unknown rather than failing. 
+ cty.UnknownVal(cty.Number), + "", + }, + { + cty.UnknownVal(cty.Set(cty.String)), + cty.UnknownVal(cty.String), + "", + }, + { + cty.NullVal(cty.Set(cty.String)), + cty.NilVal, + "argument must not be null", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.SetValEmpty(cty.Bool).Mark("boop"), + cty.NullVal(cty.Bool).Mark("boop"), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + }), + cty.NumberIntVal(1), + "", + }, + { + cty.EmptyTupleVal, + cty.NullVal(cty.DynamicPseudoType), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.TupleVal([]cty.Value{ + cty.UnknownVal(cty.Number), + }), + cty.UnknownVal(cty.Number), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number), + }), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.EmptyTuple), + // Could actually return null here, but don't for consistency with unknown lists + cty.UnknownVal(cty.DynamicPseudoType), + "", + }, + { + cty.UnknownVal(cty.Tuple([]cty.Type{cty.Bool})), + cty.UnknownVal(cty.Bool), + "", + }, + { + cty.UnknownVal(cty.Tuple([]cty.Type{cty.Bool, cty.Number})), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.NullVal(cty.EmptyTuple), + cty.NilVal, + "argument must not be null", + }, + { + cty.NullVal(cty.Tuple([]cty.Type{cty.Bool})), + cty.NilVal, + "argument must not be null", + }, + { + cty.NullVal(cty.Tuple([]cty.Type{cty.Bool, cty.Number})), + cty.NilVal, + "argument must not be null", + }, + { + cty.TupleVal([]cty.Value{ + 
cty.NumberIntVal(1), + }).Mark("boop"), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + { + cty.EmptyTupleVal.Mark("boop"), + cty.NullVal(cty.DynamicPseudoType).Mark("boop"), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(1).Mark("boop"), + }), + cty.NumberIntVal(1).Mark("boop"), + "", + }, + + { + cty.DynamicVal, + cty.DynamicVal, + "", + }, + { + cty.NullVal(cty.DynamicPseudoType), + cty.NilVal, + "argument must not be null", + }, + { + cty.MapValEmpty(cty.String), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.EmptyObjectVal, + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.True, + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + { + cty.UnknownVal(cty.Bool), + cty.NilVal, + "must be a list, set, or tuple value with either zero or one elements", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("one(%#v)", test.List), func(t *testing.T) { + got, err := One(test.List) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\n got: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSum(t *testing.T) { + tests := []struct { + List cty.Value + Want cty.Value + Err string + }{ + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + cty.NumberIntVal(6), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1476), + cty.NumberIntVal(2093), + cty.NumberIntVal(2092495), + cty.NumberIntVal(64589234), + cty.NumberIntVal(234), + }), + cty.NumberIntVal(66685532), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + 
cty.StringVal("c"), + }), + cty.UnknownVal(cty.String), + "argument must be list, set, or tuple of number values", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(-19), + cty.NumberIntVal(5), + }), + cty.NumberIntVal(-4), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(10.2), + cty.NumberFloatVal(19.4), + cty.NumberFloatVal(5.7), + }), + cty.NumberFloatVal(35.3), + "", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(-10.2), + cty.NumberFloatVal(-19.4), + cty.NumberFloatVal(-5.7), + }), + cty.NumberFloatVal(-35.3), + "", + }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), + cty.NilVal, + "argument must be list, set, or tuple of number values", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(5), + cty.NullVal(cty.Number), + }), + cty.NilVal, + "argument must be list, set, or tuple of number values", + }, + { + cty.SetVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + cty.UnknownVal(cty.String).RefineNotNull(), + "argument must be list, set, or tuple of number values", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(-19), + cty.NumberIntVal(5), + }), + cty.NumberIntVal(-4), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberIntVal(10), + cty.NumberIntVal(25), + cty.NumberIntVal(30), + }), + cty.NumberIntVal(65), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(2340.8), + cty.NumberFloatVal(10.2), + cty.NumberFloatVal(3), + }), + cty.NumberFloatVal(2354), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(2), + }), + cty.NumberFloatVal(2), + "", + }, + { + cty.SetVal([]cty.Value{ + cty.NumberFloatVal(-2), + cty.NumberFloatVal(-50), + cty.NumberFloatVal(-20), + cty.NumberFloatVal(-123), + cty.NumberFloatVal(-4), + }), + cty.NumberFloatVal(-199), + "", + }, + { + cty.TupleVal([]cty.Value{ + cty.NumberIntVal(12), + cty.StringVal("a"), + cty.NumberIntVal(38), + }), + 
cty.UnknownVal(cty.String).RefineNotNull(), + "argument must be list, set, or tuple of number values", + }, + { + cty.NumberIntVal(12), + cty.NilVal, + "cannot sum noniterable", + }, + { + cty.ListValEmpty(cty.Number), + cty.NilVal, + "cannot sum an empty list", + }, + { + cty.MapVal(map[string]cty.Value{"hello": cty.True}), + cty.NilVal, + "argument must be list, set, or tuple. Received map of bool", + }, + { + cty.UnknownVal(cty.Number), + cty.UnknownVal(cty.Number).RefineNotNull(), + "", + }, + { + cty.UnknownVal(cty.List(cty.Number)), + cty.UnknownVal(cty.Number).RefineNotNull(), + "", + }, + { // known list containing unknown values + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Number)}), + cty.UnknownVal(cty.Number).RefineNotNull(), + "", + }, + { // numbers too large to represent as float64 + cty.ListVal([]cty.Value{ + cty.MustParseNumberVal("1e+500"), + cty.MustParseNumberVal("1e+500"), + }), + cty.MustParseNumberVal("2e+500"), + "", + }, + { // edge case we have a special error handler for + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(math.Inf(1)), + cty.NumberFloatVal(math.Inf(-1)), + }), + cty.NilVal, + "can't compute sum of opposing infinities", + }, + { + cty.ListVal([]cty.Value{ + cty.StringVal("1"), + cty.StringVal("2"), + cty.StringVal("3"), + }), + cty.NumberIntVal(6), + "", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sum(%#v)", test.List), func(t *testing.T) { + got, err := Sum(test.List) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\n got: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTranspose(t *testing.T) { + tests := []struct { + Values cty.Value + Want cty.Value + Err bool + }{ + { + cty.MapVal(map[string]cty.Value{ + 
"key1": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + "key4": cty.ListValEmpty(cty.String), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }), + false, + }, + { // map - unknown value + cty.MapVal(map[string]cty.Value{ + "key1": cty.UnknownVal(cty.List(cty.String)), + }), + cty.UnknownVal(cty.Map(cty.List(cty.String))).RefineNotNull(), + false, + }, + { // bad map - empty value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListValEmpty(cty.String), + }), + cty.MapValEmpty(cty.List(cty.String)), + false, + }, + { // bad map - value not a list + cty.MapVal(map[string]cty.Value{ + "key1": cty.StringVal("a"), + }), + cty.NilVal, + true, + }, + { // marks (deep or shallow) on any elements will propegate to the entire return value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a").Mark("beep"), // mark on the inner list element + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }).Mark("boop"), // mark on the map element + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + "key4": cty.ListValEmpty(cty.String), + }), + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3")}), + }).WithMarks(cty.NewValueMarks("beep", "boop")), + false, + }, + { // Marks on 
the input value will be applied to the return value + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + }).Mark("beep"), // mark on the entire input value + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }).Mark("beep"), + false, + }, + { // Marks on the entire input value AND inner elements (deep or shallow) ALL apply to the return + cty.MapVal(map[string]cty.Value{ + "key1": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }).Mark("beep"), // mark on the map element + "key2": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + "key3": cty.ListVal([]cty.Value{ + cty.StringVal("c").Mark("boop"), // mark on the inner list element + }), + }).Mark("bloop"), // mark on the entire input value + cty.MapVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "b": cty.ListVal([]cty.Value{ + cty.StringVal("key1"), + cty.StringVal("key2"), + }), + "c": cty.ListVal([]cty.Value{ + cty.StringVal("key2"), + cty.StringVal("key3"), + }), + }).WithMarks(cty.NewValueMarks("beep", "boop", "bloop")), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("transpose(%#v)", test.Values), func(t *testing.T) { + got, err := Transpose(test.Values) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: 
%#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/conversion.go b/pkg/lang/funcs/conversion.go new file mode 100644 index 00000000000..8bd15e36cbb --- /dev/null +++ b/pkg/lang/funcs/conversion.go @@ -0,0 +1,126 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "strconv" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/lang/types" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. 
+ Type: cty.DynamicPseudoType, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. + switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. + return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + val, _ := args[0].UnmarkDeep() + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := val.Type() + switch { + case marks.Contains(args[0], marks.Sensitive): + // Generic message so we won't inadvertently disclose + // information about sensitive values. 
+ return cty.NilVal, function.NewArgErrorf(0, "cannot convert this sensitive %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} + +// TypeFunc returns an encapsulated value containing its argument's type. This +// value is marked to allow us to limit the use of this function at the moment +// to only a few supported use cases. +var TypeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(types.TypeType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + givenType := args[0].Type() + return cty.CapsuleVal(types.TypeType, &givenType).Mark(marks.TypeType), nil + }, +}) + +func Type(input []cty.Value) (cty.Value, error) { + return TypeFunc.Call(input) +} diff --git a/pkg/lang/funcs/conversion_test.go b/pkg/lang/funcs/conversion_test.go new file mode 100644 index 00000000000..eb8a93bbbf1 --- /dev/null +++ b/pkg/lang/funcs/conversion_test.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestTo(t *testing.T) { + tests := []struct { + Value cty.Value + TargetTy cty.Type + Want cty.Value + Err string + }{ + { + cty.StringVal("a"), + cty.String, + cty.StringVal("a"), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.String, + cty.UnknownVal(cty.String), + ``, + }, + { + cty.NullVal(cty.String), + cty.String, + cty.NullVal(cty.String), + ``, + }, + { + // This test case represents evaluating the expression tostring(null) + // from HCL, since null in HCL is cty.NullVal(cty.DynamicPseudoType). + // The result in that case should still be null, but a null specifically + // of type string. + cty.NullVal(cty.DynamicPseudoType), + cty.String, + cty.NullVal(cty.String), + ``, + }, + { + cty.StringVal("a").Mark("boop"), + cty.String, + cty.StringVal("a").Mark("boop"), + ``, + }, + { + cty.NullVal(cty.String).Mark("boop"), + cty.String, + cty.NullVal(cty.String).Mark("boop"), + ``, + }, + { + cty.True, + cty.String, + cty.StringVal("true"), + ``, + }, + { + cty.StringVal("a"), + cty.Bool, + cty.DynamicVal, + `cannot convert "a" to bool; only the strings "true" or "false" are allowed`, + }, + { + cty.StringVal("a").Mark("boop"), + cty.Bool, + cty.DynamicVal, + `cannot convert "a" to bool; only the strings "true" or "false" are allowed`, + }, + { + cty.StringVal("a").Mark(marks.Sensitive), + cty.Bool, + cty.DynamicVal, + `cannot convert this sensitive string to bool`, + }, + { + cty.StringVal("a"), + cty.Number, + cty.DynamicVal, + `cannot convert "a" to number; given string must be a decimal representation of a number`, + }, + { + cty.StringVal("a").Mark("boop"), + cty.Number, + cty.DynamicVal, + `cannot convert "a" to number; given string must be a decimal representation of a number`, + }, + { + cty.StringVal("a").Mark(marks.Sensitive), + cty.Number, + cty.DynamicVal, + `cannot 
convert this sensitive string to number`, + }, + { + cty.NullVal(cty.String), + cty.Number, + cty.NullVal(cty.Number), + ``, + }, + { + cty.UnknownVal(cty.Bool), + cty.String, + cty.UnknownVal(cty.String), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.Bool, + cty.UnknownVal(cty.Bool), // conversion is optimistic + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.True}), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("true")}), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.True}), + cty.Set(cty.String), + cty.SetVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("true")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.True}), + cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("true")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world").Mark("boop")}), + cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world").Mark("boop")}), + ``, + }, + { + cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world")}).Mark("boop"), + cty.Map(cty.String), + cty.MapVal(map[string]cty.Value{"foo": cty.StringVal("hello"), "bar": cty.StringVal("world")}).Mark("boop"), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world").Mark("boop")}), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world").Mark("boop")}), + ``, + }, + { + cty.TupleVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}).Mark("boop"), + cty.List(cty.String), + cty.ListVal([]cty.Value{cty.StringVal("hello"), cty.StringVal("world")}).Mark("boop"), + ``, + }, + { + cty.EmptyTupleVal, + cty.String, + cty.DynamicVal, + `cannot convert tuple to string`, + }, + { + 
cty.UnknownVal(cty.EmptyTuple), + cty.String, + cty.DynamicVal, + `cannot convert tuple to string`, + }, + { + cty.EmptyObjectVal, + cty.Object(map[string]cty.Type{"foo": cty.String}), + cty.DynamicVal, + `incompatible object type for conversion: attribute "foo" is required`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("to %s(%#v)", test.TargetTy.FriendlyNameForConstraint(), test.Value), func(t *testing.T) { + f := MakeToFunc(test.TargetTy) + got, err := f.Call([]cty.Value{test.Value}) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/crypto.go b/pkg/lang/funcs/crypto.go new file mode 100644 index 00000000000..2c7ae37bb7f --- /dev/null +++ b/pkg/lang/funcs/crypto.go @@ -0,0 +1,345 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + "strings" + + uuidv5 "github.com/google/uuid" + uuid "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/ssh" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%w)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. 
+var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) +} + +// Base64Sha512Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) +} + +// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. 
+var BcryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "cost", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + defaultCost := 10 + + if len(args) > 1 { + var val int + if err := gocty.FromCtyValue(args[1], &val); err != nil { + return cty.UnknownVal(cty.String), err + } + defaultCost = val + } + + if len(args) > 2 { + return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") + } + + input := args[0].AsString() + out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password %w", err) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. +var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) + +// MakeFileMd5Func constructs a function that is like Md5Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileMd5Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) +} + +// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. 
+var RsaDecryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "ciphertext", + Type: cty.String, + }, + { + Name: "privatekey", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + key := args[1].AsString() + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "failed to decode input %q: cipher text must be base64-encoded", s) + } + + rawKey, err := ssh.ParseRawPrivateKey([]byte(key)) + if err != nil { + var errStr string + switch e := err.(type) { + case asn1.SyntaxError: + errStr = strings.ReplaceAll(e.Error(), "asn1: syntax error", "invalid ASN1 data in the given private key") + case asn1.StructuralError: + errStr = strings.ReplaceAll(e.Error(), "asn1: struture error", "invalid ASN1 data in the given private key") + default: + errStr = fmt.Sprintf("invalid private key: %s", e) + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, errStr) + } + privateKey, ok := rawKey.(*rsa.PrivateKey) + if !ok { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "invalid private key type %t", rawKey) + } + + out, err := rsa.DecryptPKCS1v15(nil, privateKey, b) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decrypt: %w", err) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Sha1Func contructs a function that computes the SHA1 hash of a given string +// and encodes it with hexadecimal digits. +var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) + +// MakeFileSha1Func constructs a function that is like Sha1Func but reads the +// contents of a file rather than hashing a given literal string. 
+func MakeFileSha1Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) +} + +// Sha256Func contructs a function that computes the SHA256 hash of a given string +// and encodes it with hexadecimal digits. +var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) + +// MakeFileSha256Func constructs a function that is like Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) +} + +// Sha512Func contructs a function that computes the SHA512 hash of a given string +// and encodes it with hexadecimal digits. +var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) + +// MakeFileSha512Func constructs a function that is like Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) +} + +func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + h := hf() + h.Write([]byte(s)) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + 
path := args[0].AsString() + f, err := openFile(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + defer f.Close() + + h := hf() + _, err = io.Copy(h, f) + if err != nil { + return cty.UnknownVal(cty.String), err + } + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +// UUID generates and returns a Type-4 UUID in the standard hexadecimal string +// format. +// +// This is not a pure function: it will generate a different result for each +// call. It must therefore be registered as an impure function in the function +// table in the "lang" package. +func UUID() (cty.Value, error) { + return UUIDFunc.Call(nil) +} + +// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string +// format. +func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { + return UUIDV5Func.Call([]cty.Value{namespace, name}) +} + +// Base64Sha256 computes the SHA256 hash of a given string and encodes it with +// Base64. +// +// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. +// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +func Base64Sha256(str cty.Value) (cty.Value, error) { + return Base64Sha256Func.Call([]cty.Value{str}) +} + +// Base64Sha512 computes the SHA512 hash of a given string and encodes it with +// Base64. +// +// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied +// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning. 
+// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4 +func Base64Sha512(str cty.Value) (cty.Value, error) { + return Base64Sha512Func.Call([]cty.Value{str}) +} + +// Bcrypt computes a hash of the given string using the Blowfish cipher, +// returning a string in the Modular Crypt Format +// usually expected in the shadow password file on many Unix systems. +func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(cost)+1) + args[0] = str + copy(args[1:], cost) + return BcryptFunc.Call(args) +} + +// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits. +func Md5(str cty.Value) (cty.Value, error) { + return Md5Func.Call([]cty.Value{str}) +} + +// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding +// cleartext. +func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { + return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) +} + +// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits. +func Sha1(str cty.Value) (cty.Value, error) { + return Sha1Func.Call([]cty.Value{str}) +} + +// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits. +func Sha256(str cty.Value) (cty.Value, error) { + return Sha256Func.Call([]cty.Value{str}) +} + +// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits. +func Sha512(str cty.Value) (cty.Value, error) { + return Sha512Func.Call([]cty.Value{str}) +} diff --git a/pkg/lang/funcs/crypto_test.go b/pkg/lang/funcs/crypto_test.go new file mode 100644 index 00000000000..a9ae90c9239 --- /dev/null +++ b/pkg/lang/funcs/crypto_test.go @@ -0,0 +1,803 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" + "golang.org/x/crypto/bcrypt" +) + +func TestUUID(t *testing.T) { + result, err := UUID() + if err != nil { + t.Fatal(err) + } + + resultStr := result.AsString() + if got, want := len(resultStr), 36; got != want { + t.Errorf("wrong result length %d; want %d", got, want) + } +} + +func TestUUIDV5(t *testing.T) { + tests := []struct { + Namespace cty.Value + Name cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("dns"), + cty.StringVal("tada"), + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + false, + }, + { + cty.StringVal("url"), + cty.StringVal("tada"), + cty.StringVal("2c1ff6b4-211f-577e-94de-d978b0caa16e"), + false, + }, + { + cty.StringVal("oid"), + cty.StringVal("tada"), + cty.StringVal("61eeea26-5176-5288-87fc-232d6ed30d2f"), + false, + }, + { + cty.StringVal("x500"), + cty.StringVal("tada"), + cty.StringVal("7e12415e-f7c9-57c3-9e43-52dc9950d264"), + false, + }, + { + cty.StringVal("6ba7b810-9dad-11d1-80b4-00c04fd430c8"), + cty.StringVal("tada"), + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + false, + }, + { + cty.StringVal("tada"), + cty.StringVal("tada"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("uuidv5(%#v, %#v)", test.Namespace, test.Name), func(t *testing.T) { + got, err := UUIDV5(test.Namespace, test.Name) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64Sha256(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("n4bQgYhMfWWaL+qgxVrQFaO/TxsrC4Is0V1sFbDwCgg="), + false, + }, + // This would differ because we're base64-encoding 
hex represantiation, not raw bytes. + // base64encode(sha256("test")) = + // "OWY4NmQwODE4ODRjN2Q2NTlhMmZlYWEwYzU1YWQwMTVhM2JmNGYxYjJiMGI4MjJjZDE1ZDZjMTViMGYwMGEwOA==" + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64sha256(%#v)", test.String), func(t *testing.T) { + got, err := Base64Sha256(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64Sha256(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("pZGm1Av0IEBKARczz7exkNYsZb8LzaMrV7J32a2fFG4="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("bun9+lNwoWcTxEze5pVIDfsf+mPDMtVUKZp04RCLHTc="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA256 := MakeFileBase64Sha256Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filebase64sha256(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA256.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64Sha512(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w=="), + false, + }, + // This would differ because we're base64-encoding hex represantiation, not raw bytes + // base64encode(sha512("test")) = + // 
"OZWUyNmIwZGQ0YWY3ZTc0OWFhMWE4ZWUzYzEwYWU5OTIzZjYxODk4MDc3MmU0NzNmODgxOWE1ZDQ5NDBlMGRiMjdhYzE4NWY4YTBlMWQ1Zjg0Zjg4YmM4ODdmZDY3YjE0MzczMmMzMDRjYzVmYTlhZDhlNmY1N2Y1MDAyOGE4ZmY=" + } + + for _, test := range tests { + t.Run(fmt.Sprintf("base64sha512(%#v)", test.String), func(t *testing.T) { + got, err := Base64Sha512(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64Sha512(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("LHT9F+2v2A6ER7DUZ0HuJDt+t03SFJoKsbkkb7MDgvJ+hT2FhXGeDmfL2g2qj1FnEGRhXWRa4nrLFb+xRH9Fmw=="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("9FcRPvMPKG/8OO+zJpVnl3NZXBQEwOh3bBfrSh0boTkYvo6pq3xidR6ZuG/6rPnE0p7+rTYRfD3AVxdst62n9g=="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA512 := MakeFileBase64Sha512Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filebase64sha512(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA512.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBcrypt(t *testing.T) { + // single variable test + p, err := Bcrypt(cty.StringVal("test")) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = bcrypt.CompareHashAndPassword([]byte(p.AsString()), []byte("test")) + if err != nil { + t.Fatalf("Error comparing hash and password: %s", err) + } + + // testing with two parameters + p, err = 
Bcrypt(cty.StringVal("test"), cty.NumberIntVal(5)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = bcrypt.CompareHashAndPassword([]byte(p.AsString()), []byte("test")) + if err != nil { + t.Fatalf("Error comparing hash and password: %s", err) + } + + // Negative test for more than two parameters + _, err = Bcrypt(cty.StringVal("test"), cty.NumberIntVal(10), cty.NumberIntVal(11)) + if err == nil { + t.Fatal("succeeded; want error") + } +} + +func TestMd5(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("tada"), + cty.StringVal("ce47d07243bb6eaf5e1322c81baf9bbf"), + false, + }, + { // Confirm that we're not trimming any whitespaces + cty.StringVal(" tada "), + cty.StringVal("aadf191a583e53062de2d02c008141c4"), + false, + }, + { // We accept empty string too + cty.StringVal(""), + cty.StringVal("d41d8cd98f00b204e9800998ecf8427e"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("md5(%#v)", test.String), func(t *testing.T) { + got, err := Md5(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileMD5(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("b10a8db164e0754105b7a99be72e3fe5"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("a18e1dd95427740ff7c7654ecc0280ad"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileMD5 := MakeFileMd5Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filemd5(%#v)", test.Path), func(t *testing.T) { + got, err := fileMD5.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want 
error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestRsaDecrypt(t *testing.T) { + tests := []struct { + Ciphertext cty.Value + Privatekey cty.Value + Want cty.Value + Err string + }{ + // Base-64 encoded cipher decrypts correctly + { + cty.StringVal(CipherBase64), + cty.StringVal(PrivateKey), + cty.StringVal("message"), + "", + }, + // OpenSSH key format + { + cty.StringVal(CipherBase64), + cty.StringVal(OpenSSHPrivateKey), + cty.StringVal("message"), + "", + }, + // Wrong key + { + cty.StringVal(CipherBase64), + cty.StringVal(WrongPrivateKey), + cty.UnknownVal(cty.String), + "failed to decrypt: crypto/rsa: decryption error", + }, + // Bad key + { + cty.StringVal(CipherBase64), + cty.StringVal(BadPrivateKey), + cty.UnknownVal(cty.String), + "invalid ASN1 data in the given private key: data truncated", + }, + // Empty key + { + cty.StringVal(CipherBase64), + cty.StringVal(""), + cty.UnknownVal(cty.String), + "invalid private key: ssh: no key found", + }, + // Bad ciphertext + { + cty.StringVal("bad"), + cty.StringVal(PrivateKey), + cty.UnknownVal(cty.String), + `failed to decode input "bad": cipher text must be base64-encoded`, + }, + // Empty ciphertext + { + cty.StringVal(""), + cty.StringVal(PrivateKey), + cty.UnknownVal(cty.String), + "failed to decrypt: crypto/rsa: decryption error", + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("RsaDecrypt(%#v, %#v)", test.Ciphertext, test.Privatekey), func(t *testing.T) { + got, err := RsaDecrypt(test.Ciphertext, test.Privatekey) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } else if err.Error() != test.Err { + t.Fatalf("wrong error\ngot: %s\nwant: %s", err.Error(), test.Err) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong 
result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha1(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha1(%#v)", test.String), func(t *testing.T) { + got, err := Sha1(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA1(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("0a4d55a8d778e5022fab701977c5d840bbc486d0"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("90ecc70d12bc0bca6efa8d5cc8b35091fb087686"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA1 := MakeFileSha1Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha1(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA1.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha256(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha256(%#v)", test.String), func(t *testing.T) { + got, err := Sha256(test.String) + + if test.Err { + if err == nil { + 
t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA256(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("6ee9fdfa5370a16713c44cdee695480dfb1ffa63c332d554299a74e1108b1d37"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA256 := MakeFileSha256Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha256(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA256.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSha512(t *testing.T) { + tests := []struct { + String cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("test"), + cty.StringVal("ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sha512(%#v)", test.String), func(t *testing.T) { + got, err := Sha512(test.String) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSHA512(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool 
+ }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("2c74fd17edafd80e8447b0d46741ee243b7eb74dd2149a0ab1b9246fb30382f27e853d8585719e0e67cbda0daa8f51671064615d645ae27acb15bfb1447f459b"), + false, + }, + { + cty.StringVal("testdata/icon.png"), + cty.StringVal("f457113ef30f286ffc38efb32695679773595c1404c0e8776c17eb4a1d1ba13918be8ea9ab7c62751e99b86ffaacf9c4d29efead36117c3dc057176cb7ada7f6"), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + fileSHA512 := MakeFileSha512Func(".") + + for _, test := range tests { + t.Run(fmt.Sprintf("filesha512(%#v)", test.Path), func(t *testing.T) { + got, err := fileSHA512.Call([]cty.Value{test.Path}) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +const ( + CipherBase64 = "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==" + PrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 
+Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` + OpenSSHPrivateKey = ` +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9c1zE +ekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPVXcxae4MR0B +EegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER1v6eHQa/nchi03MB +pT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7r6v24u/vp/QTmBIAlNPgad +VAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZpqcAM8wHRph8mD1EfL9hsz77pHew +xolBATV+7QAAA7jbhEFk24RBZAAAAAdzc2gtcnNhAAABAQCBQSVXmbCqSWgiszxk1nvaBI +JydIm3v79Sxrkb4PXlhBQB1/1zXMR6RN8eAM/7TifD+4U0VoTm/VFsdo/GFlhWDlkSs0Jr ++HOf7HXTHNx6l5Lco9VdzFp7gxHQER6C+pmonM32Whew0v9zcf8H7YaV7eFPGOVYVvcXmo +uBH7gx/iu6ERHW/p4dBr+dyGLTcwGlPhR4nsysv3aFMlgt2lLIKqavzKPGQokNULa5Guv6 +xNLF+Huvq/bi7++n9BOYEgCU0+Bp1UBnDXuI01vu+NXsbCX/mAdeicJQpRFpX750E0usch 
+mmpwAzzAdGmHyYPUR8v2GzPvukd7DGiUEBNX7tAAAAAwEAAQAAAQAtayvpBVt76wGJt/vP +30J0EMOZ3nOKOvnK54OiVUFy3h99ql0oTX/JCyxvyY9L2mHEzzw2cPSQipEzENJio/V0f+ +Qy2wTLFenjV17rySd8eIiluXg/VpCw+BSpTWqwUcju4/LHz06l1u7mrTcVnRR+2LEkbzYf +/ackBy1gOTorbonTK2G3NxFMfAdRjzcifVvEPM5zWC38GDo1OFr9UixOqhkEB/UNFswNll +H/I5JQmMjGEyMsAIxm/JGwCZSoZo9rdiII5qrcLdT2HKRpam7UAQ1Ill7eUuGF/9ZmiEP+ +PcnjVGo46WyYh9w24SWx8BU8z96WfT/Rhzs5RpGEfsEhAAAAgQCGeVL+Gd7PDu1il11Hv5 +auo+734lZEdTVv2f1iyl2aZ5ryexYMTeHuekV0+xsDUByGPHg4w57B8c68Xfq2jsTmXinD +B4918owl9zR307HJ7ATwBmKWP1sc4U/FGJxbukc5IsRJU39q7HhGr+65HSpCNywHiGcuwd +B7pSirJqlUOwAAAIEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvdqcliF5 +vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUAAACBAKjbMNWkpe1iWtux +dA6CXZVPS1nq6V3Gs5SXCHTs/0vUA5kit0Q0E3an08UZq8YmCPSxLJpDpL85Z5zgTKZ2d2 +TlOaiEX6a3nESIt+ygwDh1hp5QtBFhoJeOmC2+/414ln9ABmPg3ySTXfYuk2yA1rvNueP3 +qwEumyjIVv96u39pAAAAAAEC +-----END OPENSSH PRIVATE KEY----- +` + WrongPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAlrCgnEVgmNKCq7KPc+zUU5IrxPu1ClMNJS7RTsTPEkbwe5SB +p+6V6WtCbD/X/lDRRGbOENChh1Phulb7lViqgrdpHydgsrKoS5ah3DfSIxLFLE00 +9Yo4TCYwgw6+s59j16ZAFVinaQ9l6Kmrb2ll136hMrz8QKh+qw+onOLd38WFgm+W +ZtUqSXf2LANzfzzy4OWFNyFqKaCAolSkPdTS9Nz+svtScvp002DQp8OdP1AgPO+l +o5N3M38Fftapwg0pCtJ5Zq0NRWIXEonXiTEMA6zy3gEZVOmDxoIFUWnmrqlMJLFy +5S6LDrHSdqJhCxDK6WRZj43X9j8spktk3eGhMwIDAQABAoIBAAem8ID/BOi9x+Tw +LFi2rhGQWqimH4tmrEQ3HGnjlKBY+d1MrUjZ1MMFr1nP5CgF8pqGnfA8p/c3Sz8r +K5tp5T6+EZiDZ2WrrOApxg5ox0MAsQKO6SGO40z6o3wEQ6rbbTaGOrraxaWQIpyu +AQanU4Sd6ZGqByVBaS1GnklZO+shCHqw73b7g1cpLEmFzcYnKHYHlUUIsstMe8E1 +BaCY0CH7JbWBjcbiTnBVwIRZuu+EjGiQuhTilYL2OWqoMVg1WU0L2IFpR8lkf/2W +SBx5J6xhwbBGASOpM+qidiN580GdPzGhWYSqKGroHEzBm6xPSmV1tadNA26WFG4p +pthLiAECgYEA5BsPRpNYJAQLu5B0N7mj9eEp0HABVEgL/MpwiImjaKdAwp78HM64 +IuPvJxs7r+xESiIz4JyjR8zrQjYOCKJsARYkmNlEuAz0SkHabCw1BdEBwUhjUGVB +efoERK6GxfAoNqmSDwsOvHFOtsmDIlbHmg7G2rUxNVpeou415BSB0B8CgYEAqR4J 
+YHKk2Ibr9rU+rBU33TcdTGw0aAkFNAVeqM9j0haWuFXmV3RArgoy09lH+2Ha6z/g +fTX2xSDAWV7QUlLOlBRIhurPAo2jO2yCrGHPZcWiugstrR2hTTInigaSnCmK3i7F +6sYmL3S7K01IcVNxSlWvGijtClT92Cl2WUCTfG0CgYAiEjyk4QtQTd5mxLvnOu5X +oqs5PBGmwiAwQRiv/EcRMbJFn7Oupd3xMDSflbzDmTnWDOfMy/jDl8MoH6TW+1PA +kcsjnYhbKWwvz0hN0giVdtOZSDO1ZXpzOrn6fEsbM7T9/TQY1SD9WrtUKCNTNL0Z +sM1ZC6lu+7GZCpW4HKwLJwKBgQCRT0yxQXBg1/UxwuO5ynV4rx2Oh76z0WRWIXMH +S0MyxdP1SWGkrS/SGtM3cg/GcHtA/V6vV0nUcWK0p6IJyjrTw2XZ/zGluPuTWJYi +9dvVT26Vunshrz7kbH7KuwEICy3V4IyQQHeY+QzFlR70uMS0IVFWAepCoWqHbIDT +CYhwNQKBgGPcLXmjpGtkZvggl0aZr9LsvCTckllSCFSI861kivL/rijdNoCHGxZv +dfDkLTLcz9Gk41rD9Gxn/3sqodnTAc3Z2PxFnzg1Q/u3+x6YAgBwI/g/jE2xutGW +H7CurtMwALQ/n/6LUKFmjRZjqbKX9SO2QSaC3grd6sY9Tu+bZjLe +-----END RSA PRIVATE KEY----- +` + BadPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH 
+BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` +) diff --git a/pkg/lang/funcs/datetime.go b/pkg/lang/funcs/datetime.go new file mode 100644 index 00000000000..f53e3ff7117 --- /dev/null +++ b/pkg/lang/funcs/datetime.go @@ -0,0 +1,202 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// TimestampFunc constructs a function that returns a string representation of the current date and time. +var TimestampFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil + }, +}) + +// MakeStaticTimestampFunc constructs a function that returns a string +// representation of the date and time specified by the provided argument. +func MakeStaticTimestampFunc(static time.Time) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(static.Format(time.RFC3339)), nil + }, + }) +} + +// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp. 
+var TimeAddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "timestamp", + Type: cty.String, + }, + { + Name: "duration", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ts, err := parseTimestamp(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + duration, err := time.ParseDuration(args[1].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil + }, +}) + +// TimeCmpFunc is a function that compares two timestamps. +var TimeCmpFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "timestamp_a", + Type: cty.String, + }, + { + Name: "timestamp_b", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + tsA, err := parseTimestamp(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(0, err) + } + tsB, err := parseTimestamp(args[1].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + + switch { + case tsA.Equal(tsB): + return cty.NumberIntVal(0), nil + case tsA.Before(tsB): + return cty.NumberIntVal(-1), nil + default: + // By elimintation, tsA must be after tsB. + return cty.NumberIntVal(1), nil + } + }, +}) + +// Timestamp returns a string representation of the current date and time. +// +// In the OpenTofu language, timestamps are conventionally represented as +// strings using RFC 3339 "Date and Time format" syntax, and so timestamp +// returns a string in this format. +func Timestamp() (cty.Value, error) { + return TimestampFunc.Call([]cty.Value{}) +} + +// TimeAdd adds a duration to a timestamp, returning a new timestamp. 
+//
+// In the OpenTofu language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. TimeAdd requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// `duration` is a string representation of a time difference, consisting of
+// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The
+// accepted units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and
+// `"h"`. The first number may be negative to indicate a negative duration,
+// like `"-2h5m"`.
+//
+// The result is a string, also in RFC 3339 format, representing the result
+// of adding the given duration to the given timestamp.
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
+}
+
+// TimeCmp compares two timestamps, indicating whether they are equal or
+// if one is before the other.
+//
+// TimeCmp considers the UTC offset of each given timestamp when making its
+// decision, so for example 6:00 +0200 and 4:00 UTC are equal.
+//
+// In the OpenTofu language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. TimeCmp requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// The result is always a number between -1 and 1. -1 indicates that
+// timestampA is earlier than timestampB. 1 indicates that timestampA is
+// later. 0 indicates that the two timestamps represent the same instant.
+func TimeCmp(timestampA, timestampB cty.Value) (cty.Value, error) {
+	return TimeCmpFunc.Call([]cty.Value{timestampA, timestampB})
+}
+
+// parseTimestamp parses ts as an RFC 3339 timestamp. When parsing fails it
+// rewrites Go's *time.ParseError into a message that names the offending
+// timestamp portion (year, month, hour, etc.) instead of exposing Go's
+// reference-date layout values, which callers of these functions are not
+// expected to understand.
+func parseTimestamp(ts string) (time.Time, error) {
+	t, err := time.Parse(time.RFC3339, ts)
+	if err != nil {
+		switch err := err.(type) {
+		case *time.ParseError:
+			// If err is a time.ParseError then its string representation is not
+			// appropriate since it relies on details of Go's strange date format
+			// representation, which a caller of our functions is not expected
+			// to be familiar with.
+			//
+			// Therefore we do some light transformation to get a more suitable
+			// error that should make more sense to our callers. These are
+			// still not awesome error messages, but at least they refer to
+			// the timestamp portions by name rather than by Go's example
+			// values.
+			if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: %w", err)
+			}
+			var what string
+			switch err.LayoutElem {
+			case "2006":
+				what = "year"
+			case "01":
+				what = "month"
+			case "02":
+				what = "day of month"
+			case "15":
+				what = "hour"
+			case "04":
+				what = "minute"
+			case "05":
+				what = "second"
+			case "Z07:00":
+				what = "UTC offset"
+			case "T":
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
+			case ":", "-":
+				if err.ValueElem == "" {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
+				} else {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
+				}
+			default:
+				// Should never get here, because time.RFC3339 includes only the
+				// above portions, but since that might change in future we'll
+				// be robust here.
+ what = "timestamp segment" + } + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) + } + } + return time.Time{}, err + } + return t, nil +} diff --git a/pkg/lang/funcs/datetime_test.go b/pkg/lang/funcs/datetime_test.go new file mode 100644 index 00000000000..9f2813eb020 --- /dev/null +++ b/pkg/lang/funcs/datetime_test.go @@ -0,0 +1,187 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + "time" + + "github.com/zclconf/go-cty/cty" +) + +func TestTimestamp(t *testing.T) { + currentTime := time.Now().UTC() + result, err := Timestamp() + if err != nil { + t.Fatalf("err: %s", err) + } + resultTime, err := time.Parse(time.RFC3339, result.AsString()) + if err != nil { + t.Fatalf("Error parsing timestamp: %s", err) + } + + if resultTime.Sub(currentTime).Seconds() > 10.0 { + t.Fatalf("Timestamp Diff too large. 
Expected: %s\nReceived: %s", currentTime.Format(time.RFC3339), result.AsString()) + } + +} + +func TestTimeadd(t *testing.T) { + tests := []struct { + Time cty.Value + Duration cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("1s"), + cty.StringVal("2017-11-22T00:00:01Z"), + false, + }, + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("10m1s"), + cty.StringVal("2017-11-22T00:10:01Z"), + false, + }, + { // also support subtraction + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("-1h"), + cty.StringVal("2017-11-21T23:00:00Z"), + false, + }, + { // Invalid format timestamp + cty.StringVal("2017-11-22"), + cty.StringVal("-1h"), + cty.UnknownVal(cty.String).RefineNotNull(), + true, + }, + { // Invalid format duration (day is not supported by ParseDuration) + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("1d"), + cty.UnknownVal(cty.String).RefineNotNull(), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("TimeAdd(%#v, %#v)", test.Time, test.Duration), func(t *testing.T) { + got, err := TimeAdd(test.Time, test.Duration) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTimeCmp(t *testing.T) { + tests := []struct { + TimeA, TimeB cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("2017-11-22T00:00:00Z"), + cty.Zero, + ``, + }, + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("2017-11-22T01:00:00+01:00"), + cty.Zero, + ``, + }, + { + cty.StringVal("2017-11-22T00:00:01Z"), + cty.StringVal("2017-11-22T01:00:00+01:00"), + cty.NumberIntVal(1), + ``, + }, + { + cty.StringVal("2017-11-22T01:00:00Z"), + cty.StringVal("2017-11-22T00:59:00-01:00"), + cty.NumberIntVal(-1), + ``, + }, + { 
+ cty.StringVal("2017-11-22T01:00:00+01:00"), + cty.StringVal("2017-11-22T01:00:00-01:00"), + cty.NumberIntVal(-1), + ``, + }, + { + cty.StringVal("2017-11-22T01:00:00-01:00"), + cty.StringVal("2017-11-22T01:00:00+01:00"), + cty.NumberIntVal(1), + ``, + }, + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.StringVal("bloop"), + cty.UnknownVal(cty.String).RefineNotNull(), + `not a valid RFC3339 timestamp: cannot use "bloop" as year`, + }, + { + cty.StringVal("2017-11-22 00:00:00Z"), + cty.StringVal("2017-11-22T00:00:00Z"), + cty.UnknownVal(cty.String).RefineNotNull(), + `not a valid RFC3339 timestamp: missing required time introducer 'T'`, + }, + { + cty.StringVal("2017-11-22T00:00:00Z"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.Number).RefineNotNull(), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal("2017-11-22T00:00:00Z"), + cty.UnknownVal(cty.Number).RefineNotNull(), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.Number).RefineNotNull(), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("TimeCmp(%#v, %#v)", test.TimeA, test.TimeB), func(t *testing.T) { + got, err := TimeCmp(test.TimeA, test.TimeB) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got := err.Error(); got != test.Err { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, test.Err) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/descriptions.go b/pkg/lang/funcs/descriptions.go new file mode 100644 index 00000000000..a19a53084eb --- /dev/null +++ b/pkg/lang/funcs/descriptions.go @@ -0,0 +1,571 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import "github.com/zclconf/go-cty/cty/function" + +type descriptionEntry struct { + // Description is a description for the function. + Description string + + // ParamDescription argument must match the number of parameters of the + // function. If the function has a VarParam then that counts as one + // parameter. The given descriptions will be assigned in order starting + // with the positional arguments in their declared order, followed by the + // variadic parameter if any. + ParamDescription []string +} + +// DescriptionList is a consolidated list containing all descriptions for all +// functions available within OpenTofu. A function's description should point +// to the matching entry in this list. +// +// We keep this as a single list, so we can quickly review descriptions within +// a single file and copy the whole list to other projects, like +// terraform-schema. +var DescriptionList = map[string]descriptionEntry{ + "abs": { + Description: "`abs` returns the absolute value of the given number. In other words, if the number is zero or positive then it is returned as-is, but if it is negative then it is multiplied by -1 to make it positive before returning it.", + ParamDescription: []string{""}, + }, + "abspath": { + Description: "`abspath` takes a string containing a filesystem path and converts it to an absolute path. That is, if the path is not absolute, it will be joined with the current working directory.", + ParamDescription: []string{""}, + }, + "alltrue": { + Description: "`alltrue` returns `true` if all elements in a given collection are `true` or `\"true\"`. It also returns `true` if the collection is empty.", + ParamDescription: []string{""}, + }, + "anytrue": { + Description: "`anytrue` returns `true` if any element in a given collection is `true` or `\"true\"`. 
It also returns `false` if the collection is empty.", + ParamDescription: []string{""}, + }, + "base64decode": { + Description: "`base64decode` takes a string containing a Base64 character sequence and returns the original string.", + ParamDescription: []string{""}, + }, + "base64encode": { + Description: "`base64encode` applies Base64 encoding to a string.", + ParamDescription: []string{""}, + }, + "base64gzip": { + Description: "`base64gzip` compresses a string with gzip and then encodes the result in Base64 encoding.", + ParamDescription: []string{""}, + }, + "base64gunzip": { + Description: "`base64gunzip` decodes a Base64-encoded string and uncompresses the result with gzip.", + ParamDescription: []string{""}, + }, + "base64sha256": { + Description: "`base64sha256` computes the SHA256 hash of a given string and encodes it with Base64. This is not equivalent to `base64encode(sha256(\"test\"))` since `sha256()` returns hexadecimal representation.", + ParamDescription: []string{""}, + }, + "base64sha512": { + Description: "`base64sha512` computes the SHA512 hash of a given string and encodes it with Base64. 
This is not equivalent to `base64encode(sha512(\"test\"))` since `sha512()` returns hexadecimal representation.", + ParamDescription: []string{""}, + }, + "basename": { + Description: "`basename` takes a string containing a filesystem path and removes all except the last portion from it.", + ParamDescription: []string{""}, + }, + "bcrypt": { + Description: "`bcrypt` computes a hash of the given string using the Blowfish cipher, returning a string in [the _Modular Crypt Format_](https://passlib.readthedocs.io/en/stable/modular_crypt_format.html) usually expected in the shadow password file on many Unix systems.", + ParamDescription: []string{ + "", + "The `cost` argument is optional and will default to 10 if unspecified.", + }, + }, + "can": { + Description: "`can` evaluates the given expression and returns a boolean value indicating whether the expression produced a result without any errors.", + ParamDescription: []string{""}, + }, + "ceil": { + Description: "`ceil` returns the closest whole number that is greater than or equal to the given value, which may be a fraction.", + ParamDescription: []string{""}, + }, + "chomp": { + Description: "`chomp` removes newline characters at the end of a string.", + ParamDescription: []string{""}, + }, + "chunklist": { + Description: "`chunklist` splits a single list into fixed-size chunks, returning a list of lists.", + ParamDescription: []string{ + "", + "The maximum length of each chunk. 
All but the last element of the result is guaranteed to be of exactly this size.", + }, + }, + "cidrcontains": { + Description: "`cidrcontains` determines whether a given IP address or an address prefix given in CIDR notation is within a given IP network address prefix.", + ParamDescription: []string{ + "`containing_prefix` must be given in CIDR notation, as defined in [RFC 4632 section 3.1](https://tools.ietf.org/html/rfc4632#section-3.1).", + "`contained_ip_or_prefix` is either an IP address or an address prefix given in CIDR notation.", + }, + }, + "cidrhost": { + Description: "`cidrhost` calculates a full host IP address for a given host number within a given IP network address prefix.", + ParamDescription: []string{ + "`prefix` must be given in CIDR notation, as defined in [RFC 4632 section 3.1](https://tools.ietf.org/html/rfc4632#section-3.1).", + "`hostnum` is a whole number that can be represented as a binary integer with no more than the number of digits remaining in the address after the given prefix.", + }, + }, + "cidrnetmask": { + Description: "`cidrnetmask` converts an IPv4 address prefix given in CIDR notation into a subnet mask address.", + ParamDescription: []string{ + "`prefix` must be given in CIDR notation, as defined in [RFC 4632 section 3.1](https://tools.ietf.org/html/rfc4632#section-3.1).", + }, + }, + "cidrsubnet": { + Description: "`cidrsubnet` calculates a subnet address within given IP network address prefix.", + ParamDescription: []string{ + "`prefix` must be given in CIDR notation, as defined in [RFC 4632 section 3.1](https://tools.ietf.org/html/rfc4632#section-3.1).", + "`newbits` is the number of additional bits with which to extend the prefix.", + "`netnum` is a whole number that can be represented as a binary integer with no more than `newbits` binary digits, which will be used to populate the additional bits added to the prefix."}, + }, + "cidrsubnets": { + Description: "`cidrsubnets` calculates a sequence of consecutive IP 
address ranges within a particular CIDR prefix.", + ParamDescription: []string{ + "`prefix` must be given in CIDR notation, as defined in [RFC 4632 section 3.1](https://tools.ietf.org/html/rfc4632#section-3.1).", + "", + }, + }, + "coalesce": { + Description: "`coalesce` takes any number of arguments and returns the first one that isn't null or an empty string.", + ParamDescription: []string{""}, + }, + "coalescelist": { + Description: "`coalescelist` takes any number of list arguments and returns the first one that isn't empty.", + ParamDescription: []string{ + "List or tuple values to test in the given order.", + }, + }, + "compact": { + Description: "`compact` takes a list of strings and returns a new list with any empty string elements removed.", + ParamDescription: []string{""}, + }, + "concat": { + Description: "`concat` takes two or more lists and combines them into a single list.", + ParamDescription: []string{""}, + }, + "contains": { + Description: "`contains` determines whether a given list or set contains a given single value as one of its elements.", + ParamDescription: []string{"", ""}, + }, + "csvdecode": { + Description: "`csvdecode` decodes a string containing CSV-formatted data and produces a list of maps representing that data.", + ParamDescription: []string{""}, + }, + "dirname": { + Description: "`dirname` takes a string containing a filesystem path and removes the last portion from it.", + ParamDescription: []string{""}, + }, + "distinct": { + Description: "`distinct` takes a list and returns a new list with any duplicate elements removed.", + ParamDescription: []string{""}, + }, + "element": { + Description: "`element` retrieves a single element from a list.", + ParamDescription: []string{"", ""}, + }, + "endswith": { + Description: "`endswith` takes two values: a string to check and a suffix string. 
The function returns true if the first string ends with that exact suffix.", + ParamDescription: []string{"", ""}, + }, + "file": { + Description: "`file` reads the contents of a file at the given path and returns them as a string.", + ParamDescription: []string{""}, + }, + "filebase64": { + Description: "`filebase64` reads the contents of a file at the given path and returns them as a base64-encoded string.", + ParamDescription: []string{""}, + }, + "filebase64sha256": { + Description: "`filebase64sha256` is a variant of `base64sha256` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "filebase64sha512": { + Description: "`filebase64sha512` is a variant of `base64sha512` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "fileexists": { + Description: "`fileexists` determines whether a file exists at a given path.", + ParamDescription: []string{""}, + }, + "filemd5": { + Description: "`filemd5` is a variant of `md5` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "fileset": { + Description: "`fileset` enumerates a set of regular file names given a path and pattern. 
The path is automatically removed from the resulting set of file names and any result still containing path separators always returns forward slash (`/`) as the path separator for cross-system compatibility.", + ParamDescription: []string{"", ""}, + }, + "filesha1": { + Description: "`filesha1` is a variant of `sha1` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "filesha256": { + Description: "`filesha256` is a variant of `sha256` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "filesha512": { + Description: "`filesha512` is a variant of `sha512` that hashes the contents of a given file rather than a literal string.", + ParamDescription: []string{""}, + }, + "flatten": { + Description: "`flatten` takes a list and replaces any elements that are lists with a flattened sequence of the list contents.", + ParamDescription: []string{""}, + }, + "floor": { + Description: "`floor` returns the closest whole number that is less than or equal to the given value, which may be a fraction.", + ParamDescription: []string{""}, + }, + "format": { + Description: "The `format` function produces a string by formatting a number of other values according to a specification string. 
It is similar to the `printf` function in C, and other similar functions in other programming languages.", + ParamDescription: []string{"", ""}, + }, + "formatdate": { + Description: "`formatdate` converts a timestamp into a different time format.", + ParamDescription: []string{"", ""}, + }, + "formatlist": { + Description: "`formatlist` produces a list of strings by formatting a number of other values according to a specification string.", + ParamDescription: []string{"", ""}, + }, + "indent": { + Description: "`indent` adds a given number of spaces to the beginnings of all but the first line in a given multi-line string.", + ParamDescription: []string{ + "Number of spaces to add after each newline character.", + "", + }, + }, + "index": { + Description: "`index` finds the element index for a given value in a list.", + ParamDescription: []string{"", ""}, + }, + "issensitive": { + Description: "`issensitive` takes any value and returns `true` if the value is marked as sensitive, and `false` otherwise.", + ParamDescription: []string{""}, + }, + "join": { + Description: "`join` produces a string by concatenating together all elements of a given list of strings with the given delimiter.", + ParamDescription: []string{ + "Delimiter to insert between the given strings.", + "One or more lists of strings to join.", + }, + }, + "jsondecode": { + Description: "`jsondecode` interprets a given string as JSON, returning a representation of the result of decoding that string.", + ParamDescription: []string{""}, + }, + "jsonencode": { + Description: "`jsonencode` encodes a given value to a string using JSON syntax.", + ParamDescription: []string{""}, + }, + "keys": { + Description: "`keys` takes a map and returns a list containing the keys from that map.", + ParamDescription: []string{ + "The map to extract keys from. 
May instead be an object-typed value, in which case the result is a tuple of the object attributes.", + }, + }, + "length": { + Description: "`length` determines the length of a given list, map, or string.", + ParamDescription: []string{""}, + }, + "list": { + Description: "The `list` function is no longer available. See `tolist` instead.", + ParamDescription: []string{""}, + }, + "log": { + Description: "`log` returns the logarithm of a given number in a given base.", + ParamDescription: []string{"", ""}, + }, + "lookup": { + Description: "`lookup` retrieves the value of a single element from a map, given its key. If the given key does not exist, the given default value is returned instead.", + ParamDescription: []string{"", "", ""}, + }, + "lower": { + Description: "`lower` converts all cased letters in the given string to lowercase.", + ParamDescription: []string{""}, + }, + "map": { + Description: "The `map` function is no longer available. See `tomap` instead.", + ParamDescription: []string{""}, + }, + "matchkeys": { + Description: "`matchkeys` constructs a new list by taking a subset of elements from one list whose indexes match the corresponding indexes of values in another list.", + ParamDescription: []string{"", "", ""}, + }, + "max": { + Description: "`max` takes one or more numbers and returns the greatest number from the set.", + ParamDescription: []string{""}, + }, + "md5": { + Description: "`md5` computes the MD5 hash of a given string and encodes it with hexadecimal digits.", + ParamDescription: []string{""}, + }, + "merge": { + Description: "`merge` takes an arbitrary number of maps or objects, and returns a single map or object that contains a merged set of elements from all arguments.", + ParamDescription: []string{""}, + }, + "min": { + Description: "`min` takes one or more numbers and returns the smallest number from the set.", + ParamDescription: []string{""}, + }, + "nonsensitive": { + Description: "`nonsensitive` takes a sensitive value and 
returns a copy of that value with the sensitive marking removed, thereby exposing the sensitive value.", + ParamDescription: []string{""}, + }, + "one": { + Description: "`one` takes a list, set, or tuple value with either zero or one elements. If the collection is empty, `one` returns `null`. Otherwise, `one` returns the first element. If there are two or more elements then `one` will return an error.", + ParamDescription: []string{""}, + }, + "parseint": { + Description: "`parseint` parses the given string as a representation of an integer in the specified base and returns the resulting number. The base must be between 2 and 62 inclusive.", + ParamDescription: []string{"", ""}, + }, + "pathexpand": { + Description: "`pathexpand` takes a filesystem path that might begin with a `~` segment, and if so it replaces that segment with the current user's home directory path.", + ParamDescription: []string{""}, + }, + "pow": { + Description: "`pow` calculates an exponent, by raising its first argument to the power of the second argument.", + ParamDescription: []string{"", ""}, + }, + "range": { + Description: "`range` generates a list of numbers using a start value, a limit value, and a step value.", + ParamDescription: []string{""}, + }, + "regex": { + Description: "`regex` applies a [regular expression](https://en.wikipedia.org/wiki/Regular_expression) to a string and returns the matching substrings.", + ParamDescription: []string{"", ""}, + }, + "regexall": { + Description: "`regexall` applies a [regular expression](https://en.wikipedia.org/wiki/Regular_expression) to a string and returns a list of all matches.", + ParamDescription: []string{"", ""}, + }, + "replace": { + Description: "`replace` searches a given string for another given substring, and replaces each occurrence with a given replacement string.", + ParamDescription: []string{"", "", ""}, + }, + "reverse": { + Description: "`reverse` takes a sequence and produces a new sequence of the same length with all 
of the same elements as the given sequence but in reverse order.", + ParamDescription: []string{""}, + }, + "rsadecrypt": { + Description: "`rsadecrypt` decrypts an RSA-encrypted ciphertext, returning the corresponding cleartext.", + ParamDescription: []string{"", ""}, + }, + "sensitive": { + Description: "`sensitive` takes any value and returns a copy of it marked so that OpenTofu will treat it as sensitive, with the same meaning and behavior as for [sensitive input variables](/language/values/variables#suppressing-values-in-cli-output).", + ParamDescription: []string{""}, + }, + "setintersection": { + Description: "The `setintersection` function takes multiple sets and produces a single set containing only the elements that all of the given sets have in common. In other words, it computes the [intersection](https://en.wikipedia.org/wiki/Intersection_\\(set_theory\\)) of the sets.", + ParamDescription: []string{"", ""}, + }, + "setproduct": { + Description: "The `setproduct` function finds all of the possible combinations of elements from all of the given sets by computing the [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product).", + ParamDescription: []string{ + "The sets to consider. Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering", + }, + }, + "setsubtract": { + Description: "The `setsubtract` function returns a new set containing the elements from the first set that are not present in the second set. In other words, it computes the [relative complement](https://en.wikipedia.org/wiki/Complement_\\(set_theory\\)#Relative_complement) of the second set.", + ParamDescription: []string{"", ""}, + }, + "setunion": { + Description: "The `setunion` function takes multiple sets and produces a single set containing the elements from all of the given sets. 
In other words, it computes the [union](https://en.wikipedia.org/wiki/Union_\\(set_theory\\)) of the sets.", + ParamDescription: []string{"", ""}, + }, + "sha1": { + Description: "`sha1` computes the SHA1 hash of a given string and encodes it with hexadecimal digits.", + ParamDescription: []string{""}, + }, + "sha256": { + Description: "`sha256` computes the SHA256 hash of a given string and encodes it with hexadecimal digits.", + ParamDescription: []string{""}, + }, + "sha512": { + Description: "`sha512` computes the SHA512 hash of a given string and encodes it with hexadecimal digits.", + ParamDescription: []string{""}, + }, + "signum": { + Description: "`signum` determines the sign of a number, returning a number between -1 and 1 to represent the sign.", + ParamDescription: []string{""}, + }, + "slice": { + Description: "`slice` extracts some consecutive elements from within a list.", + ParamDescription: []string{"", "", ""}, + }, + "sort": { + Description: "`sort` takes a list of strings and returns a new list with those strings sorted lexicographically.", + ParamDescription: []string{""}, + }, + "split": { + Description: "`split` produces a list by dividing a given string at all occurrences of a given separator.", + ParamDescription: []string{"", ""}, + }, + "startswith": { + Description: "`startswith` takes two values: a string to check and a prefix string. The function returns true if the string begins with that exact prefix.", + ParamDescription: []string{"", ""}, + }, + "strcontains": { + Description: "`strcontains` takes two values: a string to check and an expected substring. The function returns true if the string has the substring contained within it.", + ParamDescription: []string{"", ""}, + }, + "strrev": { + Description: "`strrev` reverses the characters in a string. 
Note that the characters are treated as _Unicode characters_ (in technical terms, Unicode [grapheme cluster boundaries](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) are respected).", + ParamDescription: []string{""}, + }, + "substr": { + Description: "`substr` extracts a substring from a given string by offset and (maximum) length.", + ParamDescription: []string{"", "", ""}, + }, + "sum": { + Description: "`sum` takes a list or set of numbers and returns the sum of those numbers.", + ParamDescription: []string{""}, + }, + "templatefile": { + Description: "`templatefile` reads the file at the given path and renders its content as a template using a supplied set of template variables.", + ParamDescription: []string{"", ""}, + }, + "templatestring": { + Description: "`templatestring` processes the provided string as a template using a supplied set of template variables.", + ParamDescription: []string{"", ""}, + }, + "textdecodebase64": { + Description: "`textdecodebase64` function decodes a string that was previously Base64-encoded, and then interprets the result as characters in a specified character encoding.", + ParamDescription: []string{"", ""}, + }, + "textencodebase64": { + Description: "`textencodebase64` encodes the unicode characters in a given string using a specified character encoding, returning the result base64 encoded because OpenTofu language strings are always sequences of unicode characters.", + ParamDescription: []string{"", ""}, + }, + "timeadd": { + Description: "`timeadd` adds a duration to a timestamp, returning a new timestamp.", + ParamDescription: []string{"", ""}, + }, + "timecmp": { + Description: "`timecmp` compares two timestamps and returns a number that represents the ordering of the instants those timestamps represent.", + ParamDescription: []string{"", ""}, + }, + "timestamp": { + Description: "`timestamp` returns a UTC timestamp string in [RFC 3339](https://tools.ietf.org/html/rfc3339) format.", + ParamDescription: 
[]string{}, + }, + "plantimestamp": { + Description: "`plantimestamp` returns a UTC timestamp string in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, fixed to a constant time representing the time of the plan.", + ParamDescription: []string{}, + }, + "title": { + Description: "`title` converts the first letter of each word in the given string to uppercase.", + ParamDescription: []string{""}, + }, + "tobool": { + Description: "`tobool` converts its argument to a boolean value.", + ParamDescription: []string{""}, + }, + "tolist": { + Description: "`tolist` converts its argument to a list value.", + ParamDescription: []string{""}, + }, + "tomap": { + Description: "`tomap` converts its argument to a map value.", + ParamDescription: []string{""}, + }, + "tonumber": { + Description: "`tonumber` converts its argument to a number value.", + ParamDescription: []string{""}, + }, + "toset": { + Description: "`toset` converts its argument to a set value.", + ParamDescription: []string{""}, + }, + "tostring": { + Description: "`tostring` converts its argument to a string value.", + ParamDescription: []string{""}, + }, + "transpose": { + Description: "`transpose` takes a map of lists of strings and swaps the keys and values to produce a new map of lists of strings.", + ParamDescription: []string{""}, + }, + "trim": { + Description: "`trim` removes the specified set of characters from the start and end of the given string.", + ParamDescription: []string{ + "", + "A string containing all of the characters to trim. Each character is taken separately, so the order of characters is insignificant.", + }, + }, + "trimprefix": { + Description: "`trimprefix` removes the specified prefix from the start of the given string. 
If the string does not start with the prefix, the string is returned unchanged.", + ParamDescription: []string{"", ""}, + }, + "trimspace": { + Description: "`trimspace` removes any space characters from the start and end of the given string.", + ParamDescription: []string{""}, + }, + "trimsuffix": { + Description: "`trimsuffix` removes the specified suffix from the end of the given string.", + ParamDescription: []string{"", ""}, + }, + "try": { + Description: "`try` evaluates all of its argument expressions in turn and returns the result of the first one that does not produce any errors.", + ParamDescription: []string{""}, + }, + "type": { + Description: "`type` returns the type of a given value.", + ParamDescription: []string{""}, + }, + "upper": { + Description: "`upper` converts all cased letters in the given string to uppercase.", + ParamDescription: []string{""}, + }, + "urlencode": { + Description: "`urlencode` applies URL encoding to a given string.", + ParamDescription: []string{""}, + }, + "urldecode": { + Description: "`urldecode` applies URL decoding to a given encoded string.", + ParamDescription: []string{""}, + }, + "uuid": { + Description: "`uuid` generates a unique identifier string.", + ParamDescription: []string{}, + }, + "uuidv5": { + Description: "`uuidv5` generates a _name-based_ UUID, as described in [RFC 4122 section 4.3](https://tools.ietf.org/html/rfc4122#section-4.3), also known as a \"version 5\" UUID.", + ParamDescription: []string{"", ""}, + }, + "values": { + Description: "`values` takes a map and returns a list containing the values of the elements in that map.", + ParamDescription: []string{""}, + }, + "yamldecode": { + Description: "`yamldecode` parses a string as a subset of YAML, and produces a representation of its value.", + ParamDescription: []string{""}, + }, + "yamlencode": { + Description: "`yamlencode` encodes a given value to a string using [YAML 1.2](https://yaml.org/spec/1.2/spec.html) block syntax.", + 
ParamDescription: []string{""}, + }, + "zipmap": { + Description: "`zipmap` constructs a map from a list of keys and a corresponding list of values.", + ParamDescription: []string{"", ""}, + }, +} + +// WithDescription looks up the description for a given function and uses +// go-cty's WithNewDescriptions to replace the function's description and +// parameter descriptions. +func WithDescription(name string, f function.Function) function.Function { + desc, ok := DescriptionList[name] + if !ok { + return f + } + + // Will panic if ParamDescription doesn't match the number of parameters + // the function expects + return f.WithNewDescriptions(desc.Description, desc.ParamDescription) +} diff --git a/pkg/lang/funcs/encoding.go b/pkg/lang/funcs/encoding.go new file mode 100644 index 00000000000..084d3a6aced --- /dev/null +++ b/pkg/lang/funcs/encoding.go @@ -0,0 +1,335 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "io" + "log" + "net/url" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "golang.org/x/text/encoding/ianaindex" +) + +// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. 
+var Base64DecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + str, strMarks := args[0].Unmark() + s := str.AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data %s", redactIfSensitive(s, strMarks)) + } + if !utf8.Valid([]byte(sDec)) { + log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", redactIfSensitive(sDec, strMarks)) + return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") + } + return cty.StringVal(string(sDec)).WithMarks(strMarks), nil + }, +}) + +// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. +var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// TextEncodeBase64Func constructs a function that encodes a string to a target encoding and then to a base64 sequence. 
+var TextEncodeBase64Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "string", + Type: cty.String, + }, + { + Name: "encoding", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + encoding, err := ianaindex.IANA.Encoding(args[1].AsString()) + if err != nil || encoding == nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this OpenTofu version", args[1].AsString()) + } + + encName, err := ianaindex.IANA.Name(encoding) + if err != nil { // would be weird, since we just read this encoding out + encName = args[1].AsString() + } + + encoder := encoding.NewEncoder() + encodedInput, err := encoder.Bytes([]byte(args[0].AsString())) + if err != nil { + // The string representations of "err" disclose implementation + // details of the underlying library, and the main error we might + // like to return a special message for is unexported as + // golang.org/x/text/encoding/internal.RepertoireError, so this + // is just a generic error message for now. + // + // We also don't include the string itself in the message because + // it can typically be very large, contain newline characters, + // etc. + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains characters that cannot be represented in %s", encName) + } + + return cty.StringVal(base64.StdEncoding.EncodeToString(encodedInput)), nil + }, +}) + +// TextDecodeBase64Func constructs a function that decodes a base64 sequence to a target encoding. 
+var TextDecodeBase64Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "source", + Type: cty.String, + }, + { + Name: "encoding", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + encoding, err := ianaindex.IANA.Encoding(args[1].AsString()) + if err != nil || encoding == nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this OpenTofu version", args[1].AsString()) + } + + encName, err := ianaindex.IANA.Name(encoding) + if err != nil { // would be weird, since we just read this encoding out + encName = args[1].AsString() + } + + s := args[0].AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + switch err := err.(type) { + case base64.CorruptInputError: + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given value is has an invalid base64 symbol at offset %d", int(err)) + default: + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid source string: %w", err) + } + + } + + decoder := encoding.NewDecoder() + decoded, err := decoder.Bytes(sDec) + if err != nil || bytes.ContainsRune(decoded, '�') { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains symbols that are not defined for %s", encName) + } + + return cty.StringVal(string(decoded)), nil + }, +}) + +// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in +// Base64 encoding. 
+var Base64GzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: %w", err) + } + if err := gz.Flush(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: %w", err) + } + if err := gz.Close(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: %w", err) + } + return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil + }, +}) + +// Base64GunzipFunc constructs a function that Bae64 decodes a string and decompresses the result with gunzip. +var Base64GunzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + str, strMarks := args[0].Unmark() + s := str.AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data %s", redactIfSensitive(s, strMarks)) + } + sDecBuffer := bytes.NewReader(sDec) + gzipReader, err := gzip.NewReader(sDecBuffer) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to gunzip bytestream: %w", err) + } + gunzip, err := io.ReadAll(gzipReader) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to read gunzip raw data: %w", err) + } + + return cty.StringVal(string(gunzip)), nil + }, +}) + +// URLEncodeFunc constructs a function that applies URL encoding to a given string. 
+var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// URLDecodeFunc constructs a function that applies URL decoding to a given encoded string. +var URLDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + query, err := url.QueryUnescape(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode URL '%s': %v", query, err) + } + + return cty.StringVal(query), nil + }, +}) + +// Base64Decode decodes a string containing a base64 sequence. +// +// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the OpenTofu language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the OpenTofu language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. 
func Base64Encode(str cty.Value) (cty.Value, error) {
	return Base64EncodeFunc.Call([]cty.Value{str})
}

// Base64Gzip compresses a string with gzip and then encodes the result in
// Base64 encoding.
//
// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
//
// Strings in the OpenTofu language are sequences of unicode characters rather
// than bytes, so this function will first encode the characters from the string
// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
func Base64Gzip(str cty.Value) (cty.Value, error) {
	return Base64GzipFunc.Call([]cty.Value{str})
}

// Base64Gunzip decodes a Base64-encoded string and uncompresses the result with gzip.
//
// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
func Base64Gunzip(str cty.Value) (cty.Value, error) {
	return Base64GunzipFunc.Call([]cty.Value{str})
}

// URLEncode applies URL encoding to a given string.
//
// This function identifies characters in the given string that would have a
// special meaning when included as a query string argument in a URL and
// escapes them using RFC 3986 "percent encoding".
//
// If the given string contains non-ASCII characters, these are first encoded as
// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
func URLEncode(str cty.Value) (cty.Value, error) {
	return URLEncodeFunc.Call([]cty.Value{str})
}

// URLDecode decodes a URL encoded string.
//
// This function decodes the given string that has been encoded.
//
// If the given string contains percent-encoded non-ASCII characters, the
// decoded bytes are interpreted as UTF-8 to produce the resulting string.
func URLDecode(str cty.Value) (cty.Value, error) {
	return URLDecodeFunc.Call([]cty.Value{str})
}

// TextEncodeBase64 applies Base64 encoding to a string that was encoded before with a target encoding.
+// +// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// First step is to apply the target IANA encoding (e.g. UTF-16LE). +// Strings in the OpenTofu language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func TextEncodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextEncodeBase64Func.Call([]cty.Value{str, enc}) +} + +// TextDecodeBase64 decodes a string containing a base64 sequence whereas a specific encoding of the string is expected. +// +// OpenTofu uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the OpenTofu language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// the target encoding. +func TextDecodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextDecodeBase64Func.Call([]cty.Value{str, enc}) +} diff --git a/pkg/lang/funcs/encoding_test.go b/pkg/lang/funcs/encoding_test.go new file mode 100644 index 00000000000..9f4fb34e9db --- /dev/null +++ b/pkg/lang/funcs/encoding_test.go @@ -0,0 +1,533 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package funcs

import (
	"fmt"
	"testing"

	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/lang/marks"
)

// TestBase64Decode exercises Base64Decode with valid input, a sensitive-marked
// input (marks must be preserved on the result), and error cases.
func TestBase64Decode(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"),
			cty.StringVal("abc123!?$*&()'-=@~"),
			false,
		},
		{
			cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+").Mark(marks.Sensitive),
			cty.StringVal("abc123!?$*&()'-=@~").Mark(marks.Sensitive),
			false,
		},
		{ // Invalid base64 data decoding
			cty.StringVal("this-is-an-invalid-base64-data"),
			cty.UnknownVal(cty.String),
			true,
		},
		{ // Invalid utf-8
			cty.StringVal("\xc3\x28"),
			cty.UnknownVal(cty.String),
			true,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("base64decode(%#v)", test.String), func(t *testing.T) {
			got, err := Base64Decode(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestBase64Decode_error pins the exact error messages, including redaction
// of sensitive-marked inputs.
func TestBase64Decode_error(t *testing.T) {
	tests := map[string]struct {
		String  cty.Value
		WantErr string
	}{
		"invalid base64": {
			cty.StringVal("dfg"),
			`failed to decode base64 data "dfg"`,
		},
		"sensitive invalid base64": {
			cty.StringVal("dfg").Mark(marks.Sensitive),
			`failed to decode base64 data (sensitive value)`,
		},
		"invalid utf-8": {
			cty.StringVal("whee"),
			"the result of decoding the provided string is not valid UTF-8",
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			_, err := Base64Decode(test.String)

			if err == nil {
				t.Fatal("succeeded; want error")
			}

			if err.Error() != test.WantErr {
				t.Errorf("wrong error result\ngot: %#v\nwant: %#v", err.Error(), test.WantErr)
			}
		})
	}
}

// TestBase64Encode exercises Base64Encode round-tripping a plain string.
func TestBase64Encode(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("abc123!?$*&()'-=@~"),
			cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("base64encode(%#v)", test.String), func(t *testing.T) {
			got, err := Base64Encode(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestBase64Gzip checks Base64Gzip against a known gzip+base64 output.
func TestBase64Gzip(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("test"),
			cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("base64gzip(%#v)", test.String), func(t *testing.T) {
			got, err := Base64Gzip(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestBase64Gunzip is the inverse of TestBase64Gzip.
func TestBase64Gunzip(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"),
			cty.StringVal("test"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("base64gunzip(%#v)", test.String), func(t *testing.T) {
			got, err := Base64Gunzip(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestURLEncode exercises URLEncode with reserved characters.
func TestURLEncode(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("abc123-_"),
			cty.StringVal("abc123-_"),
			false,
		},
		{
			cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"),
			cty.StringVal("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz"),
			false,
		},
		{
			cty.StringVal("mailto:email?subject=this+is+my+subject"),
			cty.StringVal("mailto%3Aemail%3Fsubject%3Dthis%2Bis%2Bmy%2Bsubject"),
			false,
		},
		{
			cty.StringVal("foo/bar"),
			cty.StringVal("foo%2Fbar"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("urlencode(%#v)", test.String), func(t *testing.T) {
			got, err := URLEncode(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestURLDecode exercises URLDecode, including malformed percent escapes
// (error cases) and multi-byte UTF-8 sequences.
func TestURLDecode(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("abc123-_"),
			cty.StringVal("abc123-_"),
			false,
		},
		{
			cty.StringVal("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz"),
			cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"),
			false,
		},
		{
			cty.StringVal("mailto%3Aemail%3Fsubject%3Dthis%2Bis%2Bmy%2Bsubject"),
			cty.StringVal("mailto:email?subject=this+is+my+subject"),
			false,
		},
		{
			cty.StringVal("foo%2Fbar"),
			cty.StringVal("foo/bar"),
			false,
		},
		{
			cty.StringVal("foo% bar"),
			cty.UnknownVal(cty.String),
			true,
		},
		{
			cty.StringVal("foo%2 bar"),
			cty.UnknownVal(cty.String),
			true,
		},
		{
			cty.StringVal("%GGfoo%2bar"),
			cty.UnknownVal(cty.String),
			true,
		},
		{
			cty.StringVal("foo%00, bar!"),
			cty.StringVal("foo\x00, bar!"),
			false,
		},
		{
			cty.StringVal("hello%20%E4%B8%96%E7%95%8C"), // Unicode character support
			cty.StringVal("hello 世界"),
			false,
		},
		{
			cty.StringVal("hello%20%D8%AF%D9%86%DB%8C%D8%A7"), // Unicode character support
			cty.StringVal("hello دنیا"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("urldecode(%#v)", test.String), func(t *testing.T) {
			got, err := URLDecode(test.String)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestURLEncodeDecode verifies that URLDecode inverts URLEncode for
// representative inputs.
func TestURLEncodeDecode(t *testing.T) {
	tests := []struct {
		String cty.Value
		Want   cty.Value
		Err    bool
	}{
		{
			cty.StringVal("abc123-_"),
			cty.StringVal("abc123-_"),
			false,
		},
		{
			cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"),
			cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"),
			false,
		},
		{
			cty.StringVal("mailto:email?subject=this+is+my+subject"),
			cty.StringVal("mailto:email?subject=this+is+my+subject"),
			false,
		},
		{
			cty.StringVal("foo/bar"),
			cty.StringVal("foo/bar"),
			false,
		},
		{
			cty.StringVal("foo%00, bar!"),
			cty.StringVal("foo%00, bar!"),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("url encode decode(%#v)", test.String), func(t *testing.T) {
			encoded, err := URLEncode(test.String)
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			got, err := URLDecode(encoded)

			if test.Err {
				if err == nil {
					t.Fatal("succeeded; want error")
				}
				return
			} else if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			if !got.RawEquals(test.Want) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}

// TestBase64TextEncode exercises TextEncodeBase64 across several IANA
// encodings, including unknown encodings and unrepresentable characters.
func TestBase64TextEncode(t *testing.T) {
	tests := []struct {
		String   cty.Value
		Encoding cty.Value
		Want     cty.Value
		Err      string
	}{
		{
			cty.StringVal("abc123!?$*&()'-=@~"),
			cty.StringVal("UTF-8"),
			cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"),
			``,
		},
		{
			cty.StringVal("abc123!?$*&()'-=@~"),
			cty.StringVal("UTF-16LE"),
			cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"),
			``,
		},
		{
			cty.StringVal("abc123!?$*&()'-=@~"),
cty.StringVal("CP936"), + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + ``, + }, + { + cty.StringVal("abc123!?$*&()'-=@~"), + cty.StringVal("NOT-EXISTS"), + cty.UnknownVal(cty.String).RefineNotNull(), + `"NOT-EXISTS" is not a supported IANA encoding name or alias in this OpenTofu version`, + }, + { + cty.StringVal("🤔"), + cty.StringVal("cp437"), + cty.UnknownVal(cty.String).RefineNotNull(), + `the given string contains characters that cannot be represented in IBM437`, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal("windows-1250"), + cty.UnknownVal(cty.String).RefineNotNull(), + ``, + }, + { + cty.StringVal("hello world"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String).RefineNotNull(), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("textencodebase64(%#v, %#v)", test.String, test.Encoding), func(t *testing.T) { + got, err := TextEncodeBase64(test.String, test.Encoding) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBase64TextDecode(t *testing.T) { + tests := []struct { + String cty.Value + Encoding cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + cty.StringVal("UTF-8"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"), + cty.StringVal("UTF-16LE"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + cty.StringVal("CP936"), + cty.StringVal("abc123!?$*&()'-=@~"), + ``, + }, + { + cty.StringVal("doesn't matter"), + cty.StringVal("NOT-EXISTS"), + cty.UnknownVal(cty.String).RefineNotNull(), + `"NOT-EXISTS" is not a 
supported IANA encoding name or alias in this OpenTofu version`, + }, + { + cty.StringVal(""), + cty.StringVal("cp437"), + cty.UnknownVal(cty.String).RefineNotNull(), + `the given value is has an invalid base64 symbol at offset 0`, + }, + { + cty.StringVal("gQ=="), // this is 0x81, which is not defined in windows-1250 + cty.StringVal("windows-1250"), + cty.StringVal("�"), + `the given string contains symbols that are not defined for windows-1250`, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal("windows-1250"), + cty.UnknownVal(cty.String).RefineNotNull(), + ``, + }, + { + cty.StringVal("YQBiAGMAMQAyADMAIQA/ACQAKgAmACgAKQAnAC0APQBAAH4A"), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String).RefineNotNull(), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("textdecodebase64(%#v, %#v)", test.String, test.Encoding), func(t *testing.T) { + got, err := TextDecodeBase64(test.String, test.Encoding) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/filesystem.go b/pkg/lang/funcs/filesystem.go new file mode 100644 index 00000000000..3e55c117264 --- /dev/null +++ b/pkg/lang/funcs/filesystem.go @@ -0,0 +1,529 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "encoding/base64" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "unicode/utf8" + + "github.com/bmatcuk/doublestar/v4" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + src, err := readFileBytes(baseDir, path, pathMarks) + if err != nil { + err = function.NewArgError(0, err) + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc).WithMarks(pathMarks), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead", redactIfSensitive(path, pathMarks)) + } + return cty.StringVal(string(src)).WithMarks(pathMarks), nil + } + }, + }) +} + +func templateMaxRecursionDepth() (int, error) { + envkey := "TF_TEMPLATE_RECURSION_DEPTH" + val := os.Getenv(envkey) + if val != "" { + i, err := strconv.Atoi(val) + if err != nil { + return -1, fmt.Errorf("invalid value for %s: %w", envkey, err) + } + return i, nil + } + return 1024, nil // Sane Default +} + +type ErrorTemplateRecursionLimit struct { + sources []string +} + +func (err ErrorTemplateRecursionLimit) Error() string { + trace := make([]string, 0) + maxTrace := 16 + + // Look for repetition in the first N sources + for _, source := range err.sources[:min(maxTrace, len(err.sources))] { + looped := false + for _, st := range trace { + if st == source { + // Repeated source, probably a loop. TF_LOG=debug will contain the full trace. + looped = true + break + } + } + + trace = append(trace, source) + + if looped { + break + } + } + + log.Printf("[DEBUG] Template Stack (%d): %s", len(err.sources)-1, err.sources[len(err.sources)-1]) + + return fmt.Sprintf("maximum recursion depth %d reached in %s ... ", len(err.sources)-1, strings.Join(trace, ", ")) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. 
+// +// As a special exception, a referenced template file may call the templatefile +// function, with a recursion depth limit providing an error when reached +func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { + return makeTemplateFileFuncImpl(baseDir, funcsCb, 0) +} +func makeTemplateFileFuncImpl(baseDir string, funcsCb func() map[string]function.Function, depth int) function.Function { + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(path string, marks cty.ValueMarks) (hcl.Expression, error) { + maxDepth, err := templateMaxRecursionDepth() + if err != nil { + return nil, err + } + if depth > maxDepth { + // Sources will unwind up the stack + return nil, ErrorTemplateRecursionLimit{} + } + + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. 
+ templateValue, err := File(baseDir, cty.StringVal(path).WithMarks(marks)) + if err != nil { + return nil, err + } + + // unmark the template ready to be handled + templateValue, _ = templateValue.Unmark() + + expr, diags := hclsyntax.ParseTemplate([]byte(templateValue.AsString()), path, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return nil, diags + } + + return expr, nil + } + + funcsCbDepth := func() map[string]function.Function { + givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems + funcs := make(map[string]function.Function, len(givenFuncs)) + for name, fn := range givenFuncs { + if name == "templatefile" { + // Increment the recursion depth counter + funcs[name] = makeTemplateFileFuncImpl(baseDir, funcsCb, depth+1) + continue + } + funcs[name] = fn + } + return funcs + } + + return function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + if !(args[0].IsKnown() && args[1].IsKnown()) { + return cty.DynamicPseudoType, nil + } + + // We'll render our template now to see what result type it produces. + // A template consisting only of a single interpolation an potentially + // return any type. + + pathArg, pathMarks := args[0].Unmark() + expr, err := loadTmpl(pathArg.AsString(), pathMarks) + if err != nil { + return cty.DynamicPseudoType, err + } + + // This is safe even if args[1] contains unknowns because the HCL + // template renderer itself knows how to short-circuit those. 
+ val, err := renderTemplate(expr, args[1], funcsCbDepth()) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + expr, err := loadTmpl(pathArg.AsString(), pathMarks) + if err != nil { + return cty.DynamicVal, err + } + + result, err := renderTemplate(expr, args[1], funcsCbDepth()) + return result.WithMarks(pathMarks), err + }, + }) +} + +// MakeFileExistsFunc constructs a function that takes a path +// and determines whether a file exists at that path +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %w", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False.WithMarks(pathMarks), nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", redactIfSensitive(path, pathMarks)) + } + + if fi.Mode().IsRegular() { + return cty.True.WithMarks(pathMarks), nil + } + + // The Go stat API only provides convenient access to whether it's + // a directory or not, so we need to do some bit fiddling to + // recognize other irregular file types. 
+ filename := redactIfSensitive(path, pathMarks) + fileType := fi.Mode().Type() + switch { + case (fileType & os.ModeDir) != 0: + err = function.NewArgErrorf(1, "%s is a directory, not a file", filename) + case (fileType & os.ModeDevice) != 0: + err = function.NewArgErrorf(1, "%s is a device node, not a regular file", filename) + case (fileType & os.ModeNamedPipe) != 0: + err = function.NewArgErrorf(1, "%s is a named pipe, not a regular file", filename) + case (fileType & os.ModeSocket) != 0: + err = function.NewArgErrorf(1, "%s is a unix domain socket, not a regular file", filename) + default: + // If it's not a type we recognize then we'll just return a + // generic error message. This should be very rare. + err = function.NewArgErrorf(1, "%s is not a regular file", filename) + + // Note: os.ModeSymlink should be impossible because we used + // os.Stat above, not os.Lstat. + } + + return cty.False, err + }, + }) +} + +// MakeFileSetFunc constructs a function that takes a glob pattern +// and enumerates a file set from that pattern +func MakeFileSetFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + AllowMarked: true, + }, + { + Name: "pattern", + Type: cty.String, + AllowMarked: true, + }, + }, + Type: function.StaticReturnType(cty.Set(cty.String)), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + pathArg, pathMarks := args[0].Unmark() + path := pathArg.AsString() + patternArg, patternMarks := args[1].Unmark() + pattern := patternArg.AsString() + + marks := []cty.ValueMarks{pathMarks, patternMarks} + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Join the path to the glob pattern, while ensuring the full + // pattern is canonical for the host OS. The joined path is + // automatically cleaned during this operation. 
+ pattern = filepath.Join(path, pattern) + + matches, err := doublestar.FilepathGlob(pattern) + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern %s: %w", redactIfSensitive(pattern, marks...), err) + } + + var matchVals []cty.Value + for _, match := range matches { + fi, err := os.Stat(match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat %s: %w", redactIfSensitive(match, marks...), err) + } + + if !fi.Mode().IsRegular() { + continue + } + + // Remove the path and file separator from matches. + match, err = filepath.Rel(path, match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match %s: %w", redactIfSensitive(match, marks...), err) + } + + // Replace any remaining file separators with forward slash (/) + // separators for cross-system compatibility. + match = filepath.ToSlash(match) + + matchVals = append(matchVals, cty.StringVal(match)) + } + + if len(matchVals) == 0 { + return cty.SetValEmpty(cty.String).WithMarks(marks...), nil + } + + return cty.SetVal(matchVals).WithMarks(marks...), nil + }, + }) +} + +// BasenameFunc constructs a function that takes a string containing a filesystem path +// and removes all except the last portion from it. +var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc constructs a function that takes a string containing a filesystem path +// and removes the last portion from it. 
+var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc constructs a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. +var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func openFile(baseDir, path string) (*os.File, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %w", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + return os.Open(path) +} + +func readFileBytes(baseDir, path string, marks cty.ValueMarks) ([]byte, error) { + f, err := openFile(baseDir, path) + if err != nil { + if os.IsNotExist(err) { + // An extra OpenTofu-specific hint for this situation + return nil, fmt.Errorf("no file exists at %s; this function works only with files 
that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource", redactIfSensitive(path, marks)) + } + return nil, err + } + defer f.Close() + + src, err := io.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileSet enumerates a set of files given a glob pattern +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { + fn := MakeFileSetFunc(baseDir) + return fn.Call([]cty.Value{path, pattern}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. 
+// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Dirname(path cty.Value) (cty.Value, error) { + return DirnameFunc.Call([]cty.Value{path}) +} + +// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with +// the current user's home directory path. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the leading segment in the path is not `~` then the given path is returned unmodified. 
+func Pathexpand(path cty.Value) (cty.Value, error) { + return PathExpandFunc.Call([]cty.Value{path}) +} diff --git a/pkg/lang/funcs/filesystem_test.go b/pkg/lang/funcs/filesystem_test.go new file mode 100644 index 00000000000..b24fd2a07e0 --- /dev/null +++ b/pkg/lang/funcs/filesystem_test.go @@ -0,0 +1,762 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "testing" + + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/kubegems/opentofu/pkg/lang/marks" +) + +func TestFile(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/icon.png").Mark(marks.Sensitive), + cty.NilVal, + `contents of (sensitive value) are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/missing").Mark(marks.Sensitive), + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("File(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := File(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestTemplateFile(t *testing.T) { + tests := []struct { + Path cty.Value + Vars cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.EmptyObjectVal, + cty.StringVal("Hello World"), + ``, + }, + { + cty.StringVal("testdata/icon.png"), + cty.EmptyObjectVal, + cty.NilVal, + `contents of "testdata/icon.png" are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. 
filemd5, filesha256) to obtain file hashing results instead`, + }, + { + cty.StringVal("testdata/missing"), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at "testdata/missing"; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/secrets.txt").Mark(marks.Sensitive), + cty.EmptyObjectVal, + cty.NilVal, + `no file exists at (sensitive value); this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("Jodie"), + }), + cty.StringVal("Hello, Jodie!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.MapVal(map[string]cty.Value{ + "name!": cty.StringVal("Jodie"), + }), + cty.NilVal, + `invalid template variable name "name!": must start with a letter, followed by zero or more letters, digits, and underscores`, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("Jimbo"), + }), + cty.StringVal("Hello, Jimbo!"), + ``, + }, + { + cty.StringVal("testdata/hello.tmpl"), + cty.EmptyObjectVal, + cty.NilVal, + `vars map does not contain key "name", referenced at testdata/hello.tmpl:1,10-14`, + }, + { + cty.StringVal("testdata/func.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("The items are a, b, c"), + ``, + }, + { + cty.StringVal("testdata/recursive.tmpl"), + cty.MapValEmpty(cty.String), + cty.NilVal, + `maximum recursion depth 1024 reached in 
testdata/recursive.tmpl:1,3-16, testdata/recursive.tmpl:1,3-16 ... `, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("- a\n- b\n- c\n"), + ``, + }, + { + cty.StringVal("testdata/list.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.True, + }), + cty.NilVal, + `testdata/list.tmpl:1,13-17: Iteration over non-iterable value; A value of type bool cannot be used as the collection in a 'for' expression.`, + }, + { + cty.StringVal("testdata/bare.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True, + }), + cty.True, // since this template contains only an interpolation, its true value shines through + ``, + }, + { + // write to a sensitive file path that exists + cty.StringVal("testdata/hello.txt").Mark(marks.Sensitive), + cty.EmptyObjectVal, + cty.StringVal("Hello World").Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("testdata/bare.tmpl"), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True.Mark(marks.Sensitive), + }), + cty.True.Mark(marks.Sensitive), + ``, + }, + } + + templateFileFn := MakeTemplateFileFunc(".", func() map[string]function.Function { + return map[string]function.Function{ + "join": stdlib.JoinFunc, + "templatefile": MakeFileFunc(".", false), // just a placeholder, since templatefile itself overrides this + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("TemplateFile(%#v, %#v)", test.Path, test.Vars), func(t *testing.T) { + got, err := templateFileFn.Call([]cty.Value{test.Path, test.Vars}) + + var argErr function.ArgError + if errors.As(err, &argErr) { + if argErr.Index < 0 || argErr.Index > 1 { + t.Errorf("ArgError index %d is out of range for templatefile (must be 0 or 1)", argErr.Index) + } + } + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + 
t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func Test_templateMaxRecursionDepth(t *testing.T) { + tests := []struct { + Input string + Want int + Err string + }{ + { + "", + 1024, + ``, + }, { + "4096", + 4096, + ``, + }, { + "apple", + -1, + `invalid value for TF_TEMPLATE_RECURSION_DEPTH: strconv.Atoi: parsing "apple": invalid syntax`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("templateMaxRecursion(%s)", test.Input), func(t *testing.T) { + os.Setenv("TF_TEMPLATE_RECURSION_DEPTH", test.Input) + got, err := templateMaxRecursionDepth() + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if got != test.Want { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileExists(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.BoolVal(true), + ``, + }, + { + cty.StringVal(""), + cty.BoolVal(false), + `"." 
is a directory, not a file`, + }, + { + cty.StringVal("testdata").Mark(marks.Sensitive), + cty.BoolVal(false), + `(sensitive value) is a directory, not a file`, + }, + { + cty.StringVal("testdata/missing"), + cty.BoolVal(false), + ``, + }, + { + cty.StringVal("testdata/unreadable/foobar"), + cty.BoolVal(false), + `failed to stat "testdata/unreadable/foobar"`, + }, + { + cty.StringVal("testdata/unreadable/foobar").Mark(marks.Sensitive), + cty.BoolVal(false), + `failed to stat (sensitive value)`, + }, + } + + // Ensure "unreadable" directory cannot be listed during the test run + fi, err := os.Lstat("testdata/unreadable") + if err != nil { + t.Fatal(err) + } + os.Chmod("testdata/unreadable", 0000) + defer func(mode os.FileMode) { + os.Chmod("testdata/unreadable", mode) + }(fi.Mode()) + + for _, test := range tests { + t.Run(fmt.Sprintf("FileExists(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileExists(".", test.Path) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileSet(t *testing.T) { + tests := []struct { + Path cty.Value + Pattern cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("."), + cty.StringVal("testdata*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("{testdata,missing}"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + 
cty.StringVal("."), + cty.StringVal("*/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("testdata/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("*/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("**/hello.{tmpl,txt}"), + cty.SetVal([]cty.Value{ + cty.StringVal("testdata/hello.tmpl"), + cty.StringVal("testdata/hello.txt"), + }), + ``, + }, + { + cty.StringVal("."), + cty.StringVal("["), + cty.SetValEmpty(cty.String), + `failed to glob pattern "[": syntax error in pattern`, + }, + { + cty.StringVal("."), + 
cty.StringVal("[").Mark(marks.Sensitive), + cty.SetValEmpty(cty.String), + `failed to glob pattern (sensitive value): syntax error in pattern`, + }, + { + cty.StringVal("."), + cty.StringVal("\\"), + cty.SetValEmpty(cty.String), + `failed to glob pattern "\\": syntax error in pattern`, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("missing*"), + cty.SetValEmpty(cty.String), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("*.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.txt"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello.???"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.txt"), + }), + ``, + }, + { + cty.StringVal("testdata"), + cty.StringVal("hello*"), + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileSet(\".\", %#v, %#v)", test.Path, test.Pattern), func(t *testing.T) { + got, err := FileSet(".", test.Path, test.Pattern) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestFileBase64(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("SGVsbG8gV29ybGQ="), + false, + }, + { + cty.StringVal("testdata/icon.png"), + 
cty.StringVal("iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAINSURBVHgBjZNdSBRRFMf/M7tttl9zsdBld7EbZdi6xRZR0UPUU0UvPaeLC71JHxCFWg+2RAkhNRVaSLELGRRIQYX41gci9Bz2UMSOkWgY7szO6vqxznVmxGFdd1f/cGDmnnN+5z/DuUAFERohetBKNbZyjQ5ndduxkPpay2vtC7ZabNseGJuTJ2VsJG8wfDV0sD7dc4ewia8ww3jef6g+5Qk0xorrOWtqMHzS48onoqenacvZ//C6tHXwJwM1+DAsSNI/R1wdH01an+D1h2/7dywkBrt/kRORLLY6WEl3R0MzOLxvlgyOkPNcVQ03r0595ldSjEbPLeKKuAtvvxCUk5G72deI5gstYOB2Gmf8arLpzDz6buWg5Kpx6vJejHx3Wz/s2w8XLokHoDjvYujjJ7RejFpQe+GEOp+GjtisDuPRlfSR98M5jE9twZ5wE14k2iB4PWadoqilAUqWh+DWTNDT9ixeDVXhz+INdFxrRTnxhS+9A3rDJG+CLFdBPyppDcCwL7iZCSqEbALASV1Jpz7dZgJWQFrJBiWjovf5S32B2NiahLFlkSMNqQdxmmY/fcyI/seU9b95xwzJSobd6+5hdaHjKbe+dKt9XPEEA7Q7sNR5vTlHzXTtQ/P8vvhM/i39jc9MjIqF9Vwpm8TXQPO8Pabb7CREkNNy5pHdkRVlSdr4MhWDCKWkUs0yuFTBNunfXEAAAAAASUVORK5CYII="), + false, + }, + { + cty.StringVal("testdata/missing"), + cty.NilVal, + true, // no file exists + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("FileBase64(\".\", %#v)", test.Path), func(t *testing.T) { + got, err := FileBase64(".", test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestBasename(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal("hello.txt"), + cty.StringVal("hello.txt"), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Basename(%#v)", test.Path), func(t *testing.T) { + got, err := Basename(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected 
error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestDirname(t *testing.T) { + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("testdata/hello.txt"), + cty.StringVal("testdata"), + false, + }, + { + cty.StringVal("testdata/foo/hello.txt"), + cty.StringVal("testdata/foo"), + false, + }, + { + cty.StringVal("hello.txt"), + cty.StringVal("."), + false, + }, + { + cty.StringVal(""), + cty.StringVal("."), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { + got, err := Dirname(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestPathExpand(t *testing.T) { + homePath, err := homedir.Dir() + if err != nil { + t.Fatalf("Error getting home directory: %v", err) + } + + tests := []struct { + Path cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("~/test-file"), + cty.StringVal(filepath.Join(homePath, "test-file")), + false, + }, + { + cty.StringVal("~/another/test/file"), + cty.StringVal(filepath.Join(homePath, "another/test/file")), + false, + }, + { + cty.StringVal("/root/file"), + cty.StringVal("/root/file"), + false, + }, + { + cty.StringVal("/"), + cty.StringVal("/"), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("Dirname(%#v)", test.Path), func(t *testing.T) { + got, err := Pathexpand(test.Path) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/lang/funcs/number.go 
b/pkg/lang/funcs/number.go new file mode 100644 index 00000000000..633ef21ebca --- /dev/null +++ b/pkg/lang/funcs/number.go @@ -0,0 +1,182 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "math" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// LogFunc constructs a function that returns the logarithm of a given number in a given base. +var LogFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "base", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + + var base float64 + if err := gocty.FromCtyValue(args[1], &base); err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil + }, +}) + +// PowFunc constructs a function that returns the given number raised to the given power. 
+var PowFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + { + Name: "power", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num float64 + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + + var power float64 + if err := gocty.FromCtyValue(args[1], &power); err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.NumberFloatVal(math.Pow(num, power)), nil + }, +}) + +// SignumFunc constructs a function that determines the sign of a number, +// returning -1, 0, or +1 to represent the sign. +var SignumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var num int + if err := gocty.FromCtyValue(args[0], &num); err != nil { + return cty.UnknownVal(cty.String), err + } + switch { + case num < 0: + return cty.NumberIntVal(-1), nil + case num > 0: + return cty.NumberIntVal(+1), nil + default: + return cty.NumberIntVal(0), nil + } + }, +}) + +// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base. 
+var ParseIntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "number", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + { + Name: "base", + Type: cty.Number, + AllowMarked: true, + }, + }, + + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].Type().Equals(cty.String) { + return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName()) + } + return cty.Number, nil + }, + RefineResult: refineNotNull, + + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var numstr string + var base int + var err error + + numArg, numMarks := args[0].Unmark() + if err = gocty.FromCtyValue(numArg, &numstr); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(0, err) + } + + baseArg, baseMarks := args[1].Unmark() + if err = gocty.FromCtyValue(baseArg, &base); err != nil { + return cty.UnknownVal(cty.Number), function.NewArgError(1, err) + } + + if base < 2 || base > 62 { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 1, + "base must be a whole number between 2 and 62 inclusive", + ) + } + + num, ok := (&big.Int{}).SetString(numstr, base) + if !ok { + return cty.UnknownVal(cty.Number), function.NewArgErrorf( + 0, + "cannot parse %s as a base %s integer", + redactIfSensitive(numstr, numMarks), + redactIfSensitive(base, baseMarks), + ) + } + + parsedNum := cty.NumberVal((&big.Float{}).SetInt(num)).WithMarks(numMarks, baseMarks) + + return parsedNum, nil + }, +}) + +// Log returns the logarithm of a given number in a given base. +func Log(num, base cty.Value) (cty.Value, error) { + return LogFunc.Call([]cty.Value{num, base}) +} + +// Pow returns the given number raised to the given power. +func Pow(num, power cty.Value) (cty.Value, error) { + return PowFunc.Call([]cty.Value{num, power}) +} + +// Signum determines the sign of a number, returning a number between -1 and +// 1 to represent the sign. 
+func Signum(num cty.Value) (cty.Value, error) { + return SignumFunc.Call([]cty.Value{num}) +} + +// ParseInt parses a string argument and returns an integer of the specified base. +func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { + return ParseIntFunc.Call([]cty.Value{num, base}) +} diff --git a/pkg/lang/funcs/number_test.go b/pkg/lang/funcs/number_test.go new file mode 100644 index 00000000000..bba23501621 --- /dev/null +++ b/pkg/lang/funcs/number_test.go @@ -0,0 +1,389 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestLog(t *testing.T) { + tests := []struct { + Num cty.Value + Base cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(10), + cty.NumberFloatVal(0), + false, + }, + { + cty.NumberFloatVal(10), + cty.NumberFloatVal(10), + cty.NumberFloatVal(1), + false, + }, + + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(10), + cty.NegativeInfinity, + false, + }, + { + cty.NumberFloatVal(10), + cty.NumberFloatVal(0), + cty.NumberFloatVal(-0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("log(%#v, %#v)", test.Num, test.Base), func(t *testing.T) { + got, err := Log(test.Num, test.Base) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestPow(t *testing.T) { + tests := []struct { + Num cty.Value + Power cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(0), + cty.NumberFloatVal(1), + false, + }, + { + cty.NumberFloatVal(1), + cty.NumberFloatVal(1), + 
cty.NumberFloatVal(1), + false, + }, + + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(0), + cty.NumberFloatVal(1), + false, + }, + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(1), + cty.NumberFloatVal(2), + false, + }, + { + cty.NumberFloatVal(3), + cty.NumberFloatVal(2), + cty.NumberFloatVal(9), + false, + }, + { + cty.NumberFloatVal(-3), + cty.NumberFloatVal(2), + cty.NumberFloatVal(9), + false, + }, + { + cty.NumberFloatVal(2), + cty.NumberFloatVal(-2), + cty.NumberFloatVal(0.25), + false, + }, + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(2), + cty.NumberFloatVal(0), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("pow(%#v, %#v)", test.Num, test.Power), func(t *testing.T) { + got, err := Pow(test.Num, test.Power) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestSignum(t *testing.T) { + tests := []struct { + Num cty.Value + Want cty.Value + Err bool + }{ + { + cty.NumberFloatVal(0), + cty.NumberFloatVal(0), + false, + }, + { + cty.NumberFloatVal(12), + cty.NumberFloatVal(1), + false, + }, + { + cty.NumberFloatVal(-29), + cty.NumberFloatVal(-1), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("signum(%#v)", test.Num), func(t *testing.T) { + got, err := Signum(test.Num) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestParseInt(t *testing.T) { + tests := []struct { + Num cty.Value + Base cty.Value + Want cty.Value + Err string + }{ + { + cty.StringVal("128"), + cty.NumberIntVal(10), + cty.NumberIntVal(128), + ``, + }, + { + 
cty.StringVal("128").Mark(marks.Sensitive), + cty.NumberIntVal(10), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("128"), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("128").Mark(marks.Sensitive), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).Mark(marks.Sensitive), + ``, + }, + { + cty.StringVal("128").Mark("boop"), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.NumberIntVal(128).WithMarks(cty.NewValueMarks("boop", marks.Sensitive)), + ``, + }, + { + cty.StringVal("-128"), + cty.NumberIntVal(10), + cty.NumberIntVal(-128), + ``, + }, + { + cty.StringVal("00128"), + cty.NumberIntVal(10), + cty.NumberIntVal(128), + ``, + }, + { + cty.StringVal("-00128"), + cty.NumberIntVal(10), + cty.NumberIntVal(-128), + ``, + }, + { + cty.StringVal("FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("ff00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("-FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(-65280), + ``, + }, + { + cty.StringVal("00FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(65280), + ``, + }, + { + cty.StringVal("-00FF00"), + cty.NumberIntVal(16), + cty.NumberIntVal(-65280), + ``, + }, + { + cty.StringVal("1011111011101111"), + cty.NumberIntVal(2), + cty.NumberIntVal(48879), + ``, + }, + { + cty.StringVal("aA"), + cty.NumberIntVal(62), + cty.NumberIntVal(656), + ``, + }, + { + cty.StringVal("Aa"), + cty.NumberIntVal(62), + cty.NumberIntVal(2242), + ``, + }, + { + cty.StringVal("999999999999999999999999999999999999999999999999999999999999"), + cty.NumberIntVal(10), + cty.MustParseNumberVal("999999999999999999999999999999999999999999999999999999999999"), + ``, + }, + { + cty.StringVal("FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "FF" as a base 10 integer`, + }, + { + cty.StringVal("FF").Mark(marks.Sensitive), + 
cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse (sensitive value) as a base 10 integer`, + }, + { + cty.StringVal("FF").Mark(marks.Sensitive), + cty.NumberIntVal(10).Mark(marks.Sensitive), + cty.UnknownVal(cty.Number), + `cannot parse (sensitive value) as a base (sensitive value) integer`, + }, + { + cty.StringVal("00FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "00FF" as a base 10 integer`, + }, + { + cty.StringVal("-00FF"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "-00FF" as a base 10 integer`, + }, + { + cty.NumberIntVal(2), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `first argument must be a string, not number`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(63), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(-1), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(1), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1"), + cty.NumberIntVal(0), + cty.UnknownVal(cty.Number), + `base must be a whole number between 2 and 62 inclusive`, + }, + { + cty.StringVal("1.2"), + cty.NumberIntVal(10), + cty.UnknownVal(cty.Number), + `cannot parse "1.2" as a base 10 integer`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("parseint(%#v, %#v)", test.Num, test.Base), func(t *testing.T) { + got, err := ParseInt(test.Num, test.Base) + + if test.Err != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git 
a/pkg/lang/funcs/redact.go b/pkg/lang/funcs/redact.go new file mode 100644 index 00000000000..f383c1ff53c --- /dev/null +++ b/pkg/lang/funcs/redact.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func redactIfSensitive(value interface{}, markses ...cty.ValueMarks) string { + if marks.Has(cty.DynamicVal.WithMarks(markses...), marks.Sensitive) { + return "(sensitive value)" + } + switch v := value.(type) { + case string: + return fmt.Sprintf("%q", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/pkg/lang/funcs/redact_test.go b/pkg/lang/funcs/redact_test.go new file mode 100644 index 00000000000..3685aa106e4 --- /dev/null +++ b/pkg/lang/funcs/redact_test.go @@ -0,0 +1,56 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestRedactIfSensitive(t *testing.T) { + testCases := map[string]struct { + value interface{} + marks []cty.ValueMarks + want string + }{ + "sensitive string": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "marked non-sensitive string": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks("boop")}, + want: `"foo"`, + }, + "sensitive string with other marks": { + value: "foo", + marks: []cty.ValueMarks{cty.NewValueMarks("boop"), cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "sensitive number": { + value: 12345, + marks: []cty.ValueMarks{cty.NewValueMarks(marks.Sensitive)}, + want: "(sensitive value)", + }, + "non-sensitive number": { + value: 12345, + marks: []cty.ValueMarks{}, + want: "12345", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := redactIfSensitive(tc.value, tc.marks...) + if got != tc.want { + t.Errorf("wrong result, got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/pkg/lang/funcs/refinements.go b/pkg/lang/funcs/refinements.go new file mode 100644 index 00000000000..75005d0ac3a --- /dev/null +++ b/pkg/lang/funcs/refinements.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "github.com/zclconf/go-cty/cty" +) + +func refineNotNull(b *cty.RefinementBuilder) *cty.RefinementBuilder { + return b.NotNull() +} diff --git a/pkg/lang/funcs/render_template.go b/pkg/lang/funcs/render_template.go new file mode 100644 index 00000000000..d1ca93f9cbb --- /dev/null +++ b/pkg/lang/funcs/render_template.go @@ -0,0 +1,75 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func renderTemplate(expr hcl.Expression, varsVal cty.Value, funcs map[string]function.Function) (cty.Value, error) { + if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { + return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time + } + + ctx := &hcl.EvalContext{ + Variables: varsVal.AsValueMap(), + Functions: funcs, + } + + // We require all of the variables to be valid HCL identifiers, because + // otherwise there would be no way to refer to them in the template + // anyway. Rejecting this here gives better feedback to the user + // than a syntax error somewhere in the template itself. + for n := range ctx.Variables { + if !hclsyntax.ValidIdentifier(n) { + // This error message intentionally doesn't describe _all_ of + // the different permutations that are technically valid as an + // HCL identifier, but rather focuses on what we might + // consider to be an "idiomatic" variable name. 
+ return cty.DynamicVal, function.NewArgErrorf(1, "invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n) + } + } + + // currFilename stores the filename of the template file, if any. + currFilename := expr.Range().Filename + + // We'll pre-check references in the template here so we can give a + // more specialized error message than HCL would by default, so it's + // clearer that this problem is coming from a templatefile/templatestring call. + for _, traversal := range expr.Variables() { + root := traversal.RootName() + referencedPos := fmt.Sprintf("%q", root) + if currFilename != templateStringFilename { + referencedPos = fmt.Sprintf("%q, referenced at %s", root, traversal[0].SourceRange()) + } + if _, ok := ctx.Variables[root]; !ok { + return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %s", referencedPos) + } + } + + val, diags := expr.Value(ctx) + if diags.HasErrors() { + for _, diag := range diags { + // Roll up recursive errors + if extra, ok := diag.Extra.(hclsyntax.FunctionCallDiagExtra); ok { + if extra.CalledFunctionName() == "templatefile" { + err := extra.FunctionCallError() + if err, ok := err.(ErrorTemplateRecursionLimit); ok { + return cty.DynamicVal, ErrorTemplateRecursionLimit{sources: append(err.sources, diag.Subject.String())} + } + } + } + } + return cty.DynamicVal, diags + } + + return val, nil +} diff --git a/pkg/lang/funcs/render_template_test.go b/pkg/lang/funcs/render_template_test.go new file mode 100644 index 00000000000..6a3c860d7bb --- /dev/null +++ b/pkg/lang/funcs/render_template_test.go @@ -0,0 +1,110 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func TestRenderTemplate(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + Vars cty.Value + Want cty.Value + Err string + }{ + "String interpolation with variable": { + hcl.StaticExpr(cty.StringVal("Hello, ${name}!"), hcl.Range{}), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("Jodie"), + }), + cty.StringVal("Hello, ${name}!"), + ``, + }, + "Looping through list": { + hcl.StaticExpr(cty.StringVal("Items: %{ for x in list ~} ${x} %{ endfor ~}"), hcl.Range{}), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + cty.StringVal("Items: %{ for x in list ~} ${x} %{ endfor ~}"), + ``, + }, + "Looping through map": { + hcl.StaticExpr(cty.StringVal("%{ for key, value in list ~} ${key}:${value} %{ endfor ~}"), hcl.Range{}), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ObjectVal(map[string]cty.Value{ + "item1": cty.StringVal("a"), + "item2": cty.StringVal("b"), + "item3": cty.StringVal("c"), + }), + }), + cty.StringVal("%{ for key, value in list ~} ${key}:${value} %{ endfor ~}"), + ``, + }, + "Invalid template variable name": { + hcl.StaticExpr(cty.StringVal("Hello, ${1}!"), hcl.Range{}), + cty.MapVal(map[string]cty.Value{ + "1": cty.StringVal("Jodie"), + }), + cty.NilVal, + `invalid template variable name "1": must start with a letter, followed by zero or more letters, digits, and underscores`, + }, + "Interpolation of a boolean value": { + hcl.StaticExpr(cty.StringVal("${val}"), hcl.Range{}), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True, + }), + cty.StringVal("${val}"), + ``, + }, + "Sensitive string template": { + hcl.StaticExpr(cty.StringVal("My password is 
1234").Mark(marks.Sensitive), hcl.Range{}), + cty.EmptyObjectVal, + cty.StringVal("My password is 1234").Mark(marks.Sensitive), + ``, + }, + "Sensitive template variable": { + hcl.StaticExpr(cty.StringVal("My password is ${pass}"), hcl.Range{}), + cty.ObjectVal(map[string]cty.Value{ + "pass": cty.StringVal("secret").Mark(marks.Sensitive), + }), + cty.StringVal("My password is ${pass}"), + ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + + got, err := renderTemplate(test.Expr, test.Vars, map[string]function.Function{}) + + if err != nil { + if test.Err == "" { + t.Fatalf("unexpected error: %s", err) + } else { + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } + } else if test.Err != "" { + t.Fatal("succeeded; want error") + } else { + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} diff --git a/pkg/lang/funcs/sensitive.go b/pkg/lang/funcs/sensitive.go new file mode 100644 index 00000000000..771f3373002 --- /dev/null +++ b/pkg/lang/funcs/sensitive.go @@ -0,0 +1,93 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// SensitiveFunc returns a value identical to its argument except that +// OpenTofu will consider it to be sensitive. +var SensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. 
+ return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val, _ := args[0].Unmark() + return val.Mark(marks.Sensitive), nil + }, +}) + +// NonsensitiveFunc takes a sensitive value and returns the same value without +// the sensitive marking, effectively exposing the value. +var NonsensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + v, m := args[0].Unmark() + delete(m, marks.Sensitive) // remove the sensitive marking + return v.WithMarks(m), nil + }, +}) + +// IsSensitiveFunc returns whether or not the value is sensitive. 
+var IsSensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.Bool, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.BoolVal(args[0].HasMark(marks.Sensitive)), nil + }, +}) + +func Sensitive(v cty.Value) (cty.Value, error) { + return SensitiveFunc.Call([]cty.Value{v}) +} + +func Nonsensitive(v cty.Value) (cty.Value, error) { + return NonsensitiveFunc.Call([]cty.Value{v}) +} + +func IsSensitive(v cty.Value) (cty.Value, error) { + return IsSensitiveFunc.Call([]cty.Value{v}) +} diff --git a/pkg/lang/funcs/sensitive_test.go b/pkg/lang/funcs/sensitive_test.go new file mode 100644 index 00000000000..ad94d66cd1b --- /dev/null +++ b/pkg/lang/funcs/sensitive_test.go @@ -0,0 +1,238 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestSensitive(t *testing.T) { + tests := []struct { + Input cty.Value + WantErr string + }{ + { + cty.NumberIntVal(1), + ``, + }, + { + // Unknown values stay unknown while becoming sensitive + cty.UnknownVal(cty.String), + ``, + }, + { + // Null values stay null while becoming sensitive + cty.NullVal(cty.String), + ``, + }, + { + // DynamicVal can be marked as sensitive + cty.DynamicVal, + ``, + }, + { + // The marking is shallow only + cty.ListVal([]cty.Value{cty.NumberIntVal(1)}), + ``, + }, + { + // A value already marked is allowed and stays marked + cty.NumberIntVal(1).Mark(marks.Sensitive), + ``, + }, + { + // A value with some non-standard mark gets "fixed" to be marked + // with the standard "sensitive" mark. 
(This situation occurring + // would imply an inconsistency/bug elsewhere, so we're just + // being robust about it here.) + cty.NumberIntVal(1).Mark("bloop"), + ``, + }, + { + // A value deep already marked is allowed and stays marked, + // _and_ we'll also mark the outer collection as sensitive. + cty.ListVal([]cty.Value{cty.NumberIntVal(1).Mark(marks.Sensitive)}), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("sensitive(%#v)", test.Input), func(t *testing.T) { + got, err := Sensitive(test.Input) + + if test.WantErr != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.HasMark(marks.Sensitive) { + t.Errorf("result is not marked sensitive") + } + + gotRaw, gotMarks := got.Unmark() + if len(gotMarks) != 1 { + // We're only expecting to have the "sensitive" mark we checked + // above. Any others are an error, even if they happen to + // appear alongside "sensitive". (We might change this rule + // if someday we decide to use marks for some additional + // unrelated thing in OpenTofu, but currently we assume that + // _all_ marks imply sensitive, and so returning any other + // marks would be confusing.) + t.Errorf("extraneous marks %#v", gotMarks) + } + + // Disregarding shallow marks, the result should have the same + // effective value as the input. 
+ wantRaw, _ := test.Input.Unmark() + if !gotRaw.RawEquals(wantRaw) { + t.Errorf("wrong unmarked result\ngot: %#v\nwant: %#v", got, wantRaw) + } + }) + } +} + +func TestNonsensitive(t *testing.T) { + tests := []struct { + Input cty.Value + WantErr string + }{ + { + cty.NumberIntVal(1).Mark(marks.Sensitive), + ``, + }, + { + cty.DynamicVal.Mark(marks.Sensitive), + ``, + }, + { + cty.UnknownVal(cty.String).Mark(marks.Sensitive), + ``, + }, + { + cty.NullVal(cty.EmptyObject).Mark(marks.Sensitive), + ``, + }, + { + // The inner sensitive remains afterwards + cty.ListVal([]cty.Value{cty.NumberIntVal(1).Mark(marks.Sensitive)}).Mark(marks.Sensitive), + ``, + }, + + // Passing a value that is already non-sensitive is not an error, + // as this function may be used with specific to ensure that all + // values are indeed non-sensitive + { + cty.NumberIntVal(1), + ``, + }, + { + cty.NullVal(cty.String), + ``, + }, + + // Unknown values may become sensitive once they are known, so we + // permit them to be marked nonsensitive. 
+ { + cty.DynamicVal, + ``, + }, + { + cty.UnknownVal(cty.String), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("nonsensitive(%#v)", test.Input), func(t *testing.T) { + got, err := Nonsensitive(test.Input) + + if test.WantErr != "" { + if err == nil { + t.Fatal("succeeded; want error") + } + if got, want := err.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if got.HasMark(marks.Sensitive) { + t.Errorf("result is still marked sensitive") + } + wantRaw, _ := test.Input.Unmark() + if !got.RawEquals(wantRaw) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Input) + } + }) + } +} + +func TestIsSensitive(t *testing.T) { + tests := []struct { + Input cty.Value + IsSensitive bool + }{ + { + cty.NumberIntVal(1).Mark(marks.Sensitive), + true, + }, + { + cty.NumberIntVal(1), + false, + }, + { + cty.DynamicVal.Mark(marks.Sensitive), + true, + }, + { + cty.DynamicVal, + false, + }, + { + cty.UnknownVal(cty.String).Mark(marks.Sensitive), + true, + }, + { + cty.UnknownVal(cty.String), + false, + }, + { + cty.NullVal(cty.EmptyObject).Mark(marks.Sensitive), + true, + }, + { + cty.NullVal(cty.EmptyObject), + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("issensitive(%#v)", test.Input), func(t *testing.T) { + got, err := IsSensitive(test.Input) + + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if got.Equals(cty.BoolVal(test.IsSensitive)).False() { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, cty.BoolVal(test.IsSensitive)) + } + }) + } +} diff --git a/pkg/lang/funcs/string.go b/pkg/lang/funcs/string.go new file mode 100644 index 00000000000..e3aa4fb13d8 --- /dev/null +++ b/pkg/lang/funcs/string.go @@ -0,0 +1,234 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "regexp" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// StartsWithFunc constructs a function that checks if a string starts with +// a specific prefix using strings.HasPrefix +var StartsWithFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowUnknown: true, + }, + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + prefix := args[1].AsString() + + if !args[0].IsKnown() { + // If the unknown value has a known prefix then we might be + // able to still produce a known result. + if prefix == "" { + // The empty string is a prefix of any string. + return cty.True, nil + } + if knownPrefix := args[0].Range().StringPrefix(); knownPrefix != "" { + if strings.HasPrefix(knownPrefix, prefix) { + return cty.True, nil + } + if len(knownPrefix) >= len(prefix) { + // If the prefix we're testing is no longer than the known + // prefix and it didn't match then the full string with + // that same prefix can't match either. 
+ return cty.False, nil + } + } + return cty.UnknownVal(cty.Bool), nil + } + + str := args[0].AsString() + + if strings.HasPrefix(str, prefix) { + return cty.True, nil + } + + return cty.False, nil + }, +}) + +// EndsWithFunc constructs a function that checks if a string ends with +// a specific suffix using strings.HasSuffix +var EndsWithFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "suffix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + str := args[0].AsString() + suffix := args[1].AsString() + + if strings.HasSuffix(str, suffix) { + return cty.True, nil + } + + return cty.False, nil + }, +}) + +// ReplaceFunc constructs a function that searches a given string for another +// given substring, and replaces each occurence with a given replacement string. +var ReplaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + { + Name: "replace", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + RefineResult: refineNotNull, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + replace := args[2].AsString() + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. 
+ if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { + re, err := regexp.Compile(substr[1 : len(substr)-1]) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(re.ReplaceAllString(str, replace)), nil + } + + return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil + }, +}) + +// StrContainsFunc searches a given string for another given substring, +// if found the function returns true, otherwise returns false. +var StrContainsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + + if strings.Contains(str, substr) { + return cty.True, nil + } + + return cty.False, nil + }, +}) + +// Replace searches a given string for another given substring, +// and replaces all occurences with a given replacement string. +func Replace(str, substr, replace cty.Value) (cty.Value, error) { + return ReplaceFunc.Call([]cty.Value{str, substr, replace}) +} + +func StrContains(str, substr cty.Value) (cty.Value, error) { + return StrContainsFunc.Call([]cty.Value{str, substr}) +} + +// This constant provides a placeholder value for filename indicating +// that no file is needed for templatestring. +const ( + templateStringFilename = "NoFileNeeded" +) + +// MakeTemplateStringFunc constructs a function that takes a string and +// an arbitrary object of named values and attempts to render that string +// as a template using HCL template syntax. 
+func MakeTemplateStringFunc(content string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "data", + Type: cty.String, + AllowMarked: true, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + } + loadTmpl := func(content string, marks cty.ValueMarks) (hcl.Expression, error) { + + expr, diags := hclsyntax.ParseTemplate([]byte(content), templateStringFilename, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return nil, diags + } + + return expr, nil + } + + return function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + if !(args[0].IsKnown() && args[1].IsKnown()) { + return cty.DynamicPseudoType, nil + } + + // We'll render our template now to see what result type it produces. + // A template consisting only of a single interpolation can potentially + // return any type. + dataArg, dataMarks := args[0].Unmark() + expr, err := loadTmpl(dataArg.AsString(), dataMarks) + if err != nil { + return cty.DynamicPseudoType, err + } + + // This is safe even if args[1] contains unknowns because the HCL + // template renderer itself knows how to short-circuit those. + val, err := renderTemplate(expr, args[1], funcsCb()) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + dataArg, dataMarks := args[0].Unmark() + expr, err := loadTmpl(dataArg.AsString(), dataMarks) + if err != nil { + return cty.DynamicVal, err + } + result, err := renderTemplate(expr, args[1], funcsCb()) + return result.WithMarks(dataMarks), err + }, + }) +} diff --git a/pkg/lang/funcs/string_test.go b/pkg/lang/funcs/string_test.go new file mode 100644 index 00000000000..29ffa17d15e --- /dev/null +++ b/pkg/lang/funcs/string_test.go @@ -0,0 +1,375 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package funcs + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func TestReplace(t *testing.T) { + tests := []struct { + String cty.Value + Substr cty.Value + Replace cty.Value + Want cty.Value + Err bool + }{ + { // Regular search and replace + cty.StringVal("hello"), + cty.StringVal("hel"), + cty.StringVal("bel"), + cty.StringVal("bello"), + false, + }, + { // Search string doesn't match + cty.StringVal("hello"), + cty.StringVal("nope"), + cty.StringVal("bel"), + cty.StringVal("hello"), + false, + }, + { // Regular expression + cty.StringVal("hello"), + cty.StringVal("/l/"), + cty.StringVal("L"), + cty.StringVal("heLLo"), + false, + }, + { + cty.StringVal("helo"), + cty.StringVal("/(l)/"), + cty.StringVal("$1$1"), + cty.StringVal("hello"), + false, + }, + { // Bad regexp + cty.StringVal("hello"), + cty.StringVal("/(l/"), + cty.StringVal("$1$1"), + cty.UnknownVal(cty.String), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("replace(%#v, %#v, %#v)", test.String, test.Substr, test.Replace), func(t *testing.T) { + got, err := Replace(test.String, test.Substr, test.Replace) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestStrContains(t *testing.T) { + tests := []struct { + String cty.Value + Substr cty.Value + Want cty.Value + Err bool + }{ + { + cty.StringVal("hello"), + cty.StringVal("hel"), + cty.BoolVal(true), + false, + }, + { + cty.StringVal("hello"), + cty.StringVal("lo"), + cty.BoolVal(true), + false, + }, + { + cty.StringVal("hello1"), + cty.StringVal("1"), + cty.BoolVal(true), + false, + }, + { + cty.StringVal("hello1"), + cty.StringVal("heo"), + 
cty.BoolVal(false), + false, + }, + { + cty.StringVal("hello1"), + cty.NumberIntVal(1), + cty.UnknownVal(cty.Bool), + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("includes(%#v, %#v)", test.String, test.Substr), func(t *testing.T) { + got, err := StrContains(test.String, test.Substr) + + if test.Err { + if err == nil { + t.Fatal("succeeded; want error") + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +func TestStartsWith(t *testing.T) { + tests := []struct { + String, Prefix cty.Value + Want cty.Value + WantError string + }{ + { + cty.StringVal("hello world"), + cty.StringVal("hello"), + cty.True, + ``, + }, + { + cty.StringVal("hey world"), + cty.StringVal("hello"), + cty.False, + ``, + }, + { + cty.StringVal(""), + cty.StringVal(""), + cty.True, + ``, + }, + { + cty.StringVal("a"), + cty.StringVal(""), + cty.True, + ``, + }, + { + cty.StringVal(""), + cty.StringVal("a"), + cty.False, + ``, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal("a"), + cty.UnknownVal(cty.Bool).RefineNotNull(), + ``, + }, + { + cty.UnknownVal(cty.String), + cty.StringVal(""), + cty.True, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + cty.StringVal(""), + cty.True, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + cty.StringVal("a"), + cty.False, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + cty.StringVal("ht"), + cty.True, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + cty.StringVal("https:"), + cty.True, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + cty.StringVal("https-"), + cty.False, + ``, + }, + { + cty.UnknownVal(cty.String).Refine().StringPrefix("https:").NewValue(), + 
cty.StringVal("https://"), + cty.UnknownVal(cty.Bool).RefineNotNull(), + ``, + }, + { + // Unicode combining characters edge-case: we match the prefix + // in terms of unicode code units rather than grapheme clusters, + // which is inconsistent with our string processing elsewhere but + // would be a breaking change to fix that bug now. + cty.StringVal("\U0001f937\u200d\u2642"), // "Man Shrugging" is encoded as "Person Shrugging" followed by zero-width joiner and then the masculine gender presentation modifier + cty.StringVal("\U0001f937"), // Just the "Person Shrugging" character without any modifiers + cty.True, + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("StartsWith(%#v, %#v)", test.String, test.Prefix), func(t *testing.T) { + got, err := StartsWithFunc.Call([]cty.Value{test.String, test.Prefix}) + + if test.WantError != "" { + gotErr := fmt.Sprintf("%s", err) + if gotErr != test.WantError { + t.Errorf("wrong error\ngot: %s\nwant: %s", gotErr, test.WantError) + } + return + } else if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !got.RawEquals(test.Want) { + t.Errorf( + "wrong result\nstring: %#v\nprefix: %#v\ngot: %#v\nwant: %#v", + test.String, test.Prefix, got, test.Want, + ) + } + }) + } +} + +func TestTemplateString(t *testing.T) { + tests := map[string]struct { + Content cty.Value + Vars cty.Value + Want cty.Value + Err string + }{ + "Simple string template": { + cty.StringVal("Hello, Jodie!"), + cty.EmptyObjectVal, + cty.StringVal("Hello, Jodie!"), + ``, + }, + "String interpolation with variable": { + cty.StringVal("Hello, ${name}!"), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("Jodie"), + }), + cty.StringVal("Hello, Jodie!"), + ``, + }, + "Looping through list": { + cty.StringVal("Items: %{ for x in list ~} ${x} %{ endfor ~}"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + }), + }), + 
cty.StringVal("Items: a b c "), + ``, + }, + "Looping through map": { + cty.StringVal("%{ for key, value in list ~} ${key}:${value} %{ endfor ~}"), + cty.ObjectVal(map[string]cty.Value{ + "list": cty.ObjectVal(map[string]cty.Value{ + "item1": cty.StringVal("a"), + "item2": cty.StringVal("b"), + "item3": cty.StringVal("c"), + }), + }), + cty.StringVal("item1:a item2:b item3:c "), + ``, + }, + "Invalid template variable name": { + cty.StringVal("Hello, ${1}!"), + cty.MapVal(map[string]cty.Value{ + "1": cty.StringVal("Jodie"), + }), + cty.NilVal, + `invalid template variable name "1": must start with a letter, followed by zero or more letters, digits, and underscores`, + }, + "Variable not present in vars map": { + cty.StringVal("Hello, ${name}!"), + cty.EmptyObjectVal, + cty.NilVal, + `vars map does not contain key "name"`, + }, + "Interpolation of a boolean value": { + cty.StringVal("${val}"), + cty.ObjectVal(map[string]cty.Value{ + "val": cty.True, + }), + cty.True, + ``, + }, + "Sensitive string template": { + cty.StringVal("My password is 1234").Mark(marks.Sensitive), + cty.EmptyObjectVal, + cty.StringVal("My password is 1234").Mark(marks.Sensitive), + ``, + }, + "Sensitive template variable": { + cty.StringVal("My password is ${pass}"), + cty.ObjectVal(map[string]cty.Value{ + "pass": cty.StringVal("secret").Mark(marks.Sensitive), + }), + cty.StringVal("My password is secret").Mark(marks.Sensitive), + ``, + }, + } + + templateStringFn := MakeTemplateStringFunc(".", func() map[string]function.Function { + return map[string]function.Function{} + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("TemplateString(%#v, %#v)", test.Content, test.Vars), func(t *testing.T) { + got, err := templateStringFn.Call([]cty.Value{test.Content, test.Vars}) + + if argErr, ok := err.(function.ArgError); ok { + if argErr.Index < 0 || argErr.Index > 1 { + t.Errorf("ArgError index %d is out of range for templatestring (must be 0 or 1)", argErr.Index) + } + } + + if err != nil { + 
if test.Err == "" { + t.Fatalf("unexpected error: %s", err) + } else { + if got, want := err.Error(), test.Err; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } + } else if test.Err != "" { + t.Fatal("succeeded; want error") + } else { + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + } + }) + } +} diff --git a/pkg/lang/funcs/testdata/bare.tmpl b/pkg/lang/funcs/testdata/bare.tmpl new file mode 100644 index 00000000000..da7cbab0eb7 --- /dev/null +++ b/pkg/lang/funcs/testdata/bare.tmpl @@ -0,0 +1 @@ +${val} \ No newline at end of file diff --git a/pkg/lang/funcs/testdata/func.tmpl b/pkg/lang/funcs/testdata/func.tmpl new file mode 100644 index 00000000000..33a240000a7 --- /dev/null +++ b/pkg/lang/funcs/testdata/func.tmpl @@ -0,0 +1 @@ +The items are ${join(", ", list)} \ No newline at end of file diff --git a/pkg/lang/funcs/testdata/hello.tmpl b/pkg/lang/funcs/testdata/hello.tmpl new file mode 100644 index 00000000000..f112ef89919 --- /dev/null +++ b/pkg/lang/funcs/testdata/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! 
\ No newline at end of file diff --git a/pkg/lang/funcs/testdata/hello.txt b/pkg/lang/funcs/testdata/hello.txt new file mode 100644 index 00000000000..5e1c309dae7 --- /dev/null +++ b/pkg/lang/funcs/testdata/hello.txt @@ -0,0 +1 @@ +Hello World \ No newline at end of file diff --git a/pkg/lang/funcs/testdata/icon.png b/pkg/lang/funcs/testdata/icon.png new file mode 100644 index 00000000000..2c18bbedefe Binary files /dev/null and b/pkg/lang/funcs/testdata/icon.png differ diff --git a/pkg/lang/funcs/testdata/list.tmpl b/pkg/lang/funcs/testdata/list.tmpl new file mode 100644 index 00000000000..da8f4749eff --- /dev/null +++ b/pkg/lang/funcs/testdata/list.tmpl @@ -0,0 +1,3 @@ +%{ for x in list ~} +- ${x} +%{ endfor ~} diff --git a/pkg/lang/funcs/testdata/recursive.tmpl b/pkg/lang/funcs/testdata/recursive.tmpl new file mode 100644 index 00000000000..55a412cdc97 --- /dev/null +++ b/pkg/lang/funcs/testdata/recursive.tmpl @@ -0,0 +1 @@ +${templatefile("testdata/recursive.tmpl", {})} diff --git a/pkg/lang/funcs/testdata/unreadable/foobar b/pkg/lang/funcs/testdata/unreadable/foobar new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/lang/functions.go b/pkg/lang/functions.go new file mode 100644 index 00000000000..4f3a4d5d466 --- /dev/null +++ b/pkg/lang/functions.go @@ -0,0 +1,244 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/ext/tryfunc" + ctyyaml "github.com/zclconf/go-cty-yaml" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/experiments" + "github.com/kubegems/opentofu/pkg/lang/funcs" +) + +var impureFunctions = []string{ + "bcrypt", + "timestamp", + "uuid", +} + +// This should probably be replaced with addrs.Function everywhere +const CoreNamespace = addrs.FunctionNamespaceCore + "::" + +// Functions returns the set of functions that should be used to when evaluating +// expressions in the receiving scope. +func (s *Scope) Functions() map[string]function.Function { + s.funcsLock.Lock() + if s.funcs == nil { + // Some of our functions are just directly the cty stdlib functions. + // Others are implemented in the subdirectory "funcs" here in this + // repository. New functions should generally start out their lives + // in the "funcs" directory and potentially graduate to cty stdlib + // later if the functionality seems to be something domain-agnostic + // that would be useful to all applications using cty functions. 
+ + s.funcs = map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "abspath": funcs.AbsPathFunc, + "alltrue": funcs.AllTrueFunc, + "anytrue": funcs.AnyTrueFunc, + "basename": funcs.BasenameFunc, + "base64decode": funcs.Base64DecodeFunc, + "base64encode": funcs.Base64EncodeFunc, + "base64gzip": funcs.Base64GzipFunc, + "base64gunzip": funcs.Base64GunzipFunc, + "base64sha256": funcs.Base64Sha256Func, + "base64sha512": funcs.Base64Sha512Func, + "bcrypt": funcs.BcryptFunc, + "can": tryfunc.CanFunc, + "ceil": stdlib.CeilFunc, + "chomp": stdlib.ChompFunc, + "cidrcontains": funcs.CidrContainsFunc, + "cidrhost": funcs.CidrHostFunc, + "cidrnetmask": funcs.CidrNetmaskFunc, + "cidrsubnet": funcs.CidrSubnetFunc, + "cidrsubnets": funcs.CidrSubnetsFunc, + "coalesce": funcs.CoalesceFunc, + "coalescelist": stdlib.CoalesceListFunc, + "compact": stdlib.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": stdlib.ContainsFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "dirname": funcs.DirnameFunc, + "distinct": stdlib.DistinctFunc, + "element": stdlib.ElementFunc, + "endswith": funcs.EndsWithFunc, + "chunklist": stdlib.ChunklistFunc, + "file": funcs.MakeFileFunc(s.BaseDir, false), + "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), + "fileset": funcs.MakeFileSetFunc(s.BaseDir), + "filebase64": funcs.MakeFileFunc(s.BaseDir, true), + "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), + "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), + "filemd5": funcs.MakeFileMd5Func(s.BaseDir), + "filesha1": funcs.MakeFileSha1Func(s.BaseDir), + "filesha256": funcs.MakeFileSha256Func(s.BaseDir), + "filesha512": funcs.MakeFileSha512Func(s.BaseDir), + "flatten": stdlib.FlattenFunc, + "floor": stdlib.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": stdlib.IndentFunc, + "index": funcs.IndexFunc, // stdlib.IndexFunc is not compatible + "join": stdlib.JoinFunc, + "jsondecode": 
stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": stdlib.KeysFunc, + "length": funcs.LengthFunc, + "list": funcs.ListFunc, + "log": stdlib.LogFunc, + "lookup": funcs.LookupFunc, + "lower": stdlib.LowerFunc, + "map": funcs.MapFunc, + "matchkeys": funcs.MatchkeysFunc, + "max": stdlib.MaxFunc, + "md5": funcs.Md5Func, + "merge": stdlib.MergeFunc, + "min": stdlib.MinFunc, + "one": funcs.OneFunc, + "parseint": stdlib.ParseIntFunc, + "pathexpand": funcs.PathExpandFunc, + "pow": stdlib.PowFunc, + "range": stdlib.RangeFunc, + "regex": stdlib.RegexFunc, + "regexall": stdlib.RegexAllFunc, + "replace": funcs.ReplaceFunc, + "reverse": stdlib.ReverseListFunc, + "rsadecrypt": funcs.RsaDecryptFunc, + "sensitive": funcs.SensitiveFunc, + "nonsensitive": funcs.NonsensitiveFunc, + "issensitive": funcs.IsSensitiveFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": stdlib.SetProductFunc, + "setsubtract": stdlib.SetSubtractFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": funcs.Sha1Func, + "sha256": funcs.Sha256Func, + "sha512": funcs.Sha512Func, + "signum": stdlib.SignumFunc, + "slice": stdlib.SliceFunc, + "sort": stdlib.SortFunc, + "split": stdlib.SplitFunc, + "startswith": funcs.StartsWithFunc, + "strcontains": funcs.StrContainsFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "sum": funcs.SumFunc, + "textdecodebase64": funcs.TextDecodeBase64Func, + "textencodebase64": funcs.TextEncodeBase64Func, + "timestamp": funcs.TimestampFunc, + "timeadd": stdlib.TimeAddFunc, + "timecmp": funcs.TimeCmpFunc, + "title": stdlib.TitleFunc, + "tostring": funcs.MakeToFunc(cty.String), + "tonumber": funcs.MakeToFunc(cty.Number), + "tobool": funcs.MakeToFunc(cty.Bool), + "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), + "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), + "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), + "transpose": funcs.TransposeFunc, + "trim": stdlib.TrimFunc, + "trimprefix": 
stdlib.TrimPrefixFunc, + "trimspace": stdlib.TrimSpaceFunc, + "trimsuffix": stdlib.TrimSuffixFunc, + "try": tryfunc.TryFunc, + "upper": stdlib.UpperFunc, + "urlencode": funcs.URLEncodeFunc, + "urldecode": funcs.URLDecodeFunc, + "uuid": funcs.UUIDFunc, + "uuidv5": funcs.UUIDV5Func, + "values": stdlib.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": stdlib.ZipmapFunc, + } + + s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { + // The templatefile function prevents recursive calls to itself + // by copying this map and overwriting the "templatefile" entry. + return s.funcs + }) + + // Registers "templatestring" function in function map. + s.funcs["templatestring"] = funcs.MakeTemplateStringFunc(s.BaseDir, func() map[string]function.Function { + // This anonymous function returns the existing map of functions for initialization. + return s.funcs + }) + + if s.ConsoleMode { + // The type function is only available in OpenTofu console. + s.funcs["type"] = funcs.TypeFunc + } + + if !s.ConsoleMode { + // The plantimestamp function doesn't make sense in the OpenTofu + // console. + s.funcs["plantimestamp"] = funcs.MakeStaticTimestampFunc(s.PlanTimestamp) + } + + if s.PureOnly { + // Force our few impure functions to return unknown so that we + // can defer evaluating them until a later pass. + for _, name := range impureFunctions { + s.funcs[name] = function.Unpredictable(s.funcs[name]) + } + } + + coreNames := make([]string, 0) + // Add a description to each function and parameter based on the + // contents of descriptionList. + // One must create a matching description entry whenever a new + // function is introduced. 
+ for name, f := range s.funcs { + s.funcs[name] = funcs.WithDescription(name, f) + coreNames = append(coreNames, name) + } + // Copy all stdlib funcs into core:: namespace + for _, name := range coreNames { + s.funcs[CoreNamespace+name] = s.funcs[name] + } + } + s.funcsLock.Unlock() + + return s.funcs +} + +// experimentalFunction checks whether the given experiment is enabled for +// the recieving scope. If so, it will return the given function verbatim. +// If not, it will return a placeholder function that just returns an +// error explaining that the function requires the experiment to be enabled. +// +//lint:ignore U1000 Ignore unused function error for now +func (s *Scope) experimentalFunction(experiment experiments.Experiment, fn function.Function) function.Function { + if s.activeExperiments.Has(experiment) { + return fn + } + + err := fmt.Errorf( + "this function is experimental and available only when the experiment keyword %s is enabled for the current module", + experiment.Keyword(), + ) + + return function.New(&function.Spec{ + Params: fn.Params(), + VarParam: fn.VarParam(), + Type: func(args []cty.Value) (cty.Type, error) { + return cty.DynamicPseudoType, err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // It would be weird to get here because the Type function always + // fails, but we'll return an error here too anyway just to be + // robust. + return cty.DynamicVal, err + }, + }) +} diff --git a/pkg/lang/functions_descriptions_test.go b/pkg/lang/functions_descriptions_test.go new file mode 100644 index 00000000000..65ca457eb0d --- /dev/null +++ b/pkg/lang/functions_descriptions_test.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/funcs" +) + +func TestFunctionDescriptions(t *testing.T) { + scope := &Scope{ + ConsoleMode: true, + } + // This will implicitly test the parameter description count since + // WithNewDescriptions will panic if the number doesn't match. + allFunctions := scope.Functions() + + // plantimestamp isn't available with ConsoleMode: true + // THis also includes the core:: prefixed functions + expectedFunctionCount := (len(funcs.DescriptionList) - 1) * 2 + + if len(allFunctions) != expectedFunctionCount { + t.Errorf("DescriptionList length expected: %d, got %d", len(allFunctions), expectedFunctionCount) + } + + for name := range allFunctions { + _, ok := funcs.DescriptionList[strings.TrimPrefix(name, CoreNamespace)] + if !ok { + t.Errorf("missing DescriptionList entry for function %q", name) + } + } +} diff --git a/pkg/lang/functions_test.go b/pkg/lang/functions_test.go new file mode 100644 index 00000000000..587ed48242d --- /dev/null +++ b/pkg/lang/functions_test.go @@ -0,0 +1,1377 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/experiments" + "github.com/kubegems/opentofu/pkg/lang/marks" +) + +// TestFunctions tests that functions are callable through the functionality +// in the langs package, via HCL. +// +// These tests are primarily here to assert that the functions are properly +// registered in the functions table, rather than to test all of the details +// of the functions. 
Each function should only have one or two tests here, +// since the main set of unit tests for a function should live alongside that +// function either in the "funcs" subdirectory here or over in the cty +// function/stdlib package. +// +// One exception to that is we can use this test mechanism to assert common +// patterns that are used in real-world configurations which rely on behaviors +// implemented either in this lang package or in HCL itself, such as automatic +// type conversions. The function unit tests don't cover those things because +// they call directly into the functions. +// +// With that said then, this test function should contain at least one simple +// test case per function registered in the functions table (just to prove +// it really is registered correctly) and possibly a small set of additional +// functions showing real-world use-cases that rely on type conversion +// behaviors. +func TestFunctions(t *testing.T) { + // used in `pathexpand()` test + homePath, err := homedir.Dir() + if err != nil { + t.Fatalf("Error getting home directory: %v", err) + } + + tests := map[string][]struct { + src string + want cty.Value + }{ + // Please maintain this list in alphabetical order by function, with + // a blank line between the group of tests for each function. 
+ + "abs": { + { + `abs(-1)`, + cty.NumberIntVal(1), + }, + }, + + "abspath": { + { + `abspath(".")`, + cty.StringVal((func() string { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + return filepath.ToSlash(cwd) + })()), + }, + }, + + "alltrue": { + { + `alltrue(["true", true])`, + cty.True, + }, + }, + + "anytrue": { + { + `anytrue([])`, + cty.False, + }, + }, + + "base64decode": { + { + `base64decode("YWJjMTIzIT8kKiYoKSctPUB+")`, + cty.StringVal("abc123!?$*&()'-=@~"), + }, + }, + + "base64encode": { + { + `base64encode("abc123!?$*&()'-=@~")`, + cty.StringVal("YWJjMTIzIT8kKiYoKSctPUB+"), + }, + }, + + "base64gzip": { + { + `base64gzip("test")`, + cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"), + }, + }, + + "base64gunzip": { + { + `base64gunzip("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA")`, + cty.StringVal("test"), + }, + }, + + "base64sha256": { + { + `base64sha256("test")`, + cty.StringVal("n4bQgYhMfWWaL+qgxVrQFaO/TxsrC4Is0V1sFbDwCgg="), + }, + }, + + "base64sha512": { + { + `base64sha512("test")`, + cty.StringVal("7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w=="), + }, + }, + + "basename": { + { + `basename("testdata/hello.txt")`, + cty.StringVal("hello.txt"), + }, + }, + + "can": { + { + `can(true)`, + cty.True, + }, + { + // Note: "can" only works with expressions that pass static + // validation, because it only gets an opportunity to run in + // that case. The following "works" (captures the error) because + // OpenTofu understands it as a reference to an attribute + // that does not exist during dynamic evaluation. + // + // "can" doesn't work with references that could never possibly + // be valid and are thus caught during static validation, such + // as an expression like "foo" alone which would be understood + // as an invalid resource reference. 
+ `can({}.baz)`, + cty.False, + }, + }, + + "ceil": { + { + `ceil(1.2)`, + cty.NumberIntVal(2), + }, + }, + + "chomp": { + { + `chomp("goodbye\ncruel\nworld\n")`, + cty.StringVal("goodbye\ncruel\nworld"), + }, + }, + + "chunklist": { + { + `chunklist(["a", "b", "c"], 1)`, + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("b"), + }), + cty.ListVal([]cty.Value{ + cty.StringVal("c"), + }), + }), + }, + }, + + "cidrcontains": { + { + `cidrcontains("192.168.1.0/24", "192.168.1.1")`, + cty.True, + }, + }, + + "cidrhost": { + { + `cidrhost("192.168.1.0/24", 5)`, + cty.StringVal("192.168.1.5"), + }, + }, + + "cidrnetmask": { + { + `cidrnetmask("192.168.1.0/24")`, + cty.StringVal("255.255.255.0"), + }, + }, + + "cidrsubnet": { + { + `cidrsubnet("192.168.2.0/20", 4, 6)`, + cty.StringVal("192.168.6.0/24"), + }, + }, + + "cidrsubnets": { + { + `cidrsubnets("10.0.0.0/8", 8, 8, 16, 8)`, + cty.ListVal([]cty.Value{ + cty.StringVal("10.0.0.0/16"), + cty.StringVal("10.1.0.0/16"), + cty.StringVal("10.2.0.0/24"), + cty.StringVal("10.3.0.0/16"), + }), + }, + }, + + "coalesce": { + { + `coalesce("first", "second", "third")`, + cty.StringVal("first"), + }, + + { + `coalescelist(["first", "second"], ["third", "fourth"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("first"), cty.StringVal("second"), + }), + }, + }, + + "coalescelist": { + { + `coalescelist(tolist(["a", "b"]), tolist(["c", "d"]))`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + }, + { + `coalescelist(["a", "b"], ["c", "d"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + }), + }, + }, + + "compact": { + { + `compact(["test", "", "test"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("test"), cty.StringVal("test"), + }), + }, + }, + + "concat": { + { + `concat(["a", ""], ["b", "c"])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal(""), + cty.StringVal("b"), + 
cty.StringVal("c"), + }), + }, + }, + + "contains": { + { + `contains(["a", "b"], "a")`, + cty.True, + }, + { // Should also work with sets, due to automatic conversion + `contains(toset(["a", "b"]), "a")`, + cty.True, + }, + }, + + "csvdecode": { + { + `csvdecode("a,b,c\n1,2,3\n4,5,6")`, + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("1"), + "b": cty.StringVal("2"), + "c": cty.StringVal("3"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("4"), + "b": cty.StringVal("5"), + "c": cty.StringVal("6"), + }), + }), + }, + }, + + "dirname": { + { + `dirname("testdata/hello.txt")`, + cty.StringVal("testdata"), + }, + }, + + "distinct": { + { + `distinct(["a", "b", "a", "b"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), + }), + }, + }, + + "element": { + { + `element(["hello"], 0)`, + cty.StringVal("hello"), + }, + }, + + "endswith": { + { + `endswith("hello world", "world")`, + cty.True, + }, + { + `endswith("hello world", "hello")`, + cty.False, + }, + { + `endswith("hello world", "")`, + cty.True, + // Completely empty suffix value ( "" ) + // will always evaluate to true for all strings. 
+ }, + { + `endswith("hello world", " ")`, + cty.False, + }, + { + `endswith("", "")`, + cty.True, + }, + { + `endswith("", " ")`, + cty.False, + }, + { + `endswith(" ", "")`, + cty.True, + }, + { + `endswith("", "hello")`, + cty.False, + }, + { + `endswith(" ", "hello")`, + cty.False, + }, + }, + + "file": { + { + `file("hello.txt")`, + cty.StringVal("hello!"), + }, + }, + + "fileexists": { + { + `fileexists("hello.txt")`, + cty.BoolVal(true), + }, + }, + + "fileset": { + { + `fileset(".", "*/hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("subdirectory/hello.tmpl"), + cty.StringVal("subdirectory/hello.txt"), + }), + }, + { + `fileset(".", "subdirectory/hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("subdirectory/hello.tmpl"), + cty.StringVal("subdirectory/hello.txt"), + }), + }, + { + `fileset(".", "hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + }, + { + `fileset("subdirectory", "hello.*")`, + cty.SetVal([]cty.Value{ + cty.StringVal("hello.tmpl"), + cty.StringVal("hello.txt"), + }), + }, + }, + + "filebase64": { + { + `filebase64("hello.txt")`, + cty.StringVal("aGVsbG8h"), + }, + }, + + "filebase64sha256": { + { + `filebase64sha256("hello.txt")`, + cty.StringVal("zgYJL7lI2f+sfRo3bkBLJrdXW8wR7gWkYV/vT+w6MIs="), + }, + }, + + "filebase64sha512": { + { + `filebase64sha512("hello.txt")`, + cty.StringVal("xvgdsOn4IGyXHJ5YJuO6gj/7saOpAPgEdlKov3jqmP38dFhVo4U6Y1Z1RY620arxIJ6I6tLRkjgrXEy91oUOAg=="), + }, + }, + + "filemd5": { + { + `filemd5("hello.txt")`, + cty.StringVal("5a8dd3ad0756a93ded72b823b19dd877"), + }, + }, + + "filesha1": { + { + `filesha1("hello.txt")`, + cty.StringVal("8f7d88e901a5ad3a05d8cc0de93313fd76028f8c"), + }, + }, + + "filesha256": { + { + `filesha256("hello.txt")`, + cty.StringVal("ce06092fb948d9ffac7d1a376e404b26b7575bcc11ee05a4615fef4fec3a308b"), + }, + }, + + "filesha512": { + { + `filesha512("hello.txt")`, + 
cty.StringVal("c6f81db0e9f8206c971c9e5826e3ba823ffbb1a3a900f8047652a8bf78ea98fdfc745855a3853a635675458eb6d1aaf1209e88ead2d192382b5c4cbdd6850e02"), + }, + }, + + "flatten": { + { + `flatten([["a", "b"], ["c", "d"]])`, + cty.TupleVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("b"), + cty.StringVal("c"), + cty.StringVal("d"), + }), + }, + }, + + "floor": { + { + `floor(-1.8)`, + cty.NumberFloatVal(-2), + }, + }, + + "format": { + { + `format("Hello, %s!", "Ander")`, + cty.StringVal("Hello, Ander!"), + }, + }, + + "formatlist": { + { + `formatlist("Hello, %s!", ["Valentina", "Ander", "Olivia", "Sam"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("Hello, Valentina!"), + cty.StringVal("Hello, Ander!"), + cty.StringVal("Hello, Olivia!"), + cty.StringVal("Hello, Sam!"), + }), + }, + }, + + "formatdate": { + { + `formatdate("DD MMM YYYY hh:mm ZZZ", "2018-01-04T23:12:01Z")`, + cty.StringVal("04 Jan 2018 23:12 UTC"), + }, + }, + + "indent": { + { + fmt.Sprintf("indent(4, %#v)", Poem), + cty.StringVal("Fleas:\n Adam\n Had'em\n \n E.E. 
Cummings"), + }, + }, + + "index": { + { + `index(["a", "b", "c"], "a")`, + cty.NumberIntVal(0), + }, + }, + + "issensitive": { + { + `issensitive(1)`, + cty.False, + }, + { + `issensitive(sensitive(1))`, + cty.True, + }, + }, + + "join": { + { + `join(" ", ["Hello", "World"])`, + cty.StringVal("Hello World"), + }, + }, + + "jsondecode": { + { + `jsondecode("{\"hello\": \"world\"}")`, + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + }, + }, + + "jsonencode": { + { + `jsonencode({"hello"="world"})`, + cty.StringVal("{\"hello\":\"world\"}"), + }, + // We are intentionally choosing to escape <, >, and & characters + // to preserve backwards compatibility with Terraform 0.11 + { + `jsonencode({"hello"="<cats & kittens>"})`, + cty.StringVal("{\"hello\":\"\\u003ccats \\u0026 kittens\\u003e\"}"), + }, + }, + + "keys": { + { + `keys({"hello"=1, "goodbye"=42})`, + cty.TupleVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("hello"), + }), + }, + }, + + "length": { + { + `length(["the", "quick", "brown", "bear"])`, + cty.NumberIntVal(4), + }, + }, + + "list": { + // There are intentionally no test cases for "list" because + // it is a stub that always returns an error. + }, + + "log": { + { + `log(1, 10)`, + cty.NumberFloatVal(0), + }, + }, + + "lookup": { + { + `lookup({hello=1, goodbye=42}, "goodbye")`, + cty.NumberIntVal(42), + }, + }, + + "lower": { + { + `lower("HELLO")`, + cty.StringVal("hello"), + }, + }, + + "map": { + // There are intentionally no test cases for "map" because + // it is a stub that always returns an error. 
+ }, + + "matchkeys": { + { + `matchkeys(["a", "b", "c"], ["ref1", "ref2", "ref3"], ["ref1"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + }), + }, + { // mixing types in searchset + `matchkeys(["a", "b", "c"], [1, 2, 3], [1, "3"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("a"), + cty.StringVal("c"), + }), + }, + }, + + "max": { + { + `max(12, 54, 3)`, + cty.NumberIntVal(54), + }, + }, + + "md5": { + { + `md5("tada")`, + cty.StringVal("ce47d07243bb6eaf5e1322c81baf9bbf"), + }, + }, + + "merge": { + { + `merge({"a"="b"}, {"c"="d"})`, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + "c": cty.StringVal("d"), + }), + }, + }, + + "min": { + { + `min(12, 54, 3)`, + cty.NumberIntVal(3), + }, + }, + + "nonsensitive": { + { + // Due to how this test is set up we have no way to get + // a sensitive value other than to generate one with + // another function, so this is a bit odd but does still + // meet the goal of verifying that the "nonsensitive" + // function is correctly registered. 
+ `nonsensitive(sensitive(1))`, + cty.NumberIntVal(1), + }, + }, + + "one": { + { + `one([])`, + cty.NullVal(cty.DynamicPseudoType), + }, + { + `one([true])`, + cty.True, + }, + }, + + "parseint": { + { + `parseint("100", 10)`, + cty.NumberIntVal(100), + }, + }, + + "pathexpand": { + { + `pathexpand("~/test-file")`, + cty.StringVal(filepath.Join(homePath, "test-file")), + }, + }, + + "plantimestamp": { + { + `plantimestamp()`, + cty.StringVal("2004-04-25T15:00:00Z"), + }, + }, + + "pow": { + { + `pow(1,0)`, + cty.NumberFloatVal(1), + }, + }, + + "range": { + { + `range(3)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(0), + cty.NumberIntVal(1), + cty.NumberIntVal(2), + }), + }, + { + `range(1, 4)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(3), + }), + }, + { + `range(1, 8, 2)`, + cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(3), + cty.NumberIntVal(5), + cty.NumberIntVal(7), + }), + }, + }, + + "regex": { + { + `regex("(\\d+)([a-z]+)", "aaa111bbb222")`, + cty.TupleVal([]cty.Value{cty.StringVal("111"), cty.StringVal("bbb")}), + }, + }, + + "regexall": { + { + `regexall("(\\d+)([a-z]+)", "...111aaa222bbb...")`, + cty.ListVal([]cty.Value{ + cty.TupleVal([]cty.Value{cty.StringVal("111"), cty.StringVal("aaa")}), + cty.TupleVal([]cty.Value{cty.StringVal("222"), cty.StringVal("bbb")}), + }), + }, + }, + + "replace": { + { + `replace("hello", "hel", "bel")`, + cty.StringVal("bello"), + }, + }, + + "reverse": { + { + `reverse(["a", true, 0])`, + cty.TupleVal([]cty.Value{cty.Zero, cty.True, cty.StringVal("a")}), + }, + }, + + "rsadecrypt": { + { + fmt.Sprintf("rsadecrypt(%#v, %#v)", CipherBase64, PrivateKey), + cty.StringVal("message"), + }, + }, + + "sensitive": { + { + `sensitive(1)`, + cty.NumberIntVal(1).Mark(marks.Sensitive), + }, + }, + + "setintersection": { + { + `setintersection(["a", "b"], ["b", "c"], ["b", "d"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("b"), + }), + }, + }, + + "setproduct": 
{ + { + `setproduct(["development", "staging", "production"], ["app1", "app2"])`, + cty.ListVal([]cty.Value{ + cty.TupleVal([]cty.Value{cty.StringVal("development"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("development"), cty.StringVal("app2")}), + cty.TupleVal([]cty.Value{cty.StringVal("staging"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("staging"), cty.StringVal("app2")}), + cty.TupleVal([]cty.Value{cty.StringVal("production"), cty.StringVal("app1")}), + cty.TupleVal([]cty.Value{cty.StringVal("production"), cty.StringVal("app2")}), + }), + }, + }, + + "setsubtract": { + { + `setsubtract(["a", "b", "c"], ["a", "c"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("b"), + }), + }, + }, + + "setunion": { + { + `setunion(["a", "b"], ["b", "c"], ["d"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("d"), + cty.StringVal("b"), + cty.StringVal("a"), + cty.StringVal("c"), + }), + }, + }, + + "sha1": { + { + `sha1("test")`, + cty.StringVal("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"), + }, + }, + + "sha256": { + { + `sha256("test")`, + cty.StringVal("9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"), + }, + }, + + "sha512": { + { + `sha512("test")`, + cty.StringVal("ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff"), + }, + }, + + "signum": { + { + `signum(12)`, + cty.NumberFloatVal(1), + }, + }, + + "slice": { + { + // force a list type here for testing + `slice(tolist(["a", "b", "c", "d"]), 1, 3)`, + cty.ListVal([]cty.Value{ + cty.StringVal("b"), cty.StringVal("c"), + }), + }, + { + `slice(["a", "b", 3, 4], 1, 3)`, + cty.TupleVal([]cty.Value{ + cty.StringVal("b"), cty.NumberIntVal(3), + }), + }, + }, + + "sort": { + { + `sort(["banana", "apple"])`, + cty.ListVal([]cty.Value{ + cty.StringVal("apple"), + cty.StringVal("banana"), + }), + }, + }, + + "split": { + { + `split(" ", "Hello World")`, + cty.ListVal([]cty.Value{ + 
cty.StringVal("Hello"), + cty.StringVal("World"), + }), + }, + }, + + "startswith": { + { + `startswith("hello world", "hello")`, + cty.True, + }, + { + `startswith("hello world", "world")`, + cty.False, + }, + { + `startswith("hello world", "")`, + cty.True, + // Completely empty prefix value ( "" ) + // will always evaluate to true for all strings. + }, + { + `startswith("hello world", " ")`, + cty.False, + }, + { + `startswith("", "")`, + cty.True, + }, + { + `startswith("", " ")`, + cty.False, + }, + { + `startswith(" ", "")`, + cty.True, + }, + { + `startswith("", "hello")`, + cty.False, + }, + { + `startswith(" ", "hello")`, + cty.False, + }, + }, + + "strcontains": { + { + `strcontains("hello", "llo")`, + cty.BoolVal(true), + }, + { + `strcontains("hello", "a")`, + cty.BoolVal(false), + }, + }, + + "strrev": { + { + `strrev("hello world")`, + cty.StringVal("dlrow olleh"), + }, + }, + + "substr": { + { + `substr("hello world", 1, 4)`, + cty.StringVal("ello"), + }, + }, + + "sum": { + { + `sum([2340.5,10,3])`, + cty.NumberFloatVal(2353.5), + }, + }, + + "textdecodebase64": { + { + `textdecodebase64("dABlAHMAdAA=", "UTF-16LE")`, + cty.StringVal("test"), + }, + }, + + "textencodebase64": { + { + `textencodebase64("test", "UTF-16LE")`, + cty.StringVal("dABlAHMAdAA="), + }, + }, + + "templatefile": { + { + `templatefile("hello.tmpl", {name = "Jodie"})`, + cty.StringVal("Hello, Jodie!"), + }, + }, + + "templatestring": { + { + `templatestring("Hello, $${name}!", {name = "Jodie"})`, + cty.StringVal("Hello, Jodie!"), + }, + }, + + "timeadd": { + { + `timeadd("2017-11-22T00:00:00Z", "1s")`, + cty.StringVal("2017-11-22T00:00:01Z"), + }, + }, + + "timecmp": { + { + `timecmp("2017-11-22T00:00:00Z", "2017-11-22T00:00:00Z")`, + cty.Zero, + }, + }, + + "title": { + { + `title("hello")`, + cty.StringVal("Hello"), + }, + }, + + "tobool": { + { + `tobool("false")`, + cty.False, + }, + }, + + "tolist": { + { + `tolist(["a", "b", "c"])`, + cty.ListVal([]cty.Value{ + 
cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), + }), + }, + }, + + "tomap": { + { + `tomap({"a" = 1, "b" = 2})`, + cty.MapVal(map[string]cty.Value{ + "a": cty.NumberIntVal(1), + "b": cty.NumberIntVal(2), + }), + }, + }, + + "tonumber": { + { + `tonumber("42")`, + cty.NumberIntVal(42), + }, + }, + + "toset": { + { + `toset(["a", "b", "c"])`, + cty.SetVal([]cty.Value{ + cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"), + }), + }, + }, + + "tostring": { + { + `tostring("a")`, + cty.StringVal("a"), + }, + }, + + "transpose": { + { + `transpose({"a" = ["1", "2"], "b" = ["2", "3"]})`, + cty.MapVal(map[string]cty.Value{ + "1": cty.ListVal([]cty.Value{cty.StringVal("a")}), + "2": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}), + "3": cty.ListVal([]cty.Value{cty.StringVal("b")}), + }), + }, + }, + + "trim": { + { + `trim("?!hello?!", "!?")`, + cty.StringVal("hello"), + }, + }, + + "trimprefix": { + { + `trimprefix("helloworld", "hello")`, + cty.StringVal("world"), + }, + }, + + "trimspace": { + { + `trimspace(" hello ")`, + cty.StringVal("hello"), + }, + }, + + "trimsuffix": { + { + `trimsuffix("helloworld", "world")`, + cty.StringVal("hello"), + }, + }, + + "try": { + { + // Note: "try" only works with expressions that pass static + // validation, because it only gets an opportunity to run in + // that case. The following "works" (captures the error) because + // OpenTofu understands it as a reference to an attribute + // that does not exist during dynamic evaluation. + // + // "try" doesn't work with references that could never possibly + // be valid and are thus caught during static validation, such + // as an expression like "foo" alone which would be understood + // as an invalid resource reference. That's okay because this + // function exists primarily to ease access to dynamically-typed + // structures that OpenTofu can't statically validate by + // definition. 
+ `try({}.baz, "fallback")`, + cty.StringVal("fallback"), + }, + { + `try("fallback")`, + cty.StringVal("fallback"), + }, + }, + + "upper": { + { + `upper("hello")`, + cty.StringVal("HELLO"), + }, + }, + + "urlencode": { + { + `urlencode("foo:bar@localhost?foo=bar&bar=baz")`, + cty.StringVal("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz"), + }, + }, + "urldecode": { + { + `urldecode("foo%3Abar%40localhost%3Ffoo%3Dbar%26bar%3Dbaz")`, + cty.StringVal("foo:bar@localhost?foo=bar&bar=baz"), + }, + }, + + "uuidv5": { + { + `uuidv5("dns", "tada")`, + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + }, + { + `uuidv5("url", "tada")`, + cty.StringVal("2c1ff6b4-211f-577e-94de-d978b0caa16e"), + }, + { + `uuidv5("oid", "tada")`, + cty.StringVal("61eeea26-5176-5288-87fc-232d6ed30d2f"), + }, + { + `uuidv5("x500", "tada")`, + cty.StringVal("7e12415e-f7c9-57c3-9e43-52dc9950d264"), + }, + { + `uuidv5("6ba7b810-9dad-11d1-80b4-00c04fd430c8", "tada")`, + cty.StringVal("faa898db-9b9d-5b75-86a9-149e7bb8e3b8"), + }, + }, + + "values": { + { + `values({"hello"="world", "what's"="up"})`, + cty.TupleVal([]cty.Value{ + cty.StringVal("world"), + cty.StringVal("up"), + }), + }, + }, + + "yamldecode": { + { + `yamldecode("true")`, + cty.True, + }, + { + `yamldecode("key: 0ba")`, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.StringVal("0ba"), + }), + }, + { + `yamldecode("~")`, + cty.NullVal(cty.DynamicPseudoType), + }, + }, + + "yamlencode": { + { + `yamlencode(["foo", "bar", true])`, + cty.StringVal("- \"foo\"\n- \"bar\"\n- true\n"), + }, + { + `yamlencode({a = "b", c = "d"})`, + cty.StringVal("\"a\": \"b\"\n\"c\": \"d\"\n"), + }, + { + `yamlencode(true)`, + // the ... 
here is an "end of document" marker, produced for implied primitive types only + cty.StringVal("true\n...\n"), + }, + }, + + "zipmap": { + { + `zipmap(["hello", "bar"], ["world", "baz"])`, + cty.ObjectVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + "bar": cty.StringVal("baz"), + }), + }, + }, + } + + experimentalFuncs := map[string]experiments.Experiment{} + experimentalFuncs["defaults"] = experiments.ModuleVariableOptionalAttrs + + t.Run("all functions are tested", func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + + // Check that there is at least one test case for each function, omitting + // those functions that do not return consistent values + allFunctions := scope.Functions() + + // TODO: we can test the impure functions partially by configuring the scope + // with PureOnly: true and then verify that they return unknown values of a + // suitable type. + for _, impureFunc := range impureFunctions { + delete(allFunctions, impureFunc) + delete(allFunctions, CoreNamespace+impureFunc) + } + for f := range scope.Functions() { + if _, ok := tests[strings.TrimPrefix(f, CoreNamespace)]; !ok { + t.Errorf("Missing test for function %s\n", f) + } + } + }) + + for funcName, funcTests := range tests { + t.Run(funcName, func(t *testing.T) { + + // prepareScope starts as a no-op, but if a function is marked as + // experimental in our experimentalFuncs table above then we'll + // reassign this to be a function that activates the appropriate + // experiment. + prepareScope := func(t *testing.T, scope *Scope) {} + + if experiment, isExperimental := experimentalFuncs[funcName]; isExperimental { + // First, we'll run all of the tests without the experiment + // enabled to see that they do actually fail in that case. 
+ for _, test := range funcTests { + testName := fmt.Sprintf("experimental(%s)", test.src) + t.Run(testName, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + return + } + + _, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + if !diags.HasErrors() { + t.Errorf("experimental function %q succeeded without its experiment %s enabled\nexpr: %s", funcName, experiment.Keyword(), test.src) + } + }) + } + + // Now make the experiment active in the scope so that the + // function will actually work when we test it below. + prepareScope = func(t *testing.T, scope *Scope) { + t.Helper() + t.Logf("activating experiment %s to test %q", experiment.Keyword(), funcName) + experimentsSet := experiments.NewSet() + experimentsSet.Add(experiment) + scope.SetActiveExperiments(experimentsSet) + } + } + + for _, test := range funcTests { + t.Run(test.src, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + PlanTimestamp: time.Date(2004, 04, 25, 15, 00, 00, 000, time.UTC), + } + prepareScope(t, scope) + + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + return + } + + got, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + if diags.HasErrors() { + for _, diag := range diags { + t.Errorf("%s: %s", diag.Description().Summary, diag.Description().Detail) + } + return + } + + if 
!test.want.RawEquals(got) { + t.Errorf("wrong result\nexpr: %s\ngot: %#v\nwant: %#v", test.src, got, test.want) + } + }) + } + }) + } +} + +const ( + CipherBase64 = "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==" + PrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ +mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR 
+UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +` + Poem = `Fleas: +Adam +Had'em + +E.E. Cummings` +) diff --git a/pkg/lang/globalref/analyzer.go b/pkg/lang/globalref/analyzer.go new file mode 100644 index 00000000000..013c25158e1 --- /dev/null +++ b/pkg/lang/globalref/analyzer.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/providers" +) + +// Analyzer is the main component of this package, serving as a container for +// various state that the analysis algorithms depend on either for their core +// functionality or for producing results more quickly. +// +// Global reference analysis is currently intended only for "best effort" +// use-cases related to giving hints to the user or tailoring UI output. +// Avoid using it for anything that would cause changes to the analyzer being +// considered a breaking change under the v1 compatibility promises, because +// we expect to continue to refine and evolve these rules over time in ways +// that may cause us to detect either more or fewer references than today. +// Typically we will conservatively return more references than would be +// necessary dynamically, but that isn't guaranteed for all situations. +// +// In particular, we currently typically don't distinguish between multiple +// instances of the same module, and so we overgeneralize references from +// one instance of a module as references from the same location in all +// instances of that module. 
We may make this more precise in future, which +// would then remove various detected references from the analysis results. +// +// Each Analyzer works with a particular configs.Config object which it assumes +// represents the root module of a configuration. Config objects are typically +// immutable by convention anyway, but it's particularly important not to +// modify a configuration while it's attached to a live Analyzer, because +// the Analyzer contains caches derived from data in the configuration tree. +type Analyzer struct { + cfg *configs.Config + providerSchemas map[addrs.Provider]providers.ProviderSchema +} + +// NewAnalyzer constructs a new analyzer bound to the given configuration and +// provider schemas. +// +// The given object must represent a root module, or this function will panic. +// +// The given provider schemas must cover at least all of the providers used +// in the given configuration. If not then analysis results will be silently +// incomplete for any decision that requires checking schema. +func NewAnalyzer(cfg *configs.Config, providerSchemas map[addrs.Provider]providers.ProviderSchema) *Analyzer { + if !cfg.Path.IsRoot() { + panic(fmt.Sprintf("constructing an Analyzer with non-root module %s", cfg.Path)) + } + + ret := &Analyzer{ + cfg: cfg, + providerSchemas: providerSchemas, + } + return ret +} + +// ModuleConfig retrieves a module configuration from the configuration the +// analyzer belongs to, or nil if there is no module with the given address. 
+func (a *Analyzer) ModuleConfig(addr addrs.ModuleInstance) *configs.Module { + modCfg := a.cfg.DescendentForInstance(addr) + if modCfg == nil { + return nil + } + return modCfg.Module +} diff --git a/pkg/lang/globalref/analyzer_contributing_resources.go b/pkg/lang/globalref/analyzer_contributing_resources.go new file mode 100644 index 00000000000..62fae366caf --- /dev/null +++ b/pkg/lang/globalref/analyzer_contributing_resources.go @@ -0,0 +1,135 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "sort" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// ContributingResources analyzes all of the given references and +// for each one tries to walk backwards through any named values to find all +// resources whose values contributed either directly or indirectly to any of +// them. +// +// This is a wrapper around ContributingResourceReferences which simplifies +// the result to only include distinct resource addresses, not full references. +// If the configuration includes several different references to different +// parts of a resource, ContributingResources will not preserve that detail. +func (a *Analyzer) ContributingResources(refs ...Reference) []addrs.AbsResource { + retRefs := a.ContributingResourceReferences(refs...) + if len(retRefs) == 0 { + return nil + } + + uniq := make(map[string]addrs.AbsResource, len(refs)) + for _, ref := range retRefs { + if addr, ok := resourceForAddr(ref.LocalRef.Subject); ok { + moduleAddr := ref.ModuleAddr() + absAddr := addr.Absolute(moduleAddr) + uniq[absAddr.String()] = absAddr + } + } + ret := make([]addrs.AbsResource, 0, len(uniq)) + for _, addr := range uniq { + ret = append(ret, addr) + } + sort.Slice(ret, func(i, j int) bool { + // We only have a sorting function for resource _instances_, but + // it'll do well enough if we just pretend we have no-key instances. 
+ return ret[i].Instance(addrs.NoKey).Less(ret[j].Instance(addrs.NoKey)) + }) + return ret +} + +// ContributingResourceReferences analyzes all of the given references and +// for each one tries to walk backwards through any named values to find all +// references to resource attributes that contributed either directly or +// indirectly to any of them. +// +// This is a global operation that can be potentially quite expensive for +// complex configurations. +func (a *Analyzer) ContributingResourceReferences(refs ...Reference) []Reference { + // Our methodology here is to keep digging through MetaReferences + // until we've visited everything we encounter directly or indirectly, + // and keep track of any resources we find along the way. + + // We'll aggregate our result here, using the string representations of + // the resources as keys to avoid returning the same one more than once. + found := make(map[referenceAddrKey]Reference) + + // We might encounter the same object multiple times as we walk, + // but we won't learn anything more by traversing them again and so we'll + // just skip them instead. + visitedObjects := make(map[referenceAddrKey]struct{}) + + // A queue of objects we still need to visit. + // Note that if we find multiple references to the same object then we'll + // just arbitrary choose any one of them, because for our purposes here + // it's immaterial which reference we actually followed. + pendingObjects := make(map[referenceAddrKey]Reference) + + // Initial state: identify any directly-mentioned resources and + // queue up any named values we refer to. 
+ for _, ref := range refs { + if _, ok := resourceForAddr(ref.LocalRef.Subject); ok { + found[ref.addrKey()] = ref + } + pendingObjects[ref.addrKey()] = ref + } + + for len(pendingObjects) > 0 { + // Note: we modify this map while we're iterating over it, which means + // that anything we add might be either visited within a later + // iteration of the inner loop or in a later iteration of the outer + // loop, but we get the correct result either way because we keep + // working until we've fully depleted the queue. + for key, ref := range pendingObjects { + delete(pendingObjects, key) + + // We do this _before_ the visit below just in case this is an + // invalid config with a self-referential local value, in which + // case we'll just silently ignore the self reference for our + // purposes here, and thus still eventually converge (albeit + // with an incomplete answer). + visitedObjects[key] = struct{}{} + + moreRefs := a.MetaReferences(ref) + for _, newRef := range moreRefs { + if _, ok := resourceForAddr(newRef.LocalRef.Subject); ok { + found[newRef.addrKey()] = newRef + } + + newKey := newRef.addrKey() + if _, visited := visitedObjects[newKey]; !visited { + pendingObjects[newKey] = newRef + } + } + } + } + + if len(found) == 0 { + return nil + } + + ret := make([]Reference, 0, len(found)) + for _, ref := range found { + ret = append(ret, ref) + } + return ret +} + +func resourceForAddr(addr addrs.Referenceable) (addrs.Resource, bool) { + switch addr := addr.(type) { + case addrs.Resource: + return addr, true + case addrs.ResourceInstance: + return addr.Resource, true + default: + return addrs.Resource{}, false + } +} diff --git a/pkg/lang/globalref/analyzer_contributing_resources_test.go b/pkg/lang/globalref/analyzer_contributing_resources_test.go new file mode 100644 index 00000000000..f695302151c --- /dev/null +++ b/pkg/lang/globalref/analyzer_contributing_resources_test.go @@ -0,0 +1,195 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestAnalyzerContributingResources(t *testing.T) { + azr := testAnalyzer(t, "contributing-resources") + + tests := map[string]struct { + StartRefs func() []Reference + WantAddrs []string + }{ + "root output 'network'": { + func() []Reference { + return azr.ReferencesFromOutputValue( + addrs.OutputValue{Name: "network"}.Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + `data.test_thing.environment`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc`, + }, + }, + "root output 'c10s_url'": { + func() []Reference { + return azr.ReferencesFromOutputValue( + addrs.OutputValue{Name: "c10s_url"}.Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + `data.test_thing.environment`, + `module.compute.test_thing.load_balancer`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc`, + + // NOTE: module.compute.test_thing.controller isn't here + // because we can see statically that the output value refers + // only to the "string" attribute of + // module.compute.test_thing.load_balancer , and so we + // don't consider references inside the "list" blocks. 
+ }, + }, + "module.compute.test_thing.load_balancer": { + func() []Reference { + return azr.ReferencesFromResourceInstance( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "load_balancer", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("compute", addrs.NoKey)), + ) + }, + []string{ + `data.test_thing.environment`, + `module.compute.test_thing.controller`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc`, + }, + }, + "data.test_thing.environment": { + func() []Reference { + return azr.ReferencesFromResourceInstance( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_thing", + Name: "environment", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + // Nothing! This one only refers to an input variable. + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + startRefs := test.StartRefs() + addrs := azr.ContributingResources(startRefs...) + + want := test.WantAddrs + got := make([]string, len(addrs)) + for i, addr := range addrs { + got[i] = addr.String() + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong addresses\n%s", diff) + } + }) + } +} + +func TestAnalyzerContributingResourceAttrs(t *testing.T) { + azr := testAnalyzer(t, "contributing-resources") + + tests := map[string]struct { + StartRefs func() []Reference + WantAttrs []string + }{ + "root output 'network'": { + func() []Reference { + return azr.ReferencesFromOutputValue( + addrs.OutputValue{Name: "network"}.Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + `data.test_thing.environment.any.base_cidr_block`, + `data.test_thing.environment.any.subnet_count`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc.string`, + }, + }, + "root output 'c10s_url'": { + func() []Reference { + return azr.ReferencesFromOutputValue( + addrs.OutputValue{Name: "c10s_url"}.Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + 
`data.test_thing.environment.any.base_cidr_block`, + `data.test_thing.environment.any.subnet_count`, + `module.compute.test_thing.load_balancer.string`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc.string`, + }, + }, + "module.compute.test_thing.load_balancer": { + func() []Reference { + return azr.ReferencesFromResourceInstance( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "load_balancer", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("compute", addrs.NoKey)), + ) + }, + []string{ + `data.test_thing.environment.any.base_cidr_block`, + `data.test_thing.environment.any.subnet_count`, + `module.compute.test_thing.controller`, + `module.network.test_thing.subnet`, + `module.network.test_thing.vpc.string`, + }, + }, + "data.test_thing.environment": { + func() []Reference { + return azr.ReferencesFromResourceInstance( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_thing", + Name: "environment", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ) + }, + []string{ + // Nothing! This one only refers to an input variable. + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + startRefs := test.StartRefs() + refs := azr.ContributingResourceReferences(startRefs...) 
+ + want := test.WantAttrs + got := make([]string, len(refs)) + for i, ref := range refs { + resAttr, ok := ref.ResourceAttr() + if !ok { + t.Errorf("%s is not a resource attr reference", resAttr.DebugString()) + continue + } + got[i] = resAttr.DebugString() + } + + sort.Strings(got) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong addresses\n%s", diff) + } + }) + } +} diff --git a/pkg/lang/globalref/analyzer_meta_references.go b/pkg/lang/globalref/analyzer_meta_references.go new file mode 100644 index 00000000000..975f5203330 --- /dev/null +++ b/pkg/lang/globalref/analyzer_meta_references.go @@ -0,0 +1,612 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang" +) + +// MetaReferences inspects the configuration to find the references contained +// within the most specific object that the given address refers to. +// +// This finds only the direct references in that object, not any indirect +// references from those. This is a building block for some other Analyzer +// functions that can walk through multiple levels of reference. +// +// If the given reference refers to something that doesn't exist in the +// configuration we're analyzing then MetaReferences will return no +// meta-references at all, which is indistinguishable from an existing +// object that doesn't refer to anything. 
+func (a *Analyzer) MetaReferences(ref Reference) []Reference { + // This function is aiming to encapsulate the fact that a reference + // is actually quite a complex notion which includes both a specific + // object the reference is to, where each distinct object type has + // a very different representation in the configuration, and then + // also potentially an attribute or block within the definition of that + // object. Our goal is to make all of these different situations appear + // mostly the same to the caller, in that all of them can be reduced to + // a set of references regardless of which expression or expressions we + // derive those from. + + moduleAddr := ref.ModuleAddr() + remaining := ref.LocalRef.Remaining + + // Our first task then is to select an appropriate implementation based + // on which address type the reference refers to. + switch targetAddr := ref.LocalRef.Subject.(type) { + case addrs.InputVariable: + return a.metaReferencesInputVariable(moduleAddr, targetAddr, remaining) + case addrs.LocalValue: + return a.metaReferencesLocalValue(moduleAddr, targetAddr, remaining) + case addrs.ModuleCallInstanceOutput: + return a.metaReferencesOutputValue(moduleAddr, targetAddr, remaining) + case addrs.ModuleCallInstance: + return a.metaReferencesModuleCall(moduleAddr, targetAddr, remaining) + case addrs.ModuleCall: + // TODO: It isn't really correct to say that a reference to a module + // call is a reference to its no-key instance. Really what we want to + // say here is that it's a reference to _all_ instances, or to an + // instance with an unknown key, but we don't have any representation + // of that. For the moment it's pretty immaterial since most of our + // other analysis ignores instance keys anyway, but maybe we'll revisit + // this later to distinguish these two cases better. 
+ return a.metaReferencesModuleCall(moduleAddr, targetAddr.Instance(addrs.NoKey), remaining) + case addrs.CountAttr, addrs.ForEachAttr: + if resourceAddr, ok := ref.ResourceInstance(); ok { + return a.metaReferencesCountOrEach(resourceAddr.ContainingResource()) + } + return nil + case addrs.ResourceInstance: + return a.metaReferencesResourceInstance(moduleAddr, targetAddr, remaining) + case addrs.Resource: + // TODO: It isn't really correct to say that a reference to a resource + // is a reference to its no-key instance. Really what we want to say + // here is that it's a reference to _all_ instances, or to an instance + // with an unknown key, but we don't have any representation of that. + // For the moment it's pretty immaterial since most of our other + // analysis ignores instance keys anyway, but maybe we'll revisit this + // later to distinguish these two cases better. + return a.metaReferencesResourceInstance(moduleAddr, targetAddr.Instance(addrs.NoKey), remaining) + default: + // For anything we don't explicitly support we'll just return no + // references. This includes the reference types that don't really + // refer to configuration objects at all, like "path.module", + // and so which cannot possibly generate any references. + return nil + } +} + +func (a *Analyzer) metaReferencesInputVariable(calleeAddr addrs.ModuleInstance, addr addrs.InputVariable, remain hcl.Traversal) []Reference { + if calleeAddr.IsRoot() { + // A root module variable definition can never refer to anything, + // because it conceptually exists outside of any module. + return nil + } + + callerAddr, callAddr := calleeAddr.Call() + + // We need to find the module call inside the caller module. + callerCfg := a.ModuleConfig(callerAddr) + if callerCfg == nil { + return nil + } + call := callerCfg.ModuleCalls[callAddr.Name] + if call == nil { + return nil + } + + // Now we need to look for an attribute matching the variable name inside + // the module block body. 
+ body := call.Config + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: addr.Name}, + }, + } + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. + content, _, _ := body.PartialContent(schema) + attr := content.Attributes[addr.Name] + if attr == nil { + return nil + } + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, attr.Expr) + return absoluteRefs(callerAddr, refs) +} + +func (a *Analyzer) metaReferencesOutputValue(callerAddr addrs.ModuleInstance, addr addrs.ModuleCallInstanceOutput, remain hcl.Traversal) []Reference { + calleeAddr := callerAddr.Child(addr.Call.Call.Name, addr.Call.Key) + + // We need to find the output value declaration inside the callee module. + calleeCfg := a.ModuleConfig(calleeAddr) + if calleeCfg == nil { + return nil + } + + oc := calleeCfg.Outputs[addr.Name] + if oc == nil { + return nil + } + + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, oc.Expr) + return absoluteRefs(calleeAddr, refs) +} + +func (a *Analyzer) metaReferencesLocalValue(moduleAddr addrs.ModuleInstance, addr addrs.LocalValue, remain hcl.Traversal) []Reference { + modCfg := a.ModuleConfig(moduleAddr) + if modCfg == nil { + return nil + } + + local := modCfg.Locals[addr.Name] + if local == nil { + return nil + } + + // We don't check for errors here because we'll make a best effort to + // analyze whatever partial result HCL is able to extract. 
+ refs, _ := lang.ReferencesInExpr(addrs.ParseRef, local.Expr) + return absoluteRefs(moduleAddr, refs) +} + +func (a *Analyzer) metaReferencesModuleCall(callerAddr addrs.ModuleInstance, addr addrs.ModuleCallInstance, remain hcl.Traversal) []Reference { + calleeAddr := callerAddr.Child(addr.Call.Name, addr.Key) + + // What we're really doing here is just rolling up all of the references + // from all of this module's output values. + calleeCfg := a.ModuleConfig(calleeAddr) + if calleeCfg == nil { + return nil + } + + var ret []Reference + for name := range calleeCfg.Outputs { + outputAddr := addrs.ModuleCallInstanceOutput{ + Call: addr, + Name: name, + } + moreRefs := a.metaReferencesOutputValue(callerAddr, outputAddr, nil) + ret = append(ret, moreRefs...) + } + return ret +} + +func (a *Analyzer) metaReferencesCountOrEach(resourceAddr addrs.AbsResource) []Reference { + return a.ReferencesFromResourceRepetition(resourceAddr) +} + +func (a *Analyzer) metaReferencesResourceInstance(moduleAddr addrs.ModuleInstance, addr addrs.ResourceInstance, remain hcl.Traversal) []Reference { + modCfg := a.ModuleConfig(moduleAddr) + if modCfg == nil { + return nil + } + + rc := modCfg.ResourceByAddr(addr.Resource) + if rc == nil { + return nil + } + + // In valid cases we should have the schema for this resource type + // available. In invalid cases we might be dealing with partial information, + // and so the schema might be nil so we won't be able to return reference + // information for this particular situation. + providerSchema, ok := a.providerSchemas[rc.Provider] + if !ok { + return nil + } + + resourceTypeSchema, _ := providerSchema.SchemaForResourceAddr(addr.Resource) + if resourceTypeSchema == nil { + return nil + } + + // When analyzing the resource configuration to look for references, we'll + // make a best effort to narrow down to only a particular sub-portion of + // the configuration by following the remaining traversal steps. 
In the + // ideal case this will lead us to a specific expression, but as a + // compromise it might lead us to some nested blocks where at least we + // can limit our searching only to those. + bodies := []hcl.Body{rc.Config} + var exprs []hcl.Expression + schema := resourceTypeSchema + var steppingThrough *configschema.NestedBlock + var steppingThroughType string + nextStep := func(newBodies []hcl.Body, newExprs []hcl.Expression) { + // We append exprs but replace bodies because exprs represent extra + // expressions we collected on the path, such as dynamic block for_each, + // which can potentially contribute to the final evalcontext, but + // bodies never contribute any values themselves, and instead just + // narrow down where we're searching. + bodies = newBodies + exprs = append(exprs, newExprs...) + steppingThrough = nil + steppingThroughType = "" + // Caller must also update "schema" if necessary. + } + traverseInBlock := func(name string) ([]hcl.Body, []hcl.Expression) { + if attr := schema.Attributes[name]; attr != nil { + // When we reach a specific attribute we can't traverse any deeper, because attributes are the leaves of the schema. + schema = nil + return traverseAttr(bodies, name) + } else if blockType := schema.BlockTypes[name]; blockType != nil { + // We need to take a different action here depending on + // the nesting mode of the block type. Some require us + // to traverse in two steps in order to select a specific + // child block, while others we can just step through + // directly. + switch blockType.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + // There should be only zero or one blocks of this + // type, so we can traverse in only one step. 
+ schema = &blockType.Block + return traverseNestedBlockSingle(bodies, name) + case configschema.NestingMap, configschema.NestingList, configschema.NestingSet: + steppingThrough = blockType + return bodies, exprs // Preserve current selections for the second step + default: + // The above should be exhaustive, but just in case + // we add something new in future we'll bail out + // here and conservatively return everything under + // the current traversal point. + schema = nil + return nil, nil + } + } + + // We'll get here if the given name isn't in the schema at all. If so, + // there's nothing else to be done here. + schema = nil + return nil, nil + } +Steps: + for _, step := range remain { + // If we filter out all of our bodies before we finish traversing then + // we know we won't find anything else, because all of our subsequent + // traversal steps won't have any bodies to search. + if len(bodies) == 0 { + return nil + } + // If we no longer have a schema then that suggests we've + // traversed as deep as what the schema covers (e.g. we reached + // a specific attribute) and so we'll stop early, assuming that + // any remaining steps are traversals into an attribute expression + // result. + if schema == nil { + break + } + + switch step := step.(type) { + + case hcl.TraverseAttr: + switch { + case steppingThrough != nil: + // If we're stepping through a NestingMap block then + // it's valid to use attribute syntax to select one of + // the blocks by its label. Other nesting types require + // TraverseIndex, so can never be valid. 
+ if steppingThrough.Nesting != configschema.NestingMap { + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockMap(bodies, steppingThroughType, step.Name)) + schema = &steppingThrough.Block + default: + nextStep(traverseInBlock(step.Name)) + if schema == nil { + // traverseInBlock determined that we've traversed as + // deep as we can with reference to schema, so we'll + // stop here and just process whatever's selected. + break Steps + } + } + case hcl.TraverseIndex: + switch { + case steppingThrough != nil: + switch steppingThrough.Nesting { + case configschema.NestingMap: + keyVal, err := convert.Convert(step.Key, cty.String) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockMap(bodies, steppingThroughType, keyVal.AsString())) + schema = &steppingThrough.Block + case configschema.NestingList: + idxVal, err := convert.Convert(step.Key, cty.Number) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + var idx int + err = gocty.FromCtyValue(idxVal, &idx) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseNestedBlockList(bodies, steppingThroughType, idx)) + schema = &steppingThrough.Block + default: + // Note that NestingSet ends up in here because we don't + // actually allow traversing into set-backed block types, + // and so such a reference would be invalid. + nextStep(nil, nil) // bail out + continue + } + default: + // When indexing the contents of a block directly we always + // interpret the key as a string representing an attribute + // name. 
+ nameVal, err := convert.Convert(step.Key, cty.String) + if err != nil { // Invalid traversal, so can't have any refs + nextStep(nil, nil) // bail out + continue + } + nextStep(traverseInBlock(nameVal.AsString())) + if schema == nil { + // traverseInBlock determined that we've traversed as + // deep as we can with reference to schema, so we'll + // stop here and just process whatever's selected. + break Steps + } + } + default: + // We shouldn't get here, because the above cases are exhaustive + // for all of the relative traversal types, but we'll be robust in + // case HCL adds more in future and just pretend the traversal + // ended a bit early if so. + break Steps + } + } + + if steppingThrough != nil { + // If we ended in the middle of "stepping through" then we'll conservatively + // use the bodies of _all_ nested blocks of the type we were stepping + // through, because the recipient of this value could refer to any + // of them dynamically. + var labelNames []string + if steppingThrough.Nesting == configschema.NestingMap { + labelNames = []string{"key"} + } + blocks := findBlocksInBodies(bodies, steppingThroughType, labelNames) + for _, block := range blocks { + bodies, exprs = blockParts(block) + } + } + + if len(bodies) == 0 && len(exprs) == 0 { + return nil + } + + var refs []*addrs.Reference + for _, expr := range exprs { + moreRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, expr) + refs = append(refs, moreRefs...) + } + if schema != nil { + for _, body := range bodies { + moreRefs, _ := lang.ReferencesInBlock(addrs.ParseRef, body, schema) + refs = append(refs, moreRefs...) 
+ } + } + return absoluteRefs(addr.Absolute(moduleAddr), refs) +} + +func traverseAttr(bodies []hcl.Body, name string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: name}, + }, + } + // We can find at most one expression per body, because attribute names + // are always unique within a body. + retExprs := make([]hcl.Expression, 0, len(bodies)) + for _, body := range bodies { + content, _, _ := body.PartialContent(schema) + if attr := content.Attributes[name]; attr != nil && attr.Expr != nil { + retExprs = append(retExprs, attr.Expr) + } + } + return nil, retExprs +} + +func traverseNestedBlockSingle(bodies []hcl.Body, typeName string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + blocks := findBlocksInBodies(bodies, typeName, nil) + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, block := range blocks { + moreBodies, moreExprs := blockParts(block) + retBodies = append(retBodies, moreBodies...) + retExprs = append(retExprs, moreExprs...) + } + return retBodies, retExprs +} + +func traverseNestedBlockMap(bodies []hcl.Body, typeName string, key string) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + blocks := findBlocksInBodies(bodies, typeName, []string{"key"}) + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, block := range blocks { + switch block.Type { + case "dynamic": + // For dynamic blocks we allow the key to be chosen dynamically + // and so we'll just conservatively include all dynamic block + // bodies. However, we need to also look for references in some + // arguments of the dynamic block itself. + argExprs, contentBody := dynamicBlockParts(block.Body) + retExprs = append(retExprs, argExprs...) 
+ if contentBody != nil { + retBodies = append(retBodies, contentBody) + } + case typeName: + if len(block.Labels) == 1 && block.Labels[0] == key && block.Body != nil { + retBodies = append(retBodies, block.Body) + } + } + } + return retBodies, retExprs +} + +func traverseNestedBlockList(bodies []hcl.Body, typeName string, idx int) ([]hcl.Body, []hcl.Expression) { + if len(bodies) == 0 { + return nil, nil + } + + schema := &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + {Type: typeName, LabelNames: nil}, + {Type: "dynamic", LabelNames: []string{"type"}}, + }, + } + var retBodies []hcl.Body + var retExprs []hcl.Expression + for _, body := range bodies { + content, _, _ := body.PartialContent(schema) + blocks := content.Blocks + + // A tricky aspect of this scenario is that if there are any "dynamic" + // blocks then we can't statically predict how many concrete blocks they + // will generate, and so consequently we can't predict the indices of + // any statically-defined blocks that might appear after them. + firstDynamic := -1 // -1 means "no dynamic blocks" + for i, block := range blocks { + if block.Type == "dynamic" { + firstDynamic = i + break + } + } + + switch { + case firstDynamic >= 0 && idx >= firstDynamic: + // This is the unfortunate case where the selection could be + // any of the blocks from firstDynamic onwards, and so we + // need to conservatively include all of them in our result. + for _, block := range blocks[firstDynamic:] { + moreBodies, moreExprs := blockParts(block) + retBodies = append(retBodies, moreBodies...) + retExprs = append(retExprs, moreExprs...) + } + default: + // This is the happier case where we can select just a single + // static block based on idx. Note that this one is guaranteed + // to never be dynamic but we're using blockParts here just + // for consistency. + moreBodies, moreExprs := blockParts(blocks[idx]) + retBodies = append(retBodies, moreBodies...) + retExprs = append(retExprs, moreExprs...) 
+ } + } + + return retBodies, retExprs +} + +func findBlocksInBodies(bodies []hcl.Body, typeName string, labelNames []string) []*hcl.Block { + // We need to look for both static blocks of the given type, and any + // dynamic blocks whose label gives the expected type name. + schema := &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + {Type: typeName, LabelNames: labelNames}, + {Type: "dynamic", LabelNames: []string{"type"}}, + }, + } + var blocks []*hcl.Block + for _, body := range bodies { + // We ignore errors here because we'll just make a best effort to analyze + // whatever partial result HCL returns in that case. + content, _, _ := body.PartialContent(schema) + + for _, block := range content.Blocks { + switch block.Type { + case "dynamic": + if len(block.Labels) != 1 { // Invalid + continue + } + if block.Labels[0] == typeName { + blocks = append(blocks, block) + } + case typeName: + blocks = append(blocks, block) + } + } + } + + // NOTE: The caller still needs to check for dynamic vs. static in order + // to do further processing. The callers above all aim to encapsulate + // that. + return blocks +} + +func blockParts(block *hcl.Block) ([]hcl.Body, []hcl.Expression) { + switch block.Type { + case "dynamic": + exprs, contentBody := dynamicBlockParts(block.Body) + var bodies []hcl.Body + if contentBody != nil { + bodies = []hcl.Body{contentBody} + } + return bodies, exprs + default: + if block.Body == nil { + return nil, nil + } + return []hcl.Body{block.Body}, nil + } +} + +func dynamicBlockParts(body hcl.Body) ([]hcl.Expression, hcl.Body) { + if body == nil { + return nil, nil + } + + // This is a subset of the "dynamic" block schema defined by the HCL + // dynblock extension, covering only the two arguments that are allowed + // to be arbitrary expressions possibly referring elsewhere. 
+ schema := &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "for_each"}, + {Name: "labels"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "content"}, + }, + } + content, _, _ := body.PartialContent(schema) + var exprs []hcl.Expression + if len(content.Attributes) != 0 { + exprs = make([]hcl.Expression, 0, len(content.Attributes)) + } + for _, attr := range content.Attributes { + if attr.Expr != nil { + exprs = append(exprs, attr.Expr) + } + } + var contentBody hcl.Body + for _, block := range content.Blocks { + if block != nil && block.Type == "content" && block.Body != nil { + contentBody = block.Body + } + } + return exprs, contentBody +} diff --git a/pkg/lang/globalref/analyzer_meta_references_shortcuts.go b/pkg/lang/globalref/analyzer_meta_references_shortcuts.go new file mode 100644 index 00000000000..e8b6d2a8613 --- /dev/null +++ b/pkg/lang/globalref/analyzer_meta_references_shortcuts.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang" +) + +// ReferencesFromOutputValue returns all of the direct references from the +// value expression of the given output value. It doesn't include any indirect +// references. +func (a *Analyzer) ReferencesFromOutputValue(addr addrs.AbsOutputValue) []Reference { + mc := a.ModuleConfig(addr.Module) + if mc == nil { + return nil + } + oc := mc.Outputs[addr.OutputValue.Name] + if oc == nil { + return nil + } + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, oc.Expr) + return absoluteRefs(addr.Module, refs) +} + +// ReferencesFromResourceInstance returns all of the direct references from the +// definition of the resource instance at the given address. It doesn't include +// any indirect references. 
+// +// The result doesn't directly include references from a "count" or "for_each" +// expression belonging to the associated resource, but it will include any +// references to count.index, each.key, or each.value that appear in the +// expressions which you can then, if you wish, resolve indirectly using +// Analyzer.MetaReferences. Alternatively, you can use +// Analyzer.ReferencesFromResourceRepetition to get that same result directly. +func (a *Analyzer) ReferencesFromResourceInstance(addr addrs.AbsResourceInstance) []Reference { + // Using MetaReferences for this is kinda overkill, since + // lang.ReferencesInBlock would be sufficient really, but + // this ensures we keep consistent in how we build the + // resulting absolute references and otherwise aside from + // some extra overhead this call boils down to a call to + // lang.ReferencesInBlock anyway. + fakeRef := Reference{ + ContainerAddr: addr.Module, + LocalRef: &addrs.Reference{ + Subject: addr.Resource, + }, + } + return a.MetaReferences(fakeRef) +} + +// ReferencesFromResourceRepetition returns the references from the given +// resource's for_each or count expression, or an empty set if the resource +// doesn't use repetition. +// +// This is a special-case sort of helper for use in situations where an +// expression might refer to count.index, each.key, or each.value, and thus +// we say that it depends indirectly on the repetition expression. +func (a *Analyzer) ReferencesFromResourceRepetition(addr addrs.AbsResource) []Reference { + modCfg := a.ModuleConfig(addr.Module) + if modCfg == nil { + return nil + } + rc := modCfg.ResourceByAddr(addr.Resource) + if rc == nil { + return nil + } + + // We're assuming here that resources can either have count or for_each, + // but never both, because that's a requirement enforced by the language + // decoder. But we'll assert it just to make sure we catch it if that + // changes for some reason. 
+ if rc.ForEach != nil && rc.Count != nil { + panic(fmt.Sprintf("%s has both for_each and count", addr)) + } + + switch { + case rc.ForEach != nil: + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, rc.ForEach) + return absoluteRefs(addr.Module, refs) + case rc.Count != nil: + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, rc.Count) + return absoluteRefs(addr.Module, refs) + default: + return nil + } +} diff --git a/pkg/lang/globalref/analyzer_meta_references_test.go b/pkg/lang/globalref/analyzer_meta_references_test.go new file mode 100644 index 00000000000..9ecdf368b8d --- /dev/null +++ b/pkg/lang/globalref/analyzer_meta_references_test.go @@ -0,0 +1,175 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestAnalyzerMetaReferences(t *testing.T) { + tests := []struct { + InputContainer string + InputRef string + WantRefs []string + }{ + { + ``, + `local.a`, + nil, + }, + { + ``, + `local.single`, + []string{ + "::test_thing.single.id", + }, + }, + { + ``, + `test_thing.single`, + []string{ + "::local.a", + "::local.b", + }, + }, + { + ``, + `test_thing.single.string`, + []string{ + "::local.a", + }, + }, + { + ``, + `test_thing.for_each`, + []string{ + "::local.a", + "::test_thing.single.string", + }, + }, + { + ``, + `test_thing.for_each["whatever"]`, + []string{ + "::local.a", + "::test_thing.single.string", + }, + }, + { + ``, + `test_thing.for_each["whatever"].single`, + []string{ + "::test_thing.single.string", + }, + }, + { + ``, + `test_thing.for_each["whatever"].single.z`, + []string{ + "::test_thing.single.string", + }, + }, + { + ``, + `test_thing.count`, + []string{ + "::local.a", + }, + }, + { + ``, + `test_thing.count[0]`, + []string{ + "::local.a", + }, + }, + { + ``, + `module.single.a`, + []string{ + 
"module.single::test_thing.foo", + "module.single::var.a", + }, + }, + { + ``, + `module.for_each["whatever"].a`, + []string{ + `module.for_each["whatever"]::test_thing.foo`, + `module.for_each["whatever"]::var.a`, + }, + }, + { + ``, + `module.count[0].a`, + []string{ + `module.count[0]::test_thing.foo`, + `module.count[0]::var.a`, + }, + }, + { + `module.single`, + `var.a`, + []string{ + "::test_thing.single", + }, + }, + { + `module.single`, + `test_thing.foo`, + []string{ + "module.single::var.a", + }, + }, + } + + azr := testAnalyzer(t, "assorted") + + for _, test := range tests { + name := test.InputRef + if test.InputContainer != "" { + name = test.InputContainer + " " + test.InputRef + } + t.Run(name, func(t *testing.T) { + t.Logf("testing %s", name) + var containerAddr addrs.Targetable + containerAddr = addrs.RootModuleInstance + if test.InputContainer != "" { + moduleAddrTarget, diags := addrs.ParseTargetStr(test.InputContainer) + if diags.HasErrors() { + t.Fatalf("input module address is invalid: %s", diags.Err()) + } + containerAddr = moduleAddrTarget.Subject + } + + localRef, diags := addrs.ParseRefStr(test.InputRef) + if diags.HasErrors() { + t.Fatalf("input reference is invalid: %s", diags.Err()) + } + + ref := Reference{ + ContainerAddr: containerAddr, + LocalRef: localRef, + } + + refs := azr.MetaReferences(ref) + + want := test.WantRefs + var got []string + for _, ref := range refs { + got = append(got, ref.DebugString()) + } + sort.Strings(got) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong references\n%s", diff) + } + }) + } +} diff --git a/pkg/lang/globalref/analyzer_test.go b/pkg/lang/globalref/analyzer_test.go new file mode 100644 index 00000000000..f5547e13fad --- /dev/null +++ b/pkg/lang/globalref/analyzer_test.go @@ -0,0 +1,109 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "context" + "path/filepath" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/registry" +) + +func testAnalyzer(t *testing.T, fixtureName string) *Analyzer { + configDir := filepath.Join("testdata", fixtureName) + + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + + inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + _, instDiags := inst.InstallModules(context.Background(), configDir, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatalf("unexpected module installation errors: %s", instDiags.Err().Error()) + } + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after install: %s", err) + } + + cfg, loadDiags := loader.LoadConfig(configDir, configs.RootModuleCallForTesting()) + if loadDiags.HasErrors() { + t.Fatalf("unexpected configuration errors: %s", loadDiags.Error()) + } + + resourceTypeSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + "number": {Type: cty.Number, Optional: true}, + "any": {Type: cty.DynamicPseudoType, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "single": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "z": {Type: cty.String, Optional: true}, + }, + }, + }, + "group": { + Nesting: configschema.NestingGroup, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "z": 
{Type: cty.String, Optional: true}, + }, + }, + }, + "list": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "z": {Type: cty.String, Optional: true}, + }, + }, + }, + "map": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "z": {Type: cty.String, Optional: true}, + }, + }, + }, + "set": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "z": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + schemas := map[addrs.Provider]providers.ProviderSchema{ + addrs.MustParseProviderSourceString("hashicorp/test"): { + ResourceTypes: map[string]providers.Schema{ + "test_thing": { + Block: resourceTypeSchema, + }, + }, + DataSources: map[string]providers.Schema{ + "test_thing": { + Block: resourceTypeSchema, + }, + }, + }, + } + + return NewAnalyzer(cfg, schemas) +} diff --git a/pkg/lang/globalref/doc.go b/pkg/lang/globalref/doc.go new file mode 100644 index 00000000000..9aaab2130cf --- /dev/null +++ b/pkg/lang/globalref/doc.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package globalref is home to some analysis algorithms that aim to answer +// questions about references between objects and object attributes across +// an entire configuration. +// +// This is a different problem than references within a single module, which +// we handle using some relatively simpler functions in the "lang" package +// in the parent directory. The globalref algorithms are often implemented +// in terms of those module-local reference-checking functions. 
+package globalref diff --git a/pkg/lang/globalref/reference.go b/pkg/lang/globalref/reference.go new file mode 100644 index 00000000000..2d2a4b43052 --- /dev/null +++ b/pkg/lang/globalref/reference.go @@ -0,0 +1,205 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package globalref + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Reference combines an addrs.Reference with the address of the module +// instance or resource instance where it was found. +// +// Because of the design of the OpenTofu language, our main model of +// references only captures the module-local part of the reference and assumes +// that it's always clear from context which module a reference belongs to. +// That's not true for globalref because our whole purpose is to work across +// module boundaries, and so this package in particular has its own +// representation of references. +type Reference struct { + // ContainerAddr is always either addrs.ModuleInstance or + // addrs.AbsResourceInstance. The latter is required if LocalRef's + // subject is either an addrs.CountAddr or addrs.ForEachAddr, so + // we can know which resource's repetition expression it's + // referring to. + ContainerAddr addrs.Targetable + + // LocalRef is a reference that would be resolved in the context + // of the module instance or resource instance given in ContainerAddr. 
+ LocalRef *addrs.Reference +} + +func absoluteRef(containerAddr addrs.Targetable, localRef *addrs.Reference) Reference { + ret := Reference{ + ContainerAddr: containerAddr, + LocalRef: localRef, + } + // For simplicity's sake, we always reduce the ContainerAddr to be + // just the module address unless it's a count.index, each.key, or + // each.value reference, because for anything else it's immaterial + // which resource it belongs to. + switch localRef.Subject.(type) { + case addrs.CountAttr, addrs.ForEachAttr: + // nothing to do + default: + ret.ContainerAddr = ret.ModuleAddr() + } + return ret +} + +func absoluteRefs(containerAddr addrs.Targetable, refs []*addrs.Reference) []Reference { + if len(refs) == 0 { + return nil + } + + ret := make([]Reference, len(refs)) + for i, ref := range refs { + ret[i] = absoluteRef(containerAddr, ref) + } + return ret +} + +// ModuleAddr returns the address of the module where the reference would +// be resolved. +// +// This is either ContainerAddr directly if it's already just a module +// instance, or the module instance part of it if it's a resource instance. +func (r Reference) ModuleAddr() addrs.ModuleInstance { + switch addr := r.ContainerAddr.(type) { + case addrs.ModuleInstance: + return addr + case addrs.AbsResourceInstance: + return addr.Module + default: + // NOTE: We're intentionally using only a subset of possible + // addrs.Targetable implementations here, so anything else + // is invalid. + panic(fmt.Sprintf("reference has invalid container address type %T", addr)) + } +} + +// ResourceInstance returns the address of the resource where the reference +// would be resolved, if there is one. +// +// Because not all references belong to resources, the extra boolean return +// value indicates whether the returned address is valid. 
+func (r Reference) ResourceInstance() (addrs.AbsResourceInstance, bool) { + switch container := r.ContainerAddr.(type) { + case addrs.ModuleInstance: + moduleInstance := container + + switch ref := r.LocalRef.Subject.(type) { + case addrs.Resource: + return ref.Instance(addrs.NoKey).Absolute(moduleInstance), true + case addrs.ResourceInstance: + return ref.Absolute(moduleInstance), true + } + + return addrs.AbsResourceInstance{}, false + + case addrs.AbsResourceInstance: + return container, true + default: + // NOTE: We're intentionally using only a subset of possible + // addrs.Targetable implementations here, so anything else + // is invalid. + panic(fmt.Sprintf("reference has invalid container address type %T", container)) + } +} + +// DebugString returns an internal (but still somewhat OpenTofu-language-like) +// compact string representation of the reciever, which isn't an address that +// any of our usual address parsers could accept but still captures the +// essence of what the reference represents. +// +// The DebugString result is not suitable for end-user-oriented messages. +// +// DebugString is also not suitable for use as a unique key for a reference, +// because it's ambiguous (between a no-key resource instance and a resource) +// and because it discards the source location information in the LocalRef. +func (r Reference) DebugString() string { + // As the doc comment insinuates, we don't have any real syntax for + // "absolute references": references are always local, and targets are + // always absolute but only include modules and resources. + return r.ContainerAddr.String() + "::" + r.LocalRef.DisplayString() +} + +// ResourceAttr converts the Reference value to a more specific ResourceAttr +// value. +// +// Because not all references belong to resources, the extra boolean return +// value indicates whether the returned address is valid. 
+func (r Reference) ResourceAttr() (ResourceAttr, bool) { + res, ok := r.ResourceInstance() + if !ok { + return ResourceAttr{}, ok + } + + traversal := r.LocalRef.Remaining + + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + + return ResourceAttr{ + Resource: res, + Attr: path, + }, true +} + +// addrKey returns the referenceAddrKey value for the item that +// this reference refers to, discarding any source location information. +// +// See the referenceAddrKey doc comment for more information on what this +// is suitable for. +func (r Reference) addrKey() referenceAddrKey { + // This is a pretty arbitrary bunch of stuff. We include the type here + // just to differentiate between no-key resource instances and resources. + return referenceAddrKey(fmt.Sprintf("%s(%T)%s", r.ContainerAddr.String(), r.LocalRef.Subject, r.LocalRef.DisplayString())) +} + +// referenceAddrKey is a special string type which conventionally contains +// a unique string representation of the object that a reference refers to, +// although not of the reference itself because it ignores the information +// that would differentiate two different references to the same object. +// +// The actual content of a referenceAddrKey is arbitrary, for internal use +// only. and subject to change in future. We use a named type here only to +// make it easier to see when we're intentionally using strings to uniquely +// identify absolute reference addresses. +type referenceAddrKey string + +// ResourceAttr represents a global resource and attribute reference. 
+// This is a more specific form of the Reference type since it can only refer +// to a specific AbsResource and one of its attributes. +type ResourceAttr struct { + Resource addrs.AbsResourceInstance + Attr cty.Path +} + +func (r ResourceAttr) DebugString() string { + return r.Resource.String() + tfdiags.FormatCtyPath(r.Attr) +} diff --git a/pkg/lang/globalref/testdata/assorted/assorted-root.tf b/pkg/lang/globalref/testdata/assorted/assorted-root.tf new file mode 100644 index 00000000000..09f730eee45 --- /dev/null +++ b/pkg/lang/globalref/testdata/assorted/assorted-root.tf @@ -0,0 +1,47 @@ +locals { + a = "hello world" + b = 2 + single = test_thing.single.id +} + +resource "test_thing" "single" { + string = local.a + number = local.b + +} + +resource "test_thing" "for_each" { + for_each = {"q": local.a} + + string = local.a + + single { + z = test_thing.single.string + } +} + +resource "test_thing" "count" { + count = length(local.a) + + string = local.a +} + +module "single" { + source = "./child" + + a = test_thing.single +} + +module "for_each" { + source = "./child" + for_each = {"q": test_thing.single} + + a = test_thing.single +} + +module "count" { + source = "./child" + count = length(test_thing.single.string) + + a = test_thing.single +} diff --git a/pkg/lang/globalref/testdata/assorted/child/assorted-child.tf b/pkg/lang/globalref/testdata/assorted/child/assorted-child.tf new file mode 100644 index 00000000000..e722fe8e1d1 --- /dev/null +++ b/pkg/lang/globalref/testdata/assorted/child/assorted-child.tf @@ -0,0 +1,13 @@ +variable "a" { +} + +resource "test_thing" "foo" { + string = var.a +} + +output "a" { + value = { + a = var.a + foo = test_thing.foo + } +} diff --git a/pkg/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf b/pkg/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf new file mode 100644 index 00000000000..a88ec466f98 --- /dev/null +++
b/pkg/lang/globalref/testdata/contributing-resources/compute/contributing-resources-compute.tf @@ -0,0 +1,53 @@ +variable "network" { + type = object({ + vpc_id = string + subnet_ids = map(string) + }) +} + +resource "test_thing" "controller" { + for_each = var.network.subnet_ids + + string = each.value +} + +locals { + workers = flatten([ + for k, id in var.network.subnet_ids : [ + for n in range(3) : { + unique_key = "${k}:${n}" + subnet_id = n + } + ] + ]) + + controllers = test_thing.controller +} + +resource "test_thing" "worker" { + for_each = { for o in local.workers : o.unique_key => o.subnet_id } + + string = each.value + + dynamic "list" { + for_each = test_thing.controller + content { + z = list.value.string + } + } +} + +resource "test_thing" "load_balancer" { + string = var.network.vpc_id + + dynamic "list" { + for_each = local.controllers + content { + z = list.value.string + } + } +} + +output "compuneetees_api_url" { + value = test_thing.load_balancer.string +} diff --git a/pkg/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf b/pkg/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf new file mode 100644 index 00000000000..d6ec5c4815b --- /dev/null +++ b/pkg/lang/globalref/testdata/contributing-resources/contributing-resources-root.tf @@ -0,0 +1,28 @@ +variable "environment" { + type = string +} + +data "test_thing" "environment" { + string = var.environment +} + +module "network" { + source = "./network" + + base_cidr_block = data.test_thing.environment.any.base_cidr_block + subnet_count = data.test_thing.environment.any.subnet_count +} + +module "compute" { + source = "./compute" + + network = module.network +} + +output "network" { + value = module.network +} + +output "c10s_url" { + value = module.compute.compuneetees_api_url +} diff --git a/pkg/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf
b/pkg/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf new file mode 100644 index 00000000000..3a4c9dc1d39 --- /dev/null +++ b/pkg/lang/globalref/testdata/contributing-resources/network/contributing-resources-network.tf @@ -0,0 +1,41 @@ +variable "base_cidr_block" { + type = string +} + +variable "subnet_count" { + type = number +} + +locals { + subnet_newbits = log(var.subnet_count, 2) + subnet_cidr_blocks = toset([ + for n in range(var.subnet_count) : cidrsubnet(var.base_cidr_block, local.subnet_newbits, n) + ]) +} + +resource "test_thing" "vpc" { + string = var.base_cidr_block +} + +resource "test_thing" "subnet" { + for_each = local.subnet_cidr_blocks + + string = test_thing.vpc.string + single { + z = each.value + } +} + +resource "test_thing" "route_table" { + for_each = local.subnet_cidr_blocks + + string = each.value +} + +output "vpc_id" { + value = test_thing.vpc.string +} + +output "subnet_ids" { + value = { for k, sn in test_thing.subnet : k => sn.string } +} diff --git a/pkg/lang/marks/marks.go b/pkg/lang/marks/marks.go new file mode 100644 index 00000000000..302f2891c43 --- /dev/null +++ b/pkg/lang/marks/marks.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package marks + +import ( + "github.com/zclconf/go-cty/cty" +) + +// valueMarks allow creating strictly typed values for use as cty.Value marks. +// Each distinct mark value must be a constant in this package whose value +// is a valueMark whose underlying string matches the name of the variable. +type valueMark string + +func (m valueMark) GoString() string { + return "marks." + string(m) +} + +// Has returns true if and only if the cty.Value has the given mark. 
+func Has(val cty.Value, mark valueMark) bool { + return val.HasMark(mark) +} + +// Contains returns true if the cty.Value or any value within it contains +// the given mark. +func Contains(val cty.Value, mark valueMark) bool { + ret := false + cty.Walk(val, func(_ cty.Path, v cty.Value) (bool, error) { + if v.HasMark(mark) { + ret = true + return false, nil + } + return true, nil + }) + return ret +} + +// Sensitive indicates that this value is marked as sensitive in the context of +// OpenTofu. +const Sensitive = valueMark("Sensitive") + +// TypeType is used to indicate that the value contains a representation of +// another value's type. This is part of the implementation of the console-only +// `type` function. +const TypeType = valueMark("TypeType") diff --git a/pkg/lang/references.go b/pkg/lang/references.go new file mode 100644 index 00000000000..42839a4519d --- /dev/null +++ b/pkg/lang/references.go @@ -0,0 +1,122 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/blocktoattr" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// References finds all of the references in the given set of traversals, +// returning diagnostics if any of the traversals cannot be interpreted as a +// reference. +// +// This function does not do any de-duplication of references, since references +// have source location information embedded in them and so any invalid +// references that are duplicated should have errors reported for each +// occurrence. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid.
Otherwise, the returned slice has one reference per +// given traversal, though it is not guaranteed that the references will +// appear in the same order as the given traversals. +func References(parseRef ParseRef, traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := parseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(parseRef ParseRef, body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'tofu' package, since it's + // in a better position to test this due to having mock providers etc + // available. 
+ traversals := blocktoattr.ExpandedVariables(body, schema) + funcs := filterProviderFunctions(blocktoattr.ExpandedFunctions(body, schema)) + + return References(parseRef, append(traversals, funcs...)) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(parseRef ParseRef, expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + if fexpr, ok := expr.(hcl.ExpressionWithFunctions); ok { + funcs := filterProviderFunctions(fexpr.Functions()) + traversals = append(traversals, funcs...) + } + return References(parseRef, traversals) +} + +// ProviderFunctionsInExpr is a helper wrapper around References that searches for provider +// function traversals in an ExpressionWithFunctions, then converts the traversals into +// references +func ProviderFunctionsInExpr(parseRef ParseRef, expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + if fexpr, ok := expr.(hcl.ExpressionWithFunctions); ok { + funcs := filterProviderFunctions(fexpr.Functions()) + return References(parseRef, funcs) + } + return nil, nil +} + +func filterProviderFunctions(funcs []hcl.Traversal) []hcl.Traversal { + pfuncs := make([]hcl.Traversal, 0, len(funcs)) + for _, fn := range funcs { + if len(fn) == 0 { + continue + } + if root, ok := fn[0].(hcl.TraverseRoot); ok { + if addrs.ParseFunction(root.Name).IsNamespace(addrs.FunctionNamespaceProvider) { + pfuncs = append(pfuncs, fn) + } + } + } + return pfuncs +} diff --git a/pkg/lang/scope.go b/pkg/lang/scope.go new file mode 100644 index 00000000000..6ed90db9100 --- /dev/null +++ b/pkg/lang/scope.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lang + +import ( + "sync" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty/function" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/experiments" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type ParseRef func(traversal hcl.Traversal) (*addrs.Reference, tfdiags.Diagnostics) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // ParseRef is a function that the scope uses to extract references from + // a hcl.Traversal. This controls the type of references the scope currently + // supports. As an example, the testing scope can reference outputs directly + // while the main OpenTofu context scope can not. This means that this + // function for the testing scope will happily return outputs, while the + // main context scope would fail if a user attempts to reference an output. + ParseRef ParseRef + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // SourceAddr is the address of the source item for the scope. This will + // affect any scoped resources that can be accessed from within this scope. + // + // If nil, access is assumed to be at the module level. So, in practice this + // only needs to be set for items that should be able to access something + // hidden in their own scope. + SourceAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. 
This is + // important during a plan phase to avoid generating results that could + // then differ during apply. + PureOnly bool + + funcsLock sync.Mutex + funcs map[string]function.Function + + // activeExperiments is an optional set of experiments that should be + // considered as active in the module that this scope will be used for. + // Callers can populate it by calling the SetActiveExperiments method. + activeExperiments experiments.Set + + // ConsoleMode can be set to true to request any console-only functions are + // included in this scope. + ConsoleMode bool + + // PlanTimestamp is a timestamp representing when the plan was made. It will + // either have been generated during this operation or read from the plan. + PlanTimestamp time.Time + + ProviderFunctions ProviderFunction +} + +type ProviderFunction func(addrs.ProviderFunction, tfdiags.SourceRange) (*function.Function, tfdiags.Diagnostics) + +// SetActiveExperiments allows a caller to declare that a set of experiments +// is active for the module that the receiving Scope belongs to, which might +// then cause the scope to activate some additional experimental behaviors. +func (s *Scope) SetActiveExperiments(active experiments.Set) { + s.activeExperiments = active +} diff --git a/pkg/lang/testdata/functions-test/hello.tmpl b/pkg/lang/testdata/functions-test/hello.tmpl new file mode 100644 index 00000000000..f112ef89919 --- /dev/null +++ b/pkg/lang/testdata/functions-test/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! \ No newline at end of file diff --git a/pkg/lang/testdata/functions-test/hello.txt b/pkg/lang/testdata/functions-test/hello.txt new file mode 100644 index 00000000000..3462721fd4d --- /dev/null +++ b/pkg/lang/testdata/functions-test/hello.txt @@ -0,0 +1 @@ +hello! 
\ No newline at end of file diff --git a/pkg/lang/testdata/functions-test/subdirectory/hello.tmpl b/pkg/lang/testdata/functions-test/subdirectory/hello.tmpl new file mode 100644 index 00000000000..f112ef89919 --- /dev/null +++ b/pkg/lang/testdata/functions-test/subdirectory/hello.tmpl @@ -0,0 +1 @@ +Hello, ${name}! \ No newline at end of file diff --git a/pkg/lang/testdata/functions-test/subdirectory/hello.txt b/pkg/lang/testdata/functions-test/subdirectory/hello.txt new file mode 100644 index 00000000000..3462721fd4d --- /dev/null +++ b/pkg/lang/testdata/functions-test/subdirectory/hello.txt @@ -0,0 +1 @@ +hello! \ No newline at end of file diff --git a/pkg/lang/types/type_type.go b/pkg/lang/types/type_type.go new file mode 100644 index 00000000000..d5457e0173f --- /dev/null +++ b/pkg/lang/types/type_type.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package types + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// TypeType is a capsule type used to represent a cty.Type as a cty.Value. This +// is used by the `type()` console function to smuggle cty.Type values to the +// REPL session, where it can be displayed to the user directly. +var TypeType = cty.Capsule("type", reflect.TypeOf(cty.Type{})) diff --git a/pkg/lang/types/types.go b/pkg/lang/types/types.go new file mode 100644 index 00000000000..e4377210522 --- /dev/null +++ b/pkg/lang/types/types.go @@ -0,0 +1,7 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package types contains non-standard cty types used only within OpenTofu. 
+package types diff --git a/pkg/legacy/helper/acctest/acctest.go b/pkg/legacy/helper/acctest/acctest.go new file mode 100644 index 00000000000..7beaf4f92cb --- /dev/null +++ b/pkg/legacy/helper/acctest/acctest.go @@ -0,0 +1,7 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package acctest contains for OpenTofu Acceptance Tests +package acctest diff --git a/pkg/legacy/helper/acctest/random.go b/pkg/legacy/helper/acctest/random.go new file mode 100644 index 00000000000..7bae86fa836 --- /dev/null +++ b/pkg/legacy/helper/acctest/random.go @@ -0,0 +1,181 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package acctest + +import ( + "bytes" + crand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "math/rand" + "net" + "strings" + "time" + + "golang.org/x/crypto/ssh" + + "github.com/apparentlymart/go-cidr/cidr" +) + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +// Helpers for generating random tidbits for use in identifiers to prevent +// collisions in acceptance tests. 
+ +// RandInt generates a random integer +func RandInt() int { + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() +} + +// RandomWithPrefix is used to generate a unique name with a prefix, for +// randomizing names in acceptance tests +func RandomWithPrefix(name string) string { + return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +} + +func RandIntRange(min int, max int) int { + source := rand.New(rand.NewSource(time.Now().UnixNano())) + rangeMax := max - min + + return int(source.Int31n(int32(rangeMax))) +} + +// RandString generates a random alphanumeric string of the length specified +func RandString(strlen int) string { + return RandStringFromCharSet(strlen, CharSetAlphaNum) +} + +// RandStringFromCharSet generates a random string by selecting characters from +// the charset provided +func RandStringFromCharSet(strlen int, charSet string) string { + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = charSet[rand.Intn(len(charSet))] + } + return string(result) +} + +// RandSSHKeyPair generates a public and private SSH key pair. The public key is +// returned in OpenSSH format, and the private key is PEM encoded. +func RandSSHKeyPair(comment string) (string, string, error) { + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return "", "", err + } + keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) + return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil +} + +// RandTLSCert generates a self-signed TLS certificate with a newly created +// private key, and returns both the cert and the private key PEM encoded. 
+func RandTLSCert(orgName string) (string, string, error) { + template := &x509.Certificate{ + SerialNumber: big.NewInt(int64(RandInt())), + Subject: pkix.Name{ + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) + if err != nil { + return "", "", err + } + + certPEM, err := pemEncode(cert, "CERTIFICATE") + if err != nil { + return "", "", err + } + + return certPEM, privateKeyPEM, nil +} + +// RandIpAddress returns a random IP address in the specified CIDR block. +// The prefix length must be less than 31. +func RandIpAddress(s string) (string, error) { + _, network, err := net.ParseCIDR(s) + if err != nil { + return "", err + } + + firstIp, lastIp := cidr.AddressRange(network) + first := &big.Int{} + first.SetBytes([]byte(firstIp)) + last := &big.Int{} + last.SetBytes([]byte(lastIp)) + r := &big.Int{} + r.Sub(last, first) + if len := r.BitLen(); len > 31 { + return "", fmt.Errorf("CIDR range is too large: %d", len) + } + + max := int(r.Int64()) + if max == 0 { + // panic: invalid argument to Int31n + return firstIp.String(), nil + } + + host, err := cidr.Host(network, RandIntRange(0, max)) + if err != nil { + return "", err + } + + return host.String(), nil +} + +func genPrivateKey() (*rsa.PrivateKey, string, error) { + privateKey, err := rsa.GenerateKey(crand.Reader, 1024) + if err != nil { + return nil, "", err + } + + privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY") + if err != nil { + return nil, "", err + } + + return privateKey, privateKeyPEM, nil +} + +func pemEncode(b []byte, block 
string) (string, error) { + var buf bytes.Buffer + pb := &pem.Block{Type: block, Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return "", err + } + + return buf.String(), nil +} + +const ( + // CharSetAlphaNum is the alphanumeric character set for use with + // RandStringFromCharSet + CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" + + // CharSetAlpha is the alphabetical character set for use with + // RandStringFromCharSet + CharSetAlpha = "abcdefghijklmnopqrstuvwxyz" +) diff --git a/pkg/legacy/helper/acctest/random_test.go b/pkg/legacy/helper/acctest/random_test.go new file mode 100644 index 00000000000..922e384d169 --- /dev/null +++ b/pkg/legacy/helper/acctest/random_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package acctest + +import ( + "regexp" + "testing" +) + +func TestRandIpAddress(t *testing.T) { + testCases := []struct { + s string + expected *regexp.Regexp + expectedErr string + }{ + { + s: "1.1.1.1/32", + expected: regexp.MustCompile(`^1\.1\.1\.1$`), + }, + { + s: "10.0.0.0/8", + expected: regexp.MustCompile(`^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$`), + }, + { + s: "0.0.0.0/0", + expectedErr: "CIDR range is too large: 32", + }, + { + s: "449d:e5f1:14b1:ddf3:8525:7e9e:4a0d:4a82/128", + expected: regexp.MustCompile(`^449d:e5f1:14b1:ddf3:8525:7e9e:4a0d:4a82$`), + }, + { + s: "2001:db8::/112", + expected: regexp.MustCompile(`^2001:db8::[[:xdigit:]]{1,4}$`), + }, + { + s: "2001:db8::/64", + expectedErr: "CIDR range is too large: 64", + }, + { + s: "abcdefg", + expectedErr: "invalid CIDR address: abcdefg", + }, + } + + for i, tc := range testCases { + v, err := RandIpAddress(tc.s) + if err != nil { + msg := err.Error() + if tc.expectedErr == "" { + t.Fatalf("expected test case %d to succeed but got error %q, ", i, msg) + } + if msg != tc.expectedErr { + t.Fatalf("expected test case %d to fail with %q but got 
%q", i, tc.expectedErr, msg) + } + } else if !tc.expected.MatchString(v) { + t.Fatalf("expected test case %d to return %q but got %q", i, tc.expected, v) + } + } +} diff --git a/pkg/legacy/helper/acctest/remotetests.go b/pkg/legacy/helper/acctest/remotetests.go new file mode 100644 index 00000000000..d662ff375e3 --- /dev/null +++ b/pkg/legacy/helper/acctest/remotetests.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package acctest + +import ( + "net/http" + "os" + "testing" +) + +// SkipRemoteTestsEnvVar is an environment variable that can be set by a user +// running the tests in an environment with limited network connectivity. By +// default, tests requiring internet connectivity make an effort to skip if no +// internet is available, but in some cases the smoke test will pass even +// though the test should still be skipped. +const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS" + +// RemoteTestPrecheck is meant to be run by any unit test that requires +// outbound internet connectivity. The test will be skipped if it's +// unavailable. +func RemoteTestPrecheck(t *testing.T) { + if os.Getenv(SkipRemoteTestsEnvVar) != "" { + t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar) + } + + if _, err := http.Get("http://google.com"); err != nil { + t.Skipf("skipping, internet seems to not be available: %s", err) + } +} diff --git a/pkg/legacy/helper/hashcode/hashcode.go b/pkg/legacy/helper/hashcode/hashcode.go new file mode 100644 index 00000000000..9336dd6354d --- /dev/null +++ b/pkg/legacy/helper/hashcode/hashcode.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hashcode + +import ( + "bytes" + "fmt" + "hash/crc32" +) + +// String hashes a string to a unique hashcode. 
+// Returns a non-negative integer representing the hashcode of the string. +func String(s string) int { + // crc32 returns an uint32, so we need to massage it into an int. + crc := crc32.ChecksumIEEE([]byte(s)) + // We need to first squash the result to 32 bits, embracing the overflow + // to ensure that there is no difference between 32 and 64-bit + // platforms. + squashed := int32(crc) + // convert into a generic int that is sized as per the architecture + systemSized := int(squashed) + + // If the integer is negative, we return the absolute value of the + // integer. This is because we want to return a non-negative integer + if systemSized >= 0 { + return systemSized + } + if -systemSized >= 0 { + return -systemSized + } + // systemSized == MinInt + return 0 +} + +// Strings hashes a list of strings to a unique hashcode. +func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/pkg/legacy/helper/hashcode/hashcode_test.go b/pkg/legacy/helper/hashcode/hashcode_test.go new file mode 100644 index 00000000000..8834a3f6348 --- /dev/null +++ b/pkg/legacy/helper/hashcode/hashcode_test.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package hashcode + +import ( + "testing" +) + +func TestString(t *testing.T) { + v := "hello, world" + expected := String(v) + for i := 0; i < 100; i++ { + actual := String(v) + if actual != expected { + t.Fatalf("bad: %#v\n\t%#v", actual, expected) + } + } +} + +func TestStrings(t *testing.T) { + v := []string{"hello", ",", "world"} + expected := Strings(v) + for i := 0; i < 100; i++ { + actual := Strings(v) + if actual != expected { + t.Fatalf("bad: %#v\n\t%#v", actual, expected) + } + } +} + +func TestString_positiveIndex(t *testing.T) { + // "2338615298" hashes to uint32(2147483648) which is math.MinInt32 + ips := []string{"192.168.1.3", "192.168.1.5", "2338615298"} + for _, ip := range ips { + if index := String(ip); index < 0 { + t.Fatalf("Bad Index %#v for ip %s", index, ip) + } + } +} diff --git a/pkg/legacy/helper/schema/backend.go b/pkg/legacy/helper/schema/backend.go new file mode 100644 index 00000000000..afe68dc1bf6 --- /dev/null +++ b/pkg/legacy/helper/schema/backend.go @@ -0,0 +1,205 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "fmt" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + ctyconvert "github.com/zclconf/go-cty/cty/convert" +) + +// Backend represents a partial backend.Backend implementation and simplifies +// the creation of configuration loading and validation. +// +// Unlike other schema structs such as Provider, this struct is meant to be +// embedded within your actual implementation. 
It provides implementations +// only for Input and Configure and gives you a method for accessing the +// configuration in the form of a ResourceData that you're expected to call +// from the other implementation funcs. +type Backend struct { + // Schema is the schema for the configuration of this backend. If this + // Backend has no configuration this can be omitted. + Schema map[string]*Schema + + // ConfigureFunc is called to configure the backend. Use the + // FromContext* methods to extract information from the context. + // This can be nil, in which case nothing will be called but the + // config will still be stored. + ConfigureFunc func(context.Context) error + + config *ResourceData +} + +var ( + backendConfigKey = contextKey("backend config") +) + +// FromContextBackendConfig extracts a ResourceData with the configuration +// from the context. This should only be called by Backend functions. +func FromContextBackendConfig(ctx context.Context) *ResourceData { + return ctx.Value(backendConfigKey).(*ResourceData) +} + +func (b *Backend) ConfigSchema() *configschema.Block { + // This is an alias of CoreConfigSchema just to implement the + // backend.Backend interface. + return b.CoreConfigSchema() +} + +func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { + if b == nil { + return configVal, nil + } + var diags tfdiags.Diagnostics + var err error + + // In order to use Transform below, this needs to be filled out completely + // according the schema. + configVal, err = b.CoreConfigSchema().CoerceValue(configVal) + if err != nil { + return configVal, diags.Append(err) + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %w", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %w", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags + } + + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags +} + +func (b *Backend) 
Configure(obj cty.Value) tfdiags.Diagnostics { + if b == nil { + return nil + } + + var diags tfdiags.Diagnostics + sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, shimRC, nil, nil, true) + if err != nil { + diags = diags.Append(err) + return diags + } + + data, err := sm.Data(nil, diff) + if err != nil { + diags = diags.Append(err) + return diags + } + b.config = data + + if b.ConfigureFunc != nil { + err = b.ConfigureFunc(context.WithValue( + context.Background(), backendConfigKey, data)) + if err != nil { + diags = diags.Append(err) + return diags + } + } + + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *tofu.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *tofu.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. + shimMap = map[string]interface{}{} + } + return &tofu.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } +} + +// Config returns the configuration. This is available after Configure is +// called. +func (b *Backend) Config() *ResourceData { + return b.config +} diff --git a/pkg/legacy/helper/schema/backend_test.go b/pkg/legacy/helper/schema/backend_test.go new file mode 100644 index 00000000000..19ec15765e6 --- /dev/null +++ b/pkg/legacy/helper/schema/backend_test.go @@ -0,0 +1,198 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestBackendPrepare(t *testing.T) { + cases := []struct { + Name string + B *Backend + Config map[string]cty.Value + Expect map[string]cty.Value + Err bool + }{ + { + "Basic required field", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + }, + map[string]cty.Value{}, + map[string]cty.Value{}, + true, + }, + + { + "Null config", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + }, + nil, + map[string]cty.Value{}, + true, + }, + + { + "Basic required field set", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + }, + map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + false, + }, + + { + "unused default", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Optional: true, + Type: TypeString, + Default: "baz", + }, + }, + }, + map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }, + false, + }, + + { + "default", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Default: "baz", + }, + }, + }, + map[string]cty.Value{}, + map[string]cty.Value{ + "foo": cty.StringVal("baz"), + }, + false, + }, + + { + "default func", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "baz", nil + }, + }, + }, + }, + map[string]cty.Value{}, + map[string]cty.Value{ + "foo": cty.StringVal("baz"), + }, + false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + cfgVal := cty.NullVal(cty.Object(map[string]cty.Type{})) + if tc.Config 
!= nil { + cfgVal = cty.ObjectVal(tc.Config) + } + configVal, diags := tc.B.PrepareConfig(cfgVal) + if diags.HasErrors() != tc.Err { + for _, d := range diags { + t.Error(d.Description()) + } + } + + if tc.Err { + return + } + + expect := cty.ObjectVal(tc.Expect) + if !expect.RawEquals(configVal) { + t.Fatalf("\nexpected: %#v\ngot: %#v\n", expect, configVal) + } + }) + } +} + +func TestBackendConfigure(t *testing.T) { + cases := []struct { + Name string + B *Backend + Config map[string]cty.Value + Err bool + }{ + { + "Basic config", + &Backend{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ConfigureFunc: func(ctx context.Context) error { + d := FromContextBackendConfig(ctx) + if d.Get("foo").(int) != 42 { + return fmt.Errorf("bad config data") + } + + return nil + }, + }, + map[string]cty.Value{ + "foo": cty.NumberIntVal(42), + }, + false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + diags := tc.B.Configure(cty.ObjectVal(tc.Config)) + if diags.HasErrors() != tc.Err { + t.Errorf("wrong number of diagnostics") + } + }) + } +} diff --git a/pkg/legacy/helper/schema/core_schema.go b/pkg/legacy/helper/schema/core_schema.go new file mode 100644 index 00000000000..17b0c691e64 --- /dev/null +++ b/pkg/legacy/helper/schema/core_schema.go @@ -0,0 +1,314 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by OpenTofu core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// OpenTofu core. 
+// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case SchemaConfigModeBlock: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. 
+ ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema, ValueType: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. This is appropriate only for primitives or collections whose +// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections +// whose elem is a whole resource. +func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { + // The Schema.DefaultFunc capability adds some extra weirdness here since + // it can be combined with "Required: true" to create a situation where + // required-ness is conditional. OpenTofu Core doesn't share this concept, + // so we must sniff for this possibility here and conditionally turn + // off the "Required" flag if it looks like the DefaultFunc is going + // to provide a value. + // This is not 100% true to the original interface of DefaultFunc but + // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc + // situations, which are the main cases we care about. + // + // Note that this also has a consequence for commands that return schema + // information for documentation purposes: running those for certain + // providers will produce different results depending on which environment + // variables are set. We accept that weirdness in order to keep this + // interface to core otherwise simple. 
+ reqd := s.Required + opt := s.Optional + if reqd && s.DefaultFunc != nil { + v, err := s.DefaultFunc() + // We can't report errors from here, so we'll instead just force + // "Required" to false and let the provider try calling its + // DefaultFunc again during the validate step, where it can then + // return the error. + if err != nil || v != nil { + reqd = false + opt = true + } + } + + return &configschema.Attribute{ + Type: s.coreConfigSchemaType(), + Optional: opt, + Required: reqd, + Computed: s.Computed, + Sensitive: s.Sensitive, + Description: s.Description, + } +} + +// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of +// a schema. This is appropriate only for collections whose Elem is an instance +// of Resource, and will panic otherwise. +func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { + ret := &configschema.NestedBlock{} + if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil { + ret.Block = *nested + } + switch s.Type { + case TypeList: + ret.Nesting = configschema.NestingList + case TypeSet: + ret.Nesting = configschema.NestingSet + case TypeMap: + ret.Nesting = configschema.NestingMap + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type)) + } + + ret.MinItems = s.MinItems + ret.MaxItems = s.MaxItems + + if s.Required && s.MinItems == 0 { + // configschema doesn't have a "required" representation for nested + // blocks, but we can fake it by requiring at least one item. + ret.MinItems = 1 + } + if s.Optional && s.MinItems > 0 { + // Historically helper/schema would ignore MinItems if Optional were + // set, so we must mimic this behavior here to ensure that providers + // relying on that undocumented behavior can continue to operate as + // they did before. 
+ ret.MinItems = 0 + } + if s.Computed && !s.Optional { + // MinItems/MaxItems are meaningless for computed nested blocks, since + // they are never set by the user anyway. This ensures that we'll never + // generate weird errors about them. + ret.MinItems = 0 + ret.MaxItems = 0 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. +func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() + case *Resource: + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. 
+ elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. +func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. 
+ if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + + return block +} + +func (r *Resource) coreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the backends's schema. +func (r *Backend) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/pkg/legacy/helper/schema/core_schema_test.go b/pkg/legacy/helper/schema/core_schema_test.go new file mode 100644 index 00000000000..ce3d388de76 --- /dev/null +++ b/pkg/legacy/helper/schema/core_schema_test.go @@ -0,0 +1,463 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// add the implicit "id" attribute for test resources +func testResource(block *configschema.Block) *configschema.Block { + if block.Attributes == nil { + block.Attributes = make(map[string]*configschema.Attribute) + } + + if block.BlockTypes == nil { + block.BlockTypes = make(map[string]*configschema.NestedBlock) + } + + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + return block +} + +func TestSchemaMapCoreConfigSchema(t *testing.T) { + tests := map[string]struct { + Schema map[string]*Schema + Want *configschema.Block + }{ + "empty": { + map[string]*Schema{}, + testResource(&configschema.Block{}), + }, + "primitives": { + map[string]*Schema{ + "int": { + Type: TypeInt, + Required: true, + Description: "foo bar baz", + }, + "float": { + Type: TypeFloat, + Optional: true, + }, + "bool": { + Type: TypeBool, + Computed: true, + }, + "string": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "int": { + Type: cty.Number, + Required: true, + Description: "foo bar baz", + }, + "float": { + Type: cty.Number, + Optional: true, + }, + "bool": { + Type: cty.Bool, + Computed: true, + }, + "string": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "simple collections": { + map[string]*Schema{ + "list": { + Type: TypeList, + Required: true, + Elem: &Schema{ + Type: TypeInt, + }, + }, + "set": { + Type: TypeSet, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + "map": { + Type: TypeMap, + Optional: true, + Elem: &Schema{ + Type: TypeBool, + }, + }, + 
"map_default_type": { + Type: TypeMap, + Optional: true, + // Maps historically don't have elements because we + // assumed they would be strings, so this needs to work + // for pre-existing schemas. + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": { + Type: cty.List(cty.Number), + Required: true, + }, + "set": { + Type: cty.Set(cty.String), + Optional: true, + }, + "map": { + Type: cty.Map(cty.Bool), + Optional: true, + }, + "map_default_type": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "incorrectly-specified collections": { + // Historically we tolerated setting a type directly as the Elem + // attribute, rather than a Schema object. This is common enough + // in existing provider code that we must support it as an alias + // for a schema object with the given type. + map[string]*Schema{ + "list": { + Type: TypeList, + Required: true, + Elem: TypeInt, + }, + "set": { + Type: TypeSet, + Optional: true, + Elem: TypeString, + }, + "map": { + Type: TypeMap, + Optional: true, + Elem: TypeBool, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": { + Type: cty.List(cty.Number), + Required: true, + }, + "set": { + Type: cty.Set(cty.String), + Optional: true, + }, + "map": { + Type: cty.Map(cty.Bool), + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "sub-resource collections": { + map[string]*Schema{ + "list": { + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + MinItems: 1, + MaxItems: 2, + }, + "set": { + Type: TypeSet, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + }, + "map": { + Type: TypeMap, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ 
+ // This one becomes a string attribute because helper/schema + // doesn't actually support maps of resource. The given + // "Elem" is just ignored entirely here, which is important + // because that is also true of the helper/schema logic and + // existing providers rely on this being ignored for + // correct operation. + "map": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "list": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + MinItems: 1, + MaxItems: 2, + }, + "set": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + MinItems: 1, // because schema is Required + }, + }, + }), + }, + "sub-resource collections minitems+optional": { + // This particular case is an odd one where the provider gives + // conflicting information about whether a sub-resource is required, + // by marking it as optional but also requiring one item. + // Historically the optional-ness "won" here, and so we must + // honor that for compatibility with providers that relied on this + // undocumented interaction. 
+ map[string]*Schema{ + "list": { + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + MinItems: 1, + MaxItems: 1, + }, + "set": { + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + MinItems: 1, + MaxItems: 1, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{ + "list": { + Nesting: configschema.NestingList, + Block: configschema.Block{}, + MinItems: 0, + MaxItems: 1, + }, + "set": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + MinItems: 0, + MaxItems: 1, + }, + }, + }), + }, + "sub-resource collections minitems+computed": { + map[string]*Schema{ + "list": { + Type: TypeList, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + MinItems: 1, + MaxItems: 1, + }, + "set": { + Type: TypeSet, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + MinItems: 1, + MaxItems: 1, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": { + Type: cty.List(cty.EmptyObject), + Computed: true, + }, + "set": { + Type: cty.Set(cty.EmptyObject), + Computed: true, + }, + }, + }), + }, + "nested attributes and blocks": { + map[string]*Schema{ + "foo": { + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeList, + Required: true, + Elem: &Schema{ + Type: TypeList, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + "baz": { + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{}, + }, + }, + }, + }, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + 
Type: cty.List(cty.List(cty.String)), + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Nesting: configschema.NestingSet, + Block: configschema.Block{}, + }, + }, + }, + MinItems: 1, // because schema is Required + }, + }, + }), + }, + "sensitive": { + map[string]*Schema{ + "string": { + Type: TypeString, + Optional: true, + Sensitive: true, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Optional: true, + Sensitive: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "conditionally required on": { + map[string]*Schema{ + "string": { + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return nil, nil + }, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "conditionally required off": { + map[string]*Schema{ + "string": { + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + // If we return a non-nil default then this overrides + // the "Required: true" for the purpose of building + // the core schema, so that core will ignore it not + // being set and let the provider handle it. 
+ return "boop", nil + }, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + "conditionally required error": { + map[string]*Schema{ + "string": { + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return nil, fmt.Errorf("placeholder error") + }, + }, + }, + testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Optional: true, // Just so we can progress to provider-driven validation and return the error there + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := (&Resource{Schema: test.Schema}).CoreConfigSchema() + if !cmp.Equal(got, test.Want, equateEmpty, typeComparer) { + t.Error(cmp.Diff(got, test.Want, equateEmpty, typeComparer)) + } + }) + } +} diff --git a/pkg/legacy/helper/schema/data_source_resource_shim.go b/pkg/legacy/helper/schema/data_source_resource_shim.go new file mode 100644 index 00000000000..586afe039fb --- /dev/null +++ b/pkg/legacy/helper/schema/data_source_resource_shim.go @@ -0,0 +1,64 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" +) + +// DataSourceResourceShim takes a Resource instance describing a data source +// (with a Read implementation and a Schema, at least) and returns a new +// Resource instance with additional Create and Delete implementations that +// allow the data source to be used as a resource. +// +// This is a backward-compatibility layer for data sources that were formerly +// read-only resources before the data source concept was added. It should not +// be used for any *new* data sources. 
+// +// The Read function for the data source *must* call d.SetId with a non-empty +// id in order for this shim to function as expected. +// +// The provided Resource instance, and its schema, will be modified in-place +// to make it suitable for use as a full resource. +func DataSourceResourceShim(name string, dataSource *Resource) *Resource { + // Recursively, in-place adjust the schema so that it has ForceNew + // on any user-settable resource. + dataSourceResourceShimAdjustSchema(dataSource.Schema) + + dataSource.Create = CreateFunc(dataSource.Read) + dataSource.Delete = func(d *ResourceData, meta interface{}) error { + d.SetId("") + return nil + } + dataSource.Update = nil // should already be nil, but let's make sure + + // FIXME: Link to some further docs either on the website or in the + // changelog, once such a thing exists. + dataSource.DeprecationMessage = fmt.Sprintf( + "using %s as a resource is deprecated; consider using the data source instead", + name, + ) + + return dataSource +} + +func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { + for _, s := range schema { + // If the attribute is configurable then it must be ForceNew, + // since we have no Update implementation. + if s.Required || s.Optional { + s.ForceNew = true + } + + // If the attribute is a nested resource, we need to recursively + // apply these same adjustments to it. + if s.Elem != nil { + if r, ok := s.Elem.(*Resource); ok { + dataSourceResourceShimAdjustSchema(r.Schema) + } + } + } +} diff --git a/pkg/legacy/helper/schema/doc.go b/pkg/legacy/helper/schema/doc.go new file mode 100644 index 00000000000..80cf5cd0f6e --- /dev/null +++ b/pkg/legacy/helper/schema/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// Package schema is a legacy package that used to represent the SDK, which is now its own +// library external to OpenTofu Core https://github.com/hashicorp/terraform-plugin-sdk +// Some of it is still used by OpenTofu's remote state backends, but this entire +// package should be removed in the future. +package schema diff --git a/pkg/legacy/helper/schema/equal.go b/pkg/legacy/helper/schema/equal.go new file mode 100644 index 00000000000..eaea311e5dd --- /dev/null +++ b/pkg/legacy/helper/schema/equal.go @@ -0,0 +1,11 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/pkg/legacy/helper/schema/field_reader.go b/pkg/legacy/helper/schema/field_reader.go new file mode 100644 index 00000000000..8c66a578bc4 --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader.go @@ -0,0 +1,348 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "strconv" + "strings" +) + +// FieldReaders are responsible for decoding fields out of data into +// the proper typed representation. ResourceData uses this to query data +// out of multiple sources: config, state, diffs, etc. +type FieldReader interface { + ReadField([]string) (FieldReadResult, error) +} + +// FieldReadResult encapsulates all the resulting data from reading +// a field. +type FieldReadResult struct { + // Value is the actual read value. NegValue is the _negative_ value + // or the items that should be removed (if they existed). NegValue + // doesn't make sense for primitives but is important for any + // container types such as maps, sets, lists. 
+ Value interface{} + ValueProcessed interface{} + + // Exists is true if the field was found in the data. False means + // it wasn't found if there was no error. + Exists bool + + // Computed is true if the field was found but the value + // is computed. + Computed bool +} + +// ValueOrZero returns the value of this result or the zero value of the +// schema type, ensuring a consistent non-nil return value. +func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { + if r.Value != nil { + return r.Value + } + + return s.ZeroValue() +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through. +func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { + parts := strings.Split(path, ".") + return addrToSchema(parts, schemaMap) +} + +// addrToSchema finds the final element schema for the given address +// and the given schema. It returns all the schemas that led to the final +// schema. These are in order of the address (out to in). +func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { + current := &Schema{ + Type: typeObject, + Elem: schemaMap, + } + + // If we aren't given an address, then the user is requesting the + // full object, so we return the special value which is the full object. + if len(addr) == 0 { + return []*Schema{current} + } + + result := make([]*Schema, 0, len(addr)) + for len(addr) > 0 { + k := addr[0] + addr = addr[1:] + + REPEAT: + // We want to trim off the first "typeObject" since its not a + // real lookup that people do. i.e. []string{"foo"} in a structure + // isn't {typeObject, typeString}, its just a {typeString}. 
+ if len(result) > 0 || current.Type != typeObject { + result = append(result, current) + } + + switch t := current.Type; t { + case TypeBool, TypeInt, TypeFloat, TypeString: + if len(addr) > 0 { + return nil + } + case TypeList, TypeSet: + isIndex := len(addr) > 0 && addr[0] == "#" + + switch v := current.Elem.(type) { + case *Resource: + current = &Schema{ + Type: typeObject, + Elem: v.Schema, + } + case *Schema: + current = v + case ValueType: + current = &Schema{Type: v} + default: + // we may not know the Elem type and are just looking for the + // index + if isIndex { + break + } + + if len(addr) == 0 { + // we've processed the address, so return what we've + // collected + return result + } + + if len(addr) == 1 { + if _, err := strconv.Atoi(addr[0]); err == nil { + // we're indexing a value without a schema. This can + // happen if the list is nested in another schema type. + // Default to a TypeString like we do with a map + current = &Schema{Type: TypeString} + break + } + } + + return nil + } + + // If we only have one more thing and the next thing + // is a #, then we're accessing the index which is always + // an int. + if isIndex { + current = &Schema{Type: TypeInt} + break + } + + case TypeMap: + if len(addr) > 0 { + switch v := current.Elem.(type) { + case ValueType: + current = &Schema{Type: v} + case *Schema: + current, _ = current.Elem.(*Schema) + default: + // maps default to string values. This is all we can have + // if this is nested in another list or map. + current = &Schema{Type: TypeString} + } + } + case typeObject: + // If we're already in the object, then we want to handle Sets + // and Lists specially. Basically, their next key is the lookup + // key (the set value or the list element). For these scenarios, + // we just want to skip it and move to the next element if there + // is one. 
+ if len(result) > 0 { + lastType := result[len(result)-2].Type + if lastType == TypeSet || lastType == TypeList { + if len(addr) == 0 { + break + } + + k = addr[0] + addr = addr[1:] + } + } + + m := current.Elem.(map[string]*Schema) + val, ok := m[k] + if !ok { + return nil + } + + current = val + goto REPEAT + } + } + + return result +} + +// readListField is a generic method for reading a list field out of a +// a FieldReader. It does this based on the assumption that there is a key +// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. +// after that point. +func readListField( + r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + addrPadded := make([]string, len(addr)+1) + copy(addrPadded, addr) + addrPadded[len(addrPadded)-1] = "#" + + // Get the number of elements in the list + countResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !countResult.Exists { + // No count, means we have no list + countResult.Value = 0 + } + + // If we have an empty list, then return an empty list + if countResult.Computed || countResult.Value.(int) == 0 { + return FieldReadResult{ + Value: []interface{}{}, + Exists: countResult.Exists, + Computed: countResult.Computed, + }, nil + } + + // Go through each count, and get the item value out of it + result := make([]interface{}, countResult.Value.(int)) + for i, _ := range result { + is := strconv.FormatInt(int64(i), 10) + addrPadded[len(addrPadded)-1] = is + rawResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !rawResult.Exists { + // This should never happen, because by the time the data + // gets to the FieldReaders, all the defaults should be set by + // Schema. 
+ rawResult.Value = nil + } + + result[i] = rawResult.Value + } + + return FieldReadResult{ + Value: result, + Exists: true, + }, nil +} + +// readObjectField is a generic method for reading objects out of FieldReaders +// based on the assumption that building an address of []string{k, FIELD} +// will result in the proper field data. +func readObjectField( + r FieldReader, + addr []string, + schema map[string]*Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + exists := false + for field, s := range schema { + addrRead := make([]string, len(addr), len(addr)+1) + copy(addrRead, addr) + addrRead = append(addrRead, field) + rawResult, err := r.ReadField(addrRead) + if err != nil { + return FieldReadResult{}, err + } + if rawResult.Exists { + exists = true + } + + result[field] = rawResult.ValueOrZero(s) + } + + return FieldReadResult{ + Value: result, + Exists: exists, + }, nil +} + +// convert map values to the proper primitive type based on schema.Elem +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err + } + + switch elemType { + case TypeInt, TypeFloat, TypeBool: + for k, v := range m { + vs, ok := v.(string) + if !ok { + continue + } + + v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) + if err != nil { + return err + } + + m[k] = v + } + } + return nil +} + +func stringToPrimitive( + value string, computed bool, schema *Schema) (interface{}, error) { + var returnVal interface{} + switch schema.Type { + case TypeBool: + if value == "" { + returnVal = false + break + } + if computed { + break + } + + v, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + + returnVal = v + case TypeFloat: + if value == "" { + returnVal = 0.0 + break + } + if computed { + break + } + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, err + } + + returnVal = v + case TypeInt: + if value == 
"" { + returnVal = 0 + break + } + if computed { + break + } + + v, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return nil, err + } + + returnVal = int(v) + case TypeString: + returnVal = value + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } + + return returnVal, nil +} diff --git a/pkg/legacy/helper/schema/field_reader_config.go b/pkg/legacy/helper/schema/field_reader_config.go new file mode 100644 index 00000000000..9b8f1028a72 --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_config.go @@ -0,0 +1,358 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "log" + "strconv" + "strings" + "sync" + + "github.com/go-viper/mapstructure/v2" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// ConfigFieldReader reads fields out of an untyped map[string]string to the +// best of its ability. It also applies defaults from the Schema. (The other +// field readers do not need default handling because they source fully +// populated data structures.) +type ConfigFieldReader struct { + Config *tofu.ResourceConfig + Schema map[string]*Schema + + indexMaps map[string]map[string]int + once sync.Once +} + +func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { + r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) + return r.readField(address, false) +} + +func (r *ConfigFieldReader) readField( + address []string, nested bool) (FieldReadResult, error) { + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + if !nested { + // If we have a set anywhere in the address, then we need to + // read that set out in order and actually replace that part of + // the address with the real list index. i.e. 
set.50 might actually + // map to set.12 in the config, since it is in list order in the + // config, not indexed by set value. + for i, v := range schemaList { + // Sets are the only thing that cause this issue. + if v.Type != TypeSet { + continue + } + + // If we're at the end of the list, then we don't have to worry + // about this because we're just requesting the whole set. + if i == len(schemaList)-1 { + continue + } + + // If we're looking for the count, then ignore... + if address[i+1] == "#" { + continue + } + + indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] + if !ok { + // Get the set so we can get the index map that tells us the + // mapping of the hash code to the list index + _, err := r.readSet(address[:i+1], v) + if err != nil { + return FieldReadResult{}, err + } + indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] + } + + index, ok := indexMap[address[i+1]] + if !ok { + return FieldReadResult{}, nil + } + + address[i+1] = strconv.FormatInt(int64(index), 10) + } + } + + k := strings.Join(address, ".") + schema := schemaList[len(schemaList)-1] + + // If we're getting the single element of a promoted list, then + // check to see if we have a single element we need to promote. + if address[len(address)-1] == "0" && len(schemaList) > 1 { + lastSchema := schemaList[len(schemaList)-2] + if lastSchema.Type == TypeList && lastSchema.PromoteSingle { + k := strings.Join(address[:len(address)-1], ".") + result, err := r.readPrimitive(k, schema) + if err == nil { + return result, nil + } + } + } + + if protoVersion5 { + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. 
This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil + } + } + } + } + + switch schema.Type { + case TypeBool, TypeFloat, TypeInt, TypeString: + return r.readPrimitive(k, schema) + case TypeList: + // If we support promotion then we first check if we have a lone + // value that we must promote. + // a value that is alone. + if schema.PromoteSingle { + result, err := r.readPrimitive(k, schema.Elem.(*Schema)) + if err == nil && result.Exists { + result.Value = []interface{}{result.Value} + return result, nil + } + } + + return readListField(&nestedConfigFieldReader{r}, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField( + &nestedConfigFieldReader{r}, + address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + // We want both the raw value and the interpolated. We use the interpolated + // to store actual values and we use the raw one to check for + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. + mraw, ok := r.Config.GetRaw(k) + if !ok { + // check if this is from an interpolated field by seeing if it exists + // in the config + _, ok := r.Config.Get(k) + if !ok { + // this really doesn't exist + return FieldReadResult{}, nil + } + + // We couldn't fetch the value from a nested data structure, so treat the + // raw value as an interpolation string. The mraw value is only used + // for the type switch below. 
+ mraw = "${INTERPOLATED}" + } + + result := make(map[string]interface{}) + computed := false + switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. + for i, iv := range mapV { + result[i] = iv + } + case []interface{}: + for i, innerRaw := range m { + for ik := range innerRaw.(map[string]interface{}) { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case []map[string]interface{}: + for i, innerRaw := range m { + for ik := range innerRaw { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case map[string]interface{}: + for ik := range m { + key := fmt.Sprintf("%s.%s", k, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + case nil: + // the map may have been empty on the configuration, so we leave the + // empty result + default: + panic(fmt.Sprintf("unknown type: %#v", mraw)) + } + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var value interface{} + if !computed { + value = result + } + + return FieldReadResult{ + Value: value, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readPrimitive( + k string, schema *Schema) (FieldReadResult, error) { + raw, ok := r.Config.Get(k) + if !ok { + // Nothing in config, but we might still have a default from the schema + var err error + raw, 
err = schema.DefaultValue() + if err != nil { + return FieldReadResult{}, fmt.Errorf("%s, error loading default: %w", k, err) + } + + if raw == nil { + return FieldReadResult{}, nil + } + } + + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return FieldReadResult{}, err + } + + computed := r.Config.IsComputed(k) + returnVal, err := stringToPrimitive(result, computed, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + indexMap := make(map[string]int) + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + return FieldReadResult{Value: set}, nil + } + + // If the list is computed, the set is necessarilly computed + if raw.Computed { + return FieldReadResult{ + Value: set, + Exists: true, + Computed: raw.Computed, + }, nil + } + + // Build up the set from the list elements + for i, v := range raw.Value.([]interface{}) { + // Check if any of the keys in this item are computed + computed := r.hasComputedSubKeys( + fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema) + + code := set.add(v, computed) + indexMap[code] = i + } + + r.indexMaps[strings.Join(address, ".")] = indexMap + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// hasComputedSubKeys walks through a schema and returns whether or not the +// given key contains any subkeys that are computed. +func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool { + prefix := key + "." 
+ + switch t := schema.Elem.(type) { + case *Resource: + for k, schema := range t.Schema { + if r.Config.IsComputed(prefix + k) { + return true + } + + if r.hasComputedSubKeys(prefix+k, schema) { + return true + } + } + } + + return false +} + +// nestedConfigFieldReader is a funny little thing that just wraps a +// ConfigFieldReader to call readField when ReadField is called so that +// we don't recalculate the set rewrites in the address, which leads to +// an infinite loop. +type nestedConfigFieldReader struct { + Reader *ConfigFieldReader +} + +func (r *nestedConfigFieldReader) ReadField( + address []string) (FieldReadResult, error) { + return r.Reader.readField(address, true) +} diff --git a/pkg/legacy/helper/schema/field_reader_config_test.go b/pkg/legacy/helper/schema/field_reader_config_test.go new file mode 100644 index 00000000000..f613407e80d --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_config_test.go @@ -0,0 +1,545 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/helper/hashcode" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestConfigFieldReader_impl(t *testing.T) { + var _ FieldReader = new(ConfigFieldReader) +} + +func TestConfigFieldReader(t *testing.T) { + testFieldReader(t, func(s map[string]*Schema) FieldReader { + return &ConfigFieldReader{ + Schema: s, + + Config: testConfig(t, map[string]interface{}{ + "bool": true, + "float": 3.1415, + "int": 42, + "string": "string", + + "list": []interface{}{"foo", "bar"}, + + "listInt": []interface{}{21, 42}, + + "map": map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + "mapInt": map[string]interface{}{ + "one": "1", + "two": "2", + }, + "mapIntNestedSchema": map[string]interface{}{ + "one": "1", + "two": "2", + }, + "mapFloat": map[string]interface{}{ + "oneDotTwo": "1.2", + }, + "mapBool": map[string]interface{}{ + "True": "true", + "False": "false", + }, + + "set": []interface{}{10, 50}, + "setDeep": []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "foo", + }, + map[string]interface{}{ + "index": 50, + "value": "bar", + }, + }, + }), + } + }) +} + +// This contains custom table tests for our ConfigFieldReader +func TestConfigFieldReader_custom(t *testing.T) { + schema := map[string]*Schema{ + "bool": &Schema{ + Type: TypeBool, + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Config *tofu.ResourceConfig + Err bool + }{ + "basic": { + []string{"bool"}, + FieldReadResult{ + Value: true, + Exists: true, + }, + testConfig(t, map[string]interface{}{ + "bool": true, + }), + false, + }, + + "computed": { + []string{"bool"}, + FieldReadResult{ + Exists: true, + Computed: true, + }, + testConfig(t, map[string]interface{}{ + "bool": hcl2shim.UnknownVariableValue, + }), + false, + }, + } + + for name, 
tc := range cases { + t.Run(name, func(t *testing.T) { + r := &ConfigFieldReader{ + Schema: schema, + Config: tc.Config, + } + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to a list so its more easily checked. + out.Value = s.List() + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + }) + } +} + +func TestConfigFieldReader_DefaultHandling(t *testing.T) { + schema := map[string]*Schema{ + "strWithDefault": &Schema{ + Type: TypeString, + Default: "ImADefault", + }, + "strWithDefaultFunc": &Schema{ + Type: TypeString, + DefaultFunc: func() (interface{}, error) { + return "FuncDefault", nil + }, + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Config *tofu.ResourceConfig + Err bool + }{ + "gets default value when no config set": { + []string{"strWithDefault"}, + FieldReadResult{ + Value: "ImADefault", + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{}), + false, + }, + "config overrides default value": { + []string{"strWithDefault"}, + FieldReadResult{ + Value: "fromConfig", + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "strWithDefault": "fromConfig", + }), + false, + }, + "gets default from function when no config set": { + []string{"strWithDefaultFunc"}, + FieldReadResult{ + Value: "FuncDefault", + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{}), + false, + }, + "config overrides default function": { + []string{"strWithDefaultFunc"}, + FieldReadResult{ + Value: "fromConfig", + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "strWithDefaultFunc": "fromConfig", + }), + false, + }, + } + + for name, tc := range cases { + r := &ConfigFieldReader{ + Schema: schema, + Config: tc.Config, + } + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { 
+ t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to a list so its more easily checked. + out.Value = s.List() + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} + +func TestConfigFieldReader_ComputedMap(t *testing.T) { + schema := map[string]*Schema{ + "map": &Schema{ + Type: TypeMap, + Computed: true, + }, + "listmap": &Schema{ + Type: TypeMap, + Computed: true, + Elem: TypeList, + }, + "maplist": &Schema{ + Type: TypeList, + Computed: true, + Elem: TypeMap, + }, + } + + cases := []struct { + Name string + Addr []string + Result FieldReadResult + Config *tofu.ResourceConfig + Err bool + }{ + { + "set, normal", + []string{"map"}, + FieldReadResult{ + Value: map[string]interface{}{ + "foo": "bar", + }, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "map": map[string]interface{}{ + "foo": "bar", + }, + }), + false, + }, + + { + "computed element", + []string{"map"}, + FieldReadResult{ + Exists: true, + Computed: true, + }, + testConfig(t, map[string]interface{}{ + "map": map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + }), + false, + }, + + { + "native map", + []string{"map"}, + FieldReadResult{ + Value: map[string]interface{}{ + "bar": "baz", + "baz": "bar", + }, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "map": map[string]interface{}{ + "bar": "baz", + "baz": "bar", + }, + }), + false, + }, + + { + "map-from-list-of-maps", + []string{"maplist", "0"}, + FieldReadResult{ + Value: map[string]interface{}{ + "key": "bar", + }, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "maplist": []interface{}{ + map[string]interface{}{ + "key": "bar", + }, + }, + }), + false, + }, + + { + "value-from-list-of-maps", + []string{"maplist", "0", "key"}, + FieldReadResult{ + Value: "bar", + Exists: true, + Computed: false, + }, + testConfig(t, 
map[string]interface{}{ + "maplist": []interface{}{ + map[string]interface{}{ + "key": "bar", + }, + }, + }), + false, + }, + + { + "list-from-map-of-lists", + []string{"listmap", "key"}, + FieldReadResult{ + Value: []interface{}{"bar"}, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "listmap": map[string]interface{}{ + "key": []interface{}{ + "bar", + }, + }, + }), + false, + }, + + { + "value-from-map-of-lists", + []string{"listmap", "key", "0"}, + FieldReadResult{ + Value: "bar", + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "listmap": map[string]interface{}{ + "key": []interface{}{ + "bar", + }, + }, + }), + false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + r := &ConfigFieldReader{ + Schema: schema, + Config: tc.Config, + } + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatal(err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to the raw map + out.Value = s.m + if len(s.m) == 0 { + out.Value = nil + } + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("\nexpected: %#v\ngot: %#v", tc.Result, out) + } + }) + } +} + +func TestConfigFieldReader_ComputedSet(t *testing.T) { + schema := map[string]*Schema{ + "strSet": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Set: HashString, + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Config *tofu.ResourceConfig + Err bool + }{ + "set, normal": { + []string{"strSet"}, + FieldReadResult{ + Value: map[string]interface{}{ + "1938594527": "foo", + }, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "strSet": []interface{}{"foo"}, + }), + false, + }, + + "set, computed element": { + []string{"strSet"}, + FieldReadResult{ + Value: nil, + Exists: true, + Computed: true, + }, + testConfig(t, map[string]interface{}{ + "strSet": 
[]interface{}{hcl2shim.UnknownVariableValue}, + }), + false, + }, + } + + for name, tc := range cases { + r := &ConfigFieldReader{ + Schema: schema, + Config: tc.Config, + } + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to the raw map + out.Value = s.m + if len(s.m) == 0 { + out.Value = nil + } + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} + +func TestConfigFieldReader_computedComplexSet(t *testing.T) { + hashfunc := func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["vhd_uri"].(string))) + return hashcode.String(buf.String()) + } + + schema := map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "name": { + Type: TypeString, + Required: true, + }, + + "vhd_uri": { + Type: TypeString, + Required: true, + }, + }, + }, + Set: hashfunc, + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Config *tofu.ResourceConfig + Err bool + }{ + "set, normal": { + []string{"set"}, + FieldReadResult{ + Value: map[string]interface{}{ + "532860136": map[string]interface{}{ + "name": "myosdisk1", + "vhd_uri": "bar", + }, + }, + Exists: true, + Computed: false, + }, + testConfig(t, map[string]interface{}{ + "set": []interface{}{ + map[string]interface{}{ + "name": "myosdisk1", + "vhd_uri": "bar", + }, + }, + }), + false, + }, + } + + for name, tc := range cases { + r := &ConfigFieldReader{ + Schema: schema, + Config: tc.Config, + } + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to the raw map + out.Value = s.m + if len(s.m) == 0 { + out.Value = nil + } + } + if 
!reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} + +func testConfig(t *testing.T, raw map[string]interface{}) *tofu.ResourceConfig { + return tofu.NewResourceConfigRaw(raw) +} diff --git a/pkg/legacy/helper/schema/field_reader_diff.go b/pkg/legacy/helper/schema/field_reader_diff.go new file mode 100644 index 00000000000..bbe3f910648 --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_diff.go @@ -0,0 +1,249 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "strings" + + "github.com/go-viper/mapstructure/v2" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// DiffFieldReader reads fields out of a diff structures. +// +// It also requires access to a Reader that reads fields from the structure +// that the diff was derived from. This is usually the state. This is required +// because a diff on its own doesn't have complete data about full objects +// such as maps. +// +// The Source MUST be the data that the diff was derived from. If it isn't, +// the behavior of this struct is undefined. +// +// Reading fields from a DiffFieldReader is identical to reading from +// Source except the diff will be applied to the end result. +// +// The "Exists" field on the result will be set to true if the complete +// field exists whether its from the source, diff, or a combination of both. +// It cannot be determined whether a retrieved value is composed of +// diff elements. +type DiffFieldReader struct { + Diff *tofu.InstanceDiff + Source FieldReader + Schema map[string]*Schema + + // cache for memoizing ReadField calls. 
+ cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error +} + +func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. + cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} + return FieldReadResult{}, nil + } + + var res FieldReadResult + var err error + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + res, err = r.readPrimitive(address, schema) + case TypeList: + res, err = readListField(r, address, schema) + case TypeMap: + res, err = r.readMap(address, schema) + case TypeSet: + res, err = r.readSet(address, schema) + case typeObject: + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err +} + +func (r *DiffFieldReader) readMap( + address []string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // First read the map from the underlying source + source, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if source.Exists { + // readMap may return a nil value, or an unknown value placeholder in + // some cases, causing the type assertion to panic if we don't assign the ok value + result, _ = source.Value.(map[string]interface{}) + resultSet = true + } + + // Next, read all the 
elements we have in our diff, and apply + // the diff to our result. + prefix := strings.Join(address, ".") + "." + for k, v := range r.Diff.Attributes { + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasPrefix(k, prefix+"%") { + // Ignore the count field + continue + } + + resultSet = true + + k = k[len(prefix):] + if v.NewRemoved { + delete(result, k) + continue + } + + result[k] = v.New + } + + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *DiffFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + + attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] + if !ok { + return result, nil + } + + var resultVal string + if !attrD.NewComputed { + resultVal = attrD.New + if attrD.NewExtra != nil { + result.ValueProcessed = resultVal + if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { + return FieldReadResult{}, err + } + } + } + + result.Computed = attrD.NewComputed + result.Exists = true + result.Value, err = stringToPrimitive(resultVal, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return result, nil +} + +func (r *DiffFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + prefix := strings.Join(address, ".") + "." 
+ + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // Go through the map and find all the set items + for k, d := range r.Diff.Attributes { + if d.NewRemoved { + // If the field is removed, we always ignore it + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasSuffix(k, "#") { + // Ignore any count field + continue + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + raw, err := r.ReadField(append(address, idx)) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + } + + // Determine if the set "exists". It exists if there are items or if + // the diff explicitly wanted it empty. + exists := set.Len() > 0 + if !exists { + // We could check if the diff value is "0" here but I think the + // existence of "#" on its own is enough to show it existed. This + // protects us in the future from the zero value changing from + // "0" to "" breaking us (if that were to happen). + if _, ok := r.Diff.Attributes[prefix+"#"]; ok { + exists = true + } + } + + if !exists { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if result.Exists { + return result, nil + } + } + + return FieldReadResult{ + Value: set, + Exists: exists, + }, nil +} diff --git a/pkg/legacy/helper/schema/field_reader_diff_test.go b/pkg/legacy/helper/schema/field_reader_diff_test.go new file mode 100644 index 00000000000..94619b6bb7b --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_diff_test.go @@ -0,0 +1,529 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestDiffFieldReader_impl(t *testing.T) { + var _ FieldReader = new(DiffFieldReader) +} + +func TestDiffFieldReader_NestedSetUpdate(t *testing.T) { + hashFn := func(a interface{}) int { + m := a.(map[string]interface{}) + return m["val"].(int) + } + + schema := map[string]*Schema{ + "list_of_sets_1": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "nested_set": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "val": &Schema{ + Type: TypeInt, + }, + }, + }, + Set: hashFn, + }, + }, + }, + }, + "list_of_sets_2": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "nested_set": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "val": &Schema{ + Type: TypeInt, + }, + }, + }, + Set: hashFn, + }, + }, + }, + }, + } + + r := &DiffFieldReader{ + Schema: schema, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "list_of_sets_1.0.nested_set.1.val": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + NewRemoved: true, + }, + "list_of_sets_1.0.nested_set.2.val": &tofu.ResourceAttrDiff{ + New: "2", + }, + }, + }, + } + + r.Source = &MultiLevelFieldReader{ + Readers: map[string]FieldReader{ + "diff": r, + "set": &MapFieldReader{Schema: schema}, + "state": &MapFieldReader{ + Map: &BasicMapReader{ + "list_of_sets_1.#": "1", + "list_of_sets_1.0.nested_set.#": "1", + "list_of_sets_1.0.nested_set.1.val": "1", + "list_of_sets_2.#": "1", + "list_of_sets_2.0.nested_set.#": "1", + "list_of_sets_2.0.nested_set.1.val": "1", + }, + Schema: schema, + }, + }, + Levels: []string{"state", "config"}, + } + + out, err := r.ReadField([]string{"list_of_sets_2"}) + if err != nil { + t.Fatalf("err: %v", err) + } + + s := &Set{F: hashFn} + s.Add(map[string]interface{}{"val": 1}) + expected := s.List() + + l := 
out.Value.([]interface{}) + i := l[0].(map[string]interface{}) + actual := i["nested_set"].(*Set).List() + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("bad: NestedSetUpdate\n\nexpected: %#v\n\ngot: %#v\n\n", expected, actual) + } +} + +// https://github.com/hashicorp/terraform/issues/914 +func TestDiffFieldReader_MapHandling(t *testing.T) { + schema := map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + }, + } + r := &DiffFieldReader{ + Schema: schema, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "tags.%": &tofu.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "tags.baz": &tofu.ResourceAttrDiff{ + Old: "", + New: "qux", + }, + }, + }, + Source: &MapFieldReader{ + Schema: schema, + Map: BasicMapReader(map[string]string{ + "tags.%": "1", + "tags.foo": "bar", + }), + }, + } + + result, err := r.ReadField([]string{"tags"}) + if err != nil { + t.Fatalf("ReadField failed: %#v", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "baz": "qux", + } + + if !reflect.DeepEqual(expected, result.Value) { + t.Fatalf("bad: DiffHandling\n\nexpected: %#v\n\ngot: %#v\n\n", expected, result.Value) + } +} + +func TestDiffFieldReader_extra(t *testing.T) { + schema := map[string]*Schema{ + "stringComputed": &Schema{Type: TypeString}, + + "listMap": &Schema{ + Type: TypeList, + Elem: &Schema{ + Type: TypeMap, + }, + }, + + "mapRemove": &Schema{Type: TypeMap}, + + "setChange": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "value": &Schema{ + Type: TypeString, + Required: true, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + + "setEmpty": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "value": &Schema{ + Type: TypeString, + Required: 
true, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + } + + r := &DiffFieldReader{ + Schema: schema, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "stringComputed": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + + "listMap.0.bar": &tofu.ResourceAttrDiff{ + NewRemoved: true, + }, + + "mapRemove.bar": &tofu.ResourceAttrDiff{ + NewRemoved: true, + }, + + "setChange.10.value": &tofu.ResourceAttrDiff{ + Old: "50", + New: "80", + }, + + "setEmpty.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + }, + }, + + Source: &MapFieldReader{ + Schema: schema, + Map: BasicMapReader(map[string]string{ + "listMap.#": "2", + "listMap.0.foo": "bar", + "listMap.0.bar": "baz", + "listMap.1.baz": "baz", + + "mapRemove.foo": "bar", + "mapRemove.bar": "bar", + + "setChange.#": "1", + "setChange.10.index": "10", + "setChange.10.value": "50", + + "setEmpty.#": "2", + "setEmpty.10.index": "10", + "setEmpty.10.value": "50", + "setEmpty.20.index": "20", + "setEmpty.20.value": "50", + }), + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Err bool + }{ + "stringComputed": { + []string{"stringComputed"}, + FieldReadResult{ + Value: "", + Exists: true, + Computed: true, + }, + false, + }, + + "listMapRemoval": { + []string{"listMap"}, + FieldReadResult{ + Value: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "baz": "baz", + }, + }, + Exists: true, + }, + false, + }, + + "mapRemove": { + []string{"mapRemove"}, + FieldReadResult{ + Value: map[string]interface{}{ + "foo": "bar", + }, + Exists: true, + Computed: false, + }, + false, + }, + + "setChange": { + []string{"setChange"}, + FieldReadResult{ + Value: []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "80", + }, + }, + Exists: true, + }, + false, + }, + + "setEmpty": { + []string{"setEmpty"}, + 
FieldReadResult{ + Value: []interface{}{}, + Exists: true, + }, + false, + }, + } + + for name, tc := range cases { + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to a list so its more easily checked. + out.Value = s.List() + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} + +func TestDiffFieldReader(t *testing.T) { + testFieldReader(t, func(s map[string]*Schema) FieldReader { + return &DiffFieldReader{ + Schema: s, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "bool": &tofu.ResourceAttrDiff{ + Old: "", + New: "true", + }, + + "int": &tofu.ResourceAttrDiff{ + Old: "", + New: "42", + }, + + "float": &tofu.ResourceAttrDiff{ + Old: "", + New: "3.1415", + }, + + "string": &tofu.ResourceAttrDiff{ + Old: "", + New: "string", + }, + + "stringComputed": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + + "list.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + + "list.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + + "list.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + + "listInt.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + + "listInt.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "21", + }, + + "listInt.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "42", + }, + + "map.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + + "map.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + + "mapInt.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "mapInt.one": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "mapInt.two": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + + "mapIntNestedSchema.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "mapIntNestedSchema.one": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "mapIntNestedSchema.two": 
&tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + + "mapFloat.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "mapFloat.oneDotTwo": &tofu.ResourceAttrDiff{ + Old: "", + New: "1.2", + }, + + "mapBool.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "mapBool.True": &tofu.ResourceAttrDiff{ + Old: "", + New: "true", + }, + "mapBool.False": &tofu.ResourceAttrDiff{ + Old: "", + New: "false", + }, + + "set.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + + "set.10": &tofu.ResourceAttrDiff{ + Old: "", + New: "10", + }, + + "set.50": &tofu.ResourceAttrDiff{ + Old: "", + New: "50", + }, + + "setDeep.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + + "setDeep.10.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "10", + }, + + "setDeep.10.value": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + + "setDeep.50.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "50", + }, + + "setDeep.50.value": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Source: &MapFieldReader{ + Schema: s, + Map: BasicMapReader(map[string]string{ + "listMap.#": "2", + "listMap.0.foo": "bar", + "listMap.0.bar": "baz", + "listMap.1.baz": "baz", + }), + }, + } + }) +} diff --git a/pkg/legacy/helper/schema/field_reader_map.go b/pkg/legacy/helper/schema/field_reader_map.go new file mode 100644 index 00000000000..b3a1cbca5af --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_map.go @@ -0,0 +1,240 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "strings" +) + +// MapFieldReader reads fields out of an untyped map[string]string to +// the best of its ability. 
+type MapFieldReader struct { + Map MapReader + Schema map[string]*Schema +} + +func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { + k := strings.Join(address, ".") + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return r.readPrimitive(address, schema) + case TypeList: + return readListField(r, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // If the name of the map field is directly in the map with an + // empty string, it means that the map is being deleted, so mark + // that is is set. + if v, ok := r.Map.Access(k); ok && v == "" { + resultSet = true + } + + prefix := k + "." 
+ r.Map.Range(func(k, v string) bool { + if strings.HasPrefix(k, prefix) { + resultSet = true + + key := k[len(prefix):] + if key != "%" && key != "#" { + result[key] = v + } + } + + return true + }) + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *MapFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + k := strings.Join(address, ".") + result, ok := r.Map.Access(k) + if !ok { + return FieldReadResult{}, nil + } + + returnVal, err := stringToPrimitive(result, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + }, nil +} + +func (r *MapFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + // Get the number of elements in the list + countRaw, err := r.readPrimitive( + append(address, "#"), &Schema{Type: TypeInt}) + if err != nil { + return FieldReadResult{}, err + } + if !countRaw.Exists { + // No count, means we have no list + countRaw.Value = 0 + } + + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // If we have an empty list, then return an empty list + if countRaw.Computed || countRaw.Value.(int) == 0 { + return FieldReadResult{ + Value: set, + Exists: countRaw.Exists, + Computed: countRaw.Computed, + }, nil + } + + // Go through the map and find all the set items + prefix := strings.Join(address, ".") + "." 
+ countExpected := countRaw.Value.(int) + countActual := make(map[string]struct{}) + completed := r.Map.Range(func(k, _ string) bool { + if !strings.HasPrefix(k, prefix) { + return true + } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + return true + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + var raw FieldReadResult + raw, err = r.ReadField(append(address, idx)) + if err != nil { + return false + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + + // Due to the way multimap readers work, if we've seen the number + // of fields we expect, then exit so that we don't read later values. + // For example: the "set" map might have "ports.#", "ports.0", and + // "ports.1", but the "state" map might have those plus "ports.2". + // We don't want "ports.2" + countActual[idx] = struct{}{} + if len(countActual) >= countExpected { + return false + } + + return true + }) + if !completed && err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// MapReader is an interface that is given to MapFieldReader for accessing +// a "map". This can be used to have alternate implementations. For a basic +// map[string]string, use BasicMapReader. +type MapReader interface { + Access(string) (string, bool) + Range(func(string, string) bool) bool +} + +// BasicMapReader implements MapReader for a single map. 
// BasicMapReader adapts a plain map[string]string to the MapReader
// interface.
type BasicMapReader map[string]string

// Access looks up a single key, reporting whether it was present.
func (r BasicMapReader) Access(k string) (string, bool) {
	v, ok := r[k]
	return v, ok
}

// Range invokes f for every key/value pair until f returns false. It
// reports true when every pair was visited.
func (r BasicMapReader) Range(f func(string, string) bool) bool {
	for k, v := range r {
		if !f(k, v) {
			return false
		}
	}

	return true
}

// MultiMapReader reads across several maps; when a key appears in more than
// one, the value from the earliest (lowest-index) map wins.
type MultiMapReader []map[string]string

// Access returns the value for k from the first map that contains it.
func (r MultiMapReader) Access(k string) (string, bool) {
	for _, m := range r {
		if v, ok := m[k]; ok {
			return v, true
		}
	}

	return "", false
}

// Range visits each distinct key exactly once, preferring earlier maps,
// until f returns false. It reports true when iteration ran to completion.
func (r MultiMapReader) Range(f func(string, string) bool) bool {
	visited := make(map[string]struct{})
	for _, m := range r {
		for k, v := range m {
			if _, ok := visited[k]; ok {
				continue
			}

			if !f(k, v) {
				return false
			}

			visited[k] = struct{}{}
		}
	}

	return true
}
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "testing" +) + +func TestMapFieldReader_impl(t *testing.T) { + var _ FieldReader = new(MapFieldReader) +} + +func TestMapFieldReader(t *testing.T) { + testFieldReader(t, func(s map[string]*Schema) FieldReader { + return &MapFieldReader{ + Schema: s, + + Map: BasicMapReader(map[string]string{ + "bool": "true", + "int": "42", + "float": "3.1415", + "string": "string", + + "list.#": "2", + "list.0": "foo", + "list.1": "bar", + + "listInt.#": "2", + "listInt.0": "21", + "listInt.1": "42", + + "map.%": "2", + "map.foo": "bar", + "map.bar": "baz", + + "set.#": "2", + "set.10": "10", + "set.50": "50", + + "setDeep.#": "2", + "setDeep.10.index": "10", + "setDeep.10.value": "foo", + "setDeep.50.index": "50", + "setDeep.50.value": "bar", + + "mapInt.%": "2", + "mapInt.one": "1", + "mapInt.two": "2", + + "mapIntNestedSchema.%": "2", + "mapIntNestedSchema.one": "1", + "mapIntNestedSchema.two": "2", + + "mapFloat.%": "1", + "mapFloat.oneDotTwo": "1.2", + + "mapBool.%": "2", + "mapBool.True": "true", + "mapBool.False": "false", + }), + } + }) +} + +func TestMapFieldReader_extra(t *testing.T) { + r := &MapFieldReader{ + Schema: map[string]*Schema{ + "mapDel": &Schema{Type: TypeMap}, + "mapEmpty": &Schema{Type: TypeMap}, + }, + + Map: BasicMapReader(map[string]string{ + "mapDel": "", + + "mapEmpty.%": "0", + }), + } + + cases := map[string]struct { + Addr []string + Out interface{} + OutOk bool + OutComputed bool + OutErr bool + }{ + "mapDel": { + []string{"mapDel"}, + map[string]interface{}{}, + true, + false, + false, + }, + + "mapEmpty": { + []string{"mapEmpty"}, + map[string]interface{}{}, + true, + false, + false, + }, + } + + for name, tc := range cases { + out, err := r.ReadField(tc.Addr) + if err != nil != tc.OutErr { + t.Fatalf("%s: err: %s", name, err) + } + if out.Computed != tc.OutComputed { + t.Fatalf("%s: err: %#v", name, out.Computed) + } + + if s, ok := out.Value.(*Set); ok { + // If 
it is a set, convert to a list so its more easily checked. + out.Value = s.List() + } + + if !reflect.DeepEqual(out.Value, tc.Out) { + t.Fatalf("%s: out: %#v", name, out.Value) + } + if out.Exists != tc.OutOk { + t.Fatalf("%s: outOk: %#v", name, out.Exists) + } + } +} diff --git a/pkg/legacy/helper/schema/field_reader_multi.go b/pkg/legacy/helper/schema/field_reader_multi.go new file mode 100644 index 00000000000..0b3ec1efcbf --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_multi.go @@ -0,0 +1,68 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" +) + +// MultiLevelFieldReader reads from other field readers, +// merging their results along the way in a specific order. You can specify +// "levels" and name them in order to read only an exact level or up to +// a specific level. +// +// This is useful for saying things such as "read the field from the state +// and config and merge them" or "read the latest value of the field". 
+type MultiLevelFieldReader struct { + Readers map[string]FieldReader + Levels []string +} + +func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { + return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) +} + +func (r *MultiLevelFieldReader) ReadFieldExact( + address []string, level string) (FieldReadResult, error) { + reader, ok := r.Readers[level] + if !ok { + return FieldReadResult{}, fmt.Errorf( + "Unknown reader level: %s", level) + } + + result, err := reader.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %w", level, err) + } + + return result, nil +} + +func (r *MultiLevelFieldReader) ReadFieldMerge( + address []string, level string) (FieldReadResult, error) { + var result FieldReadResult + for _, l := range r.Levels { + if r, ok := r.Readers[l]; ok { + out, err := r.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %w", l, err) + } + + // TODO: computed + if out.Exists { + result = out + } + } + + if l == level { + break + } + } + + return result, nil +} diff --git a/pkg/legacy/helper/schema/field_reader_multi_test.go b/pkg/legacy/helper/schema/field_reader_multi_test.go new file mode 100644 index 00000000000..b0b574f6d0b --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_multi_test.go @@ -0,0 +1,275 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "strconv" + "testing" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestMultiLevelFieldReaderReadFieldExact(t *testing.T) { + cases := map[string]struct { + Addr []string + Readers []FieldReader + Level string + Result FieldReadResult + }{ + "specific": { + Addr: []string{"foo"}, + + Readers: []FieldReader{ + &MapFieldReader{ + Schema: map[string]*Schema{ + "foo": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{ + "foo": "bar", + }), + }, + &MapFieldReader{ + Schema: map[string]*Schema{ + "foo": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{ + "foo": "baz", + }), + }, + &MapFieldReader{ + Schema: map[string]*Schema{ + "foo": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{}), + }, + }, + + Level: "1", + Result: FieldReadResult{ + Value: "baz", + Exists: true, + }, + }, + } + + for name, tc := range cases { + readers := make(map[string]FieldReader) + levels := make([]string, len(tc.Readers)) + for i, r := range tc.Readers { + is := strconv.FormatInt(int64(i), 10) + readers[is] = r + levels[i] = is + } + + r := &MultiLevelFieldReader{ + Readers: readers, + Levels: levels, + } + + out, err := r.ReadFieldExact(tc.Addr, tc.Level) + if err != nil { + t.Fatalf("%s: err: %s", name, err) + } + + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} + +func TestMultiLevelFieldReaderReadFieldMerge(t *testing.T) { + cases := map[string]struct { + Addr []string + Readers []FieldReader + Result FieldReadResult + }{ + "stringInDiff": { + Addr: []string{"availability_zone"}, + + Readers: []FieldReader{ + &DiffFieldReader{ + Schema: map[string]*Schema{ + "availability_zone": &Schema{Type: TypeString}, + }, + + Source: &MapFieldReader{ + Schema: map[string]*Schema{ + "availability_zone": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{ + 
"availability_zone": "foo", + }), + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + + Result: FieldReadResult{ + Value: "bar", + Exists: true, + }, + }, + + "lastLevelComputed": { + Addr: []string{"availability_zone"}, + + Readers: []FieldReader{ + &MapFieldReader{ + Schema: map[string]*Schema{ + "availability_zone": &Schema{Type: TypeString}, + }, + + Map: BasicMapReader(map[string]string{ + "availability_zone": "foo", + }), + }, + + &DiffFieldReader{ + Schema: map[string]*Schema{ + "availability_zone": &Schema{Type: TypeString}, + }, + + Source: &MapFieldReader{ + Schema: map[string]*Schema{ + "availability_zone": &Schema{Type: TypeString}, + }, + + Map: BasicMapReader(map[string]string{ + "availability_zone": "foo", + }), + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + }, + }, + }, + }, + + Result: FieldReadResult{ + Value: "", + Exists: true, + Computed: true, + }, + }, + + "list of maps with removal in diff": { + Addr: []string{"config_vars"}, + + Readers: []FieldReader{ + &DiffFieldReader{ + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + Source: &MapFieldReader{ + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + Map: BasicMapReader(map[string]string{ + "config_vars.#": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.bar": "baz", + }), + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + NewRemoved: true, + }, + }, + }, + }, + }, + + Result: FieldReadResult{ + Value: []interface{}{ + map[string]interface{}{ + "foo": 
"bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + Exists: true, + }, + }, + + "first level only": { + Addr: []string{"foo"}, + + Readers: []FieldReader{ + &MapFieldReader{ + Schema: map[string]*Schema{ + "foo": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{ + "foo": "bar", + }), + }, + &MapFieldReader{ + Schema: map[string]*Schema{ + "foo": &Schema{Type: TypeString}, + }, + Map: BasicMapReader(map[string]string{}), + }, + }, + + Result: FieldReadResult{ + Value: "bar", + Exists: true, + }, + }, + } + + for name, tc := range cases { + readers := make(map[string]FieldReader) + levels := make([]string, len(tc.Readers)) + for i, r := range tc.Readers { + is := strconv.FormatInt(int64(i), 10) + readers[is] = r + levels[i] = is + } + + r := &MultiLevelFieldReader{ + Readers: readers, + Levels: levels, + } + + out, err := r.ReadFieldMerge(tc.Addr, levels[len(levels)-1]) + if err != nil { + t.Fatalf("%s: err: %s", name, err) + } + + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} diff --git a/pkg/legacy/helper/schema/field_reader_test.go b/pkg/legacy/helper/schema/field_reader_test.go new file mode 100644 index 00000000000..6d65bcfdefb --- /dev/null +++ b/pkg/legacy/helper/schema/field_reader_test.go @@ -0,0 +1,476 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "testing" +) + +func TestAddrToSchema(t *testing.T) { + cases := map[string]struct { + Addr []string + Schema map[string]*Schema + Result []ValueType + }{ + "full object": { + []string{}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + []ValueType{typeObject}, + }, + + "list": { + []string{"list"}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + []ValueType{TypeList}, + }, + + "list.#": { + []string{"list", "#"}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + []ValueType{TypeList, TypeInt}, + }, + + "list.0": { + []string{"list", "0"}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + []ValueType{TypeList, TypeInt}, + }, + + "list.0 with resource": { + []string{"list", "0"}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "field": &Schema{Type: TypeString}, + }, + }, + }, + }, + []ValueType{TypeList, typeObject}, + }, + + "list.0.field": { + []string{"list", "0", "field"}, + map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "field": &Schema{Type: TypeString}, + }, + }, + }, + }, + []ValueType{TypeList, typeObject, TypeString}, + }, + + "set": { + []string{"set"}, + map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + []ValueType{TypeSet}, + }, + + "set.#": { + []string{"set", "#"}, + map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + []ValueType{TypeSet, TypeInt}, + }, + + "set.0": { + []string{"set", "0"}, + map[string]*Schema{ + "set": &Schema{ + 
Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + []ValueType{TypeSet, TypeInt}, + }, + + "set.0 with resource": { + []string{"set", "0"}, + map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "field": &Schema{Type: TypeString}, + }, + }, + }, + }, + []ValueType{TypeSet, typeObject}, + }, + + "mapElem": { + []string{"map", "foo"}, + map[string]*Schema{ + "map": &Schema{Type: TypeMap}, + }, + []ValueType{TypeMap, TypeString}, + }, + + "setDeep": { + []string{"set", "50", "index"}, + map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "value": &Schema{Type: TypeString}, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["index"].(int) + }, + }, + }, + []ValueType{TypeSet, typeObject, TypeInt}, + }, + } + + for name, tc := range cases { + result := addrToSchema(tc.Addr, tc.Schema) + types := make([]ValueType, len(result)) + for i, v := range result { + types[i] = v.Type + } + + if !reflect.DeepEqual(types, tc.Result) { + t.Fatalf("%s: %#v", name, types) + } + } +} + +// testFieldReader is a helper that should be used to verify that +// a FieldReader behaves properly in all the common cases. 
+func testFieldReader(t *testing.T, f func(map[string]*Schema) FieldReader) { + schema := map[string]*Schema{ + // Primitives + "bool": &Schema{Type: TypeBool}, + "float": &Schema{Type: TypeFloat}, + "int": &Schema{Type: TypeInt}, + "string": &Schema{Type: TypeString}, + + // Lists + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeString}, + }, + "listInt": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + "listMap": &Schema{ + Type: TypeList, + Elem: &Schema{ + Type: TypeMap, + }, + }, + + // Maps + "map": &Schema{Type: TypeMap}, + "mapInt": &Schema{ + Type: TypeMap, + Elem: TypeInt, + }, + + // This is used to verify that the type of a Map can be specified using the + // same syntax as for lists (as a nested *Schema passed to Elem) + "mapIntNestedSchema": &Schema{ + Type: TypeMap, + Elem: &Schema{Type: TypeInt}, + }, + "mapFloat": &Schema{ + Type: TypeMap, + Elem: TypeFloat, + }, + "mapBool": &Schema{ + Type: TypeMap, + Elem: TypeBool, + }, + + // Sets + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + "setDeep": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "value": &Schema{Type: TypeString}, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["index"].(int) + }, + }, + "setEmpty": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + } + + cases := map[string]struct { + Addr []string + Result FieldReadResult + Err bool + }{ + "noexist": { + []string{"boolNOPE"}, + FieldReadResult{ + Value: nil, + Exists: false, + Computed: false, + }, + false, + }, + + "bool": { + []string{"bool"}, + FieldReadResult{ + Value: true, + Exists: true, + Computed: false, + }, + false, + }, + + "float": { + []string{"float"}, + FieldReadResult{ + Value: 3.1415, + Exists: true, + Computed: false, + }, + false, + }, + + 
"int": { + []string{"int"}, + FieldReadResult{ + Value: 42, + Exists: true, + Computed: false, + }, + false, + }, + + "string": { + []string{"string"}, + FieldReadResult{ + Value: "string", + Exists: true, + Computed: false, + }, + false, + }, + + "list": { + []string{"list"}, + FieldReadResult{ + Value: []interface{}{ + "foo", + "bar", + }, + Exists: true, + Computed: false, + }, + false, + }, + + "listInt": { + []string{"listInt"}, + FieldReadResult{ + Value: []interface{}{ + 21, + 42, + }, + Exists: true, + Computed: false, + }, + false, + }, + + "map": { + []string{"map"}, + FieldReadResult{ + Value: map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + Exists: true, + Computed: false, + }, + false, + }, + + "mapInt": { + []string{"mapInt"}, + FieldReadResult{ + Value: map[string]interface{}{ + "one": 1, + "two": 2, + }, + Exists: true, + Computed: false, + }, + false, + }, + + "mapIntNestedSchema": { + []string{"mapIntNestedSchema"}, + FieldReadResult{ + Value: map[string]interface{}{ + "one": 1, + "two": 2, + }, + Exists: true, + Computed: false, + }, + false, + }, + + "mapFloat": { + []string{"mapFloat"}, + FieldReadResult{ + Value: map[string]interface{}{ + "oneDotTwo": 1.2, + }, + Exists: true, + Computed: false, + }, + false, + }, + + "mapBool": { + []string{"mapBool"}, + FieldReadResult{ + Value: map[string]interface{}{ + "True": true, + "False": false, + }, + Exists: true, + Computed: false, + }, + false, + }, + + "mapelem": { + []string{"map", "foo"}, + FieldReadResult{ + Value: "bar", + Exists: true, + Computed: false, + }, + false, + }, + + "set": { + []string{"set"}, + FieldReadResult{ + Value: []interface{}{10, 50}, + Exists: true, + Computed: false, + }, + false, + }, + + "setDeep": { + []string{"setDeep"}, + FieldReadResult{ + Value: []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "foo", + }, + map[string]interface{}{ + "index": 50, + "value": "bar", + }, + }, + Exists: true, + Computed: false, + }, + false, + }, + + 
"setEmpty": { + []string{"setEmpty"}, + FieldReadResult{ + Value: []interface{}{}, + Exists: false, + }, + false, + }, + } + + for name, tc := range cases { + r := f(schema) + out, err := r.ReadField(tc.Addr) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + if s, ok := out.Value.(*Set); ok { + // If it is a set, convert to a list so its more easily checked. + out.Value = s.List() + } + if !reflect.DeepEqual(tc.Result, out) { + t.Fatalf("%s: bad: %#v", name, out) + } + } +} diff --git a/pkg/legacy/helper/schema/field_writer.go b/pkg/legacy/helper/schema/field_writer.go new file mode 100644 index 00000000000..0c0179d7320 --- /dev/null +++ b/pkg/legacy/helper/schema/field_writer.go @@ -0,0 +1,13 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// FieldWriters are responsible for writing fields by address into +// a proper typed representation. ResourceData uses this to write new data +// into existing sources. +type FieldWriter interface { + WriteField([]string, interface{}) error +} diff --git a/pkg/legacy/helper/schema/field_writer_map.go b/pkg/legacy/helper/schema/field_writer_map.go new file mode 100644 index 00000000000..196f1230715 --- /dev/null +++ b/pkg/legacy/helper/schema/field_writer_map.go @@ -0,0 +1,361 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/go-viper/mapstructure/v2" +) + +// MapFieldWriter writes data into a single map[string]string structure. +type MapFieldWriter struct { + lock sync.Mutex + Schema map[string]*Schema + result map[string]string +} + +// Map returns the underlying map that is being written to. 
+func (w *MapFieldWriter) Map() map[string]string {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	if w.result == nil {
+		w.result = make(map[string]string)
+	}
+
+	return w.result
+}
+
+// unsafeWriteField writes a single raw key/value pair straight into the
+// result map with no schema validation. It exists only for special
+// bookkeeping keys; callers are responsible for the key being sensible.
+func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	if w.result == nil {
+		w.result = make(map[string]string)
+	}
+
+	w.result[addr] = value
+}
+
+// clearTree clears a field and any sub-fields of the given address out of the
+// map. This should be used to reset some kind of complex structures (namely
+// sets) before writing to make sure that any conflicting data is removed (for
+// example, if the set was previously written to the writer's layer).
+func (w *MapFieldWriter) clearTree(addr []string) {
+	prefix := strings.Join(addr, ".") + "."
+	for k := range w.result {
+		if strings.HasPrefix(k, prefix) {
+			delete(w.result, k)
+		}
+	}
+}
+
+// WriteField validates that addr is a writable location in the schema and
+// then writes value there. Only whole lists, maps, and sets may be written;
+// setting an individual element of one of those containers is an error.
+func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	if w.result == nil {
+		w.result = make(map[string]string)
+	}
+
+	schemaList := addrToSchema(addr, w.Schema)
+	if len(schemaList) == 0 {
+		return fmt.Errorf("Invalid address to set: %#v", addr)
+	}
+
+	// If we're setting anything other than a list root, map root, or set
+	// root, then disallow it.
+	for _, schema := range schemaList[:len(schemaList)-1] {
+		switch schema.Type {
+		case TypeList:
+			return fmt.Errorf(
+				"%s: can only set full list",
+				strings.Join(addr, "."))
+		case TypeMap:
+			return fmt.Errorf(
+				"%s: can only set full map",
+				strings.Join(addr, "."))
+		case TypeSet:
+			return fmt.Errorf(
+				"%s: can only set full set",
+				strings.Join(addr, "."))
+		}
+	}
+
+	return w.set(addr, value)
+}
+
+// set dispatches to the type-specific setter for the schema entry that addr
+// resolves to. The caller must already hold w.lock.
+func (w *MapFieldWriter) set(addr []string, value interface{}) error {
+	schemaList := addrToSchema(addr, w.Schema)
+	if len(schemaList) == 0 {
+		return fmt.Errorf("Invalid address to set: %#v", addr)
+	}
+
+	schema := schemaList[len(schemaList)-1]
+	switch schema.Type {
+	case TypeBool, TypeInt, TypeFloat, TypeString:
+		return w.setPrimitive(addr, value, schema)
+	case TypeList:
+		return w.setList(addr, value, schema)
+	case TypeMap:
+		return w.setMap(addr, value, schema)
+	case TypeSet:
+		return w.setSet(addr, value, schema)
+	case typeObject:
+		return w.setObject(addr, value, schema)
+	default:
+		panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+	}
+}
+
+// setList writes an entire list value under addr: one "<addr>.<index>" key
+// per element plus a "<addr>.#" count key.
+func (w *MapFieldWriter) setList(
+	addr []string,
+	v interface{},
+	schema *Schema) error {
+	k := strings.Join(addr, ".")
+	setElement := func(idx string, value interface{}) error {
+		addrCopy := make([]string, len(addr), len(addr)+1)
+		copy(addrCopy, addr)
+		return w.set(append(addrCopy, idx), value)
+	}
+
+	var vs []interface{}
+	if err := mapstructure.Decode(v, &vs); err != nil {
+		return fmt.Errorf("%s: %w", k, err)
+	}
+
+	// Wipe the set from the current writer prior to writing if it exists.
+	// Multiple writes to the same layer is a lot safer for lists than sets due
+	// to the fact that indexes are always deterministic and the length will
+	// always be updated with the current length on the last write, but making
+	// sure we have a clean namespace removes any chance for edge cases to pop up
+	// and ensures that the last write to the set is the correct value.
+	w.clearTree(addr)
+
+	// Set the entire list.
+	var err error
+	for i, elem := range vs {
+		is := strconv.FormatInt(int64(i), 10)
+		err = setElement(is, elem)
+		if err != nil {
+			break
+		}
+	}
+	if err != nil {
+		// Best-effort rollback of anything already written; the original
+		// error is what matters to the caller, so cleanup errors are ignored.
+		for i := range vs {
+			is := strconv.FormatInt(int64(i), 10)
+			setElement(is, nil) //nolint:errcheck // best-effort cleanup
+		}
+
+		return err
+	}
+
+	w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
+	return nil
+}
+
+// setMap writes an entire map value under addr: one "<addr>.<key>" entry per
+// element plus a "<addr>.%" count key. A nil value removes the map entirely.
+func (w *MapFieldWriter) setMap(
+	addr []string,
+	value interface{},
+	schema *Schema) error {
+	k := strings.Join(addr, ".")
+	v := reflect.ValueOf(value)
+	vs := make(map[string]interface{})
+
+	if value == nil {
+		// The empty string here means the map is removed.
+		w.result[k] = ""
+		return nil
+	}
+
+	if v.Kind() != reflect.Map {
+		return fmt.Errorf("%s: must be a map", k)
+	}
+	if v.Type().Key().Kind() != reflect.String {
+		return fmt.Errorf("%s: keys must be strings", k)
+	}
+	for _, mk := range v.MapKeys() {
+		mv := v.MapIndex(mk)
+		vs[mk.String()] = mv.Interface()
+	}
+
+	// Wipe this address tree. The contents of the map should always reflect the
+	// last write made to it.
+	w.clearTree(addr)
+
+	// Remove the pure key since we're setting the full map value
+	delete(w.result, k)
+
+	// Set each subkey
+	addrCopy := make([]string, len(addr), len(addr)+1)
+	copy(addrCopy, addr)
+	for subKey, v := range vs {
+		if err := w.set(append(addrCopy, subKey), v); err != nil {
+			return err
+		}
+	}
+
+	// Set the count
+	w.result[k+".%"] = strconv.Itoa(len(vs))
+
+	return nil
+}
+
+// setObject writes a nested object (e.g. a resource block) by decoding it
+// into a map and writing each field under addr in turn.
+func (w *MapFieldWriter) setObject(
+	addr []string,
+	value interface{},
+	schema *Schema) error {
+	// Set the entire object. First decode into a proper structure
+	var v map[string]interface{}
+	if err := mapstructure.Decode(value, &v); err != nil {
+		return fmt.Errorf("%s: %w", strings.Join(addr, "."), err)
+	}
+
+	// Make space for additional elements in the address
+	addrCopy := make([]string, len(addr), len(addr)+1)
+	copy(addrCopy, addr)
+
+	// Set each element in turn
+	var err error
+	for k1, v1 := range v {
+		if err = w.set(append(addrCopy, k1), v1); err != nil {
+			break
+		}
+	}
+	if err != nil {
+		// Best-effort rollback: clear anything already written before
+		// returning the original error; cleanup errors are ignored.
+		for k1 := range v {
+			w.set(append(addrCopy, k1), nil) //nolint:errcheck // best-effort cleanup
+		}
+	}
+
+	return err
+}
+
+// setPrimitive writes a single scalar (bool/int/float/string) value at addr.
+// A nil value is recorded as the empty string, which marks removal.
+func (w *MapFieldWriter) setPrimitive(
+	addr []string,
+	v interface{},
+	schema *Schema) error {
+	k := strings.Join(addr, ".")
+
+	if v == nil {
+		// The empty string here means the value is removed.
+		w.result[k] = ""
+		return nil
+	}
+
+	var set string
+	switch schema.Type {
+	case TypeBool:
+		var b bool
+		if err := mapstructure.Decode(v, &b); err != nil {
+			return fmt.Errorf("%s: %w", k, err)
+		}
+
+		set = strconv.FormatBool(b)
+	case TypeString:
+		if err := mapstructure.Decode(v, &set); err != nil {
+			return fmt.Errorf("%s: %w", k, err)
+		}
+	case TypeInt:
+		var n int
+		if err := mapstructure.Decode(v, &n); err != nil {
+			return fmt.Errorf("%s: %w", k, err)
+		}
+		set = strconv.FormatInt(int64(n), 10)
+	case TypeFloat:
+		var n float64
+		if err := mapstructure.Decode(v, &n); err != nil {
+			return fmt.Errorf("%s: %w", k, err)
+		}
+		set = strconv.FormatFloat(n, 'G', -1, 64)
+	default:
+		return fmt.Errorf("Unknown type: %#v", schema.Type)
+	}
+
+	w.result[k] = set
+	return nil
+}
+
+// setSet writes an entire set value under addr. Slice inputs are first
+// converted to a *Set through a temporary list writer/reader pair so that
+// the element keys come from the schema's hash function.
+func (w *MapFieldWriter) setSet(
+	addr []string,
+	value interface{},
+	schema *Schema) error {
+	addrCopy := make([]string, len(addr), len(addr)+1)
+	copy(addrCopy, addr)
+	k := strings.Join(addr, ".")
+
+	if value == nil {
+		w.result[k+".#"] = "0"
+		return nil
+	}
+
+	// If it is a slice, then we have to turn it into a *Set so that
+	// we get the proper order back based on the hash code.
+	if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
+		// Build a temp *ResourceData to use for the conversion
+		tempAddr := addr[len(addr)-1:]
+		tempSchema := *schema
+		tempSchema.Type = TypeList
+		tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema}
+		tempW := &MapFieldWriter{Schema: tempSchemaMap}
+
+		// Set the entire list, this lets us get values out of it
+		if err := tempW.WriteField(tempAddr, value); err != nil {
+			return err
+		}
+
+		// Build the set by going over the list items in order and
+		// hashing them into the set. The reason we go over the list and
+		// not the `value` directly is because this forces all types
+		// to become []interface{} (generic) instead of []string, which
+		// most hash functions are expecting.
+		s := schema.ZeroValue().(*Set)
+		tempR := &MapFieldReader{
+			Map:    BasicMapReader(tempW.Map()),
+			Schema: tempSchemaMap,
+		}
+		for i := 0; i < v.Len(); i++ {
+			is := strconv.FormatInt(int64(i), 10)
+			result, err := tempR.ReadField(append(tempAddr, is))
+			if err != nil {
+				return err
+			}
+			if !result.Exists {
+				panic("set item just set doesn't exist")
+			}
+
+			s.Add(result.Value)
+		}
+
+		value = s
+	}
+
+	// Clear any keys that match the set address first. This is necessary because
+	// it's always possible and sometimes may be necessary to write to a certain
+	// writer layer more than once with different set data each time, which will
+	// lead to different keys being inserted, which can lead to determinism
+	// problems when the old data isn't wiped first.
+ w.clearTree(addr) + + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + + for code, elem := range value.(*Set).m { + if err := w.set(append(addrCopy, code), elem); err != nil { + return err + } + } + + w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) + return nil +} diff --git a/pkg/legacy/helper/schema/field_writer_map_test.go b/pkg/legacy/helper/schema/field_writer_map_test.go new file mode 100644 index 00000000000..ec581045819 --- /dev/null +++ b/pkg/legacy/helper/schema/field_writer_map_test.go @@ -0,0 +1,552 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "testing" +) + +func TestMapFieldWriter_impl(t *testing.T) { + var _ FieldWriter = new(MapFieldWriter) +} + +func TestMapFieldWriter(t *testing.T) { + schema := map[string]*Schema{ + "bool": &Schema{Type: TypeBool}, + "int": &Schema{Type: TypeInt}, + "string": &Schema{Type: TypeString}, + "list": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeString}, + }, + "listInt": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + "listResource": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "value": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + }, + "map": &Schema{Type: TypeMap}, + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + "setDeep": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "value": &Schema{Type: TypeString}, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["index"].(int) + }, + }, + } + + cases := map[string]struct { + Addr []string + Value interface{} + Err bool + Out map[string]string + }{ + "noexist": { + []string{"noexist"}, + 42, + true, + 
map[string]string{}, + }, + + "bool": { + []string{"bool"}, + false, + false, + map[string]string{ + "bool": "false", + }, + }, + + "int": { + []string{"int"}, + 42, + false, + map[string]string{ + "int": "42", + }, + }, + + "string": { + []string{"string"}, + "42", + false, + map[string]string{ + "string": "42", + }, + }, + + "string nil": { + []string{"string"}, + nil, + false, + map[string]string{ + "string": "", + }, + }, + + "list of resources": { + []string{"listResource"}, + []interface{}{ + map[string]interface{}{ + "value": 80, + }, + }, + false, + map[string]string{ + "listResource.#": "1", + "listResource.0.value": "80", + }, + }, + + "list of resources empty": { + []string{"listResource"}, + []interface{}{}, + false, + map[string]string{ + "listResource.#": "0", + }, + }, + + "list of resources nil": { + []string{"listResource"}, + nil, + false, + map[string]string{ + "listResource.#": "0", + }, + }, + + "list of strings": { + []string{"list"}, + []interface{}{"foo", "bar"}, + false, + map[string]string{ + "list.#": "2", + "list.0": "foo", + "list.1": "bar", + }, + }, + + "list element": { + []string{"list", "0"}, + "string", + true, + map[string]string{}, + }, + + "map": { + []string{"map"}, + map[string]interface{}{"foo": "bar"}, + false, + map[string]string{ + "map.%": "1", + "map.foo": "bar", + }, + }, + + "map delete": { + []string{"map"}, + nil, + false, + map[string]string{ + "map": "", + }, + }, + + "map element": { + []string{"map", "foo"}, + "bar", + true, + map[string]string{}, + }, + + "set": { + []string{"set"}, + []interface{}{1, 2, 5}, + false, + map[string]string{ + "set.#": "3", + "set.1": "1", + "set.2": "2", + "set.5": "5", + }, + }, + + "set nil": { + []string{"set"}, + nil, + false, + map[string]string{ + "set.#": "0", + }, + }, + + "set typed nil": { + []string{"set"}, + func() *Set { return nil }(), + false, + map[string]string{ + "set.#": "0", + }, + }, + + "set resource": { + []string{"setDeep"}, + []interface{}{ + 
map[string]interface{}{ + "index": 10, + "value": "foo", + }, + map[string]interface{}{ + "index": 50, + "value": "bar", + }, + }, + false, + map[string]string{ + "setDeep.#": "2", + "setDeep.10.index": "10", + "setDeep.10.value": "foo", + "setDeep.50.index": "50", + "setDeep.50.value": "bar", + }, + }, + + "set element": { + []string{"set", "5"}, + 5, + true, + map[string]string{}, + }, + + "full object": { + nil, + map[string]interface{}{ + "string": "foo", + "list": []interface{}{"foo", "bar"}, + }, + false, + map[string]string{ + "string": "foo", + "list.#": "2", + "list.0": "foo", + "list.1": "bar", + }, + }, + } + + for name, tc := range cases { + w := &MapFieldWriter{Schema: schema} + err := w.WriteField(tc.Addr, tc.Value) + if err != nil != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + + actual := w.Map() + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("%s: bad: %#v", name, actual) + } + } +} + +func TestMapFieldWriterCleanSet(t *testing.T) { + schema := map[string]*Schema{ + "setDeep": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "value": &Schema{Type: TypeString}, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["index"].(int) + }, + }, + } + + values := []struct { + Addr []string + Value interface{} + Out map[string]string + }{ + { + []string{"setDeep"}, + []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "foo", + }, + map[string]interface{}{ + "index": 50, + "value": "bar", + }, + }, + map[string]string{ + "setDeep.#": "2", + "setDeep.10.index": "10", + "setDeep.10.value": "foo", + "setDeep.50.index": "50", + "setDeep.50.value": "bar", + }, + }, + { + []string{"setDeep"}, + []interface{}{ + map[string]interface{}{ + "index": 20, + "value": "baz", + }, + map[string]interface{}{ + "index": 60, + "value": "qux", + }, + }, + map[string]string{ + "setDeep.#": "2", + "setDeep.20.index": "20", + "setDeep.20.value": "baz", + 
"setDeep.60.index": "60", + "setDeep.60.value": "qux", + }, + }, + { + []string{"setDeep"}, + []interface{}{ + map[string]interface{}{ + "index": 30, + "value": "one", + }, + map[string]interface{}{ + "index": 70, + "value": "two", + }, + }, + map[string]string{ + "setDeep.#": "2", + "setDeep.30.index": "30", + "setDeep.30.value": "one", + "setDeep.70.index": "70", + "setDeep.70.value": "two", + }, + }, + } + + w := &MapFieldWriter{Schema: schema} + + for n, tc := range values { + err := w.WriteField(tc.Addr, tc.Value) + if err != nil { + t.Fatalf("%d: err: %s", n, err) + } + + actual := w.Map() + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("%d: bad: %#v", n, actual) + } + } +} + +func TestMapFieldWriterCleanList(t *testing.T) { + schema := map[string]*Schema{ + "listDeep": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "thing1": &Schema{Type: TypeString}, + "thing2": &Schema{Type: TypeString}, + }, + }, + }, + } + + values := []struct { + Addr []string + Value interface{} + Out map[string]string + }{ + { + // Base list + []string{"listDeep"}, + []interface{}{ + map[string]interface{}{ + "thing1": "a", + "thing2": "b", + }, + map[string]interface{}{ + "thing1": "c", + "thing2": "d", + }, + map[string]interface{}{ + "thing1": "e", + "thing2": "f", + }, + map[string]interface{}{ + "thing1": "g", + "thing2": "h", + }, + }, + map[string]string{ + "listDeep.#": "4", + "listDeep.0.thing1": "a", + "listDeep.0.thing2": "b", + "listDeep.1.thing1": "c", + "listDeep.1.thing2": "d", + "listDeep.2.thing1": "e", + "listDeep.2.thing2": "f", + "listDeep.3.thing1": "g", + "listDeep.3.thing2": "h", + }, + }, + { + // Remove an element + []string{"listDeep"}, + []interface{}{ + map[string]interface{}{ + "thing1": "a", + "thing2": "b", + }, + map[string]interface{}{ + "thing1": "c", + "thing2": "d", + }, + map[string]interface{}{ + "thing1": "e", + "thing2": "f", + }, + }, + map[string]string{ + "listDeep.#": "3", + "listDeep.0.thing1": "a", + 
"listDeep.0.thing2": "b", + "listDeep.1.thing1": "c", + "listDeep.1.thing2": "d", + "listDeep.2.thing1": "e", + "listDeep.2.thing2": "f", + }, + }, + { + // Rewrite with missing keys. This should normally not be necessary, as + // hopefully the writers are writing zero values as necessary, but for + // brevity we want to make sure that what exists in the writer is exactly + // what the last write looked like coming from the provider. + []string{"listDeep"}, + []interface{}{ + map[string]interface{}{ + "thing1": "a", + }, + map[string]interface{}{ + "thing1": "c", + }, + map[string]interface{}{ + "thing1": "e", + }, + }, + map[string]string{ + "listDeep.#": "3", + "listDeep.0.thing1": "a", + "listDeep.1.thing1": "c", + "listDeep.2.thing1": "e", + }, + }, + } + + w := &MapFieldWriter{Schema: schema} + + for n, tc := range values { + err := w.WriteField(tc.Addr, tc.Value) + if err != nil { + t.Fatalf("%d: err: %s", n, err) + } + + actual := w.Map() + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("%d: bad: %#v", n, actual) + } + } +} + +func TestMapFieldWriterCleanMap(t *testing.T) { + schema := map[string]*Schema{ + "map": &Schema{ + Type: TypeMap, + }, + } + + values := []struct { + Value interface{} + Out map[string]string + }{ + { + // Base map + map[string]interface{}{ + "thing1": "a", + "thing2": "b", + "thing3": "c", + "thing4": "d", + }, + map[string]string{ + "map.%": "4", + "map.thing1": "a", + "map.thing2": "b", + "map.thing3": "c", + "map.thing4": "d", + }, + }, + { + // Base map + map[string]interface{}{ + "thing1": "a", + "thing2": "b", + "thing4": "d", + }, + map[string]string{ + "map.%": "3", + "map.thing1": "a", + "map.thing2": "b", + "map.thing4": "d", + }, + }, + } + + w := &MapFieldWriter{Schema: schema} + + for n, tc := range values { + err := w.WriteField([]string{"map"}, tc.Value) + if err != nil { + t.Fatalf("%d: err: %s", n, err) + } + + actual := w.Map() + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("%d: bad: %#v", n, actual) + } + 
} +} diff --git a/pkg/legacy/helper/schema/getsource_string.go b/pkg/legacy/helper/schema/getsource_string.go new file mode 100644 index 00000000000..0184d7b08ab --- /dev/null +++ b/pkg/legacy/helper/schema/getsource_string.go @@ -0,0 +1,46 @@ +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[getSourceState-1] + _ = x[getSourceConfig-2] + _ = x[getSourceDiff-4] + _ = x[getSourceSet-8] + _ = x[getSourceExact-16] + _ = x[getSourceLevelMask-15] +} + +const ( + _getSource_name_0 = "getSourceStategetSourceConfig" + _getSource_name_1 = "getSourceDiff" + _getSource_name_2 = "getSourceSet" + _getSource_name_3 = "getSourceLevelMaskgetSourceExact" +) + +var ( + _getSource_index_0 = [...]uint8{0, 14, 29} + _getSource_index_3 = [...]uint8{0, 18, 32} +) + +func (i getSource) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] + case i == 4: + return _getSource_name_1 + case i == 8: + return _getSource_name_2 + case 15 <= i && i <= 16: + i -= 15 + return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] + default: + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/legacy/helper/schema/provider.go b/pkg/legacy/helper/schema/provider.go new file mode 100644 index 00000000000..11b72f4ea71 --- /dev/null +++ b/pkg/legacy/helper/schema/provider.go @@ -0,0 +1,482 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + multierror "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// Provider represents a resource provider in OpenTofu, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. If this provider has no meta info, + // this can be omitted. 
This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // OpenTofu team. + ProviderMetaSchema map[string]*Schema + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // See the ConfigureFunc documentation for more information. + ConfigureFunc ConfigureFunc + + // MetaReset is called by TestReset to reset any state stored in the meta + // interface. This is especially important if the StopContext is stored by + // the provider. + MetaReset func() error + + meta interface{} + + // a mutex is required because TestReset can directly replace the stopCtx + stopMu sync.Mutex + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. 
+func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k, _ := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %w", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %w", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// Stopped reports whether the provider has been stopped or not. +func (p *Provider) Stopped() bool { + ctx := p.StopContext() + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// StopCh returns a channel that is closed once the provider is stopped. 
+func (p *Provider) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + return p.stopCtx +} + +func (p *Provider) stopInit() { + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of tofu.ResourceProvider interface. +func (p *Provider) Stop() error { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtxCancel() + return nil +} + +// TestReset resets any state stored in the Provider, and will call TestReset +// on Meta if it implements the TestProvider interface. +// This may be used to reset the schema.Provider at the start of a test, and is +// automatically called by resource.Test. +func (p *Provider) TestReset() error { + p.stopInit() + if p.MetaReset != nil { + return p.MetaReset() + } + return nil +} + +// GetSchema implementation of tofu.ResourceProvider interface +func (p *Provider) GetSchema(req *tofu.ProviderSchemaRequest) (*tofu.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &tofu.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Input implementation of tofu.ResourceProvider interface. +func (p *Provider) Input( + input tofu.UIInput, + c *tofu.ResourceConfig) (*tofu.ResourceConfig, error) { + return schemaMap(p.Schema).Input(input, c) +} + +// Validate implementation of tofu.ResourceProvider interface. 
+func (p *Provider) Validate(c *tofu.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%w", err)} + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource implementation of tofu.ResourceProvider interface. +func (p *Provider) ValidateResource( + t string, c *tofu.ResourceConfig) ([]string, []error) { + r, ok := p.ResourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support resource: %s", t)} + } + + return r.Validate(c) +} + +// Configure implementation of tofu.ResourceProvider interface. +func (p *Provider) Configure(c *tofu.ResourceConfig) error { + // No configuration + if p.ConfigureFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c, nil, p.meta, true) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + + meta, err := p.ConfigureFunc(data) + if err != nil { + return err + } + + p.meta = meta + return nil +} + +// Apply implementation of tofu.ResourceProvider interface. +func (p *Provider) Apply( + info *tofu.InstanceInfo, + s *tofu.InstanceState, + d *tofu.InstanceDiff) (*tofu.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Apply(s, d, p.meta) +} + +// Diff implementation of tofu.ResourceProvider interface. 
+func (p *Provider) Diff( + info *tofu.InstanceInfo, + s *tofu.InstanceState, + c *tofu.ResourceConfig) (*tofu.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, p.meta) +} + +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *tofu.InstanceInfo, + s *tofu.InstanceState, + c *tofu.ResourceConfig) (*tofu.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) +} + +// Refresh implementation of tofu.ResourceProvider interface. +func (p *Provider) Refresh( + info *tofu.InstanceInfo, + s *tofu.InstanceState) (*tofu.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Refresh(s, p.meta) +} + +// Resources implementation of tofu.ResourceProvider interface. +func (p *Provider) Resources() []tofu.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]tofu.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, tofu.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. 
+ SchemaAvailable: true, + }) + } + + return result +} + +func (p *Provider) ImportState( + info *tofu.InstanceInfo, + id string) ([]*tofu.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil { + var err error + results, err = r.Importer.State(data, p.meta) + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*tofu.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to OpenTofu.") + } + } + + return states, nil +} + +// ValidateDataSource implementation of tofu.ResourceProvider interface. +func (p *Provider) ValidateDataSource( + t string, c *tofu.ResourceConfig) ([]string, []error) { + r, ok := p.DataSourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support data source: %s", t)} + } + + return r.Validate(c) +} + +// ReadDataDiff implementation of tofu.ResourceProvider interface. 
+func (p *Provider) ReadDataDiff( + info *tofu.InstanceInfo, + c *tofu.ResourceConfig) (*tofu.InstanceDiff, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.Diff(nil, c, p.meta) +} + +// RefreshData implementation of tofu.ResourceProvider interface. +func (p *Provider) ReadDataApply( + info *tofu.InstanceInfo, + d *tofu.InstanceDiff) (*tofu.InstanceState, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.ReadDataApply(d, p.meta) +} + +// DataSources implementation of tofu.ResourceProvider interface. +func (p *Provider) DataSources() []tofu.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k, _ := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]tofu.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, tofu.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} diff --git a/pkg/legacy/helper/schema/provider_test.go b/pkg/legacy/helper/schema/provider_test.go new file mode 100644 index 00000000000..9c9dd929964 --- /dev/null +++ b/pkg/legacy/helper/schema/provider_test.go @@ -0,0 +1,625 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestProvider_impl(t *testing.T) { + var _ tofu.ResourceProvider = new(Provider) +} + +func TestProviderGetSchema(t *testing.T) { + // This functionality is already broadly tested in core_schema_test.go, + // so this is just to ensure that the call passes through correctly. + p := &Provider{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Required: true, + }, + }, + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Required: true, + }, + }, + }, + }, + DataSourcesMap: map[string]*Resource{ + "baz": &Resource{ + Schema: map[string]*Schema{ + "bur": { + Type: TypeString, + Required: true, + }, + }, + }, + }, + } + + want := &tofu.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }, + ResourceTypes: map[string]*configschema.Block{ + "foo": testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + DataSources: map[string]*configschema.Block{ + "baz": testResource(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bur": &configschema.Attribute{ + Type: cty.String, + Required: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{}, + }), + }, + } + got, err := p.GetSchema(&tofu.ProviderSchemaRequest{ + ResourceTypes: []string{"foo", "bar"}, + DataSources: []string{"baz", "bar"}, + }) + if err != 
nil { + t.Fatalf("unexpected error %s", err) + } + + if !cmp.Equal(got, want, equateEmpty, typeComparer) { + t.Error("wrong result:\n", cmp.Diff(got, want, equateEmpty, typeComparer)) + } +} + +func TestProviderConfigure(t *testing.T) { + cases := []struct { + P *Provider + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{}, + Config: nil, + Err: false, + }, + + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ConfigureFunc: func(d *ResourceData) (interface{}, error) { + if d.Get("foo").(int) == 42 { + return nil, nil + } + + return nil, fmt.Errorf("nope") + }, + }, + Config: map[string]interface{}{ + "foo": 42, + }, + Err: false, + }, + + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ConfigureFunc: func(d *ResourceData) (interface{}, error) { + if d.Get("foo").(int) == 42 { + return nil, nil + } + + return nil, fmt.Errorf("nope") + }, + }, + Config: map[string]interface{}{ + "foo": 52, + }, + Err: true, + }, + } + + for i, tc := range cases { + c := tofu.NewResourceConfigRaw(tc.Config) + err := tc.P.Configure(c) + if err != nil != tc.Err { + t.Fatalf("%d: %s", i, err) + } + } +} + +func TestProviderResources(t *testing.T) { + cases := []struct { + P *Provider + Result []tofu.ResourceType + }{ + { + P: &Provider{}, + Result: []tofu.ResourceType{}, + }, + + { + P: &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": nil, + "bar": nil, + }, + }, + Result: []tofu.ResourceType{ + tofu.ResourceType{Name: "bar", SchemaAvailable: true}, + tofu.ResourceType{Name: "foo", SchemaAvailable: true}, + }, + }, + + { + P: &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": nil, + "bar": &Resource{Importer: &ResourceImporter{}}, + "baz": nil, + }, + }, + Result: []tofu.ResourceType{ + tofu.ResourceType{Name: "bar", Importable: true, SchemaAvailable: true}, + tofu.ResourceType{Name: "baz", SchemaAvailable: true}, + 
tofu.ResourceType{Name: "foo", SchemaAvailable: true}, + }, + }, + } + + for i, tc := range cases { + actual := tc.P.Resources() + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestProviderDataSources(t *testing.T) { + cases := []struct { + P *Provider + Result []tofu.DataSource + }{ + { + P: &Provider{}, + Result: []tofu.DataSource{}, + }, + + { + P: &Provider{ + DataSourcesMap: map[string]*Resource{ + "foo": nil, + "bar": nil, + }, + }, + Result: []tofu.DataSource{ + tofu.DataSource{Name: "bar", SchemaAvailable: true}, + tofu.DataSource{Name: "foo", SchemaAvailable: true}, + }, + }, + } + + for i, tc := range cases { + actual := tc.P.DataSources() + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: got %#v; want %#v", i, actual, tc.Result) + } + } +} + +func TestProviderValidate(t *testing.T) { + cases := []struct { + P *Provider + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": &Schema{}, + }, + }, + Config: nil, + Err: true, + }, + } + + for i, tc := range cases { + c := tofu.NewResourceConfigRaw(tc.Config) + _, es := tc.P.Validate(c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v", i, es) + } + } +} + +func TestProviderDiff_legacyTimeoutType(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": []interface{}{ + map[string]interface{}{ + "create": "40m", + }, + }, + } + ic := tofu.NewResourceConfigRaw(invalidCfg) + _, err := p.Diff( + &tofu.InstanceInfo{ + Type: "blah", + }, + nil, + ic, + ) + if err != nil { + t.Fatal(err) + } +} + +func TestProviderDiff_timeoutInvalidValue(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": 
&Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": map[string]interface{}{ + "create": "invalid", + }, + } + ic := tofu.NewResourceConfigRaw(invalidCfg) + _, err := p.Diff( + &tofu.InstanceInfo{ + Type: "blah", + }, + nil, + ic, + ) + if err == nil { + t.Fatal("Expected provider.Diff to fail with invalid timeout value") + } + expectedErrMsg := `time: invalid duration "invalid"` + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Unexpected error message: %q\nExpected message to contain %q", + err.Error(), + expectedErrMsg) + } +} + +func TestProviderValidateResource(t *testing.T) { + cases := []struct { + P *Provider + Type string + Config map[string]interface{} + Err bool + }{ + { + P: &Provider{}, + Type: "foo", + Config: nil, + Err: true, + }, + + { + P: &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{}, + }, + }, + Type: "foo", + Config: nil, + Err: false, + }, + } + + for i, tc := range cases { + c := tofu.NewResourceConfigRaw(tc.Config) + _, es := tc.P.ValidateResource(tc.Type, c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v", i, es) + } + } +} + +func TestProviderImportState_default(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{}, + }, + }, + } + + states, err := p.ImportState(&tofu.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if len(states) != 1 { + t.Fatalf("bad: %#v", states) + } + if states[0].ID != "bar" { + t.Fatalf("bad: %#v", states) + } +} + +func TestProviderImportState_setsId(t *testing.T) { + var val string + stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { + val = d.Id() + return []*ResourceData{d}, nil + } + + p := &Provider{ + ResourcesMap: 
map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{ + State: stateFunc, + }, + }, + }, + } + + _, err := p.ImportState(&tofu.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if val != "bar" { + t.Fatal("should set id") + } +} + +func TestProviderImportState_setsType(t *testing.T) { + var tVal string + stateFunc := func(d *ResourceData, meta interface{}) ([]*ResourceData, error) { + d.SetId("foo") + tVal = d.State().Ephemeral.Type + return []*ResourceData{d}, nil + } + + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "foo": &Resource{ + Importer: &ResourceImporter{ + State: stateFunc, + }, + }, + }, + } + + _, err := p.ImportState(&tofu.InstanceInfo{ + Type: "foo", + }, "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + if tVal != "foo" { + t.Fatal("should set type") + } +} + +func TestProviderMeta(t *testing.T) { + p := new(Provider) + if v := p.Meta(); v != nil { + t.Fatalf("bad: %#v", v) + } + + expected := 42 + p.SetMeta(42) + if v := p.Meta(); !reflect.DeepEqual(v, expected) { + t.Fatalf("bad: %#v", v) + } +} + +func TestProviderStop(t *testing.T) { + var p Provider + + if p.Stopped() { + t.Fatal("should not be stopped") + } + + // Verify stopch blocks + ch := p.StopContext().Done() + select { + case <-ch: + t.Fatal("should not be stopped") + case <-time.After(10 * time.Millisecond): + } + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify + if !p.Stopped() { + t.Fatal("should be stopped") + } + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} + +func TestProviderStop_stopFirst(t *testing.T) { + var p Provider + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify + if !p.Stopped() { + t.Fatal("should be stopped") + } + + select { + case <-p.StopContext().Done(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } 
+} + +func TestProviderReset(t *testing.T) { + var p Provider + stopCtx := p.StopContext() + p.MetaReset = func() error { + stopCtx = p.StopContext() + return nil + } + + // cancel the current context + p.Stop() + + if err := p.TestReset(); err != nil { + t.Fatal(err) + } + + // the first context should have been replaced + if err := stopCtx.Err(); err != nil { + t.Fatal(err) + } + + // we should not get a canceled context here either + if err := p.StopContext().Err(); err != nil { + t.Fatal(err) + } +} + +func TestProvider_InternalValidate(t *testing.T) { + cases := []struct { + P *Provider + ExpectedErr error + }{ + { + P: &Provider{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeBool, + Optional: true, + }, + }, + }, + ExpectedErr: nil, + }, + { // Reserved resource fields should be allowed in provider block + P: &Provider{ + Schema: map[string]*Schema{ + "provisioner": { + Type: TypeString, + Optional: true, + }, + "count": { + Type: TypeInt, + Optional: true, + }, + }, + }, + ExpectedErr: nil, + }, + { // Reserved provider fields should not be allowed + P: &Provider{ + Schema: map[string]*Schema{ + "alias": { + Type: TypeString, + Optional: true, + }, + }, + }, + ExpectedErr: fmt.Errorf("%s is a reserved field name for a provider", "alias"), + }, + } + + for i, tc := range cases { + err := tc.P.InternalValidate() + if tc.ExpectedErr == nil { + if err != nil { + t.Fatalf("%d: Error returned (expected no error): %s", i, err) + } + continue + } + if tc.ExpectedErr != nil && err == nil { + t.Fatalf("%d: Expected error (%s), but no error returned", i, tc.ExpectedErr) + } + if err.Error() != tc.ExpectedErr.Error() { + t.Fatalf("%d: Errors don't match. 
Expected: %#v Given: %#v", i, tc.ExpectedErr, err) + } + } +} diff --git a/pkg/legacy/helper/schema/provisioner.go b/pkg/legacy/helper/schema/provisioner.go new file mode 100644 index 00000000000..214ce566348 --- /dev/null +++ b/pkg/legacy/helper/schema/provisioner.go @@ -0,0 +1,210 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// Provisioner represents a resource provisioner in OpenTofu and properly +// implements all of the ResourceProvisioner API. +// +// This higher level structure makes it much easier to implement a new or +// custom provisioner for OpenTofu. +// +// The function callbacks for this structure are all passed a context object. +// This context object has a number of pre-defined values that can be accessed +// via the global functions defined in context.go. +type Provisioner struct { + // ConnSchema is the schema for the connection settings for this + // provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + // + // NOTE: The value of connection keys can only be strings for now. + ConnSchema map[string]*Schema + + // Schema is the schema for the usage of this provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ApplyFunc is the function for executing the provisioner. This is required. + // It is given a context. See the Provisioner struct docs for more + // information. + ApplyFunc func(ctx context.Context) error + + // ValidateFunc is a function for extended validation. 
This is optional + // and should be used when individual field validation is not enough. + ValidateFunc func(*tofu.ResourceConfig) ([]string, []error) + + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once +} + +// Keys that can be used to access data in the context parameters for +// Provisioners. +var ( + connDataInvalid = contextKey("data invalid") + + // This returns a *ResourceData for the connection information. + // Guaranteed to never be nil. + ProvConnDataKey = contextKey("provider conn data") + + // This returns a *ResourceData for the config information. + // Guaranteed to never be nil. + ProvConfigDataKey = contextKey("provider config data") + + // This returns a tofu.UIOutput. Guaranteed to never be nil. + ProvOutputKey = contextKey("provider output") + + // This returns the raw InstanceState passed to Apply. Guaranteed to + // be set, but may be nil. + ProvRawStateKey = contextKey("provider raw state") +) + +// InternalValidate should be called to validate the structure +// of the provisioner. +// +// This should be called in a unit test to verify before release that this +// structure is properly configured for use. +func (p *Provisioner) InternalValidate() error { + if p == nil { + return errors.New("provisioner is nil") + } + + var validationErrors error + { + sm := schemaMap(p.ConnSchema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + { + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + if p.ApplyFunc == nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf( + "ApplyFunc must not be nil")) + } + + return validationErrors +} + +// StopContext returns a context that checks whether a provisioner is stopped. 
+func (p *Provisioner) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + return p.stopCtx +} + +func (p *Provisioner) stopInit() { + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of tofu.ResourceProvisioner interface. +func (p *Provisioner) Stop() error { + p.stopOnce.Do(p.stopInit) + p.stopCtxCancel() + return nil +} + +// GetConfigSchema implementation of tofu.ResourceProvisioner interface. +func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { + return schemaMap(p.Schema).CoreConfigSchema(), nil +} + +// Apply implementation of tofu.ResourceProvisioner interface. +func (p *Provisioner) Apply( + o tofu.UIOutput, + s *tofu.InstanceState, + c *tofu.ResourceConfig) error { + var connData, configData *ResourceData + + { + // We first need to turn the connection information into a + // tofu.ResourceConfig so that we can use that type to more + // easily build a ResourceData structure. We do this by simply treating + // the conn info as configuration input. + raw := make(map[string]interface{}) + if s != nil { + for k, v := range s.Ephemeral.ConnInfo { + raw[k] = v + } + } + + c := tofu.NewResourceConfigRaw(raw) + sm := schemaMap(p.ConnSchema) + diff, err := sm.Diff(nil, c, nil, nil, true) + if err != nil { + return err + } + connData, err = sm.Data(nil, diff) + if err != nil { + return err + } + } + + { + // Build the configuration data. Doing this requires making a "diff" + // even though that's never used. We use that just to get the correct types. 
+ configMap := schemaMap(p.Schema) + diff, err := configMap.Diff(nil, c, nil, nil, true) + if err != nil { + return err + } + configData, err = configMap.Data(nil, diff) + if err != nil { + return err + } + } + + // Build the context and call the function + ctx := p.StopContext() + ctx = context.WithValue(ctx, ProvConnDataKey, connData) + ctx = context.WithValue(ctx, ProvConfigDataKey, configData) + ctx = context.WithValue(ctx, ProvOutputKey, o) + ctx = context.WithValue(ctx, ProvRawStateKey, s) + return p.ApplyFunc(ctx) +} + +// Validate implements the tofu.ResourceProvisioner interface. +func (p *Provisioner) Validate(c *tofu.ResourceConfig) (ws []string, es []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provisioner failed! This is always a bug\n"+ + "with the provisioner itself, and not a user issue. Please report\n"+ + "this bug:\n\n%w", err)} + } + + if p.Schema != nil { + w, e := schemaMap(p.Schema).Validate(c) + ws = append(ws, w...) + es = append(es, e...) + } + + if p.ValidateFunc != nil { + w, e := p.ValidateFunc(c) + ws = append(ws, w...) + es = append(es, e...) + } + + return ws, es +} diff --git a/pkg/legacy/helper/schema/provisioner_test.go b/pkg/legacy/helper/schema/provisioner_test.go new file mode 100644 index 00000000000..aeb0d1ea396 --- /dev/null +++ b/pkg/legacy/helper/schema/provisioner_test.go @@ -0,0 +1,339 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestProvisioner_impl(t *testing.T) { + var _ tofu.ResourceProvisioner = new(Provisioner) +} + +func noopApply(ctx context.Context) error { + return nil +} + +func TestProvisionerValidate(t *testing.T) { + cases := []struct { + Name string + P *Provisioner + Config map[string]interface{} + Err bool + Warns []string + }{ + { + Name: "No ApplyFunc", + P: &Provisioner{}, + Config: nil, + Err: true, + }, + { + Name: "Incorrect schema", + P: &Provisioner{ + Schema: map[string]*Schema{ + "foo": {}, + }, + ApplyFunc: noopApply, + }, + Config: nil, + Err: true, + }, + { + "Basic required field", + &Provisioner{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + ApplyFunc: noopApply, + }, + nil, + true, + nil, + }, + + { + "Basic required field set", + &Provisioner{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Required: true, + Type: TypeString, + }, + }, + ApplyFunc: noopApply, + }, + map[string]interface{}{ + "foo": "bar", + }, + false, + nil, + }, + { + Name: "Warning from property validation", + P: &Provisioner{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + ws = append(ws, "Simple warning from property validation") + return + }, + }, + }, + ApplyFunc: noopApply, + }, + Config: map[string]interface{}{ + "foo": "", + }, + Err: false, + Warns: []string{"Simple warning from property validation"}, + }, + { + Name: "No schema", + P: &Provisioner{ + Schema: nil, + ApplyFunc: noopApply, + }, + Config: nil, + Err: false, + }, + { + Name: "Warning from provisioner ValidateFunc", + P: &Provisioner{ + Schema: nil, + ApplyFunc: noopApply, + ValidateFunc: func(*tofu.ResourceConfig) (ws []string, errors []error) { + ws = append(ws, "Simple 
warning from provisioner ValidateFunc") + return + }, + }, + Config: nil, + Err: false, + Warns: []string{"Simple warning from provisioner ValidateFunc"}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := tofu.NewResourceConfigRaw(tc.Config) + ws, es := tc.P.Validate(c) + if len(es) > 0 != tc.Err { + t.Fatalf("%d: %#v %s", i, es, es) + } + if (tc.Warns != nil || len(ws) != 0) && !reflect.DeepEqual(ws, tc.Warns) { + t.Fatalf("%d: warnings mismatch, actual: %#v", i, ws) + } + }) + } +} + +func TestProvisionerApply(t *testing.T) { + cases := []struct { + Name string + P *Provisioner + Conn map[string]string + Config map[string]interface{} + Err bool + }{ + { + "Basic config", + &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ApplyFunc: func(ctx context.Context) error { + cd := ctx.Value(ProvConnDataKey).(*ResourceData) + d := ctx.Value(ProvConfigDataKey).(*ResourceData) + if d.Get("foo").(int) != 42 { + return fmt.Errorf("bad config data") + } + if cd.Get("foo").(string) != "bar" { + return fmt.Errorf("bad conn data") + } + + return nil + }, + }, + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "foo": 42, + }, + false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := tofu.NewResourceConfigRaw(tc.Config) + + state := &tofu.InstanceState{ + Ephemeral: tofu.EphemeralState{ + ConnInfo: tc.Conn, + }, + } + + err := tc.P.Apply(nil, state, c) + if err != nil != tc.Err { + t.Fatalf("%d: %s", i, err) + } + }) + } +} + +func TestProvisionerApply_nilState(t *testing.T) { + p := &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, 
+ }, + + ApplyFunc: func(ctx context.Context) error { + return nil + }, + } + + conf := map[string]interface{}{ + "foo": 42, + } + + c := tofu.NewResourceConfigRaw(conf) + err := p.Apply(nil, nil, c) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvisionerStop(t *testing.T) { + var p Provisioner + + // Verify stopch blocks + ch := p.StopContext().Done() + select { + case <-ch: + t.Fatal("should not be stopped") + case <-time.After(10 * time.Millisecond): + } + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} + +func TestProvisionerStop_apply(t *testing.T) { + p := &Provisioner{ + ConnSchema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + ApplyFunc: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + } + + conn := map[string]string{ + "foo": "bar", + } + + conf := map[string]interface{}{ + "foo": 42, + } + + c := tofu.NewResourceConfigRaw(conf) + state := &tofu.InstanceState{ + Ephemeral: tofu.EphemeralState{ + ConnInfo: conn, + }, + } + + // Run the apply in a goroutine + doneCh := make(chan struct{}) + go func() { + p.Apply(nil, state, c) + close(doneCh) + }() + + // Should block + select { + case <-doneCh: + t.Fatal("should not be done") + case <-time.After(10 * time.Millisecond): + } + + // Stop! 
+ p.Stop() + + select { + case <-doneCh: + case <-time.After(10 * time.Millisecond): + t.Fatal("should be done") + } +} + +func TestProvisionerStop_stopFirst(t *testing.T) { + var p Provisioner + + // Stop it + if err := p.Stop(); err != nil { + t.Fatalf("err: %s", err) + } + + select { + case <-p.StopContext().Done(): + case <-time.After(10 * time.Millisecond): + t.Fatal("should be stopped") + } +} diff --git a/pkg/legacy/helper/schema/resource.go b/pkg/legacy/helper/schema/resource.go new file mode 100644 index 00000000000..cd52a7ea559 --- /dev/null +++ b/pkg/legacy/helper/schema/resource.go @@ -0,0 +1,847 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/zclconf/go-cty/cty" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in OpenTofu that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for tofu resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. 
+ // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + // + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. 
It is + // called specifically by OpenTofu when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not implemented, + // then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // If any errors occur during each of the operation, an error should be + // returned. If a resource was partially updated, be careful to enable + // partial state mode for ResourceData and use it accordingly. + // + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. 
+ Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + Exists ExistsFunc + + // CustomizeDiff is a custom function for working with the diff that + // OpenTofu has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases OpenTofu runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. 
+ Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to it's size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// tofu.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*tofu.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := tofu.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &tofu.InstanceState{} + } + return s, nil +} + +// See Resource documentation. +type CreateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ReadFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type UpdateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type DeleteFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. 
+type StateMigrateFunc func( + int, *tofu.InstanceState, interface{}) (*tofu.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + +// Apply creates, updates, and/or deletes a resource. 
+func (r *Resource) Apply( + s *tofu.InstanceState, + d *tofu.InstanceDiff, + meta interface{}) (*tofu.InstanceState, error) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, err + } + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta + } + + // Instance Diff shoould have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The OpenTofu API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(tofu.InstanceState) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + if err := r.Delete(data, meta); err != nil { + return r.recordCurrentSchemaVersion(data.State()), err + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, nil + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + if err != nil { + return nil, err + } + } + + err = nil + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + err = r.Create(data, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf("doesn't support update") + } + + err = r.Update(data, meta) + } + + return r.recordCurrentSchemaVersion(data.State()), err +} + +// Diff returns a diff of this resource. 
+func (r *Resource) Diff( + s *tofu.InstanceState, + c *tofu.ResourceConfig, + meta interface{}) (*tofu.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %w", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) simpleDiff( + s *tofu.InstanceState, + c *tofu.ResourceConfig, + meta interface{}) (*tofu.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = tofu.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *tofu.ResourceConfig) ([]string, []error) { + warns, errs := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) + } + + return warns, errs +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + d *tofu.InstanceDiff, + meta interface{}, +) (*tofu.InstanceState, error) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. 
+ data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), err +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. 
+func (r *Resource) Refresh( + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + // there may be new StateUpgraders that need to be run + s, err := r.upgradeState(s, meta) + if err != nil { + return s, err + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +func (r *Resource) upgradeState(s *tofu.InstanceState, meta interface{}) (*tofu.InstanceState, error) { + var err error + + needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) + migrate := needsMigration && r.MigrateState != nil + + if migrate { + s, err = r.MigrateState(stateSchemaVersion, s, meta) + if err != nil { + return s, err + } + } + + if len(r.StateUpgraders) == 0 { + return s, nil + } + + // If we ran MigrateState, then the stateSchemaVersion value is no longer + // correct. We can expect the first upgrade function to be the correct + // schema type version. 
+ if migrate { + stateSchemaVersion = r.StateUpgraders[0].Version + } + + schemaType := r.CoreConfigSchema().ImpliedType() + // find the expected type to convert the state + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion == upgrader.Version { + schemaType = upgrader.Type + } + } + + // StateUpgraders only operate on the new JSON format state, so the state + // need to be converted. + stateVal, err := StateValueFromInstanceState(s, schemaType) + if err != nil { + return nil, err + } + + jsonState, err := StateValueToJSONMap(stateVal, schemaType) + if err != nil { + return nil, err + } + + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion != upgrader.Version { + continue + } + + jsonState, err = upgrader.Upgrade(jsonState, meta) + if err != nil { + return nil, err + } + stateSchemaVersion++ + } + + // now we need to re-flatmap the new state + stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) + if err != nil { + return nil, err + } + + return r.ShimInstanceStateFromValue(stateVal) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. +// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. 
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.Create != nil || r.Update != nil || r.Delete != nil { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if r.Update == nil { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Destroy, and Read are required + if r.Read == nil { + return fmt.Errorf("Read must be implemented") + } + if r.Delete == nil { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. 
+ if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + return schemaMap(r.Schema).InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value +// is a separate copy and can be safely modified differently. 
+// +// The data returned from this function has no actual affect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *tofu.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through in the schema +// of the receiving Resource. +func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + +// Returns true if the resource is "top level" i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.Create != nil || r.Read != nil) +} + +// Determines if a given InstanceState needs to be migrated by checking the +// stored version number with the current SchemaVersion +func (r *Resource) checkSchemaVersion(is *tofu.InstanceState) (bool, int) { + // Get the raw interface{} value for the schema version. 
If it doesn't + // exist or is nil then set it to zero. + raw := is.Meta["schema_version"] + if raw == nil { + raw = "0" + } + + // Try to convert it to a string. If it isn't a string then we pretend + // that it isn't set at all. It should never not be a string unless it + // was manually tampered with. + rawString, ok := raw.(string) + if !ok { + rawString = "0" + } + + stateSchemaVersion, _ := strconv.Atoi(rawString) + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion +} + +func (r *Resource) recordCurrentSchemaVersion( + state *tofu.InstanceState) *tofu.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. +func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/pkg/legacy/helper/schema/resource_data.go b/pkg/legacy/helper/schema/resource_data.go new file mode 100644 index 00000000000..2b27a3d0e99 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_data.go @@ -0,0 +1,566 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package schema

import (
	"log"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/kubegems/opentofu/pkg/legacy/tofu"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

// ResourceData is used to query and set the attributes of a resource.
//
// ResourceData is the primary argument received for CRUD operations on
// a resource as well as configuration of a provider. It is a powerful
// structure that can be used to not only query data, but check for changes,
// define partial state updates, etc.
//
// The most relevant methods to take a look at are Get, Set, and Partial.
type ResourceData struct {
	// Settable (internally)
	schema       map[string]*Schema   // attribute schema the data is read and written against
	config       *tofu.ResourceConfig // raw configuration, when available
	state        *tofu.InstanceState  // prior state, when available
	diff         *tofu.InstanceDiff   // planned diff, when available
	meta         map[string]interface{}
	timeouts     *ResourceTimeout
	providerMeta cty.Value

	// Don't set
	multiReader *MultiLevelFieldReader // layered reader merging state/config/diff/set (built by init)
	setWriter   *MapFieldWriter        // receives values written via Set (built by init)
	newState    *tofu.InstanceState    // state being built up by writes (built by init)
	partial     bool
	partialMap  map[string]struct{}
	once        sync.Once // guards the lazy init of the fields above
	isNew       bool

	panicOnError bool
}

// getResult is the internal structure that is generated when a Get
// is called that contains some extra data that might be used.
type getResult struct {
	Value          interface{} // the raw value read
	ValueProcessed interface{} // post-processed value, when a StateFunc or similar applied
	Computed       bool        // true when the value is not yet known
	Exists         bool        // true when the value was actually present in a source
	Schema         *Schema     // schema the address resolved to, if any
}

// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
// values, bypassing schema. This MUST NOT be used in normal circumstances -
// it exists only to support the remote_state data source.
//
// Deprecated: Fully define schema attributes and use Set() instead.
func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
	d.once.Do(d.init)

	d.setWriter.unsafeWriteField(key, value)
}
+// +// If the key does exist in the schema but doesn't exist in the configuration, +// then the default value for that type will be returned. For strings, this is +// "", for numbers it is 0, etc. +// +// If you want to test if something is set at all in the configuration, +// use GetOk. +func (d *ResourceData) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +// GetChange returns the old and new value for a given key. +// +// HasChange should be used to check if a change exists. It is possible +// that both the old and new value are the same if the old value was not +// set and the new value is. This is common, for example, for boolean +// fields which have a zero value of false. +func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { + o, n := d.getChange(key, getSourceState, getSourceDiff) + return o.Value, n.Value +} + +// GetOk returns the data for the given key and whether or not the key +// has been set to a non-zero value at some point. +// +// The first result will not necessarilly be nil if the value doesn't exist. +// The second result should be checked to determine this information. +func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists returns the data for a given key and whether or not the key +// has been set to a non-zero value. This is only useful for determining +// if boolean attributes have been set, if they are Optional but do not +// have a Default value. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. 
This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +// This should only be used if absolutely required/needed. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := o.(Equal); ok { + return !eq.Equal(n) + } + + return !reflect.DeepEqual(o, n) +} + +// Partial turns partial state mode on/off. +// +// When partial state mode is enabled, then only key prefixes specified +// by SetPartial will be in the final state. This allows providers to return +// partial states for partially applied resources (when errors occur). +func (d *ResourceData) Partial(on bool) { + d.partial = on + if on { + if d.partialMap == nil { + d.partialMap = make(map[string]struct{}) + } + } else { + d.partialMap = nil + } +} + +// Set sets the value for the given key. +// +// If the key is invalid or the value is not a correct type, an error +// will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. This allows Set to take a pointer to primitives to + // simplify the interface. 
+ reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as its not + // a pointer to a struct, since struct pointers are allowed. + reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err +} + +// SetPartial adds the key to the final state output while +// in partial state mode. The key must be a root key in the schema (i.e. +// it cannot be "list.0"). +// +// If partial state mode is disabled, then this has no effect. Additionally, +// whenever partial state mode is toggled, the partial data is cleared. +func (d *ResourceData) SetPartial(k string) { + if d.partial { + d.partialMap[k] = struct{}{} + } +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. +func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the +// resource is destroyed. 
+func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource. +func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *tofu.InstanceState { + var result tofu.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k, _ := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. 
+ rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + if _, ok := d.partialMap[k]; ok { + source = getSourceSet + } + } + + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. 
+func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState tofu.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config. 
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + level := "set" + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. 
+ return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +func (d *ResourceData) GetProviderMeta(dst interface{}) error { + if d.providerMeta.IsNull() { + return nil + } + return gocty.FromCtyValue(d.providerMeta, &dst) +} diff --git a/pkg/legacy/helper/schema/resource_data_get_source.go b/pkg/legacy/helper/schema/resource_data_get_source.go new file mode 100644 index 00000000000..096af9a3486 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_data_get_source.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). +type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/pkg/legacy/helper/schema/resource_data_test.go b/pkg/legacy/helper/schema/resource_data_test.go new file mode 100644 index 00000000000..158498153e0 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_data_test.go @@ -0,0 +1,3556 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestResourceDataGet(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + }{ + // #0 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + }, + + // #1 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + Value: "foo", + }, + + // #2 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo!", + NewExtra: "foo", + }, + }, + }, + + Key: "availability_zone", + Value: "foo", + }, + + // #3 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + }, + }, + + Diff: nil, + + Key: "availability_zone", + + Value: "bar", + }, + + // #4 + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + 
Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + }, + + // #5 + { + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "port": "80", + }, + }, + + Diff: nil, + + Key: "port", + + Value: 80, + }, + + // #6 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports.1", + + Value: 2, + }, + + // #7 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports.#", + + Value: 3, + }, + + // #8 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Key: "ports.#", + + Value: 0, + }, + + // #9 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Key: "ports", + + Value: []interface{}{1, 2, 5}, + }, + + // #10 + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + 
Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ingress.0.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Key: "ingress.0", + + Value: map[string]interface{}{ + "from": 8080, + }, + }, + + // #11 + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ingress.0.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Key: "ingress", + + Value: []interface{}{ + map[string]interface{}{ + "from": 8080, + }, + }, + }, + + // #12 Computed get + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Key: "availability_zone", + + Value: "foo", + }, + + // #13 Full object + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "", + + Value: map[string]interface{}{ + "availability_zone": "foo", + }, + }, + + // #14 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + 
}, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "2", + }, + "config_vars.0.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + "config_vars.1.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Key: "config_vars", + + Value: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + // #15 List of maps in state + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.foo": "baz", + "config_vars.1.bar": "bar", + }, + }, + + Diff: nil, + + Key: "config_vars", + + Value: []interface{}{ + map[string]interface{}{ + "foo": "baz", + }, + map[string]interface{}{ + "bar": "bar", + }, + }, + }, + + // #16 List of maps with removal in diff + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.FOO": "bar", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.FOO": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + }, + }, + + Key: "config_vars", + + Value: []interface{}{}, + }, + + // #17 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: 
nil, + + Key: "ports", + + Value: []interface{}{80}, + }, + + // #18 + { + Schema: map[string]*Schema{ + "data": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "value": &Schema{ + Type: TypeString, + Required: true, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "data.#": "1", + "data.10.index": "10", + "data.10.value": "50", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "data.10.value": &tofu.ResourceAttrDiff{ + Old: "50", + New: "80", + }, + }, + }, + + Key: "data", + + Value: []interface{}{ + map[string]interface{}{ + "index": 10, + "value": "80", + }, + }, + }, + + // #19 Empty Set + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + + Value: []interface{}{}, + }, + + // #20 Float zero + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratio", + + Value: 0.0, + }, + + // #21 Float given + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ratio": "0.5", + }, + }, + + Diff: nil, + + Key: "ratio", + + Value: 0.5, + }, + + // #22 Float diff + { + Schema: map[string]*Schema{ + "ratio": &Schema{ + Type: TypeFloat, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ratio": "-0.5", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + 
"ratio": &tofu.ResourceAttrDiff{ + Old: "-0.5", + New: "33.0", + }, + }, + }, + + Key: "ratio", + + Value: 33.0, + }, + + // #23 Sets with removed elements + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "1", + }, + "ports.80": &tofu.ResourceAttrDiff{ + Old: "80", + New: "80", + }, + "ports.8080": &tofu.ResourceAttrDiff{ + Old: "8080", + New: "0", + NewRemoved: true, + }, + }, + }, + + Key: "ports", + + Value: []interface{}{80}, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + v := d.Get(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad: %d\n\n%#v\n\nExpected: %#v", i, v, tc.Value) + } + } +} + +func TestResourceDataGetChange(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + OldValue interface{} + NewValue interface{} + }{ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + OldValue: "", + NewValue: "foo", + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + 
Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + OldValue: "foo", + NewValue: "foo", + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + o, n := d.GetChange(tc.Key) + if !reflect.DeepEqual(o, tc.OldValue) { + t.Fatalf("Old Bad: %d\n\n%#v", i, o) + } + if !reflect.DeepEqual(n, tc.NewValue) { + t.Fatalf("New Bad: %d\n\n%#v", i, n) + } + } +} + +func TestResourceDataGetOk(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + /* + * Primitives + */ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + /* + * Lists + */ + + { + Schema: 
map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + /* + * Map + */ + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: map[string]interface{}{}, + Ok: false, + }, + + /* + * Set + */ + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports.0", + Value: 0, + Ok: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "0", + }, + }, + }, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + // Further illustrates and clarifiies the GetOk semantics from #933, and + // highlights the limitation that zero-value config is currently + // indistinguishable from unset config. 
+ { + Schema: map[string]*Schema{ + "from_port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "from_port": &tofu.ResourceAttrDiff{ + Old: "", + New: "0", + }, + }, + }, + + Key: "from_port", + Value: 0, + Ok: false, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + v, ok := d.GetOk(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad: %d\n\n%#v", i, v) + } + if ok != tc.Ok { + t.Fatalf("%d: expected ok: %t, got: %t", i, tc.Ok, ok) + } + } +} + +func TestResourceDataGetOkExists(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + /* + * Primitives + */ + { + Name: "string-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: true, + }, + + { + Name: "string-computed-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Name: "string-optional-computed-nil-diff", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: 
"availability_zone", + Value: "", + Ok: false, + }, + + /* + * Lists + */ + + { + Name: "list-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + /* + * Map + */ + + { + Name: "map-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeMap, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: map[string]interface{}{}, + Ok: false, + }, + + /* + * Set + */ + + { + Name: "set-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + { + Name: "set-optional-key", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports.0", + Value: 0, + Ok: false, + }, + + { + Name: "bool-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: false, + Ok: true, + }, + + { + Name: "bool-literal-set", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + New: "true", + }, + }, + }, + + Key: "availability_zone", + Value: true, + Ok: true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, 
tc.Name), func(t *testing.T) { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("%s err: %s", tc.Name, err) + } + + v, ok := d.GetOkExists(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad %s: \n%#v", tc.Name, v) + } + if ok != tc.Ok { + t.Fatalf("%s: expected ok: %t, got: %t", tc.Name, tc.Ok, ok) + } + }) + } +} + +func TestResourceDataTimeout(t *testing.T) { + cases := []struct { + Name string + Rd *ResourceData + Expected *ResourceTimeout + }{ + { + Name: "Basic example default", + Rd: &ResourceData{timeouts: timeoutForValues(10, 3, 0, 15, 0)}, + Expected: expectedTimeoutForValues(10, 3, 0, 15, 0), + }, + { + Name: "Resource and config match update, create", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 3, 0, 0)}, + Expected: expectedTimeoutForValues(10, 0, 3, 0, 0), + }, + { + Name: "Resource provides default", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 0, 7)}, + Expected: expectedTimeoutForValues(10, 7, 7, 7, 7), + }, + { + Name: "Resource provides default and delete", + Rd: &ResourceData{timeouts: timeoutForValues(10, 0, 0, 15, 7)}, + Expected: expectedTimeoutForValues(10, 7, 7, 15, 7), + }, + { + Name: "Resource provides default, config overwrites other values", + Rd: &ResourceData{timeouts: timeoutForValues(10, 3, 0, 0, 13)}, + Expected: expectedTimeoutForValues(10, 3, 13, 13, 13), + }, + { + Name: "Resource has no config", + Rd: &ResourceData{}, + Expected: expectedTimeoutForValues(0, 0, 0, 0, 0), + }, + } + + keys := timeoutKeys() + for i, c := range cases { + t.Run(fmt.Sprintf("%d-%s", i, c.Name), func(t *testing.T) { + + for _, k := range keys { + got := c.Rd.Timeout(k) + var ex *time.Duration + switch k { + case TimeoutCreate: + ex = c.Expected.Create + case TimeoutRead: + ex = c.Expected.Read + case TimeoutUpdate: + ex = c.Expected.Update + case TimeoutDelete: + ex = c.Expected.Delete + case TimeoutDefault: + ex = 
c.Expected.Default + } + + if got > 0 && ex == nil { + t.Fatalf("Unexpected value in (%s), case %d check 1:\n\texpected: %#v\n\tgot: %#v", k, i, ex, got) + } + if got == 0 && ex != nil { + t.Fatalf("Unexpected value in (%s), case %d check 2:\n\texpected: %#v\n\tgot: %#v", k, i, *ex, got) + } + + // confirm values + if ex != nil { + if got != *ex { + t.Fatalf("Timeout %s case (%d) expected (%s), got (%s)", k, i, *ex, got) + } + } + } + + }) + } +} + +func TestResourceDataHasChange(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Change bool + }{ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + Change: true, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Key: "availability_zone", + + Change: false, + }, + + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "tags.Name": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + }, + }, + + Key: "tags", + + Change: true, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, 
+ Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + }, + }, + + Key: "ports", + + Change: true, + }, + + // https://github.com/hashicorp/terraform/issues/927 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.80": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "tags.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Key: "ports", + + Change: false, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := d.HasChange(tc.Key) + if actual != tc.Change { + t.Fatalf("Bad: %d %#v", i, actual) + } + } +} + +func TestResourceDataSet(t *testing.T) { + var testNilPtr *string + + cases := []struct { + TestName string + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + Err bool + GetKey string + GetValue interface{} + + // GetPreProcess can be set to munge the return value before being + // compared to GetValue + GetPreProcess func(interface{}) interface{} + }{ + { + TestName: "Basic good", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "foo", + + GetKey: "availability_zone", + GetValue: "foo", + }, + { + TestName: "Basic int", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, 
+ Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "port", + Value: 80, + + GetKey: "port", + GetValue: 80, + }, + { + TestName: "Basic bool, true", + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "vpc", + Value: true, + + GetKey: "vpc", + GetValue: true, + }, + { + TestName: "Basic bool, false", + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "vpc", + Value: false, + + GetKey: "vpc", + GetValue: false, + }, + { + TestName: "Invalid type", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: 80, + Err: true, + + GetKey: "availability_zone", + GetValue: "", + }, + { + TestName: "List of primitives, set list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []int{1, 2, 5}, + + GetKey: "ports", + GetValue: []interface{}{1, 2, 5}, + }, + { + TestName: "List of primitives, set list with error", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{1, "NOPE", 5}, + Err: true, + + GetKey: "ports", + GetValue: []interface{}{}, + }, + { + TestName: "Set a list of maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "config_vars", + Value: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + Err: false, 
+ + GetKey: "config_vars", + GetValue: []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + { + TestName: "Set, with list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "100", + "ports.1": "80", + "ports.2": "80", + }, + }, + + Key: "ports", + Value: []interface{}{100, 125, 125}, + + GetKey: "ports", + GetValue: []interface{}{100, 125}, + }, + { + TestName: " Set, with Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Key: "ports", + Value: &Set{ + m: map[string]interface{}{ + "1": 1, + "2": 2, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{1, 2}, + }, + { + TestName: "Set single item", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.100": "100", + "ports.80": "80", + }, + }, + + Key: "ports.100", + Value: 256, + Err: true, + + GetKey: "ports", + GetValue: []interface{}{100, 80}, + }, + { + TestName: "Set with nested set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + }, + + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) 
+ }, + }, + }, + }, + Set: func(a interface{}) int { + return a.(map[string]interface{})["port"].(int) + }, + }, + }, + + State: nil, + + Key: "ports", + Value: []interface{}{ + map[string]interface{}{ + "port": 80, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{ + map[string]interface{}{ + "port": 80, + "set": []interface{}{}, + }, + }, + + GetPreProcess: func(v interface{}) interface{} { + if v == nil { + return v + } + s, ok := v.([]interface{}) + if !ok { + return v + } + for _, v := range s { + m, ok := v.(map[string]interface{}) + if !ok { + continue + } + if m["set"] == nil { + continue + } + if s, ok := m["set"].(*Set); ok { + m["set"] = s.List() + } + } + + return v + }, + }, + { + TestName: "List of floats, set list", + Schema: map[string]*Schema{ + "ratios": &Schema{ + Type: TypeList, + Computed: true, + Elem: &Schema{Type: TypeFloat}, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratios", + Value: []float64{1.0, 2.2, 5.5}, + + GetKey: "ratios", + GetValue: []interface{}{1.0, 2.2, 5.5}, + }, + { + TestName: "Set of floats, set list", + Schema: map[string]*Schema{ + "ratios": &Schema{ + Type: TypeSet, + Computed: true, + Elem: &Schema{Type: TypeFloat}, + Set: func(a interface{}) int { + // Because we want to be safe on a 32-bit and 64-bit system, + // we can just set a "scale factor" here that's always larger than the number of + // decimal places we expect to see., and then multiply by that to cast to int + // otherwise we could get clashes in unique ids + scaleFactor := 100000 + return int(a.(float64) * float64(scaleFactor)) + }, + }, + }, + + State: nil, + + Diff: nil, + + Key: "ratios", + Value: []float64{1.0, 2.2, 5.5}, + + GetKey: "ratios", + GetValue: []interface{}{1.0, 2.2, 5.5}, + }, + { + TestName: "Basic pointer", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: 
testPtrTo("foo"), + + GetKey: "availability_zone", + GetValue: "foo", + }, + { + TestName: "Basic nil value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: testPtrTo(nil), + + GetKey: "availability_zone", + GetValue: "", + }, + { + TestName: "Basic nil pointer", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: nil, + + Key: "availability_zone", + Value: testNilPtr, + + GetKey: "availability_zone", + GetValue: "", + }, + { + TestName: "Set in a list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "set": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + }, + }, + + State: nil, + + Key: "ports", + Value: []interface{}{ + map[string]interface{}{ + "set": []interface{}{ + 1, + }, + }, + }, + + GetKey: "ports", + GetValue: []interface{}{ + map[string]interface{}{ + "set": []interface{}{ + 1, + }, + }, + }, + GetPreProcess: func(v interface{}) interface{} { + if v == nil { + return v + } + s, ok := v.([]interface{}) + if !ok { + return v + } + for _, v := range s { + m, ok := v.(map[string]interface{}) + if !ok { + continue + } + if m["set"] == nil { + continue + } + if s, ok := m["set"].(*Set); ok { + m["set"] = s.List() + } + } + + return v + }, + }, + } + + t.Setenv(PanicOnErr, "") + + for _, tc := range cases { + t.Run(tc.TestName, func(t *testing.T) { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = d.Set(tc.Key, tc.Value) + if err != nil != tc.Err { + t.Fatalf("unexpected err: %s", err) + } + + v := d.Get(tc.GetKey) + if s, ok := v.(*Set); ok { + v = s.List() + } 
+ + if tc.GetPreProcess != nil { + v = tc.GetPreProcess(v) + } + + if !reflect.DeepEqual(v, tc.GetValue) { + t.Fatalf("Got unexpected value\nactual: %#v\nexpected:%#v", v, tc.GetValue) + } + }) + } +} + +func TestResourceDataState_dynamicAttributes(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Set map[string]interface{} + UnsafeSet map[string]string + Result *tofu.InstanceState + }{ + { + Schema: map[string]*Schema{ + "__has_dynamic_attributes": { + Type: TypeString, + Optional: true, + }, + + "schema_field": { + Type: TypeString, + Required: true, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "schema_field": "present", + }, + + UnsafeSet: map[string]string{ + "test1": "value", + "test2": "value", + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "schema_field": "present", + "test1": "value", + "test2": "value", + }, + }, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + for k, v := range tc.Set { + d.Set(k, v) + } + + for k, v := range tc.UnsafeSet { + d.UnsafeSetFieldRaw(k, v) + } + + // Set an ID so that the state returned is not nil + idSet := false + if d.Id() == "" { + idSet = true + d.SetId("foo") + } + + actual := d.State() + + // If we set an ID, then undo what we did so the comparison works + if actual != nil && idSet { + actual.ID = "" + delete(actual.Attributes, "id") + } + + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("Bad: %d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) + } + } +} + +func TestResourceDataState_schema(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Set map[string]interface{} + Result *tofu.InstanceState + Partial []string + }{ + // #0 Basic primitive in diff + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ 
+ Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + }, + + // #1 Basic primitive set override + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Set: map[string]interface{}{ + "availability_zone": "bar", + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + }, + }, + }, + + // #2 + { + Schema: map[string]*Schema{ + "vpc": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "vpc": true, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "vpc": "true", + }, + }, + }, + + // #3 Basic primitive with StateFunc set + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(interface{}) string { return "" }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + NewExtra: "foo!", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + }, + + // #4 List + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: 
map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.0": "80", + "ports.1": "100", + }, + }, + }, + + // #5 List of resources + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ingress.0.from": &tofu.ResourceAttrDiff{ + Old: "80", + New: "150", + }, + "ingress.1.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "2", + "ingress.0.from": "150", + "ingress.1.from": "100", + }, + }, + }, + + // #6 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.%": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.%": "1", + "config_vars.1.bar": "baz", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + NewRemoved: true, + }, + }, + }, + + Set: map[string]interface{}{ + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + 
"baz": "bang", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.%": "1", + "config_vars.0.foo": "bar", + "config_vars.1.%": "1", + "config_vars.1.baz": "bang", + }, + }, + }, + + // #7 List of maps with removal in diff + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.FOO": "bar", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.FOO": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "0", + }, + }, + }, + + // #8 Basic state with other keys + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Result: &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "availability_zone": "foo", + }, + }, + }, + + // #9 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: nil, + + Result: &tofu.InstanceState{ + Attributes: 
map[string]string{ + "ports.#": "3", + "ports.80": "80", + "ports.81": "81", + "ports.100": "100", + }, + }, + }, + + // #10 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{100, 80}, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.80": "80", + "ports.100": "100", + }, + }, + }, + + // #11 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "order": &Schema{ + Type: TypeInt, + }, + + "a": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + + "b": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + }, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["order"].(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.10.order": "10", + "ports.10.a.#": "1", + "ports.10.a.0": "80", + "ports.20.order": "20", + "ports.20.b.#": "1", + "ports.20.b.0": "100", + }, + }, + + Set: map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "order": 20, + "b": []interface{}{100}, + }, + map[string]interface{}{ + "order": 10, + "a": []interface{}{80}, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.10.order": "10", + "ports.10.a.#": "1", + "ports.10.a.0": "80", + "ports.10.b.#": "0", + "ports.20.order": "20", + "ports.20.a.#": "0", + "ports.20.b.#": "1", + "ports.20.b.0": "100", + }, + }, + }, + + /* + * PARTIAL STATES + */ + + // #12 Basic primitive + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + 
}, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #13 List + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0": "80", + }, + }, + }, + + // #14 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Partial: []string{}, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #15 List of resources + { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "2", + }, + "ingress.0.from": 
&tofu.ResourceAttrDiff{ + Old: "80", + New: "150", + }, + "ingress.1.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "100", + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "1", + "ingress.0.from": "80", + }, + }, + }, + + // #16 List of maps + { + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{ + Type: TypeMap, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.bar": "baz", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + NewRemoved: true, + }, + }, + }, + + Set: map[string]interface{}{ + "config_vars": []map[string]interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "baz": "bang", + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + // TODO: broken, shouldn't bar be removed? 
+ "config_vars.#": "2", + "config_vars.0.%": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "bar", + "config_vars.1.%": "1", + "config_vars.1.bar": "baz", + }, + }, + }, + + // #17 Sets + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.120": &tofu.ResourceAttrDiff{ + New: "120", + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.80": "80", + "ports.81": "81", + "ports.100": "100", + }, + }, + }, + + // #18 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Partial: []string{}, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #19 Maps + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "tags.Name": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "tags.%": "1", + "tags.Name": "foo", + }, + }, + }, + + // #20 empty computed map + { + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: 
&tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "tags.Name": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Set: map[string]interface{}{ + "tags": map[string]string{}, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "tags.%": "0", + }, + }, + }, + + // #21 + { + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{}, + }, + }, + + // #22 + { + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: map[string]interface{}{ + "foo": "bar", + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + }, + + // #23 Set of maps + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "uuids": &Schema{Type: TypeMap}, + }, + }, + Set: func(a interface{}) int { + m := a.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.10.uuids.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "index": 10, + "uuids": map[string]interface{}{ + "80": "value", + }, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.10.index": "10", + "ports.10.uuids.%": "1", + 
"ports.10.uuids.80": "value", + }, + }, + }, + + // #24 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.100": "100", + "ports.80": "80", + "ports.81": "81", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "3", + New: "0", + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #25 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #26 + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Diff: nil, + + Set: map[string]interface{}{ + "ports": []interface{}{}, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + }, + + // #27 Set lists + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{Type: TypeInt}, + "uuids": &Schema{Type: TypeMap}, + }, + }, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Set: map[string]interface{}{ + "ports": []interface{}{ + map[string]interface{}{ + "index": 10, + "uuids": 
map[string]interface{}{ + "80": "value", + }, + }, + }, + }, + + Result: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "1", + "ports.0.index": "10", + "ports.0.uuids.%": "1", + "ports.0.uuids.80": "value", + }, + }, + }, + } + + for i, tc := range cases { + d, err := schemaMap(tc.Schema).Data(tc.State, tc.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + for k, v := range tc.Set { + if err := d.Set(k, v); err != nil { + t.Fatalf("%d err: %s", i, err) + } + } + + // Set an ID so that the state returned is not nil + idSet := false + if d.Id() == "" { + idSet = true + d.SetId("foo") + } + + // If we have partial, then enable partial state mode. + if tc.Partial != nil { + d.Partial(true) + for _, k := range tc.Partial { + d.SetPartial(k) + } + } + + actual := d.State() + + // If we set an ID, then undo what we did so the comparison works + if actual != nil && idSet { + actual.ID = "" + delete(actual.Attributes, "id") + } + + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("Bad: %d\n\n%#v\n\nExpected:\n\n%#v", i, actual, tc.Result) + } + } +} + +func TestResourceData_nonStringValuesInMap(t *testing.T) { + cases := []struct { + Schema map[string]*Schema + Diff *tofu.InstanceDiff + MapFieldName string + ItemName string + ExpectedType string + }{ + { + Schema: map[string]*Schema{ + "boolMap": &Schema{ + Type: TypeMap, + Elem: TypeBool, + Optional: true, + }, + }, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "boolMap.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "boolMap.boolField": &tofu.ResourceAttrDiff{ + Old: "", + New: "true", + }, + }, + }, + MapFieldName: "boolMap", + ItemName: "boolField", + ExpectedType: "bool", + }, + { + Schema: map[string]*Schema{ + "intMap": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + }, + }, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "intMap.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + 
"intMap.intField": &tofu.ResourceAttrDiff{ + Old: "", + New: "8", + }, + }, + }, + MapFieldName: "intMap", + ItemName: "intField", + ExpectedType: "int", + }, + { + Schema: map[string]*Schema{ + "floatMap": &Schema{ + Type: TypeMap, + Elem: TypeFloat, + Optional: true, + }, + }, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "floatMap.%": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "floatMap.floatField": &tofu.ResourceAttrDiff{ + Old: "", + New: "8.22", + }, + }, + }, + MapFieldName: "floatMap", + ItemName: "floatField", + ExpectedType: "float64", + }, + } + + for _, c := range cases { + d, err := schemaMap(c.Schema).Data(nil, c.Diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + m, ok := d.Get(c.MapFieldName).(map[string]interface{}) + if !ok { + t.Fatalf("expected %q to be castable to a map", c.MapFieldName) + } + field, ok := m[c.ItemName] + if !ok { + t.Fatalf("expected %q in the map", c.ItemName) + } + + typeName := reflect.TypeOf(field).Name() + if typeName != c.ExpectedType { + t.Fatalf("expected %q to be %q, it is %q.", + c.ItemName, c.ExpectedType, typeName) + } + } +} + +func TestResourceDataSetConnInfo(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + d.SetConnInfo(map[string]string{ + "foo": "bar", + }) + + expected := map[string]string{ + "foo": "bar", + } + + actual := d.State() + if !reflect.DeepEqual(actual.Ephemeral.ConnInfo, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetMeta_Timeouts(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + + rt := ResourceTimeout{ + Create: DefaultTimeout(7 * time.Minute), + } + + d.timeouts = &rt + + expected := expectedForValues(7, 0, 0, 0, 0) + + actual := d.State() + if !reflect.DeepEqual(actual.Meta[TimeoutKey], expected) { + t.Fatalf("Bad Meta_timeout match:\n\texpected: %#v\n\tgot: %#v", expected, actual.Meta[TimeoutKey]) + } +} + +func TestResourceDataSetId(t *testing.T) { + d := &ResourceData{ + state: 
&tofu.InstanceState{ + ID: "test", + Attributes: map[string]string{ + "id": "test", + }, + }, + } + d.SetId("foo") + + actual := d.State() + + // SetId should set both the ID field as well as the attribute, to aid in + // transitioning to the new type system. + if actual.ID != "foo" || actual.Attributes["id"] != "foo" { + t.Fatalf("bad: %#v", actual) + } + + d.SetId("") + actual = d.State() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetId_clear(t *testing.T) { + d := &ResourceData{ + state: &tofu.InstanceState{ID: "bar"}, + } + d.SetId("") + + actual := d.State() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetId_override(t *testing.T) { + d := &ResourceData{ + state: &tofu.InstanceState{ID: "bar"}, + } + d.SetId("foo") + + actual := d.State() + if actual.ID != "foo" { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDataSetType(t *testing.T) { + d := &ResourceData{} + d.SetId("foo") + d.SetType("bar") + + actual := d.State() + if v := actual.Ephemeral.Type; v != "bar" { + t.Fatalf("bad: %#v", actual) + } +} + +func testPtrTo(raw interface{}) interface{} { + return &raw +} diff --git a/pkg/legacy/helper/schema/resource_diff.go b/pkg/legacy/helper/schema/resource_diff.go new file mode 100644 index 00000000000..adb9b95698e --- /dev/null +++ b/pkg/legacy/helper/schema/resource_diff.go @@ -0,0 +1,563 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A lock to prevent races on writes. 
The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapValueWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to MapValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. 
+func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. + v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. +// It can be used to veto particular changes in the diff, customize the diff +// that has been created, or diff values not controlled by config. +// +// The object functions similar to ResourceData, however most notably lacks +// Set, SetPartial, and Partial, as it should be used to change diff values +// only. Most other first-class ResourceData functions exist, namely Get, +// GetOk, HasChange, and GetChange exist. +// +// All functions in ResourceDiff, save for ForceNew, can only be used on +// computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *tofu.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *tofu.InstanceState + + // The diff created by Tofu. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *tofu.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. 
+ multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *tofu.ResourceConfig, state *tofu.InstanceState, diff *tofu.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that 
were updated by this ResourceDiff run. +// These are the only keys that a diff should be re-calculated for. +// +// This is the combined result of both keys for which diff values were updated +// for or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. +func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + for _, l := range s { + if k == l { + break + } + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. 
+func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. +func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %w", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. +// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. 
This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function is a no-op/error if there is no diff. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. 
+func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available +// as its final value at diff time. If the return value is false, this means +// either the value is based of interpolation that was unavailable at diff +// time, or that the value was explicitly marked as computed by SetNewComputed. +func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or +// in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. 
An example where this is + // needed is *Set + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. +// +// Note that technically, ID does not change during diffs (it either has +// already changed in the refresh, or will change on update), hence we do not +// support updating the ID or fetching it from anything else other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in +// diffChange, HasChange, and GetChange. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized +// diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, +// starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. 
+func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact. +func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. +func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. 
+func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/pkg/legacy/helper/schema/resource_diff_test.go b/pkg/legacy/helper/schema/resource_diff_test.go new file mode 100644 index 00000000000..a85fa6c1383 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_diff_test.go @@ -0,0 +1,2050 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// testSetFunc is a very simple function we use to test a foo/bar complex set. +// Both "foo" and "bar" are int values. +// +// This is not foolproof as since it performs sums, you can run into +// collisions. Spec tests accordingly. :P +func testSetFunc(v interface{}) int { + m := v.(map[string]interface{}) + return m["foo"].(int) + m["bar"].(int) +} + +// resourceDiffTestCase provides a test case struct for SetNew and SetDiff. +type resourceDiffTestCase struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Config *tofu.ResourceConfig + Diff *tofu.InstanceDiff + Key string + OldValue interface{} + NewValue interface{} + Expected *tofu.InstanceDiff + ExpectedKeys []string + ExpectedError bool +} + +// testDiffCases produces a list of test cases for use with SetNew and SetDiff. 
+func testDiffCases(t *testing.T, oldPrefix string, oldOffset int, computed bool) []resourceDiffTestCase { + return []resourceDiffTestCase{ + resourceDiffTestCase{ + Name: "basic primitive diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + NewValue: "qux", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: func() string { + if computed { + return "" + } + return "qux" + }(), + NewComputed: computed, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "basic set diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeString}, + Set: HashString, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.#": "1", + "foo.1996459178": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{"baz"}, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.1996459178": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + "foo.2015626392": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + Key: "foo", + NewValue: []interface{}{"qux"}, + Expected: &tofu.InstanceDiff{ + Attributes: func() map[string]*tofu.ResourceAttrDiff { + result := map[string]*tofu.ResourceAttrDiff{} + if computed { + result["foo.#"] = &tofu.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + } + } else { + result["foo.1494962232"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "qux", + } + result["foo.1996459178"] = 
&tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + } + } + return result + }(), + }, + }, + resourceDiffTestCase{ + Name: "basic list diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeString}, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.#": "1", + "foo.0": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{"baz"}, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + NewValue: []interface{}{"qux"}, + Expected: &tofu.InstanceDiff{ + Attributes: func() map[string]*tofu.ResourceAttrDiff { + result := make(map[string]*tofu.ResourceAttrDiff) + if computed { + result["foo.#"] = &tofu.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + } + } else { + result["foo.0"] = &tofu.ResourceAttrDiff{ + Old: "bar", + New: "qux", + } + } + return result + }(), + }, + }, + resourceDiffTestCase{ + Name: "basic map diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.%": "1", + "foo.bar": "baz", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": map[string]interface{}{"bar": "qux"}, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.bar": &tofu.ResourceAttrDiff{ + Old: "baz", + New: "qux", + }, + }, + }, + Key: "foo", + NewValue: map[string]interface{}{"bar": "quux"}, + Expected: &tofu.InstanceDiff{ + Attributes: func() map[string]*tofu.ResourceAttrDiff { + result := make(map[string]*tofu.ResourceAttrDiff) + if computed { + result["foo.%"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + } + result["foo.bar"] = &tofu.ResourceAttrDiff{ + Old: "baz", + New: "", + 
NewRemoved: true, + } + } else { + result["foo.bar"] = &tofu.ResourceAttrDiff{ + Old: "baz", + New: "quux", + } + } + return result + }(), + }, + }, + resourceDiffTestCase{ + Name: "additional diff with primitive", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + "one": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + "one": "two", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "one", + NewValue: "four", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "one": &tofu.ResourceAttrDiff{ + Old: "two", + New: func() string { + if computed { + return "" + } + return "four" + }(), + NewComputed: computed, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "additional diff with primitive computed only", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + "one": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + "one": "two", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "one", + NewValue: "three", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "one": &tofu.ResourceAttrDiff{ + Old: "two", + New: func() string { + if computed { + return "" + } + return "three" + }(), + NewComputed: computed, + }, + }, + }, + }, + 
resourceDiffTestCase{ + Name: "complex-ish set diff", + Schema: map[string]*Schema{ + "top": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + }, + "bar": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + }, + }, + }, + Set: testSetFunc, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "top.#": "2", + "top.3.foo": "1", + "top.3.bar": "2", + "top.23.foo": "11", + "top.23.bar": "12", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "top": []interface{}{ + map[string]interface{}{ + "foo": 1, + "bar": 3, + }, + map[string]interface{}{ + "foo": 12, + "bar": 12, + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "top.4.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "top.4.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "3", + }, + "top.24.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "12", + }, + "top.24.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + Key: "top", + NewValue: NewSet(testSetFunc, []interface{}{ + map[string]interface{}{ + "foo": 1, + "bar": 4, + }, + map[string]interface{}{ + "foo": 13, + "bar": 12, + }, + map[string]interface{}{ + "foo": 21, + "bar": 22, + }, + }), + Expected: &tofu.InstanceDiff{ + Attributes: func() map[string]*tofu.ResourceAttrDiff { + result := make(map[string]*tofu.ResourceAttrDiff) + if computed { + result["top.#"] = &tofu.ResourceAttrDiff{ + Old: "2", + New: "", + NewComputed: true, + } + } else { + result["top.#"] = &tofu.ResourceAttrDiff{ + Old: "2", + New: "3", + } + result["top.5.foo"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + } + result["top.5.bar"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "4", + } + result["top.25.foo"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "13", + } + result["top.25.bar"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "12", + } + 
result["top.43.foo"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "21", + } + result["top.43.bar"] = &tofu.ResourceAttrDiff{ + Old: "", + New: "22", + } + } + return result + }(), + }, + }, + resourceDiffTestCase{ + Name: "primitive, no diff, no refresh", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{Attributes: map[string]*tofu.ResourceAttrDiff{}}, + Key: "foo", + NewValue: "baz", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: func() string { + if computed { + return "" + } + return "baz" + }(), + NewComputed: computed, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "non-computed key, should error", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Required: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + NewValue: "qux", + ExpectedError: true, + }, + resourceDiffTestCase{ + Name: "bad key, should error", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Required: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "bad", + NewValue: "qux", + ExpectedError: true, + }, + resourceDiffTestCase{ + // NOTE: This case is technically impossible in the current + 
// implementation, because optional+computed values never show up in the + // diff, and we actually clear existing diffs when SetNew or + // SetNewComputed is run. This test is here to ensure that if either of + // these behaviors change that we don't introduce regressions. + Name: "NewRemoved in diff for Optional and Computed, should be fully overridden", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + }, + }, + Key: "foo", + NewValue: "qux", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: func() string { + if computed { + return "" + } + return "qux" + }(), + NewComputed: computed, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "NewComputed should always propagate", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "", + }, + ID: "pre-existing", + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{Attributes: map[string]*tofu.ResourceAttrDiff{}}, + Key: "foo", + NewValue: "", + Expected: &tofu.InstanceDiff{ + Attributes: func() map[string]*tofu.ResourceAttrDiff { + if computed { + return map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + NewComputed: computed, + }, + } + } + return map[string]*tofu.ResourceAttrDiff{} + }(), + }, + }, + } +} + +func TestSetNew(t *testing.T) { + testCases := testDiffCases(t, "", 0, false) + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + m := schemaMap(tc.Schema) + d := 
newResourceDiff(tc.Schema, tc.Config, tc.State, tc.Diff) + err := d.SetNew(tc.Key, tc.NewValue) + switch { + case err != nil && !tc.ExpectedError: + t.Fatalf("bad: %s", err) + case err == nil && tc.ExpectedError: + t.Fatalf("Expected error, got none") + case err != nil && tc.ExpectedError: + return + } + for _, k := range d.UpdatedKeys() { + if err := m.diff(k, m[k], tc.Diff, d, false); err != nil { + t.Fatalf("bad: %s", err) + } + } + if !reflect.DeepEqual(tc.Expected, tc.Diff) { + t.Fatalf("Expected %s, got %s", spew.Sdump(tc.Expected), spew.Sdump(tc.Diff)) + } + }) + } +} + +func TestSetNewComputed(t *testing.T) { + testCases := testDiffCases(t, "", 0, true) + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + m := schemaMap(tc.Schema) + d := newResourceDiff(tc.Schema, tc.Config, tc.State, tc.Diff) + err := d.SetNewComputed(tc.Key) + switch { + case err != nil && !tc.ExpectedError: + t.Fatalf("bad: %s", err) + case err == nil && tc.ExpectedError: + t.Fatalf("Expected error, got none") + case err != nil && tc.ExpectedError: + return + } + for _, k := range d.UpdatedKeys() { + if err := m.diff(k, m[k], tc.Diff, d, false); err != nil { + t.Fatalf("bad: %s", err) + } + } + if !reflect.DeepEqual(tc.Expected, tc.Diff) { + t.Fatalf("Expected %s, got %s", spew.Sdump(tc.Expected), spew.Sdump(tc.Diff)) + } + }) + } +} + +func TestForceNew(t *testing.T) { + cases := []resourceDiffTestCase{ + resourceDiffTestCase{ + Name: "basic primitive diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + Expected: &tofu.InstanceDiff{ + Attributes: 
map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + RequiresNew: true, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "no change, should error", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "bar", + }), + ExpectedError: true, + }, + resourceDiffTestCase{ + Name: "basic primitive, non-computed key", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Required: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + RequiresNew: true, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "nested field", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Required: true, + MaxItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Optional: true, + }, + "baz": { + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.#": "1", + "foo.0.bar": "abc", + "foo.0.baz": "xyz", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{ + "bar": "abcdefg", + "baz": "changed", + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "abc", + New: "abcdefg", + }, + "foo.0.baz": &tofu.ResourceAttrDiff{ + Old: "xyz", + New: 
"changed", + }, + }, + }, + Key: "foo.0.baz", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "abc", + New: "abcdefg", + }, + "foo.0.baz": &tofu.ResourceAttrDiff{ + Old: "xyz", + New: "changed", + RequiresNew: true, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "preserve NewRemoved on existing diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + }, + }, + Key: "foo", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + RequiresNew: true, + NewRemoved: true, + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "nested field, preserve original diff without zero values", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Required: true, + MaxItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Optional: true, + }, + "baz": { + Type: TypeInt, + Optional: true, + }, + }, + }, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.#": "1", + "foo.0.bar": "abc", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{ + "bar": "abcdefg", + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "abc", + New: "abcdefg", + }, + }, + }, + Key: "foo.0.bar", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "abc", + New: "abcdefg", + RequiresNew: true, + }, + }, + }, + }, + } + 
	// Run each ForceNew case: apply ForceNew to the target key, re-run the
	// schema diff for every key the customization touched, and compare the
	// mutated diff against the expectation.
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			m := schemaMap(tc.Schema)
			d := newResourceDiff(m, tc.Config, tc.State, tc.Diff)
			err := d.ForceNew(tc.Key)
			switch {
			case err != nil && !tc.ExpectedError:
				t.Fatalf("bad: %s", err)
			case err == nil && tc.ExpectedError:
				t.Fatalf("Expected error, got none")
			case err != nil && tc.ExpectedError:
				// Expected failure; nothing further to verify.
				return
			}
			// Re-diff the updated keys so the customization is reflected in
			// tc.Diff before comparison.
			for _, k := range d.UpdatedKeys() {
				if err := m.diff(k, m[k], tc.Diff, d, false); err != nil {
					t.Fatalf("bad: %s", err)
				}
			}
			if !reflect.DeepEqual(tc.Expected, tc.Diff) {
				t.Fatalf("Expected %s, got %s", spew.Sdump(tc.Expected), spew.Sdump(tc.Diff))
			}
		})
	}
}

// TestClear exercises ResourceDiff.Clear, which removes an attribute's entry
// from the diff entirely (only valid for computed keys).
func TestClear(t *testing.T) {
	cases := []resourceDiffTestCase{
		resourceDiffTestCase{
			Name: "basic primitive diff",
			Schema: map[string]*Schema{
				"foo": &Schema{
					Type:     TypeString,
					Optional: true,
					Computed: true,
				},
			},
			State: &tofu.InstanceState{
				Attributes: map[string]string{
					"foo": "bar",
				},
			},
			Config: testConfig(t, map[string]interface{}{
				"foo": "baz",
			}),
			Diff: &tofu.InstanceDiff{
				Attributes: map[string]*tofu.ResourceAttrDiff{
					"foo": &tofu.ResourceAttrDiff{
						Old: "bar",
						New: "baz",
					},
				},
			},
			Key: "foo",
			// Clearing the only changed key leaves an empty diff.
			Expected: &tofu.InstanceDiff{Attributes: map[string]*tofu.ResourceAttrDiff{}},
		},
		resourceDiffTestCase{
			// Clear is restricted to computed keys, like SetNew/ForceNew.
			Name: "non-computed key, should error",
			Schema: map[string]*Schema{
				"foo": &Schema{
					Type:     TypeString,
					Required: true,
				},
			},
			State: &tofu.InstanceState{
				Attributes: map[string]string{
					"foo": "bar",
				},
			},
			Config: testConfig(t, map[string]interface{}{
				"foo": "baz",
			}),
			Diff: &tofu.InstanceDiff{
				Attributes: map[string]*tofu.ResourceAttrDiff{
					"foo": &tofu.ResourceAttrDiff{
						Old: "bar",
						New: "baz",
					},
				},
			},
			Key:           "foo",
			ExpectedError: true,
		},
		resourceDiffTestCase{
			Name: "multi-value, one removed",
			Schema: map[string]*Schema{
				"foo": &Schema{
					Type:     TypeString,
					Optional: true,
					Computed: true,
				},
+ "one": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + "one": "two", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + "one": "three", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "one": &tofu.ResourceAttrDiff{ + Old: "two", + New: "three", + }, + }, + }, + Key: "one", + Expected: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + }, + resourceDiffTestCase{ + Name: "basic sub-block diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + "baz": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo.0.bar": "bar1", + "foo.0.baz": "baz1", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{ + "bar": "bar2", + "baz": "baz1", + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "bar1", + New: "bar2", + }, + }, + }, + Key: "foo.0.bar", + Expected: &tofu.InstanceDiff{Attributes: map[string]*tofu.ResourceAttrDiff{}}, + }, + resourceDiffTestCase{ + Name: "sub-block diff only partial clear", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + "baz": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + State: 
&tofu.InstanceState{ + Attributes: map[string]string{ + "foo.0.bar": "bar1", + "foo.0.baz": "baz1", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": []interface{}{ + map[string]interface{}{ + "bar": "bar2", + "baz": "baz2", + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "bar1", + New: "bar2", + }, + "foo.0.baz": &tofu.ResourceAttrDiff{ + Old: "baz1", + New: "baz2", + }, + }, + }, + Key: "foo.0.bar", + Expected: &tofu.InstanceDiff{Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo.0.baz": &tofu.ResourceAttrDiff{ + Old: "baz1", + New: "baz2", + }, + }}, + }, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + m := schemaMap(tc.Schema) + d := newResourceDiff(m, tc.Config, tc.State, tc.Diff) + err := d.Clear(tc.Key) + switch { + case err != nil && !tc.ExpectedError: + t.Fatalf("bad: %s", err) + case err == nil && tc.ExpectedError: + t.Fatalf("Expected error, got none") + case err != nil && tc.ExpectedError: + return + } + for _, k := range d.UpdatedKeys() { + if err := m.diff(k, m[k], tc.Diff, d, false); err != nil { + t.Fatalf("bad: %s", err) + } + } + if !reflect.DeepEqual(tc.Expected, tc.Diff) { + t.Fatalf("Expected %s, got %s", spew.Sdump(tc.Expected), spew.Sdump(tc.Diff)) + } + }) + } +} + +func TestGetChangedKeysPrefix(t *testing.T) { + cases := []resourceDiffTestCase{ + resourceDiffTestCase{ + Name: "basic primitive diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "foo": "baz", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + }, + }, + Key: "foo", + ExpectedKeys: []string{ + "foo", + }, + }, + resourceDiffTestCase{ + Name: 
"nested field filtering", + Schema: map[string]*Schema{ + "testfield": &Schema{ + Type: TypeString, + Required: true, + }, + "foo": &Schema{ + Type: TypeList, + Required: true, + MaxItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Optional: true, + }, + "baz": { + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "testfield": "blablah", + "foo.#": "1", + "foo.0.bar": "abc", + "foo.0.baz": "xyz", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "testfield": "modified", + "foo": []interface{}{ + map[string]interface{}{ + "bar": "abcdefg", + "baz": "changed", + }, + }, + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "testfield": &tofu.ResourceAttrDiff{ + Old: "blablah", + New: "modified", + }, + "foo.0.bar": &tofu.ResourceAttrDiff{ + Old: "abc", + New: "abcdefg", + }, + "foo.0.baz": &tofu.ResourceAttrDiff{ + Old: "xyz", + New: "changed", + }, + }, + }, + Key: "foo", + ExpectedKeys: []string{ + "foo.0.bar", + "foo.0.baz", + }, + }, + } + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + m := schemaMap(tc.Schema) + d := newResourceDiff(m, tc.Config, tc.State, tc.Diff) + keys := d.GetChangedKeysPrefix(tc.Key) + + for _, k := range d.UpdatedKeys() { + if err := m.diff(k, m[k], tc.Diff, d, false); err != nil { + t.Fatalf("bad: %s", err) + } + } + + sort.Strings(keys) + + if !reflect.DeepEqual(tc.ExpectedKeys, keys) { + t.Fatalf("Expected %s, got %s", spew.Sdump(tc.ExpectedKeys), spew.Sdump(keys)) + } + }) + } +} + +func TestResourceDiffGetOkExists(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Config *tofu.ResourceConfig + Diff *tofu.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + /* + * Primitives + */ + { + Name: "string-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + 
Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: true, + }, + + { + Name: "string-computed-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + { + Name: "string-optional-computed-nil-diff", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Config: nil, + + Diff: nil, + + Key: "availability_zone", + Value: "", + Ok: false, + }, + + /* + * Lists + */ + + { + Name: "list-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + Config: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + /* + * Map + */ + + { + Name: "map-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeMap, + Optional: true, + }, + }, + + State: nil, + Config: nil, + + Diff: nil, + + Key: "ports", + Value: map[string]interface{}{}, + Ok: false, + }, + + /* + * Set + */ + + { + Name: "set-optional", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + Config: nil, + + Diff: nil, + + Key: "ports", + Value: []interface{}{}, + Ok: false, + }, + + { + Name: "set-optional-key", + Schema: map[string]*Schema{ + "ports": { + Type: TypeSet, + Optional: 
true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { return a.(int) }, + }, + }, + + State: nil, + Config: nil, + + Diff: nil, + + Key: "ports.0", + Value: 0, + Ok: false, + }, + + { + Name: "bool-literal-empty", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Config: nil, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "", + }, + }, + }, + + Key: "availability_zone", + Value: false, + Ok: true, + }, + + { + Name: "bool-literal-set", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + New: "true", + }, + }, + }, + + Key: "availability_zone", + Value: true, + Ok: true, + }, + { + Name: "value-in-config", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "foo", + }), + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + + Key: "availability_zone", + Value: "foo", + Ok: true, + }, + { + Name: "new-value-in-config", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + + State: nil, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "foo", + }), + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + + Key: "availability_zone", + Value: "foo", + Ok: true, + }, + { + Name: "optional-computed-value-in-config", + Schema: map[string]*Schema{ + 
"availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "bar", + }), + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "foo", + New: "bar", + }, + }, + }, + + Key: "availability_zone", + Value: "bar", + Ok: true, + }, + { + Name: "removed-value", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "foo", + New: "", + NewRemoved: true, + }, + }, + }, + + Key: "availability_zone", + Value: "", + Ok: true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + d := newResourceDiff(tc.Schema, tc.Config, tc.State, tc.Diff) + + v, ok := d.GetOkExists(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad %s: \n%#v", tc.Name, v) + } + if ok != tc.Ok { + t.Fatalf("%s: expected ok: %t, got: %t", tc.Name, tc.Ok, ok) + } + }) + } +} + +func TestResourceDiffGetOkExistsSetNew(t *testing.T) { + tc := struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + + Key: "availability_zone", + Value: "foobar", + Ok: true, + } + + d := newResourceDiff(tc.Schema, testConfig(t, map[string]interface{}{}), 
tc.State, tc.Diff) + d.SetNew(tc.Key, tc.Value) + + v, ok := d.GetOkExists(tc.Key) + if s, ok := v.(*Set); ok { + v = s.List() + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("Bad: \n%#v", v) + } + if ok != tc.Ok { + t.Fatalf("expected ok: %t, got: %t", tc.Ok, ok) + } +} + +func TestResourceDiffGetOkExistsSetNewComputed(t *testing.T) { + tc := struct { + Schema map[string]*Schema + State *tofu.InstanceState + Diff *tofu.InstanceDiff + Key string + Value interface{} + Ok bool + }{ + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + + Key: "availability_zone", + Value: "foobar", + Ok: false, + } + + d := newResourceDiff(tc.Schema, testConfig(t, map[string]interface{}{}), tc.State, tc.Diff) + d.SetNewComputed(tc.Key) + + _, ok := d.GetOkExists(tc.Key) + + if ok != tc.Ok { + t.Fatalf("expected ok: %t, got: %t", tc.Ok, ok) + } +} + +func TestResourceDiffNewValueKnown(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Config *tofu.ResourceConfig + Diff *tofu.InstanceDiff + Key string + Expected bool + }{ + { + Name: "in config, no state", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + State: nil, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "foo", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + Key: "availability_zone", + Expected: true, + }, + { + Name: "in config, has state, no diff", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + 
"availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "foo", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: true, + }, + { + Name: "computed attribute, in state, no diff", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: true, + }, + { + Name: "optional and computed attribute, in state, no config", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: true, + }, + { + Name: "optional and computed attribute, in state, with config", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{ + "availability_zone": "foo", + }), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: true, + }, + { + Name: "computed value, through config reader", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig( + t, + 
map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + ), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: false, + }, + { + Name: "computed value, through diff reader", + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig( + t, + map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + ), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "foo", + New: "", + NewComputed: true, + }, + }, + }, + Key: "availability_zone", + Expected: false, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + d := newResourceDiff(tc.Schema, tc.Config, tc.State, tc.Diff) + + actual := d.NewValueKnown(tc.Key) + if tc.Expected != actual { + t.Fatalf("%s: expected ok: %t, got: %t", tc.Name, tc.Expected, actual) + } + }) + } +} + +func TestResourceDiffNewValueKnownSetNew(t *testing.T) { + tc := struct { + Schema map[string]*Schema + State *tofu.InstanceState + Config *tofu.ResourceConfig + Diff *tofu.InstanceDiff + Key string + Value interface{} + Expected bool + }{ + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig( + t, + map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + ), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "foo", + New: "", + NewComputed: true, + }, + }, + }, + Key: "availability_zone", + Value: "bar", + Expected: true, + } + + d := newResourceDiff(tc.Schema, tc.Config, 
tc.State, tc.Diff) + d.SetNew(tc.Key, tc.Value) + + actual := d.NewValueKnown(tc.Key) + if tc.Expected != actual { + t.Fatalf("expected ok: %t, got: %t", tc.Expected, actual) + } +} + +func TestResourceDiffNewValueKnownSetNewComputed(t *testing.T) { + tc := struct { + Schema map[string]*Schema + State *tofu.InstanceState + Config *tofu.ResourceConfig + Diff *tofu.InstanceDiff + Key string + Expected bool + }{ + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Computed: true, + }, + }, + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + Config: testConfig(t, map[string]interface{}{}), + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + }, + Key: "availability_zone", + Expected: false, + } + + d := newResourceDiff(tc.Schema, tc.Config, tc.State, tc.Diff) + d.SetNewComputed(tc.Key) + + actual := d.NewValueKnown(tc.Key) + if tc.Expected != actual { + t.Fatalf("expected ok: %t, got: %t", tc.Expected, actual) + } +} diff --git a/pkg/legacy/helper/schema/resource_importer.go b/pkg/legacy/helper/schema/resource_importer.go new file mode 100644 index 00000000000..7ae29ae634c --- /dev/null +++ b/pkg/legacy/helper/schema/resource_importer.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +// ResourceImporter defines how a resource is imported in OpenTofu. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in OpenTofu is the process of taking an already-created +// resource and bringing it under OpenTofu management. This can include +// updating OpenTofu state, generating OpenTofu configuration, etc. 
type ResourceImporter struct {
	// The functions below must all be implemented for importing to work.

	// State is called to convert an ID to one or more InstanceState to
	// insert into the OpenTofu state. If this isn't specified, then
	// the ID is passed straight through.
	State StateFunc
}

// StateFunc is the function called to import a resource into the
// OpenTofu state. It is given a ResourceData with only ID set. This
// ID is going to be an arbitrary value given by the user and may not map
// directly to the ID format that the resource expects, so that should
// be validated.
//
// This should return a slice of ResourceData that turn into the state
// that was imported. This might be as simple as returning only the argument
// that was given to the function. In other cases (such as AWS security groups),
// an import may fan out to multiple resources and this will have to return
// multiple.
//
// To create the ResourceData structures for other resource types (if
// you have to), instantiate your resource and call the Data function.
type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)

// InternalValidate should be called to validate the structure of this
// importer. This should be called in a unit test.
//
// Resource.InternalValidate() will automatically call this, so this doesn't
// need to be called manually. Further, Resource.InternalValidate() is
// automatically called by Provider.InternalValidate(), so you only need
// to internal validate the provider.
//
// NOTE(review): there are currently no structural constraints on a
// ResourceImporter, so this intentionally always succeeds; it exists to
// keep the validation call chain uniform across SDK types.
func (r *ResourceImporter) InternalValidate() error {
	return nil
}

// ImportStatePassthrough is an implementation of StateFunc that can be
// used to simply pass the ID directly through. This should be used only
// in the case that an ID-only refresh is possible.
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/pkg/legacy/helper/schema/resource_test.go b/pkg/legacy/helper/schema/resource_test.go new file mode 100644 index 00000000000..0784f8350c2 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_test.go @@ -0,0 +1,1692 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func TestResourceApply_create(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *tofu.InstanceState = nil + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_Timeout_state(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: 
DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *tofu.InstanceState = nil + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(d); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + TimeoutKey: expectedForValues(40, 0, 80, 40, 0), + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal in Timeout State:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } +} + +// Regression test to ensure that the meta data is read from state, if a +// resource is destroyed and the timeout meta is no longer available from the +// config +func TestResourceApply_Timeout_destroy(t *testing.T) { + timeouts := &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: timeouts, + } + + called := false + var delTimeout time.Duration + r.Delete = func(d *ResourceData, m interface{}) error { + delTimeout = d.Timeout(TimeoutDelete) + called = true + return nil + } + + s := &tofu.InstanceState{ + ID: "bar", + } + + if err := timeouts.StateEncode(s); err != 
nil { + t.Fatalf("Error encoding to state: %s", err) + } + + d := &tofu.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("delete not called") + } + + if *timeouts.Delete != delTimeout { + t.Fatalf("timeouts don't match, expected (%#v), got (%#v)", timeouts.Delete, delTimeout) + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceDiff_Timeout_diff(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + r.Create = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + return nil + } + + conf := tofu.NewResourceConfigRaw( + map[string]interface{}{ + "foo": 42, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2h", + }, + }, + ) + var s *tofu.InstanceState + + actual, err := r.Diff(s, conf, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(120 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(expected); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal Meta in Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } +} + +func TestResourceDiff_CustomizeFunc(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + var called bool + + r.CustomizeDiff = func(d *ResourceDiff, m interface{}) error { + 
called = true + return nil + } + + conf := tofu.NewResourceConfigRaw( + map[string]interface{}{ + "foo": 42, + }, + ) + + var s *tofu.InstanceState + + _, err := r.Diff(s, conf, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatalf("diff customization not called") + } +} + +func TestResourceApply_destroy(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Delete = func(d *ResourceData, m interface{}) error { + called = true + return nil + } + + s := &tofu.InstanceState{ + ID: "bar", + } + + d := &tofu.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("delete not called") + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_destroyCreate(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + } + + change := false + r.Create = func(d *ResourceData, m interface{}) error { + change = d.HasChange("tags") + d.SetId("foo") + return nil + } + r.Delete = func(d *ResourceData, m interface{}) error { + return nil + } + + var s *tofu.InstanceState = &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "bar", + "tags.Name": "foo", + }, + } + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + RequiresNew: true, + }, + "tags.Name": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "foo", + RequiresNew: true, + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !change { + t.Fatal("should have change") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + 
"tags.%": "1", + "tags.Name": "foo", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_destroyPartial(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + SchemaVersion: 3, + } + + r.Delete = func(d *ResourceData, m interface{}) error { + d.Set("foo", 42) + return fmt.Errorf("some error") + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &tofu.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err == nil { + t.Fatal("should error") + } + + expected := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "3", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("expected:\n%#v\n\ngot:\n%#v", expected, actual) + } +} + +func TestResourceApply_update(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Update = func(d *ResourceData, m interface{}) error { + d.Set("foo", 42) + return nil + } + + s := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "13", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_updateNoCallback(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Update = nil + + s := &tofu.InstanceState{ + ID: "foo", 
+ Attributes: map[string]string{ + "foo": "12", + }, + } + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "13", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err == nil { + t.Fatal("should error") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "12", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceApply_isNewResource(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + } + + updateFunc := func(d *ResourceData, m interface{}) error { + d.Set("foo", "updated") + if d.IsNewResource() { + d.Set("foo", "new-resource") + } + return nil + } + r.Create = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + d.Set("foo", "created") + return updateFunc(d, m) + } + r.Update = updateFunc + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "bla-blah", + }, + }, + } + + // positive test + var s *tofu.InstanceState = nil + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "new-resource", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("actual: %#v\nexpected: %#v", + actual, expected) + } + + // negative test + s = &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "new-resource", + }, + } + + actual, err = r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected = &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "updated", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("actual: %#v\nexpected: %#v", + actual, expected) + } +} + +func 
TestResourceInternalValidate(t *testing.T) { + cases := []struct { + In *Resource + Writable bool + Err bool + }{ + 0: { + nil, + true, + true, + }, + + // No optional and no required + 1: { + &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + Required: true, + }, + }, + }, + true, + true, + }, + + // Update undefined for non-ForceNew field + 2: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "boo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + // Update defined for ForceNew field + 3: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + true, + true, + }, + + // non-writable doesn't need Update, Create or Delete + 4: { + &Resource{ + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + false, + false, + }, + + // non-writable *must not* have Create + 5: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + false, + true, + }, + + // writable must have Read + 6: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + // writable must have Delete + 7: { + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d 
*ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + 8: { // Reserved name at root should be disallowed + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "count": { + Type: TypeInt, + Optional: true, + }, + }, + }, + true, + true, + }, + + 9: { // Reserved name at nested levels should be allowed + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "parent_list": &Schema{ + Type: TypeString, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "provisioner": { + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + true, + false, + }, + + 10: { // Provider reserved name should be allowed in resource + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "alias": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + true, + false, + }, + + 11: { // ID should be allowed in data source + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "id": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + false, + false, + }, 
+ + 12: { // Deprecated ID should be allowed in resource + &Resource{ + Create: func(d *ResourceData, meta interface{}) error { return nil }, + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Update: func(d *ResourceData, meta interface{}) error { return nil }, + Delete: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "id": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "Use x_id instead", + }, + }, + }, + true, + false, + }, + + 13: { // non-writable must not define CustomizeDiff + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + CustomizeDiff: func(*ResourceDiff, interface{}) error { return nil }, + }, + false, + true, + }, + 14: { // Deprecated resource + &Resource{ + Read: func(d *ResourceData, meta interface{}) error { return nil }, + Schema: map[string]*Schema{ + "goo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + DeprecationMessage: "This resource has been deprecated.", + }, + true, + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + sm := schemaMap{} + if tc.In != nil { + sm = schemaMap(tc.In.Schema) + } + + err := tc.In.InternalValidate(sm, tc.Writable) + if err != nil && !tc.Err { + t.Fatalf("%d: expected validation to pass: %s", i, err) + } + if err == nil && tc.Err { + t.Fatalf("%d: expected validation to fail", i) + } + }) + } +} + +func TestResourceRefresh(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + if m != 42 { + return fmt.Errorf("meta not passed") + } + + return d.Set("foo", d.Get("foo").(int)+1) + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + expected := 
&tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "foo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_blankId(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + return nil + } + + s := &tofu.InstanceState{ + ID: "", + Attributes: map[string]string{}, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_delete(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + d.SetId("") + return nil + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_existsError(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Exists = func(*ResourceData, interface{}) (bool, error) { + return false, fmt.Errorf("error") + } + + r.Read = func(d *ResourceData, m interface{}) error { + panic("shouldn't be called") + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err == nil { + t.Fatalf("should error") + } + if !reflect.DeepEqual(actual, s) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceRefresh_noExists(t *testing.T) 
{ + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Exists = func(*ResourceData, interface{}) (bool, error) { + return false, nil + } + + r.Read = func(d *ResourceData, m interface{}) error { + panic("shouldn't be called") + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "12", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + if actual != nil { + t.Fatalf("should have no state") + } +} + +func TestResourceRefresh_needsMigration(t *testing.T) { + // Schema v2 it deals only in newfoo, which tracks foo as an int + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + // Real state migration functions will probably switch on this value, + // but we'll just assert on it for now. 
+ if v != 1 { + t.Fatalf("Expected StateSchemaVersion to be 1, got %d", v) + } + + if meta != 42 { + t.Fatal("Expected meta to be passed through to the migration function") + } + + oldfoo, err := strconv.ParseFloat(s.Attributes["oldfoo"], 64) + if err != nil { + t.Fatalf("err: %#v", err) + } + s.Attributes["newfoo"] = strconv.Itoa(int(oldfoo * 10)) + delete(s.Attributes, "oldfoo") + + return s, nil + } + + // State is v1 and deals in oldfoo, which tracked foo as a float at 1/10th + // the scale of newfoo + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "1.2", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + } + + actual, err := r.Refresh(s, 42) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_noMigrationNeeded(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + t.Fatal("Migrate function shouldn't be called!") + return nil, nil + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "newfoo": "12", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + actual, err := r.Refresh(s, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + 
+ if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_stateSchemaVersionUnset(t *testing.T) { + r := &Resource{ + // Version 1 > Version 0 + SchemaVersion: 1, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("newfoo", d.Get("newfoo").(int)+1) + } + + r.MigrateState = func( + v int, + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + s.Attributes["newfoo"] = s.Attributes["oldfoo"] + return s, nil + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "12", + }, + } + + actual, err := r.Refresh(s, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "newfoo": "13", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\n\nexpected: %#v\ngot: %#v", expected, actual) + } +} + +func TestResourceRefresh_migrateStateErr(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + t.Fatal("Read should never be called!") + return nil + } + + r.MigrateState = func( + v int, + s *tofu.InstanceState, + meta interface{}) (*tofu.InstanceState, error) { + return s, fmt.Errorf("triggering an error") + } + + s := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "oldfoo": "12", + }, + } + + _, err := r.Refresh(s, nil) + if err == nil { + t.Fatal("expected error, but got none!") + } +} + +func TestResourceData(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + 
state := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + } + + data := r.Data(state) + if data.Id() != "foo" { + t.Fatalf("err: %s", data.Id()) + } + if v := data.Get("foo"); v != 42 { + t.Fatalf("bad: %#v", v) + } + + // Set expectations + state.Meta = map[string]interface{}{ + "schema_version": "2", + } + + result := data.State() + if !reflect.DeepEqual(result, state) { + t.Fatalf("bad: %#v", result) + } +} + +func TestResourceData_blank(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + data := r.Data(nil) + if data.Id() != "" { + t.Fatalf("err: %s", data.Id()) + } + if v := data.Get("foo"); v != 0 { + t.Fatalf("bad: %#v", v) + } +} + +func TestResourceData_timeouts(t *testing.T) { + one := 1 * time.Second + two := 2 * time.Second + three := 3 * time.Second + four := 4 * time.Second + five := 5 * time.Second + + timeouts := &ResourceTimeout{ + Create: &one, + Read: &two, + Update: &three, + Delete: &four, + Default: &five, + } + + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: timeouts, + } + + data := r.Data(nil) + if data.Id() != "" { + t.Fatalf("err: %s", data.Id()) + } + + if !reflect.DeepEqual(timeouts, data.timeouts) { + t.Fatalf("incorrect ResourceData timeouts: %#v\n", *data.timeouts) + } +} + +func TestResource_UpgradeState(t *testing.T) { + // While this really only calls itself and therefore doesn't test any of + // the Resource code directly, it still serves as an example of registering + // a StateUpgrader. 
+ r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + r.StateUpgraders = []StateUpgrader{ + { + Version: 1, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "oldfoo": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + + oldfoo, ok := m["oldfoo"].(float64) + if !ok { + t.Fatalf("expected 1.2, got %#v", m["oldfoo"]) + } + m["newfoo"] = int(oldfoo * 10) + delete(m, "oldfoo") + + return m, nil + }, + }, + } + + oldStateAttrs := map[string]string{ + "id": "bar", + "oldfoo": "1.2", + } + + // convert the legacy flatmap state to the json equivalent + ty := r.StateUpgraders[0].Type + val, err := hcl2shim.HCL2ValueFromFlatmap(oldStateAttrs, ty) + if err != nil { + t.Fatal(err) + } + js, err := ctyjson.Marshal(val, ty) + if err != nil { + t.Fatal(err) + } + + // unmarshal the state using the json default types + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + t.Fatal(err) + } + + actual, err := r.StateUpgraders[0].Upgrade(m, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "id": "bar", + "newfoo": 12, + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %#v\ngot: %#v\n", expected, actual) + } +} + +func TestResource_ValidateUpgradeState(t *testing.T) { + r := &Resource{ + SchemaVersion: 3, + Schema: map[string]*Schema{ + "newfoo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + // check for missing 
type + r.StateUpgraders[0].Type = cty.Type{} + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgrader must have type") + } + r.StateUpgraders[0].Type = cty.Object(map[string]cty.Type{ + "id": cty.String, + }) + + // check for missing Upgrade func + r.StateUpgraders[0].Upgrade = nil + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgrader must have an Upgrade func") + } + r.StateUpgraders[0].Upgrade = func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + } + + // check for skipped version + r.StateUpgraders[0].Version = 0 + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgraders cannot skip versions") + } + + // add the missing version, but fail because it's still out of order + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 1, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("upgraders must be defined in order") + } + + r.StateUpgraders[1], r.StateUpgraders[2] = r.StateUpgraders[2], r.StateUpgraders[1] + if err := r.InternalValidate(nil, true); err != nil { + t.Fatal(err) + } + + // can't add an upgrader for a schema >= the current version + r.StateUpgraders = append(r.StateUpgraders, StateUpgrader{ + Version: 3, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + }), + Upgrade: func(m map[string]interface{}, _ interface{}) (map[string]interface{}, error) { + return m, nil + }, + }) + if err := r.InternalValidate(nil, true); err == nil { + t.Fatal("StateUpgraders cannot 
have a version >= current SchemaVersion") + } +} + +// The legacy provider will need to be able to handle both types of schema +// transformations, which has been retrofitted into the Refresh method. +func TestResource_migrateAndUpgrade(t *testing.T) { + r := &Resource{ + SchemaVersion: 4, + Schema: map[string]*Schema{ + "four": { + Type: TypeInt, + Required: true, + }, + }, + // this MigrateState will take the state to version 2 + MigrateState: func(v int, is *tofu.InstanceState, _ interface{}) (*tofu.InstanceState, error) { + switch v { + case 0: + _, ok := is.Attributes["zero"] + if !ok { + return nil, fmt.Errorf("zero not found in %#v", is.Attributes) + } + is.Attributes["one"] = "1" + delete(is.Attributes, "zero") + fallthrough + case 1: + _, ok := is.Attributes["one"] + if !ok { + return nil, fmt.Errorf("one not found in %#v", is.Attributes) + } + is.Attributes["two"] = "2" + delete(is.Attributes, "one") + default: + return nil, fmt.Errorf("invalid schema version %d", v) + } + return is, nil + }, + } + + r.Read = func(d *ResourceData, m interface{}) error { + return d.Set("four", 4) + } + + r.StateUpgraders = []StateUpgrader{ + { + Version: 2, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "two": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + _, ok := m["two"].(float64) + if !ok { + return nil, fmt.Errorf("two not found in %#v", m) + } + m["three"] = float64(3) + delete(m, "two") + return m, nil + }, + }, + { + Version: 3, + Type: cty.Object(map[string]cty.Type{ + "id": cty.String, + "three": cty.Number, + }), + Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + _, ok := m["three"].(float64) + if !ok { + return nil, fmt.Errorf("three not found in %#v", m) + } + m["four"] = float64(4) + delete(m, "three") + return m, nil + }, + }, + } + + testStates := []*tofu.InstanceState{ + { + ID: "bar", + Attributes: map[string]string{ + "id": 
"bar", + "zero": "0", + }, + Meta: map[string]interface{}{ + "schema_version": "0", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "one": "1", + }, + Meta: map[string]interface{}{ + "schema_version": "1", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "two": "2", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "three": "3", + }, + Meta: map[string]interface{}{ + "schema_version": "3", + }, + }, + { + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "four": "4", + }, + Meta: map[string]interface{}{ + "schema_version": "4", + }, + }, + } + + for i, s := range testStates { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + newState, err := r.Refresh(s, nil) + if err != nil { + t.Fatal(err) + } + + expected := &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "id": "bar", + "four": "4", + }, + Meta: map[string]interface{}{ + "schema_version": "4", + }, + } + + if !cmp.Equal(expected, newState, equateEmpty) { + t.Fatal(cmp.Diff(expected, newState, equateEmpty)) + } + }) + } +} diff --git a/pkg/legacy/helper/schema/resource_timeout.go b/pkg/legacy/helper/schema/resource_timeout.go new file mode 100644 index 00000000000..dde3f1a349a --- /dev/null +++ b/pkg/legacy/helper/schema/resource_timeout.go @@ -0,0 +1,267 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "log" + "time" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/mitchellh/copystructure" +) + +const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" +const TimeoutsConfigKey = "timeouts" + +const ( + TimeoutCreate = "create" + TimeoutRead = "read" + TimeoutUpdate = "update" + TimeoutDelete = "delete" + TimeoutDefault = "default" +) + +func timeoutKeys() []string { + return []string{ + TimeoutCreate, + TimeoutRead, + TimeoutUpdate, + TimeoutDelete, + TimeoutDefault, + } +} + +// could be time.Duration, int64 or float64 +func DefaultTimeout(tx interface{}) *time.Duration { + var td time.Duration + switch raw := tx.(type) { + case time.Duration: + return &raw + case int64: + td = time.Duration(raw) + case float64: + td = time.Duration(int64(raw)) + default: + log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx) + } + return &td +} + +type ResourceTimeout struct { + Create, Read, Update, Delete, Default *time.Duration +} + +// ConfigDecode takes a schema and the configuration (available in Diff) and +// validates, parses the timeouts into `t` +func (t *ResourceTimeout) ConfigDecode(s *Resource, c *tofu.ResourceConfig) error { + if s.Timeouts != nil { + raw, err := copystructure.Copy(s.Timeouts) + if err != nil { + log.Printf("[DEBUG] Error with deep copy: %s", err) + } + *t = *raw.(*ResourceTimeout) + } + + if raw, ok := c.Config[TimeoutsConfigKey]; ok { + var rawTimeouts []map[string]interface{} + switch raw := raw.(type) { + case map[string]interface{}: + rawTimeouts = append(rawTimeouts, raw) + case []map[string]interface{}: + rawTimeouts = raw + case string: + if raw == hcl2shim.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil + } else { + log.Printf("[ERROR] Invalid timeout value: %q", raw) + return fmt.Errorf("Invalid Timeout value found") + } 
+ case []interface{}: + for _, r := range raw { + if rMap, ok := r.(map[string]interface{}); ok { + rawTimeouts = append(rawTimeouts, rMap) + } else { + // Go will not allow a fallthrough + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + } + default: + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + + for _, timeoutValues := range rawTimeouts { + for timeKey, timeValue := range timeoutValues { + // validate that we're dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break + } + } + + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } + + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %w", timeKey, err) + } + + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + timeout = t.Update + case TimeoutRead: + timeout = t.Read + case TimeoutDelete: + timeout = t.Delete + case TimeoutDefault: + timeout = t.Default + } + + // If the resource has not delcared this in the definition, then error + // with an unsupported message + if timeout == nil { + return unsupportedTimeoutKeyError(timeKey) + } + + *timeout = rt + } + return nil + } + } + + return nil +} + +func unsupportedTimeoutKeyError(key string) error { + return fmt.Errorf("Timeout Key (%s) is not supported", key) +} + +// DiffEncode, StateEncode, and MetaDecode are analogous to the Go stdlib JSONEncoder +// interface: they encode/decode a timeouts struct from an instance diff, which is +// where the timeout data is stored after a diff to pass into Apply. 
+// +// StateEncode encodes the timeout into the ResourceData's InstanceState for +// saving to state +func (t *ResourceTimeout) DiffEncode(id *tofu.InstanceDiff) error { + return t.metaEncode(id) +} + +func (t *ResourceTimeout) StateEncode(is *tofu.InstanceState) error { + return t.metaEncode(is) +} + +// metaEncode encodes the ResourceTimeout into a map[string]interface{} format +// and stores it in the Meta field of the interface it's given. +// Assumes the interface is either *tofu.InstanceState or +// *tofu.InstanceDiff, returns an error otherwise +func (t *ResourceTimeout) metaEncode(ids interface{}) error { + m := make(map[string]interface{}) + + if t.Create != nil { + m[TimeoutCreate] = t.Create.Nanoseconds() + } + if t.Read != nil { + m[TimeoutRead] = t.Read.Nanoseconds() + } + if t.Update != nil { + m[TimeoutUpdate] = t.Update.Nanoseconds() + } + if t.Delete != nil { + m[TimeoutDelete] = t.Delete.Nanoseconds() + } + if t.Default != nil { + m[TimeoutDefault] = t.Default.Nanoseconds() + // for any key above that is nil, if default is specified, we need to + // populate it with the default + for _, k := range timeoutKeys() { + if _, ok := m[k]; !ok { + m[k] = t.Default.Nanoseconds() + } + } + } + + // only add the Timeout to the Meta if we have values + if len(m) > 0 { + switch instance := ids.(type) { + case *tofu.InstanceDiff: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + case *tofu.InstanceState: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + default: + return fmt.Errorf("Error matching type for Diff Encode") + } + } + + return nil +} + +func (t *ResourceTimeout) StateDecode(id *tofu.InstanceState) error { + return t.metaDecode(id) +} +func (t *ResourceTimeout) DiffDecode(is *tofu.InstanceDiff) error { + return t.metaDecode(is) +} + +func (t *ResourceTimeout) metaDecode(ids interface{}) error { + var rawMeta interface{} + 
var ok bool + switch rawInstance := ids.(type) { + case *tofu.InstanceDiff: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + case *tofu.InstanceState: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + default: + return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids) + } + + times := rawMeta.(map[string]interface{}) + if len(times) == 0 { + return nil + } + + if v, ok := times[TimeoutCreate]; ok { + t.Create = DefaultTimeout(v) + } + if v, ok := times[TimeoutRead]; ok { + t.Read = DefaultTimeout(v) + } + if v, ok := times[TimeoutUpdate]; ok { + t.Update = DefaultTimeout(v) + } + if v, ok := times[TimeoutDelete]; ok { + t.Delete = DefaultTimeout(v) + } + if v, ok := times[TimeoutDefault]; ok { + t.Default = DefaultTimeout(v) + } + + return nil +} diff --git a/pkg/legacy/helper/schema/resource_timeout_test.go b/pkg/legacy/helper/schema/resource_timeout_test.go new file mode 100644 index 00000000000..208da469835 --- /dev/null +++ b/pkg/legacy/helper/schema/resource_timeout_test.go @@ -0,0 +1,381 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { + cases := []struct { + Name string + // what the resource has defined in source + ResourceDefaultTimeout *ResourceTimeout + // configuration provider by user in tf file + Config map[string]interface{} + // what we expect the parsed ResourceTimeout to be + Expected *ResourceTimeout + // Should we have an error (key not defined in source) + ShouldErr bool + }{ + { + Name: "Source does not define 'delete' key", + ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 0), + Config: expectedConfigForValues(2, 0, 0, 1, 0), + Expected: timeoutForValues(10, 0, 5, 0, 0), + ShouldErr: true, + }, + { + Name: "Config overrides create", + ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 0), + Config: expectedConfigForValues(2, 0, 7, 0, 0), + Expected: timeoutForValues(2, 0, 7, 0, 0), + ShouldErr: false, + }, + { + Name: "Config overrides create, default provided. 
Should still have zero values", + ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 3), + Config: expectedConfigForValues(2, 0, 7, 0, 0), + Expected: timeoutForValues(2, 0, 7, 0, 3), + ShouldErr: false, + }, + { + Name: "Use something besides 'minutes'", + ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 3), + Config: map[string]interface{}{ + "create": "2h", + }, + Expected: timeoutForValues(120, 0, 5, 0, 3), + ShouldErr: false, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d-%s", i, c.Name), func(t *testing.T) { + r := &Resource{ + Timeouts: c.ResourceDefaultTimeout, + } + + conf := tofu.NewResourceConfigRaw( + map[string]interface{}{ + "foo": "bar", + TimeoutsConfigKey: c.Config, + }, + ) + + timeout := &ResourceTimeout{} + decodeErr := timeout.ConfigDecode(r, conf) + if c.ShouldErr { + if decodeErr == nil { + t.Fatalf("ConfigDecode case (%d): Expected bad timeout key: %s", i, decodeErr) + } + // should error, err was not nil, continue + return + } else { + if decodeErr != nil { + // should not error, error was not nil, fatal + t.Fatalf("decodeError was not nil: %s", decodeErr) + } + } + + if !reflect.DeepEqual(c.Expected, timeout) { + t.Fatalf("ConfigDecode match error case (%d).\nExpected:\n%#v\nGot:\n%#v", i, c.Expected, timeout) + } + }) + } +} + +func TestResourceTimeout_ConfigDecode(t *testing.T) { + r := &Resource{ + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + Update: DefaultTimeout(5 * time.Minute), + }, + } + + c := tofu.NewResourceConfigRaw( + map[string]interface{}{ + "foo": "bar", + TimeoutsConfigKey: map[string]interface{}{ + "create": "2m", + "update": "1m", + }, + }, + ) + + timeout := &ResourceTimeout{} + err := timeout.ConfigDecode(r, c) + if err != nil { + t.Fatalf("Expected good timeout returned:, %s", err) + } + + expected := &ResourceTimeout{ + Create: DefaultTimeout(2 * time.Minute), + Update: DefaultTimeout(1 * time.Minute), + } + + if !reflect.DeepEqual(timeout, expected) { + 
t.Fatalf("bad timeout decode.\nExpected:\n%#v\nGot:\n%#v\n", expected, timeout) + } +} + +func TestResourceTimeout_legacyConfigDecode(t *testing.T) { + r := &Resource{ + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + Update: DefaultTimeout(5 * time.Minute), + }, + } + + c := tofu.NewResourceConfigRaw( + map[string]interface{}{ + "foo": "bar", + TimeoutsConfigKey: []interface{}{ + map[string]interface{}{ + "create": "2m", + "update": "1m", + }, + }, + }, + ) + + timeout := &ResourceTimeout{} + err := timeout.ConfigDecode(r, c) + if err != nil { + t.Fatalf("Expected good timeout returned:, %s", err) + } + + expected := &ResourceTimeout{ + Create: DefaultTimeout(2 * time.Minute), + Update: DefaultTimeout(1 * time.Minute), + } + + if !reflect.DeepEqual(timeout, expected) { + t.Fatalf("bad timeout decode.\nExpected:\n%#v\nGot:\n%#v\n", expected, timeout) + } +} + +func TestResourceTimeout_DiffEncode_basic(t *testing.T) { + cases := []struct { + Timeout *ResourceTimeout + Expected map[string]interface{} + // Not immediately clear when an error would hit + ShouldErr bool + }{ + // Two fields + { + Timeout: timeoutForValues(10, 0, 5, 0, 0), + Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 5, 0, 0)}, + ShouldErr: false, + }, + // Two fields, one is Default + { + Timeout: timeoutForValues(10, 0, 0, 0, 7), + Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 0, 0, 7)}, + ShouldErr: false, + }, + // All fields + { + Timeout: timeoutForValues(10, 3, 4, 1, 7), + Expected: map[string]interface{}{TimeoutKey: expectedForValues(10, 3, 4, 1, 7)}, + ShouldErr: false, + }, + // No fields + { + Timeout: &ResourceTimeout{}, + Expected: nil, + ShouldErr: false, + }, + } + + for _, c := range cases { + state := &tofu.InstanceDiff{} + err := c.Timeout.DiffEncode(state) + if err != nil && !c.ShouldErr { + t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, state.Meta) + } + + // should maybe just compare 
[TimeoutKey] but for now we're assuming only + // that in Meta + if !reflect.DeepEqual(state.Meta, c.Expected) { + t.Fatalf("Encode not equal, expected:\n%#v\n\ngot:\n%#v\n", c.Expected, state.Meta) + } + } + // same test cases but for InstanceState + for _, c := range cases { + state := &tofu.InstanceState{} + err := c.Timeout.StateEncode(state) + if err != nil && !c.ShouldErr { + t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, state.Meta) + } + + // should maybe just compare [TimeoutKey] but for now we're assuming only + // that in Meta + if !reflect.DeepEqual(state.Meta, c.Expected) { + t.Fatalf("Encode not equal, expected:\n%#v\n\ngot:\n%#v\n", c.Expected, state.Meta) + } + } +} + +func TestResourceTimeout_MetaDecode_basic(t *testing.T) { + cases := []struct { + State *tofu.InstanceDiff + Expected *ResourceTimeout + // Not immediately clear when an error would hit + ShouldErr bool + }{ + // Two fields + { + State: &tofu.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 5, 0, 0)}}, + Expected: timeoutForValues(10, 0, 5, 0, 0), + ShouldErr: false, + }, + // Two fields, one is Default + { + State: &tofu.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 0, 0, 0, 7)}}, + Expected: timeoutForValues(10, 7, 7, 7, 7), + ShouldErr: false, + }, + // All fields + { + State: &tofu.InstanceDiff{Meta: map[string]interface{}{TimeoutKey: expectedForValues(10, 3, 4, 1, 7)}}, + Expected: timeoutForValues(10, 3, 4, 1, 7), + ShouldErr: false, + }, + // No fields + { + State: &tofu.InstanceDiff{}, + Expected: &ResourceTimeout{}, + ShouldErr: false, + }, + } + + for _, c := range cases { + rt := &ResourceTimeout{} + err := rt.DiffDecode(c.State) + if err != nil && !c.ShouldErr { + t.Fatalf("Error, expected:\n%#v\n got:\n%#v\n", c.Expected, rt) + } + + // should maybe just compare [TimeoutKey] but for now we're assuming only + // that in Meta + if !reflect.DeepEqual(rt, c.Expected) { + t.Fatalf("Encode not equal, 
expected:\n%#v\n\ngot:\n%#v\n", c.Expected, rt) + } + } +} + +func timeoutForValues(create, read, update, del, def int) *ResourceTimeout { + rt := ResourceTimeout{} + + if create != 0 { + rt.Create = DefaultTimeout(time.Duration(create) * time.Minute) + } + if read != 0 { + rt.Read = DefaultTimeout(time.Duration(read) * time.Minute) + } + if update != 0 { + rt.Update = DefaultTimeout(time.Duration(update) * time.Minute) + } + if del != 0 { + rt.Delete = DefaultTimeout(time.Duration(del) * time.Minute) + } + + if def != 0 { + rt.Default = DefaultTimeout(time.Duration(def) * time.Minute) + } + + return &rt +} + +// Generates a ResourceTimeout struct that should reflect the +// d.Timeout("key") results +func expectedTimeoutForValues(create, read, update, del, def int) *ResourceTimeout { + rt := ResourceTimeout{} + + defaultValues := []*int{&create, &read, &update, &del, &def} + for _, v := range defaultValues { + if *v == 0 { + *v = 20 + } + } + + if create != 0 { + rt.Create = DefaultTimeout(time.Duration(create) * time.Minute) + } + if read != 0 { + rt.Read = DefaultTimeout(time.Duration(read) * time.Minute) + } + if update != 0 { + rt.Update = DefaultTimeout(time.Duration(update) * time.Minute) + } + if del != 0 { + rt.Delete = DefaultTimeout(time.Duration(del) * time.Minute) + } + + if def != 0 { + rt.Default = DefaultTimeout(time.Duration(def) * time.Minute) + } + + return &rt +} + +func expectedForValues(create, read, update, del, def int) map[string]interface{} { + ex := make(map[string]interface{}) + + if create != 0 { + ex["create"] = DefaultTimeout(time.Duration(create) * time.Minute).Nanoseconds() + } + if read != 0 { + ex["read"] = DefaultTimeout(time.Duration(read) * time.Minute).Nanoseconds() + } + if update != 0 { + ex["update"] = DefaultTimeout(time.Duration(update) * time.Minute).Nanoseconds() + } + if del != 0 { + ex["delete"] = DefaultTimeout(time.Duration(del) * time.Minute).Nanoseconds() + } + + if def != 0 { + defNano := 
DefaultTimeout(time.Duration(def) * time.Minute).Nanoseconds() + ex["default"] = defNano + + for _, k := range timeoutKeys() { + if _, ok := ex[k]; !ok { + ex[k] = defNano + } + } + } + + return ex +} + +func expectedConfigForValues(create, read, update, delete, def int) map[string]interface{} { + ex := make(map[string]interface{}, 0) + + if create != 0 { + ex["create"] = fmt.Sprintf("%dm", create) + } + if read != 0 { + ex["read"] = fmt.Sprintf("%dm", read) + } + if update != 0 { + ex["update"] = fmt.Sprintf("%dm", update) + } + if delete != 0 { + ex["delete"] = fmt.Sprintf("%dm", delete) + } + + if def != 0 { + ex["default"] = fmt.Sprintf("%dm", def) + } + return ex +} diff --git a/pkg/legacy/helper/schema/schema.go b/pkg/legacy/helper/schema/schema.go new file mode 100644 index 00000000000..4e19da3fca5 --- /dev/null +++ b/pkg/legacy/helper/schema/schema.go @@ -0,0 +1,1859 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// schema is a high-level framework for easily writing new providers +// for OpenTofu. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. 
+package schema + +import ( + "context" + "fmt" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/go-viper/mapstructure/v2" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/mitchellh/copystructure" +) + +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + +// type used for schema package context keys +type contextKey string + +var ( + protoVersionMu sync.Mutex + protoVersion5 = false +) + +func isProto5() bool { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + return protoVersion5 + +} + +// SetProto5 enables a feature flag for any internal changes required required +// to work with the new plugin protocol. This should not be called by +// provider. +func SetProto5() { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + protoVersion5 = true +} + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. 
+ // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. 
If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs or asking for user + // input. It should be relatively short (a few sentences max) and should + // be formatted to fit a CLI. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). + // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. 
Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // PromoteSingle originally allowed for a single element to be assigned + // where a primitive list was expected, but this no longer works from + // Terraform v0.12 onwards (Terraform Core will require a list to be set + // regardless of what this is set to) and so only applies to Terraform v0.11 + // and earlier, and so should be used only to retain this functionality + // for those still using v0.11 with a provider that formerly used this. + PromoteSingle bool + + // The following fields are only valid for a TypeSet type. + // + // Set defines a function to determine the unique ID of an item so that + // a proper set can be built. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // NOTE: This currently does not work. + ComputedWhen []string + + // ConflictsWith is a set of schema keys that conflict with this schema. 
+ // This will only check that they're set in the _config_. This will not + // raise an error for a malfunctioning resource that sets a conflicting + // key. + ConflictsWith []string + + // When Deprecated is set, this attribute is deprecated. + // + // A deprecated field still works, but will probably stop working in near + // future. This string is the message shown to the user with instructions on + // how to address the deprecation. + Deprecated string + + // When Removed is set, this attribute has been removed from the schema + // + // Removed attributes can be left in the Schema to generate informative error + // messages for the user when they show up in resource configurations. + // This string is the message shown to the user with instructions on + // what do to about the removed attribute. + Removed string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. + // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + ValidateFunc SchemaValidateFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // logs or regular output. It should be used for passwords or other + // secret fields. Future versions of OpenTofu may encrypt these + // values. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. 
+type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. +type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. 
type SchemaValidateFunc func(interface{}, string) ([]string, []error)

// GoString implements fmt.GoStringer so that %#v prints the dereferenced
// schema contents instead of a bare pointer address.
func (s *Schema) GoString() string {
	return fmt.Sprintf("*%#v", *s)
}

// DefaultValue returns a default value for this schema by either reading
// Default or evaluating DefaultFunc. If neither of these are defined,
// returns nil. Default takes precedence over DefaultFunc.
func (s *Schema) DefaultValue() (interface{}, error) {
	if s.Default != nil {
		return s.Default, nil
	}

	if s.DefaultFunc != nil {
		defaultValue, err := s.DefaultFunc()
		if err != nil {
			return nil, fmt.Errorf("error loading default: %w", err)
		}
		return defaultValue, nil
	}

	return nil, nil
}

// ZeroValue returns a zero value for the schema. For sets this is an empty
// *Set carrying the correct hash function; for all other types it is the
// type's plain zero value.
func (s *Schema) ZeroValue() interface{} {
	// If it's a set then we'll do a bit of extra work to provide the
	// right hashing function in our empty value.
	if s.Type == TypeSet {
		setFunc := s.Set
		if setFunc == nil {
			// Default set function uses the schema to hash the whole value
			elem := s.Elem
			switch t := elem.(type) {
			case *Schema:
				setFunc = HashSchema(t)
			case *Resource:
				setFunc = HashResource(t)
			default:
				panic("invalid set element type")
			}
		}
		return &Set{F: setFunc}
	} else {
		return s.Type.Zero()
	}
}

// finalizeDiff normalizes a raw attribute diff against this schema: bool
// strings are canonicalized, Computed attributes without a new value are
// marked NewComputed, ForceNew schemas set RequiresNew, and Sensitive is
// propagated. A nil return means the diff should be dropped entirely.
// `customized` reports whether the value came from a CustomizeDiff override
// (see the FIXME below for why that matters).
func (s *Schema) finalizeDiff(d *tofu.ResourceAttrDiff, customized bool) *tofu.ResourceAttrDiff {
	if d == nil {
		return d
	}

	if s.Type == TypeBool {
		// State/config may encode bools as "0"/"1"; canonicalize to
		// "false"/"true" so comparisons behave.
		normalizeBoolString := func(s string) string {
			switch s {
			case "0":
				return "false"
			case "1":
				return "true"
			}
			return s
		}
		d.Old = normalizeBoolString(d.Old)
		d.New = normalizeBoolString(d.New)
	}

	if s.Computed && !d.NewRemoved && d.New == "" {
		// Computed attribute without a new value set
		d.NewComputed = true
	}

	if s.ForceNew {
		// ForceNew, mark that this field is requiring new under the
		// following conditions, explained below:
		//
		//   * Old != New - There is a change in value. This field
		//     is therefore causing a new resource.
		//
		//   * NewComputed - This field is being computed, hence a
		//     potential change in value, mark as causing a new resource.
		d.RequiresNew = d.Old != d.New || d.NewComputed
	}

	if d.NewRemoved {
		return d
	}

	if s.Computed {
		// FIXME: This is where the customized bool from getChange finally
		// comes into play. It allows the previously incorrect behavior
		// of an empty string being used as "unset" when the value is
		// computed. This should be removed once we can properly
		// represent an unset/nil value from the configuration.
		if !customized {
			if d.Old != "" && d.New == "" {
				// This is a computed value with an old value set already,
				// just let it go.
				return nil
			}
		}

		if d.New == "" && !d.NewComputed {
			// Computed attribute without a new value set
			d.NewComputed = true
		}
	}

	if s.Sensitive {
		// Set the Sensitive flag so output is hidden in the UI
		d.Sensitive = true
	}

	return d
}

// InternalMap is used to aid in the transition to the new schema types and
// protocol. The name is not meant to convey any usefulness, as this is not to
// be used directly by any providers.
type InternalMap = schemaMap

// schemaMap is a wrapper that adds nice functions on top of schemas.
type schemaMap map[string]*Schema

// panicOnError reports whether the PanicOnErr environment variable is set,
// which makes ResourceData panic instead of returning errors (debug aid).
func (m schemaMap) panicOnError() bool {
	if os.Getenv(PanicOnErr) != "" {
		return true
	}
	return false
}

// Data returns a ResourceData for the given schema, state, and diff.
//
// The diff is optional.
func (m schemaMap) Data(
	s *tofu.InstanceState,
	d *tofu.InstanceDiff) (*ResourceData, error) {
	return &ResourceData{
		schema:       m,
		state:        s,
		diff:         d,
		panicOnError: m.panicOnError(),
	}, nil
}

// DeepCopy returns a copy of this schemaMap. The copy can be safely modified
// without affecting the original.
+func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. +func (m schemaMap) Diff( + s *tofu.InstanceState, + c *tofu.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*tofu.InstanceDiff, error) { + result := new(tofu.InstanceDiff) + result.Attributes = make(map[string]*tofu.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(tofu.InstanceDiff) + result2.Attributes = make(map[string]*tofu.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. 
We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Input implements the tofu.ResourceProvider method by asking +// for input for required configuration keys that don't have a value. +func (m schemaMap) Input( + input tofu.UIInput, + c *tofu.ResourceConfig) (*tofu.ResourceConfig, error) { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := m[k] + + // Skip things that don't require config, if that is even valid + // for a provider schema. 
+ // Required XOR Optional must always be true to validate, so we only + // need to check one. + if v.Optional { + continue + } + + // Deprecated fields should never prompt + if v.Deprecated != "" { + continue + } + + // Skip things that have a value of some sort already + if _, ok := c.Raw[k]; ok { + continue + } + + // Skip if it has a default value + defaultValue, err := v.DefaultValue() + if err != nil { + return nil, fmt.Errorf("%s: error loading default: %w", k, err) + } + if defaultValue != nil { + continue + } + + var value interface{} + switch v.Type { + case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: + continue + case TypeString: + value, err = m.inputString(input, k, v) + default: + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) + } + + if err != nil { + return nil, fmt.Errorf( + "%s: %s", k, err) + } + + c.Config[k] = value + } + + return c, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *tofu.ResourceConfig) ([]string, []error) { + return m.validateObject("", m, c) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. 
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
+ if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + for _, key := range v.ConflictsWith { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for _, part := range parts { + // Skip index fields + if _, err := strconv.Atoi(part); err == nil { + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s) at part (%s)", k, key, part) + } + + if subResource, ok := target.Elem.(*Resource); ok { + sm = schemaMap(subResource.Schema) + } + } + if target == nil { + return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) + } + if target.Required { + return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) + } + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + 
case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + // Computed-only field + if v.Computed && !v.Optional { + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + } + + if v.ValidateFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) + } + } + + if v.Deprecated == "" && v.Removed == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. 
+type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *tofu.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(tofu.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*tofu.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. + if !all { + continue + } + + attrV = &tofu.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *tofu.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. 
+ if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &tofu.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). 
+ t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *tofu.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." + + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %w", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %w", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. 
+ if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *tofu.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. 
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. + codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). 
+ t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *tofu.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %w", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %w", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already set up. 
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + diff.Attributes[k] = schema.finalizeDiff( + &tofu.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + + return nil +} + +func (m schemaMap) inputString( + input tofu.UIInput, + k string, + schema *Schema) (interface{}, error) { + result, err := input.Input(context.Background(), &tofu.InputOpts{ + Id: k, + Query: k, + Description: schema.Description, + Default: schema.InputDefault, + }) + + return result, err +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *tofu.ResourceConfig) ([]string, []error) { + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return nil, []error{fmt.Errorf( + "%q, error loading default: %w", k, err)} + } + + // We're okay as long as we had a value set + ok = raw != nil + } + if !ok { + if schema.Required { + return nil, []error{fmt.Errorf( + "%q: required field is not set", k)} + } + + return nil, nil + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return nil, []error{fmt.Errorf( + "%q: this field cannot be set", k)} + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. 
+ if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil + } + return nil, nil + } + + err := m.validateConflictingAttributes(k, schema, c) + if err != nil { + return nil, []error{err} + } + + return m.validateType(k, raw, schema, c) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func (m schemaMap) validateConflictingAttributes( + k string, + schema *Schema, + c *tofu.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. + continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *tofu.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + // If we support promotion and the raw value isn't a slice, wrap + // it in []interface{} and check again. 
+ if schema.PromoteSingle && rawV.Kind() != reflect.Slice { + raw = []interface{}{raw} + rawV = reflect.ValueOf(raw) + } + + if rawV.Kind() != reflect.Slice { + return nil, []error{fmt.Errorf( + "%s: should be a list", k)} + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. + if isProto5() && !isWhollyKnown(raw) { + return nil, nil + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + var ws []string + var es []error + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + var ws2 []string + var es2 []error + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + ws2, es2 = m.validateObject(key, t.Schema, c) + case *Schema: + ws2, es2 = m.validateType(key, raw, t, c) + } + + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) 
		}
	}

	return ws, es
}

// validateMap validates a TypeMap attribute value, which may arrive either
// as a single map or as a slice of maps, returning any warnings and errors
// found.
func (m schemaMap) validateMap(
	k string,
	raw interface{},
	schema *Schema,
	c *tofu.ResourceConfig) ([]string, []error) {
	// first check if the map is wholly unknown
	if s, ok := raw.(string); ok {
		if s == hcl2shim.UnknownVariableValue {
			return nil, nil
		}
	}

	// schemaMap can't validate nil
	if raw == nil {
		return nil, nil
	}
	// We use reflection to verify the slice because you can't
	// cast to []interface{} unless the slice is exactly that type.
	rawV := reflect.ValueOf(raw)
	switch rawV.Kind() {
	case reflect.String:
		// If raw and reified are equal, this is a string and should
		// be rejected.
		reified, reifiedOk := c.Get(k)
		if reifiedOk && raw == reified && !c.IsComputed(k) {
			return nil, []error{fmt.Errorf("%s: should be a map", k)}
		}
		// Otherwise it's likely raw is an interpolation.
		return nil, nil
	case reflect.Map:
	case reflect.Slice:
	default:
		return nil, []error{fmt.Errorf("%s: should be a map", k)}
	}

	// If it is not a slice, validate directly
	if rawV.Kind() != reflect.Slice {
		mapIface := rawV.Interface()
		if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
			return nil, errs
		}
		if schema.ValidateFunc != nil {
			return schema.ValidateFunc(mapIface, k)
		}
		return nil, nil
	}

	// It is a slice, verify that all the elements are maps
	raws := make([]interface{}, rawV.Len())
	for i, _ := range raws {
		raws[i] = rawV.Index(i).Interface()
	}

	for _, raw := range raws {
		v := reflect.ValueOf(raw)
		if v.Kind() != reflect.Map {
			return nil, []error{fmt.Errorf(
				"%s: should be a map", k)}
		}
		mapIface := v.Interface()
		if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
			return nil, errs
		}
	}

	// Merge all element maps into one before handing the result to the
	// user-supplied ValidateFunc, mirroring the flat-map case above.
	if schema.ValidateFunc != nil {
		validatableMap := make(map[string]interface{})
		for _, raw := range raws {
			for k, v := range raw.(map[string]interface{}) {
validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + + return nil, nil +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { + for key, raw := range m { + valueType, err := getValueType(k, schema) + if err != nil { + return nil, []error{err} + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %w", k, key, err)} + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %w", k, key, err)} + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %w", k, key, err)} + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %w", k, key, err)} + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return nil, nil +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. 
	if s, ok := schema.Elem.(*Schema); ok {
		return s.Type, nil
	}

	if _, ok := schema.Elem.(*Resource); ok {
		// TODO: We don't actually support this (yet)
		// but silently pass the validation, until we decide
		// how to handle nested structures in maps
		return TypeString, nil
	}
	return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
}

// validateObject validates a nested object value (a sub-resource block)
// against the given attribute schema map, returning warnings and errors.
func (m schemaMap) validateObject(
	k string,
	schema map[string]*Schema,
	c *tofu.ResourceConfig) ([]string, []error) {
	raw, _ := c.Get(k)

	// schemaMap can't validate nil
	if raw == nil {
		return nil, nil
	}

	// A computed value may legitimately not be a map yet, so only reject
	// non-map values when the key is fully known.
	if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) {
		return nil, []error{fmt.Errorf(
			"%s: expected object, got %s",
			k, reflect.ValueOf(raw).Kind())}
	}

	// Validate each declared attribute under its fully-qualified key.
	var ws []string
	var es []error
	for subK, s := range schema {
		key := subK
		if k != "" {
			key = fmt.Sprintf("%s.%s", k, subK)
		}

		ws2, es2 := m.validate(key, s, c)
		if len(ws2) > 0 {
			ws = append(ws, ws2...)
		}
		if len(es2) > 0 {
			es = append(es, es2...)
		}
	}

	// Detect any extra/unknown keys and report those as errors.
	if m, ok := raw.(map[string]interface{}); ok {
		for subk, _ := range m {
			if _, ok := schema[subk]; !ok {
				if subk == TimeoutsConfigKey {
					// The timeouts block is handled separately and is
					// always permitted.
					continue
				}
				es = append(es, fmt.Errorf(
					"%s: invalid or unknown key: %s", k, subk))
			}
		}
	}

	return ws, es
}

// validatePrimitive validates a scalar (bool/int/float/string) attribute
// value against its schema, returning warnings and errors.
func (m schemaMap) validatePrimitive(
	k string,
	raw interface{},
	schema *Schema,
	c *tofu.ResourceConfig) ([]string, []error) {

	// a nil value shouldn't happen in the old protocol, and in the new
	// protocol the types have already been validated. Either way, we can't
	// reflect on nil, so don't panic.
	if raw == nil {
		return nil, nil
	}

	// Catch if the user gave a complex type where a primitive was
	// expected, so we can return a friendly error message that
	// doesn't contain Go type system terminology.
	switch reflect.ValueOf(raw).Type().Kind() {
	case reflect.Slice:
		return nil, []error{
			fmt.Errorf("%s must be a single value, not a list", k),
		}
	case reflect.Map:
		return nil, []error{
			fmt.Errorf("%s must be a single value, not a map", k),
		}
	default: // ok
	}

	if c.IsComputed(k) {
		// If the key is being computed, then it is not an error as
		// long as it's not a slice or map.
		return nil, nil
	}

	// Decode the raw value into the schema's declared type; the decoded
	// value is what gets handed to any user-supplied ValidateFunc.
	var decoded interface{}
	switch schema.Type {
	case TypeBool:
		// Verify that we can parse this as the correct type
		var n bool
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %w", k, err)}
		}
		decoded = n
	case TypeInt:
		switch {
		case isProto5():
			// We need to verify the type precisely, because WeakDecode will
			// decode a float as an integer.

			// the config shims only use int for integral number values
			if v, ok := raw.(int); ok {
				decoded = v
			} else {
				return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)}
			}
		default:
			// Verify that we can parse this as an int
			var n int
			if err := mapstructure.WeakDecode(raw, &n); err != nil {
				return nil, []error{fmt.Errorf("%s: %w", k, err)}
			}
			decoded = n
		}
	case TypeFloat:
		// Verify that we can parse this as a float
		var n float64
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %w", k, err)}
		}
		decoded = n
	case TypeString:
		// Verify that we can parse this as a string
		var n string
		if err := mapstructure.WeakDecode(raw, &n); err != nil {
			return nil, []error{fmt.Errorf("%s: %w", k, err)}
		}
		decoded = n
	default:
		panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
	}

	if schema.ValidateFunc != nil {
		return schema.ValidateFunc(decoded, k)
	}

	return nil, nil
}

// validateType dispatches validation of a single attribute to the
// appropriate type-specific validator, then appends any deprecation
// warnings and removal errors declared on the schema.
func (m schemaMap) validateType(
	k string,
	raw interface{},
	schema *Schema,
	c *tofu.ResourceConfig) ([]string, []error) {
	var ws []string
	var es []error
	switch schema.Type {
	case TypeSet, TypeList:
		// Sets and lists share the same element-wise validation.
		ws, es = m.validateList(k, raw, schema, c)
	case TypeMap:
		ws, es = m.validateMap(k, raw, schema, c)
	default:
		ws, es = m.validatePrimitive(k, raw, schema, c)
	}

	if schema.Deprecated != "" {
		ws = append(ws, fmt.Sprintf(
			"%q: [DEPRECATED] %s", k, schema.Deprecated))
	}

	if schema.Removed != "" {
		es = append(es, fmt.Errorf(
			"%q: [REMOVED] %s", k, schema.Removed))
	}

	return ws, es
}

// Zero returns the zero value for a type.
func (t ValueType) Zero() interface{} {
	switch t {
	case TypeInvalid:
		return nil
	case TypeBool:
		return false
	case TypeInt:
		return 0
	case TypeFloat:
		return 0.0
	case TypeString:
		return ""
	case TypeList:
		return []interface{}{}
	case TypeMap:
		return map[string]interface{}{}
	case TypeSet:
		// A set's zero value is an empty *Set, not nil.
		return new(Set)
	case typeObject:
		return map[string]interface{}{}
	default:
		panic(fmt.Sprintf("unknown type %s", t))
	}
}
diff --git a/pkg/legacy/helper/schema/schema_test.go b/pkg/legacy/helper/schema/schema_test.go
new file mode 100644
index 00000000000..32acd85240e
--- /dev/null
+++ b/pkg/legacy/helper/schema/schema_test.go
@@ -0,0 +1,5546 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package schema

import (
	"bytes"
	"errors"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"testing"

	"github.com/kubegems/opentofu/pkg/configs/hcl2shim"
	"github.com/kubegems/opentofu/pkg/legacy/helper/hashcode"
	"github.com/kubegems/opentofu/pkg/legacy/tofu"
)

// TestEnvDefaultFunc verifies that EnvDefaultFunc returns the given default
// when the environment variable is unset, and the environment value when it
// is set.
func TestEnvDefaultFunc(t *testing.T) {
	key := "TF_TEST_ENV_DEFAULT_FUNC"

	f := EnvDefaultFunc(key, "42")

	// Unset: the default must be returned.
	actual, err := f()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if actual != "42" {
		t.Fatalf("bad: %#v", actual)
	}

	t.Setenv(key, "foo")

	// Set: the environment value must win over the default.
	actual, err = f()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if actual != "foo" {
		t.Fatalf("bad: %#v", actual)
	}
}

// TestMultiEnvDefaultFunc verifies MultiEnvDefaultFunc's precedence: the
// default when no key is set, otherwise the value of the first set key.
func TestMultiEnvDefaultFunc(t *testing.T) {
	keys := []string{
		"TF_TEST_MULTI_ENV_DEFAULT_FUNC1",
		"TF_TEST_MULTI_ENV_DEFAULT_FUNC2",
	}

	const dv = "42"

	t.Run("shall return the default value", func(t *testing.T) {
		f := MultiEnvDefaultFunc(keys, dv)

		actual, err := f()
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if actual != dv {
			t.Fatalf("bad: %#v", actual)
		}
	})

	t.Run("shall return the value of the first key", func(t *testing.T) {
		f := MultiEnvDefaultFunc(keys, dv)
		t.Setenv(keys[0], "foo")

		actual, err := f()
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if actual != "foo" {
			t.Fatalf("bad: %#v", actual)
		}
	})

	t.Run("shall return the value of the second key", func(t *testing.T) {
		f := MultiEnvDefaultFunc(keys, dv)
		t.Setenv(keys[1], "bar")

		actual, err := f()
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if actual != "bar" {
			t.Fatalf("bad: %#v", actual)
		}
	})
}

// TestValueType_Zero checks ValueType.Zero against the expected zero value
// for each public type.
func TestValueType_Zero(t *testing.T) {
	cases := []struct {
		Type  ValueType
		Value interface{}
	}{
		{TypeBool, false},
		{TypeInt, 0},
		{TypeFloat, 0.0},
		{TypeString, ""},
		{TypeList, []interface{}{}},
		{TypeMap, map[string]interface{}{}},
		{TypeSet, new(Set)},
	}

	for i, tc := range cases {
		actual :=
tc.Type.Zero() + if !reflect.DeepEqual(actual, tc.Value) { + t.Fatalf("%d: %#v != %#v", i, actual, tc.Value) + } + } +} + +func TestSchemaMap_Diff(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Config map[string]interface{} + CustomizeDiff CustomizeDiffFunc + Diff *tofu.InstanceDiff + Err bool + }{ + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "foo", + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Computed, but set in config", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + 
New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Default", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Default: "foo", + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, configuration set", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "String with StateFunc", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + return a.(string) + "!" 
+ }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo!", + NewExtra: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "StateFunc not called with nil value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + t.Fatalf("should not get here!") + return "" + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Variable computed", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Int decode", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": 27, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "port": &tofu.ResourceAttrDiff{ + Old: "", + New: "27", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "bool decode", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": false, + }, + + Diff: 
&tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "port": &tofu.ResourceAttrDiff{ + Old: "", + New: "false", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bool", + Schema: map[string]*Schema{ + "delete": &Schema{ + Type: TypeBool, + Optional: true, + Default: false, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "delete": "false", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "List decode", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "List decode with promotion", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": "5", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "List decode with promotion with list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{"5"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": 
&tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.0": "1", + "ports.1": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: 
map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + RequiresNew: true, + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + RequiresNew: true, + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + RequiresNew: true, + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "List with computed set", + Schema: map[string]*Schema{ + "config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "name": { + Type: TypeString, + Required: true, + }, + + "rules": { + Type: TypeSet, + Computed: true, + Elem: &Schema{Type: TypeString}, + Set: HashString, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "config": []interface{}{ + map[string]interface{}{ + "name": "hello", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: 
true, + }, + + "config.0.name": &tofu.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Computed: true, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{"2", "5", 1}, + }, + + Diff: 
&tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, "5"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.1": "1", + "ports.2": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "2", + "ports.1": "1", + "ports.2": "2", + }, + }, + + 
Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + NewRemoved: true, + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "0", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + ps := m["ports"].([]interface{}) + result := 0 + for _, p := range ps { + result += p.(int) + } + return result + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ingress.#": "2", + "ingress.80.ports.#": "1", + "ingress.80.ports.0": "80", + "ingress.443.ports.#": "1", + "ingress.443.ports.0": "443", + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "ports": []interface{}{443}, + }, + map[string]interface{}{ + "ports": []interface{}{80}, + }, + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "List of structure decode", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, 
+ + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": 8080, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ingress.0.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Err: false, + }, + + { + Name: "ComputedWhen", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + /* TODO + { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "foo", + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 8080, + }, + + Diff: &terraform.ResourceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "availability_zone": &terraform.ResourceAttrDiff{ + Old: "foo", + NewComputed: true, + }, + "port": &terraform.ResourceAttrDiff{ + 
Old: "80", + New: "8080", + }, + }, + }, + + Err: false, + }, + */ + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.%": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + + "config_vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "vars.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + "vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "vars.foo": "bar", + }, + }, 
+ + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.0.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "baz", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.%": &tofu.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + "config_vars.0.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + Old: "baz", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "ForceNews", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "address": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "address": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + 
"availability_zone": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + + "address": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + + "ports.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + Computed: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "instances.#": "0", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{hcl2shim.UnknownVariableValue}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := 
v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": hcl2shim.UnknownVariableValue, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": []interface{}{ + hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + 
Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "vars.%": "0", + }, + }, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: " - Empty", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{}, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Float", + Schema: map[string]*Schema{ + "some_threshold": &Schema{ + Type: TypeFloat, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "some_threshold": "567.8", + }, + }, + + Config: map[string]interface{}{ + "some_threshold": 12.34, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "some_threshold": &tofu.ResourceAttrDiff{ + Old: "567.8", + New: "12.34", + }, + }, + }, + + Err: false, + }, + + { + Name: "https://github.com/hashicorp/terraform/issues/824", + Schema: map[string]*Schema{ + "block_device": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "device_name": &Schema{ + Type: TypeString, + Required: true, + }, + "delete_on_termination": &Schema{ + Type: TypeBool, + Optional: true, + Default: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) + return hashcode.String(buf.String()) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "block_device.#": "2", + "block_device.616397234.delete_on_termination": "true", + "block_device.616397234.device_name": 
"/dev/sda1", + "block_device.2801811477.delete_on_termination": "true", + "block_device.2801811477.device_name": "/dev/sdx", + }, + }, + + Config: map[string]interface{}{ + "block_device": []interface{}{ + map[string]interface{}{ + "device_name": "/dev/sda1", + }, + map[string]interface{}{ + "device_name": "/dev/sdx", + }, + }, + }, + Diff: nil, + Err: false, + }, + + { + Name: "Zero value in state shouldn't result in diff", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "port": "false", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Same as prev, but for sets", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "route.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "A set computed element shouldn't cause a diff", + Schema: map[string]*Schema{ + "active": &Schema{ + Type: TypeBool, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "active": "true", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "An empty set should show up in the diff", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + 
}, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "instances.#": "1", + "instances.3": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "instances.3": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Map with empty value", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "foo": "", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "vars.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + }, + }, + }, + + Err: false, + }, + + { + Name: "Unset bool, not in state", + Schema: map[string]*Schema{ + "force": &Schema{ + Type: TypeBool, + Optional: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset set, not in state", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeSet, + Optional: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(interface{}) int { return 0 }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset list in state, should not show up computed", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "metadata_keys.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set element computed element", + Schema: map[string]*Schema{ 
+ "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed map without config that's known to be empty does not generate diff", + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + Config: nil, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "tags.%": "0", + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set with hyphen keys", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway-name": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway-name": "hello", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.1.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.1.gateway-name": &tofu.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + }, + }, + + Err: false, + }, + + { + Name: ": StateFunc in nested set (#1759)", + Schema: map[string]*Schema{ + "service_account": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "scopes": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + 
Elem: &Schema{ + Type: TypeString, + StateFunc: func(v interface{}) string { + return v.(string) + "!" + }, + }, + Set: func(v interface{}) int { + i, err := strconv.Atoi(v.(string)) + if err != nil { + t.Fatalf("err: %s", err) + } + return i + }, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "service_account": []interface{}{ + map[string]interface{}{ + "scopes": []interface{}{"123"}, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "service_account.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.123": &tofu.ResourceAttrDiff{ + Old: "", + New: "123!", + NewExtra: "123", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Removing set elements", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "instances.#": "2", + "instances.3": "333", + "instances.2": "22", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{"333", "4444"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "instances.2": &tofu.ResourceAttrDiff{ + Old: "22", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + "instances.3": &tofu.ResourceAttrDiff{ + Old: "333", + New: "333", + }, + "instances.4": &tofu.ResourceAttrDiff{ + Old: "", + New: "4444", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bools can be set with 0/1 in config, still get true/false", + Schema: map[string]*Schema{ + "one": &Schema{ + Type: TypeBool, + Optional: true, + }, + "two": 
&Schema{ + Type: TypeBool, + Optional: true, + }, + "three": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "one": "false", + "two": "true", + "three": "true", + }, + }, + + Config: map[string]interface{}{ + "one": "1", + "two": "0", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "one": &tofu.ResourceAttrDiff{ + Old: "false", + New: "true", + }, + "two": &tofu.ResourceAttrDiff{ + Old: "true", + New: "false", + }, + "three": &tofu.ResourceAttrDiff{ + Old: "true", + New: "false", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "tainted in state w/ no attr changes is still a replacement", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + DestroyTainted: true, + }, + + Err: false, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "3", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &tofu.ResourceAttrDiff{ + Old: "4", + New: "0", + NewRemoved: true, + RequiresNew: 
true, + }, + }, + }, + }, + + { + Name: "removed optional items should trigger ForceNew", + Schema: map[string]*Schema{ + "description": &Schema{ + Type: TypeString, + ForceNew: true, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "description": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "description": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "", + RequiresNew: true, + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + // GH-7715 + { + Name: "computed value for boolean field", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeBool, + ForceNew: true, + Computed: true, + Optional: true, + }, + }, + + State: &tofu.InstanceState{}, + + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "false", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew marks count as ForceNew if computed", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{hcl2shim.UnknownVariableValue, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "3", + New: "", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "List with computed schema and ForceNew", + Schema: map[string]*Schema{ + "config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Schema{ + Type: 
TypeString, + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "config.#": "2", + "config.0": "a", + "config.1": "b", + }, + }, + + Config: map[string]interface{}{ + "config": []interface{}{hcl2shim.UnknownVariableValue, hcl2shim.UnknownVariableValue}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "", + RequiresNew: true, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "overridden diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + // NOTE: This case is technically impossible in the current + // implementation, because optional+computed values never show up in the + // diff. In the event behavior changes this test should ensure that the + // intended diff still shows up. 
+ Name: "overridden removed attribute diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + + Name: "overridden diff with a CustomizeDiff function, ForceNew in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "required field with computed diff added with CustomizeDiff function", + Schema: map[string]*Schema{ + "ami_id": &Schema{ + Type: TypeString, + Required: true, + }, + "instance_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ami_id": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("instance_id", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ami_id": 
&tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "instance_id": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew - CustomizeDiffFunc edition", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 6}, + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("ports", []interface{}{5, 2, 1}); err != nil { + return err + } + if err := d.ForceNew("ports"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "3", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &tofu.ResourceAttrDiff{ + Old: "4", + New: "0", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "tainted resource does not run CustomizeDiffFunc", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return errors.New("diff customization should not have run") + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + DestroyTainted: true, + }, + + Err: false, + }, + + { + Name: "NewComputed based on a conditional with CustomizeDiffFunc", + Schema: 
map[string]*Schema{ + "etag": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + "version_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "etag": "foo", + "version_id": "1", + }, + }, + + Config: map[string]interface{}{ + "etag": "bar", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if d.HasChange("etag") { + d.SetNewComputed("version_id") + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "etag": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "version_id": &tofu.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "NewComputed should always propagate with CustomizeDiff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "", + }, + ID: "pre-existing", + }, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + d.SetNewComputed("foo") + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "vetoing a diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + Attributes: map[string]string{ + "foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "foo": "baz", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return fmt.Errorf("diff vetoed") + }, + + Err: true, + }, + + // A lot of resources currently depended on using the empty string as a + // nil/unset value. + // FIXME: We want this to eventually produce a diff, since there + // technically is a new value in the config. 
		// An optional+computed string changed to "" in config: the expected
		// Diff and Err are both left as zero values, i.e. no diff is produced.
		{
			Name: "optional, computed, empty string",
			Schema: map[string]*Schema{
				"attr": &Schema{
					Type:     TypeString,
					Optional: true,
					Computed: true,
				},
			},

			State: &tofu.InstanceState{
				Attributes: map[string]string{
					"attr": "bar",
				},
			},

			Config: map[string]interface{}{
				"attr": "",
			},
		},

		// CustomizeDiff must be able to call GetOk on an unrelated, unset set
		// attribute without crashing while other attributes are changing.
		{
			Name: "optional, computed, empty string should not crash in CustomizeDiff",
			Schema: map[string]*Schema{
				"unrelated_set": {
					Type:     TypeSet,
					Optional: true,
					Elem:     &Schema{Type: TypeString},
				},
				"stream_enabled": {
					Type:     TypeBool,
					Optional: true,
				},
				"stream_view_type": {
					Type:     TypeString,
					Optional: true,
					Computed: true,
				},
			},

			State: &tofu.InstanceState{
				Attributes: map[string]string{
					"unrelated_set.#":  "0",
					"stream_enabled":   "true",
					"stream_view_type": "KEYS_ONLY",
				},
			},
			Config: map[string]interface{}{
				"stream_enabled":   false,
				"stream_view_type": "",
			},
			CustomizeDiff: func(diff *ResourceDiff, v interface{}) error {
				// GetOk on the never-set attribute must report "not ok".
				v, ok := diff.GetOk("unrelated_set")
				if ok {
					return fmt.Errorf("Didn't expect unrelated_set: %#v", v)
				}
				return nil
			},
			Diff: &tofu.InstanceDiff{
				Attributes: map[string]*tofu.ResourceAttrDiff{
					"stream_enabled": {
						Old: "true",
						New: "false",
					},
				},
			},
		},
	}

	// Run each case as its own subtest; a mismatch between (err != nil) and
	// tc.Err, or between the produced and expected diff, fails that case.
	for i, tc := range cases {
		t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
			c := tofu.NewResourceConfigRaw(tc.Config)

			d, err := schemaMap(tc.Schema).Diff(tc.State, c, tc.CustomizeDiff, nil, true)
			if err != nil != tc.Err {
				t.Fatalf("err: %s", err)
			}

			if !reflect.DeepEqual(tc.Diff, d) {
				t.Fatalf("expected:\n%#v\n\ngot:\n%#v", tc.Diff, d)
			}
		})
	}
}

// TestSchemaMap_Input exercises schemaMap.Input. The cases demonstrate that a
// prompted answer is ignored whenever the config already has a value or the
// schema supplies a default (a static Default — even ""? yes, even "" — or a
// DefaultFunc returning a value), and is only used for a required field whose
// DefaultFunc returns nil.
func TestSchemaMap_Input(t *testing.T) {
	cases := map[string]struct {
		Schema map[string]*Schema     // schema under test
		Config map[string]interface{} // raw config; nil is normalized to empty below
		Input  map[string]string      // canned answers for the mock UI input
		Result map[string]interface{} // expected contents of the returned config
		Err    bool                   // whether Input is expected to fail
	}{
		/*
		 * String decode
		 */

		"no input on optional field with no config": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Optional: true,
				},
			},

			Input:  map[string]string{},
			Result: map[string]interface{}{},
			Err:    false,
		},

		"input ignored when config has a value": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Optional: true,
				},
			},

			Config: map[string]interface{}{
				"availability_zone": "bar",
			},

			Input: map[string]string{
				"availability_zone": "foo",
			},

			Result: map[string]interface{}{},

			Err: false,
		},

		"input ignored when schema has a default": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Default:  "foo",
					Optional: true,
				},
			},

			Input: map[string]string{
				"availability_zone": "bar",
			},

			Result: map[string]interface{}{},

			Err: false,
		},

		"input ignored when default function returns a value": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type: TypeString,
					DefaultFunc: func() (interface{}, error) {
						return "foo", nil
					},
					Optional: true,
				},
			},

			Input: map[string]string{
				"availability_zone": "bar",
			},

			Result: map[string]interface{}{},

			Err: false,
		},

		"input ignored when default function returns an empty string": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type:     TypeString,
					Default:  "",
					Optional: true,
				},
			},

			Input: map[string]string{
				"availability_zone": "bar",
			},

			Result: map[string]interface{}{},

			Err: false,
		},

		"input used when default function returns nil": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type: TypeString,
					DefaultFunc: func() (interface{}, error) {
						return nil, nil
					},
					Required: true,
				},
			},

			Input: map[string]string{
				"availability_zone": "bar",
			},

			Result: map[string]interface{}{
				"availability_zone": "bar",
			},

			Err: false,
		},

		"input not used when optional default function returns nil": {
			Schema: map[string]*Schema{
				"availability_zone": &Schema{
					Type: TypeString,
					DefaultFunc: func() (interface{}, error) {
						return nil, nil
					},
					Optional: true,
				},
			},

			Input:  map[string]string{},
			Result: map[string]interface{}{},
			Err:    false,
		},
	}

	for i, tc := range cases {
		if tc.Config == nil {
			tc.Config = make(map[string]interface{})
		}

		input := new(tofu.MockUIInput)
		input.InputReturnMap = tc.Input

		rc := tofu.NewResourceConfigRaw(tc.Config)
		// NOTE(review): Config is replaced with a fresh empty map — presumably
		// so the comparison below only sees keys Input itself adds; confirm.
		rc.Config = make(map[string]interface{})

		actual, err := schemaMap(tc.Schema).Input(input, rc)
		if err != nil != tc.Err {
			t.Fatalf("#%v err: %s", i, err)
		}

		if !reflect.DeepEqual(tc.Result, actual.Config) {
			t.Fatalf("#%v: bad:\n\ngot: %#v\nexpected: %#v", i, actual.Config, tc.Result)
		}
	}
}

// TestSchemaMap_InputDefault ensures a field carrying a static default never
// prompts for input: the mock's InputFn fails the test if it is ever called.
func TestSchemaMap_InputDefault(t *testing.T) {
	emptyConfig := make(map[string]interface{})
	rc := tofu.NewResourceConfigRaw(emptyConfig)
	rc.Config = make(map[string]interface{})

	input := new(tofu.MockUIInput)
	input.InputFn = func(opts *tofu.InputOpts) (string, error) {
		t.Fatalf("InputFn should not be called on: %#v", opts)
		return "", nil
	}

	schema := map[string]*Schema{
		"availability_zone": &Schema{
			Type:     TypeString,
			Default:  "foo",
			Optional: true,
		},
	}
	actual, err := schemaMap(schema).Input(input, rc)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// The default is not echoed back into the config; Input adds nothing.
	expected := map[string]interface{}{}

	if !reflect.DeepEqual(expected, actual.Config) {
		t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected)
	}
}

// TestSchemaMap_InputDeprecated ensures a deprecated optional field never
// prompts for input: the mock's InputFn fails the test if it is ever called.
func TestSchemaMap_InputDeprecated(t *testing.T) {
	emptyConfig := make(map[string]interface{})
	rc := tofu.NewResourceConfigRaw(emptyConfig)
	rc.Config = make(map[string]interface{})

	input := new(tofu.MockUIInput)
	input.InputFn = func(opts *tofu.InputOpts) (string, error) {
		t.Fatalf("InputFn should not be called on: %#v", opts)
		return "", nil
	}

	schema := map[string]*Schema{
		"availability_zone": &Schema{
			Type:       TypeString,
			Deprecated: "long gone",
			Optional:   true,
		},
	}
	actual, err := schemaMap(schema).Input(input, rc)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	expected := map[string]interface{}{}

	if !reflect.DeepEqual(expected, actual.Config) {
		t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected)
	}
}

// TestSchemaMap_InternalValidate pairs candidate schemas with whether
// internal validation is expected to error — e.g. Optional together with
// Required, Required together with Computed, or a Default on a computed field
// are all expected to be rejected.
func TestSchemaMap_InternalValidate(t *testing.T) {
	cases := map[string]struct {
		In  map[string]*Schema // schema to validate
		Err bool               // true if validation is expected to fail
	}{
		"nothing": {
			nil,
			false,
		},

		"Both optional and required": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeInt,
					Optional: true,
					Required: true,
				},
			},
			true,
		},

		"No optional and no required": {
			map[string]*Schema{
				"foo": &Schema{
					Type: TypeInt,
				},
			},
			true,
		},

		"Missing Type": {
			map[string]*Schema{
				"foo": &Schema{
					Required: true,
				},
			},
			true,
		},

		"Required but computed": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeInt,
					Required: true,
					Computed: true,
				},
			},
			true,
		},

		"Looks good": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeString,
					Required: true,
				},
			},
			false,
		},

		"Computed but has default": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeInt,
					Optional: true,
					Computed: true,
					Default:  "foo",
				},
			},
			true,
		},

		"Required but has default": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeInt,
					Optional: true,
					Required: true,
					Default:  "foo",
				},
			},
			true,
		},

		"List element not set": {
			map[string]*Schema{
				"foo": &Schema{
					Type: TypeList,
				},
			},
			true,
		},

		"List default": {
			map[string]*Schema{
				"foo": &Schema{
					Type:    TypeList,
					Elem:    &Schema{Type: TypeInt},
					Default: "foo",
				},
			},
			true,
		},

		"List element computed": {
			map[string]*Schema{
				"foo": &Schema{
					Type:     TypeList,
					Optional: true,
					Elem: &Schema{
						Type:     TypeInt,
						Computed: true,
					},
				},
			},
			true,
		},

		"List element with Set set": {
			map[string]*Schema{
				"foo": &Schema{
					Type: TypeList,
					Elem:
&Schema{Type: TypeInt}, + Set: func(interface{}) int { return 0 }, + Optional: true, + }, + }, + true, + }, + + "Set element with no Set set": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeInt}, + Optional: true, + }, + }, + false, + }, + + "Required but computedWhen": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Required: true, + ComputedWhen: []string{"foo"}, + }, + }, + true, + }, + + "Conflicting attributes cannot be required": { + map[string]*Schema{ + "a": &Schema{ + Type: TypeBool, + Required: true, + }, + "b": &Schema{ + Type: TypeBool, + Optional: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "Attribute with conflicts cannot be required": { + map[string]*Schema{ + "b": &Schema{ + Type: TypeBool, + Required: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "ConflictsWith cannot be used w/ ComputedWhen": { + map[string]*Schema{ + "a": &Schema{ + Type: TypeBool, + ComputedWhen: []string{"foor"}, + }, + "b": &Schema{ + Type: TypeBool, + Required: true, + ConflictsWith: []string{"a"}, + }, + }, + true, + }, + + "Sub-resource invalid": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "foo": new(Schema), + }, + }, + }, + }, + true, + }, + + "Sub-resource valid": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + }, + }, + false, + }, + + "ValidateFunc on non-primitive": { + map[string]*Schema{ + "foo": &Schema{ + Type: TypeSet, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, + true, + }, + + "computed-only field with validateFunc": { + map[string]*Schema{ + "string": &Schema{ + Type: TypeString, + Computed: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { 
+ es = append(es, fmt.Errorf("this is not fine")) + return + }, + }, + }, + true, + }, + + "computed-only field with diffSuppressFunc": { + map[string]*Schema{ + "string": &Schema{ + Type: TypeString, + Computed: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + return false + }, + }, + }, + true, + }, + + "invalid field name format #1": { + map[string]*Schema{ + "with space": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + true, + }, + + "invalid field name format #2": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + true, + }, + + "invalid field name format of a Deprecated field": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "Use with_underscores instead", + }, + }, + false, + }, + + "invalid field name format of a Removed field": { + map[string]*Schema{ + "WithCapitals": &Schema{ + Type: TypeString, + Optional: true, + Removed: "Use with_underscores instead", + }, + }, + false, + }, + + "ConfigModeBlock with Elem *Resource": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + Elem: &Resource{}, + }, + }, + false, + }, + + "ConfigModeBlock Computed with Elem *Resource": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Computed: true, + Elem: &Resource{}, + }, + }, + true, // ConfigMode of block cannot be used for computed schema + }, + + "ConfigModeBlock with Elem *Schema": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + true, + }, + + "ConfigModeBlock with no Elem": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeString, + ConfigMode: SchemaConfigModeBlock, + Optional: true, + }, + }, + true, + }, + + "ConfigModeBlock inside ConfigModeAttr": 
{ + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeAttr, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "sub": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeBlock, + Elem: &Resource{}, + }, + }, + }, + }, + }, + true, // ConfigMode of block cannot be used in child of schema with ConfigMode of attribute + }, + + "ConfigModeAuto with *Resource inside ConfigModeAttr": { + map[string]*Schema{ + "block": &Schema{ + Type: TypeList, + ConfigMode: SchemaConfigModeAttr, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "sub": &Schema{ + Type: TypeList, + Elem: &Resource{}, + }, + }, + }, + }, + }, + true, // in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + err := schemaMap(tc.In).InternalValidate(nil) + if err != nil != tc.Err { + if tc.Err { + t.Fatalf("%q: Expected error did not occur:\n\n%#v", tn, tc.In) + } + t.Fatalf("%q: Unexpected error occurred: %s\n\n%#v", tn, err, tc.In) + } + }) + } + +} + +func TestSchemaMap_DiffSuppress(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *tofu.InstanceState + Config map[string]interface{} + ExpectedDiff *tofu.InstanceDiff + Err bool + }{ + "#0 - Suppress otherwise valid diff by returning true": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + return true + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + ExpectedDiff: nil, + + Err: false, + }, + + "#1 - Don't suppress diff by returning false": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + // Always suppress any diff + 
return false + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + ExpectedDiff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + "Default with suppress makes no diff": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Default: "foo", + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + return true + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + ExpectedDiff: nil, + + Err: false, + }, + + "Default with false suppress makes diff": { + Schema: map[string]*Schema{ + "availability_zone": { + Type: TypeString, + Optional: true, + Default: "foo", + DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool { + return false + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + ExpectedDiff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": { + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + "Complex structure with set of computed string should mark root set as computed": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "outer_str": &Schema{ + Type: TypeString, + Optional: true, + }, + "inner": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "inner_str": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + return 2 + }, + }, + }, + }, + Set: func(v interface{}) int { + return 1 + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "outer_str": "foo", + "inner": []interface{}{ + map[string]interface{}{ + "inner_str": hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + }, + + ExpectedDiff: 
&tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "outer.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.outer_str": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "outer.~1.inner.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.inner.~2.inner_str": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + "Complex structure with complex list of computed string should mark root set as computed": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "outer_str": &Schema{ + Type: TypeString, + Optional: true, + }, + "inner": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "inner_str": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + Set: func(v interface{}) int { + return 1 + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "outer_str": "foo", + "inner": []interface{}{ + map[string]interface{}{ + "inner_str": hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + }, + + ExpectedDiff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "outer.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.outer_str": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "outer.~1.inner.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "outer.~1.inner.0.inner_str": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + c := tofu.NewResourceConfigRaw(tc.Config) + + d, err := schemaMap(tc.Schema).Diff(tc.State, c, nil, nil, true) + if err != nil != tc.Err { + t.Fatalf("#%q err: %s", tn, err) + } + + if 
!reflect.DeepEqual(tc.ExpectedDiff, d) { + t.Fatalf("#%q:\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.ExpectedDiff, d) + } + }) + } +} + +func TestSchemaMap_Validate(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + Config map[string]interface{} + Err bool + Errors []error + Warnings []string + }{ + "Good": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + }, + + "Good, because the var is not set and that error will come elsewhere": { + Schema: map[string]*Schema{ + "size": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "size": hcl2shim.UnknownVariableValue, + }, + }, + + "Required field not set": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{}, + + Err: true, + }, + + "Invalid basic type": { + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "port": "I am invalid", + }, + + Err: true, + }, + + "Invalid complex type": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + Err: true, + }, + + "Bad type": { + Schema: map[string]*Schema{ + "size": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "size": "nope", + }, + + Err: true, + }, + + "Required but has DefaultFunc": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + Config: nil, + }, + + "Required but has DefaultFunc return nil": { + Schema: 
map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + DefaultFunc: func() (interface{}, error) { + return nil, nil + }, + }, + }, + + Config: nil, + + Err: true, + }, + + "List with promotion": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "ingress": "5", + }, + + Err: false, + }, + + "List with promotion set as list": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{"5"}, + }, + + Err: false, + }, + + "Optional sub-resource": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{}, + + Err: false, + }, + + "Sub-resource is the wrong type": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{"foo"}, + }, + + Err: true, + }, + + "Not a list nested block": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": "foo", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`ingress: should be a list`), + }, + }, + + "Not a list primitive": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + Config: map[string]interface{}{ 
+ "strings": "foo", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`strings: should be a list`), + }, + }, + + "Unknown list": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + Config: map[string]interface{}{ + "strings": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "Unknown + Deprecation": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Config: map[string]interface{}{ + "old_news": hcl2shim.UnknownVariableValue, + }, + + Warnings: []string{ + "\"old_news\": [DEPRECATED] please use 'new_news' instead", + }, + }, + + "Required sub-resource field": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{}, + }, + }, + + Err: true, + }, + + "Good sub-resource": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": 80, + }, + }, + }, + + Err: false, + }, + + "Good sub-resource, computed value": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": hcl2shim.UnknownVariableValue, + }, + }, + }, + + Err: false, + }, + + "Invalid/unknown field": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ 
+ Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "foo": "bar", + }, + + Err: true, + }, + + "Invalid/unknown field with computed value": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + + Err: true, + }, + + "Computed field set": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Err: true, + }, + + "Not a set": { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + Config: map[string]interface{}{ + "ports": "foo", + }, + + Err: true, + }, + + "Maps": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": "foo", + }, + + Err: true, + }, + + "Good map: data surrounded by extra slice": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + + "Good map": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "Map with type specified as value type": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + Elem: TypeBool, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "not_a_bool", + }, + }, + + Err: true, + }, + + "Map with type specified as nested 
Schema": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + Elem: &Schema{Type: TypeBool}, + }, + }, + + Config: map[string]interface{}{ + "user_data": map[string]interface{}{ + "foo": "not_a_bool", + }, + }, + + Err: true, + }, + + "Bad map: just a slice": { + Schema: map[string]*Schema{ + "user_data": &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + + Config: map[string]interface{}{ + "user_data": []interface{}{ + "foo", + }, + }, + + Err: true, + }, + + "Good set: config has slice with single interpolated value": { + Schema: map[string]*Schema{ + "security_groups": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeString}, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + Config: map[string]interface{}{ + "security_groups": []interface{}{"${var.foo}"}, + }, + + Err: false, + }, + + "Bad set: config has single interpolated value": { + Schema: map[string]*Schema{ + "security_groups": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeString}, + }, + }, + + Config: map[string]interface{}{ + "security_groups": "${var.foo}", + }, + + Err: true, + }, + + "Bad, subresource should not allow unknown elements": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "port": 80, + "other": "yes", + }, + }, + }, + + Err: true, + }, + + "Bad, subresource should not allow invalid types": { + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + 
"ingress": []interface{}{ + map[string]interface{}{ + "port": "bad", + }, + }, + }, + + Err: true, + }, + + "Bad, should not allow lists to be assigned to string attributes": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": []interface{}{"foo", "bar", "baz"}, + }, + + Err: true, + }, + + "Bad, should not allow maps to be assigned to string attributes": { + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Required: true, + }, + }, + + Config: map[string]interface{}{ + "availability_zone": map[string]interface{}{"foo": "bar", "baz": "thing"}, + }, + + Err: true, + }, + + "Deprecated attribute usage generates warning, but not error": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Config: map[string]interface{}{ + "old_news": "extra extra!", + }, + + Err: false, + + Warnings: []string{ + "\"old_news\": [DEPRECATED] please use 'new_news' instead", + }, + }, + + "Deprecated generates no warnings if attr not used": { + Schema: map[string]*Schema{ + "old_news": &Schema{ + Type: TypeString, + Optional: true, + Deprecated: "please use 'new_news' instead", + }, + }, + + Err: false, + + Warnings: nil, + }, + + "Removed attribute usage generates error": { + Schema: map[string]*Schema{ + "long_gone": &Schema{ + Type: TypeString, + Optional: true, + Removed: "no longer supported by Cloud API", + }, + }, + + Config: map[string]interface{}{ + "long_gone": "still here!", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"long_gone\": [REMOVED] no longer supported by Cloud API"), + }, + }, + + "Removed generates no errors if attr not used": { + Schema: map[string]*Schema{ + "long_gone": &Schema{ + Type: TypeString, + Optional: true, + Removed: "no longer supported by Cloud API", + }, + }, + + Err: false, + }, + + 
"Conflicting attributes generate error": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": "b-val", + "a": "a-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"a\": conflicts with b"), + }, + }, + + "Conflicting attributes okay when unknown 1": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": "b-val", + "a": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "Conflicting attributes okay when unknown 2": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b"}, + }, + }, + + Config: map[string]interface{}{ + "b": hcl2shim.UnknownVariableValue, + "a": "a-val", + }, + + Err: false, + }, + + "Conflicting attributes generate error even if one is unknown": { + Schema: map[string]*Schema{ + "b": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"a", "c"}, + }, + "a": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b", "c"}, + }, + "c": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"b", "a"}, + }, + }, + + Config: map[string]interface{}{ + "b": hcl2shim.UnknownVariableValue, + "a": "a-val", + "c": "c-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf("\"a\": conflicts with c"), + fmt.Errorf("\"c\": conflicts with a"), + }, + }, + + "Required attribute & undefined conflicting optional are good": { + Schema: map[string]*Schema{ + "required_att": &Schema{ + Type: TypeString, + Required: true, + }, + "optional_att": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"required_att"}, 
+ }, + }, + + Config: map[string]interface{}{ + "required_att": "required-val", + }, + + Err: false, + }, + + "Required conflicting attribute & defined optional generate error": { + Schema: map[string]*Schema{ + "required_att": &Schema{ + Type: TypeString, + Required: true, + }, + "optional_att": &Schema{ + Type: TypeString, + Optional: true, + ConflictsWith: []string{"required_att"}, + }, + }, + + Config: map[string]interface{}{ + "required_att": "required-val", + "optional_att": "optional-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`"optional_att": conflicts with required_att`), + }, + }, + + "Computed + Optional fields conflicting with each other": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{ + "foo_att": "foo-val", + "bar_att": "bar-val", + }, + + Err: true, + Errors: []error{ + fmt.Errorf(`"foo_att": conflicts with bar_att`), + fmt.Errorf(`"bar_att": conflicts with foo_att`), + }, + }, + + "Computed + Optional fields NOT conflicting with each other": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{ + "foo_att": "foo-val", + }, + + Err: false, + }, + + "Computed + Optional fields that conflict with none set": { + Schema: map[string]*Schema{ + "foo_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"bar_att"}, + }, + "bar_att": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"foo_att"}, + }, + }, + + Config: map[string]interface{}{}, + 
+ Err: false, + }, + + "Good with ValidateFunc": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": "valid", + }, + Err: false, + }, + + "Bad with ValidateFunc": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("something is not right here")) + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": "invalid", + }, + Err: true, + Errors: []error{ + fmt.Errorf(`something is not right here`), + }, + }, + + "ValidateFunc not called when type does not match": { + Schema: map[string]*Schema{ + "number": &Schema{ + Type: TypeInt, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + t.Fatalf("Should not have gotten validate call") + return + }, + }, + }, + Config: map[string]interface{}{ + "number": "NaN", + }, + Err: true, + }, + + "ValidateFunc gets decoded type": { + Schema: map[string]*Schema{ + "maybe": &Schema{ + Type: TypeBool, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + if _, ok := v.(bool); !ok { + t.Fatalf("Expected bool, got: %#v", v) + } + return + }, + }, + }, + Config: map[string]interface{}{ + "maybe": "true", + }, + }, + + "ValidateFunc is not called with a computed value": { + Schema: map[string]*Schema{ + "validate_me": &Schema{ + Type: TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("something is not right here")) + return + }, + }, + }, + Config: map[string]interface{}{ + "validate_me": hcl2shim.UnknownVariableValue, + }, + + Err: false, + }, + + "special timeouts field": { + Schema: map[string]*Schema{ + "availability_zone": 
&Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + Config: map[string]interface{}{ + TimeoutsConfigKey: "bar", + }, + + Err: false, + }, + + "invalid bool field": { + Schema: map[string]*Schema{ + "bool_field": { + Type: TypeBool, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "bool_field": "abcdef", + }, + Err: true, + }, + "invalid integer field": { + Schema: map[string]*Schema{ + "integer_field": { + Type: TypeInt, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "integer_field": "abcdef", + }, + Err: true, + }, + "invalid float field": { + Schema: map[string]*Schema{ + "float_field": { + Type: TypeFloat, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "float_field": "abcdef", + }, + Err: true, + }, + + // Invalid map values + "invalid bool map value": { + Schema: map[string]*Schema{ + "boolMap": &Schema{ + Type: TypeMap, + Elem: TypeBool, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "boolMap": map[string]interface{}{ + "boolField": "notbool", + }, + }, + Err: true, + }, + "invalid int map value": { + Schema: map[string]*Schema{ + "intMap": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "intMap": map[string]interface{}{ + "intField": "notInt", + }, + }, + Err: true, + }, + "invalid float map value": { + Schema: map[string]*Schema{ + "floatMap": &Schema{ + Type: TypeMap, + Elem: TypeFloat, + Optional: true, + }, + }, + Config: map[string]interface{}{ + "floatMap": map[string]interface{}{ + "floatField": "notFloat", + }, + }, + Err: true, + }, + + "map with positive validate function": { + Schema: map[string]*Schema{ + "floatInt": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + return + }, + }, + }, + Config: map[string]interface{}{ + "floatInt": map[string]interface{}{ + "rightAnswer": "42", + "tooMuch": 
"43", + }, + }, + Err: false, + }, + "map with negative validate function": { + Schema: map[string]*Schema{ + "floatInt": &Schema{ + Type: TypeMap, + Elem: TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + es = append(es, fmt.Errorf("this is not fine")) + return + }, + }, + }, + Config: map[string]interface{}{ + "floatInt": map[string]interface{}{ + "rightAnswer": "42", + "tooMuch": "43", + }, + }, + Err: true, + }, + + // The Validation function should not see interpolation strings from + // non-computed values. + "set with partially computed list and map": { + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "list": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + if strings.HasPrefix(v.(string), "${") { + es = append(es, fmt.Errorf("should not have interpolations")) + } + return + }, + }, + }, + }, + }, + }, + }, + Config: map[string]interface{}{ + "outer": []interface{}{ + map[string]interface{}{ + "list": []interface{}{"A", hcl2shim.UnknownVariableValue, "c"}, + }, + }, + }, + Err: false, + }, + "unexpected nils values": { + Schema: map[string]*Schema{ + "strings": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + "block": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "int": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + Config: map[string]interface{}{ + "strings": []interface{}{"1", nil}, + "block": []interface{}{map[string]interface{}{ + "int": nil, + }, + nil, + }, + }, + Err: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + c := tofu.NewResourceConfigRaw(tc.Config) + + ws, es := schemaMap(tc.Schema).Validate(c) + if len(es) > 0 != tc.Err { + if 
len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + t.FailNow() + } + + if !reflect.DeepEqual(ws, tc.Warnings) { + t.Fatalf("%q: warnings:\n\nexpected: %#v\ngot:%#v", tn, tc.Warnings, ws) + } + + if tc.Errors != nil { + sort.Sort(errorSort(es)) + sort.Sort(errorSort(tc.Errors)) + + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: errors:\n\nexpected: %q\ngot: %q", tn, tc.Errors, es) + } + } + }) + + } +} + +func TestSchemaSet_ValidateMaxItems(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *tofu.InstanceState + Config map[string]interface{} + ConfigVariables map[string]string + Diff *tofu.InstanceDiff + Err bool + Errors []error + }{ + "#0": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MaxItems: 1, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: true, + Errors: []error{ + fmt.Errorf("aliases: attribute supports 1 item maximum, config has 2 declared"), + }, + }, + "#1": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#2": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MaxItems: 1, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + } + + for tn, tc := range cases { + c := tofu.NewResourceConfigRaw(tc.Config) + _, es := schemaMap(tc.Schema).Validate(c) + + if len(es) > 0 != tc.Err { + if len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + 
t.FailNow() + } + + if tc.Errors != nil { + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) + } + } + } +} + +func TestSchemaSet_ValidateMinItems(t *testing.T) { + cases := map[string]struct { + Schema map[string]*Schema + State *tofu.InstanceState + Config map[string]interface{} + ConfigVariables map[string]string + Diff *tofu.InstanceDiff + Err bool + Errors []error + }{ + "#0": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MinItems: 2, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#1": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo", "bar"}, + }, + Diff: nil, + Err: false, + Errors: nil, + }, + "#2": { + Schema: map[string]*Schema{ + "aliases": &Schema{ + Type: TypeSet, + Optional: true, + MinItems: 2, + Elem: &Schema{Type: TypeString}, + }, + }, + State: nil, + Config: map[string]interface{}{ + "aliases": []interface{}{"foo"}, + }, + Diff: nil, + Err: true, + Errors: []error{ + fmt.Errorf("aliases: attribute supports 2 item as a minimum, config has 1 declared"), + }, + }, + } + + for tn, tc := range cases { + c := tofu.NewResourceConfigRaw(tc.Config) + _, es := schemaMap(tc.Schema).Validate(c) + + if len(es) > 0 != tc.Err { + if len(es) == 0 { + t.Errorf("%q: no errors", tn) + } + + for _, e := range es { + t.Errorf("%q: err: %s", tn, e) + } + + t.FailNow() + } + + if tc.Errors != nil { + if !reflect.DeepEqual(es, tc.Errors) { + t.Fatalf("%q: expected: %q\ngot: %q", tn, tc.Errors, es) + } + } + } +} + +// errorSort implements sort.Interface to sort errors by their error message +type errorSort []error + +func (e errorSort) Len() int { return len(e) } +func (e 
errorSort) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e errorSort) Less(i, j int) bool { + return e[i].Error() < e[j].Error() +} + +func TestSchemaMapDeepCopy(t *testing.T) { + schema := map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + }, + } + source := schemaMap(schema) + dest := source.DeepCopy() + dest["foo"].ForceNew = true + if reflect.DeepEqual(source, dest) { + t.Fatalf("source and dest should not match") + } +} diff --git a/pkg/legacy/helper/schema/serialize.go b/pkg/legacy/helper/schema/serialize.go new file mode 100644 index 00000000000..5acb1c67e4f --- /dev/null +++ b/pkg/legacy/helper/schema/serialize.go @@ -0,0 +1,130 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "fmt" + "sort" + "strconv" +) + +func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { + if val == nil { + buf.WriteRune(';') + return + } + + switch schema.Type { + case TypeBool: + if val.(bool) { + buf.WriteRune('1') + } else { + buf.WriteRune('0') + } + case TypeInt: + buf.WriteString(strconv.Itoa(val.(int))) + case TypeFloat: + buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) + case TypeString: + buf.WriteString(val.(string)) + case TypeList: + buf.WriteRune('(') + l := val.([]interface{}) + for _, innerVal := range l { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune(')') + case TypeMap: + + m := val.(map[string]interface{}) + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + buf.WriteRune('[') + for _, k := range keys { + innerVal := m[k] + if innerVal == nil { + continue + } + buf.WriteString(k) + buf.WriteRune(':') + + switch innerVal := innerVal.(type) { + case int: + buf.WriteString(strconv.Itoa(innerVal)) + case float64: + buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) + case string: + 
buf.WriteString(innerVal) + default: + panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) + } + + buf.WriteRune(';') + } + buf.WriteRune(']') + case TypeSet: + buf.WriteRune('{') + s := val.(*Set) + for _, innerVal := range s.List() { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune('}') + default: + panic("unknown schema type to serialize") + } + buf.WriteRune(';') +} + +// SerializeValueForHash appends a serialization of the given resource config +// to the given buffer, guaranteeing deterministic results given the same value +// and schema. +// +// Its primary purpose is as input into a hashing function in order +// to hash complex substructures when used in sets, and so the serialization +// is not reversible. +func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } + sm := resource.Schema + m := val.(map[string]interface{}) + var keys []string + for k := range sm { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + innerSchema := sm[k] + // Skip attributes that are not user-provided. Computed attributes + // do not contribute to the hash since their ultimate value cannot + // be known at plan/diff time. 
+ if !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/pkg/legacy/helper/schema/serialize_test.go b/pkg/legacy/helper/schema/serialize_test.go new file mode 100644 index 00000000000..0ab41f41f48 --- /dev/null +++ b/pkg/legacy/helper/schema/serialize_test.go @@ -0,0 +1,243 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "testing" +) + +func TestSerializeForHash(t *testing.T) { + type testCase struct { + Schema interface{} + Value interface{} + Expected string + } + + tests := []testCase{ + testCase{ + Schema: &Schema{ + Type: TypeInt, + }, + Value: 0, + Expected: "0;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeInt, + }, + Value: 200, + Expected: "200;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeBool, + }, + Value: true, + Expected: "1;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeBool, + }, + Value: false, + Expected: "0;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeFloat, + }, + Value: 1.0, + Expected: "1;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeFloat, + }, + Value: 1.54, + Expected: "1.54;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeFloat, + }, + Value: 0.1, + Expected: "0.1;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeString, + }, + Value: "hello", + Expected: "hello;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeString, + }, + Value: "1", + 
Expected: "1;", + }, + + testCase{ + Schema: &Schema{ + Type: TypeList, + Elem: &Schema{ + Type: TypeString, + }, + }, + Value: []interface{}{}, + Expected: "();", + }, + + testCase{ + Schema: &Schema{ + Type: TypeList, + Elem: &Schema{ + Type: TypeString, + }, + }, + Value: []interface{}{"hello", "world"}, + Expected: "(hello;world;);", + }, + + testCase{ + Schema: &Schema{ + Type: TypeList, + Elem: &Resource{ + Schema: map[string]*Schema{ + "fo": &Schema{ + Type: TypeString, + Required: true, + }, + "fum": &Schema{ + Type: TypeString, + Required: true, + }, + }, + }, + }, + Value: []interface{}{ + map[string]interface{}{ + "fo": "bar", + }, + map[string]interface{}{ + "fo": "baz", + "fum": "boz", + }, + }, + Expected: "(;;);", + }, + + testCase{ + Schema: &Schema{ + Type: TypeSet, + Elem: &Schema{ + Type: TypeString, + }, + }, + Value: NewSet(func(i interface{}) int { return len(i.(string)) }, []interface{}{ + "hello", + "woo", + }), + Expected: "{woo;hello;};", + }, + + testCase{ + Schema: &Schema{ + Type: TypeMap, + Elem: &Schema{ + Type: TypeString, + }, + }, + Value: map[string]interface{}{ + "foo": "bar", + "baz": "foo", + }, + Expected: "[baz:foo;foo:bar;];", + }, + + testCase{ + Schema: &Resource{ + Schema: map[string]*Schema{ + "name": &Schema{ + Type: TypeString, + Required: true, + }, + "size": &Schema{ + Type: TypeInt, + Optional: true, + }, + "green": &Schema{ + Type: TypeBool, + Optional: true, + Computed: true, + }, + "upside_down": &Schema{ + Type: TypeBool, + Computed: true, + }, + }, + }, + Value: map[string]interface{}{ + "name": "my-fun-database", + "size": 12, + "green": true, + }, + Expected: "green:1;name:my-fun-database;size:12;", + }, + + // test TypeMap nested in Schema: GH-7091 + testCase{ + Schema: &Resource{ + Schema: map[string]*Schema{ + "outer": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{ + Type: TypeMap, + Optional: true, + }, + }, + }, + }, + Value: map[string]interface{}{ + "outer": NewSet(func(i interface{}) int 
{ return 42 }, []interface{}{ + map[string]interface{}{ + "foo": "bar", + "baz": "foo", + }, + }), + }, + Expected: "outer:{[baz:foo;foo:bar;];};", + }, + } + + for _, test := range tests { + var gotBuf bytes.Buffer + schema := test.Schema + + switch s := schema.(type) { + case *Schema: + SerializeValueForHash(&gotBuf, test.Value, s) + case *Resource: + SerializeResourceForHash(&gotBuf, test.Value, s) + } + + got := gotBuf.String() + if got != test.Expected { + t.Errorf("hash(%#v) got %#v, but want %#v", test.Value, got, test.Expected) + } + } +} diff --git a/pkg/legacy/helper/schema/set.go b/pkg/legacy/helper/schema/set.go new file mode 100644 index 00000000000..275dd416bee --- /dev/null +++ b/pkg/legacy/helper/schema/set.go @@ -0,0 +1,255 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/kubegems/opentofu/pkg/legacy/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. 
+func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. +func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. 
+func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + return reflect.DeepEqual(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. 
+func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + + if isProto5() { + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. + if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) index(item interface{}) int { + return sort.SearchStrings(s.listCode(), s.hash(item)) +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/pkg/legacy/helper/schema/set_test.go b/pkg/legacy/helper/schema/set_test.go new file mode 100644 index 00000000000..eb9e0aae7c2 --- /dev/null +++ b/pkg/legacy/helper/schema/set_test.go @@ -0,0 +1,222 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "reflect" + "testing" +) + +func TestSetAdd(t *testing.T) { + s := &Set{F: testSetInt} + s.Add(1) + s.Add(5) + s.Add(25) + + expected := []interface{}{1, 25, 5} + actual := s.List() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestSetAdd_negative(t *testing.T) { + // Since we don't allow negative hashes, this should just hash to the + // same thing... + s := &Set{F: testSetInt} + s.Add(-1) + s.Add(1) + + expected := []interface{}{-1} + actual := s.List() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestSetContains(t *testing.T) { + s := &Set{F: testSetInt} + s.Add(5) + s.Add(-5) + + if s.Contains(2) { + t.Fatal("should not contain") + } + if !s.Contains(5) { + t.Fatal("should contain") + } + if !s.Contains(-5) { + t.Fatal("should contain") + } +} + +func TestSetDifference(t *testing.T) { + s1 := &Set{F: testSetInt} + s2 := &Set{F: testSetInt} + + s1.Add(1) + s1.Add(5) + + s2.Add(5) + s2.Add(25) + + difference := s1.Difference(s2) + difference.Add(2) + + expected := []interface{}{1, 2} + actual := difference.List() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestSetIntersection(t *testing.T) { + s1 := &Set{F: testSetInt} + s2 := &Set{F: testSetInt} + + s1.Add(1) + s1.Add(5) + + s2.Add(5) + s2.Add(25) + + intersection := s1.Intersection(s2) + intersection.Add(2) + + expected := []interface{}{2, 5} + actual := intersection.List() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestSetUnion(t *testing.T) { + s1 := &Set{F: testSetInt} + s2 := &Set{F: testSetInt} + + s1.Add(1) + s1.Add(5) + + s2.Add(5) + s2.Add(25) + + union := s1.Union(s2) + union.Add(2) + + expected := []interface{}{1, 2, 25, 5} + actual := union.List() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func testSetInt(v 
interface{}) int { + return v.(int) +} + +func TestHashResource_nil(t *testing.T) { + resource := &Resource{ + Schema: map[string]*Schema{ + "name": { + Type: TypeString, + Optional: true, + }, + }, + } + f := HashResource(resource) + + idx := f(nil) + if idx != 0 { + t.Fatalf("Expected 0 when hashing nil, given: %d", idx) + } +} + +func TestHashEqual(t *testing.T) { + nested := &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeString, + Optional: true, + }, + }, + } + root := &Resource{ + Schema: map[string]*Schema{ + "bar": { + Type: TypeString, + Optional: true, + }, + "nested": { + Type: TypeSet, + Optional: true, + Elem: nested, + }, + }, + } + n1 := map[string]interface{}{"foo": "bar"} + n2 := map[string]interface{}{"foo": "baz"} + + r1 := map[string]interface{}{ + "bar": "baz", + "nested": NewSet(HashResource(nested), []interface{}{n1}), + } + r2 := map[string]interface{}{ + "bar": "qux", + "nested": NewSet(HashResource(nested), []interface{}{n2}), + } + r3 := map[string]interface{}{ + "bar": "baz", + "nested": NewSet(HashResource(nested), []interface{}{n2}), + } + r4 := map[string]interface{}{ + "bar": "qux", + "nested": NewSet(HashResource(nested), []interface{}{n1}), + } + s1 := NewSet(HashResource(root), []interface{}{r1}) + s2 := NewSet(HashResource(root), []interface{}{r2}) + s3 := NewSet(HashResource(root), []interface{}{r3}) + s4 := NewSet(HashResource(root), []interface{}{r4}) + + cases := []struct { + name string + set *Set + compare *Set + expected bool + }{ + { + name: "equal", + set: s1, + compare: s1, + expected: true, + }, + { + name: "not equal", + set: s1, + compare: s2, + expected: false, + }, + { + name: "outer equal, should still not be equal", + set: s1, + compare: s3, + expected: false, + }, + { + name: "inner equal, should still not be equal", + set: s1, + compare: s4, + expected: false, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + actual := tc.set.HashEqual(tc.compare) + if tc.expected != 
actual { + t.Fatalf("expected %t, got %t", tc.expected, actual) + } + }) + } +} diff --git a/pkg/legacy/helper/schema/shims.go b/pkg/legacy/helper/schema/shims.go new file mode 100644 index 00000000000..f63c78fb712 --- /dev/null +++ b/pkg/legacy/helper/schema/shims.go @@ -0,0 +1,120 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "encoding/json" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/tofu" +) + +// DiffFromValues takes the current state and desired state as cty.Values and +// derives a tofu.InstanceDiff to give to the legacy providers. This is +// used to take the states provided by the new ApplyResourceChange method and +// convert them to a state+diff required for the legacy Apply method. +func DiffFromValues(prior, planned cty.Value, res *Resource) (*tofu.InstanceDiff, error) { + return diffFromValues(prior, planned, res, nil) +} + +// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our +// test fixtures from the legacy tests. In the new provider protocol the diff +// only needs to be created for the apply operation, and any customizations +// have already been done. 
+func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*tofu.InstanceDiff, error) { + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } + + configSchema := res.CoreConfigSchema() + + cfg := tofu.NewResourceConfigShimmed(planned, configSchema) + removeConfigUnknowns(cfg.Config) + removeConfigUnknowns(cfg.Raw) + + diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) + if err != nil { + return nil, err + } + + return diff, err +} + +// During apply the only unknown values are those which are to be computed by +// the resource itself. These may have been marked as unknown config values, and +// need to be removed to prevent the UnknownVariableValue from appearing the diff. +func removeConfigUnknowns(cfg map[string]interface{}) { + for k, v := range cfg { + switch v := v.(type) { + case string: + if v == hcl2shim.UnknownVariableValue { + delete(cfg, k) + } + case []interface{}: + for _, i := range v { + if m, ok := i.(map[string]interface{}); ok { + removeConfigUnknowns(m) + } + } + case map[string]interface{}: + removeConfigUnknowns(v) + } + } +} + +// ApplyDiff takes a cty.Value state and applies a tofu.InstanceDiff to +// get a new cty.Value state. This is used to convert the diff returned from +// the legacy provider Diff method to the state required for the new +// PlanResourceChange method. +func ApplyDiff(base cty.Value, d *tofu.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. 
+func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a tofu.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *tofu.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/pkg/legacy/helper/schema/shims_test.go b/pkg/legacy/helper/schema/shims_test.go new file mode 100644 index 00000000000..f34a87fe451 --- /dev/null +++ b/pkg/legacy/helper/schema/shims_test.go @@ -0,0 +1,3526 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/legacy/helper/hashcode" + "github.com/kubegems/opentofu/pkg/legacy/tofu" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +var ( + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) + equateEmpty = cmpopts.EquateEmpty() +) + +func testApplyDiff(t *testing.T, + resource *Resource, + state, expected *tofu.InstanceState, + diff *tofu.InstanceDiff) { + + testSchema := providers.Schema{ + Version: int64(resource.SchemaVersion), + Block: resourceSchemaToBlock(resource.Schema), + } + + stateVal, err := StateValueFromInstanceState(state, testSchema.Block.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + newState, err := ApplyDiff(stateVal, diff, testSchema.Block) + if err != nil { + t.Fatal(err) + } + + // verify that "id" is correct + id := newState.AsValueMap()["id"] + + switch { + case diff.Destroy || diff.DestroyDeposed || diff.DestroyTainted: + // there should be no id + if !id.IsNull() { + t.Fatalf("destroyed instance should have no id: %#v", id) + } + default: + // the "id" field always exists and is computed, so it must have a + // valid value or be unknown. + if id.IsNull() { + t.Fatal("new instance state cannot have a null id") + } + + if id.IsKnown() && id.AsString() == "" { + t.Fatal("new instance id cannot be an empty string") + } + } + + // Resource.Meta will be hanlded separately, so it's OK that we lose the + // timeout values here. 
+ expectedState, err := StateValueFromInstanceState(expected, testSchema.Block.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(expectedState, newState, equateEmpty, typeComparer, valueComparer) { + t.Fatalf(cmp.Diff(expectedState, newState, equateEmpty, typeComparer, valueComparer)) + } +} + +func TestShimResourcePlan_destroyCreate(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + ForceNew: true, + }, + }, + } + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + RequiresNew: true, + Old: "3", + New: "42", + }, + }, + } + + state := &tofu.InstanceState{ + Attributes: map[string]string{"foo": "3"}, + } + + expected := &tofu.InstanceState{ + ID: hcl2shim.UnknownVariableValue, + Attributes: map[string]string{ + "id": hcl2shim.UnknownVariableValue, + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + testApplyDiff(t, r, state, expected, d) +} + +func TestShimResourceApply_create(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *tofu.InstanceState = nil + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + + // Shim + // now that we have our diff and desired 
state, see if we can reproduce + // that with the shim + // we're not testing Resource.Create, so we need to start with the "created" state + createdState := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{"id": "foo"}, + } + + testApplyDiff(t, r, createdState, expected, d) +} + +func TestShimResourceApply_Timeout_state(t *testing.T) { + r := &Resource{ + SchemaVersion: 2, + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + called := false + r.Create = func(d *ResourceData, m interface{}) error { + called = true + d.SetId("foo") + return nil + } + + var s *tofu.InstanceState = nil + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(d); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("not called") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + }, + Meta: map[string]interface{}{ + "schema_version": "2", + TimeoutKey: expectedForValues(40, 0, 80, 40, 0), + }, + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal in Timeout State:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } + + // Shim + // we're not testing Resource.Create, so we need to start with the "created" state + createdState := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{"id": "foo"}, + } + + testApplyDiff(t, r, 
createdState, expected, d) +} + +func TestShimResourceDiff_Timeout_diff(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(40 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + }, + } + + r.Create = func(d *ResourceData, m interface{}) error { + d.SetId("foo") + return nil + } + + conf := tofu.NewResourceConfigRaw(map[string]interface{}{ + "foo": 42, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2h", + }, + }) + var s *tofu.InstanceState + + actual, err := r.Diff(s, conf, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + New: "42", + }, + }, + } + + diffTimeout := &ResourceTimeout{ + Create: DefaultTimeout(120 * time.Minute), + Update: DefaultTimeout(80 * time.Minute), + Delete: DefaultTimeout(40 * time.Minute), + } + + if err := diffTimeout.DiffEncode(expected); err != nil { + t.Fatalf("Error encoding timeout to diff: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Not equal in Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + } + + // Shim + // apply this diff, so we have a state to compare + applied, err := r.Apply(s, actual, nil) + if err != nil { + t.Fatal(err) + } + + // we're not testing Resource.Create, so we need to start with the "created" state + createdState := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{"id": "foo"}, + } + + testSchema := providers.Schema{ + Version: int64(r.SchemaVersion), + Block: resourceSchemaToBlock(r.Schema), + } + + initialVal, err := StateValueFromInstanceState(createdState, testSchema.Block.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + appliedVal, err := StateValueFromInstanceState(applied, testSchema.Block.ImpliedType()) + if 
err != nil { + t.Fatal(err) + } + + d, err := DiffFromValues(initialVal, appliedVal, r) + if err != nil { + t.Fatal(err) + } + if eq, _ := d.Same(expected); !eq { + t.Fatal(cmp.Diff(d, expected)) + } +} + +func TestShimResourceApply_destroy(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + } + + called := false + r.Delete = func(d *ResourceData, m interface{}) error { + called = true + return nil + } + + s := &tofu.InstanceState{ + ID: "bar", + } + + d := &tofu.InstanceDiff{ + Destroy: true, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !called { + t.Fatal("delete not called") + } + + if actual != nil { + t.Fatalf("bad: %#v", actual) + } + + // Shim + // now that we have our diff and desired state, see if we can reproduce + // that with the shim + testApplyDiff(t, r, s, actual, d) +} + +func TestShimResourceApply_destroyCreate(t *testing.T) { + r := &Resource{ + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeInt, + Optional: true, + ForceNew: true, + }, + + "tags": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + } + + change := false + r.Create = func(d *ResourceData, m interface{}) error { + change = d.HasChange("tags") + d.SetId("foo") + return nil + } + r.Delete = func(d *ResourceData, m interface{}) error { + return nil + } + + var s *tofu.InstanceState = &tofu.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "7", + "tags.Name": "foo", + }, + } + + d := &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "id": &tofu.ResourceAttrDiff{ + New: "foo", + }, + "foo": &tofu.ResourceAttrDiff{ + Old: "7", + New: "42", + RequiresNew: true, + }, + "tags.Name": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "foo", + RequiresNew: true, + }, + }, + } + + actual, err := r.Apply(s, d, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !change { + t.Fatal("should 
have change") + } + + expected := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "42", + "tags.%": "1", + "tags.Name": "foo", + }, + } + + if !reflect.DeepEqual(actual, expected) { + cmp.Diff(actual, expected) + } + + // Shim + // now that we have our diff and desired state, see if we can reproduce + // that with the shim + // we're not testing Resource.Create, so we need to start with the "created" state + createdState := &tofu.InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "id": "foo", + "foo": "7", + "tags.%": "1", + "tags.Name": "foo", + }, + } + + testApplyDiff(t, r, createdState, expected, d) +} + +func TestShimSchemaMap_Diff(t *testing.T) { + cases := []struct { + Name string + Schema map[string]*Schema + State *tofu.InstanceState + Config map[string]interface{} + CustomizeDiff CustomizeDiffFunc + Diff *tofu.InstanceDiff + Err bool + }{ + { + Name: "diff-1", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "diff-2", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "diff-3", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: 
&tofu.InstanceState{ + ID: "foo", + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Computed, but set in config", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "availability_zone": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Default", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Default: "foo", + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "DefaultFunc, configuration set", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + DefaultFunc: func() (interface{}, error) { + return "foo", nil + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "bar", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "String 
with StateFunc", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + return a.(string) + "!" + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "foo!", + NewExtra: "foo", + }, + }, + }, + + Err: false, + }, + + { + Name: "StateFunc not called with nil value", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + StateFunc: func(a interface{}) string { + t.Error("should not get here!") + return "" + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Variable computed", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": hcl2shim.UnknownVariableValue, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Int decode", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": 27, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "port": &tofu.ResourceAttrDiff{ + Old: "", + New: "27", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "bool decode", + Schema: 
map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": false, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "port": &tofu.ResourceAttrDiff{ + Old: "", + New: "false", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bool", + Schema: map[string]*Schema{ + "delete": &Schema{ + Type: TypeBool, + Optional: true, + Default: false, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "delete": "false", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "List decode", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "List decode with promotion with list", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + PromoteSingle: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{"5"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + 
Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, "5"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "3", + "ports.0": "1", + "ports.1": "2", + "ports.2": "5", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Required: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "2", + "ports.0": "1", + "ports.1": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + 
Required: true, + Elem: &Schema{Type: TypeInt}, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, 2, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + RequiresNew: true, + }, + "ports.0": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + RequiresNew: true, + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + RequiresNew: true, + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "List with computed set", + Schema: map[string]*Schema{ + "config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &Resource{ + Schema: map[string]*Schema{ + "name": { + Type: TypeString, + Required: true, + }, + + "rules": { + Type: TypeSet, + Computed: true, + Elem: &Schema{Type: TypeString}, + Set: HashString, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "config": []interface{}{ + map[string]interface{}{ + "name": "hello", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + + "config.0.name": &tofu.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-1", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: 
TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-2", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Computed: true, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "0", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set-3", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-4", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{"2", "5", 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "", + New: "2", + }, + "ports.5": 
&tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-5", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ports": []interface{}{1, hcl2shim.UnknownVariableValue, 5}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-6", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "2", + "ports.1": "1", + "ports.2": "2", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "3", + }, + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-8", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set-9", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeSet, + Required: true, + Elem: &Resource{ + Schema: 
map[string]*Schema{ + "ports": &Schema{ + Type: TypeList, + Optional: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + ps := m["ports"].([]interface{}) + result := 0 + for _, p := range ps { + result += p.(int) + } + return result + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ingress.#": "2", + "ingress.80.ports.#": "1", + "ingress.80.ports.0": "80", + "ingress.443.ports.#": "1", + "ingress.443.ports.0": "443", + }, + }, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "ports": []interface{}{443}, + }, + map[string]interface{}{ + "ports": []interface{}{80}, + }, + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "List of structure decode", + Schema: map[string]*Schema{ + "ingress": &Schema{ + Type: TypeList, + Required: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "from": &Schema{ + Type: TypeInt, + Required: true, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ingress": []interface{}{ + map[string]interface{}{ + "from": 8080, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ingress.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "ingress.0.from": &tofu.ResourceAttrDiff{ + Old: "", + New: "8080", + }, + }, + }, + + Err: false, + }, + + { + Name: "ComputedWhen", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "availability_zone": "foo", + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "computed", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + 
Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "port": 80, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + "port": &tofu.ResourceAttrDiff{ + New: "80", + }, + }, + }, + + Err: false, + }, + + { + Name: "computed, exists", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + // there is no computed diff when the instance exists already + Diff: nil, + + Err: false, + }, + + { + Name: "Maps-1", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "config_vars": map[string]interface{}{ + "bar": "baz", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.%": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + + "config_vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps-2", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeMap, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "config_vars.%": "1", + "config_vars.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": map[string]interface{}{ + "bar": "baz", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + 
}, + }, + + Err: false, + }, + + { + Name: "Maps-3", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "vars.%": "1", + "vars.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "bar": "baz", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "", + NewRemoved: true, + }, + "vars.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps-4", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "vars.%": "1", + "vars.foo": "bar", + }, + }, + + Config: nil, + + Diff: nil, + + Err: false, + }, + + { + Name: "Maps-5", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.%": "1", + "config_vars.0.foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "config_vars": []interface{}{ + map[string]interface{}{ + "bar": "baz", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.0.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + + Err: false, + }, + + { + Name: "Maps-6", + Schema: map[string]*Schema{ + "config_vars": &Schema{ + Type: TypeList, + Elem: &Schema{Type: TypeMap}, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "config_vars.#": "1", + "config_vars.0.%": "2", + "config_vars.0.foo": "bar", + "config_vars.0.bar": "baz", + 
}, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config_vars.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "config_vars.0.%": &tofu.ResourceAttrDiff{ + Old: "2", + New: "0", + }, + "config_vars.0.foo": &tofu.ResourceAttrDiff{ + Old: "bar", + NewRemoved: true, + }, + "config_vars.0.bar": &tofu.ResourceAttrDiff{ + Old: "baz", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "ForceNews", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "address": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "availability_zone": "bar", + "address": "foo", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-10", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + ForceNew: true, + }, + + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "availability_zone": "bar", + "ports.#": "1", + "ports.80": "80", + }, + }, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "bar", + New: "foo", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-11", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: 
&Schema{Type: TypeString}, + Optional: true, + Computed: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "instances.#": "0", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{hcl2shim.UnknownVariableValue}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-12", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": hcl2shim.UnknownVariableValue, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway": &tofu.ResourceAttrDiff{ + Old: "", + New: hcl2shim.UnknownVariableValue, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set-13", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: 
map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway": []interface{}{ + hcl2shim.UnknownVariableValue, + }, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.~1.index": &tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.~1.gateway.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: nil, + + Config: nil, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Computed maps", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "vars.%": "0", + }, + }, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Empty", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{}, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Float", + Schema: map[string]*Schema{ + "some_threshold": &Schema{ + Type: TypeFloat, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "some_threshold": "567.8", + }, + }, + + Config: map[string]interface{}{ + "some_threshold": 12.34, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "some_threshold": &tofu.ResourceAttrDiff{ + Old: "567.8", + New: "12.34", + }, + }, + }, + + 
Err: false, + }, + + { + Name: "https://github.com/hashicorp/terraform/issues/824", + Schema: map[string]*Schema{ + "block_device": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "device_name": &Schema{ + Type: TypeString, + Required: true, + }, + "delete_on_termination": &Schema{ + Type: TypeBool, + Optional: true, + Default: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) + return hashcode.String(buf.String()) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "block_device.#": "2", + "block_device.616397234.delete_on_termination": "true", + "block_device.616397234.device_name": "/dev/sda1", + "block_device.2801811477.delete_on_termination": "true", + "block_device.2801811477.device_name": "/dev/sdx", + }, + }, + + Config: map[string]interface{}{ + "block_device": []interface{}{ + map[string]interface{}{ + "device_name": "/dev/sda1", + }, + map[string]interface{}{ + "device_name": "/dev/sdx", + }, + }, + }, + Diff: nil, + Err: false, + }, + + { + Name: "Zero value in state shouldn't result in diff", + Schema: map[string]*Schema{ + "port": &Schema{ + Type: TypeBool, + Optional: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "port": "false", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Same as prev, but for sets", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) 
+ }, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "route.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "A set computed element shouldn't cause a diff", + Schema: map[string]*Schema{ + "active": &Schema{ + Type: TypeBool, + Computed: true, + ForceNew: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "active": "true", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "An empty set should show up in the diff", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "instances.#": "1", + "instances.3": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.#": &tofu.ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "instances.3": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Map with empty value", + Schema: map[string]*Schema{ + "vars": &Schema{ + Type: TypeMap, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "vars": map[string]interface{}{ + "foo": "", + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "vars.%": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "vars.foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "", + }, + }, + }, + + Err: false, + }, + + { + Name: "Unset bool, not in state", + Schema: map[string]*Schema{ + "force": &Schema{ + Type: TypeBool, + 
Optional: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset set, not in state", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeSet, + Optional: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(interface{}) int { return 0 }, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Unset list in state, should not show up computed", + Schema: map[string]*Schema{ + "metadata_keys": &Schema{ + Type: TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "metadata_keys.#": "0", + }, + }, + + Config: map[string]interface{}{}, + + Diff: nil, + + Err: false, + }, + + { + Name: "Computed map without config that's known to be empty does not generate diff", + Schema: map[string]*Schema{ + "tags": &Schema{ + Type: TypeMap, + Computed: true, + }, + }, + + Config: nil, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "tags.%": "0", + }, + }, + + Diff: nil, + + Err: false, + }, + + { + Name: "Set with hyphen keys", + Schema: map[string]*Schema{ + "route": &Schema{ + Type: TypeSet, + Optional: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "index": &Schema{ + Type: TypeInt, + Required: true, + }, + + "gateway-name": &Schema{ + Type: TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + return m["index"].(int) + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "route": []interface{}{ + map[string]interface{}{ + "index": "1", + "gateway-name": "hello", + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "route.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "route.1.index": 
&tofu.ResourceAttrDiff{ + Old: "", + New: "1", + }, + "route.1.gateway-name": &tofu.ResourceAttrDiff{ + Old: "", + New: "hello", + }, + }, + }, + + Err: false, + }, + + { + Name: "StateFunc in nested set (#1759)", + Schema: map[string]*Schema{ + "service_account": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Resource{ + Schema: map[string]*Schema{ + "scopes": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{ + Type: TypeString, + StateFunc: func(v interface{}) string { + return v.(string) + "!" + }, + }, + Set: func(v interface{}) int { + i, err := strconv.Atoi(v.(string)) + if err != nil { + t.Fatalf("err: %s", err) + } + return i + }, + }, + }, + }, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "service_account": []interface{}{ + map[string]interface{}{ + "scopes": []interface{}{"123"}, + }, + }, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "service_account.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.#": &tofu.ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + "service_account.0.scopes.123": &tofu.ResourceAttrDiff{ + Old: "", + New: "123!", + NewExtra: "123", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Removing set elements", + Schema: map[string]*Schema{ + "instances": &Schema{ + Type: TypeSet, + Elem: &Schema{Type: TypeString}, + Optional: true, + ForceNew: true, + Set: func(v interface{}) int { + return len(v.(string)) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "instances.#": "2", + "instances.3": "333", + "instances.2": "22", + }, + }, + + Config: map[string]interface{}{ + "instances": []interface{}{"333", "4444"}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "instances.2": &tofu.ResourceAttrDiff{ + Old: "22", + New: "", + NewRemoved: true, + 
RequiresNew: true, + }, + "instances.3": &tofu.ResourceAttrDiff{ + Old: "333", + New: "333", + }, + "instances.4": &tofu.ResourceAttrDiff{ + Old: "", + New: "4444", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Bools can be set with 0/1 in config, still get true/false", + Schema: map[string]*Schema{ + "one": &Schema{ + Type: TypeBool, + Optional: true, + }, + "two": &Schema{ + Type: TypeBool, + Optional: true, + }, + "three": &Schema{ + Type: TypeBool, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "one": "false", + "two": "true", + "three": "true", + }, + }, + + Config: map[string]interface{}{ + "one": "1", + "two": "0", + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "one": &tofu.ResourceAttrDiff{ + Old: "false", + New: "true", + }, + "two": &tofu.ResourceAttrDiff{ + Old: "true", + New: "false", + }, + "three": &tofu.ResourceAttrDiff{ + Old: "true", + New: "false", + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "tainted in state w/ no attr changes is still a replacement", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + DestroyTainted: true, + }, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 1}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: 
map[string]*tofu.ResourceAttrDiff{ + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &tofu.ResourceAttrDiff{ + Old: "4", + New: "0", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "removed optional items should trigger ForceNew", + Schema: map[string]*Schema{ + "description": &Schema{ + Type: TypeString, + ForceNew: true, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "description": "foo", + }, + }, + + Config: map[string]interface{}{}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "description": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "", + RequiresNew: true, + NewRemoved: true, + }, + }, + }, + + Err: false, + }, + + // GH-7715 + { + Name: "computed value for boolean field", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeBool, + ForceNew: true, + Computed: true, + Optional: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + }, + + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "foo": &tofu.ResourceAttrDiff{ + Old: "", + New: "false", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew marks count as ForceNew if computed", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Required: true, + ForceNew: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{hcl2shim.UnknownVariableValue, 2, 1}, + 
}, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.#": &tofu.ResourceAttrDiff{ + NewComputed: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "List with computed schema and ForceNew", + Schema: map[string]*Schema{ + "config": &Schema{ + Type: TypeList, + Optional: true, + ForceNew: true, + Elem: &Schema{ + Type: TypeString, + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "config.#": "2", + "config.0": "a", + "config.1": "b", + }, + }, + + Config: map[string]interface{}{ + "config": []interface{}{hcl2shim.UnknownVariableValue, hcl2shim.UnknownVariableValue}, + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "config.#": &tofu.ResourceAttrDiff{ + Old: "2", + New: "", + RequiresNew: true, + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "overridden diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + // NOTE: This case is technically impossible in the current + // implementation, because optional+computed values never show up in the + // diff. In the event behavior changes this test should ensure that the + // intended diff still shows up. 
+ Name: "overridden removed attribute diff with a CustomizeDiff function, ForceNew not in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + if err := d.ForceNew("availability_zone"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + + Name: "overridden diff with a CustomizeDiff function, ForceNew in schema", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "availability_zone": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("availability_zone", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "availability_zone": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "required field with computed diff added with CustomizeDiff function", + Schema: map[string]*Schema{ + "ami_id": &Schema{ + Type: TypeString, + Required: true, + }, + "instance_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: nil, + + Config: map[string]interface{}{ + "ami_id": "foo", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("instance_id", "bar"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ami_id": 
&tofu.ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "instance_id": &tofu.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + + Err: false, + }, + + { + Name: "Set ForceNew only marks the changing element as ForceNew - CustomizeDiffFunc edition", + Schema: map[string]*Schema{ + "ports": &Schema{ + Type: TypeSet, + Optional: true, + Computed: true, + Elem: &Schema{Type: TypeInt}, + Set: func(a interface{}) int { + return a.(int) + }, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "ports.#": "3", + "ports.1": "1", + "ports.2": "2", + "ports.4": "4", + }, + }, + + Config: map[string]interface{}{ + "ports": []interface{}{5, 2, 6}, + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if err := d.SetNew("ports", []interface{}{5, 2, 1}); err != nil { + return err + } + if err := d.ForceNew("ports"); err != nil { + return err + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "ports.1": &tofu.ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "ports.2": &tofu.ResourceAttrDiff{ + Old: "2", + New: "2", + }, + "ports.5": &tofu.ResourceAttrDiff{ + Old: "", + New: "5", + RequiresNew: true, + }, + "ports.4": &tofu.ResourceAttrDiff{ + Old: "4", + New: "0", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + }, + + { + Name: "tainted resource does not run CustomizeDiffFunc", + Schema: map[string]*Schema{}, + + State: &tofu.InstanceState{ + ID: "someid", + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return errors.New("diff customization should not have run") + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{}, + DestroyTainted: true, + }, + + Err: false, + }, + + { + Name: "NewComputed based on a conditional with CustomizeDiffFunc", + Schema: map[string]*Schema{ + "etag": &Schema{ + 
Type: TypeString, + Optional: true, + Computed: true, + }, + "version_id": &Schema{ + Type: TypeString, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "etag": "foo", + "version_id": "1", + }, + }, + + Config: map[string]interface{}{ + "etag": "bar", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + if d.HasChange("etag") { + d.SetNewComputed("version_id") + } + return nil + }, + + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "etag": &tofu.ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "version_id": &tofu.ResourceAttrDiff{ + Old: "1", + New: "", + NewComputed: true, + }, + }, + }, + + Err: false, + }, + + { + Name: "vetoing a diff", + Schema: map[string]*Schema{ + "foo": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "foo": "bar", + }, + }, + + Config: map[string]interface{}{ + "foo": "baz", + }, + + CustomizeDiff: func(d *ResourceDiff, meta interface{}) error { + return fmt.Errorf("diff vetoed") + }, + + Err: true, + }, + + // A lot of resources currently depended on using the empty string as a + // nil/unset value. 
+ { + Name: "optional, computed, empty string", + Schema: map[string]*Schema{ + "attr": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "attr": "bar", + }, + }, + + Config: map[string]interface{}{ + "attr": "", + }, + }, + + { + Name: "optional, computed, empty string should not crash in CustomizeDiff", + Schema: map[string]*Schema{ + "unrelated_set": { + Type: TypeSet, + Optional: true, + Elem: &Schema{Type: TypeString}, + }, + "stream_enabled": { + Type: TypeBool, + Optional: true, + }, + "stream_view_type": { + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &tofu.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "unrelated_set.#": "0", + "stream_enabled": "true", + "stream_view_type": "KEYS_ONLY", + }, + }, + Config: map[string]interface{}{ + "stream_enabled": false, + "stream_view_type": "", + }, + CustomizeDiff: func(diff *ResourceDiff, v interface{}) error { + v, ok := diff.GetOk("unrelated_set") + if ok { + return fmt.Errorf("Didn't expect unrelated_set: %#v", v) + } + return nil + }, + Diff: &tofu.InstanceDiff{ + Attributes: map[string]*tofu.ResourceAttrDiff{ + "stream_enabled": { + Old: "true", + New: "false", + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + c := tofu.NewResourceConfigRaw(tc.Config) + + { + d, err := schemaMap(tc.Schema).Diff(tc.State, c, tc.CustomizeDiff, nil, false) + if err != nil != tc.Err { + t.Fatalf("err: %s", err) + } + if !cmp.Equal(d, tc.Diff, equateEmpty) { + t.Fatal(cmp.Diff(d, tc.Diff, equateEmpty)) + } + } + // up to here is already tested in helper/schema; we're just + // verify that we haven't broken any tests in transition. 
+ + // create a schema from the schemaMap + testSchema := resourceSchemaToBlock(tc.Schema) + + // get our initial state cty.Value + stateVal, err := StateValueFromInstanceState(tc.State, testSchema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + // this is the desired cty.Value from the configuration + configVal := hcl2shim.HCL2ValueFromConfigValue(c.Config) + + // verify that we can round-trip the config + origConfig := hcl2shim.ConfigValueFromHCL2(configVal) + if !cmp.Equal(c.Config, origConfig, equateEmpty) { + t.Fatal(cmp.Diff(c.Config, origConfig, equateEmpty)) + } + + // make sure our config conforms precisely to the schema + configVal, err = testSchema.CoerceValue(configVal) + if err != nil { + t.Fatal(tfdiags.FormatError(err)) + } + + // The new API requires returning the desired state rather than a + // diff, so we need to verify that we can combine the state and + // diff and recreate a new state. + + // now verify that we can create diff, using the new config and state values + // customize isn't run on tainted resources + tainted := tc.State != nil && tc.State.Tainted + if tainted { + tc.CustomizeDiff = nil + } + + res := &Resource{Schema: tc.Schema} + + d, err := diffFromValues(stateVal, configVal, res, tc.CustomizeDiff) + if err != nil { + if !tc.Err { + t.Fatal(err) + } + } + + // In a real "apply" operation there would be no unknown values, + // so for tests containing unknowns we'll stop here: the steps + // after this point apply only to the apply phase. 
			if !configVal.IsWhollyKnown() {
				return
			}

			// our diff function can't set DestroyTainted, but match the
			// expected value here for the test fixtures
			if tainted {
				if d == nil {
					d = &tofu.InstanceDiff{}
				}
				d.DestroyTainted = true
			}

			// Same (rather than strict equality) tolerates computed-value
			// placeholders when comparing against the fixture diff.
			if eq, _ := d.Same(tc.Diff); !eq {
				t.Fatal(cmp.Diff(d, tc.Diff))
			}

		})
	}
}

// resourceSchemaToBlock converts a legacy schema map into the configschema
// block representation used by the new-style shims.
func resourceSchemaToBlock(s map[string]*Schema) *configschema.Block {
	return (&Resource{Schema: s}).CoreConfigSchema()
}

// TestRemoveConfigUnknowns verifies that removeConfigUnknowns strips every
// unknown-value placeholder (hcl2shim.UnknownVariableValue, inlined below as
// its literal UUID) from a raw config map, recursing into nested blocks.
func TestRemoveConfigUnknowns(t *testing.T) {
	cfg := map[string]interface{}{
		"id": "74D93920-ED26-11E3-AC10-0800200C9A66",
		"route_rules": []interface{}{
			map[string]interface{}{
				"cidr_block":        "74D93920-ED26-11E3-AC10-0800200C9A66",
				"destination":       "0.0.0.0/0",
				"destination_type":  "CIDR_BLOCK",
				"network_entity_id": "1",
			},
			map[string]interface{}{
				"cidr_block":       "74D93920-ED26-11E3-AC10-0800200C9A66",
				"destination":      "0.0.0.0/0",
				"destination_type": "CIDR_BLOCK",
				"sub_block": []interface{}{
					map[string]interface{}{
						"computed": "74D93920-ED26-11E3-AC10-0800200C9A66",
					},
				},
			},
		},
	}

	// Unknown keys are removed entirely; known siblings are untouched.
	expect := map[string]interface{}{
		"route_rules": []interface{}{
			map[string]interface{}{
				"destination":       "0.0.0.0/0",
				"destination_type":  "CIDR_BLOCK",
				"network_entity_id": "1",
			},
			map[string]interface{}{
				"destination":      "0.0.0.0/0",
				"destination_type": "CIDR_BLOCK",
				"sub_block": []interface{}{
					map[string]interface{}{},
				},
			},
		},
	}

	removeConfigUnknowns(cfg)

	if !reflect.DeepEqual(cfg, expect) {
		t.Fatalf("\nexpected: %#v\ngot: %#v", expect, cfg)
	}
}
diff --git a/pkg/legacy/helper/schema/testing.go b/pkg/legacy/helper/schema/testing.go new file mode 100644 index 00000000000..1b14fcf92b6 --- /dev/null +++ b/pkg/legacy/helper/schema/testing.go @@ -0,0 +1,33 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package schema

import (
	"testing"

	"github.com/kubegems/opentofu/pkg/legacy/tofu"
)

// TestResourceDataRaw creates a ResourceData from a raw configuration map.
// It diffs the raw config against an empty state and materializes the result,
// failing the test immediately on any error.
func TestResourceDataRaw(
	t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
	t.Helper()

	c := tofu.NewResourceConfigRaw(raw)

	sm := schemaMap(schema)
	diff, err := sm.Diff(nil, c, nil, nil, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	result, err := sm.Data(nil, diff)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	return result
}
diff --git a/pkg/legacy/helper/schema/valuetype.go b/pkg/legacy/helper/schema/valuetype.go new file mode 100644 index 00000000000..22954e7623d --- /dev/null +++ b/pkg/legacy/helper/schema/valuetype.go @@ -0,0 +1,26 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package schema

//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go

// ValueType is an enum of the type that can be represented by a schema.
type ValueType int

const (
	TypeInvalid ValueType = iota
	TypeBool
	TypeInt
	TypeFloat
	TypeString
	TypeList
	TypeMap
	TypeSet
	// typeObject is internal-only: element types of complex collections.
	typeObject
)

// NOTE: ValueType has more functions defined on it in schema.go. We can't
// put them here because we reference other files.
diff --git a/pkg/legacy/helper/schema/valuetype_string.go b/pkg/legacy/helper/schema/valuetype_string.go new file mode 100644 index 00000000000..914ca32cbe0 --- /dev/null +++ b/pkg/legacy/helper/schema/valuetype_string.go @@ -0,0 +1,31 @@
// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.

package schema

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[TypeInvalid-0]
	_ = x[TypeBool-1]
	_ = x[TypeInt-2]
	_ = x[TypeFloat-3]
	_ = x[TypeString-4]
	_ = x[TypeList-5]
	_ = x[TypeMap-6]
	_ = x[TypeSet-7]
	_ = x[typeObject-8]
}

const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"

var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}

func (i ValueType) String() string {
	if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
		return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
}
diff --git a/pkg/legacy/tofu/context_components.go b/pkg/legacy/tofu/context_components.go new file mode 100644 index 00000000000..efa517a32ea --- /dev/null +++ b/pkg/legacy/tofu/context_components.go @@ -0,0 +1,70 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/provisioners"
)

// contextComponentFactory is the interface that Context uses
// to initialize various components such as providers and provisioners.
// This factory gets more information than the raw maps used to initialize
// a Context. This information is used for debugging.
type contextComponentFactory interface {
	// ResourceProvider creates a new ResourceProvider with the given type.
	ResourceProvider(typ addrs.Provider) (providers.Interface, error)
	ResourceProviders() []string

	// ResourceProvisioner creates a new ResourceProvisioner with the given
	// type.
	ResourceProvisioner(typ string) (provisioners.Interface, error)
	ResourceProvisioners() []string
}

// basicComponentFactory just calls a factory from a map directly.
+type basicComponentFactory struct { + providers map[addrs.Provider]providers.Factory + provisioners map[string]ProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + var result []string + for k := range c.providers { + result = append(result, k.String()) + } + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + var result []string + for k := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ addrs.Provider) (providers.Interface, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ.String()) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ string) (provisioners.Interface, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/pkg/legacy/tofu/diff.go b/pkg/legacy/tofu/diff.go new file mode 100644 index 00000000000..435932358c4 --- /dev/null +++ b/pkg/legacy/tofu/diff.go @@ -0,0 +1,1456 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bufio" + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. 
	// Managed resource reads never appear in plan, and when data source
	// reads appear they are represented as DiffCreate in core before
	// transforming to DiffRefresh in the UI layer.
	DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion
)

// multiVal matches the index key to a flatmapped set, list or map
var multiVal = regexp.MustCompile(`\.(#|%)$`)

// Diff tracks the changes that are necessary to apply a configuration
// to an existing infrastructure.
type Diff struct {
	// Modules contains all the modules that have a diff
	Modules []*ModuleDiff
}

// Prune cleans out unused structures in the diff without affecting
// the behavior of the diff at all.
//
// This is not safe to call concurrently. This is safe to call on a
// nil Diff.
func (d *Diff) Prune() {
	if d == nil {
		return
	}

	// Prune all empty modules
	newModules := make([]*ModuleDiff, 0, len(d.Modules))
	for _, m := range d.Modules {
		// If the module isn't empty, we keep it
		if !m.Empty() {
			newModules = append(newModules, m)
		}
	}
	// Collapse to nil so an all-empty diff still reads as empty/nil downstream.
	if len(newModules) == 0 {
		newModules = nil
	}
	d.Modules = newModules
}

// AddModule adds the module with the given path to the diff.
//
// This should be the preferred method to add module diffs since it
// allows us to optimize lookups later as well as control sorting.
func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff {
	// Lower the new-style address into a legacy-style address.
	// This requires that none of the steps have instance keys, which is
	// true for all addresses at the time of implementing this because
	// "count" and "for_each" are not yet implemented for modules.
	legacyPath := make([]string, len(path))
	for i, step := range path {
		if step.InstanceKey != addrs.NoKey {
			// FIXME: Once the rest of OpenTofu is ready to use count and
			// for_each, remove all of this and just write the addrs.ModuleInstance
			// value itself into the ModuleState.
			panic("diff cannot represent modules with count or for_each keys")
		}

		legacyPath[i] = step.Name
	}

	m := &ModuleDiff{Path: legacyPath}
	m.init()
	d.Modules = append(d.Modules, m)
	return m
}

// ModuleByPath is used to lookup the module diff for the given path.
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
	if d == nil {
		return nil
	}
	for _, mod := range d.Modules {
		if mod.Path == nil {
			panic("missing module path")
		}
		// Compare via the canonical string form of the normalized address.
		modPath := normalizeModulePath(mod.Path)
		if modPath.String() == path.String() {
			return mod
		}
	}
	return nil
}

// RootModule returns the ModuleDiff for the root module, panicking if the
// diff does not contain one.
func (d *Diff) RootModule() *ModuleDiff {
	root := d.ModuleByPath(addrs.RootModuleInstance)
	if root == nil {
		panic("missing root module")
	}
	return root
}

// Empty returns true if the diff has no changes.
func (d *Diff) Empty() bool {
	if d == nil {
		return true
	}

	for _, m := range d.Modules {
		if !m.Empty() {
			return false
		}
	}

	return true
}

// Equal compares two diffs for exact equality.
//
// This is different from the Same comparison that is supported which
// checks for operation equality taking into account computed values. Equal
// instead checks for exact equality.
func (d *Diff) Equal(d2 *Diff) bool {
	// If one is nil, they must both be nil
	if d == nil || d2 == nil {
		return d == d2
	}

	// Sort the modules
	sort.Sort(moduleDiffSort(d.Modules))
	sort.Sort(moduleDiffSort(d2.Modules))

	// Copy since we have to modify the module destroy flag to false so
	// we don't compare that. TODO: delete this when we get rid of the
	// destroy flag on modules.
+ dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + addr := normalizeModulePath(m.Path) + key := addr.String() + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +func (d *Diff) init() { + if d.Modules == nil { + rootDiff := &ModuleDiff{Path: rootModulePath} + d.Modules = []*ModuleDiff{rootDiff} + } + for _, m := range d.Modules { + m.init() + } +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. +type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. 
+// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffCreate for a module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. +func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff. 
+func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by OpenTofu core. It is + // meant to be used for additional data a resource may want to pass through. 
+	// The value here must only contain Go primitives and collections.
+	Meta map[string]interface{}
+}
+
+func (d *InstanceDiff) Lock()   { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ApplyToValue merges the receiver into the given base value, returning a
+// new value that incorporates the planned changes. The given value must
+// conform to the given schema, or this method will panic.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
+	// Create a set of InstanceState-style flatmap attributes from our
+	// existing state; we can use these to more easily apply the diff changes.
+	attrs := hcl2shim.FlatmapValueFromHCL2(base)
+	applied, err := d.Apply(attrs, schema)
+	if err != nil {
+		return base, err
+	}
+
+	val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
+	if err != nil {
+		return base, err
+	}
+
+	return schema.CoerceValue(val)
+}
+
+// Apply applies the diff to the provided flatmapped attributes,
+// returning the new instance attributes.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	// We always build a new value here, even if the given diff is "empty",
+	// because we might be planning to create a new instance that happens
+	// to have no attributes set, and so we want to produce an empty object
+	// rather than just echoing back the null old value.
+	if attrs == nil {
+		attrs = map[string]string{}
+	}
+
+	// Rather than applying the diff to mutate the attrs, we'll copy new
+	// values into here to avoid the possibility of leaving stale values.
+ result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. 
+ if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 
&& keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. 
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. 
+			if attrSchema.Required {
+				// we only set a missing string here, since bool or number types
+				// would have a distinct zero value which shouldn't have been
+				// lost.
+				if attrSchema.Type == cty.String {
+					result[attr] = ""
+				}
+			}
+		}
+		return result, nil
+	}
+
+	// check for mismatched diff values
+	if exists &&
+		old != diff.Old &&
+		old != hcl2shim.UnknownVariableValue &&
+		diff.Old != hcl2shim.UnknownVariableValue {
+		return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
+	}
+
+	if diff.NewRemoved {
+		// don't set anything in the new value
+		return map[string]string{}, nil
+	}
+
+	if diff.Old == diff.New && diff.New == "" {
+		// this can only be a valid empty string
+		if attrSchema.Type == cty.String {
+			result[attr] = ""
+		}
+		return result, nil
+	}
+
+	if attrSchema.Computed && diff.NewComputed {
+		result[attr] = hcl2shim.UnknownVariableValue
+		return result, nil
+	}
+
+	result[attr] = diff.New
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	result := map[string]string{}
+
+	prefix := ""
+	if len(path) > 1 {
+		prefix = strings.Join(path[:len(path)-1], ".") + "."
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." 
+ idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. 
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. 
+type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. 
+func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). 
+func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within the terraform package. +// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, 
v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." 
+ prefix := k[:len(k)-1] + for k2, _ := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. + if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k, _ := range d.Attributes { + checkOld[k] = struct{}{} + } + for k, _ := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k, _ := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). 
+			continue
+		}
+
+		// Remove this key since we'll never hit it again
+		delete(checkOld, k)
+		delete(checkNew, k)
+
+		_, ok := d2.GetAttribute(k)
+		if !ok {
+			// If there's no new attribute, and the old diff expected the attribute
+			// to be removed, that's just fine.
+			if diffOld.NewRemoved {
+				continue
+			}
+
+			// If the last diff was a computed value then the absence of
+			// that value is allowed since it may mean the value ended up
+			// being the same.
+			if diffOld.NewComputed {
+				ok = true
+			}
+
+			// No exact match, but maybe this is a set containing computed
+			// values. So check if there is an approximate hash in the key
+			// and if so, try to match the key.
+			if strings.Contains(k, "~") {
+				parts := strings.Split(k, ".")
+				parts2 := append([]string(nil), parts...)
+
+				re := regexp.MustCompile(`^~\d+$`)
+				for i, part := range parts {
+					if re.MatchString(part) {
+						// we're going to consider this the base of a
+						// computed hash, and remove all longer matching fields
+						ok = true
+
+						parts2[i] = `\d+`
+						parts2 = parts2[:i+1]
+						break
+					}
+				}
+
+				re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+				if err != nil {
+					return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+				}
+
+				for k2, _ := range checkNew {
+					if re.MatchString(k2) {
+						delete(checkNew, k2)
+					}
+				}
+			}
+
+			// This is a little tricky, but when a diff contains a computed
+			// list, set, or map that can only be interpolated after the apply
+			// command has created the dependent resources, it could turn out
+			// that the result is actually the same as the existing state which
+			// would remove the key from the diff.
+			if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			// Similarly, in a RequiresNew scenario, a list that shows up in the plan
+			// diff can disappear from the apply diff, which is calculated from an
+			// empty state.
+			if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			if !ok {
+				return false, fmt.Sprintf("attribute mismatch: %s", k)
+			}
+		}
+
+		// search for the suffix of the base of a [computed] map, list or set.
+		match := multiVal.FindStringSubmatch(k)
+
+		if diffOld.NewComputed && len(match) == 2 {
+			matchLen := len(match[1])
+
+			// This is a computed list, set, or map, so remove any keys with
+			// this prefix from the check list.
+			kprefix := k[:len(k)-matchLen]
+			for k2, _ := range checkOld {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkOld, k2)
+				}
+			}
+			for k2, _ := range checkNew {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkNew, k2)
+				}
+			}
+		}
+
+		// We don't compare the values because we can't currently actually
+		// guarantee to generate the same value for two diffs created from
+		// the same state+config: we have some pesky interpolation functions
+		// that do not behave as pure functions (uuid, timestamp) and so they
+		// can be different each time a diff is produced.
+		// FIXME: Re-organize our config handling so that we don't re-evaluate
+		// expressions when we produce a second comparison diff during
+		// apply (for EvalCompareDiff).
+	}
+
+	// Check for leftover attributes
+	if len(checkNew) > 0 {
+		extras := make([]string, 0, len(checkNew))
+		for attr, _ := range checkNew {
+			extras = append(extras, attr)
+		}
+		return false,
+			fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+	}
+
+	return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff + +func (s moduleDiffSort) Len() int { return len(s) } +func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s moduleDiffSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} diff --git a/pkg/legacy/tofu/diff_test.go b/pkg/legacy/tofu/diff_test.go new file mode 100644 index 00000000000..af89ba8f868 --- /dev/null +++ b/pkg/legacy/tofu/diff_test.go @@ -0,0 +1,1257 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestDiffEmpty(t *testing.T) { + var diff *Diff + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff = new(Diff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestDiffEmpty_taintedIsNotEmpty(t *testing.T) { + diff := new(Diff) + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + DestroyTainted: true, + } + + if diff.Empty() { + t.Fatal("should not be empty, since DestroyTainted was set") + } +} + +func TestDiffEqual(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + Equal bool + }{ + "nil": { + nil, + new(Diff), + false, + }, + + "empty": { + new(Diff), + new(Diff), + true, + }, + + "different module order": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", 
"foo"}}, + &ModuleDiff{Path: []string{"root", "bar"}}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "bar"}}, + &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + true, + }, + + "different module diff destroys": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: false}, + }, + }, + true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + actual := tc.D1.Equal(tc.D2) + if actual != tc.Equal { + t.Fatalf("expected: %v\n\n%#v\n\n%#v", tc.Equal, tc.D1, tc.D2) + } + }) + } +} + +func TestDiffPrune(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + }{ + "nil": { + nil, + nil, + }, + + "empty": { + new(Diff), + new(Diff), + }, + + "empty module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + &Diff{}, + }, + + "destroy module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + tc.D1.Prune() + if !tc.D1.Equal(tc.D2) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.D1, tc.D2) + } + }) + } +} + +func TestModuleDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *ModuleDiff + Result DiffChangeType + }{ + { + &ModuleDiff{}, + DiffNone, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{Destroy: true}, + }, + }, + DiffDestroy, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + }, + }, + DiffUpdate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + 
Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + DiffCreate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + DiffUpdate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestDiff_DeepCopy(t *testing.T) { + cases := map[string]*Diff{ + "empty": &Diff{}, + + "basic diff": &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{ + Path: []string{"root"}, + Resources: map[string]*InstanceDiff{ + "aws_instance.foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "num": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + dup := tc.DeepCopy() + if !reflect.DeepEqual(dup, tc) { + t.Fatalf("\n%#v\n\n%#v", dup, tc) + } + }) + } +} + +func TestModuleDiff_Empty(t *testing.T) { + diff := new(ModuleDiff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources = map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{}, + } + + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources["nodeA"].Attributes = map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } + + diff.Resources["nodeA"].Attributes = nil + diff.Resources["nodeA"].Destroy = true + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestModuleDiff_String(t *testing.T) { + diff := &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "bar": 
&ResourceAttrDiff{ + Old: "foo", + NewComputed: true, + }, + "longfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + RequiresNew: true, + }, + "secretfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + Sensitive: true, + }, + }, + }, + }, + } + + actual := strings.TrimSpace(diff.String()) + expected := strings.TrimSpace(moduleDiffStrBasic) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } +} + +func TestInstanceDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *InstanceDiff + Result DiffChangeType + }{ + { + &InstanceDiff{}, + DiffNone, + }, + { + &InstanceDiff{Destroy: true}, + DiffDestroy, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + DiffUpdate, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffCreate, + }, + { + &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_Empty(t *testing.T) { + var rd *InstanceDiff + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = new(InstanceDiff) + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = &InstanceDiff{Destroy: true} + + if rd.Empty() { + t.Fatal("should not be empty") + } + + rd = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + New: "bar", + }, + }, + } + + if rd.Empty() { + t.Fatal("should not be empty") + } +} + +func 
TestModuleDiff_Instances(t *testing.T) { + yesDiff := &InstanceDiff{Destroy: true} + noDiff := &InstanceDiff{Destroy: true, DestroyTainted: true} + + cases := []struct { + Diff *ModuleDiff + Id string + Result []*InstanceDiff + }{ + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "foo_bar": noDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + } + + for i, tc := range cases { + actual := tc.Diff.Instances(tc.Id) + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_RequiresNew(t *testing.T) { + rd := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + } + + if rd.RequiresNew() { + t.Fatal("should not require new") + } + + rd.Attributes["foo"].RequiresNew = true + + if !rd.RequiresNew() { + t.Fatal("should require new") + } +} + +func TestInstanceDiff_RequiresNew_nil(t *testing.T) { + var rd *InstanceDiff + + if rd.RequiresNew() { + t.Fatal("should not require new") + } +} + +func TestInstanceDiffSame(t *testing.T) { + cases := []struct { + One, Two *InstanceDiff + Same bool + Reason string + }{ + { + &InstanceDiff{}, + &InstanceDiff{}, + true, + "", + }, + + { + nil, + nil, + true, + "", + }, + + { + &InstanceDiff{Destroy: false}, + &InstanceDiff{Destroy: true}, + false, + "diff: Destroy; old: false, new: true", + }, + + { + &InstanceDiff{Destroy: true}, + &InstanceDiff{Destroy: true}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: 
map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "bar": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + false, + "attribute mismatch: bar", + }, + + // Extra attributes + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + "bar": &ResourceAttrDiff{}, + }, + }, + false, + "extra attributes: bar", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: true}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: false}, + }, + }, + false, + "diff RequiresNew; old: true, new: false", + }, + + // NewComputed on primitive + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + }, + }, + true, + "", + }, + + // NewComputed on primitive, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // NewComputed on set, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.1": &ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + }, + "foo.2": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + true, + "", + }, + + { + 
&InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{NewComputed: true}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.0": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change RequiresNew by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by elements + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "1", + }, + 
"foo.12": &ResourceAttrDiff{ + Old: "4", + New: "12", + RequiresNew: true, + }, + }, + + Destroy: true, + }, + true, + "", + }, + + // Computed sets may not contain all fields in the original diff, and + // because multiple entries for the same set can compute to the same + // hash before the values are computed or interpolated, the overall + // count can change as well. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.baz": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + // Computed values in maps will fail the "Same" check as well + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "0", + New: "1", + NewComputed: false, + }, + "foo.val": &ResourceAttrDiff{ + Old: "", + New: "something", + }, + }, + }, + true, + "", + }, + + // In a DESTROY/CREATE scenario, the plan diff will be run against the + // state of the old instance, while the apply diff will be run against an + // empty state (because the state is cleared when the destroy runs.) + // For complex attributes, this can result in keys that seem to disappear + // between the two diffs, when in reality everything is working just fine. + // + // Same() needs to take into account this scenario by analyzing NewRemoved + // and treating as "Same" a diff that does indeed have that key removed. 
+ { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + true, + "", + }, + + // Another thing that can occur in DESTROY/CREATE scenarios is that list + // values that are going to zero have diffs that show up at plan time but + // are gone at apply time. The NewRemoved handling catches the fields and + // treats them as OK, but it also needs to treat the .# field itself as + // okay to be present in the old diff but not in the new one. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + // Innner computed set should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: 
"", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.~2.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.42.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // Innner computed list should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // When removing all collection items, the diff is allowed to contain + // nothing when re-creating the resource. This should be the "Same" + // since we said we were going from 1 to 0. 
+ { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.bar": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.0": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + // Make sure that DestroyTainted diffs pass as well, especially when diff + // two works off of no state. + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + }, + }, + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + true, + "", + }, + // RequiresNew in different attribute + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "baz", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + same, reason := tc.One.Same(tc.Two) + if same != tc.Same { + t.Fatalf("%d: expected same: %t, got %t (%s)\n\n one: %#v\n\ntwo: %#v", + i, tc.Same, same, reason, tc.One, tc.Two) + } + if reason != tc.Reason { + t.Fatalf( + "%d: bad reason\n\nexpected: %#v\n\ngot: %#v", i, tc.Reason, reason) + } + }) + } +} + +const moduleDiffStrBasic = ` +CREATE: nodeA + bar: "foo" => "" + foo: "foo" => 
"bar" + longfoo: "foo" => "bar" (forces new resource) + secretfoo: "" => "" (attribute changed) +` + +func TestCountFlatmapContainerValues(t *testing.T) { + for i, tc := range []struct { + attrs map[string]string + key string + count string + }{ + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.2.list.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"map.#": "3", "map.a": "b", "map.a.#": "0", "map.b": "4"}, + key: "map.#", + count: "2", + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + count := countFlatmapContainerValues(tc.key, tc.attrs) + if count != tc.count { + t.Fatalf("expected %q, got %q", tc.count, count) + } + }) + } +} diff --git a/pkg/legacy/tofu/features.go b/pkg/legacy/tofu/features.go new file mode 100644 index 00000000000..5f3fe1e9207 --- /dev/null +++ b/pkg/legacy/tofu/features.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/pkg/legacy/tofu/instancetype.go b/pkg/legacy/tofu/instancetype.go new file mode 100644 index 00000000000..3217c59cea2 --- /dev/null +++ b/pkg/legacy/tofu/instancetype.go @@ -0,0 +1,18 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances store in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/pkg/legacy/tofu/instancetype_string.go b/pkg/legacy/tofu/instancetype_string.go new file mode 100644 index 00000000000..8fc2e159fe3 --- /dev/null +++ b/pkg/legacy/tofu/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. + +package tofu + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypePrimary-1] + _ = x[TypeTainted-2] + _ = x[TypeDeposed-3] +} + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/pkg/legacy/tofu/provider_mock.go b/pkg/legacy/tofu/provider_mock.go new file mode 100644 index 00000000000..ef9df6a1768 --- /dev/null +++ b/pkg/legacy/tofu/provider_mock.go @@ -0,0 +1,376 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "encoding/json" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/providers" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetProviderSchemaResponse for compatibility with old tests + + ValidateProviderConfigCalled bool + ValidateProviderConfigResponse providers.ValidateProviderConfigResponse + ValidateProviderConfigRequest providers.ValidateProviderConfigRequest + ValidateProviderConfigFn func(providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse + + ValidateResourceConfigCalled bool + ValidateResourceConfigTypeName string + ValidateResourceConfigResponse providers.ValidateResourceConfigResponse + ValidateResourceConfigRequest providers.ValidateResourceConfigRequest + ValidateResourceConfigFn func(providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse + + ValidateDataResourceConfigCalled bool + ValidateDataResourceConfigTypeName string + ValidateDataResourceConfigResponse providers.ValidateDataResourceConfigResponse + ValidateDataResourceConfigRequest providers.ValidateDataResourceConfigRequest + ValidateDataResourceConfigFn func(providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + 
UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureProviderCalled bool + ConfigureProviderResponse providers.ConfigureProviderResponse + ConfigureProviderRequest providers.ConfigureProviderRequest + ConfigureProviderFn func(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error +} + +func (p *MockProvider) GetProviderSchema() providers.GetProviderSchemaResponse { + p.Lock() + defer 
p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetProviderSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetProviderSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProviderConfigCalled = true + p.ValidateProviderConfigRequest = r + if p.ValidateProviderConfigFn != nil { + return p.ValidateProviderConfigFn(r) + } + return p.ValidateProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceConfigCalled = true + p.ValidateResourceConfigRequest = r + + if p.ValidateResourceConfigFn != nil { + return p.ValidateResourceConfigFn(r) + } + + return p.ValidateResourceConfigResponse +} + +func (p *MockProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataResourceConfigCalled = true + p.ValidateDataResourceConfigRequest = r + + if p.ValidateDataResourceConfigFn 
!= nil {
		return p.ValidateDataResourceConfigFn(r)
	}

	return p.ValidateDataResourceConfigResponse
}

// UpgradeResourceState records the upgrade call and produces an upgraded
// state. Resolution order: UpgradeResourceStateFn, then the canned
// UpgradeResourceStateResponse; only if the canned response carries no
// state is the raw flatmap/JSON state decoded against the resource schema.
func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
	p.Lock()
	defer p.Unlock()

	p.UpgradeResourceStateCalled = true
	p.UpgradeResourceStateRequest = r

	if p.UpgradeResourceStateFn != nil {
		return p.UpgradeResourceStateFn(r)
	}

	resp := p.UpgradeResourceStateResponse

	if resp.UpgradedState == cty.NilVal {
		// Look up the schema only when we actually need it to decode raw
		// state; resolving it before the Fn/response shortcuts (as the
		// previous code did) was wasted work and could panic for type
		// names missing from the mocked schema.
		schemas := p.getSchema()
		schema := schemas.ResourceTypes[r.TypeName]
		schemaType := schema.Block.ImpliedType()

		switch {
		case r.RawStateFlatmap != nil:
			v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
			if err != nil {
				resp.Diagnostics = resp.Diagnostics.Append(err)
				return resp
			}
			resp.UpgradedState = v
		case len(r.RawStateJSON) > 0:
			v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)
			if err != nil {
				resp.Diagnostics = resp.Diagnostics.Append(err)
				return resp
			}
			resp.UpgradedState = v
		}
	}
	return resp
}

// ConfigureProvider records the configure call and answers from
// ConfigureProviderFn when set, otherwise the canned response.
func (p *MockProvider) ConfigureProvider(r providers.ConfigureProviderRequest) providers.ConfigureProviderResponse {
	p.Lock()
	defer p.Unlock()

	p.ConfigureProviderCalled = true
	p.ConfigureProviderRequest = r

	if p.ConfigureProviderFn != nil {
		return p.ConfigureProviderFn(r)
	}

	return p.ConfigureProviderResponse
}

// Stop records the stop request and answers from StopFn when set,
// otherwise StopResponse.
func (p *MockProvider) Stop() error {
	// We intentionally don't lock in this one because the whole point of this
	// method is to be called concurrently with another operation that can
	// be cancelled. The provider itself is responsible for handling
	// any concurrency concerns in this case.

	p.StopCalled = true
	if p.StopFn != nil {
		return p.StopFn()
	}

	return p.StopResponse
}

// ReadResource records the read and returns, in order of preference: the
// ReadResourceFn result, the canned NewState (coerced to the schema), or
// simply the prior state echoed back unchanged.
func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
	p.Lock()
	defer p.Unlock()

	p.ReadResourceCalled = true
	p.ReadResourceRequest = r

	if p.ReadResourceFn != nil {
		return p.ReadResourceFn(r)
	}

	resp := p.ReadResourceResponse
	if resp.NewState != cty.NilVal {
		// make sure the NewState fits the schema
		// This isn't always the case for the existing tests
		newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState)
		if err != nil {
			panic(err)
		}
		resp.NewState = newState
		return resp
	}

	// just return the same state we received
	resp.NewState = r.PriorState
	return resp
}

// PlanResourceChange records the plan call and answers from
// PlanResourceChangeFn when set, otherwise the canned response.
func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
	p.Lock()
	defer p.Unlock()

	p.PlanResourceChangeCalled = true
	p.PlanResourceChangeRequest = r

	if p.PlanResourceChangeFn != nil {
		return p.PlanResourceChangeFn(r)
	}

	return p.PlanResourceChangeResponse
}

// ApplyResourceChange records the apply call. Note that, unlike the other
// methods, the lock is released before ApplyResourceChangeFn is invoked,
// so the hook may call back into the mock without deadlocking.
func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
	p.Lock()
	p.ApplyResourceChangeCalled = true
	p.ApplyResourceChangeRequest = r
	p.Unlock()

	if p.ApplyResourceChangeFn != nil {
		return p.ApplyResourceChangeFn(r)
	}

	return p.ApplyResourceChangeResponse
}

// ImportResourceState records the import call. When ImportStateReturn is
// populated, each legacy InstanceState in it is shimmed into a
// providers.ImportedResource and appended to the canned response before
// that response (or the ImportResourceStateFn result) is returned.
func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
	p.Lock()
	defer p.Unlock()

	if p.ImportStateReturn != nil {
		for _, is := range p.ImportStateReturn {
			if is.Attributes == nil {
				is.Attributes = make(map[string]string)
			}
			is.Attributes["id"] = is.ID

			typeName := is.Ephemeral.Type
			// Use the requested type if the resource has no type of its own.
			// We still return the empty type, which will error, but this prevents a panic.
			if typeName == "" {
				typeName = r.TypeName
			}

			schema := p.GetSchemaReturn.ResourceTypes[typeName]
			if schema == nil {
				panic("no schema found for " + typeName)
			}

			private, err := json.Marshal(is.Meta)
			if err != nil {
				panic(err)
			}

			state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType())
			if err != nil {
				panic(err)
			}

			state, err = schema.CoerceValue(state)
			if err != nil {
				panic(err)
			}

			// NOTE: TypeName deliberately stays is.Ephemeral.Type (possibly
			// empty) rather than the fallback typeName used for the schema
			// lookup above, per the original comment's intent.
			p.ImportResourceStateResponse.ImportedResources = append(
				p.ImportResourceStateResponse.ImportedResources,
				providers.ImportedResource{
					TypeName: is.Ephemeral.Type,
					State:    state,
					Private:  private,
				})
		}
	}

	p.ImportResourceStateCalled = true
	p.ImportResourceStateRequest = r
	if p.ImportResourceStateFn != nil {
		return p.ImportResourceStateFn(r)
	}

	return p.ImportResourceStateResponse
}

// ReadDataSource records the read call and answers from ReadDataSourceFn
// when set, otherwise the canned response.
func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
	p.Lock()
	defer p.Unlock()

	p.ReadDataSourceCalled = true
	p.ReadDataSourceRequest = r

	if p.ReadDataSourceFn != nil {
		return p.ReadDataSourceFn(r)
	}

	return p.ReadDataSourceResponse
}

// GetFunctions is not supported by this mock.
func (p *MockProvider) GetFunctions() providers.GetFunctionsResponse {
	panic("Not Implemented")
}

// CallFunction is not supported by this mock.
func (p *MockProvider) CallFunction(r providers.CallFunctionRequest) providers.CallFunctionResponse {
	panic("Not Implemented")
}

// Close records the close call and returns the canned CloseError.
func (p *MockProvider) Close() error {
	p.CloseCalled = true
	return p.CloseError
}

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sync" + + "github.com/kubegems/opentofu/pkg/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { + return p.GetSchemaResponse +} + +func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProvisionerConfigCalled = true + p.ValidateProvisionerConfigRequest = r + if p.ValidateProvisionerConfigFn != nil { + return p.ValidateProvisionerConfigFn(r) + } + return p.ValidateProvisionerConfigResponse +} + +func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { + p.Lock() + defer p.Unlock() + + p.ProvisionResourceCalled = true + p.ProvisionResourceRequest = r + if p.ProvisionResourceFn != nil { + fn := p.ProvisionResourceFn + return fn(r) + } + + return p.ProvisionResourceResponse +} + +func (p *MockProvisioner) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provisioner itself is responsible for handling + // any concurrency concerns in this case. + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvisioner) Close() error { + p.Lock() + defer p.Unlock() + + p.CloseCalled = true + if p.CloseFn != nil { + return p.CloseFn() + } + + return p.CloseResponse +} diff --git a/pkg/legacy/tofu/resource.go b/pkg/legacy/tofu/resource.go new file mode 100644 index 00000000000..11de088a440 --- /dev/null +++ b/pkg/legacy/tofu/resource.go @@ -0,0 +1,521 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" +) + +// Resource is a legacy way to identify a particular resource instance. +// +// New code should use addrs.ResourceInstance instead. This is still here +// only for codepaths that haven't been updated yet. +type Resource struct { + // These are all used by the new EvalNode stuff. + Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Flags ResourceFlag +} + +// NewResource constructs a legacy Resource object from an +// addrs.ResourceInstance value. +// +// This is provided to shim to old codepaths that haven't been updated away +// from this type yet. Since this old type is not able to represent instances +// that have string keys, this function will panic if given a resource address +// that has a string key. +func NewResource(addr addrs.ResourceInstance) *Resource { + ret := &Resource{ + Name: addr.Resource.Name, + Type: addr.Resource.Type, + } + + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + ret.CountIndex = int(tk) + default: + panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) + } + } + + return ret +} + +// ResourceFlag specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. 
+type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string +} + +// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. +// +// InstanceInfo is a legacy type, and uses of it should be gradually replaced +// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as +// appropriate. +// +// The legacy InstanceInfo type cannot represent module instances with instance +// keys, so this function will panic if given such a path. Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. +func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name + } + + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. 
Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." + id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } + } + + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } +} + +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. + id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. 
+ panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. 
+// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." 
+ } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. 
+ check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// CheckSet checks that the given list of configuration keys is +// properly set. If not, errors are returned for each unset key. +// +// This is useful to be called in the Validate method of a ResourceProvider. +func (c *ResourceConfig) CheckSet(keys []string) []error { + var errs []error + + for _, k := range keys { + if !c.IsSet(k) { + errs = append(errs, fmt.Errorf("%s must be set", k)) + } + } + + return errs +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. 
+ v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. +func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. 
+ for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/pkg/legacy/tofu/resource_address.go b/pkg/legacy/tofu/resource_address.go new file mode 100644 index 00000000000..f3402a24aa1 --- /dev/null +++ b/pkg/legacy/tofu/resource_address.go @@ -0,0 +1,625 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. 
+type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + + n.Path = append(n.Path, r.Path...) + + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// HasResourceSpec returns true if the address has a resource spec, as +// defined in the documentation: +// +// https://opentofu.org/docs/cli/state/resource-addressing/ +// +// In particular, this returns false if the address contains only +// a module path, thus addressing the entire module. 
+func (r *ResourceAddress) HasResourceSpec() bool { + return r.Type != "" && r.Name != "" +} + +// WholeModuleAddress returns the resource address that refers to all +// resources in the same module as the receiver address. +func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { + return &ResourceAddress{ + Path: r.Path, + Index: -1, + InstanceTypeSet: false, + } +} + +// MatchesResourceConfig returns true if the receiver matches the given +// configuration resource within the given _static_ module path. Note that +// the module path in a resource address is a _dynamic_ module path, and +// multiple dynamic resource paths may map to a single static path if +// count and for_each are in use on module calls. +// +// Since resource configuration blocks represent all of the instances of +// a multi-instance resource, the index of the address (if any) is not +// considered. +func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { + if r.HasResourceSpec() { + // FIXME: Some ugliness while we are between worlds. Functionality + // in "addrs" should eventually replace this ResourceAddress idea + // completely, but for now we'll need to translate to the old + // way of representing resource modes. + switch r.Mode { + case ManagedResourceMode: + if rc.Mode != addrs.ManagedResourceMode { + return false + } + case DataResourceMode: + if rc.Mode != addrs.DataResourceMode { + return false + } + } + if r.Type != rc.Type || r.Name != rc.Name { + return false + } + } + + addrPath := r.Path + + // normalize + if len(addrPath) == 0 { + addrPath = nil + } + if len(path) == 0 { + path = nil + } + rawPath := []string(path) + return reflect.DeepEqual(addrPath, rawPath) +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. 
+func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case ManagedResourceMode: + // Done + case DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. 
+ if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %w", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a +// resource name as described in a module diff. +// +// For historical reasons a different addressing format is used in this +// context. The internal format should not be shown in the UI and instead +// this function should be used to translate to a ResourceAddress and +// then, where appropriate, use the String method to produce a canonical +// resource address string for display in the UI. +// +// The given path slice must be empty (or nil) for the root module, and +// otherwise consist of a sequence of module names traversing down into +// the module tree. If a non-nil path is provided, the caller must not +// modify its underlying array after passing it to this function. 
+func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { + addr, err := parseResourceAddressInternal(key) + if err != nil { + return nil, err + } + addr.Path = path + return addr, nil +} + +// NewLegacyResourceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Type, + Name: addr.Resource.Name, + } + + switch addr.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + ret.Index = -1 + + return ret +} + +// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. 
+func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Resource.Type, + Name: addr.Resource.Resource.Name, + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + + if addr.Resource.Key == addrs.NoKey { + ret.Index = -1 + } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { + ret.Index = int(ik) + } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { + ret.Index = -1 + } else { + panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) + } + + return ret +} + +// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to +// the new resource address type addrs.AbsResourceInstance. +// +// This method can be used only on an address that has a resource specification. +// It will panic if called on a module-path-only ResourceAddress. Use +// method HasResourceSpec to check before calling, in contexts where it is +// unclear. +// +// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" +// states, and so if these are present on the receiver then they are discarded. 
+// +// This is provided for shimming purposes so that we can easily adapt functions +// that are returning the legacy ResourceAddress type, for situations where +// the new type is required. +func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { + if !addr.HasResourceSpec() { + panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") + } + + ret := addrs.AbsResourceInstance{ + Module: addr.ModuleInstanceAddr(), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Type: addr.Type, + Name: addr.Name, + }, + }, + } + + switch addr.Mode { + case ManagedResourceMode: + ret.Resource.Resource.Mode = addrs.ManagedResourceMode + case DataResourceMode: + ret.Resource.Resource.Mode = addrs.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) + } + + if addr.Index != -1 { + ret.Resource.Key = addrs.IntKey(addr.Index) + } + + return ret +} + +// ModuleInstanceAddr returns the module path portion of the receiver as a +// addrs.ModuleInstance value. +func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { + path := make(addrs.ModuleInstance, len(addr.Path)) + for i, name := range addr.Path { + path[i] = addrs.ModuleInstanceStep{Name: name} + } + return path +} + +// Contains returns true if and only if the given node is contained within +// the receiver. +// +// Containment is defined in terms of the module and resource hierarchy: +// a resource is contained within its module and any ancestor modules, +// an indexed resource instance is contained with the unindexed resource, etc. +func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { + ourPath := addr.Path + givenPath := other.Path + if len(givenPath) < len(ourPath) { + return false + } + for i := range ourPath { + if ourPath[i] != givenPath[i] { + return false + } + } + + // If the receiver is a whole-module address then the path prefix + // matching is all we need. 
+ if !addr.HasResourceSpec() { + return true + } + + if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { + return false + } + + if addr.Index != -1 && addr.Index != other.Index { + return false + } + + if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { + return false + } + + return true +} + +// Equals returns true if the receiver matches the given address. +// +// The name of this method is a misnomer, since it doesn't test for exact +// equality. Instead, it tests that the _specified_ parts of each +// address match, treating any unspecified parts as wildcards. +// +// See also Contains, which takes a more hierarchical approach to comparing +// addresses. +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || + reflect.DeepEqual(addr.Path, other.Path) + + indexMatch := addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index + + nameMatch := addr.Name == "" || + other.Name == "" || + addr.Name == other.Name + + typeMatch := addr.Type == "" || + other.Type == "" || + addr.Type == other.Type + + // mode is significant only when type is set + modeMatch := addr.Type == "" || + other.Type == "" || + addr.Mode == other.Mode + + return pathMatch && + indexMatch && + addr.InstanceType == other.InstanceType && + nameMatch && + typeMatch && + modeMatch +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. 
+func (addr *ResourceAddress) Less(other *ResourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
+ return addr.InstanceType < other.InstanceType + + default: + return false + + } +} + +func ParseResourceIndex(s string) (int, error) { + if s == "" { + return -1, nil + } + return strconv.Atoi(s) +} + +func ParseResourcePath(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ".") + path := make([]string, 0, len(parts)) + for _, s := range parts { + // Due to the limitations of the regexp match below, the path match has + // some noise in it we have to filter out :| + if s == "" || s == "module" { + continue + } + path = append(path, s) + } + return path +} + +func ParseInstanceType(s string) (InstanceType, error) { + switch s { + case "", "primary": + return TypePrimary, nil + case "deposed": + return TypeDeposed, nil + case "tainted": + return TypeTainted, nil + default: + return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string "aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "module.foo.module.bar" (optional) + `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` + + // possibly "data.", if targeting is a data resource + `(?P<data_prefix>(?:data\.)?)` + + // "aws_instance.web" (optional when module path specified) + `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P<instance_type>\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P<index>\d+)\])?` + + `\z`) + + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("invalid resource address %q", s) + } + + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + + return matches, nil +} diff --git a/pkg/legacy/tofu/resource_address_test.go b/pkg/legacy/tofu/resource_address_test.go new file mode 100644 index 00000000000..4eb420c372e --- /dev/null +++ 
b/pkg/legacy/tofu/resource_address_test.go @@ -0,0 +1,1334 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +func TestParseResourceAddressInternal(t *testing.T) { + cases := map[string]struct { + Input string + Expected *ResourceAddress + Output string + }{ + "basic resource": { + "aws_instance.foo", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "aws_instance.foo", + }, + + "basic resource with count": { + "aws_instance.foo.1", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + "aws_instance.foo[1]", + }, + + "data resource": { + "data.aws_ami.foo", + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_ami", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "data.aws_ami.foo", + }, + + "data resource with count": { + "data.aws_ami.foo.1", + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_ami", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + "data.aws_ami.foo[1]", + }, + + "non-data resource with 4 elements": { + "aws_instance.foo.bar.1", + nil, + "", + }, + } + + for tn, tc := range cases { + t.Run(tc.Input, func(t *testing.T) { + out, err := parseResourceAddressInternal(tc.Input) + if (err != nil) != (tc.Expected == nil) { + t.Fatalf("%s: unexpected err: %#v", tn, err) + } + if err != nil { + return + } + + if !reflect.DeepEqual(out, tc.Expected) { + t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) + } + + // Compare outputs if those exist + expected := tc.Input + if tc.Output != "" { + expected = tc.Output + } + if out.String() != expected { + t.Fatalf("bad: 
%q\n\nexpected: %s\n\ngot: %s", tn, expected, out) + } + + // Compare equality because the internal parse is used + // to compare equality to equal inputs. + if !out.Equals(tc.Expected) { + t.Fatalf("expected equality:\n\n%#v\n\n%#v", out, tc.Expected) + } + }) + } +} + +func TestParseResourceAddress(t *testing.T) { + cases := map[string]struct { + Input string + Expected *ResourceAddress + Output string + Err bool + }{ + "implicit primary managed instance, no specific index": { + "aws_instance.foo", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "implicit primary data instance, no specific index": { + "data.aws_instance.foo", + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "implicit primary, explicit index": { + "aws_instance.foo[2]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + "", + false, + }, + "implicit primary, explicit index over ten": { + "aws_instance.foo[12]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 12, + }, + "", + false, + }, + "explicit primary, explicit index": { + "aws_instance.foo.primary[2]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + InstanceTypeSet: true, + Index: 2, + }, + "", + false, + }, + "tainted": { + "aws_instance.foo.tainted", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + InstanceTypeSet: true, + Index: -1, + }, + "", + false, + }, + "deposed": { + "aws_instance.foo.deposed", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeDeposed, + 
InstanceTypeSet: true, + Index: -1, + }, + "", + false, + }, + "with a hyphen": { + "aws_instance.foo-bar", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo-bar", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "managed in a module": { + "module.child.aws_instance.foo", + &ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "data in a module": { + "module.child.data.aws_instance.foo", + &ResourceAddress{ + Path: []string{"child"}, + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "nested modules": { + "module.a.module.b.module.forever.aws_instance.foo", + &ResourceAddress{ + Path: []string{"a", "b", "forever"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "just a module": { + "module.a", + &ResourceAddress{ + Path: []string{"a"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "just a nested module": { + "module.a.module.b", + &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "module missing resource type": { + "module.name.foo", + nil, + "", + true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + out, err := ParseResourceAddress(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("%s: unexpected err: %#v", tn, err) + } + if tc.Err { + return + } + + if !reflect.DeepEqual(out, tc.Expected) { + t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) + } + + expected := tc.Input + if tc.Output != "" { + expected = tc.Output + } + if out.String() != expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, expected, out) + } + }) + 
} +} + +func TestResourceAddressContains(t *testing.T) { + tests := []struct { + Address *ResourceAddress + Other *ResourceAddress + Want bool + }{ + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + Mode: ManagedResourceMode, + 
Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", "baz"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", "baz"}, + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", "baz", "foo", "pizza"}, + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"baz"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"baz", "bar"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + 
&ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar", "baz"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + Index: 1, + InstanceType: TypePrimary, + Mode: ManagedResourceMode, + }, + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + Index: -1, + InstanceType: TypePrimary, + Mode: ManagedResourceMode, + }, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s contains %s", test.Address, test.Other), func(t *testing.T) { + got := test.Address.Contains(test.Other) + if got != test.Want { + t.Errorf( + "wrong result\nrecv: %s\ngiven: %s\ngot: %#v\nwant: %#v", + test.Address, test.Other, + got, test.Want, + ) + } + }) + } +} + +func TestResourceAddressEquals(t *testing.T) { + cases := map[string]struct { + Address *ResourceAddress + Other interface{} + Expect bool + }{ + "basic match": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "address does not set index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 3, + }, + Expect: true, + }, + "other does not set index": { + Address: &ResourceAddress{ + Mode: 
ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 3, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "neither sets index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "index over ten": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 13, + }, + Expect: false, + }, + "different type": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_vpc", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different mode": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different name": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different instance type": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + 
Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + Index: 0, + }, + Expect: false, + }, + "different index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + Expect: false, + }, + "module address matches address of managed resource inside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a", "b"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "module address matches address of data resource inside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a", "b"}, + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "module address doesn't match managed resource outside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "module address doesn't match data resource outside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a"}, + Mode: DataResourceMode, 
+ Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "nil path vs empty path should match": { + Address: &ResourceAddress{ + Path: []string{}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: nil, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + } + + for tn, tc := range cases { + actual := tc.Address.Equals(tc.Other) + if actual != tc.Expect { + t.Fatalf("%q: expected equals: %t, got %t for:\n%#v\n%#v", + tn, tc.Expect, actual, tc.Address, tc.Other) + } + } +} + +func TestResourceAddressStateId(t *testing.T) { + cases := map[string]struct { + Input *ResourceAddress + Expected string + }{ + "basic resource": { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "aws_instance.foo", + }, + + "basic resource with index": { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + "aws_instance.foo.2", + }, + + "data resource": { + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "data.aws_instance.foo", + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + actual := tc.Input.stateId() + if actual != tc.Expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, tc.Expected, actual) + } + }) + } +} + +func TestResourceAddressHasResourceSpec(t *testing.T) { + cases := []struct { + Input string + Want bool + }{ + { + "module.foo", + false, + }, + { + "module.foo.module.bar", + false, + }, + { + "null_resource.baz", + true, + }, + { + "null_resource.baz[0]", + true, + }, + { + "data.null_data_source.baz", + true, + }, + { + "data.null_data_source.baz[0]", + true, + 
}, + { + "module.foo.null_resource.baz", + true, + }, + { + "module.foo.data.null_data_source.baz", + true, + }, + { + "module.foo.module.bar.null_resource.baz", + true, + }, + } + + for _, test := range cases { + t.Run(test.Input, func(t *testing.T) { + addr, err := ParseResourceAddress(test.Input) + if err != nil { + t.Fatalf("error parsing address: %s", err) + } + got := addr.HasResourceSpec() + if got != test.Want { + t.Fatalf("%q: wrong result %#v; want %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestResourceAddressWholeModuleAddress(t *testing.T) { + cases := []struct { + Input string + Want string + }{ + { + "module.foo", + "module.foo", + }, + { + "module.foo.module.bar", + "module.foo.module.bar", + }, + { + "null_resource.baz", + "", + }, + { + "null_resource.baz[0]", + "", + }, + { + "data.null_data_source.baz", + "", + }, + { + "data.null_data_source.baz[0]", + "", + }, + { + "module.foo.null_resource.baz", + "module.foo", + }, + { + "module.foo.data.null_data_source.baz", + "module.foo", + }, + { + "module.foo.module.bar.null_resource.baz", + "module.foo.module.bar", + }, + } + + for _, test := range cases { + t.Run(test.Input, func(t *testing.T) { + addr, err := ParseResourceAddress(test.Input) + if err != nil { + t.Fatalf("error parsing address: %s", err) + } + gotAddr := addr.WholeModuleAddress() + got := gotAddr.String() + if got != test.Want { + t.Fatalf("%q: wrong result %#v; want %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestResourceAddressMatchesResourceConfig(t *testing.T) { + root := []string(nil) + child := []string{"child"} + grandchild := []string{"child", "grandchild"} + irrelevant := []string{"irrelevant"} + + tests := []struct { + Addr *ResourceAddress + ModulePath []string + Resource *configs.Resource + Want bool + }{ + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + root, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: 
"null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child"}, + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Mode: DataResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "pizza", + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "baz", + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + { + 
&ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d-%s", i, test.Addr), func(t *testing.T) { + got := test.Addr.MatchesResourceConfig(test.ModulePath, test.Resource) + if got != test.Want { + t.Errorf( + "wrong result\naddr: %s\nmod: %#v\nrsrc: %#v\ngot: %#v\nwant: %#v", + test.Addr, test.ModulePath, test.Resource, got, test.Want, + ) + } + }) + } +} + +func TestResourceAddressLess(t *testing.T) { + tests := []struct { + A string + B string + Want bool + }{ + { + "foo.bar", + "module.baz.foo.bar", + true, + }, + { + "module.baz.foo.bar", + "zzz.bar", // would sort after "module" in lexicographical sort + false, + }, + { + "module.baz.foo.bar", + "module.baz.foo.bar", + false, + }, + { + "module.baz.foo.bar", + "module.boz.foo.bar", + true, + }, + { + "module.boz.foo.bar", + "module.baz.foo.bar", + false, + }, + { + "a.b", + "b.c", + true, + }, + { + "a.b", + "a.c", + true, + }, + { + "c.b", + "b.c", + false, + }, + { + "a.b[9]", + "a.b[10]", + true, + }, + { + "b.b[9]", + "a.b[10]", + false, + }, + { + "a.b", + "a.b.deposed", + true, + }, + { + "a.b.tainted", + "a.b.deposed", + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s < %s", test.A, test.B), func(t *testing.T) { + addrA, err := ParseResourceAddress(test.A) + if err != nil { + t.Fatal(err) + } + addrB, err := ParseResourceAddress(test.B) + if err != nil { + t.Fatal(err) + } + got := addrA.Less(addrB) + invGot := addrB.Less(addrA) + if got != test.Want { + t.Errorf( + "wrong result\ntest: %s < %s\ngot: %#v\nwant: %#v", + test.A, test.B, got, test.Want, + ) + } + if test.A != test.B { // inverse test doesn't apply when equal + if invGot != !test.Want { + t.Errorf( + "wrong inverse result\ntest: %s < %s\ngot: %#v\nwant: 
%#v", + test.B, test.A, invGot, !test.Want, + ) + } + } else { + if invGot != test.Want { + t.Errorf( + "wrong inverse result\ntest: %s < %s\ngot: %#v\nwant: %#v", + test.B, test.A, invGot, test.Want, + ) + } + } + }) + } +} diff --git a/pkg/legacy/tofu/resource_mode.go b/pkg/legacy/tofu/resource_mode.go new file mode 100644 index 00000000000..a366f56c807 --- /dev/null +++ b/pkg/legacy/tofu/resource_mode.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go + +// ResourceMode is deprecated, use addrs.ResourceMode instead. +// It has been preserved for backwards compatibility. +type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/pkg/legacy/tofu/resource_mode_string.go b/pkg/legacy/tofu/resource_mode_string.go new file mode 100644 index 00000000000..c778e34ee2e --- /dev/null +++ b/pkg/legacy/tofu/resource_mode_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package tofu + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ManagedResourceMode-0] + _ = x[DataResourceMode-1] +} + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/pkg/legacy/tofu/resource_provider.go b/pkg/legacy/tofu/resource_provider.go new file mode 100644 index 00000000000..2628fbfa0fb --- /dev/null +++ b/pkg/legacy/tofu/resource_provider.go @@ -0,0 +1,241 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// ResourceProvider is a legacy interface for providers. +// +// This is retained only for compatibility with legacy code. The current +// interface for providers is providers.Interface, in the sibling directory +// named "providers". +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // ProviderSchema returns the config schema for the main provider + // configuration, as would appear in a "provider" block in the + // configuration files. + // + // Currently not all providers support schema. Callers must therefore + // first call Resources and DataSources and ensure that at least one + // resource or data source has the SchemaAvailable flag set. + GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) + + // Input was used prior to v0.12 to ask the provider to prompt the user + // for input to complete the configuration. 
+ // + // From v0.12 onwards this method is never called because OpenTofu Core + // is able to handle the necessary input logic itself based on the + // schema returned from GetSchema. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if it occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for OpenTofu. + // Even if this isn't implemented to do anything (just returns nil), + // OpenTofu will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. OpenTofu core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). 
+ // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. 
+ // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in OpenTofu, + // therefore multiple states are returned. + ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. 
+ ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. 
+func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} + +const errPluginInit = ` +Plugin reinitialization required. Please run "tofu init". + +Plugins are external binaries that OpenTofu uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +OpenTofu automatically discovers provider requirements from your +configuration, including providers used in child modules. To see the +requirements and constraints, run "tofu providers". + +%s +` diff --git a/pkg/legacy/tofu/resource_provider_mock.go b/pkg/legacy/tofu/resource_provider_mock.go new file mode 100644 index 00000000000..1cca519559d --- /dev/null +++ b/pkg/legacy/tofu/resource_provider_mock.go @@ -0,0 +1,320 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sync" +) + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + CloseCalled bool + CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureProviderFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns 
[]string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} + +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil { + return p.ValidateResourceFn(t, 
c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureProviderFn != nil { + return p.ConfigureProviderFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = 
true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/pkg/legacy/tofu/resource_provisioner.go b/pkg/legacy/tofu/resource_provisioner.go new file mode 100644 index 00000000000..becd9f17277 --- /dev/null +++ b/pkg/legacy/tofu/resource_provisioner.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: 
MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/provisioners" +) + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// an OpenTofu configuration. +type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns an error. + // Instead of a diff, the ResourceConfig is provided since provisioners + // only run after a resource has been newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for OpenTofu. + // Even if this isn't implemented to do anything (just returns nil), + // OpenTofu will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. 
+ // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. OpenTofu core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. +type ProvisionerFactory = provisioners.Factory diff --git a/pkg/legacy/tofu/resource_provisioner_mock.go b/pkg/legacy/tofu/resource_provisioner_mock.go new file mode 100644 index 00000000000..a16d6160185 --- /dev/null +++ b/pkg/legacy/tofu/resource_provisioner_mock.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sync" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. +type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetConfigSchemaCalled bool + GetConfigSchemaReturnSchema *configschema.Block + GetConfigSchemaReturnError error + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) + +func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + p.GetConfigSchemaCalled = true + return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/pkg/legacy/tofu/resource_test.go b/pkg/legacy/tofu/resource_test.go new file mode 100644 index 00000000000..08ee2625133 --- /dev/null +++ b/pkg/legacy/tofu/resource_test.go @@ -0,0 +1,679 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/mitchellh/reflectwalk" +) + +func TestResourceConfigGet(t *testing.T) { + fooStringSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + fooListSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.List(cty.Number), Optional: true}, + }, + } + + cases := []struct { + Config cty.Value + Schema *configschema.Block + Key string + Value interface{} + }{ + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: fooStringSchema, + Key: "foo", + Value: "bar", + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + Schema: fooStringSchema, + Key: "foo", + Value: hcl2shim.UnknownVariableValue, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.0", + Value: 1, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.5", + Value: nil, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.-1", + Value: nil, + }, + + // get from map + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key", + Value: 1, + }, + + // get from map with dot in key + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + + // get from map with overlapping key names + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.2": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name.2", + Value: 2, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.foo": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "listkey": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(3), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.List(cty.Map(cty.Number)))), Optional: true}, + }, + }, + Key: "mapname.0.listkey.0.key", + Value: 3, + }, + } + + for i, tc := range cases { + rc := 
NewResourceConfigShimmed(tc.Config, tc.Schema) + + // Test getting a key + t.Run(fmt.Sprintf("get-%d", i), func(t *testing.T) { + v, ok := rc.Get(tc.Key) + if ok && v == nil { + t.Fatal("(nil, true) returned from Get") + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("%d bad: %#v", i, v) + } + }) + + // Test copying and equality + t.Run(fmt.Sprintf("copy-and-equal-%d", i), func(t *testing.T) { + copy := rc.DeepCopy() + if !reflect.DeepEqual(copy, rc) { + t.Fatalf("bad:\n\n%#v\n\n%#v", copy, rc) + } + + if !copy.Equal(rc) { + t.Fatalf("copy != rc:\n\n%#v\n\n%#v", copy, rc) + } + if !rc.Equal(copy) { + t.Fatalf("rc != copy:\n\n%#v\n\n%#v", copy, rc) + } + }) + } +} + +func TestResourceConfigDeepCopy_nil(t *testing.T) { + var nilRc *ResourceConfig + actual := nilRc.DeepCopy() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigDeepCopy_nilComputed(t *testing.T) { + rc := &ResourceConfig{} + actual := rc.DeepCopy() + if actual.ComputedKeys != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigEqual_nil(t *testing.T) { + var nilRc *ResourceConfig + notNil := NewResourceConfigShimmed(cty.EmptyObjectVal, &configschema.Block{}) + + if nilRc.Equal(notNil) { + t.Fatal("should not be equal") + } + + if notNil.Equal(nilRc) { + t.Fatal("should not be equal") + } +} + +func TestResourceConfigEqual_computedKeyOrder(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }) + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + rc := NewResourceConfigShimmed(v, schema) + rc2 := NewResourceConfigShimmed(v, schema) + + // Set the computed keys manually to force ordering to differ + rc.ComputedKeys = []string{"foo", "bar"} + rc2.ComputedKeys = []string{"bar", "foo"} + + if !rc.Equal(rc2) { + t.Fatal("should be equal") + } +} + +func TestUnknownCheckWalker(t *testing.T) { + cases := []struct { + Name 
string + Input interface{} + Result bool + }{ + { + "primitive", + 42, + false, + }, + + { + "primitive computed", + hcl2shim.UnknownVariableValue, + true, + }, + + { + "list", + []interface{}{"foo", hcl2shim.UnknownVariableValue}, + true, + }, + + { + "nested list", + []interface{}{ + "foo", + []interface{}{hcl2shim.UnknownVariableValue}, + }, + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + var w unknownCheckWalker + if err := reflectwalk.Walk(tc.Input, &w); err != nil { + t.Fatalf("err: %s", err) + } + + if w.Unknown != tc.Result { + t.Fatalf("bad: %v", w.Unknown) + } + }) + } +} + +func TestNewResourceConfigShimmed(t *testing.T) { + for _, tc := range []struct { + Name string + Val cty.Value + Schema *configschema.Block + Expected *ResourceConfig + }{ + { + Name: "empty object", + Val: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "basic", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{ + "foo": "bar", + }, + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + { + Name: "null string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": 
cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"foo"}, + Raw: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "null collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingList, + }, + "baz": { + Block: configschema.Block{}, + 
Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown in nested blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "list": cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": {Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.baz.0.list"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + }, + }, + { + Name: "unknown in set", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "val": { + Type: cty.String, + Optional: 
true, + }, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + }, + }, + { + Name: "unknown in attribute sets", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + "baz": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "val": cty.String, + })), + }, + "baz": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "obj": cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + }), + })), + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val", "baz.0.obj.attr", "baz.1.obj"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": 
map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + }, + }, + { + Name: "null blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingMap, + }, + "baz": { + Block: configschema.Block{}, + Nesting: configschema.NestingSingle, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + } { + t.Run(tc.Name, func(*testing.T) { + cfg := NewResourceConfigShimmed(tc.Val, tc.Schema) + if !tc.Expected.Equal(cfg) { + t.Fatalf("expected:\n%#v\ngot:\n%#v", tc.Expected, cfg) + } + }) + } +} diff --git a/pkg/legacy/tofu/schemas.go b/pkg/legacy/tofu/schemas.go new file mode 100644 index 00000000000..4cccc3a5c97 --- /dev/null +++ b/pkg/legacy/tofu/schemas.go @@ -0,0 +1,290 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Schemas is a container for various kinds of schema that OpenTofu needs +// during processing. +type Schemas struct { + Providers map[addrs.Provider]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. 
+// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[provider] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { + ps := ss.ProviderSchema(provider) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(provider) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. 
+// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. +func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[addrs.Provider]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(fqn addrs.Provider) { + name := fqn.String() + + if _, exists := schemas[fqn]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) + provider, err := components.ResourceProvider(fqn) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %w", name, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetProviderSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+ schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %w", name, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), + ) + } + } + + schemas[fqn] = s + + if resp.ProviderMeta.Block != nil { + s.ProviderMeta = resp.ProviderMeta.Block + } + } + + if config != nil { + for _, fqn := range config.ProviderTypes() { + ensure(fqn) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeAddr := range needed { + ensure(typeAddr) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %w", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %w", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. 
+ for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/pkg/legacy/tofu/state.go b/pkg/legacy/tofu/state.go new file mode 100644 index 00000000000..d452b582a5e --- /dev/null +++ b/pkg/legacy/tofu/state.go @@ -0,0 +1,2255 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" + "github.com/mitchellh/copystructure" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of OpenTofu disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". 
An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that OpenTofu +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of OpenTofu that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. 
+ Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + mu sync.Mutex + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. 
+ // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of OpenTofu is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. 
+// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by OpenTofu in order +// to behave properly. The core of OpenTofu will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. 
+ { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) 
+ + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. 
+ + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. 
+// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but OpenTofu +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. 
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// FromFutureTofu checks if this state was written by a OpenTofu +// version from the future. 
func (s *State) FromFutureTofu() bool {
	s.Lock()
	defer s.Unlock()

	// No TF version means it is certainly from the past
	if s.TFVersion == "" {
		return false
	}

	// NOTE(review): version.Must panics if TFVersion stored in the state
	// file is not a parseable version string — TODO confirm callers
	// validate the version before reaching this point.
	v := version.Must(version.NewVersion(s.TFVersion))
	return tfversion.SemVer.LessThan(v)
}

// Init establishes the state's required invariants, locking the state
// while it works.
func (s *State) Init() {
	s.Lock()
	defer s.Unlock()
	s.init()
}

// init establishes the state's invariants without locking: a version
// number, a root module, a lineage, and initialized nested structures.
// The caller must hold the state lock.
func (s *State) init() {
	if s.Version == 0 {
		s.Version = StateVersion
	}

	// The root module must always exist.
	if s.moduleByPath(addrs.RootModuleInstance) == nil {
		s.addModule(addrs.RootModuleInstance)
	}
	s.ensureHasLineage()

	for _, mod := range s.Modules {
		if mod != nil {
			mod.init()
		}
	}

	if s.Remote != nil {
		s.Remote.init()
	}

}

// EnsureHasLineage assigns a fresh lineage to the state if it does not
// already have one, locking the state while it works.
func (s *State) EnsureHasLineage() {
	s.Lock()
	defer s.Unlock()

	s.ensureHasLineage()
}

// ensureHasLineage is like EnsureHasLineage but assumes the caller
// already holds the state lock.
func (s *State) ensureHasLineage() {
	if s.Lineage == "" {
		lineage, err := uuid.GenerateUUID()
		if err != nil {
			// UUID generation failing indicates a broken environment;
			// there is no reasonable way to proceed without a lineage.
			panic(fmt.Errorf("Failed to generate lineage: %w", err))
		}
		s.Lineage = lineage
		log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
	} else {
		log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
	}
}

// AddModuleState insert this module state and override any existing ModuleState
func (s *State) AddModuleState(mod *ModuleState) {
	// Initialize the module before taking the state lock; init acquires
	// the module's own lock.
	mod.init()
	s.Lock()
	defer s.Unlock()

	s.addModuleState(mod)
}

// addModuleState inserts or replaces mod by path; the caller must hold
// the state lock.
func (s *State) addModuleState(mod *ModuleState) {
	// Replace in place if a module with the same path already exists.
	for i, m := range s.Modules {
		if reflect.DeepEqual(m.Path, mod.Path) {
			s.Modules[i] = mod
			return
		}
	}

	s.Modules = append(s.Modules, mod)
	s.sort()
}

// prune is used to remove any resources that are no longer required
func (s *State) prune() {
	if s == nil {
		return
	}

	// Filter out empty modules.
	// A module is always assumed to have a path, and it's length isn't always
	// bounds checked later on. Modules may be "emptied" during destroy, but we
	// never want to store those in the state.
	for i := 0; i < len(s.Modules); i++ {
		if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
			// Remove the module in place, then revisit the same index.
			s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
			i--
		}
	}

	for _, mod := range s.Modules {
		mod.prune()
	}
	if s.Remote != nil && s.Remote.Empty() {
		s.Remote = nil
	}
}

// sort sorts the modules
func (s *State) sort() {
	sort.Sort(moduleStateSort(s.Modules))

	// Allow modules to be sorted
	for _, m := range s.Modules {
		if m != nil {
			m.sort()
		}
	}
}

// String renders a human-readable summary of the whole state, with each
// child module's rendering indented under a "module.<path>:" header.
func (s *State) String() string {
	if s == nil {
		return ""
	}
	s.Lock()
	defer s.Unlock()

	var buf bytes.Buffer
	for _, m := range s.Modules {
		mStr := m.String()

		// If we're the root module, we just write the output directly.
		if reflect.DeepEqual(m.Path, rootModulePath) {
			buf.WriteString(mStr + "\n")
			continue
		}

		buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))

		// Indent every non-empty line of the child module's rendering.
		// (The scanner deliberately shadows the receiver name here.)
		s := bufio.NewScanner(strings.NewReader(mStr))
		for s.Scan() {
			text := s.Text()
			if text != "" {
				text = "  " + text
			}

			buf.WriteString(fmt.Sprintf("%s\n", text))
		}
	}

	return strings.TrimSpace(buf.String())
}

// BackendState stores the configuration to connect to a remote backend.
type BackendState struct {
	Type      string          `json:"type"`   // Backend type
	ConfigRaw json.RawMessage `json:"config"` // Backend raw config
	Hash      uint64          `json:"hash"`   // Hash of portion of configuration from config files
}

// Empty returns true if BackendState has no state.
func (s *BackendState) Empty() bool {
	return s == nil || s.Type == ""
}

// Config decodes the type-specific configuration object using the provided
// schema and returns the result as a cty.Value.
//
// An error is returned if the stored configuration does not conform to the
// given schema.
+func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { + ty := schema.ImpliedType() + if s == nil { + return cty.NullVal(ty), nil + } + return ctyjson.Unmarshal(s.ConfigRaw, ty) +} + +// SetConfig replaces (in-place) the type-specific configuration object using +// the provided value and associated schema. +// +// An error is returned if the given value does not conform to the implied +// type of the schema. +func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { + ty := schema.ImpliedType() + buf, err := ctyjson.Marshal(val, ty) + if err != nil { + return err + } + s.ConfigRaw = buf + return nil +} + +// ForPlan produces an alternative representation of the reciever that is +// suitable for storing in a plan. The current workspace must additionally +// be provided, to be stored alongside the backend configuration. +// +// The backend configuration schema is required in order to properly +// encode the backend-specific configuration settings. +func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { + if s == nil { + return nil, nil + } + + configVal, err := s.Config(schema) + if err != nil { + return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) + } + return plans.NewBackend(s.Type, configVal, schema, workspaceName) +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. 
type RemoteState struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	mu sync.Mutex
	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}

// Lock acquires the remote state's internal mutex.
func (r *RemoteState) Lock() { r.mu.Lock() }

// Unlock releases the remote state's internal mutex.
func (r *RemoteState) Unlock() { r.mu.Unlock() }

// init ensures the Config map is non-nil so callers can write to it.
func (r *RemoteState) init() {
	r.Lock()
	defer r.Unlock()

	if r.Config == nil {
		r.Config = make(map[string]string)
	}
}

// deepcopy returns a new RemoteState sharing no storage with the receiver.
func (r *RemoteState) deepcopy() *RemoteState {
	r.Lock()
	defer r.Unlock()

	confCopy := make(map[string]string, len(r.Config))
	for k, v := range r.Config {
		confCopy[k] = v
	}
	return &RemoteState{
		Type:   r.Type,
		Config: confCopy,
	}
}

// Empty returns true if there is no remote state configured. A nil
// receiver is considered empty.
func (r *RemoteState) Empty() bool {
	if r == nil {
		return true
	}
	r.Lock()
	defer r.Unlock()

	return r.Type == ""
}

// Equals returns true if the receiver and other describe the same remote
// state configuration: same type and identical config maps.
//
// Note: only the receiver is locked during the comparison; other is read
// without holding its lock.
func (r *RemoteState) Equals(other *RemoteState) bool {
	r.Lock()
	defer r.Unlock()

	if r.Type != other.Type {
		return false
	}
	if len(r.Config) != len(other.Config) {
		return false
	}
	for k, v := range r.Config {
		if other.Config[k] != v {
			return false
		}
	}
	return true
}

// OutputState is used to track the state relevant to a single output.
type OutputState struct {
	// Sensitive describes whether the output is considered sensitive,
	// which may lead to masking the value on screen in some cases.
	Sensitive bool `json:"sensitive"`
	// Type describes the structure of Value. Valid values are "string",
	// "map" and "list"
	Type string `json:"type"`
	// Value contains the value of the output, in the structure described
	// by the Type field.
	Value interface{} `json:"value"`

	mu sync.Mutex
}

// Lock acquires the output state's internal mutex.
func (s *OutputState) Lock() { s.mu.Lock() }

// Unlock releases the output state's internal mutex.
func (s *OutputState) Unlock() { s.mu.Unlock() }

// String renders the output's value in Go syntax, for debugging.
func (s *OutputState) String() string {
	return fmt.Sprintf("%#v", s.Value)
}

// Equal compares two OutputState structures for equality. nil values are
// considered equal.
func (s *OutputState) Equal(other *OutputState) bool {
	// Two nil outputs are equal; a nil and a non-nil output are not.
	if s == nil && other == nil {
		return true
	}

	if s == nil || other == nil {
		return false
	}
	s.Lock()
	defer s.Unlock()

	if s.Type != other.Type {
		return false
	}

	if s.Sensitive != other.Sensitive {
		return false
	}

	// Values are compared structurally, whatever their dynamic type.
	if !reflect.DeepEqual(s.Value, other.Value) {
		return false
	}

	return true
}

// deepcopy returns a completely independent copy of the output state, or
// nil if the receiver is nil.
func (s *OutputState) deepcopy() *OutputState {
	if s == nil {
		return nil
	}

	stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
	if err != nil {
		// Copying an in-memory structure should never fail.
		panic(fmt.Errorf("Error copying output value: %w", err))
	}

	return stateCopy.(*OutputState)
}

// ModuleState is used to track all the state relevant to a single
// module. Previous to Terraform 0.3, all state belonged to the "root"
// module.
type ModuleState struct {
	// Path is the import path from the root module. Modules imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Locals are kept only transiently in-memory, because we can always
	// re-compute them.
	Locals map[string]interface{} `json:"-"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]*OutputState `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*ResourceState `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: an module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// OpenTofu uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// OpenTofu. If OpenTofu doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	mu sync.Mutex
}

// Lock acquires the module state's internal mutex.
func (s *ModuleState) Lock() { s.mu.Lock() }

// Unlock releases the module state's internal mutex.
func (s *ModuleState) Unlock() { s.mu.Unlock() }

// Equal tests whether one module state is equal to another.
func (m *ModuleState) Equal(other *ModuleState) bool {
	m.Lock()
	defer m.Unlock()

	// Paths must be equal
	if !reflect.DeepEqual(m.Path, other.Path) {
		return false
	}

	// Outputs must be equal
	if len(m.Outputs) != len(other.Outputs) {
		return false
	}
	for k, v := range m.Outputs {
		// A key missing from other yields a nil *OutputState, which
		// OutputState.Equal treats as not equal to a non-nil output.
		if !other.Outputs[k].Equal(v) {
			return false
		}
	}

	// Dependencies must be equal. This sorts these in place but
	// this shouldn't cause any problems.
	sort.Strings(m.Dependencies)
	sort.Strings(other.Dependencies)
	if len(m.Dependencies) != len(other.Dependencies) {
		return false
	}
	for i, d := range m.Dependencies {
		if other.Dependencies[i] != d {
			return false
		}
	}

	// Resources must be equal
	if len(m.Resources) != len(other.Resources) {
		return false
	}
	for k, r := range m.Resources {
		otherR, ok := other.Resources[k]
		if !ok {
			return false
		}

		if !r.Equal(otherR) {
			return false
		}
	}

	return true
}

// IsRoot says whether or not this module diff is for the root module.
func (m *ModuleState) IsRoot() bool {
	m.Lock()
	defer m.Unlock()
	return reflect.DeepEqual(m.Path, rootModulePath)
}

// IsDescendent returns true if other is a descendent of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans. +func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { + m.Lock() + defer m.Unlock() + + inConfig := make(map[string]struct{}) + if c != nil { + for _, r := range c.ManagedResources { + inConfig[r.Addr().String()] = struct{}{} + } + for _, r := range c.DataResources { + inConfig[r.Addr().String()] = struct{}{} + } + } + + var result []addrs.ResourceInstance + for k := range m.Resources { + // Since we've not yet updated state to use our new address format, + // we need to do some shimming here. + legacyAddr, err := parseResourceAddressInternal(k) + if err != nil { + // Suggests that the user tampered with the state, since we always + // generate valid internal addresses. + log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) + continue + } + + addr := legacyAddr.AbsResourceInstanceAddr().Resource + compareKey := addr.Resource.String() // compare by resource address, ignoring instance key + if _, exists := inConfig[compareKey]; !exists { + result = append(result, addr) + } + } + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { + if outputs == nil { + // If we got no output map at all then we'll just treat our set of + // configured outputs as empty, since that suggests that they've all + // been removed by removing their containing module. 
+ outputs = make(map[string]*configs.Output) + } + + s.Lock() + defer s.Unlock() + + var ret []addrs.OutputValue + for n := range s.Outputs { + if _, declared := outputs[n]; !declared { + ret = append(ret, addrs.OutputValue{ + Name: n, + }) + } + } + + return ret +} + +// View returns a view with the given resource prefix. +func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + 
names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + 
mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. 
+ parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +type ResourceState struct { + // This is filled in and managed by OpenTofu, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. 
+ Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. +func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. 
func (s *ResourceState) Untaint() {
	s.Lock()
	defer s.Unlock()

	if s.Primary != nil {
		s.Primary.Tainted = false
	}
}

// ProviderAddr returns the provider address for the receiver, by parsing the
// string representation saved in state. An error can be returned if the
// value in state is corrupt.
func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) {
	var diags tfdiags.Diagnostics

	str := s.Provider
	// Parse the saved string as an HCL traversal first...
	traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(travDiags)
	if travDiags.HasErrors() {
		return addrs.AbsProviderConfig{}, diags.Err()
	}

	// ...then interpret that traversal as an absolute provider config address.
	addr, addrDiags := addrs.ParseAbsProviderConfig(traversal)
	diags = diags.Append(addrDiags)
	return addr, diags.Err()
}

// init ensures all nested fields are non-nil so callers can use them
// without further checks.
func (s *ResourceState) init() {
	s.Lock()
	defer s.Unlock()

	if s.Primary == nil {
		s.Primary = &InstanceState{}
	}
	s.Primary.init()

	if s.Dependencies == nil {
		s.Dependencies = []string{}
	}

	if s.Deposed == nil {
		s.Deposed = make([]*InstanceState, 0)
	}
}

// deepcopy returns a copy of the resource state that shares no storage
// with the receiver.
func (s *ResourceState) deepcopy() *ResourceState {
	copy, err := copystructure.Config{Lock: true}.Copy(s)
	if err != nil {
		// Copying an in-memory structure should never fail.
		panic(err)
	}

	return copy.(*ResourceState)
}

// prune is used to remove any instances that are no longer required
func (s *ResourceState) prune() {
	s.Lock()
	defer s.Unlock()

	// Compact the Deposed list in place, dropping nil entries and
	// instances that no longer have an ID.
	n := len(s.Deposed)
	for i := 0; i < n; i++ {
		inst := s.Deposed[i]
		if inst == nil || inst.ID == "" {
			copy(s.Deposed[i:], s.Deposed[i+1:])
			s.Deposed[n-1] = nil // release the dropped entry for GC
			n--
			i--
		}
	}
	s.Deposed = s.Deposed[:n]

	s.Dependencies = uniqueStrings(s.Dependencies)
}

// sort orders the resource's dependency list for deterministic output.
func (s *ResourceState) sort() {
	s.Lock()
	defer s.Unlock()

	sort.Strings(s.Dependencies)
}

// String renders a short human-readable description of the resource state.
func (s *ResourceState) String() string {
	s.Lock()
	defer s.Unlock()

	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
	return buf.String()
}

// InstanceState is used to track the unique state
// information belonging to a given instance.
type InstanceState struct {
	// A unique ID for this resource. This is opaque to OpenTofu
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	mu sync.Mutex
	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within OpenTofu configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes"`

	// Ephemeral is used to store any state associated with this instance
	// that is necessary for the OpenTofu run to complete, but is not
	// persisted to a state file.
	Ephemeral EphemeralState `json:"-"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by OpenTofu core. It's meant to be used for accounting by
	// external client code. The value here must only contain Go primitives
	// and collections.
	Meta map[string]interface{} `json:"meta"`

	ProviderMeta cty.Value

	// Tainted is used to mark a resource for recreation.
	Tainted bool `json:"tainted"`
}

// Lock acquires the instance state's internal mutex.
func (s *InstanceState) Lock() { s.mu.Lock() }

// Unlock releases the instance state's internal mutex.
func (s *InstanceState) Unlock() { s.mu.Unlock() }

// init ensures the instance's maps and ephemeral state are non-nil.
func (s *InstanceState) init() {
	s.Lock()
	defer s.Unlock()

	if s.Attributes == nil {
		s.Attributes = make(map[string]string)
	}
	if s.Meta == nil {
		s.Meta = make(map[string]interface{})
	}
	s.Ephemeral.init()
}

// NewInstanceStateShimmedFromValue is a shim method to lower a new-style
// object value representing the attributes of an instance object into the
// legacy InstanceState representation.
//
// This is for shimming to old components only and should not be used in new code.
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. 
+ if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. 
+ sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. +func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for 
transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %w", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("OpenTofu 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to OpenTofu 0.3. Please upgrade\n" + + "this state file using OpenTofu 0.6.16 prior to using it with\n" + + "OpenTofu 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. 
+func ReadState(src io.Reader) (*State, error) { + // check for a nil file specifically, since that produces a platform + // specific error if we try to use it in a bufio.Reader. + if f, ok := src.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + buf := bufio.NewReader(src) + + if _, err := buf.Peek(1); err != nil { + if err == io.EOF { + return nil, ErrNoState + } + return nil, err + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice. + // This is suboptimal, but will work for now. + jsonBytes, err := io.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("Reading state file failed: %w", err) + } + + versionIdentifier := &jsonStateVersionIdentifier{} + if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { + return nil, fmt.Errorf("Decoding state file version failed: %w", err) + } + + var result *State + switch versionIdentifier.Version { + case 0: + return nil, fmt.Errorf("State version 0 is not supported as JSON.") + case 1: + v1State, err := ReadStateV1(jsonBytes) + if err != nil { + return nil, err + } + + v2State, err := upgradeStateV1ToV2(v1State) + if err != nil { + return nil, err + } + + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + // increment the Serial whenever we upgrade state + v3State.Serial++ + result = v3State + case 2: + v2State, err := ReadStateV2(jsonBytes) + if err != nil { + return nil, err + } + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + v3State.Serial++ + result = v3State + case 3: + v3State, err := ReadStateV3(jsonBytes) + if err != nil { + return nil, err + } + + result = v3State + default: + return nil, fmt.Errorf("OpenTofu %s does not support state version %d, please update.", + tfversion.SemVer.String(), versionIdentifier.Version) + } + + // If we reached this place we must have a result set + if result == nil { + 
panic("resulting state in load not set, assertion failed") + } + + // Prune the state when read it. Its possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state for example). + result.prune() + + // Validate the state file is valid + if err := result.Validate(); err != nil { + return nil, err + } + + return result, nil +} + +func ReadStateV1(jsonBytes []byte) (*stateV1, error) { + v1State := &stateV1{} + if err := json.Unmarshal(jsonBytes, v1State); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %w", err) + } + + if v1State.Version != 1 { + return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ + "read %d, expected 1", v1State.Version) + } + + return v1State, nil +} + +func ReadStateV2(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %w", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("OpenTofu %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "OpenTofu validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. 
Please manually modify the OpenTofu version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + return state, nil +} + +func ReadStateV3(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %w", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("OpenTofu %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "OpenTofu validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. Please manually modify the OpenTofu version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + // Now we write the state back out to detect any changes in normaliztion. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. incrementing serial number") + state.Serial++ + } + + return state, nil +} + +// WriteState writes a state somewhere in a binary format. +func WriteState(d *State, dst io.Writer) error { + // writing a nil state is a noop. 
+ if d == nil { + return nil + } + + // make sure we have no uninitialized fields + d.init() + + // Make sure it is sorted + d.sort() + + // Ensure the version is set + d.Version = StateVersion + + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The OpenTofu version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + + // Encode the data in a human-friendly way + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode state: %w", err) + } + + // We append a newline to the data because MarshalIndent doesn't + data = append(data, '\n') + + // Write the data out to the dst + if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { + return fmt.Errorf("Failed to write state: %w", err) + } + + return nil +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause OpenTofu +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. 
+` diff --git a/pkg/legacy/tofu/state_filter.go b/pkg/legacy/tofu/state_filter.go new file mode 100644 index 00000000000..72afe2e932b --- /dev/null +++ b/pkg/legacy/tofu/state_filter.go @@ -0,0 +1,272 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type StateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// ParseResourceAddress. +func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { + // Parse all the addresses + as := make([]*ResourceAddress, len(fs)) + for i, v := range fs { + a, err := ParseResourceAddress(v) + if err != nil { + return nil, fmt.Errorf("Error parsing address '%s': %w", v, err) + } + + as[i] = a + } + + // If we weren't given any filters, then we list all + if len(fs) == 0 { + as = append(as, &ResourceAddress{Index: -1}) + } + + // Filter each of the address. We keep track of this in a map to + // strip duplicates. 
+ resultSet := make(map[string]*StateFilterResult) + for _, a := range as { + for _, r := range f.filterSingle(a) { + resultSet[r.String()] = r + } + } + + // Make the result list + results := make([]*StateFilterResult, 0, len(resultSet)) + for _, v := range resultSet { + results = append(results, v) + } + + // Sort them and return + sort.Sort(StateFilterResultSlice(results)) + return results, nil +} + +func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { + // The slice to keep track of results + var results []*StateFilterResult + + // Go through modules first. + modules := make([]*ModuleState, 0, len(f.State.Modules)) + for _, m := range f.State.Modules { + if f.relevant(a, m) { + modules = append(modules, m) + + // Only add the module to the results if we haven't specified a type. + // We also ignore the root module. + if a.Type == "" && len(m.Path) > 1 { + results = append(results, &StateFilterResult{ + Path: m.Path[1:], + Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Value: m, + }) + } + } + } + + // With the modules set, go through all the resources within + // the modules to find relevant resources. + for _, m := range modules { + for n, r := range m.Resources { + // The name in the state contains valuable information. Parse. + key, err := ParseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + + // Older states and test fixtures often don't contain the + // type directly on the ResourceState. We add this so StateFilter + // is a bit more robust. 
+ if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { + if a.Name != "" && a.Name != key.Name { + // Name doesn't match + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &ResourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = TypePrimary + addr.InstanceTypeSet = false + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = TypeDeposed + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! 
+ return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// StateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type StateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *StateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *StateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *StateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// StateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. 
+type StateFilterResultSlice []*StateFilterResult + +func (s StateFilterResultSlice) Len() int { return len(s) } +func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s StateFilterResultSlice) Less(i, j int) bool { + a, b := s[i], s[j] + + // if these address contain an index, we want to sort by index rather than name + addrA, errA := ParseResourceAddress(a.Address) + addrB, errB := ParseResourceAddress(b.Address) + if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { + return addrA.Index < addrB.Index + } + + // If the addresses are different it is just lexographic sorting + if a.Address != b.Address { + return a.Address < b.Address + } + + // Addresses are the same, which means it matters on the type + return a.sortedType() < b.sortedType() +} diff --git a/pkg/legacy/tofu/state_test.go b/pkg/legacy/tofu/state_test.go new file mode 100644 index 00000000000..dd84a588377 --- /dev/null +++ b/pkg/legacy/tofu/state_test.go @@ -0,0 +1,1899 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" +) + +func TestStateValidate(t *testing.T) { + cases := map[string]struct { + In *State + Err bool + }{ + "empty state": { + &State{}, + false, + }, + + "multiple modules": { + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root", "foo"}, + }, + &ModuleState{ + Path: []string{"root", "foo"}, + }, + }, + }, + true, + }, + } + + for name, tc := range cases { + // Init the state + tc.In.init() + + err := tc.In.Validate() + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + } +} + +func TestStateAddModule(t *testing.T) { + cases := []struct { + In []addrs.ModuleInstance + Out [][]string + }{ + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("child", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "child"}, + }, + }, + + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "foo", "bar"}, + }, + }, + // Same last element, different middle element + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), // This one should sort after... + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey).Child("bar", addrs.NoKey), // ...this one. 
+ addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "bar", "bar"}, + []string{"root", "foo", "bar"}, + }, + }, + } + + for _, tc := range cases { + s := new(State) + for _, p := range tc.In { + s.AddModule(p) + } + + actual := make([][]string, 0, len(tc.In)) + for _, m := range s.Modules { + actual = append(actual, m.Path) + } + + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("wrong result\ninput: %sgot: %#v\nwant: %#v", spew.Sdump(tc.In), actual, tc.Out) + } + } +} + +func TestStateOutputTypeRoundTrip(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + Outputs: map[string]*OutputState{ + "string_output": &OutputState{ + Value: "String Value", + Type: "string", + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(state, roundTripped) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", roundTripped) + } +} + +func TestStateDeepCopy(t *testing.T) { + cases := []struct { + State *State + }{ + // Nil + {nil}, + + // Version + { + &State{Version: 5}, + }, + // TFVersion + { + &State{TFVersion: "5"}, + }, + // Modules + { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + }, + }, + }, + }, + }, + }, + // Deposed + // The nil values shouldn't be there if the State was properly init'ed, + // but the Copy should still work anyway. 
+ { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + Deposed: []*InstanceState{ + {ID: "test"}, + nil, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("copy-%d", i), func(t *testing.T) { + actual := tc.State.DeepCopy() + expected := tc.State + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Expected: %#v\nRecevied: %#v\n", expected, actual) + } + }) + } +} + +func TestStateEqual(t *testing.T) { + cases := []struct { + Name string + Result bool + One, Two *State + }{ + // Nils + { + "one nil", + false, + nil, + &State{Version: 2}, + }, + + { + "both nil", + true, + nil, + nil, + }, + + // Different versions + { + "different state versions", + false, + &State{Version: 5}, + &State{Version: 2}, + }, + + // Different modules + { + "different module states", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{}, + }, + + { + "same module states", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + }, + + // Meta differs + { + "differing meta values with primitives", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "schema_version": "2", + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with 
complex types + { + "same meta with complex types", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with complex types that have been altered during serialization + { + "same meta with complex types that have been json-ified", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": int(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": float64(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + }) + } +} + +func TestStateCompareAges(t *testing.T) { + cases := []struct { + Result StateAgeComparison + Err bool + One, Two *State + 
}{ + { + StateAgeEqual, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeReceiverOlder, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 3, + }, + }, + { + StateAgeReceiverNewer, false, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + } + + for i, tc := range cases { + result, err := tc.One.CompareAges(tc.Two) + + if err != nil && !tc.Err { + t.Errorf( + "%d: got error, but want success\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if err == nil && tc.Err { + t.Errorf( + "%d: got success, but want error\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if result != tc.Result { + t.Errorf( + "%d: got result %d, but want %d\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateSameLineage(t *testing.T) { + cases := []struct { + Result bool + One, Two *State + }{ + { + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "", + }, + }, + { + false, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "2", + }, + }, + } + + for i, tc := range cases { + result := tc.One.SameLineage(tc.Two) + + if result != tc.Result { + t.Errorf( + "%d: got %v, but want %v\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateMarshalEqual(t *testing.T) { + tests := map[string]struct { + S1, S2 *State + Want bool + }{ + "both nil": { + nil, + nil, + true, + }, + 
"first zero, second nil": { + &State{}, + nil, + false, + }, + "first nil, second zero": { + nil, + &State{}, + false, + }, + "both zero": { + // These are not equal because they both implicitly init with + // different lineage. + &State{}, + &State{}, + false, + }, + "both set, same lineage": { + &State{ + Lineage: "abc123", + }, + &State{ + Lineage: "abc123", + }, + true, + }, + "both set, same lineage, different serial": { + &State{ + Lineage: "abc123", + Serial: 1, + }, + &State{ + Lineage: "abc123", + Serial: 2, + }, + false, + }, + "both set, same lineage, same serial, same resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + true, + }, + "both set, same lineage, same serial, different resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "pizza_crust.tasty": {}, + }, + }, + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.S1.MarshalEqual(test.S2) + if got != test.Want { + t.Errorf("wrong result %#v; want %#v", got, test.Want) + s1Buf := &bytes.Buffer{} + s2Buf := &bytes.Buffer{} + _ = WriteState(test.S1, s1Buf) + _ = WriteState(test.S2, s2Buf) + t.Logf("\nState 1: %s\nState 2: %s", s1Buf.Bytes(), s2Buf.Bytes()) + } + }) + } +} + +func TestStateRemove(t *testing.T) { + cases := map[string]struct { + Address string + One, Two *State + }{ + "simple resource": { + "test_instance.foo", + &State{ + 
Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single instance": { + "test_instance.foo.primary", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "single instance in multi-count": { + "test_instance.foo[0]", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single resource, multi-count": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + 
Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "full module": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "module and children": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo", "bar"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: 
&InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + for k, tc := range cases { + if err := tc.One.Remove(tc.Address); err != nil { + t.Fatalf("bad: %s\n\n%s", k, err) + } + + if !tc.One.Equal(tc.Two) { + t.Fatalf("Bad: %s\n\n%s\n\n%s", k, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *ResourceState + }{ + // Different types + { + false, + &ResourceState{Type: "foo"}, + &ResourceState{Type: "bar"}, + }, + + // Different dependencies + { + false, + &ResourceState{Dependencies: []string{"foo"}}, + &ResourceState{Dependencies: []string{"bar"}}, + }, + + { + false, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + &ResourceState{Dependencies: []string{"foo"}}, + }, + + { + true, + &ResourceState{Dependencies: []string{"bar", "foo"}}, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + }, + + // Different primaries + { + false, + &ResourceState{Primary: nil}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + { + true, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + // Different tainted + { + false, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + { + true, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != 
tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateTaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + Output *ResourceState + }{ + "no primary": { + &ResourceState{}, + &ResourceState{}, + }, + + "primary, not tainted": { + &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + "primary, tainted": { + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + } + + for k, tc := range cases { + tc.Input.Taint() + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.Output, tc.Input) + } + } +} + +func TestResourceStateUntaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + ExpectedOutput *ResourceState + }{ + "no primary, err": { + Input: &ResourceState{}, + ExpectedOutput: &ResourceState{}, + }, + + "primary, not tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + "primary, tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + } + + for k, tc := range cases { + tc.Input.Untaint() + if !reflect.DeepEqual(tc.Input, tc.ExpectedOutput) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.ExpectedOutput, tc.Input) + } + } +} + +func TestInstanceStateEmpty(t *testing.T) { + cases := map[string]struct { + In *InstanceState + Result bool + }{ + "nil is empty": { + nil, + true, + }, + "non-nil but without ID is empty": { 
+ &InstanceState{}, + true, + }, + "with ID is not empty": { + &InstanceState{ + ID: "i-abc123", + }, + false, + }, + } + + for tn, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("%q expected %#v to be empty: %#v", tn, tc.In, tc.Result) + } + } +} + +func TestInstanceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *InstanceState + }{ + // Nils + { + false, + nil, + &InstanceState{}, + }, + + { + false, + &InstanceState{}, + nil, + }, + + // Different IDs + { + false, + &InstanceState{ID: "foo"}, + &InstanceState{ID: "bar"}, + }, + + // Different Attributes + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"foo": "baz"}}, + }, + + // Different Attribute keys + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"bar": "baz"}}, + }, + + { + false, + &InstanceState{Attributes: map[string]string{"bar": "baz"}}, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestStateEmpty(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + true, + }, + { + &State{}, + true, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + } + + for i, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateHasResources(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: 
[]*ModuleState{ + &ModuleState{}, + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + &ModuleState{ + Resources: map[string]*ResourceState{ + "foo.foo": &ResourceState{}, + }, + }, + }, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.HasResources() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateFromFutureTofu(t *testing.T) { + cases := []struct { + In string + Result bool + }{ + { + "", + false, + }, + { + "0.1", + false, + }, + { + "999.15.1", + true, + }, + } + + for _, tc := range cases { + state := &State{TFVersion: tc.In} + actual := state.FromFutureTofu() + if actual != tc.Result { + t.Fatalf("%s: bad: %v", tc.In, actual) + } + } +} + +func TestStateIsRemote(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.IsRemote() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestInstanceState_MergeDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + "port": "8000", + }, + } + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "baz": &ResourceAttrDiff{ + Old: "", + New: "foo", + NewComputed: true, + }, + "port": &ResourceAttrDiff{ + NewRemoved: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + "bar": "foo", + "baz": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +// GH-12183. This tests that a list with a computed set generates the +// right partial state. 
This never failed but is put here for completion +// of the test case for GH-12183. +func TestInstanceState_MergeDiff_computedSet(t *testing.T) { + is := InstanceState{} + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "config.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + + "config.0.name": &ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "config.#": "1", + "config.0.name": "hello", + "config.0.rules.#": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nil(t *testing.T) { + var is *InstanceState + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + }, + } + + is2 := is.MergeDiff(nil) + + expected := map[string]string{ + "foo": "bar", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestReadWriteState(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": &ResourceState{ + Primary: &InstanceState{ + ID: "bar", + Ephemeral: EphemeralState{ + 
ConnInfo: map[string]string{ + "type": "ssh", + "user": "root", + "password": "supersecret", + }, + }, + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify that the version and serial are set + if state.Version != StateVersion { + t.Fatalf("bad version number: %d", state.Version) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // ReadState should not restore sensitive information! + mod := state.RootModule() + mod.Resources["foo"].Primary.Ephemeral = EphemeralState{} + mod.Resources["foo"].Primary.Ephemeral.init() + + if !reflect.DeepEqual(actual, state) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadStateNewVersion(t *testing.T) { + type out struct { + Version int + } + + buf, err := json.Marshal(&out{StateVersion + 1}) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if s != nil { + t.Fatalf("unexpected: %#v", s) + } + if !strings.Contains(err.Error(), "does not support state version") { + t.Fatalf("err: %v", err) + } +} + +func TestReadStateEmptyOrNilFile(t *testing.T) { + var emptyState bytes.Buffer + _, err := ReadState(&emptyState) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } + + var nilFile *os.File + _, err = ReadState(nilFile) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } +} + +func TestReadStateTFVersion(t *testing.T) { + type tfVersion struct { + Version int `json:"version"` + TFVersion string `json:"terraform_version"` + } + + cases := []struct { + Written string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + buf, err := json.Marshal(&tfVersion{ + Version: 2, + TFVersion: tc.Written, + }) + if err != nil { + t.Fatalf("err: %v", err) + } 
+ + s, err := ReadState(bytes.NewReader(buf)) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Written, err) + } + if err != nil { + continue + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Written, s.TFVersion) + } + } +} + +func TestWriteStateTFVersion(t *testing.T) { + cases := []struct { + Write string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + var buf bytes.Buffer + err := WriteState(&State{TFVersion: tc.Write}, &buf) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Write, err) + } + if err != nil { + continue + } + + s, err := ReadState(&buf) + if err != nil { + t.Fatalf("%s: err: %s", tc.Write, err) + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Write, s.TFVersion) + } + } +} + +func TestParseResourceStateKey(t *testing.T) { + cases := []struct { + Input string + Expected *ResourceStateKey + ExpectedErr bool + }{ + { + Input: "aws_instance.foo.3", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 3, + }, + }, + { + Input: "aws_instance.foo.0", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 0, + }, + }, + { + Input: "aws_instance.foo", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: -1, + }, + }, + { + Input: "data.aws_ami.foo", + Expected: &ResourceStateKey{ + Mode: DataResourceMode, + Type: "aws_ami", + Name: "foo", + Index: -1, + }, + }, + { + Input: "aws_instance.foo.malformed", + ExpectedErr: true, + }, + { + Input: "aws_instance.foo.malformedwithnumber.123", + ExpectedErr: true, + }, + { + Input: "malformed", + ExpectedErr: true, + }, + } + for _, tc := range cases { + rsk, err := ParseResourceStateKey(tc.Input) + if rsk != nil && tc.Expected != nil && !rsk.Equal(tc.Expected) { + t.Fatalf("%s: 
expected %s, got %s", tc.Input, tc.Expected, rsk) + } + if (err != nil) != tc.ExpectedErr { + t.Fatalf("%s: expected err: %t, got %s", tc.Input, tc.ExpectedErr, err) + } + } +} + +func TestReadState_prune(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{Path: rootModulePath}, + nil, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &State{ + Version: state.Version, + Lineage: state.Lineage, + } + expected.init() + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadState_pruneDependencies(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": &ResourceState{ + Dependencies: []string{ + "aws_instance.baz", + "aws_instance.baz", + }, + Primary: &InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // make sure the duplicate Dependencies are filtered + modDeps := actual.Modules[0].Dependencies + resourceDeps := actual.Modules[0].Resources["foo"].Dependencies + + if len(modDeps) > 1 || modDeps[0] != "aws_instance.bar" { + t.Fatalf("expected 1 module depends_on entry, got %q", modDeps) + } + + if len(resourceDeps) > 1 || resourceDeps[0] != "aws_instance.baz" { + t.Fatalf("expected 1 resource depends_on entry, got %q", resourceDeps) + } +} + +func 
TestReadState_bigHash(t *testing.T) { + expected := uint64(14885267135666261723) + s := strings.NewReader(`{"version": 3, "backend":{"hash":14885267135666261723}}`) + + actual, err := ReadState(s) + if err != nil { + t.Fatal(err) + } + + if actual.Backend.Hash != expected { + t.Fatalf("expected backend hash %d, got %d", expected, actual.Backend.Hash) + } +} + +func TestResourceNameSort(t *testing.T) { + names := []string{ + "a", + "b", + "a.0", + "a.c", + "a.d", + "c", + "a.b.0", + "a.b.1", + "a.b.10", + "a.b.2", + } + + sort.Sort(resourceNameSort(names)) + + expected := []string{ + "a", + "a.0", + "a.b.0", + "a.b.1", + "a.b.2", + "a.b.10", + "a.c", + "a.d", + "b", + "c", + } + + if !reflect.DeepEqual(names, expected) { + t.Fatalf("got: %q\nexpected: %q\n", names, expected) + } +} diff --git a/pkg/legacy/tofu/state_upgrade_v1_to_v2.go b/pkg/legacy/tofu/state_upgrade_v1_to_v2.go new file mode 100644 index 00000000000..31175087d85 --- /dev/null +++ b/pkg/legacy/tofu/state_upgrade_v1_to_v2.go @@ -0,0 +1,194 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*State, error) { + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %w", err) + } + + modules := make([]*ModuleState, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %w", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &State{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + newState.sort() + newState.init() + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %w", err) + } + + return &RemoteState{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root and catch + // duplicate path errors later (as part of Validate). 
+ path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %w", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + 
return nil, fmt.Errorf("Error upgrading InstanceState V1: %w", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %w", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: *ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %w", err) + } + return &EphemeralState{ + ConnInfo: connInfo.(map[string]string), + }, nil +} diff --git a/pkg/legacy/tofu/state_upgrade_v2_to_v3.go b/pkg/legacy/tofu/state_upgrade_v2_to_v3.go new file mode 100644 index 00000000000..d4665c895f0 --- /dev/null +++ b/pkg/legacy/tofu/state_upgrade_v2_to_v3.go @@ -0,0 +1,150 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. +func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. 
+ for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. 
+ for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + // Ensure that we attempt to parse the key using 64 bits, this is because the state + // could've been generated on a 64-bit system, and we need to be able to + // convert this on both a 32-bit and 64-bit arch. + if _, err := strconv.ParseInt(key, 10, 64); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. 
Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/pkg/legacy/tofu/state_v1.go b/pkg/legacy/tofu/state_v1.go new file mode 100644 index 00000000000..6fe0fd3b5b1 --- /dev/null +++ b/pkg/legacy/tofu/state_v1.go @@ -0,0 +1,150 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// stateV1 keeps track of a snapshot state-of-the-world that OpenTofu +// can use to keep track of what real world resources it is actually +// managing. +// +// stateV1 is _only used for the purposes of backwards compatibility +// and is no longer used in OpenTofu. +// +// For the upgrade process, see state_upgrade_v1_to_v2.go +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. 
+ Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Module imports are + // always disjoint, so the path represents a module tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by OpenTofu, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. 
+ Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instance on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that OpenTofu is only tracking them in + // order to remember to destroy them. 
+ Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to OpenTofu + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within OpenTofu configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the OpenTofu run to complete, but is not + // persisted to a state file. + Ephemeral ephemeralStateV1 `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by OpenTofu core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} + +type ephemeralStateV1 struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` +} diff --git a/pkg/legacy/tofu/testing.go b/pkg/legacy/tofu/testing.go new file mode 100644 index 00000000000..cb4ea3729ac --- /dev/null +++ b/pkg/legacy/tofu/testing.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "os" + "testing" +) + +// TestStateFile writes the given state to the path. 
+func TestStateFile(t *testing.T, path string, state *State) { + f, err := os.Create(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + if err := WriteState(state, f); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/pkg/legacy/tofu/ui_input.go b/pkg/legacy/tofu/ui_input.go new file mode 100644 index 00000000000..bc1569cd826 --- /dev/null +++ b/pkg/legacy/tofu/ui_input.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "context" + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. +type UIInput interface { + Input(context.Context, *InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. + Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string + + // Secret should be true if we are asking for sensitive input. + // If attached to a TTY, OpenTofu will disable echo. + Secret bool +} diff --git a/pkg/legacy/tofu/ui_input_mock.go b/pkg/legacy/tofu/ui_input_mock.go new file mode 100644 index 00000000000..9e2f2891a7e --- /dev/null +++ b/pkg/legacy/tofu/ui_input_mock.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "context" + +// MockUIInput is an implementation of UIInput that can be used for tests. +type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/pkg/legacy/tofu/ui_input_prefix.go b/pkg/legacy/tofu/ui_input_prefix.go new file mode 100644 index 00000000000..a755c3319b4 --- /dev/null +++ b/pkg/legacy/tofu/ui_input_prefix.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. +type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/pkg/legacy/tofu/ui_input_prefix_test.go b/pkg/legacy/tofu/ui_input_prefix_test.go new file mode 100644 index 00000000000..423f97f8f23 --- /dev/null +++ b/pkg/legacy/tofu/ui_input_prefix_test.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "testing" +) + +func TestPrefixUIInput_impl(t *testing.T) { + var _ UIInput = new(PrefixUIInput) +} + +func TestPrefixUIInput(t *testing.T) { + input := new(MockUIInput) + prefix := &PrefixUIInput{ + IdPrefix: "foo", + UIInput: input, + } + + _, err := prefix.Input(context.Background(), &InputOpts{Id: "bar"}) + if err != nil { + t.Fatalf("err: %s", err) + } + + if input.InputOpts.Id != "foo.bar" { + t.Fatalf("bad: %#v", input.InputOpts) + } +} diff --git a/pkg/legacy/tofu/ui_output.go b/pkg/legacy/tofu/ui_output.go new file mode 100644 index 00000000000..3378a521574 --- /dev/null +++ b/pkg/legacy/tofu/ui_output.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/pkg/legacy/tofu/ui_output_callback.go b/pkg/legacy/tofu/ui_output_callback.go new file mode 100644 index 00000000000..02188ba9b8b --- /dev/null +++ b/pkg/legacy/tofu/ui_output_callback.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/pkg/legacy/tofu/ui_output_callback_test.go b/pkg/legacy/tofu/ui_output_callback_test.go new file mode 100644 index 00000000000..1494934d220 --- /dev/null +++ b/pkg/legacy/tofu/ui_output_callback_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestCallbackUIOutput_impl(t *testing.T) { + var _ UIOutput = new(CallbackUIOutput) +} diff --git a/pkg/legacy/tofu/ui_output_mock.go b/pkg/legacy/tofu/ui_output_mock.go new file mode 100644 index 00000000000..3fa45007bf6 --- /dev/null +++ b/pkg/legacy/tofu/ui_output_mock.go @@ -0,0 +1,26 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. +type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/pkg/legacy/tofu/ui_output_mock_test.go b/pkg/legacy/tofu/ui_output_mock_test.go new file mode 100644 index 00000000000..7f0ffa5aa73 --- /dev/null +++ b/pkg/legacy/tofu/ui_output_mock_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestMockUIOutput(t *testing.T) { + var _ UIOutput = new(MockUIOutput) +} diff --git a/pkg/legacy/tofu/upgrade_state_v1_test.go b/pkg/legacy/tofu/upgrade_state_v1_test.go new file mode 100644 index 00000000000..f5e41378605 --- /dev/null +++ b/pkg/legacy/tofu/upgrade_state_v1_test.go @@ -0,0 +1,195 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestReadUpgradeStateV1toV3 tests the state upgrade process from the V1 state +// to the current version, and needs editing each time. This means it tests the +// entire pipeline of upgrades (which migrate version to version). +func TestReadUpgradeStateV1toV3(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + actual, err := ReadState(strings.NewReader(testV1State)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(actual, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", actual.Version) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, roundTripped) { + t.Logf("actual:\n%#v", actual) + t.Fatalf("roundTripped:\n%#v", roundTripped) + } +} + +func TestReadUpgradeStateV1toV3_outputs(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + actual, err := ReadState(strings.NewReader(testV1StateWithOutputs)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(actual, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", actual.Version) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, roundTripped) { + spew.Config.DisableMethods = true + t.Fatalf("bad:\n%s\n\nround tripped:\n%s\n", spew.Sdump(actual), spew.Sdump(roundTripped)) + spew.Config.DisableMethods = false + } +} + +// Upgrading the state should not lose empty module Outputs and Resources maps +// during upgrade. 
The init for a new module initializes new maps, so we may not +// be expecting to check for a nil map. +func TestReadUpgradeStateV1toV3_emptyState(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + orig, err := ReadStateV1([]byte(testV1EmptyState)) + if err != nil { + t.Fatalf("err: %s", err) + } + + stateV2, err := upgradeStateV1ToV2(orig) + if err != nil { + t.Fatalf("error attempting upgradeStateV1ToV2: %s", err) + } + + for _, m := range stateV2.Modules { + if m.Resources == nil { + t.Fatal("V1 to V2 upgrade lost module.Resources") + } + if m.Outputs == nil { + t.Fatal("V1 to V2 upgrade lost module.Outputs") + } + } + + stateV3, err := upgradeStateV2ToV3(stateV2) + if err != nil { + t.Fatalf("error attempting to upgradeStateV2ToV3: %s", err) + } + for _, m := range stateV3.Modules { + if m.Resources == nil { + t.Fatal("V2 to V3 upgrade lost module.Resources") + } + if m.Outputs == nil { + t.Fatal("V2 to V3 upgrade lost module.Outputs") + } + } + +} + +const testV1EmptyState = `{ + "version": 1, + "serial": 0, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {} + } + ] +} +` + +const testV1State = `{ + "version": 1, + "serial": 9, + "remote": { + "type": "http", + "config": { + "url": "http://my-cool-server.com/" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": null, + "resources": { + "foo": { + "type": "", + "primary": { + "id": "bar" + } + } + }, + "depends_on": [ + "aws_instance.bar" + ] + } + ] +} +` + +const testV1StateWithOutputs = `{ + "version": 1, + "serial": 9, + "remote": { + "type": "http", + "config": { + "url": "http://my-cool-server.com/" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "foo": "bar", + "baz": "foo" + }, + "resources": { + "foo": { + "type": "", + "primary": { + "id": "bar" + } + } + }, + "depends_on": [ + "aws_instance.bar" + ] + } + ] +} +` diff --git a/pkg/legacy/tofu/upgrade_state_v2_test.go 
b/pkg/legacy/tofu/upgrade_state_v2_test.go new file mode 100644 index 00000000000..93bb34f6f65 --- /dev/null +++ b/pkg/legacy/tofu/upgrade_state_v2_test.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "strings" + "testing" +) + +// TestReadUpgradeStateV2toV3 tests the state upgrade process from the V2 state +// to the current version, and needs editing each time. This means it tests the +// entire pipeline of upgrades (which migrate version to version). +func TestReadUpgradeStateV2toV3(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + upgraded, err := ReadState(strings.NewReader(testV2State)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(upgraded, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if upgraded.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", upgraded.Version) + } + + // For this test we cannot assert that we match the round trip because an + // empty map has been removed from state. Instead we make assertions against + // some of the key fields in the _upgraded_ state. 
+ instanceState, ok := upgraded.RootModule().Resources["test_resource.main"] + if !ok { + t.Fatalf("Instance state for test_resource.main was removed from state during upgrade") + } + + primary := instanceState.Primary + if primary == nil { + t.Fatalf("Primary instance was removed from state for test_resource.main") + } + + // Non-empty computed map is moved from .# to .% + if _, ok := primary.Attributes["computed_map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for computed_map") + } + if count, ok := primary.Attributes["computed_map.%"]; !ok || count != "1" { + t.Fatalf("Count was not in .%% or was not 2 for computed_map") + } + + // list_of_map top level retains .# + if count, ok := primary.Attributes["list_of_map.#"]; !ok || count != "2" { + t.Fatal("Count for list_of_map was migrated incorrectly") + } + + // list_of_map.0 is moved from .# to .% + if _, ok := primary.Attributes["list_of_map.0.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for list_of_map.0") + } + if count, ok := primary.Attributes["list_of_map.0.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for list_of_map.0") + } + + // list_of_map.1 is moved from .# to .% + if _, ok := primary.Attributes["list_of_map.1.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for list_of_map.1") + } + if count, ok := primary.Attributes["list_of_map.1.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for list_of_map.1") + } + + // map is moved from .# to .% + if _, ok := primary.Attributes["map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for map") + } + if count, ok := primary.Attributes["map.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for map") + } + + // optional_computed_map should be removed from state + if _, ok := primary.Attributes["optional_computed_map"]; ok { + t.Fatal("optional_computed_map was not removed from state") + } + + // required_map is moved from .# to .% + if _, ok 
:= primary.Attributes["required_map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for required_map") + } + if count, ok := primary.Attributes["required_map.%"]; !ok || count != "3" { + t.Fatalf("Count was not in .%% or was not 3 for map") + } + + // computed_list keeps .# + if count, ok := primary.Attributes["computed_list.#"]; !ok || count != "2" { + t.Fatal("Count was migrated incorrectly for computed_list") + } + + // computed_set keeps .# + if count, ok := primary.Attributes["computed_set.#"]; !ok || count != "2" { + t.Fatal("Count was migrated incorrectly for computed_set") + } + if val, ok := primary.Attributes["computed_set.2337322984"]; !ok || val != "setval1" { + t.Fatal("Set item for computed_set.2337322984 changed or moved") + } + if val, ok := primary.Attributes["computed_set.307881554"]; !ok || val != "setval2" { + t.Fatal("Set item for computed_set.307881554 changed or moved") + } + + // string properties are unaffected + if val, ok := primary.Attributes["id"]; !ok || val != "testId" { + t.Fatal("id was not set correctly after migration") + } +} + +const testV2State = `{ + "version": 2, + "terraform_version": "0.7.0", + "serial": 2, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "computed_map": { + "sensitive": false, + "type": "map", + "value": { + "key1": "value1" + } + }, + "computed_set": { + "sensitive": false, + "type": "list", + "value": [ + "setval1", + "setval2" + ] + }, + "map": { + "sensitive": false, + "type": "map", + "value": { + "key": "test", + "test": "test" + } + }, + "set": { + "sensitive": false, + "type": "list", + "value": [ + "test1", + "test2" + ] + } + }, + "resources": { + "test_resource.main": { + "type": "test_resource", + "primary": { + "id": "testId", + "attributes": { + "computed_list.#": "2", + "computed_list.0": "listval1", + "computed_list.1": "listval2", + "computed_map.#": "1", + "computed_map.key1": "value1", + "computed_read_only": "value_from_api", + "computed_read_only_force_new": 
"value_from_api", + "computed_set.#": "2", + "computed_set.2337322984": "setval1", + "computed_set.307881554": "setval2", + "id": "testId", + "list_of_map.#": "2", + "list_of_map.0.#": "2", + "list_of_map.0.key1": "value1", + "list_of_map.0.key2": "value2", + "list_of_map.1.#": "2", + "list_of_map.1.key3": "value3", + "list_of_map.1.key4": "value4", + "map.#": "2", + "map.key": "test", + "map.test": "test", + "map_that_look_like_set.#": "2", + "map_that_look_like_set.12352223": "hello", + "map_that_look_like_set.36234341": "world", + "optional_computed_map.#": "0", + "required": "Hello World", + "required_map.#": "3", + "required_map.key1": "value1", + "required_map.key2": "value2", + "required_map.key3": "value3", + "set.#": "2", + "set.2326977762": "test1", + "set.331058520": "test2" + } + } + } + } + } + ] +} +` diff --git a/pkg/legacy/tofu/util.go b/pkg/legacy/tofu/util.go new file mode 100644 index 00000000000..de6209c4e82 --- /dev/null +++ b/pkg/legacy/tofu/util.go @@ -0,0 +1,69 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sort" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n <= 0 { + panic("semaphore with limit <=0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. 
+// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/pkg/legacy/tofu/util_test.go b/pkg/legacy/tofu/util_test.go new file mode 100644 index 00000000000..f25866005a0 --- /dev/null +++ b/pkg/legacy/tofu/util_test.go @@ -0,0 +1,81 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestSemaphore(t *testing.T) { + s := NewSemaphore(2) + timer := time.AfterFunc(time.Second, func() { + panic("deadlock") + }) + defer timer.Stop() + + s.Acquire() + if !s.TryAcquire() { + t.Fatalf("should acquire") + } + if s.TryAcquire() { + t.Fatalf("should not acquire") + } + s.Release() + s.Release() + + // This release should panic + defer func() { + r := recover() + if r == nil { + t.Fatalf("should panic") + } + }() + s.Release() +} + +func TestUniqueStrings(t *testing.T) { + cases := []struct { + Input []string + Expected []string + }{ + { + []string{}, + []string{}, + }, + { + []string{"x"}, + []string{"x"}, + }, + { + []string{"a", "b", "c"}, + []string{"a", "b", "c"}, + }, + { + []string{"a", "a", "a"}, + []string{"a"}, + }, + { + []string{"a", "b", "a", "b", "a", "a"}, + []string{"a", "b"}, + }, + { + []string{"c", "b", "a", "c", "b"}, + []string{"a", "b", "c"}, + }, + } + + 
for i, tc := range cases { + t.Run(fmt.Sprintf("unique-%d", i), func(t *testing.T) { + actual := uniqueStrings(tc.Input) + if !reflect.DeepEqual(tc.Expected, actual) { + t.Fatalf("Expected: %q\nGot: %q", tc.Expected, actual) + } + }) + } +} diff --git a/pkg/legacy/tofu/version.go b/pkg/legacy/tofu/version.go new file mode 100644 index 00000000000..616534d0339 --- /dev/null +++ b/pkg/legacy/tofu/version.go @@ -0,0 +1,15 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/version" +) + +// Deprecated: Providers should use schema.Provider.TerraformVersion instead +func VersionString() string { + return version.String() +} diff --git a/pkg/logging/indent.go b/pkg/logging/indent.go new file mode 100644 index 00000000000..9a4f7affa64 --- /dev/null +++ b/pkg/logging/indent.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "strings" +) + +// Indent adds two spaces to the beginning of each line of the given string, +// with the goal of making the log level filter understand it as a line +// continuation rather than possibly as new log lines. +func Indent(s string) string { + var b strings.Builder + for len(s) > 0 { + end := strings.IndexByte(s, '\n') + if end == -1 { + end = len(s) - 1 + } + var l string + l, s = s[:end+1], s[end+1:] + b.WriteString(" ") + b.WriteString(l) + } + return b.String() +} diff --git a/pkg/logging/indent_test.go b/pkg/logging/indent_test.go new file mode 100644 index 00000000000..f27f52e94c9 --- /dev/null +++ b/pkg/logging/indent_test.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "testing" +) + +func TestIndent(t *testing.T) { + s := "hello\n world\ngoodbye\n moon" + got := Indent(s) + want := " hello\n world\n goodbye\n moon" + + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s", got, want) + } +} diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go new file mode 100644 index 00000000000..13a8e158463 --- /dev/null +++ b/pkg/logging/logging.go @@ -0,0 +1,250 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "fmt" + "io" + "log" + "os" + "strings" + "syscall" + + "github.com/hashicorp/go-hclog" +) + +// These are the environmental variables that determine if we log, and if +// we log whether or not the log should go to a file. +const ( + envLog = "TF_LOG" + envLogFile = "TF_LOG_PATH" + + // Allow logging of specific subsystems. + // We only separate core and providers for now, but this could be extended + // to other loggers, like provisioners and remote-state backends. + envLogCore = "TF_LOG_CORE" + envLogProvider = "TF_LOG_PROVIDER" + envLogCloud = "TF_LOG_CLOUD" +) + +var ( + // ValidLevels are the log level names that OpenTofu recognizes. 
+	ValidLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"}
+
+	// logger is the global hclog logger
+	logger hclog.Logger
+
+	// logWriter is a global writer for logs, to be used with the std log package
+	logWriter io.Writer
+
+	// initialize our cache of panic output from providers
+	panics = &panicRecorder{
+		panics:   make(map[string][]string),
+		maxLines: 100,
+	}
+)
+
+func init() {
+	logger = newHCLogger("")
+	logWriter = logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})
+
+	// set up the default std library logger to use our output
+	log.SetFlags(0)
+	log.SetPrefix("")
+	log.SetOutput(logWriter)
+}
+
+// RegisterSink adds a new log sink which writes all logs to the given file.
+func RegisterSink(f *os.File) {
+	l, ok := logger.(hclog.InterceptLogger)
+	if !ok {
+		panic("global logger is not an InterceptLogger")
+	}
+
+	if f == nil {
+		return
+	}
+
+	l.RegisterSink(hclog.NewSinkAdapter(&hclog.LoggerOptions{
+		Level:  hclog.Trace,
+		Output: f,
+	}))
+}
+
+// LogOutput returns the default global log io.Writer
+func LogOutput() io.Writer {
+	return logWriter
+}
+
+// HCLogger returns the default global hclog logger
+func HCLogger() hclog.Logger {
+	return logger
+}
+
+// newHCLogger returns a new hclog.Logger instance with the given name
+func newHCLogger(name string) hclog.Logger {
+	logOutput := io.Writer(os.Stderr)
+	logLevel, json := globalLogLevel()
+
+	if logPath := os.Getenv(envLogFile); logPath != "" {
+		f, err := os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error opening log file: %v\n", err)
+		} else {
+			logOutput = f
+		}
+	}
+
+	return hclog.NewInterceptLogger(&hclog.LoggerOptions{
+		Name:              name,
+		Level:             logLevel,
+		Output:            logOutput,
+		IndependentLevels: true,
+		JSONFormat:        json,
+	})
+}
+
+// NewLogger returns a new logger based on the current global logger, with the
+// given name appended.
+func NewLogger(name string) hclog.Logger { + if name == "" { + panic("logger name required") + } + return &logPanicWrapper{ + Logger: logger.Named(name), + } +} + +// NewProviderLogger returns a logger for the provider plugin, possibly with a +// different log level from the global logger. +func NewProviderLogger(prefix string) hclog.Logger { + l := &logPanicWrapper{ + Logger: logger.Named(prefix + "provider"), + } + + level := providerLogLevel() + logger.Debug("created provider logger", "level", level) + + l.SetLevel(level) + return l +} + +// NewCloudLogger returns a logger for the cloud plugin, possibly with a +// different log level from the global logger. +func NewCloudLogger() hclog.Logger { + l := &logPanicWrapper{ + Logger: logger.Named("cloud"), + } + + level := cloudLogLevel() + logger.Debug("created cloud logger", "level", level) + + l.SetLevel(level) + return l +} + +// CurrentLogLevel returns the current log level string based the environment vars +func CurrentLogLevel() string { + ll, _ := globalLogLevel() + return strings.ToUpper(ll.String()) +} + +func providerLogLevel() hclog.Level { + providerEnvLevel := strings.ToUpper(os.Getenv(envLogProvider)) + if providerEnvLevel == "" { + providerEnvLevel = strings.ToUpper(os.Getenv(envLog)) + } + + return parseLogLevel(providerEnvLevel) +} + +func cloudLogLevel() hclog.Level { + providerEnvLevel := strings.ToUpper(os.Getenv(envLogCloud)) + if providerEnvLevel == "" { + providerEnvLevel = strings.ToUpper(os.Getenv(envLog)) + } + + return parseLogLevel(providerEnvLevel) +} + +func globalLogLevel() (hclog.Level, bool) { + var json bool + envLevel := strings.ToUpper(os.Getenv(envLog)) + if envLevel == "" { + envLevel = strings.ToUpper(os.Getenv(envLogCore)) + } + if envLevel == "JSON" { + json = true + } + return parseLogLevel(envLevel), json +} + +func parseLogLevel(envLevel string) hclog.Level { + if envLevel == "" { + return hclog.Off + } + if envLevel == "JSON" { + envLevel = "TRACE" + } + + logLevel := 
hclog.Trace + if isValidLogLevel(envLevel) { + logLevel = hclog.LevelFromString(envLevel) + } else { + fmt.Fprintf(os.Stderr, "[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", + envLevel, ValidLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level, _ := globalLogLevel() + return level == hclog.Debug || level == hclog.Trace +} + +func isValidLogLevel(level string) bool { + for _, l := range ValidLevels { + if level == string(l) { + return true + } + } + + return false +} + +// PluginOutputMonitor creates an io.Writer that will warn about any writes in +// the default logger. This is used to catch unexpected output from plugins, +// notifying them about the problem as well as surfacing the lost data for +// context. +func PluginOutputMonitor(source string) io.Writer { + return pluginOutputMonitor{ + source: source, + log: logger, + } +} + +// pluginOutputMonitor is an io.Writer that logs all writes as +// "unexpected data" with the source name. +type pluginOutputMonitor struct { + source string + log hclog.Logger +} + +func (w pluginOutputMonitor) Write(d []byte) (int, error) { + // Limit the write size to 1024 bytes We're not expecting any data to come + // through this channel, so accidental writes will usually be stray fmt + // debugging statements and the like, but we want to provide context to the + // provider to indicate what the unexpected data might be. + n := len(d) + if n > 1024 { + d = append(d[:1024], '.', '.', '.') + } + + w.log.Warn("unexpected data", w.source, strings.TrimSpace(string(d))) + return n, nil +} diff --git a/pkg/logging/panic.go b/pkg/logging/panic.go new file mode 100644 index 00000000000..d95ec97f670 --- /dev/null +++ b/pkg/logging/panic.go @@ -0,0 +1,216 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "fmt" + "os" + "runtime/debug" + "strings" + "sync" + + "github.com/hashicorp/go-hclog" +) + +// This output is shown if a panic happens. +const panicOutput = ` +!!!!!!!!!!!!!!!!!!!!!!!!!!! OPENTOFU CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! + +OpenTofu crashed! This is always indicative of a bug within OpenTofu. +Please report the crash with OpenTofu[1] so that we can fix this. + +When reporting bugs, please include your OpenTofu version, the stack trace +shown below, and any additional information which may help replicate the issue. + +[1]: https://github.com/kubegems/opentofu/issues + +!!!!!!!!!!!!!!!!!!!!!!!!!!! OPENTOFU CRASH !!!!!!!!!!!!!!!!!!!!!!!!!!!! + +` + +// In case multiple goroutines panic concurrently, ensure only the first one +// recovered by PanicHandler starts printing. +var panicMutex sync.Mutex + +// PanicHandler is called to recover from an internal panic in OpenTofu, and +// augments the standard stack trace with a more user friendly error message. +// PanicHandler must be called as a defered function, and must be the first +// defer called at the start of a new goroutine. +func PanicHandler() { + // Have all managed goroutines checkin here, and prevent them from exiting + // if there's a panic in progress. While this can't lock the entire runtime + // to block progress, we can prevent some cases where OpenTofu may return + // early before the panic has been printed out. + panicMutex.Lock() + defer panicMutex.Unlock() + + recovered := recover() + panicHandler(recovered, nil) +} + +// PanicHandlerWithTraceFn returns a function similar to PanicHandler which is +// called to recover from an internal panic in OpenTofu, and augments the +// standard stack trace with a more complete stack trace. 
+// The calling stack trace is captured before returing the augmented panicHandler +// The returned panicHandler must be called as a defered function, and must be the +// first defer called at the start of a new goroutine. +// +// Callers of this function should create the panicHandler before any tight looping +// as there may be a performance impact if called excessively. +// +// This only is a partial solution to the problem of panics within deeply nested +// go-routines. It only works between the go-routine being called and the calling +// go-routine. If you have multiple nested go-rotuines, it will only preserve the +// calling stack and the called panic stack. Idealy we would be able to use context +// or a similar construct to build a more comprehensive panic handler, but this +// is a significant step in the right direction that will dramatically improve crash +// debugging +func PanicHandlerWithTraceFn() func() { + trace := debug.Stack() + return func() { + // Have all managed goroutines checkin here, and prevent them from exiting + // if there's a panic in progress. While this can't lock the entire runtime + // to block progress, we can prevent some cases where OpenTofu may return + // early before the panic has been printed out. + panicMutex.Lock() + defer panicMutex.Unlock() + + recovered := recover() + panicHandler(recovered, trace) + } +} + +func panicHandler(recovered interface{}, trace []byte) { + if recovered == nil { + return + } + + fmt.Fprint(os.Stderr, panicOutput) + fmt.Fprint(os.Stderr, recovered, "\n") + + // When called from a deferred function, debug.PrintStack will include the + // full stack from the point of the pending panic. 
+ debug.PrintStack() + if trace != nil { + fmt.Fprint(os.Stderr, "With go-routine called from:\n") + os.Stderr.Write(trace) + } + + // An exit code of 11 keeps us out of the way of the detailed exitcodes + // from plan, and also happens to be the same code as SIGSEGV which is + // roughly the same type of condition that causes most panics. + os.Exit(11) +} + +const pluginPanicOutput = ` +Stack trace from the %[1]s plugin: + +%s + +Error: The %[1]s plugin crashed! + +This is always indicative of a bug within the plugin. It would be immensely +helpful if you could report the crash with the plugin's maintainers so that it +can be fixed. The output above should help diagnose the issue. +` + +// PluginPanics returns a series of provider panics that were collected during +// execution, and formatted for output. +func PluginPanics() []string { + return panics.allPanics() +} + +// panicRecorder provides a registry to check for plugin panics that may have +// happened when a plugin suddenly terminates. +type panicRecorder struct { + sync.Mutex + // panics maps the plugin name to the panic output lines received from + // the logger. + panics map[string][]string + + // maxLines is the max number of lines we'll record after seeing a + // panic header. Since this is going to be printed in the UI output, we + // don't want to destroy the scrollback. In most cases, the first few lines + // of the stack trace is all that are required. + maxLines int +} + +// registerPlugin returns an accumulator function which will accept lines of +// a panic stack trace to collect into an error when requested. +func (p *panicRecorder) registerPlugin(name string) func(string) { + p.Lock() + defer p.Unlock() + + // In most cases we shouldn't be starting a plugin if it already + // panicked, but clear out previous entries just in case. 
+ delete(p.panics, name) + + count := 0 + + // this callback is used by the logger to store panic output + return func(line string) { + p.Lock() + defer p.Unlock() + + // stop recording if there are too many lines. + if count > p.maxLines { + return + } + count++ + + p.panics[name] = append(p.panics[name], line) + } +} + +func (p *panicRecorder) allPanics() []string { + p.Lock() + defer p.Unlock() + + var res []string + for name, lines := range p.panics { + if len(lines) == 0 { + continue + } + + res = append(res, fmt.Sprintf(pluginPanicOutput, name, strings.Join(lines, "\n"))) + } + return res +} + +// logPanicWrapper wraps an hclog.Logger and intercepts and records any output +// that appears to be a panic. +type logPanicWrapper struct { + hclog.Logger + panicRecorder func(string) + inPanic bool +} + +// go-plugin will create a new named logger for each plugin binary. +func (l *logPanicWrapper) Named(name string) hclog.Logger { + return &logPanicWrapper{ + Logger: l.Logger.Named(name), + panicRecorder: panics.registerPlugin(name), + } +} + +// we only need to implement Debug, since that is the default output level used +// by go-plugin when encountering unstructured output on stderr. +func (l *logPanicWrapper) Debug(msg string, args ...interface{}) { + // We don't have access to the binary itself, so guess based on the stderr + // output if this is the start of the traceback. An occasional false + // positive shouldn't be a big deal, since this is only retrieved after an + // error of some sort. + + panicPrefix := strings.HasPrefix(msg, "panic: ") || strings.HasPrefix(msg, "fatal error: ") + + l.inPanic = l.inPanic || panicPrefix + + if l.inPanic && l.panicRecorder != nil { + l.panicRecorder(msg) + } + + l.Logger.Debug(msg, args...) 
+} diff --git a/pkg/logging/panic_test.go b/pkg/logging/panic_test.go new file mode 100644 index 00000000000..16ceb96caba --- /dev/null +++ b/pkg/logging/panic_test.go @@ -0,0 +1,56 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "fmt" + "strings" + "testing" +) + +func TestPanicRecorder(t *testing.T) { + rec := panics.registerPlugin("test") + + output := []string{ + "panic: test", + " stack info", + } + + for _, line := range output { + rec(line) + } + + expected := fmt.Sprintf(pluginPanicOutput, "test", strings.Join(output, "\n")) + + res := PluginPanics() + if len(res) == 0 { + t.Fatal("no output") + } + + if res[0] != expected { + t.Fatalf("expected: %q\ngot: %q", expected, res[0]) + } +} + +func TestPanicLimit(t *testing.T) { + rec := panics.registerPlugin("test") + + rec("panic: test") + + for i := 0; i < 200; i++ { + rec(fmt.Sprintf("LINE: %d", i)) + } + + res := PluginPanics() + // take the extra content into account + max := strings.Count(pluginPanicOutput, "\n") + panics.maxLines + for _, out := range res { + found := strings.Count(out, "\n") + if found > max { + t.Fatalf("expected no more than %d lines, got: %d", max, found) + } + } +} diff --git a/pkg/modsdir/doc.go b/pkg/modsdir/doc.go new file mode 100644 index 00000000000..dae5137ed5e --- /dev/null +++ b/pkg/modsdir/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package modsdir is an internal package containing the model types used to +// represent the manifest of modules in a local modules cache directory. 
+package modsdir diff --git a/pkg/modsdir/manifest.go b/pkg/modsdir/manifest.go new file mode 100644 index 00000000000..cbae7237d5f --- /dev/null +++ b/pkg/modsdir/manifest.go @@ -0,0 +1,186 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package modsdir + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Record represents some metadata about an installed module, as part +// of a ModuleManifest. +type Record struct { + // Key is a unique identifier for this particular module, based on its + // position within the static module tree. + Key string `json:"Key"` + + // SourceAddr is the source address given for this module in configuration. + // This is used only to detect if the source was changed in configuration + // since the module was last installed, which means that the installer + // must re-install it. + // + // This should always be the result of calling method String on an + // addrs.ModuleSource value, to get a suitably-normalized result. + SourceAddr string `json:"Source"` + + // Version is the exact version of the module, which results from parsing + // VersionStr. nil for un-versioned modules. + Version *version.Version `json:"-"` + + // VersionStr is the version specifier string. This is used only for + // serialization in snapshots and should not be accessed or updated + // by any other codepaths; use "Version" instead. + VersionStr string `json:"Version,omitempty"` + + // Dir is the path to the local directory where the module is installed. + Dir string `json:"Dir"` +} + +// Manifest is a map used to keep track of the filesystem locations +// and other metadata about installed modules. 
+// +// The configuration loader refers to this, while the module installer updates +// it to reflect any changes to the installed modules. +type Manifest map[string]Record + +func (m Manifest) ModuleKey(path addrs.Module) string { + if len(path) == 0 { + return "" + } + return strings.Join([]string(path), ".") + +} + +// manifestSnapshotFile is an internal struct used only to assist in our JSON +// serialization of manifest snapshots. It should not be used for any other +// purpose. +type manifestSnapshotFile struct { + Records []Record `json:"Modules"` +} + +func ReadManifestSnapshot(r io.Reader) (Manifest, error) { + src, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + if len(src) == 0 { + // This should never happen, but we'll tolerate it as if it were + // a valid empty JSON object. + return make(Manifest), nil + } + + var read manifestSnapshotFile + err = json.Unmarshal(src, &read) + if err != nil { + return nil, fmt.Errorf("error unmarshalling snapshot: %w", err) + } + new := make(Manifest) + for _, record := range read.Records { + if record.VersionStr != "" { + record.Version, err = version.NewVersion(record.VersionStr) + if err != nil { + return nil, fmt.Errorf("invalid version %q for %s: %w", record.VersionStr, record.Key, err) + } + } + + // Historically we didn't normalize the module source addresses when + // writing them into the manifest, and so we'll make a best effort + // to normalize them back in on read so that we can just gracefully + // upgrade on the next "tofu init". + if record.SourceAddr != "" { + if addr, err := addrs.ParseModuleSource(record.SourceAddr); err == nil { + // This is a best effort sort of thing. If the source + // address isn't valid then we'll just leave it as-is + // and let another component detect that downstream, + // to preserve the old behavior in that case. 
+ record.SourceAddr = addr.String() + } + } + + // Ensure Windows is using the proper modules path format after + // reading the modules manifest Dir records + record.Dir = filepath.FromSlash(record.Dir) + + if _, exists := new[record.Key]; exists { + // This should never happen in any valid file, so we'll catch it + // and report it to avoid confusing/undefined behavior if the + // snapshot file was edited incorrectly outside of OpenTofu. + return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) + } + new[record.Key] = record + } + return new, nil +} + +func ReadManifestSnapshotForDir(dir string) (Manifest, error) { + fn := filepath.Join(dir, ManifestSnapshotFilename) + r, err := os.Open(fn) + if err != nil { + if os.IsNotExist(err) { + return make(Manifest), nil // missing file is okay and treated as empty + } + return nil, err + } + defer r.Close() + return ReadManifestSnapshot(r) +} + +func (m Manifest) WriteSnapshot(w io.Writer) error { + var write manifestSnapshotFile + + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + record := m[k] + + // Make sure VersionStr is in sync with Version, since we encourage + // callers to manipulate Version and ignore VersionStr. 
+ if record.Version != nil { + record.VersionStr = record.Version.String() + } else { + record.VersionStr = "" + } + + // Ensure Dir is written in a format that can be read by Linux and + // Windows nodes for remote and apply compatibility + record.Dir = filepath.ToSlash(record.Dir) + write.Records = append(write.Records, record) + } + + src, err := json.Marshal(write) + if err != nil { + return err + } + + _, err = w.Write(src) + return err +} + +func (m Manifest) WriteSnapshotToDir(dir string) error { + fn := filepath.Join(dir, ManifestSnapshotFilename) + log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) + w, err := os.Create(fn) + if err != nil { + return err + } + defer w.Close() + + return m.WriteSnapshot(w) +} diff --git a/pkg/modsdir/paths.go b/pkg/modsdir/paths.go new file mode 100644 index 00000000000..9a843bfafd2 --- /dev/null +++ b/pkg/modsdir/paths.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package modsdir + +const ManifestSnapshotFilename = "modules.json" diff --git a/pkg/moduledeps/dependencies.go b/pkg/moduledeps/dependencies.go new file mode 100644 index 00000000000..a32ea2ae690 --- /dev/null +++ b/pkg/moduledeps/dependencies.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package moduledeps + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plugin/discovery" +) + +// Providers describes a set of provider dependencies for a given module. +// +// Each named provider instance can have one version constraint. +type Providers map[addrs.Provider]ProviderDependency + +// ProviderDependency describes the dependency for a particular provider +// instance, including both the set of allowed versions and the reason for +// the dependency. 
type ProviderDependency struct {
	// Constraints is the set of version constraints that apply to this
	// provider for the module.
	Constraints discovery.Constraints

	// Reason records why the dependency exists: explicit configuration,
	// implicit resource usage, inheritance from a parent module, or a
	// lingering reference from state. See the ProviderDependencyReason
	// constants below.
	Reason ProviderDependencyReason
}
// Module represents the dependencies of a single module, as well being
// a node in a tree of such structures representing the dependencies of
// an entire configuration.
type Module struct {
	// Name is this module's name within its parent.
	Name string

	// Providers records the provider dependencies of this module alone,
	// not including those of its children.
	Providers Providers

	// Children are the directly nested child modules. Order is
	// significant for tree walks; use SortChildren/SortDescendents to
	// normalize it.
	Children []*Module
}
+func (m *Module) WalkTree(cb WalkFunc) error { + return walkModuleTree(make([]string, 0, 1), nil, m, cb) +} + +func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { + path = append(path, current.Name) + err := cb(path, parent, current) + if err != nil { + return err + } + + for _, child := range current.Children { + err := walkModuleTree(path, current, child, cb) + if err != nil { + return err + } + } + return nil +} + +// SortChildren sorts the Children slice into lexicographic order by +// name, in-place. +// +// This is primarily useful prior to calling WalkTree so that the walk +// will proceed in a consistent order. +func (m *Module) SortChildren() { + sort.Sort(sortModules{m.Children}) +} + +// SortDescendents is a convenience wrapper for calling SortChildren on +// the receiver and all of its descendent modules. +func (m *Module) SortDescendents() { + m.WalkTree(func(path []string, parent *Module, current *Module) error { + current.SortChildren() + return nil + }) +} + +type sortModules struct { + modules []*Module +} + +func (s sortModules) Len() int { + return len(s.modules) +} + +func (s sortModules) Less(i, j int) bool { + cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) + return cmp < 0 +} + +func (s sortModules) Swap(i, j int) { + s.modules[i], s.modules[j] = s.modules[j], s.modules[i] +} + +// ProviderRequirements produces a PluginRequirements structure that can +// be used with discovery.PluginMetaSet.ConstrainVersions to identify +// suitable plugins to satisfy the module's provider dependencies. +// +// This method only considers the direct requirements of the receiver. +// Use AllPluginRequirements to flatten the dependencies for the +// entire tree of modules. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. 
+func (m *Module) ProviderRequirements() discovery.PluginRequirements { + ret := make(discovery.PluginRequirements) + for pFqn, dep := range m.Providers { + providerType := pFqn.Type + if existing, exists := ret[providerType]; exists { + ret[providerType].Versions = existing.Versions.Append(dep.Constraints) + } else { + ret[providerType] = &discovery.PluginConstraints{ + Versions: dep.Constraints, + } + } + } + return ret +} + +// AllProviderRequirements calls ProviderRequirements for the receiver and all +// of its descendents, and merges the result into a single PluginRequirements +// structure that would satisfy all of the modules together. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) AllProviderRequirements() discovery.PluginRequirements { + var ret discovery.PluginRequirements + m.WalkTree(func(path []string, parent *Module, current *Module) error { + ret = ret.Merge(current.ProviderRequirements()) + return nil + }) + return ret +} + +// Equal returns true if the receiver is the root of an identical tree +// to the other given Module. This is a deep comparison that considers +// the equality of all downstream modules too. +// +// The children are considered to be ordered, so callers may wish to use +// SortDescendents first to normalize the order of the slices of child nodes. +// +// The implementation of this function is not optimized since it is provided +// primarily for use in tests. 
+func (m *Module) Equal(other *Module) bool { + // take care of nils first + if m == nil && other == nil { + return true + } else if (m == nil && other != nil) || (m != nil && other == nil) { + return false + } + + if m.Name != other.Name { + return false + } + + if len(m.Providers) != len(other.Providers) { + return false + } + if len(m.Children) != len(other.Children) { + return false + } + + // Can't use reflect.DeepEqual on this provider structure because + // the nested Constraints objects contain function pointers that + // never compare as equal. So we'll need to walk it the long way. + for inst, dep := range m.Providers { + if _, exists := other.Providers[inst]; !exists { + return false + } + + if dep.Reason != other.Providers[inst].Reason { + return false + } + + // Constraints are not too easy to compare robustly, so + // we'll just use their string representations as a proxy + // for now. + if dep.Constraints.String() != other.Providers[inst].Constraints.String() { + return false + } + } + + // Above we already checked that we have the same number of children + // in each module, so now we just need to check that they are + // recursively equal. + for i := range m.Children { + if !m.Children[i].Equal(other.Children[i]) { + return false + } + } + + // If we fall out here then they are equal + return true +} diff --git a/pkg/moduledeps/module_test.go b/pkg/moduledeps/module_test.go new file mode 100644 index 00000000000..1418b92580c --- /dev/null +++ b/pkg/moduledeps/module_test.go @@ -0,0 +1,219 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package moduledeps + +import ( + "fmt" + "reflect" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plugin/discovery" +) + +func TestModuleWalkTree(t *testing.T) { + type walkStep struct { + Path []string + ParentName string + } + + tests := []struct { + Root *Module + WalkOrder []walkStep + }{ + { + &Module{ + Name: "root", + Children: nil, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child", + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child"}, + ParentName: "root", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child", + Children: []*Module{ + { + Name: "grandchild", + }, + }, + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child"}, + ParentName: "root", + }, + { + Path: []string{"root", "child", "grandchild"}, + ParentName: "child", + }, + }, + }, + { + &Module{ + Name: "root", + Children: []*Module{ + { + Name: "child1", + Children: []*Module{ + { + Name: "grandchild1", + }, + }, + }, + { + Name: "child2", + Children: []*Module{ + { + Name: "grandchild2", + }, + }, + }, + }, + }, + []walkStep{ + { + Path: []string{"root"}, + ParentName: "", + }, + { + Path: []string{"root", "child1"}, + ParentName: "root", + }, + { + Path: []string{"root", "child1", "grandchild1"}, + ParentName: "child1", + }, + { + Path: []string{"root", "child2"}, + ParentName: "root", + }, + { + Path: []string{"root", "child2", "grandchild2"}, + ParentName: "child2", + }, + }, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { + wo := test.WalkOrder + test.Root.WalkTree(func(path []string, parent *Module, current *Module) error { + if len(wo) == 0 { + t.Fatalf("ran out of walk steps while expecting 
one for %#v", path) + } + step := wo[0] + wo = wo[1:] + if got, want := path, step.Path; !reflect.DeepEqual(got, want) { + t.Errorf("wrong path %#v; want %#v", got, want) + } + parentName := "" + if parent != nil { + parentName = parent.Name + } + if got, want := parentName, step.ParentName; got != want { + t.Errorf("wrong parent name %q; want %q", got, want) + } + + if got, want := current.Name, path[len(path)-1]; got != want { + t.Errorf("mismatching current.Name %q and final path element %q", got, want) + } + return nil + }) + }) + } +} + +func TestModuleSortChildren(t *testing.T) { + m := &Module{ + Name: "root", + Children: []*Module{ + { + Name: "apple", + }, + { + Name: "zebra", + }, + { + Name: "xylophone", + }, + { + Name: "pig", + }, + }, + } + + m.SortChildren() + + want := []string{"apple", "pig", "xylophone", "zebra"} + var got []string + for _, c := range m.Children { + got = append(got, c.Name) + } + + if !reflect.DeepEqual(want, got) { + t.Errorf("wrong order %#v; want %#v", want, got) + } +} + +func TestModuleProviderRequirements(t *testing.T) { + m := &Module{ + Name: "root", + Providers: Providers{ + addrs.NewDefaultProvider("foo"): ProviderDependency{ + Constraints: discovery.ConstraintStr(">=1.0.0").MustParse(), + }, + addrs.NewDefaultProvider("baz"): ProviderDependency{ + Constraints: discovery.ConstraintStr(">=3.0.0").MustParse(), + }, + }, + } + + reqd := m.ProviderRequirements() + if len(reqd) != 2 { + t.Errorf("wrong number of elements in %#v; want 2", reqd) + } + if got, want := reqd["foo"].Versions.String(), ">=1.0.0"; got != want { + t.Errorf("wrong combination of versions for 'foo' %q; want %q", got, want) + } + if got, want := reqd["baz"].Versions.String(), ">=3.0.0"; got != want { + t.Errorf("wrong combination of versions for 'baz' %q; want %q", got, want) + } +} diff --git a/pkg/moduletest/file.go b/pkg/moduletest/file.go new file mode 100644 index 00000000000..bcff32a51da --- /dev/null +++ b/pkg/moduletest/file.go @@ -0,0 +1,22 @@ 
// File represents a single test file within a test execution, holding
// its parsed configuration, the run blocks it contains, and the results
// gathered while executing it.
type File struct {
	// Config is the parsed configuration for this test file.
	Config *configs.TestFile

	// Name identifies the test file.
	Name string
	// Status is the overall status of the file. (NOTE(review): presumably
	// aggregated from the statuses of the contained runs — confirm
	// against the executor.)
	Status Status

	// Runs are the run blocks defined within this test file.
	Runs []*Run

	// Diagnostics collects diagnostics raised while processing this file.
	Diagnostics tfdiags.Diagnostics
}
+type Verbose struct { + Plan *plans.Plan + State *states.State + Config *configs.Config + Providers map[addrs.Provider]providers.ProviderSchema + Provisioners map[string]*configschema.Block +} + +func (run *Run) GetTargets() ([]addrs.Targetable, tfdiags.Diagnostics) { + var diagnostics tfdiags.Diagnostics + var targets []addrs.Targetable + + for _, target := range run.Config.Options.Target { + addr, diags := addrs.ParseTarget(target) + diagnostics = diagnostics.Append(diags) + if addr != nil { + targets = append(targets, addr.Subject) + } + } + + return targets, diagnostics +} + +func (run *Run) GetReplaces() ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + var diagnostics tfdiags.Diagnostics + var replaces []addrs.AbsResourceInstance + + for _, replace := range run.Config.Options.Replace { + addr, diags := addrs.ParseAbsResourceInstance(replace) + diagnostics = diagnostics.Append(diags) + if diags.HasErrors() { + continue + } + + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + diagnostics = diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Can only target managed resources for forced replacements.", + Detail: addr.String(), + Subject: replace.SourceRange().Ptr(), + }) + continue + } + + replaces = append(replaces, addr) + } + + return replaces, diagnostics +} + +func (run *Run) GetReferences() ([]*addrs.Reference, tfdiags.Diagnostics) { + var diagnostics tfdiags.Diagnostics + var references []*addrs.Reference + + for _, rule := range run.Config.CheckRules { + for _, variable := range rule.Condition.Variables() { + reference, diags := addrs.ParseRef(variable) + diagnostics = diagnostics.Append(diags) + if reference != nil { + references = append(references, reference) + } + } + for _, variable := range rule.ErrorMessage.Variables() { + reference, diags := addrs.ParseRef(variable) + diagnostics = diagnostics.Append(diags) + if reference != nil { + references = append(references, reference) + } + } + } + + return 
references, diagnostics +} + +// ValidateExpectedFailures steps through the provided diagnostics (which should +// be the result of a plan or an apply operation), and does 3 things: +// 1. Removes diagnostics that match the expected failures from the config. +// 2. Upgrades warnings from check blocks into errors where appropriate so the +// test will fail later. +// 3. Adds diagnostics for any expected failures that were not satisfied. +// +// Point 2 is a bit complicated so worth expanding on. In normal OpenTofu +// execution, any error that originates within a check block (either from an +// assertion or a scoped data source) is wrapped up as a Warning to be +// identified to the user but not to fail the actual OpenTofu operation. During +// test execution, we want to upgrade (or rollback) these warnings into errors +// again so the test will fail. We do that as part of this function as we are +// already processing the diagnostics from check blocks in here anyway. +// +// The way the function works out which diagnostics are relevant to expected +// failures is by using the tfdiags Extra functionality to detect which +// diagnostics were generated by custom conditions. OpenTofu adds the +// addrs.CheckRule that generated each diagnostic to the diagnostic itself so we +// can tell which diagnostics can be expected. +func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.Diagnostics { + + // We're going to capture all the checkable objects that are referenced + // from the expected failures. + expectedFailures := addrs.MakeMap[addrs.Referenceable, bool]() + sourceRanges := addrs.MakeMap[addrs.Referenceable, tfdiags.SourceRange]() + + for _, traversal := range run.Config.ExpectFailures { + // Ignore the diagnostics returned from the reference parsing, these + // references will have been checked earlier in the process by the + // validate stage so we don't need to do that again here. 
+ reference, _ := addrs.ParseRefFromTestingScope(traversal) + expectedFailures.Put(reference.Subject, false) + sourceRanges.Put(reference.Subject, reference.SourceRange) + } + + var diags tfdiags.Diagnostics + for _, diag := range originals { + + if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok { + switch rule.Container.CheckableKind() { + case addrs.CheckableOutputValue: + addr := rule.Container.(addrs.AbsOutputValue) + if !addr.Module.IsRoot() { + // failures can only be expected against checkable objects + // in the root module. This diagnostic will be added into + // returned set below. + break + } + + if diag.Severity() == tfdiags.Warning { + // Warnings don't count as errors. This diagnostic will be + // added into the returned set below. + break + } + + if expectedFailures.Has(addr.OutputValue) { + // Then this failure is expected! Mark the original map as + // having found a failure and swallow this error by + // continuing and not adding it into the returned set of + // diagnostics. + expectedFailures.Put(addr.OutputValue, true) + continue + } + + // Otherwise, this isn't an expected failure so just fall out + // and add it into the returned set of diagnostics below. + + case addrs.CheckableInputVariable: + addr := rule.Container.(addrs.AbsInputVariableInstance) + if !addr.Module.IsRoot() { + // failures can only be expected against checkable objects + // in the root module. This diagnostic will be added into + // returned set below. + break + } + + if diag.Severity() == tfdiags.Warning { + // Warnings don't count as errors. This diagnostic will be + // added into the returned set below. + break + } + if expectedFailures.Has(addr.Variable) { + // Then this failure is expected! Mark the original map as + // having found a failure and swallow this error by + // continuing and not adding it into the returned set of + // diagnostics. 
+ expectedFailures.Put(addr.Variable, true) + continue + } + + // Otherwise, this isn't an expected failure so just fall out + // and add it into the returned set of diagnostics below. + + case addrs.CheckableResource: + addr := rule.Container.(addrs.AbsResourceInstance) + if !addr.Module.IsRoot() { + // failures can only be expected against checkable objects + // in the root module. This diagnostic will be added into + // returned set below. + break + } + + if diag.Severity() == tfdiags.Warning { + // Warnings don't count as errors. This diagnostic will be + // added into the returned set below. + break + } + + if expectedFailures.Has(addr.Resource) { + // Then this failure is expected! Mark the original map as + // having found a failure and swallow this error by + // continuing and not adding it into the returned set of + // diagnostics. + expectedFailures.Put(addr.Resource, true) + continue + } + + if expectedFailures.Has(addr.Resource.Resource) { + // We can also blanket expect failures in all instances for + // a resource so we check for that here as well. + expectedFailures.Put(addr.Resource.Resource, true) + continue + } + + // Otherwise, this isn't an expected failure so just fall out + // and add it into the returned set of diagnostics below. + + case addrs.CheckableCheck: + addr := rule.Container.(addrs.AbsCheck) + + // Check blocks are a bit more difficult than the others. Check + // block diagnostics could be from a nested data block, or + // from a failed assertion, and have all been marked as just + // warning severity. + // + // For diagnostics from failed assertions, we want to check if + // it was expected and skip it if it was. But if it wasn't + // expected we want to upgrade the diagnostic from a warning + // into an error so the test case will fail overall. + // + // For diagnostics from nested data blocks, we have two + // categories of diagnostics. First, diagnostics that were + // originally errors and we mapped into warnings. 
Second, + // diagnostics that were originally warnings and stayed that + // way. For the first case, we want to turn these back to errors + // and use them as part of the expected failures functionality. + // The second case should remain as warnings and be ignored by + // the expected failures functionality. + // + // Note as well that we still want to upgrade failed checks + // from child modules into errors, so in the other branches we + // just do a simple blanket skip of all diagnostics not + // from the root module. We're more selective here, only + // diagnostics from the root module are considered for the + // expect failures functionality but we do also upgrade + // diagnostics from child modules back into errors. + + if rule.Type == addrs.CheckAssertion { + // Then this diagnostic is from a check block assertion, it + // is something we want to treat as an error even though it + // is actually claiming to be a warning. + + if addr.Module.IsRoot() && expectedFailures.Has(addr.Check) { + // Then this failure is expected! Mark the original map as + // having found a failure and continue. + expectedFailures.Put(addr.Check, true) + continue + } + + // Otherwise, let's package this up as an error and move on. + diags = diags.Append(tfdiags.Override(diag, tfdiags.Error, nil)) + continue + } else if rule.Type == addrs.CheckDataResource { + // Then the diagnostic we have was actually overridden so + // let's get back to the original. + original := tfdiags.UndoOverride(diag) + + // This diagnostic originated from a scoped data source. + if addr.Module.IsRoot() && original.Severity() == tfdiags.Error { + // Okay, we have a genuine error from the root module, + // so we can now check if we want to ignore it or not. + if expectedFailures.Has(addr.Check) { + // Then this failure is expected! Mark the original map as + // having found a failure and continue. 
+ expectedFailures.Put(addr.Check, true) + continue + } + } + + // In all other cases, we want to add the original error + // into the set we return to the testing framework and move + // onto the next one. + diags = diags.Append(original) + continue + } else { + panic("invalid CheckType: " + rule.Type.String()) + } + default: + panic("unrecognized CheckableKind: " + rule.Container.CheckableKind().String()) + } + } + + // If we get here, then we're not modifying the original diagnostic at + // all. We just want the testing framework to treat it as normal. + diags = diags.Append(diag) + } + + // Okay, we've checked all our diagnostics to see if any were expected. + // Now, let's make sure that all the checkable objects we expected to fail + // actually did! + + for _, elem := range expectedFailures.Elems { + addr := elem.Key + failed := elem.Value + + if !failed { + // Then we expected a failure, and it did not occur. Add it to the + // diagnostics. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing expected failure", + Detail: fmt.Sprintf("The checkable object, %s, was expected to report an error but did not.", addr.String()), + Subject: sourceRanges.Get(addr).ToHCL().Ptr(), + }) + } + } + + return diags +} diff --git a/pkg/moduletest/run_test.go b/pkg/moduletest/run_test.go new file mode 100644 index 00000000000..831d9969d26 --- /dev/null +++ b/pkg/moduletest/run_test.go @@ -0,0 +1,803 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package moduletest + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestRun_ValidateExpectedFailures(t *testing.T) { + + type output struct { + Description tfdiags.Description + Severity tfdiags.Severity + } + + tcs := map[string]struct { + ExpectedFailures []string + Input tfdiags.Diagnostics + Output []output + }{ + "empty": { + ExpectedFailures: nil, + Input: nil, + Output: nil, + }, + "carries through simple diags": { + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "simple error", + Detail: "want to see this in the returned set", + }) + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "simple warning", + Detail: "want to see this in the returned set", + }) + + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "simple error", + Detail: "want to see this in the returned set", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "simple warning", + Detail: "want to see this in the returned set", + }, + Severity: tfdiags.Warning, + }, + }, + }, + "expected failures did not fail": { + ExpectedFailures: []string{ + "check.example", + }, + Input: nil, + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "Missing expected failure", + Detail: "The checkable object, check.example, was expected to report an error but did not.", + }, + Severity: tfdiags.Error, + }, + }, + }, + "outputs": { + ExpectedFailures: []string{ + "output.expected_one", + "output.expected_two", + }, + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + + // First, let's 
create an output that failed that isn't + // expected. This should be unaffected by our function. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "unexpected failure", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "unexpected"}, + }, addrs.OutputPrecondition, 0), + }, + }) + + // Second, let's create an output that failed but is expected. + // Our function should remove this from the set of diags. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "expected_one"}, + }, addrs.OutputPrecondition, 0), + }, + }) + + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "expected warning", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{Name: "expected_one"}, + }, addrs.OutputPrecondition, 0), + }, + }) + + // The error we are adding here is for expected_two but in a + // child module. We expect that this diagnostic shouldn't + // trigger our expected failure, and that an extra diagnostic + // should be created complaining that the output wasn't actually + // triggered. 
+ + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "error in child module", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsOutputValue{ + Module: []addrs.ModuleInstanceStep{ + { + Name: "child_module", + }, + }, + OutputValue: addrs.OutputValue{Name: "expected_two"}, + }, addrs.OutputPrecondition, 0), + }, + }) + + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "unexpected failure", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "expected warning", + Detail: "this should not be removed", + }, + Severity: tfdiags.Warning, + }, + { + Description: tfdiags.Description{ + Summary: "error in child module", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "Missing expected failure", + Detail: "The checkable object, output.expected_two, was expected to report an error but did not.", + }, + Severity: tfdiags.Error, + }, + }, + }, + "variables": { + ExpectedFailures: []string{ + "var.expected_one", + "var.expected_two", + }, + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + + // First, let's create an input that failed that isn't + // expected. This should be unaffected by our function. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "unexpected failure", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsInputVariableInstance{ + Module: addrs.RootModuleInstance, + Variable: addrs.InputVariable{Name: "unexpected"}, + }, addrs.InputValidation, 0), + }, + }) + + // Second, let's create an input that failed but is expected. + // Our function should remove this from the set of diags. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsInputVariableInstance{ + Module: addrs.RootModuleInstance, + Variable: addrs.InputVariable{Name: "expected_one"}, + }, addrs.InputValidation, 0), + }, + }) + + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "expected warning", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsInputVariableInstance{ + Module: addrs.RootModuleInstance, + Variable: addrs.InputVariable{Name: "expected_one"}, + }, addrs.InputValidation, 0), + }, + }) + + // The error we are adding here is for expected_two but in a + // child module. We expect that this diagnostic shouldn't + // trigger our expected failure, and that an extra diagnostic + // should be created complaining that the output wasn't actually + // triggered. 
+ + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "error in child module", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsInputVariableInstance{ + Module: []addrs.ModuleInstanceStep{ + { + Name: "child_module", + }, + }, + Variable: addrs.InputVariable{Name: "expected_two"}, + }, addrs.InputValidation, 0), + }, + }) + + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "unexpected failure", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "expected warning", + Detail: "this should not be removed", + }, + Severity: tfdiags.Warning, + }, + { + Description: tfdiags.Description{ + Summary: "error in child module", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "Missing expected failure", + Detail: "The checkable object, var.expected_two, was expected to report an error but did not.", + }, + Severity: tfdiags.Error, + }, + }, + }, + "resources": { + ExpectedFailures: []string{ + "test_instance.single", + "test_instance.all_instances", + "test_instance.instance[0]", + "test_instance.instance[2]", + "test_instance.missing", + }, + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + // First, we'll create an unexpected failure that should be + // carried through untouched. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "unexpected failure", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "unexpected", + }, + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + // Second, we'll create a failure from our test_instance.single + // resource that should be removed. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.single", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "single", + }, + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + // Third, we'll create a warning from our test_instance.single + // resource that should be propagated as it is only a warning. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "expected warning in test_instance.single", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "single", + }, + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + // Fourth, we'll create diagnostics from several instances of + // the test_instance.all_instances which should all be removed. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.all_instances[0]", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "all_instances", + }, + Key: addrs.IntKey(0), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.all_instances[1]", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "all_instances", + }, + Key: addrs.IntKey(1), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.all_instances[2]", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "all_instances", + }, + Key: addrs.IntKey(2), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + // Fifth, we'll create diagnostics for several instances of + // the test_instance.instance resource, only some of which + // should be removed. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.instance[0]", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "instance", + }, + Key: addrs.IntKey(0), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.instance[1]", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "instance", + }, + Key: addrs.IntKey(1), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "expected failure in test_instance.instance[2]", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: addrs.RootModuleInstance, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "instance", + }, + Key: addrs.IntKey(2), + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + // Finally, we'll create an error that originated from + // test_instance.missing but in a child module which shouldn't + // be removed. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "failure in child module", + Detail: "this should not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsResourceInstance{ + Module: []addrs.ModuleInstanceStep{ + { + Name: "child_module", + }, + }, + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "missing", + }, + }, + }, addrs.ResourcePrecondition, 0), + }, + }) + + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "unexpected failure", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "expected warning in test_instance.single", + Detail: "this should not be removed", + }, + Severity: tfdiags.Warning, + }, + { + Description: tfdiags.Description{ + Summary: "expected failure in test_instance.instance[1]", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "failure in child module", + Detail: "this should not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "Missing expected failure", + Detail: "The checkable object, test_instance.missing, was expected to report an error but did not.", + }, + Severity: tfdiags.Error, + }, + }, + }, + "check_assertions": { + ExpectedFailures: []string{ + "check.expected", + "check.missing", + }, + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + // First, we'll add an unexpected warning from a check block + // assertion that should get upgraded to an error. 
+ diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "unexpected failure", + Detail: "this should upgrade and not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: addrs.RootModuleInstance, + Check: addrs.Check{ + Name: "unexpected", + }, + }, addrs.CheckAssertion, 0), + }, + }) + + // Second, we'll add an unexpected warning from a check block + // in a child module that should get upgraded to an error. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "expected failure in child module", + Detail: "this should upgrade and not be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: []addrs.ModuleInstanceStep{ + { + Name: "child_module", + }, + }, + Check: addrs.Check{ + Name: "expected", + }, + }, addrs.CheckAssertion, 0), + }, + }) + + // Third, we'll add an expected warning from a check block + // assertion that should be removed. + diags = diags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "expected failure", + Detail: "this should be removed", + Extra: &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: addrs.RootModuleInstance, + Check: addrs.Check{ + Name: "expected", + }, + }, addrs.CheckAssertion, 0), + }, + }) + + // The second expected failure has no diagnostics, we just want + // to make sure that a new diagnostic is added for this case. 
+ + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "unexpected failure", + Detail: "this should upgrade and not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "expected failure in child module", + Detail: "this should upgrade and not be removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "Missing expected failure", + Detail: "The checkable object, check.missing, was expected to report an error but did not.", + }, + Severity: tfdiags.Error, + }, + }, + }, + "check_data_sources": { + ExpectedFailures: []string{ + "check.expected", + }, + Input: createDiagnostics(func(diags tfdiags.Diagnostics) tfdiags.Diagnostics { + // First, we'll add an unexpected warning from a check block + // assertion that should be propagated as an error. + diags = diags.Append( + tfdiags.Override( + tfdiags.Sourceless(tfdiags.Error, "unexpected failure", "this should be an error and not removed"), + tfdiags.Warning, + func() tfdiags.DiagnosticExtraWrapper { + return &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: addrs.RootModuleInstance, + Check: addrs.Check{ + Name: "unexpected", + }, + }, addrs.CheckDataResource, 0), + } + })) + + // Second, we'll add an unexpected warning from a check block + // assertion that should remain as a warning. + diags = diags.Append( + tfdiags.Override( + tfdiags.Sourceless(tfdiags.Warning, "unexpected warning", "this should be a warning and not removed"), + tfdiags.Warning, + func() tfdiags.DiagnosticExtraWrapper { + return &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: addrs.RootModuleInstance, + Check: addrs.Check{ + Name: "unexpected", + }, + }, addrs.CheckDataResource, 0), + } + })) + + // Third, we'll add an unexpected warning from a check block + // in a child module that should be propagated as an error. 
+ diags = diags.Append( + tfdiags.Override( + tfdiags.Sourceless(tfdiags.Error, "expected failure from child module", "this should be an error and not removed"), + tfdiags.Warning, + func() tfdiags.DiagnosticExtraWrapper { + return &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: []addrs.ModuleInstanceStep{ + { + Name: "child_module", + }, + }, + Check: addrs.Check{ + Name: "expected", + }, + }, addrs.CheckDataResource, 0), + } + })) + + // Fourth, we'll add an expected warning that should be removed. + diags = diags.Append( + tfdiags.Override( + tfdiags.Sourceless(tfdiags.Error, "expected failure", "this should be removed"), + tfdiags.Warning, + func() tfdiags.DiagnosticExtraWrapper { + return &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addrs.AbsCheck{ + Module: addrs.RootModuleInstance, + Check: addrs.Check{ + Name: "expected", + }, + }, addrs.CheckDataResource, 0), + } + })) + + return diags + }), + Output: []output{ + { + Description: tfdiags.Description{ + Summary: "unexpected failure", + Detail: "this should be an error and not removed", + }, + Severity: tfdiags.Error, + }, + { + Description: tfdiags.Description{ + Summary: "unexpected warning", + Detail: "this should be a warning and not removed", + }, + Severity: tfdiags.Warning, + }, + { + Description: tfdiags.Description{ + Summary: "expected failure from child module", + Detail: "this should be an error and not removed", + }, + Severity: tfdiags.Error, + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + var traversals []hcl.Traversal + for _, ef := range tc.ExpectedFailures { + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(ef), "foo.tf", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + t.Errorf("invalid expected failure %s: %v", ef, diags.Error()) + } + traversals = append(traversals, traversal) + } + + if t.Failed() { + return + } + + run := Run{ + Config: &configs.TestRun{ + ExpectFailures: 
traversals, + }, + } + + out := run.ValidateExpectedFailures(tc.Input) + ix := 0 + for ; ix < len(tc.Output); ix++ { + expected := tc.Output[ix] + + if ix >= len(out) { + t.Errorf("missing diagnostic at %d, expected: [%s] %s, %s", ix, expected.Severity, expected.Description.Summary, expected.Description.Detail) + continue + } + + actual := output{ + Description: out[ix].Description(), + Severity: out[ix].Severity(), + } + + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("mismatched diagnostic at %d:\n%s", ix, diff) + } + } + + for ; ix < len(out); ix++ { + actual := out[ix] + t.Errorf("additional diagnostic at %d: [%s] %s, %s", ix, actual.Severity(), actual.Description().Summary, actual.Description().Detail) + } + }) + } +} + +func createDiagnostics(populate func(diags tfdiags.Diagnostics) tfdiags.Diagnostics) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + diags = populate(diags) + return diags +} diff --git a/pkg/moduletest/status.go b/pkg/moduletest/status.go new file mode 100644 index 00000000000..249ab64047f --- /dev/null +++ b/pkg/moduletest/status.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package moduletest + +// Status represents the status of a test case, and is defined as an iota within +// this file. +// +// The order of the definitions matters as different statuses do naturally take +// precedence over others. A test suite that has a mix of pass and fail statuses +// has failed overall and therefore the fail status is of higher precedence than +// the pass status. +// +// See the Status.Merge function for this requirement being used in action. 
+// +//go:generate go run golang.org/x/tools/cmd/stringer -type=Status status.go +type Status int + +const ( + Pending Status = iota + Skip + Pass + Fail + Error +) + +// Merge compares two statuses and returns a status that best represents the two +// together. +// +// This should be used to collate the overall status of a test file or test +// suite from the collection of test runs that have been executed. +// +// Essentially, if a test suite has a bunch of failures and passes the overall +// status would be failure. If a test suite has all passes, then the test suite +// would be pass overall. +// +// The implementation basically always returns the highest of the two, which +// means the order the statuses are defined within the iota matters. +func (status Status) Merge(next Status) Status { + if next > status { + return next + } + return status +} diff --git a/pkg/moduletest/status_string.go b/pkg/moduletest/status_string.go new file mode 100644 index 00000000000..437ad622c29 --- /dev/null +++ b/pkg/moduletest/status_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Status status.go"; DO NOT EDIT. + +package moduletest + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Pending-0] + _ = x[Skip-1] + _ = x[Pass-2] + _ = x[Fail-3] + _ = x[Error-4] +} + +const _Status_name = "PendingSkipPassFailError" + +var _Status_index = [...]uint8{0, 7, 11, 15, 19, 24} + +func (i Status) String() string { + if i < 0 || i >= Status(len(_Status_index)-1) { + return "Status(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Status_name[_Status_index[i]:_Status_index[i+1]] +} diff --git a/pkg/moduletest/suite.go b/pkg/moduletest/suite.go new file mode 100644 index 00000000000..21aeba52167 --- /dev/null +++ b/pkg/moduletest/suite.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package moduletest + +type Suite struct { + Status Status + + Files map[string]*File +} diff --git a/pkg/plans/action.go b/pkg/plans/action.go new file mode 100644 index 00000000000..aef4f8a351f --- /dev/null +++ b/pkg/plans/action.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +type Action rune + +const ( + NoOp Action = 0 + Create Action = '+' + Read Action = '←' + Update Action = '~' + DeleteThenCreate Action = '∓' + CreateThenDelete Action = '±' + Delete Action = '-' + Forget Action = '.' +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type Action + +// IsReplace returns true if the action is one of the two actions that +// represents replacing an existing object with a new object: +// DeleteThenCreate or CreateThenDelete. +func (a Action) IsReplace() bool { + return a == DeleteThenCreate || a == CreateThenDelete +} diff --git a/pkg/plans/action_string.go b/pkg/plans/action_string.go new file mode 100644 index 00000000000..82f68e3863b --- /dev/null +++ b/pkg/plans/action_string.go @@ -0,0 +1,55 @@ +// Code generated by "stringer -type Action"; DO NOT EDIT. 
+ +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoOp-0] + _ = x[Create-43] + _ = x[Read-8592] + _ = x[Update-126] + _ = x[DeleteThenCreate-8723] + _ = x[CreateThenDelete-177] + _ = x[Delete-45] + _ = x[Forget-46] +} + +const ( + _Action_name_0 = "NoOp" + _Action_name_1 = "Create" + _Action_name_2 = "DeleteForget" + _Action_name_3 = "Update" + _Action_name_4 = "CreateThenDelete" + _Action_name_5 = "Read" + _Action_name_6 = "DeleteThenCreate" +) + +var ( + _Action_index_2 = [...]uint8{0, 6, 12} +) + +func (i Action) String() string { + switch { + case i == 0: + return _Action_name_0 + case i == 43: + return _Action_name_1 + case 45 <= i && i <= 46: + i -= 45 + return _Action_name_2[_Action_index_2[i]:_Action_index_2[i+1]] + case i == 126: + return _Action_name_3 + case i == 177: + return _Action_name_4 + case i == 8592: + return _Action_name_5 + case i == 8723: + return _Action_name_6 + default: + return "Action(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/plans/changes.go b/pkg/plans/changes.go new file mode 100644 index 00000000000..31927b883ba --- /dev/null +++ b/pkg/plans/changes.go @@ -0,0 +1,595 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +// Changes describes various actions that OpenTofu will attempt to take if +// the corresponding plan is applied. +// +// A Changes object can be rendered into a visual diff (by the caller, using +// code in another package) for display to the user. +type Changes struct { + // Resources tracks planned changes to resource instance objects. 
+ Resources []*ResourceInstanceChangeSrc + + // Outputs tracks planned changes output values. + // + // Note that although an in-memory plan contains planned changes for + // outputs throughout the configuration, a plan serialized + // to disk retains only the root outputs because they are + // externally-visible, while other outputs are implementation details and + // can be easily re-calculated during the apply phase. Therefore only root + // module outputs will survive a round-trip through a plan file. + Outputs []*OutputChangeSrc +} + +// NewChanges returns a valid Changes object that describes no changes. +func NewChanges() *Changes { + return &Changes{} +} + +func (c *Changes) Empty() bool { + for _, res := range c.Resources { + if res.Action != NoOp || res.Moved() { + return false + } + + if res.Importing != nil { + return false + } + } + + for _, out := range c.Outputs { + if out.Addr.Module.IsRoot() && out.Action != NoOp { + return false + } + } + + return true +} + +// ResourceInstance returns the planned change for the current object of the +// resource instance of the given address, if any. Returns nil if no change is +// planned. +func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { + for _, rc := range c.Resources { + if rc.Addr.Equal(addr) && rc.DeposedKey == states.NotDeposed { + return rc + } + } + + return nil + +} + +// InstancesForAbsResource returns the planned change for the current objects +// of the resource instances of the given address, if any. Returns nil if no +// changes are planned. 
+func (c *Changes) InstancesForAbsResource(addr addrs.AbsResource) []*ResourceInstanceChangeSrc { + var changes []*ResourceInstanceChangeSrc + for _, rc := range c.Resources { + resAddr := rc.Addr.ContainingResource() + if resAddr.Equal(addr) && rc.DeposedKey == states.NotDeposed { + changes = append(changes, rc) + } + } + + return changes +} + +// InstancesForConfigResource returns the planned change for the current objects +// of the resource instances of the given address, if any. Returns nil if no +// changes are planned. +func (c *Changes) InstancesForConfigResource(addr addrs.ConfigResource) []*ResourceInstanceChangeSrc { + var changes []*ResourceInstanceChangeSrc + for _, rc := range c.Resources { + resAddr := rc.Addr.ContainingResource().Config() + if resAddr.Equal(addr) && rc.DeposedKey == states.NotDeposed { + changes = append(changes, rc) + } + } + + return changes +} + +// ResourceInstanceDeposed returns the plan change of a deposed object of +// the resource instance of the given address, if any. Returns nil if no change +// is planned. +func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { + for _, rc := range c.Resources { + if rc.Addr.Equal(addr) && rc.DeposedKey == key { + return rc + } + } + + return nil +} + +// OutputValue returns the planned change for the output value with the +// +// given address, if any. Returns nil if no change is planned. +func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { + for _, oc := range c.Outputs { + if oc.Addr.Equal(addr) { + return oc + } + } + + return nil +} + +// RootOutputValues returns planned changes for all outputs of the root module. 
+func (c *Changes) RootOutputValues() []*OutputChangeSrc { + var res []*OutputChangeSrc + + for _, oc := range c.Outputs { + // we can't evaluate root module outputs + if !oc.Addr.Module.Equal(addrs.RootModuleInstance) { + continue + } + + res = append(res, oc) + + } + + return res +} + +// OutputValues returns planned changes for all outputs for all module +// instances that reside in the parent path. Returns nil if no changes are +// planned. +func (c *Changes) OutputValues(parent addrs.ModuleInstance, module addrs.ModuleCall) []*OutputChangeSrc { + var res []*OutputChangeSrc + + for _, oc := range c.Outputs { + // we can't evaluate root module outputs + if oc.Addr.Module.Equal(addrs.RootModuleInstance) { + continue + } + + changeMod, changeCall := oc.Addr.Module.Call() + // this does not reside on our parent instance path + if !changeMod.Equal(parent) { + continue + } + + // this is not the module you're looking for + if changeCall.Name != module.Name { + continue + } + + res = append(res, oc) + + } + + return res +} + +// SyncWrapper returns a wrapper object around the receiver that can be used +// to make certain changes to the receiver in a concurrency-safe way, as long +// as all callers share the same wrapper object. +func (c *Changes) SyncWrapper() *ChangesSync { + return &ChangesSync{ + changes: c, + } +} + +// ResourceInstanceChange describes a change to a particular resource instance +// object. +type ResourceInstanceChange struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. + Addr addrs.AbsResourceInstance + + // PrevRunAddr is the absolute address that this resource instance had at + // the conclusion of a previous run. + // + // This will typically be the same as Addr, but can be different if the + // previous resource instance was subject to a "moved" block that we + // handled in the process of creating this plan. 
+ // + // For the initial creation of a resource instance there isn't really any + // meaningful "previous run address", but PrevRunAddr will still be set + // equal to Addr in that case in order to simplify logic elsewhere which + // aims to detect and react to the movement of instances between addresses. + PrevRunAddr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // Change is an embedded description of the change. + Change + + // ActionReason is an optional extra indication of why we chose the + // action recorded in Change.Action for this particular resource instance. + // + // This is an approximate mechanism only for the purpose of explaining the + // plan to end-users in the UI and is not to be used for any + // decision-making during the apply step; if apply behavior needs to vary + // depending on the "action reason" then the information for that decision + // must be recorded more precisely elsewhere for that purpose. + // + // Sometimes there might be more than one reason for choosing a particular + // action. In that case, it's up to the codepath making that decision to + // decide which value would provide the most relevant explanation to the + // end-user and return that. It's not a goal of this field to represent + // fine details about the planning process. 
+ ActionReason ResourceInstanceChangeActionReason + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // OpenTofu that relates to this change. OpenTofu will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. Pass the implied type of the +// corresponding resource type schema for correct operation. +func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { + cs, err := rc.Change.Encode(ty) + if err != nil { + return nil, err + } + prevRunAddr := rc.PrevRunAddr + if prevRunAddr.Resource.Resource.Type == "" { + // Suggests an old caller that hasn't been properly updated to + // populate this yet. + prevRunAddr = rc.Addr + } + return &ResourceInstanceChangeSrc{ + Addr: rc.Addr, + PrevRunAddr: prevRunAddr, + DeposedKey: rc.DeposedKey, + ProviderAddr: rc.ProviderAddr, + ChangeSrc: *cs, + ActionReason: rc.ActionReason, + RequiredReplace: rc.RequiredReplace, + Private: rc.Private, + }, err +} + +func (rc *ResourceInstanceChange) Moved() bool { + return !rc.Addr.Equal(rc.PrevRunAddr) +} + +// Simplify will, where possible, produce a change with a simpler action than +// the receiever given a flag indicating whether the caller is dealing with +// a normal apply or a destroy. This flag deals with the fact that OpenTofu +// Core uses a specialized graph node type for destroying; only that +// specialized node should set "destroying" to true. 
+// +// The following table shows the simplification behavior: +// +// Action Destroying? New Action +// --------+-------------+----------- +// Create true NoOp +// Delete false NoOp +// Replace true Delete +// Replace false Create +// +// For any combination not in the above table, the Simplify just returns the +// receiver as-is. +func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { + if destroying { + switch rc.Action { + case Delete: + // We'll fall out and just return rc verbatim, then. + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Delete, + Before: rc.Before, + After: cty.NullVal(rc.Before.Type()), + Importing: rc.Importing, + GeneratedConfig: rc.GeneratedConfig, + }, + } + default: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + Importing: rc.Importing, + GeneratedConfig: rc.GeneratedConfig, + }, + } + } + } else { + switch rc.Action { + case Delete: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + Importing: rc.Importing, + GeneratedConfig: rc.GeneratedConfig, + }, + } + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Create, + Before: cty.NullVal(rc.After.Type()), + After: rc.After, + Importing: rc.Importing, + GeneratedConfig: rc.GeneratedConfig, + }, + } + } + } + + // If we fall out here then our change is already simple enough. 
+ return rc +} + +// ResourceInstanceChangeActionReason allows for some extra user-facing +// reasoning for why a particular change action was chosen for a particular +// resource instance. +// +// This only represents sufficient detail to give a suitable explanation to +// an end-user, and mustn't be used for any real decision-making during the +// apply step. +type ResourceInstanceChangeActionReason rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceInstanceChangeActionReason changes.go + +const ( + // In most cases there's no special reason for choosing a particular + // action, which is represented by ResourceInstanceChangeNoReason. + ResourceInstanceChangeNoReason ResourceInstanceChangeActionReason = 0 + + // ResourceInstanceReplaceBecauseTainted indicates that the resource + // instance must be replaced because its existing current object is + // marked as "tainted". + ResourceInstanceReplaceBecauseTainted ResourceInstanceChangeActionReason = 'T' + + // ResourceInstanceReplaceByRequest indicates that the resource instance + // is planned to be replaced because a caller specifically asked for it + // to be using ReplaceAddrs. (On the command line, the -replace=... + // planning option.) + ResourceInstanceReplaceByRequest ResourceInstanceChangeActionReason = 'R' + + // ResourceInstanceReplaceByTriggers indicates that the resource instance + // is planned to be replaced because of a corresponding change in a + // replace_triggered_by reference. + ResourceInstanceReplaceByTriggers ResourceInstanceChangeActionReason = 'D' + + // ResourceInstanceReplaceBecauseCannotUpdate indicates that the resource + // instance is planned to be replaced because the provider has indicated + // that a requested change cannot be applied as an update. + // + // In this case, the RequiredReplace field will typically be populated on + // the ResourceInstanceChange object to give information about specifically + // which arguments changed in a non-updatable way. 
+ ResourceInstanceReplaceBecauseCannotUpdate ResourceInstanceChangeActionReason = 'F' + + // ResourceInstanceDeleteBecauseNoResourceConfig indicates that the + // resource instance is planned to be deleted because there's no + // corresponding resource configuration block in the configuration. + ResourceInstanceDeleteBecauseNoResourceConfig ResourceInstanceChangeActionReason = 'N' + + // ResourceInstanceDeleteBecauseWrongRepetition indicates that the + // resource instance is planned to be deleted because the instance key + // type isn't consistent with the repetition mode selected in the + // resource configuration. + ResourceInstanceDeleteBecauseWrongRepetition ResourceInstanceChangeActionReason = 'W' + + // ResourceInstanceDeleteBecauseCountIndex indicates that the resource + // instance is planned to be deleted because its integer instance key + // is out of range for the current configured resource "count" value. + ResourceInstanceDeleteBecauseCountIndex ResourceInstanceChangeActionReason = 'C' + + // ResourceInstanceDeleteBecauseEachKey indicates that the resource + // instance is planned to be deleted because its string instance key + // isn't one of the keys included in the current configured resource + // "for_each" value. + ResourceInstanceDeleteBecauseEachKey ResourceInstanceChangeActionReason = 'E' + + // ResourceInstanceDeleteBecauseNoModule indicates that the resource + // instance is planned to be deleted because it belongs to a module + // instance that's no longer declared in the configuration. + // + // This is less specific than the reasons we return for the various ways + // a resource instance itself can be no longer declared, including both + // the total removal of a module block and changes to its count/for_each + // arguments. This difference in detail is out of pragmatism, because + // potentially multiple nested modules could all contribute conflicting + // specific reasons for a particular instance to no longer be declared. 
+ ResourceInstanceDeleteBecauseNoModule ResourceInstanceChangeActionReason = 'M' + + // ResourceInstanceDeleteBecauseNoMoveTarget indicates that the resource + // address appears as the target ("to") in a moved block, but no + // configuration exists for that resource. According to our move rules, + // this combination evaluates to a deletion of the "new" resource. + ResourceInstanceDeleteBecauseNoMoveTarget ResourceInstanceChangeActionReason = 'A' + + // ResourceInstanceReadBecauseConfigUnknown indicates that the resource + // must be read during apply (rather than during planning) because its + // configuration contains unknown values. This reason applies only to + // data resources. + ResourceInstanceReadBecauseConfigUnknown ResourceInstanceChangeActionReason = '?' + + // ResourceInstanceReadBecauseDependencyPending indicates that the resource + // must be read during apply (rather than during planning) because it + // depends on a managed resource instance which has its own changes + // pending. + ResourceInstanceReadBecauseDependencyPending ResourceInstanceChangeActionReason = '!' + + // ResourceInstanceReadBecauseCheckNested indicates that the resource must + // be read during apply (as well as during planning) because it is inside + // a check block and when the check assertions execute we want them to use + // the most up-to-date data. + ResourceInstanceReadBecauseCheckNested ResourceInstanceChangeActionReason = '#' +) + +// OutputChange describes a change to an output value. +type OutputChange struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // Change is an embedded description of the change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. 
+ Change + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. + Sensitive bool +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. +func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { + cs, err := oc.Change.Encode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChangeSrc{ + Addr: oc.Addr, + ChangeSrc: *cs, + Sensitive: oc.Sensitive, + }, err +} + +// Importing is the part of a ChangeSrc that describes the embedded import +// action. +// +// The fields in here are subject to change, so downstream consumers should be +// prepared for backwards compatibility in case the contents changes. +type Importing struct { + // ID is the original ID of the imported resource. + ID string +} + +// Change describes a single change with a given action. +type Change struct { + // Action defines what kind of change is being made. + Action Action + + // Interpretation of Before and After depend on Action: + // + // NoOp Before and After are the same, unchanged value + // Create Before is nil, and After is the expected value after create. + // Read Before is any prior value (nil if no prior), and After is the + // value that was or will be read. + // Update Before is the value prior to update, and After is the expected + // value after update. + // Replace As with Update. + // Delete Before is the value prior to delete, and After is always nil. + // + // Unknown values may appear anywhere within the Before and After values, + // either as the values themselves or as nested elements within known + // collections/structures. + Before, After cty.Value + + // Importing is present if the resource is being imported as part of this + // change. 
+ // + // Use the simple presence of this field to detect if a ChangeSrc is to be + // imported, the contents of this structure may be modified going forward. + Importing *Importing + + // GeneratedConfig contains any HCL config generated for this resource + // during planning, as a string. If GeneratedConfig is populated, Importing + // should be true. However, not all Importing changes contain generated + // config. + GeneratedConfig string +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. Pass the type constraint +// that the values are expected to conform to; to properly decode the values +// later an identical type constraint must be provided at that time. +// +// Where a Change is embedded in some other struct, it's generally better +// to call the corresponding Encode method of that struct rather than working +// directly with its embedded Change. +func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { + // Storing unmarked values so that we can encode unmarked values + // and save the PathValueMarks for re-marking the values later + var beforeVM, afterVM []cty.PathValueMarks + unmarkedBefore := c.Before + unmarkedAfter := c.After + + if c.Before.ContainsMarked() { + unmarkedBefore, beforeVM = c.Before.UnmarkDeepWithPaths() + } + beforeDV, err := NewDynamicValue(unmarkedBefore, ty) + if err != nil { + return nil, err + } + + if c.After.ContainsMarked() { + unmarkedAfter, afterVM = c.After.UnmarkDeepWithPaths() + } + afterDV, err := NewDynamicValue(unmarkedAfter, ty) + if err != nil { + return nil, err + } + + var importing *ImportingSrc + if c.Importing != nil { + importing = &ImportingSrc{ID: c.Importing.ID} + } + + return &ChangeSrc{ + Action: c.Action, + Before: beforeDV, + After: afterDV, + BeforeValMarks: beforeVM, + AfterValMarks: afterVM, + Importing: importing, + GeneratedConfig: c.GeneratedConfig, + }, nil +} diff --git a/pkg/plans/changes_src.go 
b/pkg/plans/changes_src.go new file mode 100644 index 00000000000..112581877b8 --- /dev/null +++ b/pkg/plans/changes_src.go @@ -0,0 +1,268 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. +// Pass the associated resource type's schema type to method Decode to +// obtain a ResourceInstanceChange. +type ResourceInstanceChangeSrc struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. + Addr addrs.AbsResourceInstance + + // PrevRunAddr is the absolute address that this resource instance had at + // the conclusion of a previous run. + // + // This will typically be the same as Addr, but can be different if the + // previous resource instance was subject to a "moved" block that we + // handled in the process of creating this plan. + // + // For the initial creation of a resource instance there isn't really any + // meaningful "previous run address", but PrevRunAddr will still be set + // equal to Addr in that case in order to simplify logic elsewhere which + // aims to detect and react to the movement of instances between addresses. + PrevRunAddr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. 
+ DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // ChangeSrc is an embedded description of the not-yet-decoded change. + ChangeSrc + + // ActionReason is an optional extra indication of why we chose the + // action recorded in Change.Action for this particular resource instance. + // + // This is an approximate mechanism only for the purpose of explaining the + // plan to end-users in the UI and is not to be used for any + // decision-making during the apply step; if apply behavior needs to vary + // depending on the "action reason" then the information for that decision + // must be recorded more precisely elsewhere for that purpose. + // + // See the field of the same name in ResourceInstanceChange for more + // details. + ActionReason ResourceInstanceChangeActionReason + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // OpenTofu that relates to this change. OpenTofu will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Decode unmarshals the raw representation of the instance object being +// changed. Pass the implied type of the corresponding resource type schema +// for correct operation. +func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { + change, err := rcs.ChangeSrc.Decode(ty) + if err != nil { + return nil, err + } + prevRunAddr := rcs.PrevRunAddr + if prevRunAddr.Resource.Resource.Type == "" { + // Suggests an old caller that hasn't been properly updated to + // populate this yet. 
+ prevRunAddr = rcs.Addr + } + return &ResourceInstanceChange{ + Addr: rcs.Addr, + PrevRunAddr: prevRunAddr, + DeposedKey: rcs.DeposedKey, + ProviderAddr: rcs.ProviderAddr, + Change: *change, + ActionReason: rcs.ActionReason, + RequiredReplace: rcs.RequiredReplace, + Private: rcs.Private, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { + if rcs == nil { + return nil + } + ret := *rcs + + ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) + + if len(ret.Private) != 0 { + private := make([]byte, len(ret.Private)) + copy(private, ret.Private) + ret.Private = private + } + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +func (rcs *ResourceInstanceChangeSrc) Moved() bool { + return !rcs.Addr.Equal(rcs.PrevRunAddr) +} + +// OutputChangeSrc describes a change to an output value. +type OutputChangeSrc struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // ChangeSrc is an embedded description of the not-yet-decoded change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + ChangeSrc + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. 
+ Sensitive bool +} + +// Decode unmarshals the raw representation of the output value being +// changed. +func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { + change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChange{ + Addr: ocs.Addr, + Change: *change, + Sensitive: ocs.Sensitive, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { + if ocs == nil { + return nil + } + ret := *ocs + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// ImportingSrc is the part of a ChangeSrc that describes the embedded import +// action. +// +// The fields in here are subject to change, so downstream consumers should be +// prepared for backwards compatibility in case the contents changes. +type ImportingSrc struct { + // ID is the original ID of the imported resource. + ID string +} + +// ChangeSrc is a not-yet-decoded Change. +type ChangeSrc struct { + // Action defines what kind of change is being made. + Action Action + + // Before and After correspond to the fields of the same name in Change, + // but have not yet been decoded from the serialized value used for + // storage. + Before, After DynamicValue + + // BeforeValMarks and AfterValMarks are stored path+mark combinations + // that might be discovered when encoding a change. 
Marks are removed + // to enable encoding (marked values cannot be marshalled), and so storing + // the path+mark combinations allow us to re-mark the value later + // when, for example, displaying the diff to the UI. + BeforeValMarks, AfterValMarks []cty.PathValueMarks + + // Importing is present if the resource is being imported as part of this + // change. + // + // Use the simple presence of this field to detect if a ChangeSrc is to be + // imported, the contents of this structure may be modified going forward. + Importing *ImportingSrc + + // GeneratedConfig contains any HCL config generated for this resource + // during planning, as a string. If GeneratedConfig is populated, Importing + // should be true. However, not all Importing changes contain generated + // config. + GeneratedConfig string +} + +// Decode unmarshals the raw representations of the before and after values +// to produce a Change object. Pass the type constraint that the result must +// conform to. +// +// Where a ChangeSrc is embedded in some other struct, it's generally better +// to call the corresponding Decode method of that struct rather than working +// directly with its embedded Change. 
+func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { + var err error + before := cty.NullVal(ty) + after := cty.NullVal(ty) + + if len(cs.Before) > 0 { + before, err = cs.Before.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'before' value: %w", err) + } + } + if len(cs.After) > 0 { + after, err = cs.After.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'after' value: %w", err) + } + } + + var importing *Importing + if cs.Importing != nil { + importing = &Importing{ID: cs.Importing.ID} + } + + return &Change{ + Action: cs.Action, + Before: before.MarkWithPaths(cs.BeforeValMarks), + After: after.MarkWithPaths(cs.AfterValMarks), + Importing: importing, + GeneratedConfig: cs.GeneratedConfig, + }, nil +} diff --git a/pkg/plans/changes_state.go b/pkg/plans/changes_state.go new file mode 100644 index 00000000000..d1f8dbb6520 --- /dev/null +++ b/pkg/plans/changes_state.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "github.com/kubegems/opentofu/pkg/states" +) + +// PlannedState merges the set of changes described by the receiver into the +// given prior state to produce the planned result state. +// +// The result is an approximation of the state as it would exist after +// applying these changes, omitting any values that cannot be determined until +// the changes are actually applied. +func (c *Changes) PlannedState(prior *states.State) (*states.State, error) { + panic("Changes.PlannedState not yet implemented") +} diff --git a/pkg/plans/changes_sync.go b/pkg/plans/changes_sync.go new file mode 100644 index 00000000000..5f8529166e7 --- /dev/null +++ b/pkg/plans/changes_sync.go @@ -0,0 +1,227 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "fmt" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +// ChangesSync is a wrapper around a Changes that provides a concurrency-safe +// interface to insert new changes and retrieve copies of existing changes. +// +// Each ChangesSync is independent of all others, so all concurrent writers +// to a particular Changes must share a single ChangesSync. Behavior is +// undefined if any other caller makes changes to the underlying Changes +// object or its nested objects concurrently with any of the methods of a +// particular ChangesSync. +type ChangesSync struct { + lock sync.Mutex + changes *Changes +} + +// AppendResourceInstanceChange records the given resource instance change in +// the set of planned resource changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { + if cs == nil { + panic("AppendResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Resources = append(cs.changes.Resources, s) +} + +// GetResourceInstanceChange searches the set of resource instance changes for +// one matching the given address and generation, returning it if it exists. +// +// If no such change exists, nil is returned. +// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. 
+func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { + if cs == nil { + panic("GetResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + if gen == states.CurrentGen { + return cs.changes.ResourceInstance(addr).DeepCopy() + } + if dk, ok := gen.(states.DeposedKey); ok { + return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() + } + panic(fmt.Sprintf("unsupported generation value %#v", gen)) +} + +// GetChangesForConfigResource searches the set of resource instance +// changes and returns all changes related to a given configuration address. +// This is used to find possible changes related to a configuration +// reference. +// +// If no such changes exist, nil is returned. +// +// The returned objects are a deep copy of the change recorded in the plan, so +// callers may mutate them although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetChangesForConfigResource(addr addrs.ConfigResource) []*ResourceInstanceChangeSrc { + if cs == nil { + panic("GetChangesForConfigResource on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + var changes []*ResourceInstanceChangeSrc + for _, c := range cs.changes.InstancesForConfigResource(addr) { + changes = append(changes, c.DeepCopy()) + } + return changes +} + +// GetChangesForAbsResource searches the set of resource instance +// changes and returns all changes related to a given configuration address. +// +// If no such changes exist, nil is returned. +// +// The returned objects are a deep copy of the change recorded in the plan, so +// callers may mutate them although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. 
+func (cs *ChangesSync) GetChangesForAbsResource(addr addrs.AbsResource) []*ResourceInstanceChangeSrc { + if cs == nil { + panic("GetChangesForAbsResource on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + var changes []*ResourceInstanceChangeSrc + for _, c := range cs.changes.InstancesForAbsResource(addr) { + changes = append(changes, c.DeepCopy()) + } + return changes +} + +// RemoveResourceInstanceChange searches the set of resource instance changes +// for one matching the given address and generation, and removes it from the +// set if it exists. +func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { + if cs == nil { + panic("RemoveResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + dk := states.NotDeposed + if realDK, ok := gen.(states.DeposedKey); ok { + dk = realDK + } + + addrStr := addr.String() + for i, r := range cs.changes.Resources { + if r.Addr.String() != addrStr || r.DeposedKey != dk { + continue + } + copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) + cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] + return + } +} + +// AppendOutputChange records the given output value change in the set of +// planned value changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { + if cs == nil { + panic("AppendOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Outputs = append(cs.changes.Outputs, s) +} + +// GetOutputChange searches the set of output value changes for one matching +// the given address, returning it if it exists. +// +// If no such change exists, nil is returned. 
+// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { + if cs == nil { + panic("GetOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + return cs.changes.OutputValue(addr) +} + +// GetRootOutputChanges searches the set of output changes for any that reside in +// the root module. If no such changes exist, nil is returned. +// +// The returned objects are a deep copy of the change recorded in the plan, so +// callers may mutate them although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetRootOutputChanges() []*OutputChangeSrc { + if cs == nil { + panic("GetRootOutputChanges on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + return cs.changes.RootOutputValues() +} + +// GetOutputChanges searches the set of output changes for any that reside in +// module instances beneath the given module. If no changes exist, nil +// is returned. +// +// The returned objects are a deep copy of the change recorded in the plan, so +// callers may mutate them although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetOutputChanges(parent addrs.ModuleInstance, module addrs.ModuleCall) []*OutputChangeSrc { + if cs == nil { + panic("GetOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + return cs.changes.OutputValues(parent, module) +} + +// RemoveOutputChange searches the set of output value changes for one matching +// the given address, and removes it from the set if it exists. 
+func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { + if cs == nil { + panic("RemoveOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + addrStr := addr.String() + + for i, o := range cs.changes.Outputs { + if o.Addr.String() != addrStr { + continue + } + copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) + cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] + return + } +} diff --git a/pkg/plans/changes_test.go b/pkg/plans/changes_test.go new file mode 100644 index 00000000000..401bcf86f3a --- /dev/null +++ b/pkg/plans/changes_test.go @@ -0,0 +1,164 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestChangesEmpty(t *testing.T) { + testCases := map[string]struct { + changes *Changes + want bool + }{ + "no changes": { + &Changes{}, + true, + }, + "resource change": { + &Changes{ + Resources: []*ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ChangeSrc: ChangeSrc{ + Action: Update, + }, + }, + }, + }, + false, + }, + "resource change with no-op action": { + &Changes{ + Resources: []*ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ChangeSrc: ChangeSrc{ + Action: NoOp, + }, + }, + }, + }, + true, + }, + "resource moved with no-op change": { + &Changes{ + Resources: []*ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "toot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ChangeSrc: ChangeSrc{ + Action: NoOp, + }, + }, + }, + }, + false, + }, + "output change": { + &Changes{ + Outputs: []*OutputChangeSrc{ + { + Addr: addrs.OutputValue{ + Name: "result", + }.Absolute(addrs.RootModuleInstance), + ChangeSrc: ChangeSrc{ + Action: Update, + }, + }, + }, + }, + false, + }, + "output change no-op": { + &Changes{ + Outputs: []*OutputChangeSrc{ + { + Addr: addrs.OutputValue{ + Name: "result", + }.Absolute(addrs.RootModuleInstance), + ChangeSrc: ChangeSrc{ + Action: NoOp, + }, + }, + }, + }, + true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + if got, want := tc.changes.Empty(), tc.want; got != want { + t.Fatalf("unexpected result: got %v, want %v", got, want) + } + }) + } +} + +func TestChangeEncodeSensitive(t *testing.T) { + testVals := []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "ding": cty.StringVal("dong").Mark(marks.Sensitive), + }), + cty.StringVal("bleep").Mark("bloop"), + cty.ListVal([]cty.Value{cty.UnknownVal(cty.String).Mark("sup?")}), + } + + for _, v := range testVals { + t.Run(fmt.Sprintf("%#v", v), func(t *testing.T) { + change := Change{ + Before: cty.NullVal(v.Type()), + After: v, + } + + encoded, err := change.Encode(v.Type()) + if err != nil { + t.Fatal(err) + } + + decoded, err := encoded.Decode(v.Type()) + if err != nil { + t.Fatal(err) + } + + if !v.RawEquals(decoded.After) { + t.Fatalf("%#v != %#v\n", decoded.After, v) + } + 
}) + } +} diff --git a/pkg/plans/doc.go b/pkg/plans/doc.go new file mode 100644 index 00000000000..7c55901dc67 --- /dev/null +++ b/pkg/plans/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package plans contains the types that are used to represent OpenTofu plans. +// +// A plan describes a set of changes that OpenTofu will make to update remote +// objects to match with changes to the configuration. +package plans diff --git a/pkg/plans/dynamic_value.go b/pkg/plans/dynamic_value.go new file mode 100644 index 00000000000..b02e8e16d0d --- /dev/null +++ b/pkg/plans/dynamic_value.go @@ -0,0 +1,101 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "github.com/zclconf/go-cty/cty" + ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" +) + +// DynamicValue is the representation in the plan of a value whose type cannot +// be determined at compile time, such as because it comes from a schema +// defined in a plugin. +// +// This type is used as an indirection so that the overall plan structure can +// be decoded without schema available, and then the dynamic values accessed +// at a later time once the appropriate schema has been determined. +// +// Internally, DynamicValue is a serialized version of a cty.Value created +// against a particular type constraint. Callers should not access directly +// the serialized form, whose format may change in future. Values of this +// type must always be created by calling NewDynamicValue. +// +// The zero value of DynamicValue is nil, and represents the absence of a +// value within the Go type system. This is distinct from a cty.NullVal +// result, which represents the absence of a value within the cty type system. 
+type DynamicValue []byte + +// NewDynamicValue creates a DynamicValue by serializing the given value +// against the given type constraint. The value must conform to the type +// constraint, or the result is undefined. +// +// If the value to be encoded has no predefined schema (for example, for +// module output values and input variables), set the type constraint to +// cty.DynamicPseudoType in order to save type information as part of the +// value, and then also pass cty.DynamicPseudoType to method Decode to recover +// the original value. +// +// cty.NilVal can be used to represent the absence of a value, but callers +// must be careful to distinguish values that are absent at the Go layer +// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal +// results). +func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) { + // If we're given cty.NilVal (the zero value of cty.Value, which is + // distinct from a typed null value created by cty.NullVal) then we'll + // assume the caller is trying to represent the _absence_ of a value, + // and so we'll return a nil DynamicValue. + if val == cty.NilVal { + return DynamicValue(nil), nil + } + + // Currently our internal encoding is msgpack, via ctymsgpack. + buf, err := ctymsgpack.Marshal(val, ty) + if err != nil { + return nil, err + } + + return DynamicValue(buf), nil +} + +// Decode retrieves the effective value from the receiver by interpreting the +// serialized form against the given type constraint. For correct results, +// the type constraint must match (or be consistent with) the one that was +// used to create the receiver. +// +// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and +// instead represents the absence of a value. 
+func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) { + if v == nil { + return cty.NilVal, nil + } + + return ctymsgpack.Unmarshal([]byte(v), ty) +} + +// ImpliedType returns the type implied by the serialized structure of the +// receiving value. +// +// This will not necessarily be exactly the type that was given when the +// value was encoded, and in particular must not be used for values that +// were encoded with their static type given as cty.DynamicPseudoType. +// It is however safe to use this method for values that were encoded using +// their runtime type as the conforming type, with the result being +// semantically equivalent but with all lists and sets represented as tuples, +// and maps as objects, due to ambiguities of the serialization. +func (v DynamicValue) ImpliedType() (cty.Type, error) { + return ctymsgpack.ImpliedType([]byte(v)) +} + +// Copy produces a copy of the receiver with a distinct backing array. +func (v DynamicValue) Copy() DynamicValue { + if v == nil { + return nil + } + + ret := make(DynamicValue, len(v)) + copy(ret, v) + return ret +} diff --git a/pkg/plans/internal/planproto/doc.go b/pkg/plans/internal/planproto/doc.go new file mode 100644 index 00000000000..b81eb218ab6 --- /dev/null +++ b/pkg/plans/internal/planproto/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package planproto is home to the Go stubs generated from the tfplan protobuf +// schema. +// +// This is an internal package to be used only by OpenTofu's planfile package. +// From elsewhere in OpenTofu, use the API exported by the planfile package +// itself. 
+package planproto diff --git a/pkg/plans/internal/planproto/planfile.pb.go b/pkg/plans/internal/planproto/planfile.pb.go new file mode 100644 index 00000000000..e8de94ca668 --- /dev/null +++ b/pkg/plans/internal/planproto/planfile.pb.go @@ -0,0 +1,1796 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v3.15.6 +// source: planfile.proto + +package planproto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Mode describes the planning mode that created the plan. +type Mode int32 + +const ( + Mode_NORMAL Mode = 0 + Mode_DESTROY Mode = 1 + Mode_REFRESH_ONLY Mode = 2 +) + +// Enum value maps for Mode. +var ( + Mode_name = map[int32]string{ + 0: "NORMAL", + 1: "DESTROY", + 2: "REFRESH_ONLY", + } + Mode_value = map[string]int32{ + "NORMAL": 0, + "DESTROY": 1, + "REFRESH_ONLY": 2, + } +) + +func (x Mode) Enum() *Mode { + p := new(Mode) + *p = x + return p +} + +func (x Mode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Mode) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[0].Descriptor() +} + +func (Mode) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[0] +} + +func (x Mode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Mode.Descriptor instead. 
+func (Mode) EnumDescriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{0} +} + +// Action describes the type of action planned for an object. +// Not all action values are valid for all object types. +type Action int32 + +const ( + Action_NOOP Action = 0 + Action_CREATE Action = 1 + Action_READ Action = 2 + Action_UPDATE Action = 3 + Action_DELETE Action = 5 + Action_DELETE_THEN_CREATE Action = 6 + Action_CREATE_THEN_DELETE Action = 7 + Action_FORGET Action = 8 +) + +// Enum value maps for Action. +var ( + Action_name = map[int32]string{ + 0: "NOOP", + 1: "CREATE", + 2: "READ", + 3: "UPDATE", + 5: "DELETE", + 6: "DELETE_THEN_CREATE", + 7: "CREATE_THEN_DELETE", + 8: "FORGET", + } + Action_value = map[string]int32{ + "NOOP": 0, + "CREATE": 1, + "READ": 2, + "UPDATE": 3, + "DELETE": 5, + "DELETE_THEN_CREATE": 6, + "CREATE_THEN_DELETE": 7, + "FORGET": 8, + } +) + +func (x Action) Enum() *Action { + p := new(Action) + *p = x + return p +} + +func (x Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Action) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[1].Descriptor() +} + +func (Action) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[1] +} + +func (x Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Action.Descriptor instead. +func (Action) EnumDescriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{1} +} + +// ResourceInstanceActionReason sometimes provides some additional user-facing +// context for why a particular action was chosen for a resource instance. +// This is for user feedback only and never used to drive behavior during the +// subsequent apply step. 
+type ResourceInstanceActionReason int32 + +const ( + ResourceInstanceActionReason_NONE ResourceInstanceActionReason = 0 + ResourceInstanceActionReason_REPLACE_BECAUSE_TAINTED ResourceInstanceActionReason = 1 + ResourceInstanceActionReason_REPLACE_BY_REQUEST ResourceInstanceActionReason = 2 + ResourceInstanceActionReason_REPLACE_BECAUSE_CANNOT_UPDATE ResourceInstanceActionReason = 3 + ResourceInstanceActionReason_DELETE_BECAUSE_NO_RESOURCE_CONFIG ResourceInstanceActionReason = 4 + ResourceInstanceActionReason_DELETE_BECAUSE_WRONG_REPETITION ResourceInstanceActionReason = 5 + ResourceInstanceActionReason_DELETE_BECAUSE_COUNT_INDEX ResourceInstanceActionReason = 6 + ResourceInstanceActionReason_DELETE_BECAUSE_EACH_KEY ResourceInstanceActionReason = 7 + ResourceInstanceActionReason_DELETE_BECAUSE_NO_MODULE ResourceInstanceActionReason = 8 + ResourceInstanceActionReason_REPLACE_BY_TRIGGERS ResourceInstanceActionReason = 9 + ResourceInstanceActionReason_READ_BECAUSE_CONFIG_UNKNOWN ResourceInstanceActionReason = 10 + ResourceInstanceActionReason_READ_BECAUSE_DEPENDENCY_PENDING ResourceInstanceActionReason = 11 + ResourceInstanceActionReason_READ_BECAUSE_CHECK_NESTED ResourceInstanceActionReason = 13 + ResourceInstanceActionReason_DELETE_BECAUSE_NO_MOVE_TARGET ResourceInstanceActionReason = 12 +) + +// Enum value maps for ResourceInstanceActionReason. 
+var ( + ResourceInstanceActionReason_name = map[int32]string{ + 0: "NONE", + 1: "REPLACE_BECAUSE_TAINTED", + 2: "REPLACE_BY_REQUEST", + 3: "REPLACE_BECAUSE_CANNOT_UPDATE", + 4: "DELETE_BECAUSE_NO_RESOURCE_CONFIG", + 5: "DELETE_BECAUSE_WRONG_REPETITION", + 6: "DELETE_BECAUSE_COUNT_INDEX", + 7: "DELETE_BECAUSE_EACH_KEY", + 8: "DELETE_BECAUSE_NO_MODULE", + 9: "REPLACE_BY_TRIGGERS", + 10: "READ_BECAUSE_CONFIG_UNKNOWN", + 11: "READ_BECAUSE_DEPENDENCY_PENDING", + 13: "READ_BECAUSE_CHECK_NESTED", + 12: "DELETE_BECAUSE_NO_MOVE_TARGET", + } + ResourceInstanceActionReason_value = map[string]int32{ + "NONE": 0, + "REPLACE_BECAUSE_TAINTED": 1, + "REPLACE_BY_REQUEST": 2, + "REPLACE_BECAUSE_CANNOT_UPDATE": 3, + "DELETE_BECAUSE_NO_RESOURCE_CONFIG": 4, + "DELETE_BECAUSE_WRONG_REPETITION": 5, + "DELETE_BECAUSE_COUNT_INDEX": 6, + "DELETE_BECAUSE_EACH_KEY": 7, + "DELETE_BECAUSE_NO_MODULE": 8, + "REPLACE_BY_TRIGGERS": 9, + "READ_BECAUSE_CONFIG_UNKNOWN": 10, + "READ_BECAUSE_DEPENDENCY_PENDING": 11, + "READ_BECAUSE_CHECK_NESTED": 13, + "DELETE_BECAUSE_NO_MOVE_TARGET": 12, + } +) + +func (x ResourceInstanceActionReason) Enum() *ResourceInstanceActionReason { + p := new(ResourceInstanceActionReason) + *p = x + return p +} + +func (x ResourceInstanceActionReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceInstanceActionReason) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[2].Descriptor() +} + +func (ResourceInstanceActionReason) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[2] +} + +func (x ResourceInstanceActionReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceInstanceActionReason.Descriptor instead. 
+func (ResourceInstanceActionReason) EnumDescriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{2} +} + +// Status describes the status of a particular checkable object at the +// completion of the plan. +type CheckResults_Status int32 + +const ( + CheckResults_UNKNOWN CheckResults_Status = 0 + CheckResults_PASS CheckResults_Status = 1 + CheckResults_FAIL CheckResults_Status = 2 + CheckResults_ERROR CheckResults_Status = 3 +) + +// Enum value maps for CheckResults_Status. +var ( + CheckResults_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PASS", + 2: "FAIL", + 3: "ERROR", + } + CheckResults_Status_value = map[string]int32{ + "UNKNOWN": 0, + "PASS": 1, + "FAIL": 2, + "ERROR": 3, + } +) + +func (x CheckResults_Status) Enum() *CheckResults_Status { + p := new(CheckResults_Status) + *p = x + return p +} + +func (x CheckResults_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CheckResults_Status) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[3].Descriptor() +} + +func (CheckResults_Status) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[3] +} + +func (x CheckResults_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CheckResults_Status.Descriptor instead. +func (CheckResults_Status) EnumDescriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{5, 0} +} + +type CheckResults_ObjectKind int32 + +const ( + CheckResults_UNSPECIFIED CheckResults_ObjectKind = 0 + CheckResults_RESOURCE CheckResults_ObjectKind = 1 + CheckResults_OUTPUT_VALUE CheckResults_ObjectKind = 2 + CheckResults_CHECK CheckResults_ObjectKind = 3 + CheckResults_INPUT_VARIABLE CheckResults_ObjectKind = 4 +) + +// Enum value maps for CheckResults_ObjectKind. 
+var ( + CheckResults_ObjectKind_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "RESOURCE", + 2: "OUTPUT_VALUE", + 3: "CHECK", + 4: "INPUT_VARIABLE", + } + CheckResults_ObjectKind_value = map[string]int32{ + "UNSPECIFIED": 0, + "RESOURCE": 1, + "OUTPUT_VALUE": 2, + "CHECK": 3, + "INPUT_VARIABLE": 4, + } +) + +func (x CheckResults_ObjectKind) Enum() *CheckResults_ObjectKind { + p := new(CheckResults_ObjectKind) + *p = x + return p +} + +func (x CheckResults_ObjectKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CheckResults_ObjectKind) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[4].Descriptor() +} + +func (CheckResults_ObjectKind) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[4] +} + +func (x CheckResults_ObjectKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CheckResults_ObjectKind.Descriptor instead. +func (CheckResults_ObjectKind) EnumDescriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{5, 1} +} + +// Plan is the root message type for the tfplan file +type Plan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version is incremented whenever there is a breaking change to + // the serialization format. Programs reading serialized plans should + // verify that version is set to the expected value and abort processing + // if not. A breaking change is any change that may cause an older + // consumer to interpret the structure incorrectly. This number will + // not be incremented if an existing consumer can either safely ignore + // changes to the format or if an existing consumer would fail to process + // the file for another message- or field-specific reason. 
+ Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // The mode that was active when this plan was created. + // + // This is saved only for UI purposes, so that OpenTofu can tailor its + // rendering of the plan depending on the mode. This must never be used to + // make decisions in OpenTofu Core during the applying of a plan. + UiMode Mode `protobuf:"varint,17,opt,name=ui_mode,json=uiMode,proto3,enum=tfplan.Mode" json:"ui_mode,omitempty"` + // Errored is true for any plan whose creation was interrupted by an + // error. A plan with this flag set cannot be applied, and the changes + // it proposes are likely to be incomplete. + Errored bool `protobuf:"varint,20,opt,name=errored,proto3" json:"errored,omitempty"` + // The variables that were set when creating the plan. Each value is + // a msgpack serialization of an HCL value. + Variables map[string]*DynamicValue `protobuf:"bytes,2,rep,name=variables,proto3" json:"variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // An unordered set of proposed changes to resources throughout the + // configuration, including any nested modules. Use the address of + // each resource to determine which module it belongs to. + ResourceChanges []*ResourceInstanceChange `protobuf:"bytes,3,rep,name=resource_changes,json=resourceChanges,proto3" json:"resource_changes,omitempty"` + // An unordered set of detected drift: changes made to resources outside of + // OpenTofu, computed by comparing the previous run's state to the state + // after refresh. + ResourceDrift []*ResourceInstanceChange `protobuf:"bytes,18,rep,name=resource_drift,json=resourceDrift,proto3" json:"resource_drift,omitempty"` + // An unordered set of proposed changes to outputs in the root module + // of the configuration. This set also includes "no action" changes for + // outputs that are not changing, as context for detecting inconsistencies + // at apply time. 
+ OutputChanges []*OutputChange `protobuf:"bytes,4,rep,name=output_changes,json=outputChanges,proto3" json:"output_changes,omitempty"` + // An unordered set of check results for the entire configuration. + // + // Each element represents a single static configuration object that has + // checks, and each of those may have zero or more dynamic objects that + // the checks were applied to nested within. + CheckResults []*CheckResults `protobuf:"bytes,19,rep,name=check_results,json=checkResults,proto3" json:"check_results,omitempty"` + // An unordered set of target addresses to include when applying. If no + // target addresses are present, the plan applies to the whole + // configuration. + TargetAddrs []string `protobuf:"bytes,5,rep,name=target_addrs,json=targetAddrs,proto3" json:"target_addrs,omitempty"` + // An unordered set of force-replace addresses to include when applying. + // This must match the set of addresses that was used when creating the + // plan, or else applying the plan will fail when it reaches a different + // conclusion about what action a particular resource instance needs. + ForceReplaceAddrs []string `protobuf:"bytes,16,rep,name=force_replace_addrs,json=forceReplaceAddrs,proto3" json:"force_replace_addrs,omitempty"` + // The version string for the OpenTofu binary that created this plan. + TerraformVersion string `protobuf:"bytes,14,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + // Backend is a description of the backend configuration and other related + // settings at the time the plan was created. + Backend *Backend `protobuf:"bytes,13,opt,name=backend,proto3" json:"backend,omitempty"` + // RelevantAttributes lists individual resource attributes from + // ResourceDrift which may have contributed to the plan changes. 
+ RelevantAttributes []*PlanResourceAttr `protobuf:"bytes,15,rep,name=relevant_attributes,json=relevantAttributes,proto3" json:"relevant_attributes,omitempty"` + // timestamp is the record of truth for when the plan happened. + Timestamp string `protobuf:"bytes,21,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *Plan) Reset() { + *x = Plan{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Plan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Plan) ProtoMessage() {} + +func (x *Plan) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Plan.ProtoReflect.Descriptor instead. 
+func (*Plan) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{0} +} + +func (x *Plan) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Plan) GetUiMode() Mode { + if x != nil { + return x.UiMode + } + return Mode_NORMAL +} + +func (x *Plan) GetErrored() bool { + if x != nil { + return x.Errored + } + return false +} + +func (x *Plan) GetVariables() map[string]*DynamicValue { + if x != nil { + return x.Variables + } + return nil +} + +func (x *Plan) GetResourceChanges() []*ResourceInstanceChange { + if x != nil { + return x.ResourceChanges + } + return nil +} + +func (x *Plan) GetResourceDrift() []*ResourceInstanceChange { + if x != nil { + return x.ResourceDrift + } + return nil +} + +func (x *Plan) GetOutputChanges() []*OutputChange { + if x != nil { + return x.OutputChanges + } + return nil +} + +func (x *Plan) GetCheckResults() []*CheckResults { + if x != nil { + return x.CheckResults + } + return nil +} + +func (x *Plan) GetTargetAddrs() []string { + if x != nil { + return x.TargetAddrs + } + return nil +} + +func (x *Plan) GetForceReplaceAddrs() []string { + if x != nil { + return x.ForceReplaceAddrs + } + return nil +} + +func (x *Plan) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *Plan) GetBackend() *Backend { + if x != nil { + return x.Backend + } + return nil +} + +func (x *Plan) GetRelevantAttributes() []*PlanResourceAttr { + if x != nil { + return x.RelevantAttributes + } + return nil +} + +func (x *Plan) GetTimestamp() string { + if x != nil { + return x.Timestamp + } + return "" +} + +// Backend is a description of backend configuration and other related settings. 
+type Backend struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Workspace string `protobuf:"bytes,3,opt,name=workspace,proto3" json:"workspace,omitempty"` +} + +func (x *Backend) Reset() { + *x = Backend{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Backend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Backend) ProtoMessage() {} + +func (x *Backend) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Backend.ProtoReflect.Descriptor instead. +func (*Backend) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{1} +} + +func (x *Backend) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Backend) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *Backend) GetWorkspace() string { + if x != nil { + return x.Workspace + } + return "" +} + +// Change represents a change made to some object, transforming it from an old +// state to a new state. +type Change struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not all action values are valid for all object types. Consult + // the documentation for any message that embeds Change. 
+ Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=tfplan.Action" json:"action,omitempty"` + // msgpack-encoded HCL values involved in the change. + // - For update and replace, two values are provided that give the old and new values, + // respectively. + // - For create, one value is provided that gives the new value to be created + // - For delete, one value is provided that describes the value being deleted + // - For read, two values are provided that give the prior value for this object + // (or null, if no prior value exists) and the value that was or will be read, + // respectively. + // - For no-op, one value is provided that is left unmodified by this non-change. + Values []*DynamicValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` + // An unordered set of paths into the old value which are marked as + // sensitive. Values at these paths should be obscured in human-readable + // output. This set is always empty for create. + BeforeSensitivePaths []*Path `protobuf:"bytes,3,rep,name=before_sensitive_paths,json=beforeSensitivePaths,proto3" json:"before_sensitive_paths,omitempty"` + // An unordered set of paths into the new value which are marked as + // sensitive. Values at these paths should be obscured in human-readable + // output. This set is always empty for delete. + AfterSensitivePaths []*Path `protobuf:"bytes,4,rep,name=after_sensitive_paths,json=afterSensitivePaths,proto3" json:"after_sensitive_paths,omitempty"` + // Importing, if true, specifies that the resource is being imported as part + // of the change. + Importing *Importing `protobuf:"bytes,5,opt,name=importing,proto3" json:"importing,omitempty"` + // GeneratedConfig contains any configuration that was generated as part of + // the change, as an HCL string. 
+ GeneratedConfig string `protobuf:"bytes,6,opt,name=generated_config,json=generatedConfig,proto3" json:"generated_config,omitempty"` +} + +func (x *Change) Reset() { + *x = Change{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Change) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Change) ProtoMessage() {} + +func (x *Change) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Change.ProtoReflect.Descriptor instead. +func (*Change) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{2} +} + +func (x *Change) GetAction() Action { + if x != nil { + return x.Action + } + return Action_NOOP +} + +func (x *Change) GetValues() []*DynamicValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *Change) GetBeforeSensitivePaths() []*Path { + if x != nil { + return x.BeforeSensitivePaths + } + return nil +} + +func (x *Change) GetAfterSensitivePaths() []*Path { + if x != nil { + return x.AfterSensitivePaths + } + return nil +} + +func (x *Change) GetImporting() *Importing { + if x != nil { + return x.Importing + } + return nil +} + +func (x *Change) GetGeneratedConfig() string { + if x != nil { + return x.GeneratedConfig + } + return "" +} + +type ResourceInstanceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // addr is a string representation of the resource instance address that + // this change will apply to. 
+ Addr string `protobuf:"bytes,13,opt,name=addr,proto3" json:"addr,omitempty"` + // prev_run_addr is a string representation of the address at which + // this resource instance was tracked during the previous apply operation. + // + // This is populated only if it would be different from addr due to + // OpenTofu having reacted to refactoring annotations in the configuration. + // If empty, the previous run address is the same as the current address. + PrevRunAddr string `protobuf:"bytes,14,opt,name=prev_run_addr,json=prevRunAddr,proto3" json:"prev_run_addr,omitempty"` + // deposed_key, if set, indicates that this change applies to a deposed + // object for the indicated instance with the given deposed key. If not + // set, the change applies to the instance's current object. + DeposedKey string `protobuf:"bytes,7,opt,name=deposed_key,json=deposedKey,proto3" json:"deposed_key,omitempty"` + // provider is the address of the provider configuration that this change + // was planned with, and thus the configuration that must be used to + // apply it. + Provider string `protobuf:"bytes,8,opt,name=provider,proto3" json:"provider,omitempty"` + // Description of the proposed change. May use "create", "read", "update", + // "replace", "delete", "forget" and "no-op" actions. + Change *Change `protobuf:"bytes,9,opt,name=change,proto3" json:"change,omitempty"` + // raw blob value provided by the provider as additional context for the + // change. Must be considered an opaque value for any consumer other than + // the provider that generated it, and will be returned verbatim to the + // provider during the subsequent apply operation. + Private []byte `protobuf:"bytes,10,opt,name=private,proto3" json:"private,omitempty"` + // An unordered set of paths that prompted the change action to be + // "replace" rather than "update". Empty for any action other than + // "replace". 
+ RequiredReplace []*Path `protobuf:"bytes,11,rep,name=required_replace,json=requiredReplace,proto3" json:"required_replace,omitempty"` + // Optional extra user-oriented context for why change.Action was chosen. + // This is for user feedback only and never used to drive behavior during + // apply. + ActionReason ResourceInstanceActionReason `protobuf:"varint,12,opt,name=action_reason,json=actionReason,proto3,enum=tfplan.ResourceInstanceActionReason" json:"action_reason,omitempty"` +} + +func (x *ResourceInstanceChange) Reset() { + *x = ResourceInstanceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInstanceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInstanceChange) ProtoMessage() {} + +func (x *ResourceInstanceChange) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInstanceChange.ProtoReflect.Descriptor instead. 
+func (*ResourceInstanceChange) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{3} +} + +func (x *ResourceInstanceChange) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +func (x *ResourceInstanceChange) GetPrevRunAddr() string { + if x != nil { + return x.PrevRunAddr + } + return "" +} + +func (x *ResourceInstanceChange) GetDeposedKey() string { + if x != nil { + return x.DeposedKey + } + return "" +} + +func (x *ResourceInstanceChange) GetProvider() string { + if x != nil { + return x.Provider + } + return "" +} + +func (x *ResourceInstanceChange) GetChange() *Change { + if x != nil { + return x.Change + } + return nil +} + +func (x *ResourceInstanceChange) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ResourceInstanceChange) GetRequiredReplace() []*Path { + if x != nil { + return x.RequiredReplace + } + return nil +} + +func (x *ResourceInstanceChange) GetActionReason() ResourceInstanceActionReason { + if x != nil { + return x.ActionReason + } + return ResourceInstanceActionReason_NONE +} + +type OutputChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the output as defined in the root module. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the proposed change. May use "no-op", "create", + // "update" and "delete" actions. + Change *Change `protobuf:"bytes,2,opt,name=change,proto3" json:"change,omitempty"` + // Sensitive, if true, indicates that one or more of the values given + // in "change" is sensitive and should not be shown directly in any + // rendered plan. 
+ Sensitive bool `protobuf:"varint,3,opt,name=sensitive,proto3" json:"sensitive,omitempty"` +} + +func (x *OutputChange) Reset() { + *x = OutputChange{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputChange) ProtoMessage() {} + +func (x *OutputChange) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputChange.ProtoReflect.Descriptor instead. +func (*OutputChange) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{4} +} + +func (x *OutputChange) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *OutputChange) GetChange() *Change { + if x != nil { + return x.Change + } + return nil +} + +func (x *OutputChange) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +type CheckResults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Kind CheckResults_ObjectKind `protobuf:"varint,1,opt,name=kind,proto3,enum=tfplan.CheckResults_ObjectKind" json:"kind,omitempty"` + // Address of the configuration object that declared the checks. + ConfigAddr string `protobuf:"bytes,2,opt,name=config_addr,json=configAddr,proto3" json:"config_addr,omitempty"` + // The aggregate status of the entire configuration object, based on + // the statuses of its zero or more checkable objects. 
+ Status CheckResults_Status `protobuf:"varint,3,opt,name=status,proto3,enum=tfplan.CheckResults_Status" json:"status,omitempty"` + // The results for individual objects that were declared by the + // configuration object named in config_addr. + Objects []*CheckResults_ObjectResult `protobuf:"bytes,4,rep,name=objects,proto3" json:"objects,omitempty"` +} + +func (x *CheckResults) Reset() { + *x = CheckResults{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckResults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckResults) ProtoMessage() {} + +func (x *CheckResults) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckResults.ProtoReflect.Descriptor instead. +func (*CheckResults) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{5} +} + +func (x *CheckResults) GetKind() CheckResults_ObjectKind { + if x != nil { + return x.Kind + } + return CheckResults_UNSPECIFIED +} + +func (x *CheckResults) GetConfigAddr() string { + if x != nil { + return x.ConfigAddr + } + return "" +} + +func (x *CheckResults) GetStatus() CheckResults_Status { + if x != nil { + return x.Status + } + return CheckResults_UNKNOWN +} + +func (x *CheckResults) GetObjects() []*CheckResults_ObjectResult { + if x != nil { + return x.Objects + } + return nil +} + +// DynamicValue represents a value whose type is not decided until runtime, +// often based on schema information obtained from a plugin. 
+// +// At present dynamic values are always encoded as msgpack, with extension +// id 0 used to represent the special "unknown" value indicating results +// that won't be known until after apply. +// +// In future other serialization formats may be used, possibly with a +// transitional period of including both as separate attributes of this type. +// Consumers must ignore attributes they don't support and fail if no supported +// attribute is present. The top-level format version will not be incremented +// for changes to the set of dynamic serialization formats. +type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. +func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{6} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +// Path represents a set of steps to traverse into a data structure. It is +// used to refer to a sub-structure within a dynamic data structure presented +// separately. 
+type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*Path_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{7} +} + +func (x *Path) GetSteps() []*Path_Step { + if x != nil { + return x.Steps + } + return nil +} + +// Importing contains the embedded metadata about the import operation if this +// change describes it. +type Importing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The original ID of the resource. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *Importing) Reset() { + *x = Importing{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Importing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Importing) ProtoMessage() {} + +func (x *Importing) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Importing.ProtoReflect.Descriptor instead. +func (*Importing) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{8} +} + +func (x *Importing) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type PlanResourceAttr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + Attr *Path `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` +} + +func (x *PlanResourceAttr) Reset() { + *x = PlanResourceAttr{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceAttr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceAttr) ProtoMessage() {} + +func (x *PlanResourceAttr) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
PlanResourceAttr.ProtoReflect.Descriptor instead. +func (*PlanResourceAttr) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *PlanResourceAttr) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *PlanResourceAttr) GetAttr() *Path { + if x != nil { + return x.Attr + } + return nil +} + +type CheckResults_ObjectResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ObjectAddr string `protobuf:"bytes,1,opt,name=object_addr,json=objectAddr,proto3" json:"object_addr,omitempty"` + Status CheckResults_Status `protobuf:"varint,2,opt,name=status,proto3,enum=tfplan.CheckResults_Status" json:"status,omitempty"` + FailureMessages []string `protobuf:"bytes,3,rep,name=failure_messages,json=failureMessages,proto3" json:"failure_messages,omitempty"` +} + +func (x *CheckResults_ObjectResult) Reset() { + *x = CheckResults_ObjectResult{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckResults_ObjectResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckResults_ObjectResult) ProtoMessage() {} + +func (x *CheckResults_ObjectResult) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckResults_ObjectResult.ProtoReflect.Descriptor instead. 
+func (*CheckResults_ObjectResult) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *CheckResults_ObjectResult) GetObjectAddr() string { + if x != nil { + return x.ObjectAddr + } + return "" +} + +func (x *CheckResults_ObjectResult) GetStatus() CheckResults_Status { + if x != nil { + return x.Status + } + return CheckResults_UNKNOWN +} + +func (x *CheckResults_ObjectResult) GetFailureMessages() []string { + if x != nil { + return x.FailureMessages + } + return nil +} + +type Path_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *Path_Step_AttributeName + // *Path_Step_ElementKey + Selector isPath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *Path_Step) Reset() { + *x = Path_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Path_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path_Step) ProtoMessage() {} + +func (x *Path_Step) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path_Step.ProtoReflect.Descriptor instead. 
+func (*Path_Step) Descriptor() ([]byte, []int) { + return file_planfile_proto_rawDescGZIP(), []int{7, 0} +} + +func (m *Path_Step) GetSelector() isPath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *Path_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*Path_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *Path_Step) GetElementKey() *DynamicValue { + if x, ok := x.GetSelector().(*Path_Step_ElementKey); ok { + return x.ElementKey + } + return nil +} + +type isPath_Step_Selector interface { + isPath_Step_Selector() +} + +type Path_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type Path_Step_ElementKey struct { + // Set "element_key" to represent looking up an element in + // an indexable collection type. + ElementKey *DynamicValue `protobuf:"bytes,2,opt,name=element_key,json=elementKey,proto3,oneof"` +} + +func (*Path_Step_AttributeName) isPath_Step_Selector() {} + +func (*Path_Step_ElementKey) isPath_Step_Selector() {} + +var File_planfile_proto protoreflect.FileDescriptor + +var file_planfile_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x06, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x22, 0xdf, 0x06, 0x0a, 0x04, 0x50, 0x6c, 0x61, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x07, 0x75, + 0x69, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x75, 0x69, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x14, 0x20, + 0x01, 
0x28, 0x08, 0x52, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x12, 0x39, 0x0a, 0x09, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x2e, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, + 0x72, 0x69, 0x66, 0x74, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x61, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x72, 0x69, 0x66, 0x74, 0x12, 0x3b, 0x0a, 0x0e, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x52, 0x0c, 0x63, 0x68, 
0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x11, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, + 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x4b, 0x0a, 0x13, + 0x72, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x61, 0x74, 0x74, 0x72, 0x52, 0x12, 0x72, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x52, 0x0a, 0x0e, 0x56, 0x61, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 
0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x61, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x61, 0x74, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x61, 0x74, 0x74, 0x72, 0x22, 0x69, 0x0a, 0x07, 0x42, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc0, 0x02, 0x0a, 0x06, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x26, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, + 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x16, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x5f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x14, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x65, 0x6e, 0x73, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x40, 0x0a, 0x15, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x13, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 0x65, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x2f, 0x0a, 0x09, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x0a, + 0x10, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xd3, 0x02, 0x0a, 0x16, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x22, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x5f, + 0x72, 0x75, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x70, 0x72, 0x65, 0x76, 0x52, 0x75, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x65, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6b, 0x65, 
0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, + 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x10, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x0b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x61, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x68, + 0x0a, 0x0c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 
0x65, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, + 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x22, 0xfc, 0x03, 0x0a, 0x0c, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, + 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x41, 0x64, 0x64, 0x72, 0x12, + 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x0c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 
0x75, 0x6c, 0x74, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x41, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x5c, 0x0a, 0x0a, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x4f, + 0x55, 0x52, 0x43, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, + 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x56, 0x41, 0x52, + 0x49, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x22, 0x28, 0x0a, 0x0c, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, + 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, + 0x6b, 0x22, 0xa5, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x1a, 0x74, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, 0x0e, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x0a, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x0a, 0x0a, + 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x2a, 0x31, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x53, 0x54, 0x52, 0x4f, 0x59, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x46, 0x52, 0x45, + 0x53, 0x48, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x06, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4f, 0x50, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x41, + 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, + 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x44, + 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x54, 0x48, 0x45, 0x4e, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x45, 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x48, + 0x45, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x46, + 0x4f, 0x52, 0x47, 0x45, 0x54, 0x10, 0x08, 0x2a, 0xc8, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x63, 0x74, 0x69, + 0x6f, 
0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, + 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x45, + 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x54, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x50, 0x4c, 0x41, + 0x43, 0x45, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, + 0x54, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x25, 0x0a, 0x21, 0x44, 0x45, + 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x4e, 0x4f, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, + 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x45, 0x43, 0x41, + 0x55, 0x53, 0x45, 0x5f, 0x57, 0x52, 0x4f, 0x4e, 0x47, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x54, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x49, + 0x4e, 0x44, 0x45, 0x58, 0x10, 0x06, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x45, 0x41, 0x43, 0x48, 0x5f, 0x4b, 0x45, + 0x59, 0x10, 0x07, 0x12, 0x1c, 0x0a, 0x18, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x45, + 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x10, + 0x08, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x59, 0x5f, + 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x53, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, + 0x41, 0x44, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 
0x57, 0x4e, 0x10, 0x0a, 0x12, 0x23, 0x0a, 0x1f, 0x52, + 0x45, 0x41, 0x44, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x45, 0x50, 0x45, + 0x4e, 0x44, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x0b, + 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, 0x45, + 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x5f, 0x4e, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x0d, 0x12, + 0x21, 0x0a, 0x1d, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x42, 0x45, 0x43, 0x41, 0x55, 0x53, + 0x45, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x4f, 0x56, 0x45, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x10, 0x0c, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, + 0x66, 0x75, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x61, 0x6e, + 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x61, 0x6e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_planfile_proto_rawDescOnce sync.Once + file_planfile_proto_rawDescData = file_planfile_proto_rawDesc +) + +func file_planfile_proto_rawDescGZIP() []byte { + file_planfile_proto_rawDescOnce.Do(func() { + file_planfile_proto_rawDescData = protoimpl.X.CompressGZIP(file_planfile_proto_rawDescData) + }) + return file_planfile_proto_rawDescData +} + +var file_planfile_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_planfile_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_planfile_proto_goTypes = []interface{}{ + (Mode)(0), // 0: tfplan.Mode + (Action)(0), // 1: tfplan.Action + (ResourceInstanceActionReason)(0), // 2: tfplan.ResourceInstanceActionReason + (CheckResults_Status)(0), // 3: tfplan.CheckResults.Status + (CheckResults_ObjectKind)(0), // 4: tfplan.CheckResults.ObjectKind + (*Plan)(nil), // 5: tfplan.Plan + (*Backend)(nil), // 6: 
tfplan.Backend + (*Change)(nil), // 7: tfplan.Change + (*ResourceInstanceChange)(nil), // 8: tfplan.ResourceInstanceChange + (*OutputChange)(nil), // 9: tfplan.OutputChange + (*CheckResults)(nil), // 10: tfplan.CheckResults + (*DynamicValue)(nil), // 11: tfplan.DynamicValue + (*Path)(nil), // 12: tfplan.Path + (*Importing)(nil), // 13: tfplan.Importing + nil, // 14: tfplan.Plan.VariablesEntry + (*PlanResourceAttr)(nil), // 15: tfplan.Plan.resource_attr + (*CheckResults_ObjectResult)(nil), // 16: tfplan.CheckResults.ObjectResult + (*Path_Step)(nil), // 17: tfplan.Path.Step +} +var file_planfile_proto_depIdxs = []int32{ + 0, // 0: tfplan.Plan.ui_mode:type_name -> tfplan.Mode + 14, // 1: tfplan.Plan.variables:type_name -> tfplan.Plan.VariablesEntry + 8, // 2: tfplan.Plan.resource_changes:type_name -> tfplan.ResourceInstanceChange + 8, // 3: tfplan.Plan.resource_drift:type_name -> tfplan.ResourceInstanceChange + 9, // 4: tfplan.Plan.output_changes:type_name -> tfplan.OutputChange + 10, // 5: tfplan.Plan.check_results:type_name -> tfplan.CheckResults + 6, // 6: tfplan.Plan.backend:type_name -> tfplan.Backend + 15, // 7: tfplan.Plan.relevant_attributes:type_name -> tfplan.Plan.resource_attr + 11, // 8: tfplan.Backend.config:type_name -> tfplan.DynamicValue + 1, // 9: tfplan.Change.action:type_name -> tfplan.Action + 11, // 10: tfplan.Change.values:type_name -> tfplan.DynamicValue + 12, // 11: tfplan.Change.before_sensitive_paths:type_name -> tfplan.Path + 12, // 12: tfplan.Change.after_sensitive_paths:type_name -> tfplan.Path + 13, // 13: tfplan.Change.importing:type_name -> tfplan.Importing + 7, // 14: tfplan.ResourceInstanceChange.change:type_name -> tfplan.Change + 12, // 15: tfplan.ResourceInstanceChange.required_replace:type_name -> tfplan.Path + 2, // 16: tfplan.ResourceInstanceChange.action_reason:type_name -> tfplan.ResourceInstanceActionReason + 7, // 17: tfplan.OutputChange.change:type_name -> tfplan.Change + 4, // 18: tfplan.CheckResults.kind:type_name -> 
tfplan.CheckResults.ObjectKind + 3, // 19: tfplan.CheckResults.status:type_name -> tfplan.CheckResults.Status + 16, // 20: tfplan.CheckResults.objects:type_name -> tfplan.CheckResults.ObjectResult + 17, // 21: tfplan.Path.steps:type_name -> tfplan.Path.Step + 11, // 22: tfplan.Plan.VariablesEntry.value:type_name -> tfplan.DynamicValue + 12, // 23: tfplan.Plan.resource_attr.attr:type_name -> tfplan.Path + 3, // 24: tfplan.CheckResults.ObjectResult.status:type_name -> tfplan.CheckResults.Status + 11, // 25: tfplan.Path.Step.element_key:type_name -> tfplan.DynamicValue + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_planfile_proto_init() } +func file_planfile_proto_init() { + if File_planfile_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_planfile_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Plan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Backend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Change); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInstanceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_planfile_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckResults); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Importing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceAttr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckResults_ObjectResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: 
+ return nil + } + } + } + file_planfile_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*Path_Step_AttributeName)(nil), + (*Path_Step_ElementKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_planfile_proto_rawDesc, + NumEnums: 5, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_planfile_proto_goTypes, + DependencyIndexes: file_planfile_proto_depIdxs, + EnumInfos: file_planfile_proto_enumTypes, + MessageInfos: file_planfile_proto_msgTypes, + }.Build() + File_planfile_proto = out.File + file_planfile_proto_rawDesc = nil + file_planfile_proto_goTypes = nil + file_planfile_proto_depIdxs = nil +} diff --git a/pkg/plans/internal/planproto/planfile.proto b/pkg/plans/internal/planproto/planfile.proto new file mode 100644 index 00000000000..b7c35cfd836 --- /dev/null +++ b/pkg/plans/internal/planproto/planfile.proto @@ -0,0 +1,321 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package tfplan; + +// For OpenTofu's own parsing, the proto stub types go into an internal Go +// package. The public API is in github.com/kubegems/opentofu/plans/planfile . +option go_package = "github.com/kubegems/opentofu/pkg/plans/internal/planproto"; + +// Plan is the root message type for the tfplan file +message Plan { + // Version is incremented whenever there is a breaking change to + // the serialization format. Programs reading serialized plans should + // verify that version is set to the expected value and abort processing + // if not. A breaking change is any change that may cause an older + // consumer to interpret the structure incorrectly. 
This number will + // not be incremented if an existing consumer can either safely ignore + // changes to the format or if an existing consumer would fail to process + // the file for another message- or field-specific reason. + uint64 version = 1; + + // The mode that was active when this plan was created. + // + // This is saved only for UI purposes, so that OpenTofu can tailor its + // rendering of the plan depending on the mode. This must never be used to + // make decisions in OpenTofu Core during the applying of a plan. + Mode ui_mode = 17; + + // Errored is true for any plan whose creation was interrupted by an + // error. A plan with this flag set cannot be applied, and the changes + // it proposes are likely to be incomplete. + bool errored = 20; + + // The variables that were set when creating the plan. Each value is + // a msgpack serialization of an HCL value. + map variables = 2; + + // An unordered set of proposed changes to resources throughout the + // configuration, including any nested modules. Use the address of + // each resource to determine which module it belongs to. + repeated ResourceInstanceChange resource_changes = 3; + + // An unordered set of detected drift: changes made to resources outside of + // OpenTofu, computed by comparing the previous run's state to the state + // after refresh. + repeated ResourceInstanceChange resource_drift = 18; + + // An unordered set of proposed changes to outputs in the root module + // of the configuration. This set also includes "no action" changes for + // outputs that are not changing, as context for detecting inconsistencies + // at apply time. + repeated OutputChange output_changes = 4; + + // An unordered set of check results for the entire configuration. + // + // Each element represents a single static configuration object that has + // checks, and each of those may have zero or more dynamic objects that + // the checks were applied to nested within. 
+ repeated CheckResults check_results = 19; + + // An unordered set of target addresses to include when applying. If no + // target addresses are present, the plan applies to the whole + // configuration. + repeated string target_addrs = 5; + + // An unordered set of force-replace addresses to include when applying. + // This must match the set of addresses that was used when creating the + // plan, or else applying the plan will fail when it reaches a different + // conclusion about what action a particular resource instance needs. + repeated string force_replace_addrs = 16; + + // The version string for the OpenTofu binary that created this plan. + string terraform_version = 14; + + // Backend is a description of the backend configuration and other related + // settings at the time the plan was created. + Backend backend = 13; + + message resource_attr { + string resource = 1; + Path attr= 2; + }; + + // RelevantAttributes lists individual resource attributes from + // ResourceDrift which may have contributed to the plan changes. + repeated resource_attr relevant_attributes = 15; + + // timestamp is the record of truth for when the plan happened. + string timestamp = 21; +} + +// Mode describes the planning mode that created the plan. +enum Mode { + NORMAL = 0; + DESTROY = 1; + REFRESH_ONLY = 2; +} + +// Backend is a description of backend configuration and other related settings. +message Backend { + string type = 1; + DynamicValue config = 2; + string workspace = 3; +} + +// Action describes the type of action planned for an object. +// Not all action values are valid for all object types. +enum Action { + NOOP = 0; + CREATE = 1; + READ = 2; + UPDATE = 3; + DELETE = 5; + DELETE_THEN_CREATE = 6; + CREATE_THEN_DELETE = 7; + FORGET = 8; +} + +// Change represents a change made to some object, transforming it from an old +// state to a new state. +message Change { + // Not all action values are valid for all object types. 
Consult + // the documentation for any message that embeds Change. + Action action = 1; + + // msgpack-encoded HCL values involved in the change. + // - For update and replace, two values are provided that give the old and new values, + // respectively. + // - For create, one value is provided that gives the new value to be created + // - For delete, one value is provided that describes the value being deleted + // - For read, two values are provided that give the prior value for this object + // (or null, if no prior value exists) and the value that was or will be read, + // respectively. + // - For no-op, one value is provided that is left unmodified by this non-change. + repeated DynamicValue values = 2; + + // An unordered set of paths into the old value which are marked as + // sensitive. Values at these paths should be obscured in human-readable + // output. This set is always empty for create. + repeated Path before_sensitive_paths = 3; + + // An unordered set of paths into the new value which are marked as + // sensitive. Values at these paths should be obscured in human-readable + // output. This set is always empty for delete. + repeated Path after_sensitive_paths = 4; + + // Importing, if true, specifies that the resource is being imported as part + // of the change. + Importing importing = 5; + + // GeneratedConfig contains any configuration that was generated as part of + // the change, as an HCL string. + string generated_config = 6; +} + +// ResourceInstanceActionReason sometimes provides some additional user-facing +// context for why a particular action was chosen for a resource instance. +// This is for user feedback only and never used to drive behavior during the +// subsequent apply step. 
+enum ResourceInstanceActionReason { + NONE = 0; + REPLACE_BECAUSE_TAINTED = 1; + REPLACE_BY_REQUEST = 2; + REPLACE_BECAUSE_CANNOT_UPDATE = 3; + DELETE_BECAUSE_NO_RESOURCE_CONFIG = 4; + DELETE_BECAUSE_WRONG_REPETITION = 5; + DELETE_BECAUSE_COUNT_INDEX = 6; + DELETE_BECAUSE_EACH_KEY = 7; + DELETE_BECAUSE_NO_MODULE = 8; + REPLACE_BY_TRIGGERS = 9; + READ_BECAUSE_CONFIG_UNKNOWN = 10; + READ_BECAUSE_DEPENDENCY_PENDING = 11; + READ_BECAUSE_CHECK_NESTED = 13; + DELETE_BECAUSE_NO_MOVE_TARGET = 12; +} + +message ResourceInstanceChange { + // addr is a string representation of the resource instance address that + // this change will apply to. + string addr = 13; + + // prev_run_addr is a string representation of the address at which + // this resource instance was tracked during the previous apply operation. + // + // This is populated only if it would be different from addr due to + // OpenTofu having reacted to refactoring annotations in the configuration. + // If empty, the previous run address is the same as the current address. + string prev_run_addr = 14; + + // NOTE: Earlier versions of this format had fields 1 through 6 describing + // various indivdual parts of "addr". We're now using our standard compact + // string representation to capture the same information. We don't support + // preserving plan files from one OpenTofu version to the next, so we + // no longer declare nor accept those fields. + + // deposed_key, if set, indicates that this change applies to a deposed + // object for the indicated instance with the given deposed key. If not + // set, the change applies to the instance's current object. + string deposed_key = 7; + + // provider is the address of the provider configuration that this change + // was planned with, and thus the configuration that must be used to + // apply it. + string provider = 8; + + // Description of the proposed change. May use "create", "read", "update", + // "replace", "delete", "forget" and "no-op" actions. 
+ Change change = 9; + + // raw blob value provided by the provider as additional context for the + // change. Must be considered an opaque value for any consumer other than + // the provider that generated it, and will be returned verbatim to the + // provider during the subsequent apply operation. + bytes private = 10; + + // An unordered set of paths that prompted the change action to be + // "replace" rather than "update". Empty for any action other than + // "replace". + repeated Path required_replace = 11; + + // Optional extra user-oriented context for why change.Action was chosen. + // This is for user feedback only and never used to drive behavior during + // apply. + ResourceInstanceActionReason action_reason = 12; +} + +message OutputChange { + // Name of the output as defined in the root module. + string name = 1; + + // Description of the proposed change. May use "no-op", "create", + // "update" and "delete" actions. + Change change = 2; + + // Sensitive, if true, indicates that one or more of the values given + // in "change" is sensitive and should not be shown directly in any + // rendered plan. + bool sensitive = 3; +} + +message CheckResults { + // Status describes the status of a particular checkable object at the + // completion of the plan. + enum Status { + UNKNOWN = 0; + PASS = 1; + FAIL = 2; + ERROR = 3; + } + + enum ObjectKind { + UNSPECIFIED = 0; + RESOURCE = 1; + OUTPUT_VALUE = 2; + CHECK = 3; + INPUT_VARIABLE = 4; + } + + message ObjectResult { + string object_addr = 1; + Status status = 2; + repeated string failure_messages = 3; + } + + ObjectKind kind = 1; + + // Address of the configuration object that declared the checks. + string config_addr = 2; + + // The aggregate status of the entire configuration object, based on + // the statuses of its zero or more checkable objects. + Status status = 3; + + // The results for individual objects that were declared by the + // configuration object named in config_addr. 
+ repeated ObjectResult objects = 4; +} + +// DynamicValue represents a value whose type is not decided until runtime, +// often based on schema information obtained from a plugin. +// +// At present dynamic values are always encoded as msgpack, with extension +// id 0 used to represent the special "unknown" value indicating results +// that won't be known until after apply. +// +// In future other serialization formats may be used, possibly with a +// transitional period of including both as separate attributes of this type. +// Consumers must ignore attributes they don't support and fail if no supported +// attribute is present. The top-level format version will not be incremented +// for changes to the set of dynamic serialization formats. +message DynamicValue { + bytes msgpack = 1; +} + +// Path represents a set of steps to traverse into a data structure. It is +// used to refer to a sub-structure within a dynamic data structure presented +// separately. +message Path { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + + // Set "element_key" to represent looking up an element in + // an indexable collection type. + DynamicValue element_key = 2; + } + } + repeated Step steps = 1; +} + +// Importing contains the embedded metadata about the import operation if this +// change describes it. +message Importing { + // The original ID of the resource. + string id = 1; +} diff --git a/pkg/plans/mode.go b/pkg/plans/mode.go new file mode 100644 index 00000000000..d9b92737332 --- /dev/null +++ b/pkg/plans/mode.go @@ -0,0 +1,36 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +// Mode represents the various mutually-exclusive modes for creating a plan. 
+type Mode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type Mode + +const ( + // NormalMode is the default planning mode, which aims to synchronize the + // prior state with remote objects and plan a set of actions intended to + // make those remote objects better match the current configuration. + NormalMode Mode = 0 + + // DestroyMode is a special planning mode for situations where the goal + // is to destroy all remote objects that are bound to instances in the + // prior state, even if the configuration for those instances is still + // present. + // + // This mode corresponds with the "-destroy" option to "tofu plan", + // and with the plan created by the "tofu destroy" command. + DestroyMode Mode = 'D' + + // RefreshOnlyMode is a special planning mode which only performs the + // synchronization of prior state with remote objects, and skips any + // effort to generate any change actions for resource instances even if + // the configuration has changed relative to the state. + // + // This mode corresponds with the "-refresh-only" option to + // "tofu plan". + RefreshOnlyMode Mode = 'R' +) diff --git a/pkg/plans/mode_string.go b/pkg/plans/mode_string.go new file mode 100644 index 00000000000..f1757e8f016 --- /dev/null +++ b/pkg/plans/mode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type Mode"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[NormalMode-0] + _ = x[DestroyMode-68] + _ = x[RefreshOnlyMode-82] +} + +const ( + _Mode_name_0 = "NormalMode" + _Mode_name_1 = "DestroyMode" + _Mode_name_2 = "RefreshOnlyMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 68: + return _Mode_name_1 + case i == 82: + return _Mode_name_2 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/plans/objchange/compatible.go b/pkg/plans/objchange/compatible.go new file mode 100644 index 00000000000..1bbf9a23b03 --- /dev/null +++ b/pkg/plans/objchange/compatible.go @@ -0,0 +1,383 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" +) + +// AssertObjectCompatible checks whether the given "actual" value is a valid +// completion of the possibly-partially-unknown "planned" value. +// +// This means that any known leaf value in "planned" must be equal to the +// corresponding value in "actual", and various other similar constraints. +// +// Any inconsistencies are reported by returning a non-zero number of errors. +// These errors are usually (but not necessarily) cty.PathError values +// referring to a particular nested value within the "actual" value. +// +// The two values must have types that conform to the given schema's implied +// type, or this function will panic. 
func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error {
	return assertObjectCompatible(schema, planned, actual, nil)
}

// assertObjectCompatible is the recursive implementation of
// AssertObjectCompatible, where path tracks how deep into the object
// structure the comparison currently is so errors can point at the
// offending nested value.
func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error {
	var errs []error
	var atRoot string
	if len(path) == 0 {
		atRoot = "Root object "
	}

	// Presence must agree: a null planned value must remain null, and a
	// non-null planned value must remain non-null.
	if planned.IsNull() && !actual.IsNull() {
		errs = append(errs, path.NewErrorf(fmt.Sprintf("%swas absent, but now present", atRoot)))
		return errs
	}
	if actual.IsNull() && !planned.IsNull() {
		errs = append(errs, path.NewErrorf(fmt.Sprintf("%swas present, but now absent", atRoot)))
		return errs
	}
	if planned.IsNull() {
		// No further checks possible if both values are null.
		return errs
	}

	for name, attrS := range schema.Attributes {
		plannedV := planned.GetAttr(name)
		actualV := actual.GetAttr(name)

		// Deliberately shadows path with the path extended for this attribute.
		path := append(path, cty.GetAttrStep{Name: name})

		// Unmark values here before checking value assertions,
		// but save the marks so we can see if we should suppress
		// exposing a value through errors.
		unmarkedActualV, marksA := actualV.UnmarkDeep()
		unmarkedPlannedV, marksP := plannedV.UnmarkDeep()
		_, isSensitiveActual := marksA[marks.Sensitive]
		_, isSensitivePlanned := marksP[marks.Sensitive]

		moreErrs := assertValueCompatible(unmarkedPlannedV, unmarkedActualV, path)
		if attrS.Sensitive || isSensitiveActual || isSensitivePlanned {
			if len(moreErrs) > 0 {
				// Use a vague placeholder message instead, to avoid disclosing
				// sensitive information.
				errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
			}
		} else {
			errs = append(errs, moreErrs...)
		}
	}
	for name, blockS := range schema.BlockTypes {
		plannedV, _ := planned.GetAttr(name).Unmark()
		actualV, _ := actual.GetAttr(name).Unmark()

		// Deliberately shadows path with the path extended for this block type.
		path := append(path, cty.GetAttrStep{Name: name})
		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			// If an unknown block placeholder was present then the placeholder
			// may have expanded out into zero blocks, which is okay.
			if !plannedV.IsKnown() && actualV.IsNull() {
				continue
			}
			moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
			errs = append(errs, moreErrs...)
		case configschema.NestingList:
			// A NestingList might either be a list or a tuple, depending on
			// whether there are dynamically-typed attributes inside. However,
			// both support a similar-enough API that we can treat them the
			// same for our purposes here.
			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
				continue
			}

			plannedL := plannedV.LengthInt()
			actualL := actualV.LengthInt()
			if plannedL != actualL {
				errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
				continue
			}
			for it := plannedV.ElementIterator(); it.Next(); {
				idx, plannedEV := it.Element()
				if !actualV.HasIndex(idx).True() {
					continue
				}
				actualEV := actualV.Index(idx)
				moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
				errs = append(errs, moreErrs...)
			}
		case configschema.NestingMap:
			// A NestingMap might either be a map or an object, depending on
			// whether there are dynamically-typed attributes inside, but
			// that's decided statically and so both values will have the same
			// kind.
+ if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + actualAtys := actualV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := actualAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q has vanished", k)) + continue + } + + plannedEV := plannedV.GetAttr(k) + actualEV := actualV.GetAttr(k) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k})) + errs = append(errs, moreErrs...) + } + if plannedV.IsKnown() { // new blocks may appear if unknown blocks were present in the plan + for k := range actualAtys { + if _, ok := plannedAtys[k]; !ok { + errs = append(errs, path.NewErrorf("new block key %q has appeared", k)) + continue + } + } + } + } else { + if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL && plannedV.IsKnown() { // new blocks may appear if unknown blocks were persent in the plan + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) + } + } + case configschema.NestingSet: + if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + if !plannedV.IsKnown() { + // When unknown blocks are present the final number of blocks + // may be different, either because the unknown set values + // become equal and are collapsed, or the count is unknown due + // a dynamic block. Unfortunately this means we can't do our + // usual checks in this case without generating false + // negatives. 
+ continue + } + + setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool { + errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV})) + return len(errs) == 0 + }) + errs = append(errs, setErrs...) + + // There can be fewer elements in a set after its elements are all + // known (values that turn out to be equal will coalesce) but the + // number of elements must never get larger. + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL < actualL { + errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL)) + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + return errs +} + +func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error { + // NOTE: We don't normally use the GoString rendering of cty.Value in + // user-facing error messages as a rule, but we make an exception + // for this function because we expect the user to pass this message on + // verbatim to the provider development team and so more detail is better. + + var errs []error + if planned.Type() == cty.DynamicPseudoType { + // Anything goes, then + return errs + } + if problems := actual.Type().TestConformance(planned.Type()); len(problems) > 0 { + errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type()))) + // If the types don't match then we can't do any other comparisons, + // so we bail early. + return errs + } + + if !planned.IsKnown() { + // We didn't know what were going to end up with during plan, so + // the final value needs only to match the type and refinements of + // the unknown value placeholder. 
		plannedRng := planned.Range()
		if ok := plannedRng.Includes(actual); ok.IsKnown() && ok.False() {
			errs = append(errs, path.NewErrorf("final value %#v does not conform to planning placeholder %#v", actual, planned))
		}
		return errs
	}

	// The planned value is known from here on, so nullness must agree
	// exactly between the two values.
	if actual.IsNull() {
		if planned.IsNull() {
			return nil
		}
		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
		return errs
	}
	if planned.IsNull() {
		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
		return errs
	}

	ty := planned.Type()
	switch {

	case !actual.IsKnown():
		// A value that was known during planning must never become unknown.
		errs = append(errs, path.NewErrorf("was known, but now unknown"))

	case ty.IsPrimitiveType():
		if !actual.Equals(planned).True() {
			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
		}

	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
		// Correlate elements by index/key and recurse into each pair, then
		// report any keys present on only one side.
		for it := planned.ElementIterator(); it.Next(); {
			k, plannedV := it.Element()
			if !actual.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
				continue
			}

			actualV := actual.Index(k)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
			errs = append(errs, moreErrs...)
		}

		for it := actual.ElementIterator(); it.Next(); {
			k, _ := it.Element()
			if !planned.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
			}
		}

	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		for name := range atys {
			// Because we already tested that the two values have the same type,
			// we can assume that the same attributes are present in both and
			// focus just on testing their values.
			plannedV := planned.GetAttr(name)
			actualV := actual.GetAttr(name)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
			errs = append(errs, moreErrs...)
		}

	case ty.IsSetType():
		// We can't really do anything useful for sets here because changing
		// an unknown element to known changes the identity of the element, and
		// so we can't correlate them properly. However, we will at least check
		// to ensure that the number of elements is consistent, along with
		// the general type-match checks we ran earlier in this function.
		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {

			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
				return len(errs) == 0
			})
			errs = append(errs, setErrs...)

			// There can be fewer elements in a set after its elements are all
			// known (values that turn out to be equal will coalesce) but the
			// number of elements must never get larger.

			plannedL := planned.LengthInt()
			actualL := actual.LengthInt()
			if plannedL < actualL {
				errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
			}
		}
	}

	return errs
}

// indexStrForErrors renders a list/map/tuple index value for inclusion in
// an error message.
func indexStrForErrors(v cty.Value) string {
	switch v.Type() {
	case cty.Number:
		return v.AsBigFloat().Text('f', -1)
	case cty.String:
		return strconv.Quote(v.AsString())
	default:
		// Should be impossible, since no other index types are allowed!
		return fmt.Sprintf("%#v", v)
	}
}

// assertSetValuesCompatible checks that each of the elements in a can
// be correlated with at least one equivalent element in b and vice-versa,
// using the given correlation function.
//
// This allows the number of elements in the sets to change as long as all
// elements in both sets can be correlated, making this function safe to use
// with sets that may contain unknown values as long as the unknown case is
// addressed in some reasonable way in the callback function.
+// +// The callback always recieves values from set a as its first argument and +// values from set b in its second argument, so it is safe to use with +// non-commutative functions. +// +// As with assertValueCompatible, we assume that the target audience of error +// messages here is a provider developer (via a bug report from a user) and so +// we intentionally violate our usual rule of keeping cty implementation +// details out of error messages. +func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error { + a := planned + b := actual + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. + // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. + as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if f(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + var errs []error + for i, eq := range aeqs { + if !eq { + errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i])) + } + } + if len(errs) > 0 { + // Exit early since otherwise we're likely to generate duplicate + // error messages from the other perspective in the subsequent loop. 
+ return errs + } + for i, eq := range beqs { + if !eq { + errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i])) + } + } + return errs +} diff --git a/pkg/plans/objchange/compatible_test.go b/pkg/plans/objchange/compatible_test.go new file mode 100644 index 00000000000..ce774df54cc --- /dev/null +++ b/pkg/plans/objchange/compatible_test.go @@ -0,0 +1,1440 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "fmt" + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestAssertObjectCompatible(t *testing.T) { + schemaWithFoo := configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + fooBlockValue := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }) + schemaWithFooBar := configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + } + fooBarBlockValue := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bar": cty.NullVal(cty.String), // simulating the situation where bar isn't set in the config at all + }) + + tests := []struct { + Schema *configschema.Block + Planned cty.Value + Actual cty.Value + WantErrs []string + }{ + { + &configschema.Block{}, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("wotsit"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + []string{ + `.name: was cty.StringVal("wotsit"), but now cty.StringVal("thingy")`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.Zero, + }), + []string{ + `.name: wrong final value type: string required`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).RefineNotNull(), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + []string{ + `.name: final value cty.NullVal(cty.String) does not conform to planning placeholder cty.UnknownVal(cty.String).RefineNotNull()`, + }, + }, + { + 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "name": cty.UnknownVal(cty.String).Refine(). + StringPrefix("boop:"). + NewValue(), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("thingy"), + }), + []string{ + `.name: final value cty.StringVal("thingy") does not conform to planning placeholder cty.UnknownVal(cty.String).Refine().StringPrefixFull("boop:").NewValue()`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + Sensitive: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("wotsit"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + []string{ + `.name: inconsistent values for sensitive attribute`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("wotsit").Mark(marks.Sensitive), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("thingy"), + }), + []string{ + `.name: inconsistent values for sensitive attribute`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "name": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": cty.StringVal("wotsit"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "name": 
cty.StringVal("thingy").Mark(marks.Sensitive), + }), + []string{ + `.name: inconsistent values for sensitive attribute`, + }, + }, + { + // This tests the codepath that leads to couldHaveUnknownBlockPlaceholder, + // where a set may be sensitive and need to be unmarked before it + // is iterated upon + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "configuration": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "sensitive_fields": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "configuration": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive_fields": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("secret"), + }), + }).Mark(marks.Sensitive), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "configuration": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive_fields": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("secret"), + }), + }).Mark(marks.Sensitive), + }), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "stuff": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.DynamicVal, + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.StringVal("thingy"), + }), + []string{}, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "obj": { + Type: cty.Object(map[string]cty.Type{ + "stuff": cty.DynamicPseudoType, + }), + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "stuff": cty.DynamicVal, + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "stuff": cty.NumberIntVal(3), + }), + }), + []string{}, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "stuff": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.StringVal("wotsit"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.StringVal("thingy"), + }), + []string{ + `.stuff: was cty.StringVal("wotsit"), but now cty.StringVal("thingy")`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "stuff": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.StringVal("true"), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.True, + }), + []string{ + `.stuff: wrong final value type: string required`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "stuff": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.DynamicVal, + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.EmptyObjectVal, + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "stuff": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.ObjectVal(map[string]cty.Value{ + "nonsense": cty.StringVal("yup"), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "stuff": cty.EmptyObjectVal, + }), + []string{ + `.stuff: wrong final value type: attribute "nonsense" is required`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("wotsit"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + []string{ + `.tags["Name"]: was cty.StringVal("wotsit"), but now cty.StringVal("thingy")`, + }, + }, + { + &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + "Env": cty.StringVal("production"), + }), + }), + []string{ + `.tags: new element "Env" has appeared`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapValEmpty(cty.String), + }), + []string{ + `.tags: element "Name" has vanished`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "tags": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "tags": cty.MapVal(map[string]cty.Value{ + "Name": cty.NullVal(cty.String), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "zones": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": 
cty.SetVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": cty.SetVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "zones": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": cty.SetVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": cty.SetVal([]cty.Value{ + cty.StringVal("thingy"), + cty.StringVal("wotsit"), + }), + }), + []string{ + `.zones: actual set element cty.StringVal("wotsit") does not correlate with any element in plan`, + `.zones: length changed from 1 to 2`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "zones": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": cty.SetVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "zones": cty.SetVal([]cty.Value{ + // Imagine that both of our unknown values ultimately resolved to "thingy", + // causing them to collapse into a single element. That's valid, + // even though it's also a little confusing and counter-intuitive. 
+ cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.UnknownVal(cty.List(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + cty.UnknownVal(cty.String), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + cty.StringVal("wotsit"), + }), + }), + nil, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.StringVal("thingy"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + cty.StringVal("wotsit"), + }), + }), + []string{ + `.names[1]: was cty.StringVal("thingy"), but now cty.StringVal("wotsit")`, + }, + }, + { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "names": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "names": cty.ListVal([]cty.Value{ + cty.StringVal("thingy"), + cty.StringVal("wotsit"), + }), + }), + []string{ + `.names: new element 1 has appeared`, + }, + }, + + // NestingSingle blocks + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{}, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.EmptyObjectVal, + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.EmptyObjectVal, + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{}, + }, + }, + }, + 
cty.ObjectVal(map[string]cty.Value{ + "key": cty.UnknownVal(cty.EmptyObject), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.EmptyObjectVal, + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + }), + []string{ + `.key: was absent, but now present`, + }, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }), + []string{ + `.key: was present, but now absent`, + }, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "key": cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + })), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }), + nil, + }, + + // NestingList blocks + { + &configschema.Block{ + BlockTypes: 
map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingList, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ListVal([]cty.Value{ + fooBlockValue, + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ListVal([]cty.Value{ + fooBlockValue, + }), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingList, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.TupleVal([]cty.Value{ + fooBlockValue, + fooBlockValue, + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.TupleVal([]cty.Value{ + fooBlockValue, + }), + }), + []string{ + `.key: block count changed from 2 to 1`, + }, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingList, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "key": cty.TupleVal([]cty.Value{}), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.TupleVal([]cty.Value{ + fooBlockValue, + fooBlockValue, + }), + }), + []string{ + `.key: block count changed from 0 to 2`, + }, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingList, + Block: schemaWithFooBar, + }, + }, + }, + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "key": cty.List(fooBarBlockValue.Type()), + })), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + nil, // an unknown block is allowed to expand into multiple, because that's how dynamic blocks behave when for_each is unknown + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "key": { + Nesting: configschema.NestingList, + Block: 
schemaWithFooBar, + }, + }, + }, + // While we must make an exception for empty strings in sets due to + // the legacy SDK, lists should be compared more strictly. + // This does not count as a dynamic block placeholder + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ListVal([]cty.Value{ + fooBarBlockValue, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + "bar": cty.StringVal(""), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "key": cty.ListVal([]cty.Value{ + fooBlockValue, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + []string{".key: block count changed from 2 to 3"}, + }, + + // NestingSet blocks + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + // This is testing the scenario where the two unknown values + // turned out to be 
equal after we learned their values, + // and so they coalesced together into a single element. + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + }), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.UnknownVal(cty.Set( + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + )), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("nope"), + }), + }), + }), + // there is no error here, because the presence of unknowns + // indicates this may be a dynamic block, and the length is unknown + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": 
cty.StringVal("world"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("howdy"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("world"), + }), + }), + }), + []string{ + `.block: planned set element cty.ObjectVal(map[string]cty.Value{"foo":cty.StringVal("hello")}) does not correlate with any element in actual`, + }, + }, + { + // This one is an odd situation where the value representing the + // block itself is unknown. This is never supposed to be true, + // but in legacy SDK mode we allow such things to pass through as + // a warning, and so we must tolerate them for matching purposes. + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "foo": cty.String, + }))), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.UnknownVal(cty.Set(fooBlockValue.Type())), + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("b"), + }), + }), + }), + nil, + }, + // test a set with an unknown dynamic count going to 0 values + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block2": { + Nesting: configschema.NestingSet, + Block: 
schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block2": cty.UnknownVal(cty.Set(fooBlockValue.Type())), + }), + cty.ObjectVal(map[string]cty.Value{ + "block2": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + }), + nil, + }, + // test a set with a patially known dynamic count reducing it's values + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block3": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block3": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "block3": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + }), + }), + nil, + }, + { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "block": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "block": cty.EmptyObjectVal, + }), + cty.ObjectVal(map[string]cty.Value{ + "block": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + }))), + }), + nil, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d: %#v and %#v", i, test.Planned, test.Actual), func(t *testing.T) { + errs := AssertObjectCompatible(test.Schema, test.Planned, test.Actual) + + wantErrs := make(map[string]struct{}) + gotErrs := make(map[string]struct{}) + for _, err := range errs { + gotErrs[tfdiags.FormatError(err)] = struct{}{} + } + for _, msg := range test.WantErrs { + wantErrs[msg] = struct{}{} + } + + t.Logf("\nplanned: %sactual: %s", dump.Value(test.Planned), dump.Value(test.Actual)) + for msg := range wantErrs { 
+ if _, ok := gotErrs[msg]; !ok { + t.Errorf("missing expected error: %s", msg) + } + } + for msg := range gotErrs { + if _, ok := wantErrs[msg]; !ok { + t.Errorf("unexpected extra error: %s", msg) + } + } + }) + } +} diff --git a/pkg/plans/objchange/doc.go b/pkg/plans/objchange/doc.go new file mode 100644 index 00000000000..c6f78f95f80 --- /dev/null +++ b/pkg/plans/objchange/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package objchange deals with the business logic of taking a prior state +// value and a config value and producing a proposed new merged value, along +// with other related rules in this domain. +package objchange diff --git a/pkg/plans/objchange/lcs.go b/pkg/plans/objchange/lcs.go new file mode 100644 index 00000000000..ef0834b5a08 --- /dev/null +++ b/pkg/plans/objchange/lcs.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ValueEqual provides an implementation of the equals function that can be +// passed into LongestCommonSubsequence when comparing cty.Value types. +func ValueEqual(x, y cty.Value) bool { + unmarkedX, xMarks := x.UnmarkDeep() + unmarkedY, yMarks := y.UnmarkDeep() + eqV := unmarkedX.Equals(unmarkedY) + if len(xMarks) != len(yMarks) { + eqV = cty.False + } + if eqV.IsKnown() && eqV.True() { + return true + } + return false +} + +// LongestCommonSubsequence finds a sequence of values that are common to both +// x and y, with the same relative ordering as in both collections. This result +// is useful as a first step towards computing a diff showing added/removed +// elements in a sequence. 
+// +// The approached used here is a "naive" one, assuming that both xs and ys will +// generally be small in most reasonable OpenTofu configurations. For larger +// lists the time/space usage may be sub-optimal. +// +// A pair of lists may have multiple longest common subsequences. In that +// case, the one selected by this function is undefined. +func LongestCommonSubsequence[V any](xs, ys []V, equals func(x, y V) bool) []V { + if len(xs) == 0 || len(ys) == 0 { + return make([]V, 0) + } + + c := make([]int, len(xs)*len(ys)) + eqs := make([]bool, len(xs)*len(ys)) + w := len(xs) + + for y := 0; y < len(ys); y++ { + for x := 0; x < len(xs); x++ { + eq := false + if equals(xs[x], ys[y]) { + eq = true + eqs[(w*y)+x] = true // equality tests can be expensive, so cache it + } + if eq { + // Sequence gets one longer than for the cell at top left, + // since we'd append a new item to the sequence here. + if x == 0 || y == 0 { + c[(w*y)+x] = 1 + } else { + c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 + } + } else { + // We follow the longest of the sequence above and the sequence + // to the left of us in the matrix. + l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + c[(w*y)+x] = l + } else { + c[(w*y)+x] = u + } + } + } + } + + // The bottom right cell tells us how long our longest sequence will be + seq := make([]V, c[len(c)-1]) + + // Now we will walk back from the bottom right cell, finding again all + // of the equal pairs to construct our sequence. + x := len(xs) - 1 + y := len(ys) - 1 + i := len(seq) - 1 + + for x > -1 && y > -1 { + if eqs[(w*y)+x] { + // Add the value to our result list and then walk diagonally + // up and to the left. + seq[i] = xs[x] + x-- + y-- + i-- + } else { + // Take the path with the greatest sequence length in the matrix. 
+ l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + x-- + } else { + y-- + } + } + } + + if i > -1 { + // should never happen if the matrix was constructed properly + panic("not enough elements in sequence") + } + + return seq +} diff --git a/pkg/plans/objchange/lcs_test.go b/pkg/plans/objchange/lcs_test.go new file mode 100644 index 00000000000..a66d9147963 --- /dev/null +++ b/pkg/plans/objchange/lcs_test.go @@ -0,0 +1,138 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestLongestCommonSubsequence(t *testing.T) { + tests := []struct { + xs []cty.Value + ys []cty.Value + want []cty.Value + }{ + { + []cty.Value{}, + []cty.Value{}, + []cty.Value{}, + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(3), cty.NumberIntVal(4)}, + []cty.Value{}, + }, + { + []cty.Value{cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(2)}, + }, + { + []cty.Value{cty.NumberIntVal(1)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(1)}, + }, + { + []cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(1)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}, + []cty.Value{cty.NumberIntVal(1)}, // arbitrarily selected 1; 2 would also be valid + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2), cty.NumberIntVal(3), cty.NumberIntVal(4)}, + []cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(4), cty.NumberIntVal(5)}, + []cty.Value{cty.NumberIntVal(2), 
cty.NumberIntVal(4)}, + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2), cty.NumberIntVal(3), cty.NumberIntVal(4)}, + []cty.Value{cty.NumberIntVal(4), cty.NumberIntVal(2), cty.NumberIntVal(5)}, + []cty.Value{cty.NumberIntVal(4)}, // 2 would also be valid + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2), cty.NumberIntVal(3), cty.NumberIntVal(5)}, + []cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(4), cty.NumberIntVal(5)}, + []cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(5)}, + }, + + // unknowns never compare as equal + { + []cty.Value{cty.NumberIntVal(1), cty.UnknownVal(cty.Number), cty.NumberIntVal(3)}, + []cty.Value{cty.NumberIntVal(1), cty.UnknownVal(cty.Number), cty.NumberIntVal(3)}, + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(3)}, + }, + { + []cty.Value{cty.UnknownVal(cty.Number)}, + []cty.Value{cty.UnknownVal(cty.Number)}, + []cty.Value{}, + }, + + // marked values + { + []cty.Value{cty.NumberIntVal(1).Mark("foo"), cty.NumberIntVal(2).Mark("foo"), cty.NumberIntVal(3)}, + []cty.Value{cty.NumberIntVal(1).Mark("foo"), cty.NumberIntVal(2).Mark("foo")}, + []cty.Value{cty.NumberIntVal(1).Mark("foo"), cty.NumberIntVal(2).Mark("foo")}, + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2).Mark("foo"), cty.NumberIntVal(3)}, + []cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(3)}, + []cty.Value{cty.NumberIntVal(3)}, + }, + { + []cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2).Mark("foo")}, + []cty.Value{cty.NumberIntVal(2)}, + []cty.Value{}, + }, + { + []cty.Value{ + cty.MapVal(map[string]cty.Value{"a": cty.StringVal("x").Mark(marks.Sensitive)}), + cty.MapVal(map[string]cty.Value{"b": cty.StringVal("y")}), + }, + []cty.Value{ + cty.MapVal(map[string]cty.Value{"a": cty.StringVal("x").Mark(marks.Sensitive)}), + cty.MapVal(map[string]cty.Value{"b": cty.StringVal("y")}), + cty.MapVal(map[string]cty.Value{"c": cty.StringVal("z")}), + }, + []cty.Value{ + cty.MapVal(map[string]cty.Value{"a": 
cty.StringVal("x").Mark(marks.Sensitive)}), + cty.MapVal(map[string]cty.Value{"b": cty.StringVal("y")}), + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%#v,%#v", test.xs, test.ys), func(t *testing.T) { + got := LongestCommonSubsequence(test.xs, test.ys, ValueEqual) + + wrong := func() { + t.Fatalf( + "wrong result\nX: %#v\nY: %#v\ngot: %#v\nwant: %#v", + test.xs, test.ys, got, test.want, + ) + } + + if len(got) != len(test.want) { + wrong() + } + + for i := range got { + if got[i] == cty.NilVal { + wrong() + } + if !got[i].RawEquals(test.want[i]) { + wrong() + } + } + }) + } +} diff --git a/pkg/plans/objchange/normalize_obj.go b/pkg/plans/objchange/normalize_obj.go new file mode 100644 index 00000000000..e17c3091ede --- /dev/null +++ b/pkg/plans/objchange/normalize_obj.go @@ -0,0 +1,143 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// NormalizeObjectFromLegacySDK takes an object that may have been generated +// by the legacy Terraform SDK (i.e. returned from a provider with the +// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// assumptions we would normally enforce if the provider had not opted out. +// +// In particular, this function guarantees that a value representing a nested +// block will never itself be unknown or null, instead representing that as +// a non-null value that may contain null/unknown values. +// +// The input value must still conform to the implied type of the given schema, +// or else this function may produce garbage results or panic. This is usually +// okay because type consistency is enforced when deserializing the value +// returned from the provider over the RPC wire protocol anyway. 
+func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + val, valMarks := val.UnmarkDeepWithPaths() + val = normalizeObjectFromLegacySDK(val, schema) + return val.MarkWithPaths(valMarks) +} + +func normalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + if val == cty.NilVal || val.IsNull() { + // This should never happen in reasonable use, but we'll allow it + // and normalize to a null of the expected type rather than panicking + // below. + return cty.NullVal(schema.ImpliedType()) + } + + vals := make(map[string]cty.Value) + for name := range schema.Attributes { + // No normalization for attributes, since them being type-conformant + // is all that we require. + vals[name] = val.GetAttr(name) + } + for name, blockS := range schema.BlockTypes { + lv := val.GetAttr(name) + + // Legacy SDK never generates dynamically-typed attributes and so our + // normalization code doesn't deal with them, but we need to make sure + // we still pass them through properly so that we don't interfere with + // objects generated by other SDKs. 
+ if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() { + vals[name] = lv + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if lv.IsKnown() { + if lv.IsNull() && blockS.Nesting == configschema.NestingGroup { + vals[name] = blockS.EmptyValue() + } else { + vals[name] = normalizeObjectFromLegacySDK(lv, &blockS.Block) + } + } else { + vals[name] = unknownBlockStub(&blockS.Block) + } + case configschema.NestingList: + switch { + case !lv.IsKnown(): + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, normalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.ListVal(subVals) + } + case configschema.NestingSet: + switch { + case !lv.IsKnown(): + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, normalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.SetVal(subVals) + } + default: + // The legacy SDK doesn't support NestingMap, so we just assume + // maps are always okay. (If not, we would've detected and returned + // an error to the user before we got here.) + vals[name] = lv + } + } + return cty.ObjectVal(vals) +} + +// unknownBlockStub constructs an object value that approximates an unknown +// block by producing a known block object with all of its leaf attribute +// values set to unknown. 
+// +// Blocks themselves cannot be unknown, so if the legacy SDK tries to return +// such a thing, we'll use this result instead. This convention mimics how +// the dynamic block feature deals with being asked to iterate over an unknown +// value, because our value-checking functions already accept this convention +// as a special case. +func unknownBlockStub(schema *configschema.Block) cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range schema.Attributes { + vals[name] = cty.UnknownVal(attrS.Type) + } + for name, blockS := range schema.BlockTypes { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + vals[name] = unknownBlockStub(&blockS.Block) + case configschema.NestingList: + // In principle we may be expected to produce a tuple value here, + // if there are any dynamically-typed attributes in our nested block, + // but the legacy SDK doesn't support that, so we just assume it'll + // never be necessary to normalize those. (Incorrect usage in any + // other SDK would be caught and returned as an error before we + // get here.) + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingSet: + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingMap: + // A nesting map can never be unknown since we then wouldn't know + // what the keys are. (Legacy SDK doesn't support NestingMap anyway, + // so this should never arise.) + vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType()) + } + } + return cty.ObjectVal(vals) +} diff --git a/pkg/plans/objchange/normalize_obj_test.go b/pkg/plans/objchange/normalize_obj_test.go new file mode 100644 index 00000000000..12aff296e9a --- /dev/null +++ b/pkg/plans/objchange/normalize_obj_test.go @@ -0,0 +1,313 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func TestNormalizeObjectFromLegacySDK(t *testing.T) { + tests := map[string]struct { + Schema *configschema.Block + Input cty.Value + Want cty.Value + }{ + "empty": { + &configschema.Block{}, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + "attributes only": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": {Type: cty.String, Required: true}, + "b": {Type: cty.String, Optional: true}, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.StringVal("b value"), + }), + }, + "null block single": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + }), + }, + "unknown block single": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "c": {Nesting: configschema.NestingSingle}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "b": cty.String, + "c": cty.EmptyObject, + })), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.UnknownVal(cty.String), + "c": cty.EmptyObjectVal, + }), + }), + }, + "null block list": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "c": {Nesting: configschema.NestingSingle}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + "c": cty.EmptyObject, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "b": cty.String, + "c": cty.EmptyObject, + })), + }), + }, + "unknown block list": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.UnknownVal(cty.String), + }), + }), + }), + }, + "null block set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + }), 
+ }, + "unknown block set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.UnknownVal(cty.String), + }), + }), + }), + }, + "map block passes through": { + // Legacy SDK doesn't use NestingMap, so we don't do any transforms + // related to it but we still need to verify that map blocks pass + // through unscathed. + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + }, + "block list with dynamic type": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.True, + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": 
cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.True, + }), + }), + }), + }, + "block map with dynamic type": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "b": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + "another": cty.ObjectVal(map[string]cty.Value{ + "b": cty.True, + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + "another": cty.ObjectVal(map[string]cty.Value{ + "b": cty.True, + }), + }), + }), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := NormalizeObjectFromLegacySDK(test.Input, test.Schema) + if !got.RawEquals(test.Want) { + t.Errorf( + "wrong result\ngot: %s\nwant: %s", + dump.Value(got), dump.Value(test.Want), + ) + } + }) + } +} diff --git a/pkg/plans/objchange/objchange.go b/pkg/plans/objchange/objchange.go new file mode 100644 index 00000000000..1ee6d482169 --- /dev/null +++ b/pkg/plans/objchange/objchange.go @@ -0,0 +1,496 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// ProposedNew constructs a proposed new object value by combining the +// computed attribute values from "prior" with the configured attribute values +// from "config". 
+//
+// Both values must conform to the given schema's implied type, or this function
+// will panic.
+//
+// The prior value must be wholly known, but the config value may be unknown
+// or have nested unknown values.
+//
+// The merging of the two objects includes the attributes of any nested blocks,
+// which will be correlated in a manner appropriate for their nesting mode.
+// Note in particular that the correlation for blocks backed by sets is a
+// heuristic based on matching non-computed attribute values and so it may
+// produce strange results with more "extreme" cases, such as a nested set
+// block where _all_ attributes are computed.
+func ProposedNew(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	// If the config and prior are both null, return early here before
+	// populating the prior block. This prevents non-null blocks from appearing
+	// in the proposed state value.
+	if config.IsNull() && prior.IsNull() {
+		return prior
+	}
+
+	if prior.IsNull() {
+		// In this case, we will construct a synthetic prior value that is
+		// similar to the result of decoding an empty configuration block,
+		// which simplifies our handling of the top-level attributes/blocks
+		// below by giving us one non-null level of object to pull values from.
+		//
+		// "All attributes null" happens to be the definition of EmptyValue for
+		// a Block, so we can just delegate to that.
+		prior = schema.EmptyValue()
+	}
+	return proposedNew(schema, prior, config)
+}
+
+// PlannedDataResourceObject is similar to proposedNewBlock but tailored for
+// planning data resources in particular. Specifically, it replaces the values
+// of any Computed attributes not set in the configuration with an unknown
+// value, which serves as a placeholder for a value to be filled in by the
+// provider when the data resource is finally read.
+//
+// Data resources are different because the planning of them is handled
+// entirely within OpenTofu Core and not subject to customization by the
+// provider. This function is, in effect, producing an equivalent result to
+// passing the proposedNewBlock result into a provider's PlanResourceChange
+// function, assuming a fixed implementation of PlanResourceChange that just
+// fills in unknown values as needed.
+func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
+	// Our trick here is to run the proposedNewBlock logic with an
+	// entirely-unknown prior value. Because of cty's unknown short-circuit
+	// behavior, any operation on prior returns another unknown, and so
+	// unknown values propagate into all of the parts of the resulting value
+	// that would normally be filled in by preserving the prior state.
+	prior := cty.UnknownVal(schema.ImpliedType())
+	return proposedNew(schema, prior, config)
+}
+
+// proposedNew is the shared implementation behind ProposedNew and
+// PlannedDataResourceObject, merging one level of object-typed prior and
+// config values for the given block schema.
+func proposedNew(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	if config.IsNull() || !config.IsKnown() {
+		// A block config should never be null at this point. The only nullable
+		// block type is NestingSingle, which will return early before coming
+		// back here. We'll allow the null here anyway to free callers from
+		// needing to specifically check for these cases, and any mismatch will
+		// be caught in validation, so just take the prior value rather than
+		// the invalid null.
+		return prior
+	}
+
+	if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) {
+		panic("ProposedNew only supports object-typed values")
+	}
+
+	// From this point onwards, we can assume that both values are non-null
+	// object types, and that the config value itself is known (though it
+	// may contain nested values that are unknown).
+	newAttrs := proposedNewAttributes(schema.Attributes, prior, config)
+
+	// Merging nested blocks is a little more complex, since we need to
+	// correlate blocks between both objects and then recursively propose
+	// a new object for each. The correlation logic depends on the nesting
+	// mode for each block type.
+	for name, blockType := range schema.BlockTypes {
+		priorV := prior.GetAttr(name)
+		configV := config.GetAttr(name)
+		newAttrs[name] = proposedNewNestedBlock(blockType, priorV, configV)
+	}
+
+	return cty.ObjectVal(newAttrs)
+}
+
+// proposedNewBlockOrObject dispatches the schema to either ProposedNew or
+// proposedNewObjectAttributes depending on the given type.
+func proposedNewBlockOrObject(schema nestedSchema, prior, config cty.Value) cty.Value {
+	switch schema := schema.(type) {
+	case *configschema.Block:
+		return ProposedNew(schema, prior, config)
+	case *configschema.Object:
+		return proposedNewObjectAttributes(schema, prior, config)
+	default:
+		panic(fmt.Sprintf("unexpected schema type %T", schema))
+	}
+}
+
+// proposedNewNestedBlock proposes a new value for a nested block, choosing
+// the merge strategy appropriate for the block type's nesting mode.
+func proposedNewNestedBlock(schema *configschema.NestedBlock, prior, config cty.Value) cty.Value {
+	// The only time we should encounter an entirely unknown block is from the
+	// use of dynamic with an unknown for_each expression.
+	if !config.IsKnown() {
+		return config
+	}
+
+	newV := config
+
+	switch schema.Nesting {
+	case configschema.NestingSingle:
+		// A NestingSingle configuration block value can be null, and since it
+		// cannot be computed we can always take the configuration value.
+		if config.IsNull() {
+			break
+		}
+
+		// Otherwise use the same assignment rules as NestingGroup.
+		fallthrough
+	case configschema.NestingGroup:
+		newV = ProposedNew(&schema.Block, prior, config)
+
+	case configschema.NestingList:
+		newV = proposedNewNestingList(&schema.Block, prior, config)
+
+	case configschema.NestingMap:
+		newV = proposedNewNestingMap(&schema.Block, prior, config)
+
+	case configschema.NestingSet:
+		newV = proposedNewNestingSet(&schema.Block, prior, config)
+
+	default:
+		// Should never happen, since the above cases are comprehensive.
+		panic(fmt.Sprintf("unsupported block nesting mode %s", schema.Nesting))
+	}
+
+	return newV
+}
+
+// proposedNewNestedType proposes a new value for a NestedType attribute,
+// choosing the merge strategy appropriate for the object's nesting mode.
+func proposedNewNestedType(schema *configschema.Object, prior, config cty.Value) cty.Value {
+	// If the config isn't known at all, then we must use that value.
+	if !config.IsKnown() {
+		return config
+	}
+
+	// Even if the config is null or empty, we will be using this default value.
+	newV := config
+
+	switch schema.Nesting {
+	case configschema.NestingSingle:
+		// If the config is null, we already have our value. If the attribute
+		// is optional+computed, we won't reach this branch with a null value
+		// since the computed case would have been taken.
+		if config.IsNull() {
+			break
+		}
+
+		newV = proposedNewObjectAttributes(schema, prior, config)
+
+	case configschema.NestingList:
+		newV = proposedNewNestingList(schema, prior, config)
+
+	case configschema.NestingMap:
+		newV = proposedNewNestingMap(schema, prior, config)
+
+	case configschema.NestingSet:
+		newV = proposedNewNestingSet(schema, prior, config)
+
+	default:
+		// Should never happen, since the above cases are comprehensive.
+		panic(fmt.Sprintf("unsupported attribute nesting mode %s", schema.Nesting))
+	}
+
+	return newV
+}
+
+// proposedNewNestingList proposes a new value for a list-nested block or
+// attribute by correlating prior and config elements pairwise by index.
+func proposedNewNestingList(schema nestedSchema, prior, config cty.Value) cty.Value {
+	newV := config
+
+	// Nested blocks are correlated by index.
+	configVLen := 0
+	if !config.IsNull() {
+		configVLen = config.LengthInt()
+	}
+	if configVLen > 0 {
+		newVals := make([]cty.Value, 0, configVLen)
+		for it := config.ElementIterator(); it.Next(); {
+			idx, configEV := it.Element()
+			if prior.IsKnown() && (prior.IsNull() || !prior.HasIndex(idx).True()) {
+				// If there is no corresponding prior element then
+				// we just take the config value as-is.
+				newVals = append(newVals, configEV)
+				continue
+			}
+			priorEV := prior.Index(idx)
+
+			newVals = append(newVals, proposedNewBlockOrObject(schema, priorEV, configEV))
+		}
+		// Despite the name, a NestingList might also be a tuple, if
+		// its nested schema contains dynamically-typed attributes.
+		if config.Type().IsTupleType() {
+			newV = cty.TupleVal(newVals)
+		} else {
+			newV = cty.ListVal(newVals)
+		}
+	}
+
+	return newV
+}
+
+// proposedNewNestingMap proposes a new value for a map-nested block or
+// attribute by correlating prior and config elements by key.
+func proposedNewNestingMap(schema nestedSchema, prior, config cty.Value) cty.Value {
+	newV := config
+
+	newVals := map[string]cty.Value{}
+
+	if config.IsNull() || !config.IsKnown() || config.LengthInt() == 0 {
+		// We already assigned newV and there's nothing to compare in
+		// config.
+		return newV
+	}
+	cfgMap := config.AsValueMap()
+
+	// prior may be null or empty
+	priorMap := map[string]cty.Value{}
+	if !prior.IsNull() && prior.IsKnown() && prior.LengthInt() > 0 {
+		priorMap = prior.AsValueMap()
+	}
+
+	for name, configEV := range cfgMap {
+		priorEV, inPrior := priorMap[name]
+		if !inPrior {
+			// If there is no corresponding prior element then
+			// we just take the config value as-is.
+			newVals[name] = configEV
+			continue
+		}
+
+		newVals[name] = proposedNewBlockOrObject(schema, priorEV, configEV)
+	}
+
+	// The value must leave as the same type it came in as.
+	switch {
+	case config.Type().IsObjectType():
+		// Although we call the nesting mode "map", we actually use
+		// object values so that elements might have different types
+		// in case of dynamically-typed attributes.
+		newV = cty.ObjectVal(newVals)
+	default:
+		newV = cty.MapVal(newVals)
+	}
+
+	return newV
+}
+
+// proposedNewNestingSet proposes a new value for a set-nested block or
+// attribute. Because set elements have no identity to correlate by, prior
+// elements are matched to config elements heuristically via
+// validPriorFromConfig.
+func proposedNewNestingSet(schema nestedSchema, prior, config cty.Value) cty.Value {
+	if !config.Type().IsSetType() {
+		panic("configschema.NestingSet value is not a set as expected")
+	}
+
+	newV := config
+	if !config.IsKnown() || config.IsNull() || config.LengthInt() == 0 {
+		return newV
+	}
+
+	var priorVals []cty.Value
+	if prior.IsKnown() && !prior.IsNull() {
+		priorVals = prior.AsValueSlice()
+	}
+
+	var newVals []cty.Value
+	// track which prior elements have been used
+	used := make([]bool, len(priorVals))
+
+	for _, configEV := range config.AsValueSlice() {
+		var priorEV cty.Value
+		for i, priorCmp := range priorVals {
+			if used[i] {
+				continue
+			}
+
+			// It is possible that multiple prior elements could be valid
+			// matches for a configuration value, in which case we will end up
+			// picking the first match encountered (but it will always be
+			// consistent due to cty's iteration order). Because configured set
+			// elements must also be entirely unique in order to be included in
+			// the set, these matches either will not matter because they only
+			// differ by computed values, or could not have come from a valid
+			// config with all unique set elements.
+			if validPriorFromConfig(schema, priorCmp, configEV) {
+				priorEV = priorCmp
+				used[i] = true
+				break
+			}
+		}
+
+		if priorEV == cty.NilVal {
+			priorEV = cty.NullVal(config.Type().ElementType())
+		}
+
+		newVals = append(newVals, proposedNewBlockOrObject(schema, priorEV, configEV))
+	}
+
+	return cty.SetVal(newVals)
+}
+
+// proposedNewObjectAttributes proposes new values for the attributes of a
+// single object-typed value, or passes a null config through unchanged.
+func proposedNewObjectAttributes(schema *configschema.Object, prior, config cty.Value) cty.Value {
+	if config.IsNull() {
+		return config
+	}
+
+	return cty.ObjectVal(proposedNewAttributes(schema.Attributes, prior, config))
+}
+
+// proposedNewAttributes merges prior and config values attribute-by-attribute
+// for the given attribute schemas, returning the proposed attribute map.
+func proposedNewAttributes(attrs map[string]*configschema.Attribute, prior, config cty.Value) map[string]cty.Value {
+	newAttrs := make(map[string]cty.Value, len(attrs))
+	for name, attr := range attrs {
+		var priorV cty.Value
+		if prior.IsNull() {
+			priorV = cty.NullVal(prior.Type().AttributeType(name))
+		} else {
+			priorV = prior.GetAttr(name)
+		}
+
+		configV := config.GetAttr(name)
+
+		var newV cty.Value
+		switch {
+		// required isn't considered when constructing the plan, so attributes
+		// are essentially either computed or not computed. In the case of
+		// optional+computed, they are only computed when there is no
+		// configuration.
+		case attr.Computed && configV.IsNull():
+			// configV will always be null in this case, by definition.
+			// priorV may also be null, but that's okay.
+			newV = priorV
+
+			// the exception to the above is that if the config is optional and
+			// the _prior_ value contains non-computed values, we can infer
+			// that the config must have been non-null previously.
+			if optionalValueNotComputable(attr, priorV) {
+				newV = configV
+			}
+
+		case attr.NestedType != nil:
+			// For non-computed NestedType attributes, we need to descend
+			// into the individual nested attributes to build the final
+			// value, unless the entire nested attribute is unknown.
+			newV = proposedNewNestedType(attr.NestedType, priorV, configV)
+		default:
+			// For non-computed attributes, we always take the config value,
+			// even if it is null. If it's _required_ then null values
+			// should've been caught during an earlier validation step, and
+			// so we don't really care about that here.
+			newV = configV
+		}
+		newAttrs[name] = newV
+	}
+	return newAttrs
+}
+
+// nestedSchema is used as a generic container for either a
+// *configschema.Object, or *configschema.Block.
+type nestedSchema interface {
+	AttributeByPath(cty.Path) *configschema.Attribute
+}
+
+// optionalValueNotComputable is used to check if an object in state must
+// have at least partially come from configuration. If the prior value has any
+// non-null attributes which are not computed in the schema, then we know there
+// was previously a configuration value which set those.
+//
+// This is used when the configuration contains a null optional+computed value,
+// and we want to know if we should plan to send the null value or the prior
+// state.
+func optionalValueNotComputable(schema *configschema.Attribute, val cty.Value) bool {
+	if !schema.Optional {
+		return false
+	}
+
+	// We must have a NestedType for complex nested attributes in order
+	// to find nested computed values in the first place.
+	if schema.NestedType == nil {
+		return false
+	}
+
+	foundNonComputedAttr := false
+	cty.Walk(val, func(path cty.Path, v cty.Value) (bool, error) {
+		if v.IsNull() {
+			return true, nil
+		}
+
+		attr := schema.NestedType.AttributeByPath(path)
+		if attr == nil {
+			return true, nil
+		}
+
+		if !attr.Computed {
+			foundNonComputedAttr = true
+			return false, nil
+		}
+		return true, nil
+	})
+
+	return foundNonComputedAttr
+}
+
+// validPriorFromConfig returns true if the prior object could have been
+// derived from the configuration. We do this by walking the prior value to
+// determine if it is a valid superset of the config, and only computable
+// values have been added.
This function is only used to correlate
+// configuration with possible valid prior values within sets.
+func validPriorFromConfig(schema nestedSchema, prior, config cty.Value) bool {
+	if unrefinedValue(config).RawEquals(unrefinedValue(prior)) {
+		return true
+	}
+
+	// error value to halt the walk
+	stop := errors.New("stop")
+
+	valid := true
+	cty.Walk(prior, func(path cty.Path, priorV cty.Value) (bool, error) {
+		configV, err := path.Apply(config)
+		if err != nil {
+			// most likely dynamic objects with different types
+			valid = false
+			return false, stop
+		}
+
+		// we don't need to know the schema if both are equal
+		if unrefinedValue(configV).RawEquals(unrefinedValue(priorV)) {
+			// we know they are equal, so no need to descend further
+			return false, nil
+		}
+
+		// We can't descend into nested sets to correlate configuration, so the
+		// overall values must be equal.
+		if configV.Type().IsSetType() {
+			valid = false
+			return false, stop
+		}
+
+		attr := schema.AttributeByPath(path)
+		if attr == nil {
+			// Not at a schema attribute, so we can continue until we find leaf
+			// attributes.
+			return true, nil
+		}
+
+		// If we have nested object attributes we'll be descending into those
+		// to compare the individual values and determine why this level is not
+		// equal.
+		if attr.NestedType != nil {
+			return true, nil
+		}
+
+		// This is a leaf attribute, so it must be computed in order to differ
+		// from config.
+		if !attr.Computed {
+			valid = false
+			return false, stop
+		}
+
+		// And if it is computed, the config must be null to allow a change.
+		if !configV.IsNull() {
+			valid = false
+			return false, stop
+		}
+
+		// We still stop here. The cty value could be far larger, but this was
+		// the last level of prescribed schema.
+ return false, nil + }) + + return valid +} diff --git a/pkg/plans/objchange/objchange_test.go b/pkg/plans/objchange/objchange_test.go new file mode 100644 index 00000000000..0ae8490bbc2 --- /dev/null +++ b/pkg/plans/objchange/objchange_test.go @@ -0,0 +1,2763 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +func TestProposedNew(t *testing.T) { + tests := map[string]struct { + Schema *configschema.Block + Prior cty.Value + Config cty.Value + Want cty.Value + }{ + "empty": { + &configschema.Block{}, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + }, + "no prior": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "bar": { + Type: cty.String, + Computed: true, + }, + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "boz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "biz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.DynamicPseudoType), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + })), + "bar": cty.NullVal(cty.String), + "baz": cty.ObjectVal(map[string]cty.Value{ + "boz": cty.StringVal("world"), + + // An unknown in the config represents a 
situation where + // an argument is explicitly set to an expression result + // that is derived from an unknown value. This is distinct + // from leaving it null, which allows the provider itself + // to decide the value during PlanResourceChange. + "biz": cty.UnknownVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + + // unset computed attributes are null in the proposal; provider + // usually changes them to "unknown" during PlanResourceChange, + // to indicate that the value will be decided during apply. + "bar": cty.NullVal(cty.String), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + })), + + "baz": cty.ObjectVal(map[string]cty.Value{ + "boz": cty.StringVal("world"), + "biz": cty.UnknownVal(cty.String), // explicit unknown preserved from config + }), + }), + }, + "null block remains null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "boz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.DynamicPseudoType), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + })), + "baz": cty.NullVal(cty.Object(map[string]cty.Type{ + "boz": cty.String, + })), + }), + // The bloop attribue and baz block does not exist in the config, + // and therefore shouldn't be planned. 
+ cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + })), + "baz": cty.NullVal(cty.Object(map[string]cty.Type{ + "boz": cty.String, + })), + }), + }, + "no prior with set": { + // This one is here because our handling of sets is more complex + // than others (due to the fuzzy correlation heuristic) and + // historically that caused us some panic-related grief. + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "boz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Computed: true, + Optional: true, + }, + }, + }, + cty.NullVal(cty.DynamicPseudoType), + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boz": cty.StringVal("world"), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "boz": cty.StringVal("world"), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + }), + }), + }), + }, + "prior attributes": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "bar": { + Type: cty.String, + Computed: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "boz": { + Type: cty.String, + Optional: true, + Computed: true, + }, 
+ "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bonjour"), + "bar": cty.StringVal("petit dejeuner"), + "baz": cty.StringVal("grande dejeuner"), + "boz": cty.StringVal("a la monde"), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + "bar": cty.NullVal(cty.String), + "baz": cty.NullVal(cty.String), + "boz": cty.StringVal("world"), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bleep"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("hello"), + "bar": cty.StringVal("petit dejeuner"), + "baz": cty.StringVal("grande dejeuner"), + "boz": cty.StringVal("world"), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bleep"), + }), + }), + }, + "prior nested single": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + "bleep": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + "bloop": 
cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.NullVal(cty.String), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.StringVal("beep"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.StringVal("boop"), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.StringVal("beep"), + }), + }), + }, + "prior nested single to null": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + "bleep": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.String, + })), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + 
"foo": cty.NullVal(cty.Object(map[string]cty.Type{ + "bar": cty.String, + "baz": cty.String, + })), + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + }), + }, + + "prior optional computed nested single to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + "bleep": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + Computed: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + }), + }, + + "prior nested list": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + }), + 
"bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("baz"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("baz"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("baz"), + }), + }), + }), + }, + "prior nested list with dynamic": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.DynamicPseudoType, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.DynamicPseudoType, + Required: true, + }, + "blub": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + 
cty.ObjectVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + }), + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + "blub": cty.StringVal("glub"), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("baz"), + "blub": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + "blub": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("bar"), + "blub": cty.NullVal(cty.String), + }), + }), + }), + }, + "prior nested map": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, 
+ }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.StringVal("boot"), + }), + }), + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.NullVal(cty.String), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.StringVal("boop"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + }), + }), + }), + }, + + "prior optional computed nested map elem to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Optional: true, + }, + "bleep": { + Type: 
cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.StringVal("computed"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + "bleep": cty.StringVal("computed"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + "bleep": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + })), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + "bleep": cty.NullVal(cty.String), + }), + }), + }), + }, + + "prior optional computed nested map to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Optional: true, + }, + "bleep": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + Optional: true, + Computed: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.StringVal("computed"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blub"), + "bleep": cty.StringVal("computed"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Map( + cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + 
}), + )), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Map( + cty.Object(map[string]cty.Type{ + "blop": cty.String, + "bleep": cty.String, + }), + )), + }), + }, + + "prior nested map with dynamic": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "baz": { + Type: cty.DynamicPseudoType, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.ListVal([]cty.Value{cty.StringVal("boot")}), + }), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + }), + "b": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.NumberIntVal(13), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.NullVal(cty.String), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.List(cty.String)), + }), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blep"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.NumberIntVal(13), + }), + 
}), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bap"), + "baz": cty.StringVal("boop"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.List(cty.String)), + }), + }), + "bloop": cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("blep"), + }), + "c": cty.ObjectVal(map[string]cty.Value{ + "blop": cty.NumberIntVal(13), + }), + }), + }), + }, + "prior nested set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + // This non-computed attribute will serve + // as our matching key for propagating + // "baz" from elements in the prior value. + Type: cty.String, + Optional: true, + }, + "baz": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + "bleep": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("blep"), + "baz": cty.StringVal("boot"), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glubglub"), + "bleep": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glubglub"), + "bleep": cty.StringVal("beep"), + }), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glubglub"), + "bleep": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("beep"), + "baz": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("bosh"), + "baz": cty.NullVal(cty.String), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glubglub"), + "bleep": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("glub"), + "bleep": cty.NullVal(cty.String), + }), + }), + }), + }, + + "set with partial optional computed change": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "multi": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + }, + "cmp": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "cmp": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "cmp": cty.StringVal("OK"), + }), + }), + }), + + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + 
"cmp": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("replaced"), + "cmp": cty.NullVal(cty.String), + }), + }), + }), + // "one" can be correlated because it is a non-computed value in + // the configuration. + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "cmp": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("replaced"), + "cmp": cty.NullVal(cty.String), + }), + }), + }), + }, + + "set without partial optional computed change": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "multi": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "req": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "req": cty.StringVal("one"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "req": cty.StringVal("two"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.NullVal(cty.String), + "req": cty.StringVal("one"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.NullVal(cty.String), + "req": cty.StringVal("two"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "req": cty.StringVal("one"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "req": cty.StringVal("two"), + }), + }), + }), + }, + + "sets differing only by unknown": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "multi": 
{ + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "optional": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.NullVal(cty.DynamicPseudoType), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.UnknownVal(cty.String), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.UnknownVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + // These remain distinct because unknown values never + // compare equal. They may be consolidated together once + // the values become known, though. 
+ cty.ObjectVal(map[string]cty.Value{ + "optional": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.UnknownVal(cty.String), + }), + }), + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.UnknownVal(cty.String), + }), + }), + }), + }, + "nested list in set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + }, + "qux": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("beep"), + "qux": cty.StringVal("boop"), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("beep"), + "qux": cty.NullVal(cty.String), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("beep"), + "qux": cty.StringVal("boop"), + }), + }), + }), + }), + }), + }, + "empty nested list in set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: 
configschema.NestingList, + Block: configschema.Block{}, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListValEmpty((&configschema.Block{}).ImpliedType()), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListValEmpty((&configschema.Block{}).ImpliedType()), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListValEmpty((&configschema.Block{}).ImpliedType()), + }), + }), + }), + }, + "nested list with dynamic in set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.DynamicPseudoType, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.TupleVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + }, + "nested map with dynamic in set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ObjectVal(map[string]cty.Value{ + "bing": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + "bang": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ObjectVal(map[string]cty.Value{ + "bing": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ObjectVal(map[string]cty.Value{ + "bing": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{cty.StringVal("true")}), + }), + }), + }), + }), + }), + }, + "empty nested map in set": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": 
{ + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "baz": cty.String, + })), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "bing": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "bing": cty.ObjectVal(map[string]cty.Value{ + "baz": cty.StringVal("true"), + }), + }), + }), + }), + }), + }, + // This example has a mixture of optional, computed and required in a deeply-nested NestedType attribute + "deeply NestedType": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "bar": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: testAttributes, + }, + Required: true, + }, + "baz": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: testAttributes, + }, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + // prior + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.DynamicPseudoType), + "baz": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.String), + "computed": cty.StringVal("hello"), + "optional_computed": cty.StringVal("prior"), + "required": cty.StringVal("present"), + }), + }), + }), + // config + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": 
cty.UnknownVal(cty.Object(map[string]cty.Type{ // explicit unknown from the config + "optional": cty.String, + "computed": cty.String, + "optional_computed": cty.String, + "required": cty.String, + })), + "baz": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.String), + "computed": cty.NullVal(cty.String), + "optional_computed": cty.StringVal("hello"), + "required": cty.StringVal("present"), + }), + }), + }), + // want + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Object(map[string]cty.Type{ // explicit unknown preserved from the config + "optional": cty.String, + "computed": cty.String, + "optional_computed": cty.String, + "required": cty.String, + })), + "baz": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.String), // config is null + "computed": cty.StringVal("hello"), // computed values come from prior + "optional_computed": cty.StringVal("hello"), // config takes precedent over prior in opt+computed + "required": cty.StringVal("present"), // value from config + }), + }), + }), + }, + "deeply nested set": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "bar": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: testAttributes, + }, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + // prior values + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("prior"), + "computed": cty.StringVal("prior"), + "optional_computed": cty.StringVal("prior"), + "required": cty.StringVal("prior"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("other_prior"), + "computed": cty.StringVal("other_prior"), + "optional_computed": cty.StringVal("other_prior"), + "required": cty.StringVal("other_prior"), + }), + }), + }), + }), + }), + // config differs from prior + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("configured"), + "computed": cty.NullVal(cty.String), // computed attrs are null in config + "optional_computed": cty.StringVal("configured"), + "required": cty.StringVal("configured"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.String), // explicit null in config + "computed": cty.NullVal(cty.String), // computed attrs are null in config + "optional_computed": cty.StringVal("other_configured"), + "required": cty.StringVal("other_configured"), + })}), + }), + }), + }), + // want: + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("configured"), + "computed": cty.NullVal(cty.String), + "optional_computed": cty.StringVal("configured"), + "required": cty.StringVal("configured"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "optional": cty.NullVal(cty.String), // explicit null in config is preserved + "computed": cty.NullVal(cty.String), + "optional_computed": cty.StringVal("other_configured"), + "required": cty.StringVal("other_configured"), + })}), + }), + }), + }), + }, + "expected null NestedTypes": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "single": { + NestedType: &configschema.Object{ + Nesting: 
configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + "nested_map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "inner": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: testAttributes, + }, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "single": cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")}), + "list": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")})}), + "map": cty.MapVal(map[string]cty.Value{ + "map_entry": cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")}), + }), + "set": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")})}), + "nested_map": cty.MapVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "inner": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("foo"), + "computed": cty.StringVal("foo"), + "optional_computed": cty.StringVal("foo"), + "required": cty.StringVal("foo"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.NullVal(cty.Object(map[string]cty.Type{"bar": cty.String})), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"bar": 
cty.String}))), + "map": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{"bar": cty.String}))), + "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{"bar": cty.String}))), + "nested_map": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "inner": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "computed": cty.String, + "optional_computed": cty.String, + "required": cty.String, + }), + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.NullVal(cty.Object(map[string]cty.Type{"bar": cty.String})), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"bar": cty.String}))), + "map": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{"bar": cty.String}))), + "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{"bar": cty.String}))), + "nested_map": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "inner": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "computed": cty.String, + "optional_computed": cty.String, + "required": cty.String, + }), + }))), + }), + }, + "expected empty NestedTypes": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String}, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + }), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + }), + cty.ObjectVal(map[string]cty.Value{ + "map": 
cty.MapValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{"bar": cty.String})), + }), + }, + "optional types set replacement": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("old"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("new"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.StringVal("new"), + }), + }), + }), + }, + "prior null nested objects": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "single": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + }, + }, + }, + Optional: true, + }, + }, + }, + Optional: true, + }, + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + }, + }, + }, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "single": cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "foo": 
cty.String, + })), + }), + "map": cty.Map(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("b"), + }), + }), + }), + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("b"), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "single": cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("b"), + }), + }), + }), + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("a"), + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("b"), + }), + }), + }), + }), + }), + }, + + // Data sources are planned with an unknown value. + // Note that this plan fails AssertPlanValid, because for managed + // resources an instance would never be completely unknown. 
+ "unknown prior nested objects": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + }, + }, + }, + Computed: true, + }, + }, + }, + Computed: true, + }, + }, + }, + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + })), + })), + cty.NullVal(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + })), + })), + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "list": cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + })), + })), + }, + + // A nested object with computed attributes, which is contained in an + // optional+computed container. The nested computed values should be + // represented in the proposed new object. 
+ "config within optional+computed": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list_obj": { + Optional: true, + Computed: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "obj": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "optional": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("prior"), + "computed": cty.StringVal("prior computed"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("prior"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("prior"), + "computed": cty.StringVal("prior computed"), + }), + }), + }), + }), + }, + + // A nested object with computed attributes, which is contained in an + // optional+computed container. The prior nested object contains values + // which could not be computed, therefore the proposed new value must be + // the null value from the configuration. 
+ "computed within optional+computed": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list_obj": { + Optional: true, + Computed: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "obj": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "optional": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("prior"), + "computed": cty.StringVal("prior computed"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "obj": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "computed": cty.String, + }), + }), + )), + }), + cty.ObjectVal(map[string]cty.Value{ + "list_obj": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "obj": cty.Object(map[string]cty.Type{ + "optional": cty.String, + "computed": cty.String, + }), + }), + )), + }), + }, + + // A nested object with computed attributes, which is contained in an + // optional+computed set. The nested computed values should be + // represented in the proposed new object, and correlated with state + // via the non-computed attributes. 
+ "config add within optional+computed set": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set_obj": { + Optional: true, + Computed: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "obj": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "optional": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.StringVal("second computed"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("third"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": 
cty.StringVal("second"), + "computed": cty.StringVal("second computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("third"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + }, + + // A nested object with computed attributes, which is contained in a + // set. The nested computed values should be represented in the + // proposed new object, and correlated with state via the non-computed + // attributes. + "config add within set block": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set_obj": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "obj": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "optional": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.StringVal("second from config"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.StringVal("second from config"), + }), + }), + // new "third" value added + 
cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("third"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.StringVal("second from config"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("third"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + }, + + // A nested object with computed attributes, which is contained in a + // set. The nested computed values should be represented in the + // proposed new object, and correlated with state via the non-computed + // attributes. 
+ "config change within set block": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set_obj": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "obj": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "optional": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("second"), + "computed": cty.StringVal("second computed"), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("changed"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set_obj": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("first"), + "computed": cty.StringVal("first computed"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "optional": cty.StringVal("changed"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + }), + }, + + "set attr with partial optional computed change": { + &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "multi": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + }, + "oc": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("replaced"), + "oc": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("replaced"), + "oc": cty.NullVal(cty.String), + }), + }), + }), + }, + + "set attr without optional computed change": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "multi": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + }, + "oc": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + }), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + }), + }), + }), + }, + + "set attr with all optional computed": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "multi": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "oc": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + }), + }), + }), + // Each of these values can be correlated by the existence of the + // optional config attribute. Because "one" and "two" are set in + // the config, they must exist in the state regardless of + // optional&computed. 
+ cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + }), + }), + }), + }, + + "set block with all optional computed and nested object types": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "multi": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "oc": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "attr": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "opt": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "oc": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + })}), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.NullVal(cty.String), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.NullVal(cty.String), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("three"), + "oc": cty.NullVal(cty.String), + "attr": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "opt": cty.String, + "oc": cty.String, + }))), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "multi": cty.SetVal([]cty.Value{ + // We can correlate this with prior from the outer object + // attributes, and the equal nested set. + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("one"), + "oc": cty.StringVal("OK"), + })}), + }), + // This value is overridden by config, because we can't + // correlate optional+computed config values within nested + // sets. 
+ cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.StringVal("OK"), + "attr": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("two"), + "oc": cty.NullVal(cty.String), + })}), + }), + // This value was taken only from config + cty.ObjectVal(map[string]cty.Value{ + "opt": cty.StringVal("three"), + "oc": cty.NullVal(cty.String), + "attr": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "opt": cty.String, + "oc": cty.String, + }))), + }), + }), + }), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := ProposedNew(test.Schema, test.Prior, test.Config) + if !got.RawEquals(test.Want) { + t.Errorf("wrong result\ngot: %swant: %s", dump.Value(got), dump.Value(test.Want)) + } + }) + } +} + +var testAttributes = map[string]*configschema.Attribute{ + "optional": { + Type: cty.String, + Optional: true, + }, + "computed": { + Type: cty.String, + Computed: true, + }, + "optional_computed": { + Type: cty.String, + Computed: true, + Optional: true, + }, + "required": { + Type: cty.String, + Required: true, + }, +} diff --git a/pkg/plans/objchange/plan_valid.go b/pkg/plans/objchange/plan_valid.go new file mode 100644 index 00000000000..07faa8a7680 --- /dev/null +++ b/pkg/plans/objchange/plan_valid.go @@ -0,0 +1,485 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// AssertPlanValid checks whether a planned new state returned by a +// provider's PlanResourceChange method is suitable to achieve a change +// from priorState to config. It returns a slice with nonzero length if +// any problems are detected. 
Because problems here indicate bugs in the +// provider that generated the plannedState, they are written with provider +// developers as an audience, rather than end-users. +// +// All of the given values must have the same type and must conform to the +// implied type of the given schema, or this function may panic or produce +// garbage results. +// +// During planning, a provider may only make changes to attributes that are +// null (unset) in the configuration and are marked as "computed" in the +// resource type schema, in order to insert any default values the provider +// may know about. If the default value cannot be determined until apply time, +// the provider can return an unknown value. Providers are forbidden from +// planning a change that disagrees with any non-null argument in the +// configuration. +// +// As a special exception, providers _are_ allowed to provide attribute values +// conflicting with configuration if and only if the planned value exactly +// matches the corresponding attribute value in the prior state. The provider +// can use this to signal that the new value is functionally equivalent to +// the old and thus no change is required. 
+func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error { + return assertPlanValid(schema, priorState, config, plannedState, nil) +} + +func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error { + var errs []error + if plannedState.IsNull() && !config.IsNull() { + errs = append(errs, path.NewErrorf("planned for absence but config wants existence")) + return errs + } + if config.IsNull() && !plannedState.IsNull() { + errs = append(errs, path.NewErrorf("planned for existence but config wants absence")) + return errs + } + if plannedState.IsNull() { + // No further checks possible if the planned value is null + return errs + } + + impTy := schema.ImpliedType() + + // verify attributes + moreErrs := assertPlannedAttrsValid(schema.Attributes, priorState, config, plannedState, path) + errs = append(errs, moreErrs...) + + for name, blockS := range schema.BlockTypes { + path := append(path, cty.GetAttrStep{Name: name}) + plannedV := plannedState.GetAttr(name) + configV := config.GetAttr(name) + priorV := cty.NullVal(impTy.AttributeType(name)) + if !priorState.IsNull() { + priorV = priorState.GetAttr(name) + } + if plannedV.RawEquals(configV) { + // Easy path: nothing has changed at all + continue + } + + if !configV.IsKnown() { + // An unknown config block represents a dynamic block where the + // for_each value is unknown, and therefore cannot be altered by the + // provider. + errs = append(errs, path.NewErrorf("planned value %#v for unknown dynamic block", plannedV)) + continue + } + + if !plannedV.IsKnown() { + // Only dynamic configuration can set blocks to unknown, so this is + // not allowed from the provider. This means that either the config + // and plan should match, or we have an error where the plan + // changed the config value, both of which have been checked. 
+ errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path) + errs = append(errs, moreErrs...) + case configschema.NestingList: + // A NestingList might either be a list or a tuple, depending on + // whether there are dynamically-typed attributes inside. However, + // both support a similar-enough API that we can treat them the + // same for our purposes here. + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + if configV.IsNull() { + // Configuration cannot decode a block into a null value, but + // we could be dealing with a null returned by a legacy + // provider and inserted via ignore_changes. Fix the value in + // place so the length can still be compared. 
+ configV = cty.ListValEmpty(configV.Type().ElementType()) + } + + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + if !configV.HasIndex(idx).True() { + continue // should never happen since we checked the lengths above + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + case configschema.NestingMap: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // A NestingMap might either be a map or an object, depending on + // whether there are dynamically-typed attributes inside, but + // that's decided statically and so all values will have the same + // kind. 
+ if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + configAtys := configV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := configAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + path := append(path, cty.GetAttrStep{Name: k}) + + plannedEV := plannedV.GetAttr(k) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + configEV := configV.GetAttr(k) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.Type().HasAttribute(k) { + priorEV = priorV.GetAttr(k) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + for k := range configAtys { + if _, ok := plannedAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) + continue + } + } + } else { + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + k := idx.AsString() + if !configV.HasIndex(idx).True() { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + moreErrs := 
assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
+				errs = append(errs, moreErrs...)
+			}
+			for it := configV.ElementIterator(); it.Next(); {
+				idx, _ := it.Element()
+				// Every block key present in config must also appear in the
+				// plan; the provider may not silently drop configured blocks.
+				if !plannedV.HasIndex(idx).True() {
+					errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
+					continue
+				}
+			}
+		}
+	case configschema.NestingSet:
+		if plannedV.IsNull() {
+			errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
+			continue
+		}
+
+		// Because set elements have no identifier with which to correlate
+		// them, we can't robustly validate the plan for a nested block
+		// backed by a set, and so unfortunately we need to just trust the
+		// provider to do the right thing. :(
+		//
+		// (In principle we could correlate elements by matching the
+		// subset of attributes explicitly set in config, except for the
+		// special diff suppression rule which allows for there to be a
+		// planned value that is constructed by mixing part of a prior
+		// value with part of a config value, creating an entirely new
+		// element that is not present in either prior or config.)
+		for it := plannedV.ElementIterator(); it.Next(); {
+			idx, plannedEV := it.Element()
+			// Shadow path with the element index appended for error messages.
+			path := append(path, cty.IndexStep{Key: idx})
+			if !plannedEV.IsKnown() {
+				errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+				continue
+			}
+		}
+
+	default:
+		panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+	}
+	}
+
+	return errs
+}
+
+// assertPlannedAttrsValid validates the planned value of every attribute
+// described in schema against its config and prior values, collecting the
+// path-annotated errors produced by assertPlannedAttrValid.
+func assertPlannedAttrsValid(schema map[string]*configschema.Attribute, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	var errs []error
+	for name, attrS := range schema {
+		moreErrs := assertPlannedAttrValid(name, attrS, priorState, config, plannedState, path)
+		errs = append(errs, moreErrs...)
+	}
+	return errs
+}
+
+// assertPlannedAttrValid extracts the prior, config, and planned values for a
+// single named attribute and delegates to assertPlannedValueValid. When there
+// is no prior state at all, the prior value is taken to be null of the
+// attribute's type.
+func assertPlannedAttrValid(name string, attrS *configschema.Attribute, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	plannedV := plannedState.GetAttr(name)
+	configV := config.GetAttr(name)
+	priorV := cty.NullVal(attrS.Type)
+	if !priorState.IsNull() {
+		priorV = priorState.GetAttr(name)
+	}
+	// Extend the error path so any errors point at this attribute.
+	path = append(path, cty.GetAttrStep{Name: name})
+
+	return assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
+}
+
+// assertPlannedValueValid checks that a single planned attribute value is an
+// acceptable result given the corresponding config and prior values: the
+// provider may echo the config value, echo the prior value (diff
+// suppression), or choose freely for computed attributes. Nested object
+// attributes are validated recursively; any other change yields an error,
+// with sensitive attributes reported without echoing their values.
+func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
+
+	var errs []error
+	// Unknown value refinements are stripped before comparison so that a
+	// refined unknown still matches a plain unknown of the same type.
+	if unrefinedValue(plannedV).RawEquals(unrefinedValue(configV)) {
+		// This is the easy path: provider didn't change anything at all.
+		return errs
+	}
+	if unrefinedValue(plannedV).RawEquals(unrefinedValue(priorV)) && !priorV.IsNull() && !configV.IsNull() {
+		// Also pretty easy: there is a prior value and the provider has
+		// returned it unchanged. This indicates that configV and plannedV
+		// are functionally equivalent and so the provider wishes to disregard
+		// the configuration value in favor of the prior.
+		return errs
+	}
+
+	switch {
+	// The provider can plan any value for a computed-only attribute. There may
+	// be a config value here in the case where a user used `ignore_changes` on
+	// a computed attribute and ignored the warning, or we failed to validate
+	// computed attributes in the config, but regardless it's not a plan error
+	// caused by the provider.
+	case attrS.Computed && !attrS.Optional:
+		return errs
+
+	// The provider is allowed to insert optional values when the config is
+	// null, but only if the attribute is computed.
+	case configV.IsNull() && attrS.Computed:
+		return errs
+
+	case configV.IsNull() && !plannedV.IsNull():
+		// if the attribute is not computed, then any planned value is incorrect
+		if attrS.Sensitive {
+			errs = append(errs, path.NewErrorf("sensitive planned value for a non-computed attribute"))
+		} else {
+			errs = append(errs, path.NewErrorf("planned value %#v for a non-computed attribute", plannedV))
+		}
+		return errs
+	}
+
+	// If this attribute has a NestedType, validate the nested object
+	if attrS.NestedType != nil {
+		return assertPlannedObjectValid(attrS.NestedType, priorV, configV, plannedV, path)
+	}
+
+	// If none of the above conditions match, the provider has made an invalid
+	// change to this attribute.
+	if priorV.IsNull() {
+		if attrS.Sensitive {
+			errs = append(errs, path.NewErrorf("sensitive planned value does not match config value"))
+		} else {
+			errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV))
+		}
+		return errs
+	}
+
+	if attrS.Sensitive {
+		errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value"))
+	} else {
+		errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV))
+	}
+
+	return errs
+}
+
+// assertPlannedObjectValid validates a planned nested-object attribute value
+// against its config and prior values, recursing into the nested attribute
+// schema according to the object's nesting mode. Elements of set-backed
+// nestings cannot be correlated, so for NestingSet only the element count is
+// checked.
+func assertPlannedObjectValid(schema *configschema.Object, prior, config, planned cty.Value, path cty.Path) []error {
+	var errs []error
+
+	// Null-ness of the plan must agree with null-ness of the config, and a
+	// configured value may not be planned as wholly unknown.
+	if planned.IsNull() && !config.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
+		return errs
+	}
+	if config.IsNull() && !planned.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
+		return errs
+	}
+	if !config.IsNull() && !planned.IsKnown() {
+		errs = append(errs, path.NewErrorf("planned unknown for configured value"))
+		return errs
+	}
+
+	if planned.IsNull() {
+		// No further checks possible if the planned value is null
+		return errs
+	}
+
+	switch schema.Nesting {
+	case configschema.NestingSingle, configschema.NestingGroup:
+		moreErrs := assertPlannedAttrsValid(schema.Attributes, prior, config, planned, path)
+		errs = append(errs, moreErrs...)
+
+	case configschema.NestingList:
+		// A NestingList might either be a list or a tuple, depending on
+		// whether there are dynamically-typed attributes inside. However,
+		// both support a similar-enough API that we can treat them the
+		// same for our purposes here.
+
+		plannedL := planned.Length()
+		configL := config.Length()
+
+		// If both the planned and config counts are unknown there is
+		// nothing further we can check here.
+		if !plannedL.IsKnown() && !configL.IsKnown() {
+			return errs
+		}
+
+		lenEqual := plannedL.Equals(configL)
+		if !lenEqual.IsKnown() || lenEqual.False() {
+			errs = append(errs, path.NewErrorf("count in plan (%#v) disagrees with count in config (%#v)", plannedL, configL))
+			return errs
+		}
+		for it := planned.ElementIterator(); it.Next(); {
+			idx, plannedEV := it.Element()
+			// Shadow path with the element index appended for error messages.
+			path := append(path, cty.IndexStep{Key: idx})
+			if !config.HasIndex(idx).True() {
+				continue // should never happen since we checked the lengths above
+			}
+			configEV := config.Index(idx)
+			priorEV := cty.NullVal(schema.ImpliedType())
+			if !prior.IsNull() && prior.HasIndex(idx).True() {
+				priorEV = prior.Index(idx)
+			}
+
+			moreErrs := assertPlannedAttrsValid(schema.Attributes, priorEV, configEV, plannedEV, path)
+			errs = append(errs, moreErrs...)
+		}
+
+	case configschema.NestingMap:
+		// A NestingMap might either be a map or an object, depending on
+		// whether there are dynamically-typed attributes inside, so we will
+		// break these down to maps to handle them both in the same manner.
+		plannedVals := map[string]cty.Value{}
+		configVals := map[string]cty.Value{}
+		priorVals := map[string]cty.Value{}
+
+		plannedL := planned.Length()
+		configL := config.Length()
+
+		// If both the planned and config counts are unknown there is
+		// nothing further we can check here.
+		if !plannedL.IsKnown() && !configL.IsKnown() {
+			return errs
+		}
+
+		lenEqual := plannedL.Equals(configL)
+		if !lenEqual.IsKnown() || lenEqual.False() {
+			errs = append(errs, path.NewErrorf("count in plan (%#v) disagrees with count in config (%#v)", plannedL, configL))
+			return errs
+		}
+
+		if !planned.IsNull() {
+			plannedVals = planned.AsValueMap()
+		}
+		if !config.IsNull() {
+			configVals = config.AsValueMap()
+		}
+		if !prior.IsNull() {
+			priorVals = prior.AsValueMap()
+		}
+
+		// Keys must match in both directions: nothing planned that isn't
+		// configured, and nothing configured that isn't planned.
+		for k, plannedEV := range plannedVals {
+			configEV, ok := configVals[k]
+			if !ok {
+				errs = append(errs, path.NewErrorf("map key %q from plan is not present in config", k))
+				continue
+			}
+			path := append(path, cty.GetAttrStep{Name: k})
+
+			priorEV, ok := priorVals[k]
+			if !ok {
+				priorEV = cty.NullVal(schema.ImpliedType())
+			}
+			moreErrs := assertPlannedAttrsValid(schema.Attributes, priorEV, configEV, plannedEV, path)
+			errs = append(errs, moreErrs...)
+		}
+		for k := range configVals {
+			if _, ok := plannedVals[k]; !ok {
+				errs = append(errs, path.NewErrorf("map key %q from config is not present in plan", k))
+				continue
+			}
+		}
+
+	case configschema.NestingSet:
+		plannedL := planned.Length()
+		configL := config.Length()
+
+		// The planned count's possible range must at least include the
+		// config count; Range().Includes may be unknown, in which case we
+		// give the provider the benefit of the doubt.
+		if ok := plannedL.Range().Includes(configL); ok.IsKnown() && ok.False() {
+			errs = append(errs, path.NewErrorf("count in plan (%#v) disagrees with count in config (%#v)", plannedL, configL))
+			return errs
+		}
+		// Because set elements have no identifier with which to correlate
+		// them, we can't robustly validate the plan for a nested object
+		// backed by a set, and so unfortunately we need to just trust the
+		// provider to do the right thing.
+	}
+
+	return errs
+}
+
+// unrefinedValue returns the given value with any unknown value refinements
+// stripped away, making it a basic unknown value with only a type constraint.
+//
+// This function also considers unknown values nested inside a known container
+// such as a collection, which unfortunately makes it relatively expensive
+// for large data structures. Over time we should transition away from using
+// this trick and prefer to use cty's Equals and value range APIs instead
+// of using Value.RawEquals, which is primarily intended for unit test code
+// rather than real application use.
+func unrefinedValue(v cty.Value) cty.Value {
+	ret, _ := cty.Transform(v, func(p cty.Path, v cty.Value) (cty.Value, error) {
+		if !v.IsKnown() {
+			return cty.UnknownVal(v.Type()), nil
+		}
+		return v, nil
+	})
+	return ret
+}
diff --git a/pkg/plans/objchange/plan_valid_test.go b/pkg/plans/objchange/plan_valid_test.go
new file mode 100644
index 00000000000..56a602fad3e
--- /dev/null
+++ b/pkg/plans/objchange/plan_valid_test.go
@@ -0,0 +1,1974 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package objchange + +import ( + "testing" + + "github.com/apparentlymart/go-dump/dump" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestAssertPlanValid(t *testing.T) { + tests := map[string]struct { + Schema *configschema.Block + Prior cty.Value + Config cty.Value + Planned cty.Value + WantErrs []string + }{ + "all empty": { + &configschema.Block{}, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + cty.EmptyObjectVal, + nil, + }, + "no computed, all match": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + nil, + }, + "no computed, plan matches, no prior": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, 
+ }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.String, + "b": cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + nil, + }, + "no computed, invalid change in plan": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.String, + "b": cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("new c value"), + }), + }), + }), + []string{ + `.b[0].c: planned value cty.StringVal("new c value") does not match config value cty.StringVal("c value")`, + }, + }, + "no computed, invalid change in plan sensitive": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ 
+ "c": { + Type: cty.String, + Optional: true, + Sensitive: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.String, + "b": cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("new c value"), + }), + }), + }), + []string{ + `.b[0].c: sensitive planned value does not match config value`, + }, + }, + "no computed, diff suppression in plan": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("new c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a value"), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), // plan uses value from prior object + }), + }), + }), + nil, + }, + "no computed, all null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + 
Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + "b": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.NullVal(cty.String), + }), + }), + }), + nil, + }, + "nested map, normal update": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "b": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("hello"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("howdy"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("howdy"), + }), + }), + }), + nil, + }, + + // Nested block collections are never null + "nested list, null in plan": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + 
cty.NullVal(cty.Object(map[string]cty.Type{ + "b": cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + }))), + }), + []string{ + `.b: attribute representing a list of nested blocks must be empty to indicate no blocks, not null`, + }, + }, + + // but don't panic on a null list just in case + "nested list, null in config": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "b": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + }), + nil, + }, + + // blocks can be unknown when using dynamic + "nested list, unknown nested dynamic": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "a": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + "computed": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "computed": cty.NullVal(cty.String), + "b": 
cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("x"), + })}), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "b": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + "computed": cty.String, + }))), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "b": cty.UnknownVal(cty.List(cty.Object(map[string]cty.Type{ + "c": cty.String, + "computed": cty.String, + }))), + })}), + }), + []string{}, + }, + + "nested set, unknown dynamic cannot be planned": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + + cty.ObjectVal(map[string]cty.Value{ + "computed": cty.NullVal(cty.String), + "b": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("x"), + })}), + }), + cty.ObjectVal(map[string]cty.Value{ + "computed": cty.NullVal(cty.String), + "b": cty.UnknownVal(cty.Set(cty.Object(map[string]cty.Type{ + "c": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "computed": cty.StringVal("default"), + "b": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("oops"), + })}), + }), + + []string{ + `.b: planned value cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"c":cty.StringVal("oops")})}) for unknown dynamic block`, + }, + }, + + "nested set, null in plan": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: 
cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "b": cty.Set(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetValEmpty(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "c": cty.String, + }))), + }), + []string{ + `.b: attribute representing a set of nested blocks must be empty to indicate no blocks, not null`, + }, + }, + "nested map, null in plan": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingMap, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "b": cty.Map(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.MapValEmpty(cty.Object(map[string]cty.Type{ + "c": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "c": cty.String, + }))), + }), + []string{ + `.b: attribute representing a map of nested blocks must be empty to indicate no blocks, not null`, + }, + }, + + // We don't actually do any validation for nested set blocks, and so + // the remaining cases here are just intending to ensure we don't + // inadvertently start generating errors incorrectly in future. 
+ "nested set, no computed, no changes": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + nil, + }, + "nested set, no computed, invalid change in plan": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("new c value"), // matches neither prior nor config + }), + }), + }), + nil, + }, + "nested set, no computed, diff suppressed": { + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "b": { + Nesting: configschema.NestingSet, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "c": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + 
cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("new c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "c": cty.StringVal("c value"), // plan uses value from prior object + }), + }), + }), + nil, + }, + + // Attributes with NestedTypes + "NestedType attr, no computed, all match": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + nil, + }, + "NestedType attr, no computed, plan matches, no prior": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": 
cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("c value"), + }), + }), + }), + nil, + }, + "NestedType, no computed, invalid change in plan": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("c value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("new c value"), + }), + }), + }), + []string{ + `.a[0].b: planned value cty.StringVal("new c value") does not match config value cty.StringVal("c value")`, + }, + }, + "NestedType attr, no computed, invalid change in plan sensitive": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + Sensitive: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "a": cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("new b value"), + }), + }), + }), + 
[]string{ + `.a[0].b: sensitive planned value does not match config value`, + }, + }, + "NestedType attr, no computed, diff suppression in plan": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("new b value"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("b value"), // plan uses value from prior object + }), + }), + }), + nil, + }, + "NestedType attr, no computed, all null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.DynamicPseudoType), + }), + nil, + }, + "NestedType attr, no computed, all zero value": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "b": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": 
cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "b": cty.String, + }))), + }), + nil, + }, + "NestedType NestingSet attribute to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Required: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "blop": cty.String, + }))), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "blop": cty.String, + }))), + }), + nil, + }, + "NestedType deep nested optional set attribute to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bleep": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blome": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bleep": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blome": cty.StringVal("ok"), + }), + }), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bleep": 
cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Set( + cty.Object(map[string]cty.Type{ + "blome": cty.String, + }), + )), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bleep": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.List( + cty.Object(map[string]cty.Type{ + "blome": cty.String, + }), + )), + }), + }), + }), + nil, + }, + "NestedType deep nested set": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bleep": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blome": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bleep": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blome": cty.StringVal("ok"), + }), + }), + }), + }), + }), + // Note: bloop is null in the config + cty.ObjectVal(map[string]cty.Value{ + "bleep": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Set( + cty.Object(map[string]cty.Type{ + "blome": cty.String, + }), + )), + }), + }), + }), + // provider sends back the prior value, not matching the config + cty.ObjectVal(map[string]cty.Value{ + "bleep": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blome": cty.StringVal("ok"), + }), + }), + }), + }), + }), + nil, // we cannot validate individual set elements, and trust the provider's response + }, + "NestedType nested computed list attribute": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + 
NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Optional: true, + }, + }, + }, + Computed: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "blop": cty.String, + }))), + }), + + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + nil, + }, + "NestedType nested list attribute to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "blop": cty.String, + }))), + }), + + // provider returned the old value + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + []string{`.bloop: planned value cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"blop":cty.StringVal("ok")})}) for a non-computed attribute`}, + }, + "NestedType nested set attribute to null": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bloop": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "blop": { + Type: cty.String, + 
Optional: true, + }, + }, + }, + Optional: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ + "blop": cty.String, + }))), + }), + // provider returned the old value + cty.ObjectVal(map[string]cty.Value{ + "bloop": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "blop": cty.StringVal("ok"), + }), + }), + }), + []string{`.bloop: planned value cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"blop":cty.StringVal("ok")})}) for a non-computed attribute`}, + }, + "computed within nested objects": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + // When an object has dynamic attrs, the map may be + // handled as an object. 
+ "map_as_obj": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + "single": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.DynamicPseudoType, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "map_as_obj": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.DynamicPseudoType, + })), + "list": cty.List(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "set": cty.Set(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "single": cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + })), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "map_as_obj": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.DynamicPseudoType), + }), + }), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "single": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ 
+ "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "map_as_obj": cty.ObjectVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("computed"), + }), + }), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "single": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + nil, + }, + "computed nested objects": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + }, + }, + }, + Computed: true, + }, + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + }, + }, + }, + Computed: true, + }, + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + }, + }, + }, + Optional: true, + Computed: true, + }, + "single": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.DynamicPseudoType, + }, + }, + }, + Computed: true, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "list": cty.List(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "set": cty.Set(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "single": cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + })), + cty.ObjectVal(map[string]cty.Value{ + "map": 
cty.NullVal(cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.String, + }))), + "list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "name": cty.String, + }))), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "single": cty.NullVal(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + }), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("computed"), + }), + }), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "single": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + }), + nil, + }, + "optional computed within nested objects": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + // When an object has dynamic attrs, the map may be + // handled as an object. 
+ "map_as_obj": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + "list": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + "set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + "single": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.DynamicPseudoType, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "map_as_obj": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.DynamicPseudoType, + })), + "list": cty.List(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "set": cty.Set(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + "single": cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + })), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "map_as_obj": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.DynamicPseudoType), + }), + }), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "single": cty.ObjectVal(map[string]cty.Value{ + 
"name": cty.StringVal("from_config"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "map_as_obj": cty.ObjectVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("computed"), + }), + }), + "list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("computed"), + }), + }), + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + }), + }), + "single": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + nil, + }, + "cannot replace config nested attr": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "map": cty.Map(cty.Object(map[string]cty.Type{ + "name": cty.String, + })), + })), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "map": cty.MapVal(map[string]cty.Value{ + "one": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_provider"), + }), + }), + }), + []string{`.map.one.name: planned value cty.StringVal("from_provider") does not match config value cty.StringVal("from_config")`}, + }, + + // If a config value ended up in a computed-only attribute it can still + // be a valid plan. We either got here because the user ignore warnings + // about ignore_changes on computed attributes, or we failed to + // validate a config with computed values. 
Either way, we don't want to + // indicate an error with the provider. + "computed only value with config": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Computed: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("old"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("old"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + nil, + }, + + // When validating collections we start by comparing length, which + // requires guarding for any unknown values incorrectly returned by the + // provider. + "nested collection attrs planned unknown": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set": { + Computed: true, + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + "list": { + Computed: true, + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + "map": { + Computed: true, + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "list": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "map": cty.MapVal(map[string]cty.Value{ + "key": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "list": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + "map": cty.MapVal(map[string]cty.Value{ + "key": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("from_config"), + }), + }), + }), + // provider cannot override the config + cty.ObjectVal(map[string]cty.Value{ + "set": cty.UnknownVal(cty.Set( + cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + )), + "list": cty.UnknownVal(cty.Set( + cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + )), + "map": cty.UnknownVal(cty.Map( + cty.Object(map[string]cty.Type{ + "name": cty.String, + }), + )), + }), + []string{ + `.set: planned unknown for configured value`, + `.list: planned unknown for configured value`, + `.map: planned unknown for configured value`, + }, + }, + + "refined unknown values can become less refined": { + // Providers often can't preserve refinements through the provider + // wire protocol: although we do have a defined serialization for + // it, most providers were written before there was any such + // thing as refinements, and in future there might be new + // refinements that even refinement-aware providers don't know + // how to preserve, so we allow them to get dropped here as + // a concession to backward-compatibility. 
+ &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": { + Type: cty.String, + Required: true, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("old"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String).RefineNotNull(), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.String), + }), + nil, + }, + + "refined unknown values in collection elements can become less refined": { + // Providers often can't preserve refinements through the provider + // wire protocol: although we do have a defined serialization for + // it, most providers were written before there was any such + // thing as refinements, and in future there might be new + // refinements that even refinement-aware providers don't know + // how to preserve, so we allow them to get dropped here as + // a concession to backward-compatibility. + // + // This is intending to approximate something like this: + // + // resource "null_resource" "hello" { + // triggers = { + // key = uuid() + // } + // } + // + // ...under the assumption that the null_resource implementation + // cannot preserve the not-null refinement that the uuid function + // generates. 
+ // + // https://github.com/hashicorp/terraform/issues/33385 + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "m": { + Type: cty.Map(cty.String), + }, + }, + }, + cty.NullVal(cty.Object(map[string]cty.Type{ + "m": cty.Map(cty.String), + })), + cty.ObjectVal(map[string]cty.Value{ + "m": cty.MapVal(map[string]cty.Value{ + "key": cty.UnknownVal(cty.String).RefineNotNull(), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "m": cty.MapVal(map[string]cty.Value{ + "key": cty.UnknownVal(cty.String), + }), + }), + nil, + }, + + "nested set values can contain computed unknown": { + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set": { + Optional: true, + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "input": { + Type: cty.String, + Optional: true, + }, + "computed": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("a"), + "computed": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("b"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("a"), + "computed": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("b"), + "computed": cty.NullVal(cty.String), + }), + }), + }), + // Plan can mark the null computed values as unknown + cty.ObjectVal(map[string]cty.Value{ + "set": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("a"), + "computed": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "input": cty.StringVal("b"), + "computed": cty.UnknownVal(cty.String), + }), + }), + }), + []string{}, + 
}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + errs := AssertPlanValid(test.Schema, test.Prior, test.Config, test.Planned) + + wantErrs := make(map[string]struct{}) + gotErrs := make(map[string]struct{}) + for _, err := range errs { + gotErrs[tfdiags.FormatError(err)] = struct{}{} + } + for _, msg := range test.WantErrs { + wantErrs[msg] = struct{}{} + } + + t.Logf( + "\nprior: %sconfig: %splanned: %s", + dump.Value(test.Prior), + dump.Value(test.Config), + dump.Value(test.Planned), + ) + for msg := range wantErrs { + if _, ok := gotErrs[msg]; !ok { + t.Errorf("missing expected error: %s", msg) + } + } + for msg := range gotErrs { + if _, ok := wantErrs[msg]; !ok { + t.Errorf("unexpected extra error: %s", msg) + } + } + }) + } +} diff --git a/pkg/plans/plan.go b/pkg/plans/plan.go new file mode 100644 index 00000000000..57221788380 --- /dev/null +++ b/pkg/plans/plan.go @@ -0,0 +1,230 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "sort" + "time" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/globalref" + "github.com/kubegems/opentofu/pkg/states" +) + +// Plan is the top-level type representing a planned set of changes. +// +// A plan is a summary of the set of changes required to move from a current +// state to a goal state derived from configuration. The described changes +// are not applied directly, but contain an approximation of the final +// result that will be completed during apply by resolving any values that +// cannot be predicted. +// +// A plan must always be accompanied by the configuration it was built from, +// since the plan does not itself include all of the information required to +// make the changes indicated. 
+type Plan struct { + // Mode is the mode under which this plan was created. + // + // This is only recorded to allow for UI differences when presenting plans + // to the end-user, and so it must not be used to influence apply-time + // behavior. The actions during apply must be described entirely by + // the Changes field, regardless of how the plan was created. + // + // FIXME: destroy operations still rely on DestroyMode being set, because + // there is no other source of this information in the plan. New behavior + // should not be added based on this flag, and changing the flag should be + // checked carefully against existing destroy behaviors. + UIMode Mode + + VariableValues map[string]DynamicValue + Changes *Changes + DriftedResources []*ResourceInstanceChangeSrc + TargetAddrs []addrs.Targetable + ForceReplaceAddrs []addrs.AbsResourceInstance + Backend Backend + + // Errored is true if the Changes information is incomplete because + // the planning operation failed. An errored plan cannot be applied, + // but can be cautiously inspected for debugging purposes. + Errored bool + + // Checks captures a snapshot of the (probably-incomplete) check results + // at the end of the planning process. + // + // If this plan is applyable (that is, if the planning process completed + // without errors) then the set of checks here should be complete even + // though some of them will likely have StatusUnknown where the check + // condition depends on values we won't know until the apply step. + Checks *states.CheckResults + + // RelevantAttributes is a set of resource instance addresses and + // attributes that are either directly affected by proposed changes or may + // have indirectly contributed to them via references in expressions. + // + // This is the result of a heuristic and is intended only as a hint to + // the UI layer in case it wants to emphasize or de-emphasize certain + // resources. 
Don't use this to drive any non-cosmetic behavior, especially + // including anything that would be subject to compatibility constraints. + RelevantAttributes []globalref.ResourceAttr + + // PrevRunState and PriorState both describe the situation that the plan + // was derived from: + // + // PrevRunState is a representation of the outcome of the previous + // OpenTofu operation, without any updates from the remote system but + // potentially including some changes that resulted from state upgrade + // actions. + // + // PriorState is a representation of the current state of remote objects, + // which will differ from PrevRunState if the "refresh" step returned + // different data, which might reflect drift. + // + // PriorState is the main snapshot we use for actions during apply. + // PrevRunState is only here so that we can diff PriorState against it in + // order to report to the user any out-of-band changes we've detected. + PrevRunState *states.State + PriorState *states.State + + // PlannedState is the temporary planned state that was created during the + // graph walk that generated this plan. + // + // This is required by the testing framework when evaluating run blocks + // executing in plan mode. The graph updates the state with certain values + // that are difficult to retrieve later, such as local values that reference + // updated resources. It is easier to build the testing scope with access + // to same temporary state the plan used/built. + // + // This is never recorded outside of OpenTofu. It is not written into the + // binary plan file, and it is not written into the JSON structured outputs. + // The testing framework never writes the plans out but holds everything in + // memory as it executes, so there is no need to add any kind of + // serialization for this field. This does mean that you shouldn't rely on + // this field existing unless you have just generated the plan. 
+ PlannedState *states.State + + // ExternalReferences are references that are being made to resources within + // the plan from external sources. As with PlannedState this is used by the + // OpenTofu testing framework, and so isn't written into any external + // representation of the plan. + ExternalReferences []*addrs.Reference + + // Timestamp is the record of truth for when the plan happened. + Timestamp time.Time +} + +// CanApply returns true if and only if the recieving plan includes content +// that would make sense to apply. If it returns false, the plan operation +// should indicate that there's nothing to do and OpenTofu should exit +// without prompting the user to confirm the changes. +// +// This function represents our main business logic for making the decision +// about whether a given plan represents meaningful "changes", and so its +// exact definition may change over time; the intent is just to centralize the +// rules for that rather than duplicating different versions of it at various +// locations in the UI code. +func (p *Plan) CanApply() bool { + switch { + case p.Errored: + // An errored plan can never be applied, because it is incomplete. + // Such a plan is only useful for describing the subset of actions + // planned so far in case they are useful for understanding the + // causes of the errors. + return false + + case !p.Changes.Empty(): + // "Empty" means that everything in the changes is a "NoOp", so if + // not empty then there's at least one non-NoOp change. + return true + + case !p.PriorState.ManagedResourcesEqual(p.PrevRunState): + // If there are no changes planned but we detected some + // outside-OpenTofu changes while refreshing then we consider + // that applyable in isolation only if this was a refresh-only + // plan where we expect updating the state to include these + // changes was the intended goal. 
+ // + // (We don't treat a "refresh only" plan as applyable in normal + // planning mode because historically the refresh result wasn't + // considered part of a plan at all, and so it would be + // a disruptive breaking change if refreshing alone suddenly + // became applyable in the normal case and an existing configuration + // was relying on ignore_changes in order to be convergent in spite + // of intentional out-of-band operations.) + return p.UIMode == RefreshOnlyMode + + default: + // Otherwise, there are either no changes to apply or they are changes + // our cases above don't consider as worthy of applying in isolation. + return false + } +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving plan. +// +// The result is de-duplicated so that each distinct address appears only once. +func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { + if p == nil || p.Changes == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, rc := range p.Changes.Resources { + m[rc.ProviderAddr.String()] = rc.ProviderAddr + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// Backend represents the backend-related configuration and other data as it +// existed when a plan was created. +type Backend struct { + // Type is the type of backend that the plan will apply against. + Type string + + // Config is the configuration of the backend, whose schema is decided by + // the backend Type. + Config DynamicValue + + // Workspace is the name of the workspace that was active when the plan + // was created. 
It is illegal to apply a plan created for one workspace + // to the state of another workspace. + // (This constraint is already enforced by the statefile lineage mechanism, + // but storing this explicitly allows us to return a better error message + // in the situation where the user has the wrong workspace selected.) + Workspace string +} + +func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { + dv, err := NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + return nil, err + } + + return &Backend{ + Type: typeName, + Config: dv, + Workspace: workspaceName, + }, nil +} diff --git a/pkg/plans/plan_test.go b/pkg/plans/plan_test.go new file mode 100644 index 00000000000..06cf0fc448f --- /dev/null +++ b/pkg/plans/plan_test.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +import ( + "testing" + + "github.com/go-test/deep" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestProviderAddrs(t *testing.T) { + + plan := &Plan{ + VariableValues: map[string]DynamicValue{}, + Changes: &Changes{ + Resources: []*ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + DeposedKey: "foodface", + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "what", + 
}.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("foo"), + Provider: addrs.NewDefaultProvider("test"), + }, + }, + }, + }, + } + + got := plan.ProviderAddrs() + want := []addrs.AbsProviderConfig{ + addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("foo"), + Provider: addrs.NewDefaultProvider("test"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + } + + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +// Module outputs should not effect the result of Empty +func TestModuleOutputChangesEmpty(t *testing.T) { + changes := &Changes{ + Outputs: []*OutputChangeSrc{ + { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), + OutputValue: addrs.OutputValue{ + Name: "output", + }, + }, + ChangeSrc: ChangeSrc{ + Action: Update, + Before: []byte("a"), + After: []byte("b"), + }, + }, + }, + } + + if !changes.Empty() { + t.Fatal("plan has no visible changes") + } +} diff --git a/pkg/plans/planfile/config_snapshot.go b/pkg/plans/planfile/config_snapshot.go new file mode 100644 index 00000000000..6f2ebde7601 --- /dev/null +++ b/pkg/plans/planfile/config_snapshot.go @@ -0,0 +1,223 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "archive/zip" + "encoding/json" + "fmt" + "io" + "path" + "sort" + "strings" + "time" + + version "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/configs/configload" +) + +const configSnapshotPrefix = "tfconfig/" +const configSnapshotManifestFile = configSnapshotPrefix + "modules.json" +const configSnapshotModulePrefix = configSnapshotPrefix + "m-" + +type configSnapshotModuleRecord struct { + Key string `json:"Key"` + SourceAddr string `json:"Source,omitempty"` + VersionStr string `json:"Version,omitempty"` + Dir string `json:"Dir"` +} +type configSnapshotModuleManifest []configSnapshotModuleRecord + +func readConfigSnapshot(z *zip.Reader) (*configload.Snapshot, error) { + // Errors from this function are expected to be reported with some + // additional prefix context about them being in a config snapshot, + // so they should not themselves refer to the config snapshot. + // They are also generally indicative of an invalid file, and so since + // plan files should not be hand-constructed we don't need to worry + // about making the messages user-actionable. + + snap := &configload.Snapshot{ + Modules: map[string]*configload.SnapshotModule{}, + } + var manifestSrc []byte + + // For processing our source files, we'll just sweep over all the files + // and react to the one-by-one to start, and then clean up afterwards + // when we'll presumably have found the manifest file. + for _, file := range z.File { + switch { + + case file.Name == configSnapshotManifestFile: + // It's the manifest file, so we'll just read it raw into + // manifestSrc for now and process it below. 
+ r, err := file.Open() + if err != nil { + return nil, fmt.Errorf("failed to open module manifest: %s", r) + } + manifestSrc, err = io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %s", r) + } + + case strings.HasPrefix(file.Name, configSnapshotModulePrefix): + relName := file.Name[len(configSnapshotModulePrefix):] + moduleKey, fileName := path.Split(relName) + + // moduleKey should currently have a trailing slash on it, which we + // can use to recognize the difference between the root module + // (just a trailing slash) and no module path at all (empty string). + if moduleKey == "" { + // ignore invalid config entry + continue + } + moduleKey = moduleKey[:len(moduleKey)-1] // trim trailing slash + + r, err := file.Open() + if err != nil { + return nil, fmt.Errorf("failed to open snapshot of %s from module %q: %w", fileName, moduleKey, err) + } + fileSrc, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to read snapshot of %s from module %q: %w", fileName, moduleKey, err) + } + + if _, exists := snap.Modules[moduleKey]; !exists { + snap.Modules[moduleKey] = &configload.SnapshotModule{ + Files: map[string][]byte{}, + // Will fill in everything else afterwards, when we + // process the manifest. + } + } + snap.Modules[moduleKey].Files[fileName] = fileSrc + } + } + + if manifestSrc == nil { + return nil, fmt.Errorf("config snapshot does not have manifest file") + } + + var manifest configSnapshotModuleManifest + err := json.Unmarshal(manifestSrc, &manifest) + if err != nil { + return nil, fmt.Errorf("invalid module manifest: %w", err) + } + + for _, record := range manifest { + modSnap, exists := snap.Modules[record.Key] + if !exists { + // We'll allow this, assuming that it's a module with no files. + // This is still weird, since we generally reject modules with + // no files, but we'll allow it because downstream errors will + // catch it in that case. 
+ modSnap = &configload.SnapshotModule{ + Files: map[string][]byte{}, + } + snap.Modules[record.Key] = modSnap + } + modSnap.SourceAddr = record.SourceAddr + modSnap.Dir = record.Dir + if record.VersionStr != "" { + v, err := version.NewVersion(record.VersionStr) + if err != nil { + return nil, fmt.Errorf("manifest has invalid version string %q for module %q", record.VersionStr, record.Key) + } + modSnap.Version = v + } + } + + // Finally, we'll make sure we don't have any errant files for modules that + // aren't in the manifest. + for k := range snap.Modules { + found := false + for _, record := range manifest { + if record.Key == k { + found = true + break + } + } + if !found { + return nil, fmt.Errorf("found files for module %q that isn't recorded in the manifest", k) + } + } + + return snap, nil +} + +// writeConfigSnapshot adds to the given zip.Writer one or more files +// representing the given snapshot. +// +// This file creates new files in the writer, so any already-open writer +// for the file will be invalidated by this call. The writer remains open +// when this function returns. +func writeConfigSnapshot(snap *configload.Snapshot, z *zip.Writer) error { + // Errors from this function are expected to be reported with some + // additional prefix context about them being in a config snapshot, + // so they should not themselves refer to the config snapshot. + // They are also indicative of a bug in the caller, so they do not + // need to be user-actionable. + + var manifest configSnapshotModuleManifest + keys := make([]string, 0, len(snap.Modules)) + for k := range snap.Modules { + keys = append(keys, k) + } + sort.Strings(keys) + + // We'll re-use this fileheader for each Create we do below. 
+ + for _, k := range keys { + snapMod := snap.Modules[k] + record := configSnapshotModuleRecord{ + Dir: snapMod.Dir, + Key: k, + SourceAddr: snapMod.SourceAddr, + } + if snapMod.Version != nil { + record.VersionStr = snapMod.Version.String() + } + manifest = append(manifest, record) + + pathPrefix := fmt.Sprintf("%s%s/", configSnapshotModulePrefix, k) + for filename, src := range snapMod.Files { + zh := &zip.FileHeader{ + Name: pathPrefix + filename, + Method: zip.Deflate, + Modified: time.Now(), + } + w, err := z.CreateHeader(zh) + if err != nil { + return fmt.Errorf("failed to create snapshot of %s from module %q: %w", zh.Name, k, err) + } + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write snapshot of %s from module %q: %w", zh.Name, k, err) + } + } + } + + // Now we'll write our manifest + { + zh := &zip.FileHeader{ + Name: configSnapshotManifestFile, + Method: zip.Deflate, + Modified: time.Now(), + } + src, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return fmt.Errorf("failed to serialize module manifest: %w", err) + } + w, err := z.CreateHeader(zh) + if err != nil { + return fmt.Errorf("failed to create module manifest: %w", err) + } + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write module manifest: %w", err) + } + } + + return nil +} diff --git a/pkg/plans/planfile/config_snapshot_test.go b/pkg/plans/planfile/config_snapshot_test.go new file mode 100644 index 00000000000..6b74b8daa48 --- /dev/null +++ b/pkg/plans/planfile/config_snapshot_test.go @@ -0,0 +1,58 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "archive/zip" + "bytes" + "path/filepath" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" +) + +func TestConfigSnapshotRoundtrip(t *testing.T) { + fixtureDir := filepath.Join("testdata", "test-config") + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform", "modules"), + }) + if err != nil { + t.Fatal(err) + } + + _, snapIn, diags := loader.LoadConfigWithSnapshot(fixtureDir, configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + err = writeConfigSnapshot(snapIn, zw) + if err != nil { + t.Fatalf("failed to write snapshot: %s", err) + } + zw.Close() + + raw := buf.Bytes() + r := bytes.NewReader(raw) + zr, err := zip.NewReader(r, int64(len(raw))) + if err != nil { + t.Fatal(err) + } + + snapOut, err := readConfigSnapshot(zr) + if err != nil { + t.Fatalf("failed to read snapshot: %s", err) + } + + if !reflect.DeepEqual(snapIn, snapOut) { + t.Errorf("result does not match input\nresult: %sinput: %s", spew.Sdump(snapOut), spew.Sdump(snapIn)) + } +} diff --git a/pkg/plans/planfile/doc.go b/pkg/plans/planfile/doc.go new file mode 100644 index 00000000000..e714ec86319 --- /dev/null +++ b/pkg/plans/planfile/doc.go @@ -0,0 +1,11 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package planfile deals with the file format used to serialize plans to disk +// and then deserialize them back into memory later. +// +// A plan file contains the planned changes along with the configuration and +// state snapshot that they are based on. 
+package planfile diff --git a/pkg/plans/planfile/planfile_test.go b/pkg/plans/planfile/planfile_test.go new file mode 100644 index 00000000000..aa00693fcca --- /dev/null +++ b/pkg/plans/planfile/planfile_test.go @@ -0,0 +1,217 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + tfversion "github.com/kubegems/opentofu/version" +) + +func TestRoundtrip(t *testing.T) { + fixtureDir := filepath.Join("testdata", "test-config") + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: filepath.Join(fixtureDir, ".terraform", "modules"), + }) + if err != nil { + t.Fatal(err) + } + + _, snapIn, diags := loader.LoadConfigWithSnapshot(fixtureDir, configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + // Just a minimal state file so we can test that it comes out again at all. + // We don't need to test the entire thing because the state file + // serialization is already tested in its own package. 
+ stateFileIn := &statefile.File{ + TerraformVersion: tfversion.SemVer, + Serial: 2, + Lineage: "abc123", + State: states.NewState(), + } + prevStateFileIn := &statefile.File{ + TerraformVersion: tfversion.SemVer, + Serial: 1, + Lineage: "abc123", + State: states.NewState(), + } + + // Minimal plan too, since the serialization of the tfplan portion of the + // file is tested more fully in tfplan_test.go . + planIn := &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{}, + Outputs: []*plans.OutputChangeSrc{}, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{}, + VariableValues: map[string]plans.DynamicValue{ + "foo": plans.DynamicValue([]byte("foo placeholder")), + }, + Backend: plans.Backend{ + Type: "local", + Config: plans.DynamicValue([]byte("config placeholder")), + Workspace: "default", + }, + Checks: &states.CheckResults{}, + + // Due to some historical oddities in how we've changed modelling over + // time, we also include the states (without the corresponding file + // headers) in the plans.Plan object. This is currently ignored by + // Create but will be returned by ReadPlan and so we need to include + // it here so that we'll get a match when we compare input and output + // below. 
+ PrevRunState: prevStateFileIn.State, + PriorState: stateFileIn.State, + } + + locksIn := depsfile.NewLocks() + locksIn.SetProvider( + addrs.NewDefaultProvider("boop"), + getproviders.MustParseVersion("1.0.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{ + getproviders.MustParseHash("fake:hello"), + }, + ) + + planFn := filepath.Join(t.TempDir(), "tfplan") + + err = Create(planFn, CreateArgs{ + ConfigSnapshot: snapIn, + PreviousRunStateFile: prevStateFileIn, + StateFile: stateFileIn, + Plan: planIn, + DependencyLocks: locksIn, + }, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to create plan file: %s", err) + } + + wpf, err := OpenWrapped(planFn, encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to open plan file for reading: %s", err) + } + pr, ok := wpf.Local() + if !ok { + t.Fatalf("failed to open plan file as a local plan file") + } + if wpf.IsCloud() { + t.Fatalf("wrapped plan claims to be both kinds of plan at once") + } + + t.Run("ReadPlan", func(t *testing.T) { + planOut, err := pr.ReadPlan() + if err != nil { + t.Fatalf("failed to read plan: %s", err) + } + if diff := cmp.Diff(planIn, planOut); diff != "" { + t.Errorf("plan did not survive round-trip\n%s", diff) + } + }) + + t.Run("ReadStateFile", func(t *testing.T) { + stateFileOut, err := pr.ReadStateFile() + if err != nil { + t.Fatalf("failed to read state: %s", err) + } + if diff := cmp.Diff(stateFileIn, stateFileOut); diff != "" { + t.Errorf("state file did not survive round-trip\n%s", diff) + } + }) + + t.Run("ReadPrevStateFile", func(t *testing.T) { + prevStateFileOut, err := pr.ReadPrevStateFile() + if err != nil { + t.Fatalf("failed to read state: %s", err) + } + if diff := cmp.Diff(prevStateFileIn, prevStateFileOut); diff != "" { + t.Errorf("state file did not survive round-trip\n%s", diff) + } + }) + + t.Run("ReadConfigSnapshot", func(t *testing.T) { + snapOut, err := pr.ReadConfigSnapshot() + if err != nil { 
+ t.Fatalf("failed to read config snapshot: %s", err) + } + if diff := cmp.Diff(snapIn, snapOut); diff != "" { + t.Errorf("config snapshot did not survive round-trip\n%s", diff) + } + }) + + t.Run("ReadConfig", func(t *testing.T) { + // Reading from snapshots is tested in the configload package, so + // here we'll just test that we can successfully do it, to see if the + // glue code in _this_ package is correct. + _, diags := pr.ReadConfig(configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Errorf("when reading config: %s", diags.Err()) + } + }) + + t.Run("ReadDependencyLocks", func(t *testing.T) { + locksOut, diags := pr.ReadDependencyLocks() + if diags.HasErrors() { + t.Fatalf("when reading config: %s", diags.Err()) + } + got := locksOut.AllProviders() + want := locksIn.AllProviders() + if diff := cmp.Diff(want, got, cmp.AllowUnexported(depsfile.ProviderLock{})); diff != "" { + t.Errorf("provider locks did not survive round-trip\n%s", diff) + } + }) +} + +func TestWrappedError(t *testing.T) { + // Open something that isn't a cloud or local planfile: should error + wrongFile := "not a valid zip file" + _, err := OpenWrapped(filepath.Join("testdata", "test-config", "root.tf"), encryption.PlanEncryptionDisabled()) + if !strings.Contains(err.Error(), wrongFile) { + t.Fatalf("expected %q, got %q", wrongFile, err) + } + + // Open something that doesn't exist: should error + var missingFileError string + if runtime.GOOS == "windows" { + missingFileError = "The system cannot find the file specified" + } else { + missingFileError = "no such file or directory" + } + _, err = OpenWrapped(filepath.Join("testdata", "absent.tfplan"), encryption.PlanEncryptionDisabled()) + if !strings.Contains(err.Error(), missingFileError) { + t.Fatalf("expected %q, got %q", missingFileError, err) + } +} + +func TestWrappedCloud(t *testing.T) { + // Loading valid cloud plan results in a wrapped cloud plan + wpf, err := OpenWrapped(filepath.Join("testdata", "cloudplan.json"), 
encryption.PlanEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to open valid cloud plan: %s", err) + } + if !wpf.IsCloud() { + t.Fatalf("failed to open cloud file as a cloud plan") + } + if wpf.IsLocal() { + t.Fatalf("wrapped plan claims to be both kinds of plan at once") + } +} diff --git a/pkg/plans/planfile/reader.go b/pkg/plans/planfile/reader.go new file mode 100644 index 00000000000..45ed09df6ab --- /dev/null +++ b/pkg/plans/planfile/reader.go @@ -0,0 +1,279 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "archive/zip" + "bytes" + "fmt" + "io" + "os" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const tfstateFilename = "tfstate" +const tfstatePreviousFilename = "tfstate-prev" +const dependencyLocksFilename = ".terraform.lock.hcl" // matches the conventional name in an input configuration + +// ErrUnusableLocalPlan is an error wrapper to indicate that we *think* the +// input represents plan file data, but can't use it for some reason (as +// explained in the error text). Callers can check against this type with +// errors.As() if they need to distinguish between corrupt plan files and more +// fundamental problems like an empty file. +type ErrUnusableLocalPlan struct { + inner error +} + +func errUnusable(err error) *ErrUnusableLocalPlan { + return &ErrUnusableLocalPlan{inner: err} +} +func (e *ErrUnusableLocalPlan) Error() string { + return e.inner.Error() +} +func (e *ErrUnusableLocalPlan) Unwrap() error { + return e.inner +} + +// Reader is the main type used to read plan files. 
Create a Reader by calling +// Open. +// +// A plan file is a random-access file format, so methods of Reader must +// be used to access the individual portions of the file for further +// processing. +type Reader struct { + zip *zip.Reader +} + +// Open creates a Reader for the file at the given filename, or returns an error +// if the file doesn't seem to be a planfile. NOTE: Most commands that accept a +// plan file should use OpenWrapped instead, so they can support both local and +// cloud plan files. +func Open(filename string, enc encryption.PlanEncryption) (*Reader, error) { + raw, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + decrypted, diags := enc.DecryptPlan(raw) + if diags != nil { + return nil, diags + } + + r, err := zip.NewReader(bytes.NewReader(decrypted), int64(len(decrypted))) + if err != nil { + // Check to see if it's encrypted + if encrypted, _ := encryption.IsEncryptionPayload(decrypted); encrypted { + return nil, errUnusable(fmt.Errorf("the given plan file is encrypted and requires a valid encryption configuration to decrypt")) + } + + // To give a better error message, we'll sniff to see if this looks + // like our old plan format from versions prior to 0.12. + if b, sErr := os.ReadFile(filename); sErr == nil { + if bytes.HasPrefix(b, []byte("tfplan")) { + return nil, errUnusable(fmt.Errorf("the given plan file was created by an earlier version of OpenTofu, or an earlier version of Terraform; plan files cannot be shared between different OpenTofu or Terraform versions")) + } + } + return nil, err + } + + // Sniff to make sure this looks like a plan file, as opposed to any other + // random zip file the user might have around. 
+ var planFile *zip.File + for _, file := range r.File { + if file.Name == tfplanFilename { + planFile = file + break + } + } + if planFile == nil { + return nil, fmt.Errorf("the given file is not a valid plan file") + } + + // For now, we'll just accept the presence of the tfplan file as enough, + // and wait to validate the version when the caller requests the plan + // itself. + + return &Reader{ + zip: r, + }, nil +} + +// ReadPlan reads the plan embedded in the plan file. +// +// Errors can be returned for various reasons, including if the plan file +// is not of an appropriate format version, if it was created by a different +// version of OpenTofu, if it is invalid, etc. +func (r *Reader) ReadPlan() (*plans.Plan, error) { + var planFile *zip.File + for _, file := range r.zip.File { + if file.Name == tfplanFilename { + planFile = file + break + } + } + if planFile == nil { + // This should never happen because we checked for this file during + // Open, but we'll check anyway to be safe. + return nil, errUnusable(fmt.Errorf("the plan file is invalid")) + } + + pr, err := planFile.Open() + if err != nil { + return nil, errUnusable(fmt.Errorf("failed to retrieve plan from plan file: %w", err)) + } + + // There's a slight mismatch in how plans.Plan is modeled vs. how + // the underlying plan file format works, because the "tfplan" embedded + // file contains only some top-level metadata and the planned changes, + // and not the previous run or prior states. Therefore we need to + // build this up in multiple steps. + // This is some technical debt because historically we considered the + // planned changes and prior state as totally separate, but later realized + // that it made sense for a plans.Plan to include the prior state directly + // so we can see what state the plan applies to. Hopefully later we'll + // clean this up some more so that we don't have two different ways to + // access the prior state (this and the ReadStateFile method). 
+ ret, err := readTfplan(pr) + if err != nil { + return nil, errUnusable(err) + } + + prevRunStateFile, err := r.ReadPrevStateFile() + if err != nil { + return nil, errUnusable(fmt.Errorf("failed to read previous run state from plan file: %w", err)) + } + priorStateFile, err := r.ReadStateFile() + if err != nil { + return nil, errUnusable(fmt.Errorf("failed to read prior state from plan file: %w", err)) + } + + ret.PrevRunState = prevRunStateFile.State + ret.PriorState = priorStateFile.State + + return ret, nil +} + +// ReadStateFile reads the state file embedded in the plan file, which +// represents the "PriorState" as defined in plans.Plan. +// +// If the plan file contains no embedded state file, the returned error is +// statefile.ErrNoState. +func (r *Reader) ReadStateFile() (*statefile.File, error) { + for _, file := range r.zip.File { + if file.Name == tfstateFilename { + r, err := file.Open() + if err != nil { + return nil, errUnusable(fmt.Errorf("failed to extract state from plan file: %w", err)) + } + return statefile.Read(r, encryption.StateEncryptionDisabled()) + } + } + return nil, errUnusable(statefile.ErrNoState) +} + +// ReadPrevStateFile reads the previous state file embedded in the plan file, which +// represents the "PrevRunState" as defined in plans.Plan. +// +// If the plan file contains no embedded previous state file, the returned error is +// statefile.ErrNoState. +func (r *Reader) ReadPrevStateFile() (*statefile.File, error) { + for _, file := range r.zip.File { + if file.Name == tfstatePreviousFilename { + r, err := file.Open() + if err != nil { + return nil, errUnusable(fmt.Errorf("failed to extract previous state from plan file: %w", err)) + } + return statefile.Read(r, encryption.StateEncryptionDisabled()) + } + } + return nil, errUnusable(statefile.ErrNoState) +} + +// ReadConfigSnapshot reads the configuration snapshot embedded in the plan +// file. 
+// +// This is a lower-level alternative to ReadConfig that just extracts the +// source files, without attempting to parse them. +func (r *Reader) ReadConfigSnapshot() (*configload.Snapshot, error) { + return readConfigSnapshot(r.zip) +} + +// ReadConfig reads the configuration embedded in the plan file. +// +// Internally this function delegates to the configs/configload package to +// parse the embedded configuration and so it returns diagnostics (rather than +// a native Go error as with other methods on Reader). +func (r *Reader) ReadConfig(rootCall configs.StaticModuleCall) (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + snap, err := r.ReadConfigSnapshot() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read configuration from plan file", + fmt.Sprintf("The configuration file snapshot in the plan file could not be read: %s.", err), + )) + return nil, diags + } + + loader := configload.NewLoaderFromSnapshot(snap) + rootDir := snap.Modules[""].Dir // Root module base directory + config, configDiags := loader.LoadConfig(rootDir, rootCall) + diags = diags.Append(configDiags) + + return config, diags +} + +// ReadDependencyLocks reads the dependency lock information embedded in +// the plan file. +// +// Some test codepaths create plan files without dependency lock information, +// but all main codepaths should populate this. If reading a file without +// the dependency information, this will return error diagnostics. 
+func (r *Reader) ReadDependencyLocks() (*depsfile.Locks, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + for _, file := range r.zip.File { + if file.Name == dependencyLocksFilename { + r, err := file.Open() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read dependency locks from plan file", + fmt.Sprintf("Couldn't read the dependency lock information embedded in the plan file: %s.", err), + )) + return nil, diags + } + src, err := io.ReadAll(r) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read dependency locks from plan file", + fmt.Sprintf("Couldn't read the dependency lock information embedded in the plan file: %s.", err), + )) + return nil, diags + } + locks, moreDiags := depsfile.LoadLocksFromBytes(src, "") + diags = diags.Append(moreDiags) + return locks, diags + } + } + + // If we fall out here then this is a file without dependency information. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan has no dependency lock information", + "The specified saved plan file does not include any dependency lock information. 
This is a bug in the previous run of OpenTofu that created this file.", + )) + return nil, diags +} diff --git a/pkg/plans/planfile/testdata/cloudplan.json b/pkg/plans/planfile/testdata/cloudplan.json new file mode 100644 index 00000000000..0a1c73302a2 --- /dev/null +++ b/pkg/plans/planfile/testdata/cloudplan.json @@ -0,0 +1,5 @@ +{ + "remote_plan_format": 1, + "run_id": "run-GXfuHMkbyHccAGUg", + "hostname": "app.terraform.io" +} diff --git a/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf new file mode 100644 index 00000000000..2f4d0f1a0b8 --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_a.tf @@ -0,0 +1,4 @@ + +module "child_c" { + source = "./child_c" +} diff --git a/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf new file mode 100644 index 00000000000..785d98d98ac --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_a/child_c/child_c.tf @@ -0,0 +1,4 @@ + +output "hello" { + value = "Hello from child_c" +} diff --git a/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf new file mode 100644 index 00000000000..145576a365e --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b.child_d/child_d.tf @@ -0,0 +1,4 @@ + +output "hello" { + value = "Hello from child_d" +} diff --git a/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf new file mode 100644 index 00000000000..4a1b247d39c --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/.terraform/modules/child_b/child_b.tf @@ -0,0 +1,5 @@ + +module 
"child_d" { + source = "example.com/foo/bar_d/baz" + # Intentionally no version here +} diff --git a/pkg/plans/planfile/testdata/test-config/.terraform/modules/modules.json b/pkg/plans/planfile/testdata/test-config/.terraform/modules/modules.json new file mode 100644 index 00000000000..ba691877ff1 --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/.terraform/modules/modules.json @@ -0,0 +1,32 @@ +{ + "Modules": [ + { + "Key": "", + "Source": "", + "Dir": "testdata/test-config" + }, + { + "Key": "child_a", + "Source": "example.com/foo/bar_a/baz", + "Version": "1.0.1", + "Dir": "testdata/test-config/.terraform/modules/child_a" + }, + { + "Key": "child_b", + "Source": "example.com/foo/bar_b/baz", + "Version": "1.0.0", + "Dir": "testdata/test-config/.terraform/modules/child_b" + }, + { + "Key": "child_a.child_c", + "Source": "./child_c", + "Dir": "testdata/test-config/.terraform/modules/child_a/child_c" + }, + { + "Key": "child_b.child_d", + "Source": "example.com/foo/bar_d/baz", + "Version": "1.2.0", + "Dir": "testdata/test-config/.terraform/modules/child_b.child_d" + } + ] +} diff --git a/pkg/plans/planfile/testdata/test-config/root.tf b/pkg/plans/planfile/testdata/test-config/root.tf new file mode 100644 index 00000000000..8a4473942da --- /dev/null +++ b/pkg/plans/planfile/testdata/test-config/root.tf @@ -0,0 +1,10 @@ + +module "child_a" { + source = "example.com/foo/bar_a/baz" + version = ">= 1.0.0" +} + +module "child_b" { + source = "example.com/foo/bar_b/baz" + version = ">= 1.0.0" +} diff --git a/pkg/plans/planfile/tfplan.go b/pkg/plans/planfile/tfplan.go new file mode 100644 index 00000000000..4cf63c9a205 --- /dev/null +++ b/pkg/plans/planfile/tfplan.go @@ -0,0 +1,909 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "fmt" + "io" + "time" + + "github.com/zclconf/go-cty/cty" + "google.golang.org/protobuf/proto" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/lang/globalref" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/internal/planproto" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/version" +) + +const tfplanFormatVersion = 3 +const tfplanFilename = "tfplan" + +// --------------------------------------------------------------------------- +// This file deals with the internal structure of the "tfplan" sub-file within +// the plan file format. It's all private API, wrapped by methods defined +// elsewhere. This is the only file that should import the +// ../internal/planproto package, which contains the ugly stubs generated +// by the protobuf compiler. +// --------------------------------------------------------------------------- + +// readTfplan reads a protobuf-encoded description from the plan portion of +// a plan file, which is stored in a special file in the archive called +// "tfplan". 
+func readTfplan(r io.Reader) (*plans.Plan, error) { + src, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + var rawPlan planproto.Plan + err = proto.Unmarshal(src, &rawPlan) + if err != nil { + return nil, fmt.Errorf("parse error: %w", err) + } + + if rawPlan.Version != tfplanFormatVersion { + return nil, fmt.Errorf("unsupported plan file format version %d; only version %d is supported", rawPlan.Version, tfplanFormatVersion) + } + + if rawPlan.TerraformVersion != version.String() { + return nil, fmt.Errorf("plan file was created by OpenTofu or Terraform %s, but this is %s; plan files cannot be transferred between different versions of OpenTofu / Terraform", rawPlan.TerraformVersion, version.String()) + } + + plan := &plans.Plan{ + VariableValues: map[string]plans.DynamicValue{}, + Changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{}, + Resources: []*plans.ResourceInstanceChangeSrc{}, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{}, + Checks: &states.CheckResults{}, + } + + plan.Errored = rawPlan.Errored + + switch rawPlan.UiMode { + case planproto.Mode_NORMAL: + plan.UIMode = plans.NormalMode + case planproto.Mode_DESTROY: + plan.UIMode = plans.DestroyMode + case planproto.Mode_REFRESH_ONLY: + plan.UIMode = plans.RefreshOnlyMode + default: + return nil, fmt.Errorf("plan has invalid mode %s", rawPlan.UiMode) + } + + for _, rawOC := range rawPlan.OutputChanges { + name := rawOC.Name + change, err := changeFromTfplan(rawOC.Change) + if err != nil { + return nil, fmt.Errorf("invalid plan for output %q: %w", name, err) + } + + plan.Changes.Outputs = append(plan.Changes.Outputs, &plans.OutputChangeSrc{ + // All output values saved in the plan file are root module outputs, + // since we don't retain others. (They can be easily recomputed + // during apply). 
+ Addr: addrs.OutputValue{Name: name}.Absolute(addrs.RootModuleInstance), + ChangeSrc: *change, + Sensitive: rawOC.Sensitive, + }) + } + + plan.Checks.ConfigResults = addrs.MakeMap[addrs.ConfigCheckable, *states.CheckResultAggregate]() + for _, rawCRs := range rawPlan.CheckResults { + aggr := &states.CheckResultAggregate{} + switch rawCRs.Status { + case planproto.CheckResults_UNKNOWN: + aggr.Status = checks.StatusUnknown + case planproto.CheckResults_PASS: + aggr.Status = checks.StatusPass + case planproto.CheckResults_FAIL: + aggr.Status = checks.StatusFail + case planproto.CheckResults_ERROR: + aggr.Status = checks.StatusError + default: + return nil, fmt.Errorf("aggregate check results for %s have unsupported status %#v", rawCRs.ConfigAddr, rawCRs.Status) + } + + var objKind addrs.CheckableKind + switch rawCRs.Kind { + case planproto.CheckResults_RESOURCE: + objKind = addrs.CheckableResource + case planproto.CheckResults_OUTPUT_VALUE: + objKind = addrs.CheckableOutputValue + case planproto.CheckResults_CHECK: + objKind = addrs.CheckableCheck + case planproto.CheckResults_INPUT_VARIABLE: + objKind = addrs.CheckableInputVariable + default: + return nil, fmt.Errorf("aggregate check results for %s have unsupported object kind %s", rawCRs.ConfigAddr, objKind) + } + + // Some trickiness here: we only have an address parser for + // addrs.Checkable and not for addrs.ConfigCheckable, but that's okay + // because once we have an addrs.Checkable we can always derive an + // addrs.ConfigCheckable from it, and a ConfigCheckable should always + // be the same syntax as a Checkable with no index information and + // thus we can reuse the same parser for both here. 
+ configAddrProxy, diags := addrs.ParseCheckableStr(objKind, rawCRs.ConfigAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + configAddr := configAddrProxy.ConfigCheckable() + if configAddr.String() != configAddrProxy.String() { + // This is how we catch if the config address included index + // information that would be allowed in a Checkable but not + // in a ConfigCheckable. + return nil, fmt.Errorf("invalid checkable config address %s", rawCRs.ConfigAddr) + } + + aggr.ObjectResults = addrs.MakeMap[addrs.Checkable, *states.CheckResultObject]() + for _, rawCR := range rawCRs.Objects { + objectAddr, diags := addrs.ParseCheckableStr(objKind, rawCR.ObjectAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + if !addrs.Equivalent(objectAddr.ConfigCheckable(), configAddr) { + return nil, fmt.Errorf("checkable object %s should not be grouped under %s", objectAddr, configAddr) + } + + obj := &states.CheckResultObject{ + FailureMessages: rawCR.FailureMessages, + } + switch rawCR.Status { + case planproto.CheckResults_UNKNOWN: + obj.Status = checks.StatusUnknown + case planproto.CheckResults_PASS: + obj.Status = checks.StatusPass + case planproto.CheckResults_FAIL: + obj.Status = checks.StatusFail + case planproto.CheckResults_ERROR: + obj.Status = checks.StatusError + default: + return nil, fmt.Errorf("object check results for %s has unsupported status %#v", rawCR.ObjectAddr, rawCR.Status) + } + + aggr.ObjectResults.Put(objectAddr, obj) + } + // If we ended up with no elements in the map then we'll just nil it, + // primarily just to make life easier for our round-trip tests. + if aggr.ObjectResults.Len() == 0 { + aggr.ObjectResults.Elems = nil + } + + plan.Checks.ConfigResults.Put(configAddr, aggr) + } + // If we ended up with no elements in the map then we'll just nil it, + // primarily just to make life easier for our round-trip tests. 
+ if plan.Checks.ConfigResults.Len() == 0 { + plan.Checks.ConfigResults.Elems = nil + } + + for _, rawRC := range rawPlan.ResourceChanges { + change, err := resourceChangeFromTfplan(rawRC) + if err != nil { + // errors from resourceChangeFromTfplan already include context + return nil, err + } + + plan.Changes.Resources = append(plan.Changes.Resources, change) + } + + for _, rawRC := range rawPlan.ResourceDrift { + change, err := resourceChangeFromTfplan(rawRC) + if err != nil { + // errors from resourceChangeFromTfplan already include context + return nil, err + } + + plan.DriftedResources = append(plan.DriftedResources, change) + } + + for _, rawRA := range rawPlan.RelevantAttributes { + ra, err := resourceAttrFromTfplan(rawRA) + if err != nil { + return nil, err + } + plan.RelevantAttributes = append(plan.RelevantAttributes, ra) + } + + for _, rawTargetAddr := range rawPlan.TargetAddrs { + target, diags := addrs.ParseTargetStr(rawTargetAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("plan contains invalid target address %q: %w", target, diags.Err()) + } + plan.TargetAddrs = append(plan.TargetAddrs, target.Subject) + } + + for _, rawReplaceAddr := range rawPlan.ForceReplaceAddrs { + addr, diags := addrs.ParseAbsResourceInstanceStr(rawReplaceAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("plan contains invalid force-replace address %q: %w", addr, diags.Err()) + } + plan.ForceReplaceAddrs = append(plan.ForceReplaceAddrs, addr) + } + + for name, rawVal := range rawPlan.Variables { + val, err := valueFromTfplan(rawVal) + if err != nil { + return nil, fmt.Errorf("invalid value for input variable %q: %w", name, err) + } + plan.VariableValues[name] = val + } + + if rawBackend := rawPlan.Backend; rawBackend == nil { + return nil, fmt.Errorf("plan file has no backend settings; backend settings are required") + } else { + config, err := valueFromTfplan(rawBackend.Config) + if err != nil { + return nil, fmt.Errorf("plan file has invalid backend 
configuration: %w", err) + } + plan.Backend = plans.Backend{ + Type: rawBackend.Type, + Config: config, + Workspace: rawBackend.Workspace, + } + } + + if plan.Timestamp, err = time.Parse(time.RFC3339, rawPlan.Timestamp); err != nil { + return nil, fmt.Errorf("invalid value for timestamp %s: %w", rawPlan.Timestamp, err) + } + + return plan, nil +} + +func resourceChangeFromTfplan(rawChange *planproto.ResourceInstanceChange) (*plans.ResourceInstanceChangeSrc, error) { + if rawChange == nil { + // Should never happen in practice, since protobuf can't represent + // a nil value in a list. + return nil, fmt.Errorf("resource change object is absent") + } + + ret := &plans.ResourceInstanceChangeSrc{} + + if rawChange.Addr == "" { + // If "Addr" isn't populated then seems likely that this is a plan + // file created by an earlier version of OpenTofu, which had the + // same information spread over various other fields: + // ModulePath, Mode, Name, Type, and InstanceKey. + return nil, fmt.Errorf("no instance address for resource instance change; perhaps this plan was created by a different version of OpenTofu or a different version of Terraform?") + } + + instAddr, diags := addrs.ParseAbsResourceInstanceStr(rawChange.Addr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid resource instance address %q: %w", rawChange.Addr, diags.Err()) + } + prevRunAddr := instAddr + if rawChange.PrevRunAddr != "" { + prevRunAddr, diags = addrs.ParseAbsResourceInstanceStr(rawChange.PrevRunAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid resource instance previous run address %q: %w", rawChange.PrevRunAddr, diags.Err()) + } + } + + providerAddr, diags := addrs.ParseAbsProviderConfigStr(rawChange.Provider) + if diags.HasErrors() { + return nil, diags.Err() + } + ret.ProviderAddr = providerAddr + + ret.Addr = instAddr + ret.PrevRunAddr = prevRunAddr + + if rawChange.DeposedKey != "" { + if len(rawChange.DeposedKey) != 8 { + return nil, fmt.Errorf("deposed object for %s 
has invalid deposed key %q", ret.Addr, rawChange.DeposedKey) + } + ret.DeposedKey = states.DeposedKey(rawChange.DeposedKey) + } + + ret.RequiredReplace = cty.NewPathSet() + for _, p := range rawChange.RequiredReplace { + path, err := pathFromTfplan(p) + if err != nil { + return nil, fmt.Errorf("invalid path in required replace: %w", err) + } + ret.RequiredReplace.Add(path) + } + + change, err := changeFromTfplan(rawChange.Change) + if err != nil { + return nil, fmt.Errorf("invalid plan for resource %s: %w", ret.Addr, err) + } + + ret.ChangeSrc = *change + + switch rawChange.ActionReason { + case planproto.ResourceInstanceActionReason_NONE: + ret.ActionReason = plans.ResourceInstanceChangeNoReason + case planproto.ResourceInstanceActionReason_REPLACE_BECAUSE_CANNOT_UPDATE: + ret.ActionReason = plans.ResourceInstanceReplaceBecauseCannotUpdate + case planproto.ResourceInstanceActionReason_REPLACE_BECAUSE_TAINTED: + ret.ActionReason = plans.ResourceInstanceReplaceBecauseTainted + case planproto.ResourceInstanceActionReason_REPLACE_BY_REQUEST: + ret.ActionReason = plans.ResourceInstanceReplaceByRequest + case planproto.ResourceInstanceActionReason_REPLACE_BY_TRIGGERS: + ret.ActionReason = plans.ResourceInstanceReplaceByTriggers + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_RESOURCE_CONFIG: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseNoResourceConfig + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_WRONG_REPETITION: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseWrongRepetition + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_COUNT_INDEX: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseCountIndex + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_EACH_KEY: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseEachKey + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_MODULE: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseNoModule + case 
planproto.ResourceInstanceActionReason_READ_BECAUSE_CONFIG_UNKNOWN: + ret.ActionReason = plans.ResourceInstanceReadBecauseConfigUnknown + case planproto.ResourceInstanceActionReason_READ_BECAUSE_DEPENDENCY_PENDING: + ret.ActionReason = plans.ResourceInstanceReadBecauseDependencyPending + case planproto.ResourceInstanceActionReason_READ_BECAUSE_CHECK_NESTED: + ret.ActionReason = plans.ResourceInstanceReadBecauseCheckNested + case planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_MOVE_TARGET: + ret.ActionReason = plans.ResourceInstanceDeleteBecauseNoMoveTarget + default: + return nil, fmt.Errorf("resource has invalid action reason %s", rawChange.ActionReason) + } + + if len(rawChange.Private) != 0 { + ret.Private = rawChange.Private + } + + return ret, nil +} + +func changeFromTfplan(rawChange *planproto.Change) (*plans.ChangeSrc, error) { + if rawChange == nil { + return nil, fmt.Errorf("change object is absent") + } + + ret := &plans.ChangeSrc{} + + // -1 indicates that there is no index. We'll customize these below + // depending on the change action, and then decode. 
+ beforeIdx, afterIdx := -1, -1 + + switch rawChange.Action { + case planproto.Action_NOOP: + ret.Action = plans.NoOp + beforeIdx = 0 + afterIdx = 0 + case planproto.Action_CREATE: + ret.Action = plans.Create + afterIdx = 0 + case planproto.Action_READ: + ret.Action = plans.Read + beforeIdx = 0 + afterIdx = 1 + case planproto.Action_UPDATE: + ret.Action = plans.Update + beforeIdx = 0 + afterIdx = 1 + case planproto.Action_DELETE: + ret.Action = plans.Delete + beforeIdx = 0 + case planproto.Action_FORGET: + ret.Action = plans.Forget + beforeIdx = 0 + case planproto.Action_CREATE_THEN_DELETE: + ret.Action = plans.CreateThenDelete + beforeIdx = 0 + afterIdx = 1 + case planproto.Action_DELETE_THEN_CREATE: + ret.Action = plans.DeleteThenCreate + beforeIdx = 0 + afterIdx = 1 + default: + return nil, fmt.Errorf("invalid change action %s", rawChange.Action) + } + + if beforeIdx != -1 { + if l := len(rawChange.Values); l <= beforeIdx { + return nil, fmt.Errorf("incorrect number of values (%d) for %s change", l, rawChange.Action) + } + var err error + ret.Before, err = valueFromTfplan(rawChange.Values[beforeIdx]) + if err != nil { + return nil, fmt.Errorf("invalid \"before\" value: %w", err) + } + if ret.Before == nil { + return nil, fmt.Errorf("missing \"before\" value: %w", err) + } + } + if afterIdx != -1 { + if l := len(rawChange.Values); l <= afterIdx { + return nil, fmt.Errorf("incorrect number of values (%d) for %s change", l, rawChange.Action) + } + var err error + ret.After, err = valueFromTfplan(rawChange.Values[afterIdx]) + if err != nil { + return nil, fmt.Errorf("invalid \"after\" value: %w", err) + } + if ret.After == nil { + return nil, fmt.Errorf("missing \"after\" value: %w", err) + } + } + + if rawChange.Importing != nil { + ret.Importing = &plans.ImportingSrc{ + ID: rawChange.Importing.Id, + } + } + ret.GeneratedConfig = rawChange.GeneratedConfig + + sensitive := cty.NewValueMarks(marks.Sensitive) + beforeValMarks, err := 
pathValueMarksFromTfplan(rawChange.BeforeSensitivePaths, sensitive) + if err != nil { + return nil, fmt.Errorf("failed to decode before sensitive paths: %w", err) + } + afterValMarks, err := pathValueMarksFromTfplan(rawChange.AfterSensitivePaths, sensitive) + if err != nil { + return nil, fmt.Errorf("failed to decode after sensitive paths: %w", err) + } + if len(beforeValMarks) > 0 { + ret.BeforeValMarks = beforeValMarks + } + if len(afterValMarks) > 0 { + ret.AfterValMarks = afterValMarks + } + + return ret, nil +} + +func valueFromTfplan(rawV *planproto.DynamicValue) (plans.DynamicValue, error) { + if len(rawV.Msgpack) == 0 { // len(0) because that's the default value for a "bytes" in protobuf + return nil, fmt.Errorf("dynamic value does not have msgpack serialization") + } + + return plans.DynamicValue(rawV.Msgpack), nil +} + +// writeTfplan serializes the given plan into the protobuf-based format used +// for the "tfplan" portion of a plan file. +func writeTfplan(plan *plans.Plan, w io.Writer) error { + if plan == nil { + return fmt.Errorf("cannot write plan file for nil plan") + } + if plan.Changes == nil { + return fmt.Errorf("cannot write plan file with nil changeset") + } + + rawPlan := &planproto.Plan{ + Version: tfplanFormatVersion, + TerraformVersion: version.String(), + + Variables: map[string]*planproto.DynamicValue{}, + OutputChanges: []*planproto.OutputChange{}, + CheckResults: []*planproto.CheckResults{}, + ResourceChanges: []*planproto.ResourceInstanceChange{}, + ResourceDrift: []*planproto.ResourceInstanceChange{}, + } + + rawPlan.Errored = plan.Errored + + switch plan.UIMode { + case plans.NormalMode: + rawPlan.UiMode = planproto.Mode_NORMAL + case plans.DestroyMode: + rawPlan.UiMode = planproto.Mode_DESTROY + case plans.RefreshOnlyMode: + rawPlan.UiMode = planproto.Mode_REFRESH_ONLY + default: + return fmt.Errorf("plan has unsupported mode %s", plan.UIMode) + } + + for _, oc := range plan.Changes.Outputs { + // When serializing a plan we only 
retain the root outputs, since + // changes to these are externally-visible side effects (e.g. via + // terraform_remote_state). + if !oc.Addr.Module.IsRoot() { + continue + } + + name := oc.Addr.OutputValue.Name + + // Writing outputs as cty.DynamicPseudoType forces the stored values + // to also contain dynamic type information, so we can recover the + // original type when we read the values back in readTFPlan. + protoChange, err := changeToTfplan(&oc.ChangeSrc) + if err != nil { + return fmt.Errorf("cannot write output value %q: %w", name, err) + } + + rawPlan.OutputChanges = append(rawPlan.OutputChanges, &planproto.OutputChange{ + Name: name, + Change: protoChange, + Sensitive: oc.Sensitive, + }) + } + + if plan.Checks != nil { + for _, configElem := range plan.Checks.ConfigResults.Elems { + crs := configElem.Value + pcrs := &planproto.CheckResults{ + ConfigAddr: configElem.Key.String(), + } + switch crs.Status { + case checks.StatusUnknown: + pcrs.Status = planproto.CheckResults_UNKNOWN + case checks.StatusPass: + pcrs.Status = planproto.CheckResults_PASS + case checks.StatusFail: + pcrs.Status = planproto.CheckResults_FAIL + case checks.StatusError: + pcrs.Status = planproto.CheckResults_ERROR + default: + return fmt.Errorf("checkable configuration %s has unsupported aggregate status %s", configElem.Key, crs.Status) + } + switch kind := configElem.Key.CheckableKind(); kind { + case addrs.CheckableResource: + pcrs.Kind = planproto.CheckResults_RESOURCE + case addrs.CheckableOutputValue: + pcrs.Kind = planproto.CheckResults_OUTPUT_VALUE + case addrs.CheckableCheck: + pcrs.Kind = planproto.CheckResults_CHECK + case addrs.CheckableInputVariable: + pcrs.Kind = planproto.CheckResults_INPUT_VARIABLE + default: + return fmt.Errorf("checkable configuration %s has unsupported object type kind %s", configElem.Key, kind) + } + + for _, objectElem := range configElem.Value.ObjectResults.Elems { + cr := objectElem.Value + pcr := &planproto.CheckResults_ObjectResult{ + 
ObjectAddr: objectElem.Key.String(), + FailureMessages: objectElem.Value.FailureMessages, + } + switch cr.Status { + case checks.StatusUnknown: + pcr.Status = planproto.CheckResults_UNKNOWN + case checks.StatusPass: + pcr.Status = planproto.CheckResults_PASS + case checks.StatusFail: + pcr.Status = planproto.CheckResults_FAIL + case checks.StatusError: + pcr.Status = planproto.CheckResults_ERROR + default: + return fmt.Errorf("checkable object %s has unsupported status %s", objectElem.Key, crs.Status) + } + pcrs.Objects = append(pcrs.Objects, pcr) + } + + rawPlan.CheckResults = append(rawPlan.CheckResults, pcrs) + } + } + + for _, rc := range plan.Changes.Resources { + rawRC, err := resourceChangeToTfplan(rc) + if err != nil { + return err + } + rawPlan.ResourceChanges = append(rawPlan.ResourceChanges, rawRC) + } + + for _, rc := range plan.DriftedResources { + rawRC, err := resourceChangeToTfplan(rc) + if err != nil { + return err + } + rawPlan.ResourceDrift = append(rawPlan.ResourceDrift, rawRC) + } + + for _, ra := range plan.RelevantAttributes { + rawRA, err := resourceAttrToTfplan(ra) + if err != nil { + return err + } + rawPlan.RelevantAttributes = append(rawPlan.RelevantAttributes, rawRA) + } + + for _, targetAddr := range plan.TargetAddrs { + rawPlan.TargetAddrs = append(rawPlan.TargetAddrs, targetAddr.String()) + } + + for _, replaceAddr := range plan.ForceReplaceAddrs { + rawPlan.ForceReplaceAddrs = append(rawPlan.ForceReplaceAddrs, replaceAddr.String()) + } + + for name, val := range plan.VariableValues { + rawPlan.Variables[name] = valueToTfplan(val) + } + + if plan.Backend.Type == "" || plan.Backend.Config == nil { + // This suggests a bug in the code that created the plan, since it + // ought to always have a backend populated, even if it's the default + // "local" backend with a local state file. 
+ return fmt.Errorf("plan does not have a backend configuration") + } + + rawPlan.Backend = &planproto.Backend{ + Type: plan.Backend.Type, + Config: valueToTfplan(plan.Backend.Config), + Workspace: plan.Backend.Workspace, + } + + rawPlan.Timestamp = plan.Timestamp.Format(time.RFC3339) + + src, err := proto.Marshal(rawPlan) + if err != nil { + return fmt.Errorf("serialization error: %w", err) + } + + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write plan to plan file: %w", err) + } + + return nil +} + +func resourceAttrToTfplan(ra globalref.ResourceAttr) (*planproto.PlanResourceAttr, error) { + res := &planproto.PlanResourceAttr{} + + res.Resource = ra.Resource.String() + attr, err := pathToTfplan(ra.Attr) + if err != nil { + return res, err + } + res.Attr = attr + return res, nil +} + +func resourceAttrFromTfplan(ra *planproto.PlanResourceAttr) (globalref.ResourceAttr, error) { + var res globalref.ResourceAttr + if ra.Resource == "" { + return res, fmt.Errorf("missing resource address from relevant attribute") + } + + instAddr, diags := addrs.ParseAbsResourceInstanceStr(ra.Resource) + if diags.HasErrors() { + return res, fmt.Errorf("invalid resource instance address %q in relevant attributes: %w", ra.Resource, diags.Err()) + } + + res.Resource = instAddr + path, err := pathFromTfplan(ra.Attr) + if err != nil { + return res, fmt.Errorf("invalid path in %q relevant attribute: %w", res.Resource, err) + } + + res.Attr = path + return res, nil +} + +func resourceChangeToTfplan(change *plans.ResourceInstanceChangeSrc) (*planproto.ResourceInstanceChange, error) { + ret := &planproto.ResourceInstanceChange{} + + if change.PrevRunAddr.Resource.Resource.Type == "" { + // Suggests that an old caller wasn't yet updated to populate this + // properly. All code that generates plans should populate this field, + // even if it's just to write in the same value as in change.Addr. 
+ change.PrevRunAddr = change.Addr + } + + ret.Addr = change.Addr.String() + ret.PrevRunAddr = change.PrevRunAddr.String() + if ret.PrevRunAddr == ret.Addr { + // In the on-disk format we leave PrevRunAddr unpopulated in the common + // case where it's the same as Addr, and then fill it back in again on + // read. + ret.PrevRunAddr = "" + } + + ret.DeposedKey = string(change.DeposedKey) + ret.Provider = change.ProviderAddr.String() + + requiredReplace := change.RequiredReplace.List() + ret.RequiredReplace = make([]*planproto.Path, 0, len(requiredReplace)) + for _, p := range requiredReplace { + path, err := pathToTfplan(p) + if err != nil { + return nil, fmt.Errorf("invalid path in required replace: %w", err) + } + ret.RequiredReplace = append(ret.RequiredReplace, path) + } + + valChange, err := changeToTfplan(&change.ChangeSrc) + if err != nil { + return nil, fmt.Errorf("failed to serialize resource %s change: %w", change.Addr, err) + } + ret.Change = valChange + + switch change.ActionReason { + case plans.ResourceInstanceChangeNoReason: + ret.ActionReason = planproto.ResourceInstanceActionReason_NONE + case plans.ResourceInstanceReplaceBecauseCannotUpdate: + ret.ActionReason = planproto.ResourceInstanceActionReason_REPLACE_BECAUSE_CANNOT_UPDATE + case plans.ResourceInstanceReplaceBecauseTainted: + ret.ActionReason = planproto.ResourceInstanceActionReason_REPLACE_BECAUSE_TAINTED + case plans.ResourceInstanceReplaceByRequest: + ret.ActionReason = planproto.ResourceInstanceActionReason_REPLACE_BY_REQUEST + case plans.ResourceInstanceReplaceByTriggers: + ret.ActionReason = planproto.ResourceInstanceActionReason_REPLACE_BY_TRIGGERS + case plans.ResourceInstanceDeleteBecauseNoResourceConfig: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_RESOURCE_CONFIG + case plans.ResourceInstanceDeleteBecauseWrongRepetition: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_WRONG_REPETITION + case 
plans.ResourceInstanceDeleteBecauseCountIndex: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_COUNT_INDEX + case plans.ResourceInstanceDeleteBecauseEachKey: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_EACH_KEY + case plans.ResourceInstanceDeleteBecauseNoModule: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_MODULE + case plans.ResourceInstanceReadBecauseConfigUnknown: + ret.ActionReason = planproto.ResourceInstanceActionReason_READ_BECAUSE_CONFIG_UNKNOWN + case plans.ResourceInstanceReadBecauseDependencyPending: + ret.ActionReason = planproto.ResourceInstanceActionReason_READ_BECAUSE_DEPENDENCY_PENDING + case plans.ResourceInstanceReadBecauseCheckNested: + ret.ActionReason = planproto.ResourceInstanceActionReason_READ_BECAUSE_CHECK_NESTED + case plans.ResourceInstanceDeleteBecauseNoMoveTarget: + ret.ActionReason = planproto.ResourceInstanceActionReason_DELETE_BECAUSE_NO_MOVE_TARGET + default: + return nil, fmt.Errorf("resource %s has unsupported action reason %s", change.Addr, change.ActionReason) + } + + if len(change.Private) > 0 { + ret.Private = change.Private + } + + return ret, nil +} + +func changeToTfplan(change *plans.ChangeSrc) (*planproto.Change, error) { + ret := &planproto.Change{} + + before := valueToTfplan(change.Before) + after := valueToTfplan(change.After) + + beforeSensitivePaths, err := pathValueMarksToTfplan(change.BeforeValMarks) + if err != nil { + return nil, err + } + afterSensitivePaths, err := pathValueMarksToTfplan(change.AfterValMarks) + if err != nil { + return nil, err + } + ret.BeforeSensitivePaths = beforeSensitivePaths + ret.AfterSensitivePaths = afterSensitivePaths + + if change.Importing != nil { + ret.Importing = &planproto.Importing{ + Id: change.Importing.ID, + } + + } + ret.GeneratedConfig = change.GeneratedConfig + + switch change.Action { + case plans.NoOp: + ret.Action = planproto.Action_NOOP + ret.Values = 
[]*planproto.DynamicValue{before} // before and after should be identical + case plans.Create: + ret.Action = planproto.Action_CREATE + ret.Values = []*planproto.DynamicValue{after} + case plans.Read: + ret.Action = planproto.Action_READ + ret.Values = []*planproto.DynamicValue{before, after} + case plans.Update: + ret.Action = planproto.Action_UPDATE + ret.Values = []*planproto.DynamicValue{before, after} + case plans.Delete: + ret.Action = planproto.Action_DELETE + ret.Values = []*planproto.DynamicValue{before} + case plans.Forget: + ret.Action = planproto.Action_FORGET + ret.Values = []*planproto.DynamicValue{before} + case plans.DeleteThenCreate: + ret.Action = planproto.Action_DELETE_THEN_CREATE + ret.Values = []*planproto.DynamicValue{before, after} + case plans.CreateThenDelete: + ret.Action = planproto.Action_CREATE_THEN_DELETE + ret.Values = []*planproto.DynamicValue{before, after} + default: + return nil, fmt.Errorf("invalid change action %s", change.Action) + } + + return ret, nil +} + +func valueToTfplan(val plans.DynamicValue) *planproto.DynamicValue { + if val == nil { + // protobuf can't represent nil, so we'll represent it as a + // DynamicValue that has no serializations at all. 
+ return &planproto.DynamicValue{} + } + return &planproto.DynamicValue{ + Msgpack: []byte(val), + } +} + +func pathValueMarksFromTfplan(paths []*planproto.Path, marks cty.ValueMarks) ([]cty.PathValueMarks, error) { + ret := make([]cty.PathValueMarks, 0, len(paths)) + for _, p := range paths { + path, err := pathFromTfplan(p) + if err != nil { + return nil, err + } + ret = append(ret, cty.PathValueMarks{ + Path: path, + Marks: marks, + }) + } + return ret, nil +} + +func pathValueMarksToTfplan(pvm []cty.PathValueMarks) ([]*planproto.Path, error) { + ret := make([]*planproto.Path, 0, len(pvm)) + for _, p := range pvm { + path, err := pathToTfplan(p.Path) + if err != nil { + return nil, err + } + ret = append(ret, path) + } + return ret, nil +} + +func pathFromTfplan(path *planproto.Path) (cty.Path, error) { + ret := make([]cty.PathStep, 0, len(path.Steps)) + for _, step := range path.Steps { + switch s := step.Selector.(type) { + case *planproto.Path_Step_ElementKey: + dynamicVal, err := valueFromTfplan(s.ElementKey) + if err != nil { + return nil, fmt.Errorf("error decoding path index step: %w", err) + } + ty, err := dynamicVal.ImpliedType() + if err != nil { + return nil, fmt.Errorf("error determining path index type: %w", err) + } + val, err := dynamicVal.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding path index value: %w", err) + } + ret = append(ret, cty.IndexStep{Key: val}) + case *planproto.Path_Step_AttributeName: + ret = append(ret, cty.GetAttrStep{Name: s.AttributeName}) + default: + return nil, fmt.Errorf("Unsupported path step %t", step.Selector) + } + } + return ret, nil +} + +func pathToTfplan(path cty.Path) (*planproto.Path, error) { + steps := make([]*planproto.Path_Step, 0, len(path)) + for _, step := range path { + switch s := step.(type) { + case cty.IndexStep: + value, err := plans.NewDynamicValue(s.Key, s.Key.Type()) + if err != nil { + return nil, fmt.Errorf("Error encoding path step: %w", err) + } + steps = append(steps, 
&planproto.Path_Step{ + Selector: &planproto.Path_Step_ElementKey{ + ElementKey: valueToTfplan(value), + }, + }) + case cty.GetAttrStep: + steps = append(steps, &planproto.Path_Step{ + Selector: &planproto.Path_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + default: + return nil, fmt.Errorf("Unsupported path step %#v (%t)", step, step) + } + } + return &planproto.Path{ + Steps: steps, + }, nil +} diff --git a/pkg/plans/planfile/tfplan_test.go b/pkg/plans/planfile/tfplan_test.go new file mode 100644 index 00000000000..a0963204e45 --- /dev/null +++ b/pkg/plans/planfile/tfplan_test.go @@ -0,0 +1,444 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "bytes" + "testing" + + "github.com/go-test/deep" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/lang/globalref" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestTFPlanRoundTrip(t *testing.T) { + objTy := cty.Object(map[string]cty.Type{ + "id": cty.String, + }) + + plan := &plans.Plan{ + VariableValues: map[string]plans.DynamicValue{ + "foo": mustNewDynamicValueStr("foo value"), + }, + Changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + After: mustDynamicOutputValue("bar value"), + }, + Sensitive: false, + }, + { + Addr: addrs.OutputValue{Name: "baz"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: mustDynamicOutputValue("baz value"), + After: mustDynamicOutputValue("baz value"), + }, + Sensitive: false, + }, + { + Addr: addrs.OutputValue{Name: 
"secret"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: mustDynamicOutputValue("old secret value"), + After: mustDynamicOutputValue("new secret value"), + }, + Sensitive: true, + }, + }, + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.DeleteThenCreate, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo-bar-baz"), + "boop": cty.ListVal([]cty.Value{ + cty.StringVal("beep"), + }), + }), objTy), + After: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "boop": cty.ListVal([]cty.Value{ + cty.StringVal("beep"), + cty.StringVal("honk"), + }), + }), objTy), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("boop").IndexInt(1), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + RequiredReplace: cty.NewPathSet( + cty.GetAttrPath("boop"), + ), + ActionReason: plans.ResourceInstanceReplaceBecauseCannotUpdate, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + DeposedKey: "foodface", + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + 
Action: plans.Delete, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar-baz-foo"), + }), objTy), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "forget", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "forget", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Forget, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar-baz-forget"), + }), objTy), + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "importing", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "importing", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.NoOp, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("testing"), + }), objTy), + After: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("testing"), + }), objTy), + Importing: &plans.ImportingSrc{ID: "testing"}, + GeneratedConfig: "resource \\\"test_thing\\\" \\\"importing\\\" {}", + }, + }, + }, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + 
Name: "woot", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.DeleteThenCreate, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo-bar-baz"), + "boop": cty.ListVal([]cty.Value{ + cty.StringVal("beep"), + }), + }), objTy), + After: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "boop": cty.ListVal([]cty.Value{ + cty.StringVal("beep"), + cty.StringVal("bonk"), + }), + }), objTy), + AfterValMarks: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("boop").IndexInt(1), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + }, + }, + RelevantAttributes: []globalref.ResourceAttr{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + Attr: cty.GetAttrPath("boop").Index(cty.NumberIntVal(1)), + }, + }, + Checks: &states.CheckResults{ + ConfigResults: addrs.MakeMap( + addrs.MakeMapElem[addrs.ConfigCheckable]( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.InModule(addrs.RootModule), + &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem[addrs.Checkable]( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{"Oh no!"}, + }, + ), + ), + }, + ), + addrs.MakeMapElem[addrs.ConfigCheckable]( + addrs.Check{ + Name: "check", + }.InModule(addrs.RootModule), + &states.CheckResultAggregate{ + Status: checks.StatusFail, + ObjectResults: addrs.MakeMap( + addrs.MakeMapElem[addrs.Checkable]( + addrs.Check{ + Name: 
"check", + }.Absolute(addrs.RootModuleInstance), + &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{"check failed"}, + }, + ), + ), + }, + ), + ), + }, + TargetAddrs: []addrs.Targetable{ + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Absolute(addrs.RootModuleInstance), + }, + Backend: plans.Backend{ + Type: "local", + Config: mustNewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + ), + Workspace: "default", + }, + } + + var buf bytes.Buffer + err := writeTfplan(plan, &buf) + if err != nil { + t.Fatal(err) + } + + newPlan, err := readTfplan(&buf) + if err != nil { + t.Fatal(err) + } + + { + oldDepth := deep.MaxDepth + oldCompare := deep.CompareUnexportedFields + deep.MaxDepth = 20 + deep.CompareUnexportedFields = true + defer func() { + deep.MaxDepth = oldDepth + deep.CompareUnexportedFields = oldCompare + }() + } + for _, problem := range deep.Equal(newPlan, plan) { + t.Error(problem) + } +} + +func mustDynamicOutputValue(val string) plans.DynamicValue { + ret, err := plans.NewDynamicValue(cty.StringVal(val), cty.DynamicPseudoType) + if err != nil { + panic(err) + } + return ret +} + +func mustNewDynamicValue(val cty.Value, ty cty.Type) plans.DynamicValue { + ret, err := plans.NewDynamicValue(val, ty) + if err != nil { + panic(err) + } + return ret +} + +func mustNewDynamicValueStr(val string) plans.DynamicValue { + realVal := cty.StringVal(val) + ret, err := plans.NewDynamicValue(realVal, cty.String) + if err != nil { + panic(err) + } + return ret +} + +// TestTFPlanRoundTripDestroy ensures that encoding and decoding null values for +// destroy doesn't leave us with any nil values. 
+func TestTFPlanRoundTripDestroy(t *testing.T) { + objTy := cty.Object(map[string]cty.Type{ + "id": cty.String, + }) + + plan := &plans.Plan{ + Changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{ + { + Addr: addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: mustDynamicOutputValue("output"), + After: mustNewDynamicValue(cty.NullVal(cty.String), cty.String), + }, + }, + }, + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + PrevRunAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: mustNewDynamicValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo-bar-baz"), + }), objTy), + After: mustNewDynamicValue(cty.NullVal(objTy), objTy), + }, + }, + }, + }, + DriftedResources: []*plans.ResourceInstanceChangeSrc{}, + TargetAddrs: []addrs.Targetable{ + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "woot", + }.Absolute(addrs.RootModuleInstance), + }, + Backend: plans.Backend{ + Type: "local", + Config: mustNewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + ), + Workspace: "default", + }, + } + + var buf bytes.Buffer + err := writeTfplan(plan, &buf) + if err != nil { + t.Fatal(err) + } + + newPlan, err := readTfplan(&buf) + if err != nil { + t.Fatal(err) + } + + for _, rics := range newPlan.Changes.Resources { + ric, err := rics.Decode(objTy) + if err != nil { + t.Fatal(err) + } + + if 
ric.After == cty.NilVal { + t.Fatalf("unexpected nil After value: %#v\n", ric) + } + } + for _, ocs := range newPlan.Changes.Outputs { + oc, err := ocs.Decode() + if err != nil { + t.Fatal(err) + } + + if oc.After == cty.NilVal { + t.Fatalf("unexpected nil After value: %#v\n", ocs) + } + } +} diff --git a/pkg/plans/planfile/wrapped.go b/pkg/plans/planfile/wrapped.go new file mode 100644 index 00000000000..ebe87fe1a8e --- /dev/null +++ b/pkg/plans/planfile/wrapped.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "errors" + "fmt" + + "github.com/kubegems/opentofu/pkg/cloud/cloudplan" + "github.com/kubegems/opentofu/pkg/encryption" +) + +// WrappedPlanFile is a sum type that represents a saved plan, loaded from a +// file path passed on the command line. If the specified file was a thick local +// plan file, the Local field will be populated; if it was a bookmark for a +// remote cloud plan, the Cloud field will be populated. In both cases, the +// other field is expected to be nil. Finally, the outer struct is also expected +// to be used as a pointer, so that a nil value can represent the absence of any +// plan file. +type WrappedPlanFile struct { + local *Reader + cloud *cloudplan.SavedPlanBookmark +} + +func (w *WrappedPlanFile) IsLocal() bool { + return w != nil && w.local != nil +} + +func (w *WrappedPlanFile) IsCloud() bool { + return w != nil && w.cloud != nil +} + +// Local checks whether the wrapped value is a local plan file, and returns it if available. +func (w *WrappedPlanFile) Local() (*Reader, bool) { + if w != nil && w.local != nil { + return w.local, true + } else { + return nil, false + } +} + +// Cloud checks whether the wrapped value is a cloud plan file, and returns it if available. 
+func (w *WrappedPlanFile) Cloud() (*cloudplan.SavedPlanBookmark, bool) { + if w != nil && w.cloud != nil { + return w.cloud, true + } else { + return nil, false + } +} + +// NewWrappedLocal constructs a WrappedPlanFile from an already loaded local +// plan file reader. Most cases should use OpenWrapped to load from disk +// instead. If the provided reader is nil, the returned pointer is nil. +func NewWrappedLocal(l *Reader) *WrappedPlanFile { + if l != nil { + return &WrappedPlanFile{local: l} + } else { + return nil + } +} + +// NewWrappedCloud constructs a WrappedPlanFile from an already loaded cloud +// plan file. Most cases should use OpenWrapped to load from disk +// instead. If the provided plan file is nil, the returned pointer is nil. +func NewWrappedCloud(c *cloudplan.SavedPlanBookmark) *WrappedPlanFile { + if c != nil { + return &WrappedPlanFile{cloud: c} + } else { + return nil + } +} + +// OpenWrapped loads a local or cloud plan file from a specified file path, or +// returns an error if the file doesn't seem to be a plan file of either kind. +// Most consumers should use this and switch behaviors based on the kind of plan +// they expected, rather than directly using Open. +func OpenWrapped(filename string, enc encryption.PlanEncryption) (*WrappedPlanFile, error) { + // First, try to load it as a local planfile. + local, localErr := Open(filename, enc) + if localErr == nil { + return &WrappedPlanFile{local: local}, nil + } + // Then, try to load it as a cloud plan. + cloud, cloudErr := cloudplan.LoadSavedPlanBookmark(filename) + if cloudErr == nil { + return &WrappedPlanFile{cloud: &cloud}, nil + } + // If neither worked, prioritize definitive "confirmed the format but can't + // use it" errors, then fall back to dumping everything we know. 
+ var ulp *ErrUnusableLocalPlan + if errors.As(localErr, &ulp) { + return nil, ulp + } + + combinedErr := fmt.Errorf("couldn't load the provided path as either a local plan file (%s) or a saved cloud plan (%s)", localErr, cloudErr) + return nil, combinedErr +} diff --git a/pkg/plans/planfile/writer.go b/pkg/plans/planfile/writer.go new file mode 100644 index 00000000000..07de3bd8542 --- /dev/null +++ b/pkg/plans/planfile/writer.go @@ -0,0 +1,147 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package planfile + +import ( + "archive/zip" + "bytes" + "fmt" + "os" + "time" + + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +type CreateArgs struct { + // ConfigSnapshot is a snapshot of the configuration that the plan + // was created from. + ConfigSnapshot *configload.Snapshot + + // PreviousRunStateFile is a representation of the state snapshot we used + // as the original input when creating this plan, containing the same + // information as recorded at the end of the previous apply except for + // upgrading managed resource instance data to the provider's latest + // schema versions. + PreviousRunStateFile *statefile.File + + // BaseStateFile is a representation of the state snapshot we used to + // create the plan, which is the result of asking the providers to refresh + // all previously-stored objects to match the current situation in the + // remote system. (If this plan was created with refreshing disabled, + // this should be the same as PreviousRunStateFile.) + StateFile *statefile.File + + // Plan records the plan itself, which is the main artifact inside a + // saved plan file. 
+ Plan *plans.Plan + + // DependencyLocks records the dependency lock information that we + // checked prior to creating the plan, so we can make sure that all of the + // same dependencies are still available when applying the plan. + DependencyLocks *depsfile.Locks +} + +// Create creates a new plan file with the given filename, overwriting any +// file that might already exist there. +// +// A plan file contains both a snapshot of the configuration and of the latest +// state file in addition to the plan itself, so that OpenTofu can detect +// if the world has changed since the plan was created and thus refuse to +// apply it. +func Create(filename string, args CreateArgs, enc encryption.PlanEncryption) error { + buff := bytes.NewBuffer(make([]byte, 0)) + zw := zip.NewWriter(buff) + + // tfplan file + { + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: tfplanFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create tfplan file: %w", err) + } + err = writeTfplan(args.Plan, w) + if err != nil { + return fmt.Errorf("failed to write plan: %w", err) + } + } + + // tfstate file + { + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: tfstateFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create embedded tfstate file: %w", err) + } + err = statefile.Write(args.StateFile, w, encryption.StateEncryptionDisabled()) + if err != nil { + return fmt.Errorf("failed to write state snapshot: %w", err) + } + } + + // tfstate-prev file + { + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: tfstatePreviousFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create embedded tfstate-prev file: %w", err) + } + err = statefile.Write(args.PreviousRunStateFile, w, encryption.StateEncryptionDisabled()) + if err != nil { + return fmt.Errorf("failed to write previous state snapshot: %w", err) + } + } + + // 
tfconfig directory + { + err := writeConfigSnapshot(args.ConfigSnapshot, zw) + if err != nil { + return fmt.Errorf("failed to write config snapshot: %w", err) + } + } + + // .terraform.lock.hcl file, containing dependency lock information + if args.DependencyLocks != nil { // (this was a later addition, so not all callers set it, but main callers should) + src, diags := depsfile.SaveLocksToBytes(args.DependencyLocks) + if diags.HasErrors() { + return fmt.Errorf("failed to write embedded dependency lock file: %w", diags.Err()) + } + + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: dependencyLocksFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create embedded dependency lock file: %w", err) + } + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write embedded dependency lock file: %w", err) + } + } + + // Finish zip file + zw.Close() + // Encrypt payload + encrypted, err := enc.EncryptPlan(buff.Bytes()) + if err != nil { + return err + } + return os.WriteFile(filename, encrypted, 0644) +} diff --git a/pkg/plans/quality.go b/pkg/plans/quality.go new file mode 100644 index 00000000000..85cd907fd84 --- /dev/null +++ b/pkg/plans/quality.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plans + +// Quality represents facts about the nature of a plan that might be relevant +// when rendering it, like whether it errored or contains no changes. A plan can +// have multiple qualities. +type Quality int + +//go:generate go run golang.org/x/tools/cmd/stringer -type Quality + +const ( + // Errored plans did not successfully complete, and cannot be applied. + Errored Quality = iota + // NoChanges plans won't result in any actions on infrastructure, or any + // semantically meaningful updates to state. 
They can sometimes still affect + // the format of state if applied. + NoChanges +) diff --git a/pkg/plans/quality_string.go b/pkg/plans/quality_string.go new file mode 100644 index 00000000000..61a399a1e88 --- /dev/null +++ b/pkg/plans/quality_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type Quality"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Errored-0] + _ = x[NoChanges-1] +} + +const _Quality_name = "ErroredNoChanges" + +var _Quality_index = [...]uint8{0, 7, 16} + +func (i Quality) String() string { + if i < 0 || i >= Quality(len(_Quality_index)-1) { + return "Quality(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Quality_name[_Quality_index[i]:_Quality_index[i+1]] +} diff --git a/pkg/plans/resourceinstancechangeactionreason_string.go b/pkg/plans/resourceinstancechangeactionreason_string.go new file mode 100644 index 00000000000..9915ec85bc1 --- /dev/null +++ b/pkg/plans/resourceinstancechangeactionreason_string.go @@ -0,0 +1,72 @@ +// Code generated by "stringer -type=ResourceInstanceChangeActionReason changes.go"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ResourceInstanceChangeNoReason-0] + _ = x[ResourceInstanceReplaceBecauseTainted-84] + _ = x[ResourceInstanceReplaceByRequest-82] + _ = x[ResourceInstanceReplaceByTriggers-68] + _ = x[ResourceInstanceReplaceBecauseCannotUpdate-70] + _ = x[ResourceInstanceDeleteBecauseNoResourceConfig-78] + _ = x[ResourceInstanceDeleteBecauseWrongRepetition-87] + _ = x[ResourceInstanceDeleteBecauseCountIndex-67] + _ = x[ResourceInstanceDeleteBecauseEachKey-69] + _ = x[ResourceInstanceDeleteBecauseNoModule-77] + _ = x[ResourceInstanceDeleteBecauseNoMoveTarget-65] + _ = x[ResourceInstanceReadBecauseConfigUnknown-63] + _ = x[ResourceInstanceReadBecauseDependencyPending-33] + _ = x[ResourceInstanceReadBecauseCheckNested-35] +} + +const ( + _ResourceInstanceChangeActionReason_name_0 = "ResourceInstanceChangeNoReason" + _ResourceInstanceChangeActionReason_name_1 = "ResourceInstanceReadBecauseDependencyPending" + _ResourceInstanceChangeActionReason_name_2 = "ResourceInstanceReadBecauseCheckNested" + _ResourceInstanceChangeActionReason_name_3 = "ResourceInstanceReadBecauseConfigUnknown" + _ResourceInstanceChangeActionReason_name_4 = "ResourceInstanceDeleteBecauseNoMoveTarget" + _ResourceInstanceChangeActionReason_name_5 = "ResourceInstanceDeleteBecauseCountIndexResourceInstanceReplaceByTriggersResourceInstanceDeleteBecauseEachKeyResourceInstanceReplaceBecauseCannotUpdate" + _ResourceInstanceChangeActionReason_name_6 = "ResourceInstanceDeleteBecauseNoModuleResourceInstanceDeleteBecauseNoResourceConfig" + _ResourceInstanceChangeActionReason_name_7 = "ResourceInstanceReplaceByRequest" + _ResourceInstanceChangeActionReason_name_8 = "ResourceInstanceReplaceBecauseTainted" + _ResourceInstanceChangeActionReason_name_9 = "ResourceInstanceDeleteBecauseWrongRepetition" +) + +var ( + _ResourceInstanceChangeActionReason_index_5 = [...]uint8{0, 39, 72, 108, 150} + _ResourceInstanceChangeActionReason_index_6 = [...]uint8{0, 37, 82} +) + +func (i 
ResourceInstanceChangeActionReason) String() string { + switch { + case i == 0: + return _ResourceInstanceChangeActionReason_name_0 + case i == 33: + return _ResourceInstanceChangeActionReason_name_1 + case i == 35: + return _ResourceInstanceChangeActionReason_name_2 + case i == 63: + return _ResourceInstanceChangeActionReason_name_3 + case i == 65: + return _ResourceInstanceChangeActionReason_name_4 + case 67 <= i && i <= 70: + i -= 67 + return _ResourceInstanceChangeActionReason_name_5[_ResourceInstanceChangeActionReason_index_5[i]:_ResourceInstanceChangeActionReason_index_5[i+1]] + case 77 <= i && i <= 78: + i -= 77 + return _ResourceInstanceChangeActionReason_name_6[_ResourceInstanceChangeActionReason_index_6[i]:_ResourceInstanceChangeActionReason_index_6[i+1]] + case i == 82: + return _ResourceInstanceChangeActionReason_name_7 + case i == 84: + return _ResourceInstanceChangeActionReason_name_8 + case i == 87: + return _ResourceInstanceChangeActionReason_name_9 + default: + return "ResourceInstanceChangeActionReason(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/plugin/convert/diagnostics.go b/pkg/plugin/convert/diagnostics.go new file mode 100644 index 00000000000..9ec7add0eba --- /dev/null +++ b/pkg/plugin/convert/diagnostics.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrorsToProto converts the warnings and errors return by the legacy +// provider to protobuf diagnostics. 
+func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { + for _, w := range warns { + diags = AppendProtoDiag(diags, w) + } + + for _, e := range errs { + diags = AppendProtoDiag(diags, e) + } + + return diags +} + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. +func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + Attribute: ap, + }) + case error: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + }) + case string: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: d, + }) + case *proto.Diagnostic: + diags = append(diags, d) + case []*proto.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. 
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, d := range ds { + var severity tfdiags.Severity + + switch d.Severity { + case proto.Diagnostic_ERROR: + severity = tfdiags.Error + case proto.Diagnostic_WARNING: + severity = tfdiags.Warning + } + + var newDiag tfdiags.Diagnostic + + // if there's an attribute path, we need to create a AttributeValue diagnostic + if d.Attribute != nil && len(d.Attribute.Steps) > 0 { + path := AttributePathToPath(d.Attribute) + newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) + } else { + newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) + } + + diags = diags.Append(newDiag) + } + + return diags +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *proto.AttributePath) cty.Path { + var p cty.Path + for _, step := range ap.Steps { + switch selector := step.Selector.(type) { + case *proto.AttributePath_Step_AttributeName: + p = p.GetAttr(selector.AttributeName) + case *proto.AttributePath_Step_ElementKeyString: + p = p.Index(cty.StringVal(selector.ElementKeyString)) + case *proto.AttributePath_Step_ElementKeyInt: + p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) + } + } + return p +} + +// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. 
+func PathToAttributePath(p cty.Path) *proto.AttributePath { + ap := &proto.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: selector.Name, + }, + }) + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: key.AsString(), + }, + }) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: v, + }, + }) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/pkg/plugin/convert/diagnostics_test.go b/pkg/plugin/convert/diagnostics_test.go new file mode 100644 index 00000000000..d3f65705f59 --- /dev/null +++ b/pkg/plugin/convert/diagnostics_test.go @@ -0,0 +1,416 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/tfdiags" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +var ignoreUnexported = cmpopts.IgnoreUnexported( + proto.Diagnostic{}, + proto.Schema_Block{}, + proto.Schema_NestedBlock{}, + proto.Schema_Attribute{}, +) + +func TestProtoDiagnostics(t *testing.T) { + diags := WarnsAndErrsToProto( + []string{ + "warning 1", + "warning 2", + }, + []error{ + errors.New("error 1"), + errors.New("error 2"), + }, + ) + + expected := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 1", + }, + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 2", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + }, + } + + if !cmp.Equal(expected, diags, ignoreUnexported) { + t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) + } +} + +func TestDiagnostics(t *testing.T) { + type diagFlat struct { + Severity tfdiags.Severity + Attr []interface{} + Summary string + Detail string + } + + tests := map[string]struct { + Cons func([]*proto.Diagnostic) []*proto.Diagnostic + Want []diagFlat + }{ + "nil": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return diags + }, + nil, + }, + "error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + }, + }, + }, + "detailed error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + Detail: "detailed 
error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + Detail: "detailed error", + }, + }, + }, + "warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + }, + }, + }, + "detailed warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + Detail: "detailed warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + Detail: "detailed warning", + }, + }, + }, + "multi error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "first error", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "second error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "first error", + }, + { + Severity: tfdiags.Error, + Summary: "second error", + }, + }, + }, + "warning and error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "warning", + }, + { + Severity: tfdiags.Error, + Summary: "error", + }, + }, + }, + "attr error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + Detail: "error detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attribute_name", + 
}, + }, + }, + }, + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error", + Detail: "error detail", + Attr: []interface{}{"attribute_name"}, + }, + }, + }, + "multi attr": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + Detail: "error 2 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + Detail: "warning detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: 1, + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 3", + Detail: "error 3 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: "idx", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + ) + + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error 1", + Detail: 
"error 1 detail", + Attr: []interface{}{"attr"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 2", + Detail: "error 2 detail", + Attr: []interface{}{"attr", "sub"}, + }, + { + Severity: tfdiags.Warning, + Summary: "warning", + Detail: "warning detail", + Attr: []interface{}{"attr", 1, "sub"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 3", + Detail: "error 3 detail", + Attr: []interface{}{"attr", "idx", "sub"}, + }, + }, + }, + } + + flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { + var flat []diagFlat + for _, item := range ds { + desc := item.Description() + + var attr []interface{} + + for _, a := range tfdiags.GetAttribute(item) { + switch step := a.(type) { + case cty.GetAttrStep: + attr = append(attr, step.Name) + case cty.IndexStep: + switch step.Key.Type() { + case cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + attr = append(attr, int(i)) + case cty.String: + attr = append(attr, step.Key.AsString()) + } + } + } + + flat = append(flat, diagFlat{ + Severity: item.Severity(), + Attr: attr, + Summary: desc.Summary, + Detail: desc.Detail, + }) + } + return flat + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // we take the + tfDiags := ProtoToDiagnostics(tc.Cons(nil)) + + flat := flattenTFDiags(tfDiags) + + if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that a diagnostic with a present but empty attribute results in a +// whole body diagnostic. We verify this by inspecting the resulting Subject +// from the diagnostic when considered in the context of a config body. 
+func TestProtoDiagnostics_emptyAttributePath(t *testing.T) { + protoDiags := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + // this slice is intentionally left empty + }, + }, + }, + } + tfDiags := ProtoToDiagnostics(protoDiags) + + testConfig := `provider "test" { + foo = "bar" +}` + f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + t.Fatal(parseDiags) + } + diags := tfDiags.InConfigBody(f.Body, "") + + if len(tfDiags) != 1 { + t.Fatalf("expected 1 diag, got %d", len(tfDiags)) + } + got := diags[0].Source().Subject + want := &tfdiags.SourceRange{ + Filename: "test.tf", + Start: tfdiags.SourcePos{Line: 1, Column: 1}, + End: tfdiags.SourcePos{Line: 1, Column: 1}, + } + + if !cmp.Equal(got, want, typeComparer, valueComparer) { + t.Fatal(cmp.Diff(got, want, typeComparer, valueComparer)) + } +} diff --git a/pkg/plugin/convert/function.go b/pkg/plugin/convert/function.go new file mode 100644 index 00000000000..2858cd24e11 --- /dev/null +++ b/pkg/plugin/convert/function.go @@ -0,0 +1,68 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +func ProtoToCtyType(in []byte) cty.Type { + var out cty.Type + if err := json.Unmarshal(in, &out); err != nil { + panic(err) + } + return out +} + +func ProtoToTextFormatting(proto tfplugin5.StringKind) providers.TextFormatting { + switch proto { + case tfplugin5.StringKind_PLAIN: + return providers.TextFormattingPlain + case tfplugin5.StringKind_MARKDOWN: + return providers.TextFormattingMarkdown + default: + panic(fmt.Sprintf("Invalid text tfplugin5.StringKind %v", proto)) + } +} + +func ProtoToFunctionParameterSpec(proto *tfplugin5.Function_Parameter) providers.FunctionParameterSpec { + return providers.FunctionParameterSpec{ + Name: proto.Name, + Type: ProtoToCtyType(proto.Type), + AllowNullValue: proto.AllowNullValue, + AllowUnknownValues: proto.AllowUnknownValues, + Description: proto.Description, + DescriptionFormat: ProtoToTextFormatting(proto.DescriptionKind), + } +} + +func ProtoToFunctionSpec(proto *tfplugin5.Function) providers.FunctionSpec { + params := make([]providers.FunctionParameterSpec, len(proto.Parameters)) + for i, param := range proto.Parameters { + params[i] = ProtoToFunctionParameterSpec(param) + } + + var varParam *providers.FunctionParameterSpec + if proto.VariadicParameter != nil { + param := ProtoToFunctionParameterSpec(proto.VariadicParameter) + varParam = ¶m + } + + return providers.FunctionSpec{ + Parameters: params, + VariadicParameter: varParam, + Return: ProtoToCtyType(proto.Return.Type), + Summary: proto.Summary, + Description: proto.Description, + DescriptionFormat: ProtoToTextFormatting(proto.DescriptionKind), + DeprecationMessage: proto.DeprecationMessage, + } +} diff --git a/pkg/plugin/convert/schema.go b/pkg/plugin/convert/schema.go new file mode 100644 index 00000000000..f659921022a --- /dev/null 
+++ b/pkg/plugin/convert/schema.go @@ -0,0 +1,190 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "encoding/json" + "reflect" + "sort" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. +func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + + attr.Type = ty + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) proto.StringKind { + switch k { + default: + return proto.StringKind_PLAIN + case configschema.StringMarkdown: + return proto.StringKind_MARKDOWN + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case 
configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. +func ProtoToProviderSchema(s *proto.Schema) providers.Schema { + return providers.Schema{ + Version: s.Version, + Block: ProtoToConfigSchema(s.Block), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a tofu *configschema.Block. +func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k proto.StringKind) configschema.StringKind { + switch k { + default: + return configschema.StringPlain + case proto.StringKind_MARKDOWN: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting 
configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: + nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} diff --git a/pkg/plugin/convert/schema_test.go b/pkg/plugin/convert/schema_test.go new file mode 100644 index 00000000000..7aaa2264128 --- /dev/null +++ b/pkg/plugin/convert/schema_test.go @@ -0,0 +1,366 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/configs/configschema" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +// Test that we can convert configschema to protobuf types and back again. +func TestConvertSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Block *proto.Schema_Block + Want *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + 
Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ProtoToConfigSchema(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that we can 
convert configschema to protobuf types and back again. +func TestConvertProtoSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Want *proto.Schema_Block + Block *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: 
configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ConfigSchemaToProto(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) + } + }) + } +} diff --git a/pkg/plugin/discovery/find.go b/pkg/plugin/discovery/find.go new file mode 100644 index 00000000000..7fe18d3409b --- /dev/null +++ b/pkg/plugin/discovery/find.go @@ -0,0 +1,193 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "log" + "os" + "path/filepath" + "strings" +) + +// FindPlugins looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider") and +// returns a PluginMetaSet representing the discovered potential-plugins. +// +// Currently this supports two different naming schemes. The current +// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing +// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is +// files directly in the given directory whose names are like +// terraform-$KIND-$NAME. +// +// Only one plugin will be returned for each unique plugin (name, version) +// pair, with preference given to files found in earlier directories. +// +// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. +func FindPlugins(kind string, dirs []string) PluginMetaSet { + return ResolvePluginPaths(FindPluginPaths(kind, dirs)) +} + +// FindPluginPaths looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider"). +// +// The return value is a list of absolute paths that appear to refer to +// plugins in the given directories, based only on what can be inferred +// from the naming scheme. The paths returned are ordered such that files +// in later dirs appear after files in earlier dirs in the given directory +// list. Within the same directory plugins are returned in a consistent but +// undefined order. +func FindPluginPaths(kind string, dirs []string) []string { + // This is just a thin wrapper around findPluginPaths so that we can + // use the latter in tests with a fake machineName so we can use our + // test fixtures. 
+ return findPluginPaths(kind, dirs) +} + +func findPluginPaths(kind string, dirs []string) []string { + prefix := "terraform-" + kind + "-" + + ret := make([]string, 0, len(dirs)) + + for _, dir := range dirs { + items, err := os.ReadDir(dir) + if err != nil { + // Ignore missing dirs, non-dirs, etc + continue + } + + log.Printf("[DEBUG] checking for %s in %q", kind, dir) + + for _, item := range items { + fullName := item.Name() + + if !strings.HasPrefix(fullName, prefix) { + continue + } + + // New-style paths must have a version segment in filename + if strings.Contains(strings.ToLower(fullName), "_v") { + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[DEBUG] found %s %q", kind, fullName) + ret = append(ret, filepath.Clean(absPath)) + continue + } + + // Legacy style with files directly in the base directory + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[WARN] found legacy %s %q", kind, fullName) + + ret = append(ret, filepath.Clean(absPath)) + } + } + + return ret +} + +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + +// ResolvePluginPaths takes a list of paths to plugin executables (as returned +// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the +// referenced plugins. 
+// +// If the same combination of plugin name and version appears multiple times, +// the earlier reference will be preferred. Several different versions of +// the same plugin name may be returned, in which case the methods of +// PluginMetaSet can be used to filter down. +func ResolvePluginPaths(paths []string) PluginMetaSet { + s := make(PluginMetaSet) + + type nameVersion struct { + Name string + Version string + } + found := make(map[nameVersion]struct{}) + + for _, path := range paths { + baseName := strings.ToLower(filepath.Base(path)) + if !strings.HasPrefix(baseName, "terraform-") { + // Should never happen with reasonable input + continue + } + + baseName = baseName[10:] + firstDash := strings.Index(baseName, "-") + if firstDash == -1 { + // Should never happen with reasonable input + continue + } + + baseName = baseName[firstDash+1:] + if baseName == "" { + // Should never happen with reasonable input + continue + } + + // Trim the .exe suffix used on Windows before we start wrangling + // the remainder of the path. + baseName = strings.TrimSuffix(baseName, ".exe") + + parts := strings.SplitN(baseName, "_v", 2) + name := parts[0] + version := VersionZero + if len(parts) == 2 { + version = parts[1] + } + + // Auto-installed plugins contain an extra name portion representing + // the expected plugin version, which we must trim off. + if underX := strings.Index(version, "_x"); underX != -1 { + version = version[:underX] + } + + if _, ok := found[nameVersion{name, version}]; ok { + // Skip duplicate versions of the same plugin + // (We do this during this step because after this we will be + // dealing with sets and thus lose our ordering with which to + // decide preference.) 
+ continue + } + + s.Add(PluginMeta{ + Name: name, + Version: VersionStr(version), + Path: path, + }) + found[nameVersion{name, version}] = struct{}{} + } + + return s +} diff --git a/pkg/plugin/discovery/find_test.go b/pkg/plugin/discovery/find_test.go new file mode 100644 index 00000000000..18ab04736af --- /dev/null +++ b/pkg/plugin/discovery/find_test.go @@ -0,0 +1,144 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestFindPluginPaths(t *testing.T) { + got := findPluginPaths( + "foo", + []string{ + "testdata/current-style-plugins/mockos_mockarch", + "testdata/legacy-style-plugins", + "testdata/non-existent", + "testdata/not-a-dir", + }, + ) + want := []string{ + filepath.Join("testdata", "current-style-plugins", "mockos_mockarch", "terraform-foo-bar_v0.0.1"), + filepath.Join("testdata", "current-style-plugins", "mockos_mockarch", "terraform-foo-bar_v1.0.0.exe"), + // un-versioned plugins are still picked up, even in current-style paths + filepath.Join("testdata", "current-style-plugins", "mockos_mockarch", "terraform-foo-missing-version"), + filepath.Join("testdata", "legacy-style-plugins", "terraform-foo-bar"), + filepath.Join("testdata", "legacy-style-plugins", "terraform-foo-baz"), + } + + // Turn the paths back into relative paths, since we don't care exactly + // where this code is present on the system for the sake of this test. 
+ baseDir, err := os.Getwd() + if err != nil { + // Should never happen + panic(err) + } + for i, absPath := range got { + if !filepath.IsAbs(absPath) { + t.Errorf("got non-absolute path %s", absPath) + } + + got[i], err = filepath.Rel(baseDir, absPath) + if err != nil { + t.Fatalf("Can't make %s relative to current directory %s", absPath, baseDir) + } + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestResolvePluginPaths(t *testing.T) { + got := ResolvePluginPaths([]string{ + "/example/mockos_mockarch/terraform-foo-bar_v0.0.1", + "/example/mockos_mockarch/terraform-foo-baz_v0.0.1", + "/example/mockos_mockarch/terraform-foo-baz_v1.0.0", + "/example/mockos_mockarch/terraform-foo-baz_v2.0.0_x4", + "/example/mockos_mockarch/terraform-foo-upper_V2.0.0_X4", + "/example/terraform-foo-bar", + "/example/mockos_mockarch/terraform-foo-bar_vbananas", + "/example/mockos_mockarch/terraform-foo-bar_v", + "/example/mockos_mockarch/terraform-foo-windowsthing1_v1.0.0.exe", + "/example/mockos_mockarch/terraform-foo-windowsthing2_v1.0.0_x4.exe", + "/example/mockos_mockarch/terraform-foo-windowsthing3.exe", + "/example2/mockos_mockarch/terraform-foo-bar_v0.0.1", + }) + + want := []PluginMeta{ + { + Name: "bar", + Version: "0.0.1", + Path: "/example/mockos_mockarch/terraform-foo-bar_v0.0.1", + }, + { + Name: "baz", + Version: "0.0.1", + Path: "/example/mockos_mockarch/terraform-foo-baz_v0.0.1", + }, + { + Name: "baz", + Version: "1.0.0", + Path: "/example/mockos_mockarch/terraform-foo-baz_v1.0.0", + }, + { + Name: "baz", + Version: "2.0.0", + Path: "/example/mockos_mockarch/terraform-foo-baz_v2.0.0_x4", + }, + { + Name: "upper", + Version: "2.0.0", + Path: "/example/mockos_mockarch/terraform-foo-upper_V2.0.0_X4", + }, + { + Name: "bar", + Version: "0.0.0", + Path: "/example/terraform-foo-bar", + }, + { + Name: "bar", + Version: "bananas", + Path: "/example/mockos_mockarch/terraform-foo-bar_vbananas", + }, + { + Name: 
"bar", + Version: "", + Path: "/example/mockos_mockarch/terraform-foo-bar_v", + }, + { + Name: "windowsthing1", + Version: "1.0.0", + Path: "/example/mockos_mockarch/terraform-foo-windowsthing1_v1.0.0.exe", + }, + { + Name: "windowsthing2", + Version: "1.0.0", + Path: "/example/mockos_mockarch/terraform-foo-windowsthing2_v1.0.0_x4.exe", + }, + { + Name: "windowsthing3", + Version: "0.0.0", + Path: "/example/mockos_mockarch/terraform-foo-windowsthing3.exe", + }, + } + + for p := range got { + t.Logf("got %#v", p) + } + + if got, want := got.Count(), len(want); got != want { + t.Errorf("got %d items; want %d", got, want) + } + + for _, wantMeta := range want { + if !got.Has(wantMeta) { + t.Errorf("missing meta %#v", wantMeta) + } + } +} diff --git a/pkg/plugin/discovery/get_cache.go b/pkg/plugin/discovery/get_cache.go new file mode 100644 index 00000000000..28e5f41bb8b --- /dev/null +++ b/pkg/plugin/discovery/get_cache.go @@ -0,0 +1,53 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. 
+func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. + plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/pkg/plugin/discovery/get_cache_test.go b/pkg/plugin/discovery/get_cache_test.go new file mode 100644 index 00000000000..16f0221eafc --- /dev/null +++ b/pkg/plugin/discovery/get_cache_test.go @@ -0,0 +1,34 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "testing" +) + +func TestLocalPluginCache(t *testing.T) { + cache := NewLocalPluginCache("testdata/plugin-cache") + + foo1Path := cache.CachedPluginPath("provider", "foo", VersionStr("v0.0.1").MustParse()) + if foo1Path == "" { + t.Errorf("foo v0.0.1 not found; should have been found") + } + + foo2Path := cache.CachedPluginPath("provider", "foo", VersionStr("v0.0.2").MustParse()) + if foo2Path != "" { + t.Errorf("foo v0.0.2 found at %s; should not have been found", foo2Path) + } + + baz1Path := cache.CachedPluginPath("provider", "baz", VersionStr("v0.0.1").MustParse()) + if baz1Path != "" { + t.Errorf("baz v0.0.1 found at %s; should not have been found", baz1Path) + } + + baz2Path := cache.CachedPluginPath("provider", "baz", VersionStr("v0.0.2").MustParse()) + if baz2Path != "" { + t.Errorf("baz v0.0.2 found at %s; should not have been found", baz2Path) + } +} diff --git a/pkg/plugin/discovery/meta.go b/pkg/plugin/discovery/meta.go new file mode 100644 index 00000000000..7fbb84fa94b --- /dev/null +++ b/pkg/plugin/discovery/meta.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "crypto/sha256" + "io" + "os" +) + +// PluginMeta is metadata about a plugin, useful for launching the plugin +// and for understanding which plugins are available. +type PluginMeta struct { + // Name is the name of the plugin, e.g. as inferred from the plugin + // binary's filename, or by explicit configuration. + Name string + + // Version is the semver version of the plugin, expressed as a string + // that might not be semver-valid. + Version VersionStr + + // Path is the absolute path of the executable that can be launched + // to provide the RPC server for this plugin. 
+ Path string +} + +// SHA256 returns a SHA256 hash of the content of the referenced executable +// file, or an error if the file's contents cannot be read. +func (m PluginMeta) SHA256() ([]byte, error) { + f, err := os.Open(m.Path) + if err != nil { + return nil, err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/pkg/plugin/discovery/meta_set.go b/pkg/plugin/discovery/meta_set.go new file mode 100644 index 00000000000..33c3706fbd4 --- /dev/null +++ b/pkg/plugin/discovery/meta_set.go @@ -0,0 +1,200 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. +// +// MethodConfigs on this type allow filtering of the set to produce subsets that +// meet more restrictive criteria. +type PluginMetaSet map[PluginMeta]struct{} + +// Add inserts the given PluginMeta into the receiving set. This is a no-op +// if the given meta is already present. +func (s PluginMetaSet) Add(p PluginMeta) { + s[p] = struct{}{} +} + +// Remove removes the given PluginMeta from the receiving set. This is a no-op +// if the given meta is not already present. +func (s PluginMetaSet) Remove(p PluginMeta) { + delete(s, p) +} + +// Has returns true if the given meta is in the receiving set, or false +// otherwise. +func (s PluginMetaSet) Has(p PluginMeta) bool { + _, ok := s[p] + return ok +} + +// Count returns the number of metas in the set +func (s PluginMetaSet) Count() int { + return len(s) +} + +// ValidateVersions returns two new PluginMetaSets, separating those with +// versions that have syntax-valid semver versions from those that don't. 
+// +// Eliminating invalid versions from consideration (and possibly warning about +// them) is usually the first step of working with a meta set after discovery +// has completed. +func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { + valid = make(PluginMetaSet) + invalid = make(PluginMetaSet) + for p := range s { + if _, err := p.Version.Parse(); err == nil { + valid.Add(p) + } else { + invalid.Add(p) + } + } + return +} + +// WithName returns the subset of metas that have the given name. +func (s PluginMetaSet) WithName(name string) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + if p.Name == name { + ns.Add(p) + } + } + return ns +} + +// WithVersion returns the subset of metas that have the given version. +// +// This should be used only with the "valid" result from ValidateVersions; +// it will ignore any plugin metas that have invalid version strings. +func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + gotVersion, err := p.Version.Parse() + if err != nil { + continue + } + if gotVersion.Equal(version) { + ns.Add(p) + } + } + return ns +} + +// ByName groups the metas in the set by their Names, returning a map. +func (s PluginMetaSet) ByName() map[string]PluginMetaSet { + ret := make(map[string]PluginMetaSet) + for p := range s { + if _, ok := ret[p.Name]; !ok { + ret[p.Name] = make(PluginMetaSet) + } + ret[p.Name].Add(p) + } + return ret +} + +// Newest returns the one item from the set that has the newest Version value. +// +// The result is meaningful only if the set is already filtered such that +// all of the metas have the same Name. +// +// If there isn't at least one meta in the set then this function will panic. +// Use Count() to ensure that there is at least one value before calling. +// +// If any of the metas have invalid version strings then this function will +// panic. 
Use ValidateVersions() first to filter out metas with invalid
+// versions.
+//
+// If two metas have the same Version then one is arbitrarily chosen. This
+// situation should be avoided by pre-filtering the set.
+func (s PluginMetaSet) Newest() PluginMeta {
+	if len(s) == 0 {
+		panic("can't call Newest on empty PluginMetaSet")
+	}
+
+	var first = true
+	var winner PluginMeta
+	var winnerVersion Version
+	for p := range s {
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+
+		if first || version.NewerThan(winnerVersion) {
+			winner = p
+			winnerVersion = version
+			first = false
+		}
+	}
+
+	return winner
+}
+
+// ConstrainVersions takes a set of requirements and attempts to
+// return a map from name to a set of metas that have the matching
+// name and an appropriate version.
+//
+// If any of the given requirements match *no* plugins then its PluginMetaSet
+// in the returned map will be empty.
+//
+// All viable metas are returned, so the caller can apply any desired filtering
+// to reduce down to a single option. For example, calling Newest() to obtain
+// the highest available version.
+//
+// If any of the metas in the set have invalid version strings then this
+// function will panic. Use ValidateVersions() first to filter out metas with
+// invalid versions.
+func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
+	ret := make(map[string]PluginMetaSet)
+	for p := range s {
+		name := p.Name
+		allowedVersions, ok := reqd[name]
+		if !ok {
+			continue
+		}
+		if _, ok := ret[p.Name]; !ok {
+			ret[p.Name] = make(PluginMetaSet)
+		}
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+		if allowedVersions.Allows(version) {
+			ret[p.Name].Add(p)
+		}
+	}
+	return ret
+}
+
+// OverridePaths returns a new set where any existing plugins with the given
+// names are removed and replaced with the single path given in the map.
+//
+// This is here only to continue to support the legacy way of overriding
+// plugin binaries in the .opentofurc file. It treats all given plugins
+// as pre-versioning (version 0.0.0). This mechanism will eventually be
+// phased out, with vendor directories being the intended replacement.
+func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
+	ret := make(PluginMetaSet)
+	for p := range s {
+		if _, ok := paths[p.Name]; ok {
+			// Skip plugins that we're overriding
+			continue
+		}
+
+		ret.Add(p)
+	}
+
+	// Now add the metadata for overriding plugins
+	for name, path := range paths {
+		ret.Add(PluginMeta{
+			Name:    name,
+			Version: VersionZero,
+			Path:    path,
+		})
+	}
+
+	return ret
+}
diff --git a/pkg/plugin/discovery/meta_set_test.go b/pkg/plugin/discovery/meta_set_test.go
new file mode 100644
index 00000000000..37f52cb65fa
--- /dev/null
+++ b/pkg/plugin/discovery/meta_set_test.go
@@ -0,0 +1,422 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package discovery
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+)
+
+func TestPluginMetaSetManipulation(t *testing.T) {
+	metas := []PluginMeta{
+		{
+			Name:    "foo",
+			Version: "1.0.0",
+			Path:    "test-foo",
+		},
+		{
+			Name:    "bar",
+			Version: "2.0.0",
+			Path:    "test-bar",
+		},
+		{
+			Name:    "baz",
+			Version: "2.0.0",
+			Path:    "test-bar",
+		},
+	}
+	s := make(PluginMetaSet)
+
+	if count := s.Count(); count != 0 {
+		t.Fatalf("set has Count %d before any items added", count)
+	}
+
+	// Can we add metas?
+	for _, p := range metas {
+		s.Add(p)
+		if !s.Has(p) {
+			t.Fatalf("%q not in set after adding it", p.Name)
+		}
+	}
+
+	if got, want := s.Count(), len(metas); got != want {
+		t.Fatalf("set has Count %d after all items added; want %d", got, want)
+	}
+
+	// Can we still retrieve earlier ones after we added later ones?
+ for _, p := range metas { + if !s.Has(p) { + t.Fatalf("%q not in set after all adds", p.Name) + } + } + + // Can we remove metas? + for _, p := range metas { + s.Remove(p) + if s.Has(p) { + t.Fatalf("%q still in set after removing it", p.Name) + } + } + + if count := s.Count(); count != 0 { + t.Fatalf("set has Count %d after all items removed", count) + } +} + +func TestPluginMetaSetValidateVersions(t *testing.T) { + metas := []PluginMeta{ + { + Name: "foo", + Version: "1.0.0", + Path: "test-foo", + }, + { + Name: "bar", + Version: "0.0.1", + Path: "test-bar", + }, + { + Name: "baz", + Version: "bananas", + Path: "test-bar", + }, + } + s := make(PluginMetaSet) + + for _, p := range metas { + s.Add(p) + } + + valid, invalid := s.ValidateVersions() + if count := valid.Count(); count != 2 { + t.Errorf("valid set has %d metas; want 2", count) + } + if count := invalid.Count(); count != 1 { + t.Errorf("valid set has %d metas; want 1", count) + } + + if !valid.Has(metas[0]) { + t.Errorf("'foo' not in valid set") + } + if !valid.Has(metas[1]) { + t.Errorf("'bar' not in valid set") + } + if !invalid.Has(metas[2]) { + t.Errorf("'baz' not in invalid set") + } + + if invalid.Has(metas[0]) { + t.Errorf("'foo' in invalid set") + } + if invalid.Has(metas[1]) { + t.Errorf("'bar' in invalid set") + } + if valid.Has(metas[2]) { + t.Errorf("'baz' in valid set") + } + +} + +func TestPluginMetaSetWithName(t *testing.T) { + tests := []struct { + metas []PluginMeta + name string + wantCount int + }{ + { + []PluginMeta{}, + "foo", + 0, + }, + { + []PluginMeta{ + { + Name: "foo", + Version: "0.0.1", + Path: "foo", + }, + }, + "foo", + 1, + }, + { + []PluginMeta{ + { + Name: "foo", + Version: "0.0.1", + Path: "foo", + }, + }, + "bar", + 0, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("Test%02d", i), func(t *testing.T) { + s := make(PluginMetaSet) + for _, p := range test.metas { + s.Add(p) + } + filtered := s.WithName(test.name) + if gotCount := filtered.Count(); 
gotCount != test.wantCount { + t.Errorf("got count %d in %#v; want %d", gotCount, filtered, test.wantCount) + } + }) + } +} + +func TestPluginMetaSetByName(t *testing.T) { + metas := []PluginMeta{ + { + Name: "foo", + Version: "1.0.0", + Path: "test-foo", + }, + { + Name: "foo", + Version: "2.0.0", + Path: "test-foo-2", + }, + { + Name: "bar", + Version: "0.0.1", + Path: "test-bar", + }, + { + Name: "baz", + Version: "1.2.0", + Path: "test-bar", + }, + } + s := make(PluginMetaSet) + + for _, p := range metas { + s.Add(p) + } + + byName := s.ByName() + if got, want := len(byName), 3; got != want { + t.Errorf("%d keys in ByName map; want %d", got, want) + } + if got, want := len(byName["foo"]), 2; got != want { + t.Errorf("%d metas for 'foo'; want %d", got, want) + } + if got, want := len(byName["bar"]), 1; got != want { + t.Errorf("%d metas for 'bar'; want %d", got, want) + } + if got, want := len(byName["baz"]), 1; got != want { + t.Errorf("%d metas for 'baz'; want %d", got, want) + } + + if !byName["foo"].Has(metas[0]) { + t.Errorf("%#v missing from 'foo' set", metas[0]) + } + if !byName["foo"].Has(metas[1]) { + t.Errorf("%#v missing from 'foo' set", metas[1]) + } + if !byName["bar"].Has(metas[2]) { + t.Errorf("%#v missing from 'bar' set", metas[2]) + } + if !byName["baz"].Has(metas[3]) { + t.Errorf("%#v missing from 'baz' set", metas[3]) + } +} + +func TestPluginMetaSetNewest(t *testing.T) { + tests := []struct { + versions []string + want string + }{ + { + []string{ + "0.0.1", + }, + "0.0.1", + }, + { + []string{ + "0.0.1", + "0.0.2", + }, + "0.0.2", + }, + { + []string{ + "1.0.0", + "1.0.0-beta1", + }, + "1.0.0", + }, + { + []string{ + "0.0.1", + "1.0.0", + }, + "1.0.0", + }, + } + + for _, test := range tests { + t.Run(strings.Join(test.versions, "|"), func(t *testing.T) { + s := make(PluginMetaSet) + for _, version := range test.versions { + s.Add(PluginMeta{ + Name: "foo", + Version: VersionStr(version), + Path: "foo-V" + version, + }) + } + + newest := 
s.Newest() + if newest.Version != VersionStr(test.want) { + t.Errorf("version is %q; want %q", newest.Version, test.want) + } + }) + } +} + +func TestPluginMetaSetConstrainVersions(t *testing.T) { + metas := []PluginMeta{ + { + Name: "foo", + Version: "1.0.0", + Path: "test-foo", + }, + { + Name: "foo", + Version: "2.0.0", + Path: "test-foo-2", + }, + { + Name: "foo", + Version: "3.0.0", + Path: "test-foo-2", + }, + { + Name: "bar", + Version: "0.0.5", + Path: "test-bar", + }, + { + Name: "baz", + Version: "0.0.1", + Path: "test-bar", + }, + } + s := make(PluginMetaSet) + + for _, p := range metas { + s.Add(p) + } + + byName := s.ConstrainVersions(PluginRequirements{ + "foo": &PluginConstraints{Versions: ConstraintStr(">=2.0.0").MustParse()}, + "bar": &PluginConstraints{Versions: ConstraintStr(">=0.0.0").MustParse()}, + "baz": &PluginConstraints{Versions: ConstraintStr(">=1.0.0").MustParse()}, + "fun": &PluginConstraints{Versions: ConstraintStr(">5.0.0").MustParse()}, + }) + if got, want := len(byName), 3; got != want { + t.Errorf("%d keys in map; want %d", got, want) + } + + if got, want := len(byName["foo"]), 2; got != want { + t.Errorf("%d metas for 'foo'; want %d", got, want) + } + if got, want := len(byName["bar"]), 1; got != want { + t.Errorf("%d metas for 'bar'; want %d", got, want) + } + if got, want := len(byName["baz"]), 0; got != want { + t.Errorf("%d metas for 'baz'; want %d", got, want) + } + // "fun" is not in the map at all, because we have no metas for that name + + if !byName["foo"].Has(metas[1]) { + t.Errorf("%#v missing from 'foo' set", metas[1]) + } + if !byName["foo"].Has(metas[2]) { + t.Errorf("%#v missing from 'foo' set", metas[2]) + } + if !byName["bar"].Has(metas[3]) { + t.Errorf("%#v missing from 'bar' set", metas[3]) + } + +} + +func TestPluginMetaSetOverridePaths(t *testing.T) { + + metas := []PluginMeta{ + { + Name: "foo", + Version: "1.0.0", + Path: "test-foo-1", + }, + { + Name: "foo", + Version: "2.0.0", + Path: "test-foo-2", + }, + 
{ + Name: "foo", + Version: "3.0.0", + Path: "test-foo-3", + }, + { + Name: "bar", + Version: "0.0.5", + Path: "test-bar-5", + }, + { + Name: "bar", + Version: "0.0.6", + Path: "test-bar-6", + }, + { + Name: "baz", + Version: "0.0.1", + Path: "test-bar", + }, + } + s := make(PluginMetaSet) + + for _, p := range metas { + s.Add(p) + } + + ns := s.OverridePaths(map[string]string{ + "foo": "override-foo", + "fun": "override-fun", + }) + + if got, want := ns.Count(), 5; got != want { + t.Errorf("got %d metas; want %d", got, want) + } + + if !ns.Has(metas[3]) { + t.Errorf("new set is missing %#v", metas[3]) + } + if !ns.Has(metas[4]) { + t.Errorf("new set is missing %#v", metas[4]) + } + if !ns.Has(metas[5]) { + t.Errorf("new set is missing %#v", metas[5]) + } + if !ns.Has(PluginMeta{ + Name: "foo", + Version: VersionZero, + Path: "override-foo", + }) { + t.Errorf("new set is missing 'foo' override") + } + if !ns.Has(PluginMeta{ + Name: "fun", + Version: VersionZero, + Path: "override-fun", + }) { + t.Errorf("new set is missing 'fun' override") + } +} diff --git a/pkg/plugin/discovery/meta_test.go b/pkg/plugin/discovery/meta_test.go new file mode 100644 index 00000000000..e712a8b3f6a --- /dev/null +++ b/pkg/plugin/discovery/meta_test.go @@ -0,0 +1,27 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "fmt" + "testing" +) + +func TestMetaSHA256(t *testing.T) { + m := PluginMeta{ + Path: "testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1", + } + hash, err := m.SHA256() + if err != nil { + t.Fatalf("failed: %s", err) + } + + got := fmt.Sprintf("%x", hash) + want := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // (hash of empty file) + if got != want { + t.Errorf("incorrect hash %s; want %s", got, want) + } +} diff --git a/pkg/plugin/discovery/requirements.go b/pkg/plugin/discovery/requirements.go new file mode 100644 index 00000000000..217d64d1860 --- /dev/null +++ b/pkg/plugin/discovery/requirements.go @@ -0,0 +1,116 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "bytes" +) + +// PluginInstallProtocolVersion is the protocol version TF-core +// supports to communicate with servers, and is used to resolve +// plugin discovery with OpenTofu registry, in addition to +// any specified plugin version constraints +const PluginInstallProtocolVersion = 5 + +// PluginRequirements describes a set of plugins (assumed to be of a consistent +// kind) that are required to exist and have versions within the given +// corresponding sets. +type PluginRequirements map[string]*PluginConstraints + +// PluginConstraints represents an element of PluginRequirements describing +// the constraints for a single plugin. +type PluginConstraints struct { + // Specifies that the plugin's version must be within the given + // constraints. + Versions Constraints + + // If non-nil, the hash of the on-disk plugin executable must exactly + // match the SHA256 hash given here. + SHA256 []byte +} + +// Allows returns true if the given version is within the receiver's version +// constraints. 
+func (s *PluginConstraints) Allows(v Version) bool { + return s.Versions.Allows(v) +} + +// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, +// either because it matches the constraint or because there is no such +// constraint. +func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { + if s.SHA256 == nil { + return true + } + return bytes.Equal(s.SHA256, digest) +} + +// Merge takes the contents of the receiver and the other given requirements +// object and merges them together into a single requirements structure +// that satisfies both sets of requirements. +// +// Note that it doesn't make sense to merge two PluginRequirements with +// differing required plugin SHA256 hashes, since the result will never +// match any plugin. +func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { + ret := make(PluginRequirements) + for n, c := range r { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + for n, c := range other { + if existing, exists := ret[n]; exists { + ret[n].Versions = ret[n].Versions.Append(c.Versions) + + if existing.SHA256 != nil { + if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { + // If we've been asked to merge two constraints with + // different SHA256 hashes then we'll produce a dummy value + // that can never match anything. This is a silly edge case + // that no reasonable caller should hit. + ret[n].SHA256 = []byte(invalidProviderHash) + } + } else { + ret[n].SHA256 = c.SHA256 // might still be nil + } + } else { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + } + return ret +} + +// LockExecutables applies additional constraints to the receiver that +// require plugin executables with specific SHA256 digests. This modifies +// the receiver in-place, since it's intended to be applied after +// version constraints have been resolved. 
+// +// The given map must include a key for every plugin that is already +// required. If not, any missing keys will cause the corresponding plugin +// to never match, though the direct caller doesn't necessarily need to +// guarantee this as long as the downstream code _applying_ these constraints +// is able to deal with the non-match in some way. +func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { + for name, cons := range r { + digest := sha256s[name] + + if digest == nil { + // Prevent any match, which will then presumably cause the + // downstream consumer of this requirements to report an error. + cons.SHA256 = []byte(invalidProviderHash) + continue + } + + cons.SHA256 = digest + } +} + +const invalidProviderHash = "" diff --git a/pkg/plugin/discovery/requirements_test.go b/pkg/plugin/discovery/requirements_test.go new file mode 100644 index 00000000000..6425ab21646 --- /dev/null +++ b/pkg/plugin/discovery/requirements_test.go @@ -0,0 +1,98 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "fmt" + "testing" +) + +func TestPluginConstraintsAllows(t *testing.T) { + tests := []struct { + Constraints *PluginConstraints + Version string + Want bool + }{ + { + &PluginConstraints{ + Versions: AllVersions, + }, + "1.0.0", + true, + }, + { + &PluginConstraints{ + Versions: ConstraintStr(">1.0.0").MustParse(), + }, + "1.0.0", + false, + }, + // This is not an exhaustive test because the callees + // already have plentiful tests of their own. 
+ } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { + version := VersionStr(test.Version).MustParse() + got := test.Constraints.Allows(version) + if got != test.Want { + t.Logf("looking for %s in %#v", test.Version, test.Constraints) + t.Errorf("wrong result %#v; want %#v", got, test.Want) + } + }) + } +} + +func TestPluginConstraintsAcceptsSHA256(t *testing.T) { + mustUnhex := func(hex string) (ret []byte) { + _, err := fmt.Sscanf(hex, "%x", &ret) + if err != nil { + panic(err) + } + return ret + } + + tests := []struct { + Constraints *PluginConstraints + Digest []byte + Want bool + }{ + { + &PluginConstraints{ + Versions: AllVersions, + SHA256: mustUnhex("0123456789abcdef"), + }, + mustUnhex("0123456789abcdef"), + true, + }, + { + &PluginConstraints{ + Versions: AllVersions, + SHA256: mustUnhex("0123456789abcdef"), + }, + mustUnhex("f00dface"), + false, + }, + { + &PluginConstraints{ + Versions: AllVersions, + SHA256: nil, + }, + mustUnhex("f00dface"), + true, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { + got := test.Constraints.AcceptsSHA256(test.Digest) + if got != test.Want { + t.Logf("%#v.AcceptsSHA256(%#v)", test.Constraints, test.Digest) + t.Errorf("wrong result %#v; want %#v", got, test.Want) + } + }) + } +} diff --git a/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 b/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-bar_v0.0.1 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version b/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-foo-missing-version new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 
b/pkg/plugin/discovery/testdata/current-style-plugins/mockos_mockarch/terraform-notfoo-bar_v0.0.1 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar b/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-bar new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz b/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-foo-baz new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar b/pkg/plugin/discovery/testdata/legacy-style-plugins/terraform-notfoo-bar new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/not-a-dir b/pkg/plugin/discovery/testdata/not-a-dir new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 b/pkg/plugin/discovery/testdata/plugin-cache/terraform-provider-foo_v0.0.1_x4 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/plugin/discovery/version.go b/pkg/plugin/discovery/version.go new file mode 100644 index 00000000000..d042cc19074 --- /dev/null +++ b/pkg/plugin/discovery/version.go @@ -0,0 +1,82 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" +) + +const VersionZero = "0.0.0" + +// A VersionStr is a string containing a possibly-invalid representation +// of a semver version number. Call Parse on it to obtain a real Version +// object, or discover that it is invalid. +type VersionStr string + +// Parse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then an error is returned instead. 
+func (s VersionStr) Parse() (Version, error) { + raw, err := version.NewVersion(string(s)) + if err != nil { + return Version{}, err + } + return Version{raw}, nil +} + +// MustParse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then it panics. +func (s VersionStr) MustParse() Version { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Version represents a version number that has been parsed from +// a semver string and known to be valid. +type Version struct { + // We wrap this here just because it avoids a proliferation of + // direct go-version imports all over the place, and keeps the + // version-processing details within this package. + raw *version.Version +} + +func (v Version) String() string { + return v.raw.String() +} + +func (v Version) NewerThan(other Version) bool { + return v.raw.GreaterThan(other.raw) +} + +func (v Version) Equal(other Version) bool { + return v.raw.Equal(other.raw) +} + +// IsPrerelease determines if version is a prerelease +func (v Version) IsPrerelease() bool { + return v.raw.Prerelease() != "" +} + +// MinorUpgradeConstraintStr returns a ConstraintStr that would permit +// minor upgrades relative to the receiving version. +func (v Version) MinorUpgradeConstraintStr() ConstraintStr { + segments := v.raw.Segments() + return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) +} + +type Versions []Version + +// Sort sorts version from newest to oldest. +func (v Versions) Sort() { + sort.Slice(v, func(i, j int) bool { + return v[i].NewerThan(v[j]) + }) +} diff --git a/pkg/plugin/discovery/version_set.go b/pkg/plugin/discovery/version_set.go new file mode 100644 index 00000000000..c715677c071 --- /dev/null +++ b/pkg/plugin/discovery/version_set.go @@ -0,0 +1,94 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "sort" + + version "github.com/hashicorp/go-version" +) + +// A ConstraintStr is a string containing a possibly-invalid representation +// of a version constraint provided in configuration. Call Parse on it to +// obtain a real Constraint object, or discover that it is invalid. +type ConstraintStr string + +// Parse transforms a ConstraintStr into a Constraints if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s ConstraintStr) Parse() (Constraints, error) { + raw, err := version.NewConstraint(string(s)) + if err != nil { + return Constraints{}, err + } + return Constraints{raw}, nil +} + +// MustParse is like Parse but it panics if the constraint string is invalid. +func (s ConstraintStr) MustParse() Constraints { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Constraints represents a set of versions which any given Version is either +// a member of or not. +type Constraints struct { + raw version.Constraints +} + +// NewConstraints creates a Constraints based on a version.Constraints. +func NewConstraints(c version.Constraints) Constraints { + return Constraints{c} +} + +// AllVersions is a Constraints containing all versions +var AllVersions Constraints + +func init() { + AllVersions = Constraints{ + raw: make(version.Constraints, 0), + } +} + +// Allows returns true if the given version permitted by the receiving +// constraints set. +func (s Constraints) Allows(v Version) bool { + return s.raw.Check(v.raw) +} + +// Append combines the receiving set with the given other set to produce +// a set that is the intersection of both sets, which is to say that resulting +// constraints contain only the versions that are members of both. 
+func (s Constraints) Append(other Constraints) Constraints { + raw := make(version.Constraints, 0, len(s.raw)+len(other.raw)) + + // Since "raw" is a list of constraints that remove versions from the set, + // "Intersection" is implemented by concatenating together those lists, + // thus leaving behind only the versions not removed by either list. + raw = append(raw, s.raw...) + raw = append(raw, other.raw...) + + // while the set is unordered, we sort these lexically for consistent output + sort.Slice(raw, func(i, j int) bool { + return raw[i].String() < raw[j].String() + }) + + return Constraints{raw} +} + +// String returns a string representation of the set members as a set +// of range constraints. +func (s Constraints) String() string { + return s.raw.String() +} + +// Unconstrained returns true if and only if the receiver is an empty +// constraint set. +func (s Constraints) Unconstrained() bool { + return len(s.raw) == 0 +} diff --git a/pkg/plugin/discovery/version_set_test.go b/pkg/plugin/discovery/version_set_test.go new file mode 100644 index 00000000000..65652e9cb76 --- /dev/null +++ b/pkg/plugin/discovery/version_set_test.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "fmt" + "testing" +) + +func TestVersionSet(t *testing.T) { + tests := []struct { + ConstraintStr string + VersionStr string + ShouldHave bool + }{ + // These test cases are not exhaustive since the underlying go-version + // library is well-tested. This is mainly here just to exercise our + // wrapper code, but also used as an opportunity to cover some basic + // but important cases such as the ~> constraint so that we'll be more + // likely to catch any accidental breaking behavior changes in the + // underlying library. 
+ { + ">=1.0.0", + "1.0.0", + true, + }, + { + ">=1.0.0", + "0.0.0", + false, + }, + { + ">=1.0.0", + "1.1.0-beta1", + false, + }, + { + ">=1.0.0", + "1.1.0", + true, + }, + { + "~>1.1.0-a", + "1.1.0-beta1", + true, + }, + { + "~>1.1.0", + "1.1.2", + true, + }, + { + "~>1.1.0", + "1.2.0", + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s has %s", test.ConstraintStr, test.VersionStr), func(t *testing.T) { + accepted, err := ConstraintStr(test.ConstraintStr).Parse() + if err != nil { + t.Fatalf("unwanted error parsing constraints string %q: %s", test.ConstraintStr, err) + } + + version, err := VersionStr(test.VersionStr).Parse() + if err != nil { + t.Fatalf("unwanted error parsing version string %q: %s", test.VersionStr, err) + } + + if got, want := accepted.Allows(version), test.ShouldHave; got != want { + t.Errorf("Has returned %#v; want %#v", got, want) + } + }) + } +} diff --git a/pkg/plugin/discovery/version_test.go b/pkg/plugin/discovery/version_test.go new file mode 100644 index 00000000000..29334d1a7ef --- /dev/null +++ b/pkg/plugin/discovery/version_test.go @@ -0,0 +1,44 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package discovery + +import ( + "reflect" + "testing" +) + +func TestSortVersions(t *testing.T) { + versions := Versions{ + VersionStr("4").MustParse(), + VersionStr("3.1").MustParse(), + VersionStr("1.2").MustParse(), + VersionStr("1.2.3").MustParse(), + VersionStr("2.2.3").MustParse(), + VersionStr("3.2.1").MustParse(), + VersionStr("2.3.2").MustParse(), + } + + expected := []string{ + "4.0.0", + "3.2.1", + "3.1.0", + "2.3.2", + "2.2.3", + "1.2.3", + "1.2.0", + } + + versions.Sort() + + var sorted []string + for _, v := range versions { + sorted = append(sorted, v.String()) + } + + if !reflect.DeepEqual(sorted, expected) { + t.Fatal("versions aren't sorted:", sorted) + } +} diff --git a/pkg/plugin/grpc_error.go b/pkg/plugin/grpc_error.go new file mode 100644 index 00000000000..491e426c670 --- /dev/null +++ b/pkg/plugin/grpc_error.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "fmt" + "path" + "runtime" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcErr extracts some known error types and formats them into better +// representations for core. This must only be called from plugin methods. +// Since we don't use RPC status errors for the plugin protocol, these do not +// contain any useful details, and we can return some text that at least +// indicates the plugin call and possible error condition. +func grpcErr(err error) (diags tfdiags.Diagnostics) { + if err == nil { + return + } + + // extract the method name from the caller. + pc, _, _, ok := runtime.Caller(1) + if !ok { + logger.Error("unknown grpc call", "error", err) + return diags.Append(err) + } + + f := runtime.FuncForPC(pc) + + // Function names will contain the full import path. 
Take the last + // segment, which will let users know which method was being called. + _, requestName := path.Split(f.Name()) + + // Here we can at least correlate the error in the logs to a particular binary. + logger.Error(requestName, "error", err) + + // TODO: while this expands the error codes into somewhat better messages, + // this still does not easily link the error to an actual user-recognizable + // plugin. The grpc plugin does not know its configured name, and the + // errors are in a list of diagnostics, making it hard for the caller to + // annotate the returned errors. + switch status.Code(err) { + case codes.Unavailable: + // This case is when the plugin has stopped running for some reason, + // and is usually the result of a crash. + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Plugin did not respond", + fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. "+ + "The plugin logs may contain more details.", requestName), + )) + case codes.Canceled: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Request cancelled", + fmt.Sprintf("The %s request was cancelled.", requestName), + )) + case codes.Unimplemented: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Unsupported plugin method", + fmt.Sprintf("The %s method is not supported by this plugin.", requestName), + )) + default: + diags = diags.Append(tfdiags.WholeContainingBody( + tfdiags.Error, + "Plugin error", + fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), + )) + } + return +} diff --git a/pkg/plugin/grpc_provider.go b/pkg/plugin/grpc_provider.go new file mode 100644 index 00000000000..20d0e287057 --- /dev/null +++ b/pkg/plugin/grpc_provider.go @@ -0,0 +1,854 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + + plugin "github.com/hashicorp/go-plugin" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/plugin/convert" + "github.com/kubegems/opentofu/pkg/providers" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" +) + +var logger = logging.HCLogger() + +// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. +type GRPCProviderPlugin struct { + plugin.Plugin + GRPCProvider func() proto.ProviderServer +} + +func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvider{ + client: proto.NewProviderClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProviderServer(s, p.GRPCProvider()) + return nil +} + +// GRPCProvider handles the client, or core side of the plugin rpc connection. +// The GRPCProvider methods are mostly a translation layer between the +// tofu providers types and the grpc proto types, directly converting +// between the two. +type GRPCProvider struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + // TestServer contains a grpc.Server to close when the GRPCProvider is being + // used in an end to end test of a provider. + TestServer *grpc.Server + + // Addr uniquely identifies the type of provider. + // Normally executed providers will have this set during initialization, + // but it may not always be available for alternative execute modes. 
+ Addr addrs.Provider + + // Proto client use to make the grpc service calls. + client proto.ProviderClient + + // this context is created by the plugin package, and is canceled when the + // plugin process ends. + ctx context.Context + + mu sync.Mutex + // schema stores the schema for this provider. This is used to properly + // serialize the requests for schemas. + schema providers.GetProviderSchemaResponse +} + +var _ providers.Interface = new(GRPCProvider) + +func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) { + logger.Trace("GRPCProvider: GetProviderSchema") + p.mu.Lock() + defer p.mu.Unlock() + + // First, we check the global cache. + // The cache could contain this schema if an instance of this provider has previously been started. + if !p.Addr.IsZero() { + // Even if the schema is cached, GetProviderSchemaOptional could be false. This would indicate that once instantiated, + // this provider requires the get schema call to be made at least once, as it handles part of the provider's setup. + // At this point, we don't know if this is the first call to a provider instance or not, so we don't use the result in that case. + if schemaCached, ok := providers.SchemaCache.Get(p.Addr); ok && schemaCached.ServerCapabilities.GetProviderSchemaOptional { + logger.Trace("GRPCProvider: GetProviderSchema: serving from global schema cache", "address", p.Addr) + return schemaCached + } + } + + // If the local cache is non-zero, we know this instance has called + // GetProviderSchema at least once, so has satisfied the possible requirement of `GetProviderSchemaOptional=false`. + // This means that we can return early now using the locally cached schema, without making this call again. 
+ if p.schema.Provider.Block != nil { + return p.schema + } + + resp.ResourceTypes = make(map[string]providers.Schema) + resp.DataSources = make(map[string]providers.Schema) + resp.Functions = make(map[string]providers.FunctionSpec) + + // Some providers may generate quite large schemas, and the internal default + // grpc response size limit is 4MB. 64MB should cover most any use case, and + // if we get providers nearing that we may want to consider a finer-grained + // API to fetch individual resource schemas. + // Note: this option is marked as EXPERIMENTAL in the grpc API. We keep + // this for compatibility, but recent providers all set the max message + // size much higher on the server side, which is the supported method for + // determining payload size. + const maxRecvSize = 64 << 20 + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if resp.Diagnostics.HasErrors() { + return resp + } + + if protoResp.Provider == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) + return resp + } + + resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) + if protoResp.ProviderMeta == nil { + logger.Debug("No provider meta schema returned") + } else { + resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta) + } + + for name, res := range protoResp.ResourceSchemas { + resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) + } + + for name, data := range protoResp.DataSourceSchemas { + resp.DataSources[name] = convert.ProtoToProviderSchema(data) + } + + for name, fn := range protoResp.Functions { + resp.Functions[name] = convert.ProtoToFunctionSpec(fn) + } + + if protoResp.ServerCapabilities != nil { + 
resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy + resp.ServerCapabilities.GetProviderSchemaOptional = protoResp.ServerCapabilities.GetProviderSchemaOptional + } + + // Set the global provider cache so that future calls to this provider can use the cached value. + // Crucially, this doesn't look at GetProviderSchemaOptional, because the layers above could use this cache + // *without* creating an instance of this provider. And if there is no instance, + // then we don't need to set up anything (cause there is nothing to set up), so we need no call + // to the providers GetSchema rpc. + if !p.Addr.IsZero() { + providers.SchemaCache.Set(p.Addr, resp) + } + + // Always store this here in the client for providers that are not able to use GetProviderSchemaOptional. + // Crucially, this indicates that we've made at least one call to GetProviderSchema to this instance of the provider, + // which means in the future we'll be able to return using this cache + // (because the possible setup contained in the GetProviderSchema call has happened). + // If GetProviderSchemaOptional is true then this cache won't actually ever be used, because the calls to this method + // will be satisfied by the global provider cache. 
+ p.schema = resp + + return resp +} + +func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + logger.Trace("GRPCProvider: ValidateProviderConfig") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + ty := schema.Provider.Block.ImpliedType() + + mp, err := msgpack.Marshal(r.Config, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PrepareProviderConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + config, err := decodeDynamicValue(protoResp.PreparedConfig, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PreparedConfig = config + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + logger.Trace("GRPCProvider: ValidateResourceConfig") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resourceSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if 
err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + logger.Trace("GRPCProvider: ValidateDataResourceConfig") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + logger.Trace("GRPCProvider: UpgradeResourceState") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + 
protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + ty := resSchema.Block.ImpliedType() + resp.UpgradedState = cty.NullVal(ty) + if protoResp.UpgradedState == nil { + return resp + } + + state, err := decodeDynamicValue(protoResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = state + + return resp +} + +func (p *GRPCProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + logger.Trace("GRPCProvider: ConfigureProvider") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + logger.Trace("GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + logger.Trace("GRPCProvider: ReadResource") + + schema := p.GetProviderSchema() + if 
schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	resSchema, ok := schema.ResourceTypes[r.TypeName]
	if !ok {
		// Use %q like every other method so the type name is quoted in the
		// diagnostic (was string concatenation: "unknown resource type " + r.TypeName).
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName))
		return resp
	}

	metaSchema := schema.ProviderMeta

	mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto.ReadResource_Request{
		TypeName:     r.TypeName,
		CurrentState: &proto.DynamicValue{Msgpack: mp},
		Private:      r.Private,
	}

	// The provider_meta block is optional; only send it when the provider
	// actually declared a meta schema.
	if metaSchema.Block != nil {
		metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP}
	}

	protoResp, err := p.client.ReadResource(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	resp.NewState = state
	resp.Private = protoResp.Private

	return resp
}

// PlanResourceChange asks the provider to compute a planned new state for the
// given resource, translating between the tofu and proto representations.
func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
	logger.Trace("GRPCProvider: PlanResourceChange")

	schema := p.GetProviderSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	resSchema, ok := schema.ResourceTypes[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName))
		return resp
	}

	metaSchema := schema.ProviderMeta
	capabilities := schema.ServerCapabilities

	// If the provider doesn't support 
planning a destroy operation, we can + // return immediately. + if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { + resp.PlannedState = r.ProposedNewState + resp.PlannedPrivate = r.PriorPrivate + return resp + } + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + 
+func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + logger.Trace("GRPCProvider: ApplyResourceChange") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = 
resp.Diagnostics.Append(err)
		return resp
	}
	resp.NewState = state

	resp.LegacyTypeSystem = protoResp.LegacyTypeSystem

	return resp
}

// ImportResourceState asks the provider to import resources identified by
// r.ID, returning zero or more imported resource states.
func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
	logger.Trace("GRPCProvider: ImportResourceState")

	schema := p.GetProviderSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	protoReq := &proto.ImportResourceState_Request{
		TypeName: r.TypeName,
		Id:       r.ID,
	}

	protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	for _, imported := range protoResp.ImportedResources {
		resource := providers.ImportedResource{
			TypeName: imported.TypeName,
			Private:  imported.Private,
		}

		// Look up the schema by the type the provider actually returned, not
		// the requested type: an import may return resources of other types,
		// and decoding them against the requested type's schema is wrong.
		resSchema, ok := schema.ResourceTypes[imported.TypeName]
		if !ok {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", imported.TypeName))
			continue
		}

		state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		resource.State = state
		resp.ImportedResources = append(resp.ImportedResources, resource)
	}

	return resp
}

// ReadDataSource reads the current value of the given data source.
func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
	logger.Trace("GRPCProvider: ReadDataSource")

	schema := p.GetProviderSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	dataSchema, ok := schema.DataSources[r.TypeName]
	if !ok {
		// Previously this appended the error to the discarded schema response
		// and fell through, which would nil-pointer panic below when
		// dereferencing dataSchema.Block. Report it on resp and return early.
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName))
		return resp
	}

	metaSchema := schema.ProviderMeta

	config, err := msgpack.Marshal(r.Config, 
dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.State = state + + return resp +} + +func (p *GRPCProvider) GetFunctions() (resp providers.GetFunctionsResponse) { + logger.Trace("GRPCProvider: GetFunctions") + + protoReq := &proto.GetFunctions_Request{} + + protoResp, err := p.client.GetFunctions(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + resp.Functions = make(map[string]providers.FunctionSpec) + + for name, fn := range protoResp.Functions { + resp.Functions[name] = convert.ProtoToFunctionSpec(fn) + } + + return resp +} + +func (p *GRPCProvider) CallFunction(r providers.CallFunctionRequest) (resp providers.CallFunctionResponse) { + logger.Trace("GRPCProvider: CallFunction") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + // This should be unreachable + resp.Error = schema.Diagnostics.Err() + return resp + } + + spec, ok := schema.Functions[r.Name] + if !ok { + funcs := p.GetFunctions() + if 
funcs.Diagnostics.HasErrors() { + // This should be unreachable + resp.Error = funcs.Diagnostics.Err() + return resp + } + spec, ok = funcs.Functions[r.Name] + if !ok { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: function %s not defined in provider schema", r.Name) + return resp + } + } + + protoReq := &proto.CallFunction_Request{ + Name: r.Name, + Arguments: make([]*proto.DynamicValue, len(r.Arguments)), + } + + // Translate the arguments + // As this is functionality is always sitting behind cty/function.Function, we skip some validation + // checks of from the function and param spec. We still include basic validation to prevent panics, + // just in case there are bugs in cty. See context_functions_test.go for explicit testing of argument + // handling and short-circuiting. + if len(r.Arguments) < len(spec.Parameters) { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: function %s expected %d parameters and got %d instead", r.Name, len(spec.Parameters), len(r.Arguments)) + return resp + } + + for i, arg := range r.Arguments { + var paramSpec providers.FunctionParameterSpec + if i < len(spec.Parameters) { + paramSpec = spec.Parameters[i] + } else { + // We are past the end of spec.Parameters, this is either variadic or an error + if spec.VariadicParameter != nil { + paramSpec = *spec.VariadicParameter + } else { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: too many arguments passed to non-variadic function %s", r.Name) + } + } + + if arg.IsNull() { + if paramSpec.AllowNullValue { + continue + } else { + resp.Error = &providers.CallFunctionArgumentError{ + Text: fmt.Sprintf("parameter %s is null, which is not allowed for function %s", paramSpec.Name, r.Name), + FunctionArgument: i, + } + } + + } + + encodedArg, err := msgpack.Marshal(arg, paramSpec.Type) + if err != nil { + resp.Error = err + return + } + + protoReq.Arguments[i] = 
&proto.DynamicValue{ + Msgpack: encodedArg, + } + } + + protoResp, err := p.client.CallFunction(p.ctx, protoReq) + if err != nil { + resp.Error = err + return + } + + if protoResp.Error != nil { + err := &providers.CallFunctionArgumentError{ + Text: protoResp.Error.Text, + } + if protoResp.Error.FunctionArgument != nil { + err.FunctionArgument = int(*protoResp.Error.FunctionArgument) + } + resp.Error = err + return + } + + resp.Result, resp.Error = decodeDynamicValue(protoResp.Result, spec.Return) + return +} + +// closing the grpc connection is final, and tofu will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + logger.Trace("GRPCProvider: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + logger.Debug("provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} + +// Decode a DynamicValue from either the JSON or MsgPack encoding. +func decodeDynamicValue(v *proto.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} diff --git a/pkg/plugin/grpc_provider_test.go b/pkg/plugin/grpc_provider_test.go new file mode 100644 index 00000000000..e9ac084414b --- /dev/null +++ b/pkg/plugin/grpc_provider_test.go @@ -0,0 +1,923 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "bytes" + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + + mockproto "github.com/kubegems/opentofu/pkg/plugin/mock_proto" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" +) + +var _ providers.Interface = (*GRPCProvider)(nil) + +func mockProviderClient(t *testing.T) *mockproto.MockProviderClient { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + // we always need a GetSchema method + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(providerProtoSchema(), nil) + + return client +} + +func checkDiags(t *testing.T, d tfdiags.Diagnostics) { + t.Helper() + if d.HasErrors() { + t.Fatal(d.Err()) + } +} + +// checkDiagsHasError ensures error diagnostics are present or fails the test. 
+func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) { + t.Helper() + + if !d.HasErrors() { + t.Fatal("expected error diagnostics") + } +} + +func providerProtoSchema() *proto.GetProviderSchema_Response { + return &proto.GetProviderSchema_Response{ + Provider: &proto.Schema{ + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + ResourceSchemas: map[string]*proto.Schema{ + "resource": &proto.Schema{ + Version: 1, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + }, + DataSourceSchemas: map[string]*proto.Schema{ + "data": &proto.Schema{ + Version: 1, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + }, + Functions: map[string]*proto.Function{ + "fn": &proto.Function{ + Parameters: []*proto.Function_Parameter{{ + Name: "par_a", + Type: []byte(`"string"`), + AllowNullValue: false, + AllowUnknownValues: false, + }}, + VariadicParameter: &proto.Function_Parameter{ + Name: "par_var", + Type: []byte(`"string"`), + AllowNullValue: true, + AllowUnknownValues: false, + }, + Return: &proto.Function_Return{ + Type: []byte(`"string"`), + }, + }, + }, + } +} + +func TestGRPCProvider_GetSchema(t *testing.T) { + p := &GRPCProvider{ + client: mockProviderClient(t), + } + + resp := p.GetProviderSchema() + checkDiags(t, resp.Diagnostics) +} + +// Ensure that gRPC errors are returned early. 
+// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error")) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + // The SchemaCache is global and is saved between test runs + providers.SchemaCache = providers.NewMockSchemaCache() + + providerAddr := addrs.Provider{ + Namespace: "namespace", + Type: "type", + } + + mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}} + + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Times(1).Return(&proto.GetProviderSchema_Response{ + Provider: mockedProviderResponse, + ServerCapabilities: &proto.ServerCapabilities{GetProviderSchemaOptional: true}, + }, nil) + + // Run GetProviderTwice, expect GetSchema to be called once + // Re-initialize the provider before each run to avoid usage of the local cache + p := &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp := p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } + + p = &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp = p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } +} + +func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) { + ctrl := 
gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + // The SchemaCache is global and is saved between test runs + providers.SchemaCache = providers.NewMockSchemaCache() + + providerAddr := addrs.Provider{ + Namespace: "namespace", + Type: "type", + } + + mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}} + + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Times(2).Return(&proto.GetProviderSchema_Response{ + Provider: mockedProviderResponse, + ServerCapabilities: &proto.ServerCapabilities{GetProviderSchemaOptional: false}, + }, nil) + + // Run GetProviderTwice, expect GetSchema to be called once + // Re-initialize the provider before each run to avoid usage of the local cache + p := &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp := p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } + + p = &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp = p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } +} + +// Ensure that provider error diagnostics are returned early. 
+// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{ + Diagnostics: []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_ERROR, + Summary: "error summary", + Detail: "error detail", + }, + }, + // Trigger potential panics + Provider: &proto.Schema{}, + }, nil) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +func TestGRPCProvider_PrepareProviderConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().PrepareProviderConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PrepareProviderConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg}) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateResourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateResourceTypeConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateResourceTypeConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ + TypeName: "resource", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateDataSourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateDataSourceConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateDataSourceConfig_Response{}, nil) + + cfg := 
hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ + TypeName: "data", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_UpgradeResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_Configure(t *testing.T) { + client := mockProviderClient(t) + p := 
&GRPCProvider{ + client: client, + } + + client.EXPECT().Configure( + gomock.Any(), + gomock.Any(), + ).Return(&proto.Configure_Response{}, nil) + + resp := p.ConfigureProvider(providers.ConfigureProviderRequest{ + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_Stop(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().Stop( + gomock.Any(), + gomock.Any(), + ).Return(&proto.Stop_Response{}, nil) + + err := p.Stop() + if err != nil { + t.Fatal(err) + } +} + +func TestGRPCProvider_ReadResource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadResourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, 
resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadEmptyJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(``), + }, + }, nil) + + obj := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }) + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: obj, + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.NullVal(obj.Type()) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_PlanResourceChange(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + 
"attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, 
resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_ApplyResourceChange(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} +func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + 
).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} + +func TestGRPCProvider_ImportResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { 
+ t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} +func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadDataSource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + 
+func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_CallFunction(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().CallFunction( + gomock.Any(), + gomock.Any(), + ).Return(&proto.CallFunction_Response{ + Result: &proto.DynamicValue{Json: []byte(`"foo"`)}, + }, nil) + + resp := p.CallFunction(providers.CallFunctionRequest{ + Name: "fn", + Arguments: []cty.Value{cty.StringVal("bar"), cty.NilVal}, + }) + + if resp.Error != nil { + t.Fatal(resp.Error) + } + if resp.Result != cty.StringVal("foo") { + t.Fatalf("%v", resp.Result) + } +} diff --git a/pkg/plugin/grpc_provisioner.go b/pkg/plugin/grpc_provisioner.go new file mode 100644 index 00000000000..fb12642ede2 --- /dev/null +++ b/pkg/plugin/grpc_provisioner.go @@ -0,0 +1,182 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "io" + "sync" + + plugin "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plugin/convert" + "github.com/kubegems/opentofu/pkg/provisioners" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. +type GRPCProvisionerPlugin struct { + plugin.Plugin + GRPCProvisioner func() proto.ProvisionerServer +} + +func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvisioner{ + client: proto.NewProvisionerClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) + return nil +} + +// provisioners.Interface grpc implementation +type GRPCProvisioner struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + client proto.ProvisionerClient + ctx context.Context + + mu sync.Mutex + // Cache the schema since we need it for serialization in each method call. 
+ schema *configschema.Block +} + +func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.schema != nil { + return provisioners.GetSchemaResponse{ + Provisioner: p.schema, + } + } + + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provisioner == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) + return resp + } + + resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) + + p.schema = resp.Provisioner + + return resp +} + +func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateProvisionerConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, 
schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // connection is always assumed to be a simple string map + connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ProvisionResource_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + Connection: &proto.DynamicValue{Msgpack: connMP}, + } + + outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + + for { + rcv, err := outputClient.Recv() + if rcv != nil { + r.UIOutput.Output(rcv.Output) + } + if err != nil { + if err != io.EOF { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + break + } + + if len(rcv.Diagnostics) > 0 { + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) + break + } + } + + return resp +} + +func (p *GRPCProvisioner) Stop() error { + protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) + if err != nil { + return err + } + if protoResp.Error != "" { + return errors.New(protoResp.Error) + } + return nil +} + +func (p *GRPCProvisioner) Close() error { + // check this since it's not automatically inserted during plugin creation + if p.PluginClient == nil { + logger.Debug("provisioner has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/pkg/plugin/grpc_provisioner_test.go b/pkg/plugin/grpc_provisioner_test.go new file mode 100644 index 00000000000..e51af0c91b9 --- /dev/null +++ b/pkg/plugin/grpc_provisioner_test.go @@ -0,0 +1,152 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "io" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/provisioners" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" + "github.com/zclconf/go-cty/cty" + + mockproto "github.com/kubegems/opentofu/pkg/plugin/mock_proto" +) + +var _ provisioners.Interface = (*GRPCProvisioner)(nil) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +func mockProvisionerClient(t *testing.T) *mockproto.MockProvisionerClient { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProvisionerClient(ctrl) + + // we always need a GetSchema method + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + ).Return(provisionerProtoSchema(), nil) + + return client +} + +func provisionerProtoSchema() *proto.GetProvisionerSchema_Response { + return &proto.GetProvisionerSchema_Response{ + Provisioner: &proto.Schema{ + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "attr", + Type: []byte(`"string"`), + Required: true, + }, + }, + }, + }, + } +} + +func TestGRPCProvisioner_GetSchema(t *testing.T) { + p := &GRPCProvisioner{ + client: mockProvisionerClient(t), + } + + resp := p.GetSchema() + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvisioner_ValidateProvisionerConfig(t *testing.T) { + client := mockProvisionerClient(t) + p := &GRPCProvisioner{ + client: client, + } + + client.EXPECT().ValidateProvisionerConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateProvisionerConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{Config: cfg}) + checkDiags(t, 
resp.Diagnostics) +} + +func TestGRPCProvisioner_ProvisionResource(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProvisionerClient(ctrl) + + // we always need a GetSchema method + client.EXPECT().GetSchema( + gomock.Any(), + gomock.Any(), + ).Return(provisionerProtoSchema(), nil) + + stream := mockproto.NewMockProvisioner_ProvisionResourceClient(ctrl) + stream.EXPECT().Recv().Return(&proto.ProvisionResource_Response{ + Output: "provisioned", + }, io.EOF) + + client.EXPECT().ProvisionResource( + gomock.Any(), + gomock.Any(), + ).Return(stream, nil) + + p := &GRPCProvisioner{ + client: client, + } + + rec := &provisionRecorder{} + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("value"), + }), + Connection: cty.EmptyObjectVal, + UIOutput: rec, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + if len(rec.output) == 0 || rec.output[0] != "provisioned" { + t.Fatalf("expected %q, got %q", []string{"provisioned"}, rec.output) + } +} + +type provisionRecorder struct { + output []string +} + +func (r *provisionRecorder) Output(s string) { + r.output = append(r.output, s) +} + +func TestGRPCProvisioner_Stop(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProvisionerClient(ctrl) + p := &GRPCProvisioner{ + client: client, + } + + client.EXPECT().Stop( + gomock.Any(), + gomock.Any(), + ).Return(&proto.Stop_Response{}, nil) + + err := p.Stop() + if err != nil { + t.Fatal(err) + } +} diff --git a/pkg/plugin/mock_proto/generate.go b/pkg/plugin/mock_proto/generate.go new file mode 100644 index 00000000000..954bf591932 --- /dev/null +++ b/pkg/plugin/mock_proto/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/kubegems/opentofu/pkg/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer + +package mock_tfplugin5 diff --git a/pkg/plugin/mock_proto/mock.go b/pkg/plugin/mock_proto/mock.go new file mode 100644 index 00000000000..315493feff4 --- /dev/null +++ b/pkg/plugin/mock_proto/mock.go @@ -0,0 +1,703 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/kubegems/opentofu/pkg/tfplugin5 (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer) + +// Package mock_tfplugin5 is a generated GoMock package. +package mock_tfplugin5 + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + tfplugin5 "github.com/kubegems/opentofu/pkg/tfplugin5" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" +) + +// MockProviderClient is a mock of ProviderClient interface. +type MockProviderClient struct { + ctrl *gomock.Controller + recorder *MockProviderClientMockRecorder +} + +// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. +type MockProviderClientMockRecorder struct { + mock *MockProviderClient +} + +// NewMockProviderClient creates a new mock instance. +func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { + mock := &MockProviderClient{ctrl: ctrl} + mock.recorder = &MockProviderClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { + return m.recorder +} + +// ApplyResourceChange mocks base method. 
+func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin5.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.ApplyResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin5.ApplyResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyResourceChange indicates an expected call of ApplyResourceChange. +func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) +} + +// CallFunction mocks base method. +func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin5.CallFunction_Request, arg2 ...grpc.CallOption) (*tfplugin5.CallFunction_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CallFunction", varargs...) + ret0, _ := ret[0].(*tfplugin5.CallFunction_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CallFunction indicates an expected call of CallFunction. +func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallFunction", reflect.TypeOf((*MockProviderClient)(nil).CallFunction), varargs...) +} + +// Configure mocks base method. 
+func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Configure_Request, arg2 ...grpc.CallOption) (*tfplugin5.Configure_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Configure", varargs...) + ret0, _ := ret[0].(*tfplugin5.Configure_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Configure indicates an expected call of Configure. +func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Configure", reflect.TypeOf((*MockProviderClient)(nil).Configure), varargs...) +} + +// GetFunctions mocks base method. +func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin5.GetFunctions_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetFunctions_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetFunctions", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetFunctions_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFunctions indicates an expected call of GetFunctions. +func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFunctions", reflect.TypeOf((*MockProviderClient)(nil).GetFunctions), varargs...) +} + +// GetMetadata mocks base method. 
+func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin5.GetMetadata_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetMetadata_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMetadata", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetMetadata_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata. +func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockProviderClient)(nil).GetMetadata), varargs...) +} + +// GetSchema mocks base method. +func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProviderSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSchema", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetProviderSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSchema indicates an expected call of GetSchema. +func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProviderClient)(nil).GetSchema), varargs...) +} + +// ImportResourceState mocks base method. 
+func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin5.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.ImportResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ImportResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin5.ImportResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImportResourceState indicates an expected call of ImportResourceState. +func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) +} + +// MoveResourceState mocks base method. +func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplugin5.MoveResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.MoveResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "MoveResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin5.MoveResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MoveResourceState indicates an expected call of MoveResourceState. +func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveResourceState", reflect.TypeOf((*MockProviderClient)(nil).MoveResourceState), varargs...) +} + +// PlanResourceChange mocks base method. 
+func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin5.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.PlanResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin5.PlanResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PlanResourceChange indicates an expected call of PlanResourceChange. +func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) +} + +// PrepareProviderConfig mocks base method. +func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *tfplugin5.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.PrepareProviderConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PrepareProviderConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.PrepareProviderConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareProviderConfig indicates an expected call of PrepareProviderConfig. +func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).PrepareProviderConfig), varargs...) +} + +// ReadDataSource mocks base method. 
+func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin5.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadDataSource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadDataSource", varargs...) + ret0, _ := ret[0].(*tfplugin5.ReadDataSource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDataSource indicates an expected call of ReadDataSource. +func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) +} + +// ReadResource mocks base method. +func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadResource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadResource", varargs...) + ret0, _ := ret[0].(*tfplugin5.ReadResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadResource indicates an expected call of ReadResource. +func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) +} + +// Stop mocks base method. 
+func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop. +func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProviderClient)(nil).Stop), varargs...) +} + +// UpgradeResourceState mocks base method. +func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin5.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.UpgradeResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin5.UpgradeResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpgradeResourceState indicates an expected call of UpgradeResourceState. +func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...) +} + +// ValidateDataSourceConfig mocks base method. 
+func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateDataSourceConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateDataSourceConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateDataSourceConfig indicates an expected call of ValidateDataSourceConfig. +func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataSourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataSourceConfig), varargs...) +} + +// ValidateResourceTypeConfig mocks base method. +func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *tfplugin5.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateResourceTypeConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateResourceTypeConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateResourceTypeConfig indicates an expected call of ValidateResourceTypeConfig. +func (mr *MockProviderClientMockRecorder) ValidateResourceTypeConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceTypeConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceTypeConfig), varargs...) +} + +// MockProvisionerClient is a mock of ProvisionerClient interface. +type MockProvisionerClient struct { + ctrl *gomock.Controller + recorder *MockProvisionerClientMockRecorder +} + +// MockProvisionerClientMockRecorder is the mock recorder for MockProvisionerClient. +type MockProvisionerClientMockRecorder struct { + mock *MockProvisionerClient +} + +// NewMockProvisionerClient creates a new mock instance. +func NewMockProvisionerClient(ctrl *gomock.Controller) *MockProvisionerClient { + mock := &MockProvisionerClient{ctrl: ctrl} + mock.recorder = &MockProvisionerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProvisionerClient) EXPECT() *MockProvisionerClientMockRecorder { + return m.recorder +} + +// GetSchema mocks base method. +func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProvisionerSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSchema", varargs...) + ret0, _ := ret[0].(*tfplugin5.GetProvisionerSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSchema indicates an expected call of GetSchema. +func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProvisionerClient)(nil).GetSchema), varargs...) +} + +// ProvisionResource mocks base method. 
+func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tfplugin5.ProvisionResource_Request, arg2 ...grpc.CallOption) (tfplugin5.Provisioner_ProvisionResourceClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ProvisionResource", varargs...) + ret0, _ := ret[0].(tfplugin5.Provisioner_ProvisionResourceClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProvisionResource indicates an expected call of ProvisionResource. +func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionResource", reflect.TypeOf((*MockProvisionerClient)(nil).ProvisionResource), varargs...) +} + +// Stop mocks base method. +func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stop", varargs...) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stop indicates an expected call of Stop. +func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProvisionerClient)(nil).Stop), varargs...) +} + +// ValidateProvisionerConfig mocks base method. 
+func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *tfplugin5.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateProvisionerConfig_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateProvisionerConfig", varargs...) + ret0, _ := ret[0].(*tfplugin5.ValidateProvisionerConfig_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateProvisionerConfig indicates an expected call of ValidateProvisionerConfig. +func (mr *MockProvisionerClientMockRecorder) ValidateProvisionerConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProvisionerConfig", reflect.TypeOf((*MockProvisionerClient)(nil).ValidateProvisionerConfig), varargs...) +} + +// MockProvisioner_ProvisionResourceClient is a mock of Provisioner_ProvisionResourceClient interface. +type MockProvisioner_ProvisionResourceClient struct { + ctrl *gomock.Controller + recorder *MockProvisioner_ProvisionResourceClientMockRecorder +} + +// MockProvisioner_ProvisionResourceClientMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceClient. +type MockProvisioner_ProvisionResourceClientMockRecorder struct { + mock *MockProvisioner_ProvisionResourceClient +} + +// NewMockProvisioner_ProvisionResourceClient creates a new mock instance. +func NewMockProvisioner_ProvisionResourceClient(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceClient { + mock := &MockProvisioner_ProvisionResourceClient{ctrl: ctrl} + mock.recorder = &MockProvisioner_ProvisionResourceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockProvisioner_ProvisionResourceClient) EXPECT() *MockProvisioner_ProvisionResourceClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Header)) +} + +// Recv mocks base method. 
+func (m *MockProvisioner_ProvisionResourceClient) Recv() (*tfplugin5.ProvisionResource_Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*tfplugin5.ProvisionResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).RecvMsg), arg0) +} + +// SendMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).SendMsg), arg0) +} + +// Trailer mocks base method. +func (m *MockProvisioner_ProvisionResourceClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. 
+func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).Trailer)) +} + +// MockProvisioner_ProvisionResourceServer is a mock of Provisioner_ProvisionResourceServer interface. +type MockProvisioner_ProvisionResourceServer struct { + ctrl *gomock.Controller + recorder *MockProvisioner_ProvisionResourceServerMockRecorder +} + +// MockProvisioner_ProvisionResourceServerMockRecorder is the mock recorder for MockProvisioner_ProvisionResourceServer. +type MockProvisioner_ProvisionResourceServerMockRecorder struct { + mock *MockProvisioner_ProvisionResourceServer +} + +// NewMockProvisioner_ProvisionResourceServer creates a new mock instance. +func NewMockProvisioner_ProvisionResourceServer(ctrl *gomock.Controller) *MockProvisioner_ProvisionResourceServer { + mock := &MockProvisioner_ProvisionResourceServer{ctrl: ctrl} + mock.recorder = &MockProvisioner_ProvisionResourceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProvisioner_ProvisionResourceServer) EXPECT() *MockProvisioner_ProvisionResourceServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Context)) +} + +// RecvMsg mocks base method. 
+func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecvMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).RecvMsg), arg0) +} + +// Send mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *tfplugin5.ProvisionResource_Response) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. 
+func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendMsg), arg0) +} + +// SetHeader mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockProvisioner_ProvisionResourceServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/plugin/plugin.go b/pkg/plugin/plugin.go new file mode 100644 index 00000000000..ad18a6e6048 --- /dev/null +++ b/pkg/plugin/plugin.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/plugin6" +) + +// VersionedPlugins includes both protocol 5 and 6 because this is the function +// called in providerFactory (command/meta_providers.go) to set up the initial +// plugin client config. 
+var VersionedPlugins = map[int]plugin.PluginSet{ + 5: { + "provider": &GRPCProviderPlugin{}, + "provisioner": &GRPCProvisionerPlugin{}, + }, + 6: { + "provider": &plugin6.GRPCProviderPlugin{}, + }, +} diff --git a/pkg/plugin/serve.go b/pkg/plugin/serve.go new file mode 100644 index 00000000000..7654abf7488 --- /dev/null +++ b/pkg/plugin/serve.go @@ -0,0 +1,80 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "github.com/hashicorp/go-plugin" + proto "github.com/kubegems/opentofu/pkg/tfplugin5" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" + ProvisionerPluginName = "provisioner" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify + // a particular version during their handshake. This is the version used when Terraform 0.10 + // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must + // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and + // 0.11. + DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. This should be bumped whenever a change happens in + // one or the other that makes it so that they can't safely communicate. + // This could be adding a new interface value, it could be how + // helper/schema computes diffs, etc. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. 
+ MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type GRPCProviderFunc func() proto.ProviderServer +type GRPCProvisionerFunc func() proto.ProvisionerServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + GRPCProvisionerFunc GRPCProvisionerFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, + }) +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + plugins := map[int]plugin.PluginSet{} + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { + plugins[5] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[5]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + if opts.GRPCProvisionerFunc != nil { + plugins[5]["provisioner"] = &GRPCProvisionerPlugin{ + GRPCProvisioner: opts.GRPCProvisionerFunc, + } + } + } + return plugins +} diff --git a/pkg/plugin/ui_input.go b/pkg/plugin/ui_input.go new file mode 100644 index 00000000000..3d62d46972a --- /dev/null +++ b/pkg/plugin/ui_input.go @@ -0,0 +1,57 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// UIInput is an implementation of tofu.UIInput that communicates +// over RPC. 
+type UIInput struct { + Client *rpc.Client +} + +func (i *UIInput) Input(ctx context.Context, opts *tofu.InputOpts) (string, error) { + var resp UIInputInputResponse + err := i.Client.Call("Plugin.Input", opts, &resp) + if err != nil { + return "", err + } + if resp.Error != nil { + err = resp.Error + return "", err + } + + return resp.Value, nil +} + +type UIInputInputResponse struct { + Value string + Error *plugin.BasicError +} + +// UIInputServer is a net/rpc compatible structure for serving +// a UIInputServer. This should not be used directly. +type UIInputServer struct { + UIInput tofu.UIInput +} + +func (s *UIInputServer) Input( + opts *tofu.InputOpts, + reply *UIInputInputResponse) error { + value, err := s.UIInput.Input(context.Background(), opts) + *reply = UIInputInputResponse{ + Value: value, + Error: plugin.NewBasicError(err), + } + + return nil +} diff --git a/pkg/plugin/ui_input_test.go b/pkg/plugin/ui_input_test.go new file mode 100644 index 00000000000..4cc2f5e3995 --- /dev/null +++ b/pkg/plugin/ui_input_test.go @@ -0,0 +1,55 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "reflect" + "testing" + + "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestUIInput_impl(t *testing.T) { + var _ tofu.UIInput = new(UIInput) +} + +func TestUIInput_input(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + i := new(tofu.MockUIInput) + i.InputReturnString = "foo" + + err := server.RegisterName("Plugin", &UIInputServer{ + UIInput: i, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + input := &UIInput{Client: client} + + opts := &tofu.InputOpts{ + Id: "foo", + } + + v, err := input.Input(context.Background(), opts) + if !i.InputCalled { + t.Fatal("input should be called") + } + if !reflect.DeepEqual(i.InputOpts, opts) { + t.Fatalf("bad: %#v", i.InputOpts) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + + if v != "foo" { + t.Fatalf("bad: %#v", v) + } +} diff --git a/pkg/plugin/ui_output.go b/pkg/plugin/ui_output.go new file mode 100644 index 00000000000..3aa76c40775 --- /dev/null +++ b/pkg/plugin/ui_output.go @@ -0,0 +1,34 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "net/rpc" + + "github.com/kubegems/opentofu/pkg/tofu" +) + +// UIOutput is an implementation of tofu.UIOutput that communicates +// over RPC. +type UIOutput struct { + Client *rpc.Client +} + +func (o *UIOutput) Output(v string) { + o.Client.Call("Plugin.Output", v, new(interface{})) +} + +// UIOutputServer is the RPC server for serving UIOutput. 
+type UIOutputServer struct { + UIOutput tofu.UIOutput +} + +func (s *UIOutputServer) Output( + v string, + reply *interface{}) error { + s.UIOutput.Output(v) + return nil +} diff --git a/pkg/plugin/ui_output_test.go b/pkg/plugin/ui_output_test.go new file mode 100644 index 00000000000..ce0ad19fe9b --- /dev/null +++ b/pkg/plugin/ui_output_test.go @@ -0,0 +1,40 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "testing" + + "github.com/hashicorp/go-plugin" + "github.com/kubegems/opentofu/pkg/tofu" +) + +func TestUIOutput_impl(t *testing.T) { + var _ tofu.UIOutput = new(UIOutput) +} + +func TestUIOutput_input(t *testing.T) { + client, server := plugin.TestRPCConn(t) + defer client.Close() + + o := new(tofu.MockUIOutput) + + err := server.RegisterName("Plugin", &UIOutputServer{ + UIOutput: o, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + output := &UIOutput{Client: client} + output.Output("foo") + if !o.OutputCalled { + t.Fatal("output should be called") + } + if o.OutputMessage != "foo" { + t.Fatalf("bad: %#v", o.OutputMessage) + } +} diff --git a/pkg/plugin6/convert/diagnostics.go b/pkg/plugin6/convert/diagnostics.go new file mode 100644 index 00000000000..8e421b7e87b --- /dev/null +++ b/pkg/plugin6/convert/diagnostics.go @@ -0,0 +1,137 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" + proto "github.com/kubegems/opentofu/pkg/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrsToProto converts the warnings and errors returned by the legacy +// provider to protobuf diagnostics. 
+func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
+	for _, w := range warns {
+		diags = AppendProtoDiag(diags, w)
+	}
+
+	for _, e := range errs {
+		diags = AppendProtoDiag(diags, e)
+	}
+
+	return diags
+}
+
+// AppendProtoDiag appends a new diagnostic from a warning string or an error.
+// A string is recorded as a warning and an error as an error; a cty.PathError
+// additionally carries its path as the diagnostic's attribute. Existing
+// *proto.Diagnostic and []*proto.Diagnostic values are appended as-is. Any
+// other type is silently ignored.
+func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
+	switch d := d.(type) {
+	case cty.PathError:
+		ap := PathToAttributePath(d.Path)
+		diags = append(diags, &proto.Diagnostic{
+			Severity:  proto.Diagnostic_ERROR,
+			Summary:   d.Error(),
+			Attribute: ap,
+		})
+	case error:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_ERROR,
+			Summary:  d.Error(),
+		})
+	case string:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_WARNING,
+			Summary:  d,
+		})
+	case *proto.Diagnostic:
+		diags = append(diags, d)
+	case []*proto.Diagnostic:
+		diags = append(diags, d...)
+	}
+	return diags
+}
+
+// ProtoToDiagnostics converts a list of proto.Diagnostics to a tfdiags.Diagnostics.
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	for _, d := range ds {
+		var severity tfdiags.Severity
+
+		switch d.Severity {
+		case proto.Diagnostic_ERROR:
+			severity = tfdiags.Error
+		case proto.Diagnostic_WARNING:
+			severity = tfdiags.Warning
+		}
+
+		var newDiag tfdiags.Diagnostic
+
+		// if there's an attribute path, we need to create an AttributeValue diagnostic
+		if d.Attribute != nil {
+			path := AttributePathToPath(d.Attribute)
+			newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
+		} else {
+			newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
+		}
+
+		diags = diags.Append(newDiag)
+	}
+
+	return diags
+}
+
+// AttributePathToPath takes the proto encoded path and converts it to a cty.Path.
+// Steps with an unknown selector type are skipped.
+func AttributePathToPath(ap *proto.AttributePath) cty.Path {
+	var p cty.Path
+	for _, step := range ap.Steps {
+		switch selector := step.Selector.(type) {
+		case *proto.AttributePath_Step_AttributeName:
+			p = p.GetAttr(selector.AttributeName)
+		case *proto.AttributePath_Step_ElementKeyString:
+			p = p.Index(cty.StringVal(selector.ElementKeyString))
+		case *proto.AttributePath_Step_ElementKeyInt:
+			p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
+		}
+	}
+	return p
+}
+
+// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path.
+func PathToAttributePath(p cty.Path) *proto.AttributePath {
+	ap := &proto.AttributePath{}
+	for _, step := range p {
+		switch selector := step.(type) {
+		case cty.GetAttrStep:
+			ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+				Selector: &proto.AttributePath_Step_AttributeName{
+					AttributeName: selector.Name,
+				},
+			})
+		case cty.IndexStep:
+			key := selector.Key
+			switch key.Type() {
+			case cty.String:
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyString{
+						ElementKeyString: key.AsString(),
+					},
+				})
+			case cty.Number:
+				// integer index keys only; the fractional part, if any, is discarded
+				v, _ := key.AsBigFloat().Int64()
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyInt{
+						ElementKeyInt: v,
+					},
+				})
+			default:
+				// We'll bail early if we encounter anything else, and just
+				// return the valid prefix.
+				return ap
+			}
+		}
+	}
+	return ap
+}
diff --git a/pkg/plugin6/convert/diagnostics_test.go b/pkg/plugin6/convert/diagnostics_test.go
new file mode 100644
index 00000000000..5d6cef5faa6
--- /dev/null
+++ b/pkg/plugin6/convert/diagnostics_test.go
@@ -0,0 +1,372 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/tfdiags" + proto "github.com/kubegems/opentofu/pkg/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +var ignoreUnexported = cmpopts.IgnoreUnexported( + proto.Diagnostic{}, + proto.Schema_Block{}, + proto.Schema_NestedBlock{}, + proto.Schema_Attribute{}, +) + +func TestProtoDiagnostics(t *testing.T) { + diags := WarnsAndErrsToProto( + []string{ + "warning 1", + "warning 2", + }, + []error{ + errors.New("error 1"), + errors.New("error 2"), + }, + ) + + expected := []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 1", + }, + { + Severity: proto.Diagnostic_WARNING, + Summary: "warning 2", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + }, + { + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + }, + } + + if !cmp.Equal(expected, diags, ignoreUnexported) { + t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) + } +} + +func TestDiagnostics(t *testing.T) { + type diagFlat struct { + Severity tfdiags.Severity + Attr []interface{} + Summary string + Detail string + } + + tests := map[string]struct { + Cons func([]*proto.Diagnostic) []*proto.Diagnostic + Want []diagFlat + }{ + "nil": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return diags + }, + nil, + }, + "error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "simple error", + }, + }, + }, + "detailed error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "simple error", + Detail: "detailed error", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: 
"simple error", + Detail: "detailed error", + }, + }, + }, + "warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + }, + }, + }, + "detailed warning": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + return append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "simple warning", + Detail: "detailed warning", + }) + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "simple warning", + Detail: "detailed warning", + }, + }, + }, + "multi error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "first error", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "second error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "first error", + }, + { + Severity: tfdiags.Error, + Summary: "second error", + }, + }, + }, + "warning and error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + }, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + }) + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Warning, + Summary: "warning", + }, + { + Severity: tfdiags.Error, + Summary: "error", + }, + }, + }, + "attr error": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error", + Detail: "error detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attribute_name", + }, + }, + }, + }, + }) + return diags + }, + []diagFlat{ + { + Severity: 
tfdiags.Error, + Summary: "error", + Detail: "error detail", + Attr: []interface{}{"attribute_name"}, + }, + }, + }, + "multi attr": { + func(diags []*proto.Diagnostic) []*proto.Diagnostic { + diags = append(diags, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 1", + Detail: "error 1 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 2", + Detail: "error 2 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: "warning", + Detail: "warning detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: 1, + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "error 3", + Detail: "error 3 detail", + Attribute: &proto.AttributePath{ + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + { + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: "idx", + }, + }, + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "sub", + }, + }, + }, + }, + }, + ) + + return diags + }, + []diagFlat{ + { + Severity: tfdiags.Error, + Summary: "error 1", + Detail: "error 1 detail", + Attr: []interface{}{"attr"}, + }, + { + Severity: 
tfdiags.Error, + Summary: "error 2", + Detail: "error 2 detail", + Attr: []interface{}{"attr", "sub"}, + }, + { + Severity: tfdiags.Warning, + Summary: "warning", + Detail: "warning detail", + Attr: []interface{}{"attr", 1, "sub"}, + }, + { + Severity: tfdiags.Error, + Summary: "error 3", + Detail: "error 3 detail", + Attr: []interface{}{"attr", "idx", "sub"}, + }, + }, + }, + } + + flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat { + var flat []diagFlat + for _, item := range ds { + desc := item.Description() + + var attr []interface{} + + for _, a := range tfdiags.GetAttribute(item) { + switch step := a.(type) { + case cty.GetAttrStep: + attr = append(attr, step.Name) + case cty.IndexStep: + switch step.Key.Type() { + case cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + attr = append(attr, int(i)) + case cty.String: + attr = append(attr, step.Key.AsString()) + } + } + } + + flat = append(flat, diagFlat{ + Severity: item.Severity(), + Attr: attr, + Summary: desc.Summary, + Detail: desc.Detail, + }) + } + return flat + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + // we take the + tfDiags := ProtoToDiagnostics(tc.Cons(nil)) + + flat := flattenTFDiags(tfDiags) + + if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} diff --git a/pkg/plugin6/convert/function.go b/pkg/plugin6/convert/function.go new file mode 100644 index 00000000000..2e46373c980 --- /dev/null +++ b/pkg/plugin6/convert/function.go @@ -0,0 +1,68 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +func ProtoToCtyType(in []byte) cty.Type { + var out cty.Type + if err := json.Unmarshal(in, &out); err != nil { + panic(err) + } + return out +} + +func ProtoToTextFormatting(proto tfplugin6.StringKind) providers.TextFormatting { + switch proto { + case tfplugin6.StringKind_PLAIN: + return providers.TextFormattingPlain + case tfplugin6.StringKind_MARKDOWN: + return providers.TextFormattingMarkdown + default: + panic(fmt.Sprintf("Invalid text tfplugin6.StringKind %v", proto)) + } +} + +func ProtoToFunctionParameterSpec(proto *tfplugin6.Function_Parameter) providers.FunctionParameterSpec { + return providers.FunctionParameterSpec{ + Name: proto.Name, + Type: ProtoToCtyType(proto.Type), + AllowNullValue: proto.AllowNullValue, + AllowUnknownValues: proto.AllowUnknownValues, + Description: proto.Description, + DescriptionFormat: ProtoToTextFormatting(proto.DescriptionKind), + } +} + +func ProtoToFunctionSpec(proto *tfplugin6.Function) providers.FunctionSpec { + params := make([]providers.FunctionParameterSpec, len(proto.Parameters)) + for i, param := range proto.Parameters { + params[i] = ProtoToFunctionParameterSpec(param) + } + + var varParam *providers.FunctionParameterSpec + if proto.VariadicParameter != nil { + param := ProtoToFunctionParameterSpec(proto.VariadicParameter) + varParam = ¶m + } + + return providers.FunctionSpec{ + Parameters: params, + VariadicParameter: varParam, + Return: ProtoToCtyType(proto.Return.Type), + Summary: proto.Summary, + Description: proto.Description, + DescriptionFormat: ProtoToTextFormatting(proto.DescriptionKind), + DeprecationMessage: proto.DeprecationMessage, + } +} diff --git a/pkg/plugin6/convert/schema.go b/pkg/plugin6/convert/schema.go new file mode 100644 index 00000000000..c3eee557867 --- /dev/null 
+++ b/pkg/plugin6/convert/schema.go @@ -0,0 +1,302 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "encoding/json" + "reflect" + "sort" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + proto "github.com/kubegems/opentofu/pkg/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. +func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != cty.NilType { + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + attr.Type = ty + } + + if a.NestedType != nil { + attr.NestedType = configschemaObjectToProto(a.NestedType) + } + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) proto.StringKind { + switch k { + default: + return proto.StringKind_PLAIN + case configschema.StringMarkdown: + return proto.StringKind_MARKDOWN + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case 
configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. +func ProtoToProviderSchema(s *proto.Schema) providers.Schema { + return providers.Schema{ + Version: s.Version, + Block: ProtoToConfigSchema(s.Block), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a tofu *configschema.Block. +func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != nil { + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + } + + if a.NestedType != nil { + attr.NestedType = protoObjectToConfigSchema(a.NestedType) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k 
proto.StringKind) configschema.StringKind { + switch k { + default: + return configschema.StringPlain + case proto.StringKind_MARKDOWN: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: + nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +func protoObjectToConfigSchema(b *proto.Schema_Object) *configschema.Object { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_Object_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_Object_LIST: + nesting = configschema.NestingList + case proto.Schema_Object_MAP: + nesting = configschema.NestingMap + case proto.Schema_Object_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. 
+ } + + object := &configschema.Object{ + Attributes: make(map[string]*configschema.Attribute), + Nesting: nesting, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type != nil { + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + } + + if a.NestedType != nil { + attr.NestedType = protoObjectToConfigSchema(a.NestedType) + } + + object.Attributes[a.Name] = attr + } + + return object +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} + +func configschemaObjectToProto(b *configschema.Object) *proto.Schema_Object { + var nesting proto.Schema_Object_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_Object_SINGLE + case configschema.NestingList: + nesting = proto.Schema_Object_LIST + case configschema.NestingSet: + nesting = proto.Schema_Object_SET + case configschema.NestingMap: + nesting = proto.Schema_Object_MAP + default: + nesting = proto.Schema_Object_INVALID + } + + attributes := make([]*proto.Schema_Attribute, 0, len(b.Attributes)) + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if a.Type 
!= cty.NilType { + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + attr.Type = ty + } + + if a.NestedType != nil { + attr.NestedType = configschemaObjectToProto(a.NestedType) + } + + attributes = append(attributes, attr) + } + + return &proto.Schema_Object{ + Attributes: attributes, + Nesting: nesting, + } +} diff --git a/pkg/plugin6/convert/schema_test.go b/pkg/plugin6/convert/schema_test.go new file mode 100644 index 00000000000..6962722b9cd --- /dev/null +++ b/pkg/plugin6/convert/schema_test.go @@ -0,0 +1,571 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package convert + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/kubegems/opentofu/pkg/configs/configschema" + proto "github.com/kubegems/opentofu/pkg/tfplugin6" + "github.com/zclconf/go-cty/cty" +) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + +// Test that we can convert configschema to protobuf types and back again. 
+func TestConvertSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Block *proto.Schema_Block + Want *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + { + Name: "nested_type", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + Required: true, + }, + { + Name: "deeply_nested_type", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "first_level", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SINGLE, + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + Computed: true, + }, + }, + }, + Required: true, + }, + { + Name: "nested_list", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_LIST, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, 
+ Required: true, + }, + { + Name: "nested_set", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_SET, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, + Required: true, + }, + { + Name: "nested_map", + NestedType: &proto.Schema_Object{ + Nesting: proto.Schema_Object_MAP, + Attributes: []*proto.Schema_Attribute{ + { + Name: "required", + Type: []byte(`"string"`), + Computed: true, + }, + }, + }, + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + "nested_type": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + Nesting: configschema.NestingSingle, + }, + Required: true, + }, + "deeply_nested_type": { + NestedType: &configschema.Object{ + Attributes: map[string]*configschema.Attribute{ + "first_level": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSingle, + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + Computed: true, + }, + }, + Nesting: configschema.NestingSingle, + }, + Required: true, + }, + "nested_list": { + 
NestedType: &configschema.Object{ + Nesting: configschema.NestingList, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + "nested_map": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingMap, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + "nested_set": { + NestedType: &configschema.Object{ + Nesting: configschema.NestingSet, + Attributes: map[string]*configschema.Attribute{ + "required": { + Type: cty.String, + Computed: true, + }, + }, + }, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + 
Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ProtoToConfigSchema(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty)) + } + }) + } +} + +// Test that we can convert configschema to protobuf types and back again. 
+func TestConvertProtoSchemaBlocks(t *testing.T) { + tests := map[string]struct { + Want *proto.Schema_Block + Block *configschema.Block + }{ + "attributes": { + &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "computed", + Type: []byte(`["list","bool"]`), + Computed: true, + }, + { + Name: "optional", + Type: []byte(`"string"`), + Optional: true, + }, + { + Name: "optional_computed", + Type: []byte(`["map","bool"]`), + Optional: true, + Computed: true, + }, + { + Name: "required", + Type: []byte(`"number"`), + Required: true, + }, + }, + }, + &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "computed": { + Type: cty.List(cty.Bool), + Computed: true, + }, + "optional": { + Type: cty.String, + Optional: true, + }, + "optional_computed": { + Type: cty.Map(cty.Bool), + Optional: true, + Computed: true, + }, + "required": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + "blocks": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "map", + Nesting: proto.Schema_NestedBlock_MAP, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + Attributes: []*proto.Schema_Attribute{ + { + Name: "foo", + Type: []byte(`"dynamic"`), + Required: true, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + }, + "map": &configschema.NestedBlock{ + Nesting: configschema.NestingMap, + }, + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": { + Type: cty.DynamicPseudoType, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "deep block nesting": { + &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "single", + Nesting: proto.Schema_NestedBlock_SINGLE, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "list", + Nesting: proto.Schema_NestedBlock_LIST, + Block: &proto.Schema_Block{ + BlockTypes: []*proto.Schema_NestedBlock{ + { + TypeName: "set", + Nesting: proto.Schema_NestedBlock_SET, + Block: &proto.Schema_Block{}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "single": &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "list": &configschema.NestedBlock{ + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "set": &configschema.NestedBlock{ + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + converted := ConfigSchemaToProto(tc.Block) + if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) + } + }) + } +} diff --git a/pkg/plugin6/doc.go b/pkg/plugin6/doc.go new file mode 100644 index 00000000000..6c008f5c710 --- /dev/null +++ b/pkg/plugin6/doc.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin6 + +// plugin6 builds on types in package plugin to include support for plugin +// protocol v6. The main gRPC functions use by tofu (and initialized in +// init.go), such as Serve, are in the plugin package. 
The version of those +// functions in this package are used by various mocks and in tests. + +// When provider protocol v5 is deprecated, some functions may need to be moved +// here, or the existing functions updated, before removing the plugin pacakge. diff --git a/pkg/plugin6/grpc_error.go b/pkg/plugin6/grpc_error.go new file mode 100644 index 00000000000..918efefcdf7 --- /dev/null +++ b/pkg/plugin6/grpc_error.go @@ -0,0 +1,79 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin6 + +import ( + "fmt" + "path" + "runtime" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcErr extracts some known error types and formats them into better +// representations for core. This must only be called from plugin methods. +// Since we don't use RPC status errors for the plugin protocol, these do not +// contain any useful details, and we can return some text that at least +// indicates the plugin call and possible error condition. +func grpcErr(err error) (diags tfdiags.Diagnostics) { + if err == nil { + return + } + + // extract the method name from the caller. + pc, _, _, ok := runtime.Caller(1) + if !ok { + logger.Error("unknown grpc call", "error", err) + return diags.Append(err) + } + + f := runtime.FuncForPC(pc) + + // Function names will contain the full import path. Take the last + // segment, which will let users know which method was being called. + _, requestName := path.Split(f.Name()) + + // Here we can at least correlate the error in the logs to a particular binary. + logger.Error(requestName, "error", err) + + // TODO: while this expands the error codes into somewhat better messages, + // this still does not easily link the error to an actual user-recognizable + // plugin. 
The grpc plugin does not know its configured name, and the + // errors are in a list of diagnostics, making it hard for the caller to + // annotate the returned errors. + switch status.Code(err) { + case codes.Unavailable: + // This case is when the plugin has stopped running for some reason, + // and is usually the result of a crash. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plugin did not respond", + fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. "+ + "The plugin logs may contain more details.", requestName), + )) + case codes.Canceled: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Request cancelled", + fmt.Sprintf("The %s request was cancelled.", requestName), + )) + case codes.Unimplemented: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported plugin method", + fmt.Sprintf("The %s method is not supported by this plugin.", requestName), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Plugin error", + fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err), + )) + } + return +} diff --git a/pkg/plugin6/grpc_provider.go b/pkg/plugin6/grpc_provider.go new file mode 100644 index 00000000000..af1303580bc --- /dev/null +++ b/pkg/plugin6/grpc_provider.go @@ -0,0 +1,843 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package plugin6

import (
	"context"
	"errors"
	"fmt"
	"sync"

	"github.com/zclconf/go-cty/cty"

	plugin "github.com/hashicorp/go-plugin"
	ctyjson "github.com/zclconf/go-cty/cty/json"
	"github.com/zclconf/go-cty/cty/msgpack"
	"google.golang.org/grpc"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/logging"
	"github.com/kubegems/opentofu/pkg/plugin6/convert"
	"github.com/kubegems/opentofu/pkg/providers"
	proto6 "github.com/kubegems/opentofu/pkg/tfplugin6"
)

var logger = logging.HCLogger()

// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
type GRPCProviderPlugin struct {
	plugin.Plugin
	// GRPCProvider constructs the server implementation when this process
	// is acting as the plugin (server) side.
	GRPCProvider func() proto6.ProviderServer
}

// GRPCClient returns the client-side (core) wrapper over the grpc connection.
func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	return &GRPCProvider{
		client: proto6.NewProviderClient(c),
		ctx:    ctx,
	}, nil
}

// GRPCServer registers the provider implementation on the given grpc server.
func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	proto6.RegisterProviderServer(s, p.GRPCProvider())
	return nil
}

// GRPCProvider handles the client, or core side of the plugin rpc connection.
// The GRPCProvider methods are mostly a translation layer between the
// tofu providers types and the grpc proto types, directly converting
// between the two.
type GRPCProvider struct {
	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
	// This allows the GRPCProvider a way to shutdown the plugin process.
	PluginClient *plugin.Client

	// TestServer contains a grpc.Server to close when the GRPCProvider is being
	// used in an end to end test of a provider.
	TestServer *grpc.Server

	// Addr uniquely identifies the type of provider.
	// Normally executed providers will have this set during initialization,
	// but it may not always be available for alternative execute modes.
	Addr addrs.Provider

	// Proto client use to make the grpc service calls.
	client proto6.ProviderClient

	// this context is created by the plugin package, and is canceled when the
	// plugin process ends.
	ctx context.Context

	// mu guards the local schema cache below.
	mu sync.Mutex
	// schema stores the schema for this provider. This is used to properly
	// serialize the requests for schemas.
	schema providers.GetProviderSchemaResponse
}

var _ providers.Interface = new(GRPCProvider)

// GetProviderSchema returns the provider's full schema, serving from a
// global per-provider-address cache or a local per-instance cache when
// possible, and otherwise issuing the GetProviderSchema RPC.
func (p *GRPCProvider) GetProviderSchema() (resp providers.GetProviderSchemaResponse) {
	logger.Trace("GRPCProvider.v6: GetProviderSchema")
	p.mu.Lock()
	defer p.mu.Unlock()

	// First, we check the global cache.
	// The cache could contain this schema if an instance of this provider has previously been started.
	if !p.Addr.IsZero() {
		// Even if the schema is cached, GetProviderSchemaOptional could be false. This would indicate that once instantiated,
		// this provider requires the get schema call to be made at least once, as it handles part of the provider's setup.
		// At this point, we don't know if this is the first call to a provider instance or not, so we don't use the result in that case.
		if schemaCached, ok := providers.SchemaCache.Get(p.Addr); ok && schemaCached.ServerCapabilities.GetProviderSchemaOptional {
			logger.Trace("GRPCProvider: GetProviderSchema: serving from global schema cache", "address", p.Addr)
			return schemaCached
		}
	}

	// If the local cache is non-zero, we know this instance has called
	// GetProviderSchema at least once, so has satisfied the possible requirement of `GetProviderSchemaOptional=false`.
	// This means that we can return early now using the locally cached schema, without making this call again.
	if p.schema.Provider.Block != nil {
		return p.schema
	}

	resp.ResourceTypes = make(map[string]providers.Schema)
	resp.DataSources = make(map[string]providers.Schema)
	resp.Functions = make(map[string]providers.FunctionSpec)

	// Some providers may generate quite large schemas, and the internal default
	// grpc response size limit is 4MB. 64MB should cover most any use case, and
	// if we get providers nearing that we may want to consider a finer-grained
	// API to fetch individual resource schemas.
	// Note: this option is marked as EXPERIMENTAL in the grpc API. We keep
	// this for compatibility, but recent providers all set the max message
	// size much higher on the server side, which is the supported method for
	// determining payload size.
	const maxRecvSize = 64 << 20
	protoResp, err := p.client.GetProviderSchema(p.ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}

	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	if resp.Diagnostics.HasErrors() {
		return resp
	}

	if protoResp.Provider == nil {
		resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
		return resp
	}

	resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
	if protoResp.ProviderMeta == nil {
		logger.Debug("No provider meta schema returned")
	} else {
		resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta)
	}

	for name, res := range protoResp.ResourceSchemas {
		resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
	}

	for name, data := range protoResp.DataSourceSchemas {
		resp.DataSources[name] = convert.ProtoToProviderSchema(data)
	}

	for name, fn := range protoResp.Functions {
		resp.Functions[name] = convert.ProtoToFunctionSpec(fn)
	}

	if protoResp.ServerCapabilities != nil {
		resp.ServerCapabilities.PlanDestroy = protoResp.ServerCapabilities.PlanDestroy
		resp.ServerCapabilities.GetProviderSchemaOptional = protoResp.ServerCapabilities.GetProviderSchemaOptional
	}

	// Set the global provider cache so that future calls to this provider can use the cached value.
	// Crucially, this doesn't look at GetProviderSchemaOptional, because the layers above could use this cache
	// *without* creating an instance of this provider. And if there is no instance,
	// then we don't need to set up anything (cause there is nothing to set up), so we need no call
	// to the providers GetSchema rpc.
	if !p.Addr.IsZero() {
		providers.SchemaCache.Set(p.Addr, resp)
	}

	// Always store this here in the client for providers that are not able to use GetProviderSchemaOptional.
	// Crucially, this indicates that we've made at least one call to GetProviderSchema to this instance of the provider,
	// which means in the future we'll be able to return using this cache
	// (because the possible setup contained in the GetProviderSchema call has happened).
	// If GetProviderSchemaOptional is true then this cache won't actually ever be used, because the calls to this method
	// will be satisfied by the global provider cache.
	p.schema = resp

	return resp
}

// ValidateProviderConfig asks the provider to validate the given provider
// configuration against its schema.
func (p *GRPCProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
	logger.Trace("GRPCProvider.v6: ValidateProviderConfig")

	schema := p.GetProviderSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	ty := schema.Provider.Block.ImpliedType()

	// The config is sent over the wire msgpack-encoded against the schema's
	// implied cty type.
	mp, err := msgpack.Marshal(r.Config, ty)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto6.ValidateProviderConfig_Request{
		Config: &proto6.DynamicValue{Msgpack: mp},
	}

	protoResp, err := p.client.ValidateProviderConfig(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}

	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
	return resp
}

// ValidateResourceConfig asks the provider to validate a managed resource
// configuration against the named resource type's schema.
func (p *GRPCProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) {
	logger.Trace("GRPCProvider.v6: ValidateResourceConfig")

	schema := p.GetProviderSchema()
	if schema.Diagnostics.HasErrors() {
		resp.Diagnostics = schema.Diagnostics
		return resp
	}

	resourceSchema, ok := schema.ResourceTypes[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName))
		return resp
	}

	mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto6.ValidateResourceConfig_Request{
		TypeName: r.TypeName,
		Config:   &proto6.DynamicValue{Msgpack: mp},
	}

	protoResp, err := p.client.ValidateResourceConfig(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}

	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
	return resp
}
resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + logger.Trace("GRPCProvider.v6: ValidateDataResourceConfig") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + return resp + } + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ValidateDataResourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto6.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataResourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + logger.Trace("GRPCProvider.v6: UpgradeResourceState") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + protoReq := &proto6.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto6.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + 
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + ty := resSchema.Block.ImpliedType() + resp.UpgradedState = cty.NullVal(ty) + if protoResp.UpgradedState == nil { + return resp + } + + state, err := decodeDynamicValue(protoResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = state + + return resp +} + +func (p *GRPCProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + logger.Trace("GRPCProvider.v6: ConfigureProvider") + + schema := p.GetProviderSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ConfigureProvider_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto6.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.ConfigureProvider(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + logger.Trace("GRPCProvider.v6: Stop") + + resp, err := p.client.StopProvider(p.ctx, new(proto6.StopProvider_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + logger.Trace("GRPCProvider.v6: ReadResource") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := 
schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type " + r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto6.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + logger.Trace("GRPCProvider.v6: PlanResourceChange") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + capabilities := schema.ServerCapabilities + + // If the provider doesn't support planning a destroy operation, we can + // return immediately. 
+ if r.ProposedNewState.IsNull() && !capabilities.PlanDestroy { + resp.PlannedState = r.ProposedNewState + resp.PlannedPrivate = r.PriorPrivate + return resp + } + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto6.DynamicValue{Msgpack: priorMP}, + Config: &proto6.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto6.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + logger.Trace("GRPCProvider.v6: ApplyResourceChange") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + return resp + } + + metaSchema := schema.ProviderMeta + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto6.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto6.DynamicValue{Msgpack: plannedMP}, + Config: &proto6.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return 
resp + } + resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + logger.Trace("GRPCProvider.v6: ImportResourceState") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + protoReq := &proto6.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema, ok := schema.ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TypeName)) + continue + } + + state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + logger.Trace("GRPCProvider.v6: ReadDataSource") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = schema.Diagnostics + return resp + } + + dataSchema, ok := schema.DataSources[r.TypeName] + if !ok { + schema.Diagnostics = schema.Diagnostics.Append(fmt.Errorf("unknown data source %q", r.TypeName)) + } + + metaSchema := schema.ProviderMeta + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err 
!= nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto6.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto6.DynamicValue{ + Msgpack: config, + }, + } + + if metaSchema.Block != nil { + metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP} + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.State = state + + return resp +} + +func (p *GRPCProvider) GetFunctions() (resp providers.GetFunctionsResponse) { + logger.Trace("GRPCProvider6: GetFunctions") + + protoReq := &proto6.GetFunctions_Request{} + + protoResp, err := p.client.GetFunctions(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err)) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + resp.Functions = make(map[string]providers.FunctionSpec) + + for name, fn := range protoResp.Functions { + resp.Functions[name] = convert.ProtoToFunctionSpec(fn) + } + + return resp +} + +func (p *GRPCProvider) CallFunction(r providers.CallFunctionRequest) (resp providers.CallFunctionResponse) { + logger.Trace("GRPCProvider6: CallFunction") + + schema := p.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + // This should be unreachable + resp.Error = schema.Diagnostics.Err() + return resp + } + + spec, ok := schema.Functions[r.Name] + if !ok { + funcs := p.GetFunctions() + if funcs.Diagnostics.HasErrors() { + // 
This should be unreachable + resp.Error = funcs.Diagnostics.Err() + return resp + } + spec, ok = funcs.Functions[r.Name] + if !ok { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: function %s not defined in provider schema", r.Name) + return resp + } + } + + protoReq := &proto6.CallFunction_Request{ + Name: r.Name, + Arguments: make([]*proto6.DynamicValue, len(r.Arguments)), + } + + // Translate the arguments + // As this is functionality is always sitting behind cty/function.Function, we skip some validation + // checks of from the function and param spec. We still include basic validation to prevent panics, + // just in case there are bugs in cty. See context_functions_test.go for explicit testing of argument + // handling and short-circuiting. + if len(r.Arguments) < len(spec.Parameters) { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: function %s expected %d parameters and got %d instead", r.Name, len(spec.Parameters), len(r.Arguments)) + return resp + } + + for i, arg := range r.Arguments { + var paramSpec providers.FunctionParameterSpec + if i < len(spec.Parameters) { + paramSpec = spec.Parameters[i] + } else { + // We are past the end of spec.Parameters, this is either variadic or an error + if spec.VariadicParameter != nil { + paramSpec = *spec.VariadicParameter + } else { + // This should be unreachable + resp.Error = fmt.Errorf("invalid CallFunctionRequest: too many arguments passed to non-variadic function %s", r.Name) + } + } + + if arg.IsNull() { + if paramSpec.AllowNullValue { + continue + } else { + resp.Error = &providers.CallFunctionArgumentError{ + Text: fmt.Sprintf("parameter %s is null, which is not allowed for function %s", paramSpec.Name, r.Name), + FunctionArgument: i, + } + } + + } + + encodedArg, err := msgpack.Marshal(arg, paramSpec.Type) + if err != nil { + resp.Error = err + return + } + + protoReq.Arguments[i] = &proto6.DynamicValue{ + Msgpack: encodedArg, 
+ } + } + + protoResp, err := p.client.CallFunction(p.ctx, protoReq) + if err != nil { + resp.Error = err + return + } + + if protoResp.Error != nil { + err := &providers.CallFunctionArgumentError{ + Text: protoResp.Error.Text, + } + if protoResp.Error.FunctionArgument != nil { + err.FunctionArgument = int(*protoResp.Error.FunctionArgument) + } + resp.Error = err + return + } + + resp.Result, resp.Error = decodeDynamicValue(protoResp.Result, spec.Return) + return +} + +// closing the grpc connection is final, and tofu will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + logger.Trace("GRPCProvider.v6: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + logger.Debug("provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} + +// Decode a DynamicValue from either the JSON or MsgPack encoding. +func decodeDynamicValue(v *proto6.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} diff --git a/pkg/plugin6/grpc_provider_test.go b/pkg/plugin6/grpc_provider_test.go new file mode 100644 index 00000000000..c8e8f37cd19 --- /dev/null +++ b/pkg/plugin6/grpc_provider_test.go @@ -0,0 +1,930 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package plugin6

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/kubegems/opentofu/pkg/addrs"

	"github.com/golang/mock/gomock"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/kubegems/opentofu/pkg/configs/hcl2shim"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/tfdiags"
	"github.com/zclconf/go-cty/cty"

	mockproto "github.com/kubegems/opentofu/pkg/plugin6/mock_proto"
	proto "github.com/kubegems/opentofu/pkg/tfplugin6"
)

// Compile-time assertion that GRPCProvider satisfies providers.Interface.
var _ providers.Interface = (*GRPCProvider)(nil)

// Shared cmp options for comparing cty types/values in these tests.
var (
	equateEmpty   = cmpopts.EquateEmpty()
	typeComparer  = cmp.Comparer(cty.Type.Equals)
	valueComparer = cmp.Comparer(cty.Value.RawEquals)
)

// mockProviderClient returns a gomock ProviderClient pre-stubbed with the
// canonical test schema, since almost every provider method begins with a
// GetProviderSchema call.
func mockProviderClient(t *testing.T) *mockproto.MockProviderClient {
	ctrl := gomock.NewController(t)
	client := mockproto.NewMockProviderClient(ctrl)

	// we always need a GetSchema method
	client.EXPECT().GetProviderSchema(
		gomock.Any(),
		gomock.Any(),
		gomock.Any(),
	).Return(providerProtoSchema(), nil)

	return client
}

// checkDiags fails the test if d contains any error diagnostics.
func checkDiags(t *testing.T, d tfdiags.Diagnostics) {
	t.Helper()
	if d.HasErrors() {
		t.Fatal(d.Err())
	}
}

// checkDiagsHasError ensures error diagnostics are present or fails the test.
func checkDiagsHasError(t *testing.T, d tfdiags.Diagnostics) {
	t.Helper()

	if !d.HasErrors() {
		t.Fatal("expected error diagnostics")
	}
}

// providerProtoSchema builds the canonical wire-format schema used by the
// tests: one provider attr, one managed resource, one data source, and one
// function ("fn") with a required string parameter plus a nullable variadic.
func providerProtoSchema() *proto.GetProviderSchema_Response {
	return &proto.GetProviderSchema_Response{
		Provider: &proto.Schema{
			Block: &proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name:     "attr",
						Type:     []byte(`"string"`),
						Required: true,
					},
				},
			},
		},
		ResourceSchemas: map[string]*proto.Schema{
			"resource": {
				Version: 1,
				Block: &proto.Schema_Block{
					Attributes: []*proto.Schema_Attribute{
						{
							Name:     "attr",
							Type:     []byte(`"string"`),
							Required: true,
						},
					},
				},
			},
		},
		DataSourceSchemas: map[string]*proto.Schema{
			"data": {
				Version: 1,
				Block: &proto.Schema_Block{
					Attributes: []*proto.Schema_Attribute{
						{
							Name:     "attr",
							Type:     []byte(`"string"`),
							Required: true,
						},
					},
				},
			},
		},
		Functions: map[string]*proto.Function{
			"fn": &proto.Function{
				Parameters: []*proto.Function_Parameter{{
					Name:               "par_a",
					Type:               []byte(`"string"`),
					AllowNullValue:     false,
					AllowUnknownValues: false,
				}},
				VariadicParameter: &proto.Function_Parameter{
					Name:               "par_var",
					Type:               []byte(`"string"`),
					AllowNullValue:     true,
					AllowUnknownValues: false,
				},
				Return: &proto.Function_Return{
					Type: []byte(`"string"`),
				},
			},
		},
	}
}

// TestGRPCProvider_GetSchema verifies the happy path of GetProviderSchema.
func TestGRPCProvider_GetSchema(t *testing.T) {
	p := &GRPCProvider{
		client: mockProviderClient(t),
	}

	resp := p.GetProviderSchema()
	checkDiags(t, resp.Diagnostics)
}
+// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_GRPCError(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{}, fmt.Errorf("test error")) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +// Ensure that provider error diagnostics are returned early. +// Reference: https://github.com/hashicorp/terraform/issues/31047 +func TestGRPCProvider_GetSchema_ResponseErrorDiagnostic(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&proto.GetProviderSchema_Response{ + Diagnostics: []*proto.Diagnostic{ + { + Severity: proto.Diagnostic_ERROR, + Summary: "error summary", + Detail: "error detail", + }, + }, + // Trigger potential panics + Provider: &proto.Schema{}, + }, nil) + + p := &GRPCProvider{ + client: client, + } + + resp := p.GetProviderSchema() + + checkDiagsHasError(t, resp.Diagnostics) +} + +func TestGRPCProvider_GetSchema_GlobalCacheEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + // The SchemaCache is global and is saved between test runs + providers.SchemaCache = providers.NewMockSchemaCache() + + providerAddr := addrs.Provider{ + Namespace: "namespace", + Type: "type", + } + + mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}} + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Times(1).Return(&proto.GetProviderSchema_Response{ + Provider: mockedProviderResponse, + ServerCapabilities: &proto.ServerCapabilities{GetProviderSchemaOptional: true}, + }, nil) + + // Run GetProviderTwice, 
expect GetSchema to be called once + // Re-initialize the provider before each run to avoid usage of the local cache + p := &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp := p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } + + p = &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp = p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } +} + +func TestGRPCProvider_GetSchema_GlobalCacheDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + // The SchemaCache is global and is saved between test runs + providers.SchemaCache = providers.NewMockSchemaCache() + + providerAddr := addrs.Provider{ + Namespace: "namespace", + Type: "type", + } + + mockedProviderResponse := &proto.Schema{Version: 2, Block: &proto.Schema_Block{}} + + client.EXPECT().GetProviderSchema( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Times(2).Return(&proto.GetProviderSchema_Response{ + Provider: mockedProviderResponse, + ServerCapabilities: &proto.ServerCapabilities{GetProviderSchemaOptional: false}, + }, nil) + + // Run GetProviderTwice, expect GetSchema to be called once + // Re-initialize the provider before each run to avoid usage of the local cache + p := &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp := p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if !cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } + + p = &GRPCProvider{ + client: client, + Addr: providerAddr, + } + resp = p.GetProviderSchema() + + checkDiags(t, resp.Diagnostics) + if 
!cmp.Equal(resp.Provider.Version, mockedProviderResponse.Version) { + t.Fatal(cmp.Diff(resp.Provider.Version, mockedProviderResponse.Version)) + } +} + +func TestGRPCProvider_PrepareProviderConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateProviderConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateProviderConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateProviderConfig(providers.ValidateProviderConfigRequest{Config: cfg}) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateResourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateResourceConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateResourceConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateResourceConfig(providers.ValidateResourceConfigRequest{ + TypeName: "resource", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_ValidateDataResourceConfig(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ValidateDataResourceConfig( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ValidateDataResourceConfig_Response{}, nil) + + cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{"attr": "value"}) + resp := p.ValidateDataResourceConfig(providers.ValidateDataResourceConfigRequest{ + TypeName: "data", + Config: cfg, + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_UpgradeResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + 
Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().UpgradeResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.UpgradeResourceState_Response{ + UpgradedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: "resource", + Version: 0, + RawStateJSON: []byte(`{"old_attr":"bar"}`), + }) + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.UpgradedState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_Configure(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ConfigureProvider( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ConfigureProvider_Response{}, nil) + + resp := p.ConfigureProvider(providers.ConfigureProviderRequest{ + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + checkDiags(t, resp.Diagnostics) +} + +func TestGRPCProvider_Stop(t *testing.T) { + ctrl := gomock.NewController(t) + client := mockproto.NewMockProviderClient(ctrl) + p := &GRPCProvider{ + client: client, + } + + 
client.EXPECT().StopProvider( + gomock.Any(), + gomock.Any(), + ).Return(&proto.StopProvider_Response{}, nil) + + err := p.Stop() + if err != nil { + t.Fatal(err) + } +} + +func TestGRPCProvider_ReadResource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadResourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadEmptyJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadResource( + gomock.Any(), + gomock.Any(), + 
).Return(&proto.ReadResource_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(``), + }, + }, nil) + + obj := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }) + resp := p.ReadResource(providers.ReadResourceRequest{ + TypeName: "resource", + PriorState: obj, + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.NullVal(obj.Type()) + + if !cmp.Equal(expected, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_PlanResourceChange(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", 
resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_PlanResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().PlanResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.PlanResourceChange_Response{ + PlannedState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + RequiresReplace: []*proto.AttributePath{ + { + Steps: []*proto.AttributePath_Step{ + { + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: "attr", + }, + }, + }, + }, + }, + PlannedPrivate: expectedPrivate, + }, nil) + + resp := p.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + ProposedNewState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.PlannedState, typeComparer, valueComparer, equateEmpty)) + } + + expectedReplace := `[]cty.Path{cty.Path{cty.GetAttrStep{Name:"attr"}}}` + replace := fmt.Sprintf("%#v", resp.RequiresReplace) + if expectedReplace != replace { + t.Fatalf("expected %q, got %q", expectedReplace, replace) + } + + if !bytes.Equal(expectedPrivate, resp.PlannedPrivate) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.PlannedPrivate) + } +} + +func TestGRPCProvider_ApplyResourceChange(t *testing.T) 
{ + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} +func TestGRPCProvider_ApplyResourceChangeJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ApplyResourceChange( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ApplyResourceChange_Response{ + NewState: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, nil) + + resp := p.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: "resource", + PriorState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + 
PlannedPrivate: expectedPrivate, + }) + + checkDiags(t, resp.Diagnostics) + + expectedState := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedState, resp.NewState, typeComparer, valueComparer, equateEmpty)) + } + + if !bytes.Equal(expectedPrivate, resp.Private) { + t.Fatalf("expected %q, got %q", expectedPrivate, resp.Private) + } +} + +func TestGRPCProvider_ImportResourceState(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} +func TestGRPCProvider_ImportResourceStateJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + expectedPrivate := []byte(`{"meta": "data"}`) + + client.EXPECT().ImportResourceState( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ImportResourceState_Response{ + ImportedResources: []*proto.ImportResourceState_ImportedResource{ + { + TypeName: "resource", + State: 
&proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + Private: expectedPrivate, + }, + }, + }, nil) + + resp := p.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: "resource", + ID: "foo", + }) + + checkDiags(t, resp.Diagnostics) + + expectedResource := providers.ImportedResource{ + TypeName: "resource", + State: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }), + Private: expectedPrivate, + } + + imported := resp.ImportedResources[0] + if !cmp.Equal(expectedResource, imported, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expectedResource, imported, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadDataSource(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Msgpack: []byte("\x81\xa4attr\xa3bar"), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_ReadDataSourceJSON(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().ReadDataSource( + gomock.Any(), + gomock.Any(), + ).Return(&proto.ReadDataSource_Response{ + State: &proto.DynamicValue{ + Json: []byte(`{"attr":"bar"}`), + }, + }, nil) + + resp := p.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: "data", + Config: cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("foo"), + }), + }) + + checkDiags(t, 
resp.Diagnostics) + + expected := cty.ObjectVal(map[string]cty.Value{ + "attr": cty.StringVal("bar"), + }) + + if !cmp.Equal(expected, resp.State, typeComparer, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, resp.State, typeComparer, valueComparer, equateEmpty)) + } +} + +func TestGRPCProvider_CallFunction(t *testing.T) { + client := mockProviderClient(t) + p := &GRPCProvider{ + client: client, + } + + client.EXPECT().CallFunction( + gomock.Any(), + gomock.Any(), + ).Return(&proto.CallFunction_Response{ + Result: &proto.DynamicValue{Json: []byte(`"foo"`)}, + }, nil) + + resp := p.CallFunction(providers.CallFunctionRequest{ + Name: "fn", + Arguments: []cty.Value{cty.StringVal("bar"), cty.NilVal}, + }) + + if resp.Error != nil { + t.Fatal(resp.Error) + } + if resp.Result != cty.StringVal("foo") { + t.Fatalf("%v", resp.Result) + } +} diff --git a/pkg/plugin6/mock_proto/generate.go b/pkg/plugin6/mock_proto/generate.go new file mode 100644 index 00000000000..55801518e95 --- /dev/null +++ b/pkg/plugin6/mock_proto/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/kubegems/opentofu/pkg/tfplugin6 ProviderClient + +package mock_tfplugin6 diff --git a/pkg/plugin6/mock_proto/mock.go b/pkg/plugin6/mock_proto/mock.go new file mode 100644 index 00000000000..aadeb2faa2f --- /dev/null +++ b/pkg/plugin6/mock_proto/mock.go @@ -0,0 +1,357 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/kubegems/opentofu/pkg/tfplugin6 (interfaces: ProviderClient) + +// Package mock_tfplugin6 is a generated GoMock package. 
+package mock_tfplugin6 + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + tfplugin6 "github.com/kubegems/opentofu/pkg/tfplugin6" + grpc "google.golang.org/grpc" +) + +// MockProviderClient is a mock of ProviderClient interface. +type MockProviderClient struct { + ctrl *gomock.Controller + recorder *MockProviderClientMockRecorder +} + +// MockProviderClientMockRecorder is the mock recorder for MockProviderClient. +type MockProviderClientMockRecorder struct { + mock *MockProviderClient +} + +// NewMockProviderClient creates a new mock instance. +func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { + mock := &MockProviderClient{ctrl: ctrl} + mock.recorder = &MockProviderClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { + return m.recorder +} + +// ApplyResourceChange mocks base method. +func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin6.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.ApplyResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin6.ApplyResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyResourceChange indicates an expected call of ApplyResourceChange. +func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...) +} + +// CallFunction mocks base method. 
+func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin6.CallFunction_Request, arg2 ...grpc.CallOption) (*tfplugin6.CallFunction_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CallFunction", varargs...) + ret0, _ := ret[0].(*tfplugin6.CallFunction_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CallFunction indicates an expected call of CallFunction. +func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallFunction", reflect.TypeOf((*MockProviderClient)(nil).CallFunction), varargs...) +} + +// ConfigureProvider mocks base method. +func (m *MockProviderClient) ConfigureProvider(arg0 context.Context, arg1 *tfplugin6.ConfigureProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.ConfigureProvider_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ConfigureProvider", varargs...) + ret0, _ := ret[0].(*tfplugin6.ConfigureProvider_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ConfigureProvider indicates an expected call of ConfigureProvider. +func (mr *MockProviderClientMockRecorder) ConfigureProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureProvider", reflect.TypeOf((*MockProviderClient)(nil).ConfigureProvider), varargs...) +} + +// GetFunctions mocks base method. 
+func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin6.GetFunctions_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetFunctions_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetFunctions", varargs...) + ret0, _ := ret[0].(*tfplugin6.GetFunctions_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFunctions indicates an expected call of GetFunctions. +func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFunctions", reflect.TypeOf((*MockProviderClient)(nil).GetFunctions), varargs...) +} + +// GetMetadata mocks base method. +func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin6.GetMetadata_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetMetadata_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMetadata", varargs...) + ret0, _ := ret[0].(*tfplugin6.GetMetadata_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata. +func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockProviderClient)(nil).GetMetadata), varargs...) +} + +// GetProviderSchema mocks base method. 
+func (m *MockProviderClient) GetProviderSchema(arg0 context.Context, arg1 *tfplugin6.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetProviderSchema_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetProviderSchema", varargs...) + ret0, _ := ret[0].(*tfplugin6.GetProviderSchema_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProviderSchema indicates an expected call of GetProviderSchema. +func (mr *MockProviderClientMockRecorder) GetProviderSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProviderSchema", reflect.TypeOf((*MockProviderClient)(nil).GetProviderSchema), varargs...) +} + +// ImportResourceState mocks base method. +func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin6.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.ImportResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ImportResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin6.ImportResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImportResourceState indicates an expected call of ImportResourceState. +func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...) +} + +// MoveResourceState mocks base method. 
+func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplugin6.MoveResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.MoveResourceState_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "MoveResourceState", varargs...) + ret0, _ := ret[0].(*tfplugin6.MoveResourceState_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MoveResourceState indicates an expected call of MoveResourceState. +func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveResourceState", reflect.TypeOf((*MockProviderClient)(nil).MoveResourceState), varargs...) +} + +// PlanResourceChange mocks base method. +func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin6.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.PlanResourceChange_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) + ret0, _ := ret[0].(*tfplugin6.PlanResourceChange_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PlanResourceChange indicates an expected call of PlanResourceChange. +func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...) +} + +// ReadDataSource mocks base method. 
+func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin6.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadDataSource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadDataSource", varargs...) + ret0, _ := ret[0].(*tfplugin6.ReadDataSource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDataSource indicates an expected call of ReadDataSource. +func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...) +} + +// ReadResource mocks base method. +func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin6.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadResource_Response, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadResource", varargs...) + ret0, _ := ret[0].(*tfplugin6.ReadResource_Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadResource indicates an expected call of ReadResource. +func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...) +} + +// StopProvider mocks base method. 
func (m *MockProviderClient) StopProvider(arg0 context.Context, arg1 *tfplugin6.StopProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.StopProvider_Response, error) {
	m.ctrl.T.Helper()
	// Fixed args first, then any variadic call options, for expectation matching.
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "StopProvider", varargs...)
	ret0, _ := ret[0].(*tfplugin6.StopProvider_Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// StopProvider indicates an expected call of StopProvider.
func (mr *MockProviderClientMockRecorder) StopProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopProvider", reflect.TypeOf((*MockProviderClient)(nil).StopProvider), varargs...)
}

// UpgradeResourceState mocks base method.
func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin6.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.UpgradeResourceState_Response, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...)
	ret0, _ := ret[0].(*tfplugin6.UpgradeResourceState_Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// UpgradeResourceState indicates an expected call of UpgradeResourceState.
func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...)
}

// ValidateDataResourceConfig mocks base method.
func (m *MockProviderClient) ValidateDataResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateDataResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateDataResourceConfig_Response, error) {
	m.ctrl.T.Helper()
	// Fixed args first, then any variadic call options, for expectation matching.
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ValidateDataResourceConfig", varargs...)
	ret0, _ := ret[0].(*tfplugin6.ValidateDataResourceConfig_Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateDataResourceConfig indicates an expected call of ValidateDataResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateDataResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataResourceConfig), varargs...)
}

// ValidateProviderConfig mocks base method.
func (m *MockProviderClient) ValidateProviderConfig(arg0 context.Context, arg1 *tfplugin6.ValidateProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateProviderConfig_Response, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ValidateProviderConfig", varargs...)
	ret0, _ := ret[0].(*tfplugin6.ValidateProviderConfig_Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateProviderConfig indicates an expected call of ValidateProviderConfig.
func (mr *MockProviderClientMockRecorder) ValidateProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateProviderConfig), varargs...)
}

// ValidateResourceConfig mocks base method.
func (m *MockProviderClient) ValidateResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateResourceConfig_Response, error) {
	m.ctrl.T.Helper()
	// Fixed args first, then any variadic call options, for expectation matching.
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "ValidateResourceConfig", varargs...)
	ret0, _ := ret[0].(*tfplugin6.ValidateResourceConfig_Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateResourceConfig indicates an expected call of ValidateResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceConfig), varargs...)
}
diff --git a/pkg/plugin6/serve.go b/pkg/plugin6/serve.go new file mode 100644 index 00000000000..493f2d28d3f --- /dev/null +++ b/pkg/plugin6/serve.go @@ -0,0 +1,68 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package plugin6 serves providers over plugin protocol version 6.
package plugin6

import (
	"github.com/hashicorp/go-plugin"
	proto "github.com/kubegems/opentofu/pkg/tfplugin6"
)

const (
	// The constants below are the names of the plugins that can be dispensed
	// from the plugin server.
	ProviderPluginName = "provider"

	// DefaultProtocolVersion is the protocol version assumed for legacy clients
	// that don't specify a particular version during their handshake. Since we
	// explicitly set VersionedPlugins in Serve, this number does not need to
	// change with the protocol version and can effectively stay 4 forever
	// (unless we need the "biggest hammer" approach to break all provider
	// compatibility).
+ DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type GRPCProviderFunc func() proto.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + GRPCProviderFunc GRPCProviderFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, + }) +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + plugins := map[int]plugin.PluginSet{} + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil { + plugins[6] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[6]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + } + return plugins +} diff --git a/pkg/provider-simple-v6/main/main.go b/pkg/provider-simple-v6/main/main.go new file mode 100644 index 00000000000..e326a2e665c --- /dev/null +++ b/pkg/provider-simple-v6/main/main.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"github.com/kubegems/opentofu/pkg/grpcwrap"
	plugin "github.com/kubegems/opentofu/pkg/plugin6"
	simple "github.com/kubegems/opentofu/pkg/provider-simple-v6"
	"github.com/kubegems/opentofu/pkg/tfplugin6"
)

// main serves the simple test provider over plugin protocol version 6,
// wrapping the in-process implementation in the gRPC adapter.
func main() {
	plugin.Serve(&plugin.ServeOpts{
		GRPCProviderFunc: func() tfplugin6.ProviderServer {
			return grpcwrap.Provider6(simple.Provider())
		},
	})
}
diff --git a/pkg/provider-simple-v6/provider.go b/pkg/provider-simple-v6/provider.go new file mode 100644 index 00000000000..e9cf48f1cb2 --- /dev/null +++ b/pkg/provider-simple-v6/provider.go @@ -0,0 +1,160 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package simple is a minimal provider implementation for testing.
package simple

import (
	"errors"
	"fmt"
	"time"

	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

// simple is a providers.Interface implementation whose schema is fixed at
// construction time: one managed resource type and one data source, both
// named "simple_resource".
type simple struct {
	schema providers.GetProviderSchemaResponse
}

// Provider returns the test provider with its static schema pre-built.
func Provider() providers.Interface {
	simpleResource := providers.Schema{
		Block: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"id": {
					Computed: true,
					Type:     cty.String,
				},
				"value": {
					Optional: true,
					Type:     cty.String,
				},
			},
		},
	}

	return simple{
		schema: providers.GetProviderSchemaResponse{
			Provider: providers.Schema{
				Block: nil,
			},
			ResourceTypes: map[string]providers.Schema{
				"simple_resource": simpleResource,
			},
			DataSources: map[string]providers.Schema{
				"simple_resource": simpleResource,
			},
			ServerCapabilities: providers.ServerCapabilities{
				PlanDestroy: true,
			},
		},
	}
}

// GetProviderSchema returns the static schema built in Provider.
func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse {
	return s.schema
}

func (s simple) ValidateProviderConfig(req
providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
	return resp
}

// ValidateResourceConfig accepts any resource configuration without checks.
func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) {
	return resp
}

// ValidateDataResourceConfig accepts any data source configuration without checks.
func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) {
	return resp
}

// UpgradeResourceState decodes the raw JSON state using the current schema's
// implied type; no real migration logic is needed for this test provider.
// (Receiver renamed from "p" to "s" for consistency with the other methods
// of this type.)
func (s simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
	ty := s.schema.ResourceTypes[req.TypeName].Block.ImpliedType()
	val, err := ctyjson.Unmarshal(req.RawStateJSON, ty)
	resp.Diagnostics = resp.Diagnostics.Append(err)
	resp.UpgradedState = val
	return resp
}

// ConfigureProvider accepts any provider configuration.
func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
	return resp
}

// Stop is a no-op; there is no long-running work to cancel.
func (s simple) Stop() error {
	return nil
}

func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
	// just return the same state we received
	resp.NewState = req.PriorState
	return resp
}

// PlanResourceChange echoes the proposed state, marking "id" as unknown when
// it is not yet set; a null proposed state is treated as a destroy plan.
func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
	if req.ProposedNewState.IsNull() {
		// destroy op
		resp.PlannedState = req.ProposedNewState

		// signal that this resource was properly planned for destruction,
		// verifying that the schema capabilities with PlanDestroy took effect.
		resp.PlannedPrivate = []byte("destroy planned")
		return resp
	}

	m := req.ProposedNewState.AsValueMap()
	_, ok := m["id"]
	if !ok {
		// "id" is computed; unknown until apply.
		m["id"] = cty.UnknownVal(cty.String)
	}

	resp.PlannedState = cty.ObjectVal(m)
	return resp
}

// ApplyResourceChange applies the planned state, generating an "id" at apply
// time for newly created objects. For destroys it verifies the private data
// set by PlanResourceChange was carried through.
func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
	if req.PlannedState.IsNull() {
		// make sure this was transferred from the plan action
		if string(req.PlannedPrivate) != "destroy planned" {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("resource not planned for destroy, private data %q", req.PlannedPrivate))
		}

		resp.NewState = req.PlannedState
		return resp
	}

	m := req.PlannedState.AsValueMap()
	_, ok := m["id"]
	if !ok {
		m["id"] = cty.StringVal(time.Now().String())
	}
	resp.NewState = cty.ObjectVal(m)

	return resp
}

// ImportResourceState is not supported by this test provider.
func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
	resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported"))
	return resp
}

// ReadDataSource echoes the configuration back with a fixed "id".
func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
	m := req.Config.AsValueMap()
	m["id"] = cty.StringVal("static_id")
	resp.State = cty.ObjectVal(m)
	return resp
}

func (s simple) GetFunctions() providers.GetFunctionsResponse {
	panic("Not Implemented")
}

func (s simple) CallFunction(r providers.CallFunctionRequest) providers.CallFunctionResponse {
	panic("Not Implemented")
}

// Close is a no-op; the provider holds no resources.
func (s simple) Close() error {
	return nil
}
diff --git a/pkg/provider-simple/main/main.go b/pkg/provider-simple/main/main.go new file mode 100644 index 00000000000..8f63a9f3957 --- /dev/null +++ b/pkg/provider-simple/main/main.go @@ -0,0 +1,21 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"github.com/kubegems/opentofu/pkg/grpcwrap"
	"github.com/kubegems/opentofu/pkg/plugin"
	simple "github.com/kubegems/opentofu/pkg/provider-simple"
	"github.com/kubegems/opentofu/pkg/tfplugin5"
)

// main serves the simple test provider over plugin protocol version 5,
// wrapping the in-process implementation in the gRPC adapter.
func main() {
	plugin.Serve(&plugin.ServeOpts{
		GRPCProviderFunc: func() tfplugin5.ProviderServer {
			return grpcwrap.Provider(simple.Provider())
		},
	})
}
diff --git a/pkg/provider-simple/provider.go b/pkg/provider-simple/provider.go new file mode 100644 index 00000000000..eb78602315b --- /dev/null +++ b/pkg/provider-simple/provider.go @@ -0,0 +1,151 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// Package simple is a minimal provider implementation for testing.
package simple

import (
	"errors"
	"time"

	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

// simple is a providers.Interface implementation whose schema is fixed at
// construction time: one managed resource type and one data source, both
// named "simple_resource".
type simple struct {
	schema providers.GetProviderSchemaResponse
}

// Provider returns the test provider with its static schema pre-built.
func Provider() providers.Interface {
	simpleResource := providers.Schema{
		Block: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"id": {
					Computed: true,
					Type:     cty.String,
				},
				"value": {
					Optional: true,
					Type:     cty.String,
				},
			},
		},
	}

	return simple{
		schema: providers.GetProviderSchemaResponse{
			Provider: providers.Schema{
				Block: nil,
			},
			ResourceTypes: map[string]providers.Schema{
				"simple_resource": simpleResource,
			},
			DataSources: map[string]providers.Schema{
				"simple_resource": simpleResource,
			},
			ServerCapabilities: providers.ServerCapabilities{
				PlanDestroy: true,
			},
		},
	}
}

// GetProviderSchema returns the static schema built in Provider.
func (s simple) GetProviderSchema() providers.GetProviderSchemaResponse {
	return s.schema
}

func (s simple) ValidateProviderConfig(req
providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
	return resp
}

// ValidateResourceConfig accepts any resource configuration without checks.
func (s simple) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) {
	return resp
}

// ValidateDataResourceConfig accepts any data source configuration without checks.
func (s simple) ValidateDataResourceConfig(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) {
	return resp
}

// UpgradeResourceState decodes the raw JSON state using the current schema's
// implied type; no real migration logic is needed for this test provider.
// (Receiver renamed from "p" to "s" for consistency with the other methods
// of this type.)
func (s simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
	ty := s.schema.ResourceTypes[req.TypeName].Block.ImpliedType()
	val, err := ctyjson.Unmarshal(req.RawStateJSON, ty)
	resp.Diagnostics = resp.Diagnostics.Append(err)
	resp.UpgradedState = val
	return resp
}

// ConfigureProvider accepts any provider configuration.
func (s simple) ConfigureProvider(providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
	return resp
}

// Stop is a no-op; there is no long-running work to cancel.
func (s simple) Stop() error {
	return nil
}

func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
	// just return the same state we received
	resp.NewState = req.PriorState
	return resp
}

// PlanResourceChange echoes the proposed state, marking "id" as unknown when
// it is not yet set; a null proposed state is treated as a destroy plan.
func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
	if req.ProposedNewState.IsNull() {
		// destroy op
		resp.PlannedState = req.ProposedNewState
		resp.PlannedPrivate = req.PriorPrivate
		return resp
	}

	m := req.ProposedNewState.AsValueMap()
	_, ok := m["id"]
	if !ok {
		m["id"] = cty.UnknownVal(cty.String)
	}

	resp.PlannedState = cty.ObjectVal(m)
	return resp
}

// ApplyResourceChange applies the planned state, generating an "id" at apply
// time for newly created objects.
func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
	if req.PlannedState.IsNull() {
		resp.NewState = req.PlannedState
		return resp
	}

	m := req.PlannedState.AsValueMap()
	_, ok := m["id"]
	if !ok {
		m["id"] = cty.StringVal(time.Now().String())
	}
	resp.NewState = cty.ObjectVal(m)

	return resp
}

// ImportResourceState is not supported by this test provider.
func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
	resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported"))
	return resp
}

// ReadDataSource echoes the configuration back with a fixed "id".
func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
	m := req.Config.AsValueMap()
	m["id"] = cty.StringVal("static_id")
	resp.State = cty.ObjectVal(m)
	return resp
}

func (s simple) GetFunctions() providers.GetFunctionsResponse {
	panic("Not Implemented")
}

func (s simple) CallFunction(r providers.CallFunctionRequest) providers.CallFunctionResponse {
	panic("Not Implemented")
}

// Close is a no-op; the provider holds no resources.
func (s simple) Close() error {
	return nil
}
diff --git a/pkg/providercache/cached_provider.go b/pkg/providercache/cached_provider.go new file mode 100644 index 00000000000..1f8de96f52e --- /dev/null +++ b/pkg/providercache/cached_provider.go @@ -0,0 +1,156 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package providercache

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/getproviders"
)

// CachedProvider represents a provider package in a cache directory.
type CachedProvider struct {
	// Provider and Version together identify the specific provider version
	// this cache entry represents.
	Provider addrs.Provider
	Version  getproviders.Version

	// PackageDir is the local filesystem path to the root directory where
	// the provider's distribution archive was unpacked.
	//
	// The path always uses slashes as path separators, even on Windows, so
	// that the results are consistent between platforms. Windows accepts
	// both slashes and backslashes as long as the separators are consistent
	// within a particular path string.
	PackageDir string
}

// PackageLocation returns the package directory given in the PackageDir field
// as a getproviders.PackageLocation implementation.
//
// Because cached providers are always in the unpacked structure, the result is
// always of the concrete type getproviders.PackageLocalDir.
func (cp *CachedProvider) PackageLocation() getproviders.PackageLocalDir {
	return getproviders.PackageLocalDir(cp.PackageDir)
}

// Hash computes a hash of the contents of the package directory associated
// with the receiving cached provider, using whichever hash algorithm is
// the current default.
//
// If you need a specific version of hash rather than just whichever one is
// current default, call that version's corresponding method (e.g. HashV1)
// directly instead.
func (cp *CachedProvider) Hash() (getproviders.Hash, error) {
	return getproviders.PackageHash(cp.PackageLocation())
}

// MatchesHash returns true if the package on disk matches the given hash,
// or false otherwise. If it cannot traverse the package directory and read
// all of the files in it, or if the hash is in an unsupported format,
// MatchesHash returns an error.
//
// MatchesHash may accept hashes in a number of different formats. Over time
// the set of supported formats may grow and shrink.
func (cp *CachedProvider) MatchesHash(want getproviders.Hash) (bool, error) {
	return getproviders.PackageMatchesHash(cp.PackageLocation(), want)
}

// MatchesAnyHash returns true if the package on disk matches the given hash,
// or false otherwise. If it cannot traverse the package directory and read
// all of the files in it, MatchesAnyHash returns an error.
//
// Unlike the singular MatchesHash, MatchesAnyHash considers unsupported hash
// formats as successfully non-matching, rather than returning an error.
func (cp *CachedProvider) MatchesAnyHash(allowed []getproviders.Hash) (bool, error) {
	return getproviders.PackageMatchesAnyHash(cp.PackageLocation(), allowed)
}

// HashV1 computes a hash of the contents of the package directory associated
// with the receiving cached provider using hash algorithm 1.
//
// The hash covers the paths to files in the directory and the contents of
// those files. It does not cover other metadata about the files, such as
// permissions.
//
// This function is named "HashV1" in anticipation of other hashing algorithms
// being added (in a backward-compatible way) in future. The result from
// HashV1 always begins with the prefix "h1:" so that callers can distinguish
// the results of potentially multiple different hash algorithms in future.
func (cp *CachedProvider) HashV1() (getproviders.Hash, error) {
	return getproviders.PackageHashV1(cp.PackageLocation())
}

// ExecutableFile inspects the cached provider's unpacked package directory for
// something that looks like it's intended to be the executable file for the
// plugin.
//
// This is a bit messy and heuristic-y because historically Terraform used the
// filename itself for local filesystem discovery, allowing some variance in
// the filenames to capture extra metadata, whereas now we're using the
// directory structure leading to the executable instead but need to remain
// compatible with the executable names bundled into existing provider packages.
//
// It will return an error if it can't find a file following the expected
// convention in the given directory.
//
// If found, the path always uses slashes as path separators, even on Windows,
// so that the results are consistent between platforms. Windows accepts both
// slashes and backslashes as long as the separators are consistent within a
// particular path string.
func (cp *CachedProvider) ExecutableFile() (string, error) {
	infos, err := os.ReadDir(cp.PackageDir)
	if err != nil {
		// If the directory itself doesn't exist or isn't readable then we
		// can't access an executable in it.
		return "", fmt.Errorf("could not read package directory: %w", err)
	}

	// For a provider named e.g. tf.example.com/awesomecorp/happycloud, we
	// expect an executable file whose name starts with
	// "terraform-provider-happycloud", followed by zero or more additional
	// characters. If there _are_ additional characters then the first one
	// must be an underscore or a period, like in these examples:
	// - terraform-provider-happycloud_v1.0.0
	// - terraform-provider-happycloud.exe
	//
	// We don't require the version in the filename to match because the
	// executable's name is no longer authoritative, but packages of "official"
	// providers may continue to use versioned executable names for backward
	// compatibility with Terraform 0.12.
	//
	// We also presume that providers packaged for Windows will include the
	// necessary .exe extension on their filenames but do not explicitly check
	// for that. If there's a provider package for Windows that has a file
	// without that suffix then it will be detected as an executable but then
	// we'll presumably fail later trying to run it.
	wantPrefix := "terraform-provider-" + cp.Provider.Type

	// We'll visit all of the directory entries and take the first (in
	// name-lexical order) that looks like a plausible provider executable
	// name. A package with multiple files meeting these criteria is degenerate
	// but we will tolerate it by ignoring the subsequent entries.
	for _, info := range infos {
		if info.IsDir() {
			continue // A directory can never be an executable
		}
		name := info.Name()
		if !strings.HasPrefix(name, wantPrefix) {
			continue
		}
		remainder := name[len(wantPrefix):]
		if len(remainder) > 0 && (remainder[0] != '_' && remainder[0] != '.') {
			continue // subsequent characters must be delimited by _ or .
		}
		return filepath.ToSlash(filepath.Join(cp.PackageDir, name)), nil
	}

	return "", fmt.Errorf("could not find executable file starting with %s", wantPrefix)
}
diff --git a/pkg/providercache/cached_provider_test.go b/pkg/providercache/cached_provider_test.go new file mode 100644 index 00000000000..2b3ec7296bb --- /dev/null +++ b/pkg/providercache/cached_provider_test.go @@ -0,0 +1,118 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package providercache

import (
	"testing"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/getproviders"
)

// TestCachedProviderHash verifies Hash and MatchesHash against a fixture
// package directory with a known h1: checksum.
func TestCachedProviderHash(t *testing.T) {
	cp := &CachedProvider{
		Provider: addrs.NewProvider(
			addrs.DefaultProviderRegistryHost,
			"hashicorp", "null",
		),
		Version: getproviders.MustParseVersion("2.0.0"),

		PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64",
	}

	want := getproviders.MustParseHash("h1:qjsREM4DqEWECD43FcPqddZ9oxCG+IaMTxvWPciS05g=")
	got, err := cp.Hash()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if got != want {
		t.Errorf("wrong Hash result\ngot: %s\nwant: %s", got, want)
	}

	gotMatches, err := cp.MatchesHash(want)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if wantMatches := true; gotMatches != wantMatches {
		t.Errorf("wrong MatchesHash result\ngot: %#v\nwant: %#v", gotMatches, wantMatches)
	}

	// The windows build has a different hash because its executable filename
	// has a .exe suffix, but the
darwin build (hashed above) does not.
	cp2 := &CachedProvider{
		Provider: addrs.NewProvider(
			addrs.DefaultProviderRegistryHost,
			"hashicorp", "null",
		),
		Version: getproviders.MustParseVersion("2.0.0"),

		PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64",
	}
	gotMatches, err = cp2.MatchesHash(want)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if wantMatches := false; gotMatches != wantMatches {
		t.Errorf("wrong MatchesHash result for other package\ngot: %#v\nwant: %#v", gotMatches, wantMatches)
	}

}

// TestExecutableFile exercises executable discovery for Unix and Windows
// fixture packages, plus the two failure modes (no matching file; missing
// package directory).
func TestExecutableFile(t *testing.T) {
	testCases := map[string]struct {
		cp   *CachedProvider
		file string
		err  string
	}{
		"linux": {
			cp: &CachedProvider{
				Provider:   addrs.NewProvider(addrs.DefaultProviderRegistryHost, "hashicorp", "null"),
				Version:    getproviders.MustParseVersion("2.0.0"),
				PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64",
			},
			file: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null",
		},
		"windows": {
			cp: &CachedProvider{
				Provider:   addrs.NewProvider(addrs.DefaultProviderRegistryHost, "hashicorp", "null"),
				Version:    getproviders.MustParseVersion("2.0.0"),
				PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64",
			},
			file: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64/terraform-provider-null.exe",
		},
		"missing-executable": {
			cp: &CachedProvider{
				Provider:   addrs.NewProvider(addrs.DefaultProviderRegistryHost, "missing", "executable"),
				Version:    getproviders.MustParseVersion("2.0.0"),
				PackageDir: "testdata/cachedir/registry.opentofu.org/missing/executable/2.0.0/linux_amd64",
			},
			err: "could not find executable file starting with terraform-provider-executable",
		},
		"missing-dir": {
			cp: &CachedProvider{
				Provider: addrs.NewProvider(addrs.DefaultProviderRegistryHost, "missing", "packagedir"),
				Version:    getproviders.MustParseVersion("2.0.0"),
				PackageDir: "testdata/cachedir/registry.opentofu.org/missing/packagedir/2.0.0/linux_amd64",
			},
			err: "could not read package directory: open testdata/cachedir/registry.opentofu.org/missing/packagedir/2.0.0/linux_amd64: no such file or directory",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			file, err := tc.cp.ExecutableFile()
			if file != tc.file {
				t.Errorf("wrong file\n got: %q\nwant: %q", file, tc.file)
			}
			if err == nil && tc.err != "" {
				t.Fatalf("no error returned, want: %q", tc.err)
			} else if err != nil && err.Error() != tc.err {
				t.Errorf("wrong error\n got: %q\nwant: %q", err, tc.err)
			}
		})
	}
}
diff --git a/pkg/providercache/dir.go b/pkg/providercache/dir.go new file mode 100644 index 00000000000..da51acaa12f --- /dev/null +++ b/pkg/providercache/dir.go @@ -0,0 +1,203 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package providercache

import (
	"log"
	"path/filepath"
	"sort"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/getproviders"
)

// Dir represents a single local filesystem directory containing cached
// provider plugin packages that can be both read from (to find providers to
// use for operations) and written to (during provider installation).
//
// The contents of a cache directory follow the same naming conventions as a
// getproviders.FilesystemMirrorSource, except that the packages are always
// kept in the "unpacked" form (a directory containing the contents of the
// original distribution archive) so that they are ready for direct execution.
//
// A Dir also pays attention only to packages for the current host platform,
// silently ignoring any cached packages for other platforms.
//
// Various Dir methods return values that are technically mutable due to the
// restrictions of the Go typesystem, but callers are not permitted to mutate
// any part of the returned data structures.
type Dir struct {
	baseDir        string
	targetPlatform getproviders.Platform

	// metaCache is a cache of the metadata of relevant packages available in
	// the cache directory last time we scanned it. This can be nil to indicate
	// that the cache is cold. The cache will be invalidated (set back to nil)
	// by any operation that modifies the contents of the cache directory.
	//
	// We intentionally don't make effort to detect modifications to the
	// directory made by other codepaths because the contract for NewDir
	// explicitly defines using the same directory for multiple purposes
	// as undefined behavior.
	metaCache map[addrs.Provider][]CachedProvider
}

// NewDir creates and returns a new Dir object that will read and write
// provider plugins in the given filesystem directory.
//
// If two instances of Dir are concurrently operating on a particular base
// directory, or if a Dir base directory is also used as a filesystem mirror
// source directory, the behavior is undefined.
func NewDir(baseDir string) *Dir {
	return &Dir{
		baseDir:        baseDir,
		targetPlatform: getproviders.CurrentPlatform,
	}
}

// NewDirWithPlatform is a variant of NewDir that allows selecting a specific
// target platform, rather than taking the current one where this code is
// running.
//
// This is primarily intended for portable unit testing and not particularly
// useful in "real" callers.
func NewDirWithPlatform(baseDir string, platform getproviders.Platform) *Dir {
	return &Dir{
		baseDir:        baseDir,
		targetPlatform: platform,
	}
}

// BasePath returns the filesystem path of the base directory of this
// cache directory.
+func (d *Dir) BasePath() string { + return filepath.Clean(d.baseDir) +} + +// AllAvailablePackages returns a description of all of the packages already +// present in the directory. The cache entries are grouped by the provider +// they relate to and then sorted by version precedence, with highest +// precedence first. +// +// This function will return an empty result both when the directory is empty +// and when scanning the directory produces an error. +// +// The caller is forbidden from modifying the returned data structure in any +// way, even though the Go type system permits it. +func (d *Dir) AllAvailablePackages() map[addrs.Provider][]CachedProvider { + if err := d.fillMetaCache(); err != nil { + log.Printf("[WARN] Failed to scan provider cache directory %s: %s", d.baseDir, err) + return nil + } + + return d.metaCache +} + +// ProviderVersion returns the cache entry for the requested provider version, +// or nil if the requested provider version isn't present in the cache. +func (d *Dir) ProviderVersion(provider addrs.Provider, version getproviders.Version) *CachedProvider { + if err := d.fillMetaCache(); err != nil { + return nil + } + + for _, entry := range d.metaCache[provider] { + // We're intentionally comparing exact version here, so if either + // version number contains build metadata and they don't match then + // this will not return true. The rule of ignoring build metadata + // applies only for handling version _constraints_ and for deciding + // version precedence. + if entry.Version == version { + return &entry + } + } + + return nil +} + +// ProviderLatestVersion returns the cache entry for the latest +// version of the requested provider already available in the cache, or nil if +// there are no versions of that provider available. 
+func (d *Dir) ProviderLatestVersion(provider addrs.Provider) *CachedProvider { + if err := d.fillMetaCache(); err != nil { + return nil + } + + entries := d.metaCache[provider] + if len(entries) == 0 { + return nil + } + + return &entries[0] +} + +func (d *Dir) fillMetaCache() error { + // For d.metaCache we consider nil to be different than a non-nil empty + // map, so we can distinguish between having scanned and got an empty + // result vs. not having scanned successfully at all yet. + if d.metaCache != nil { + log.Printf("[TRACE] providercache.fillMetaCache: using cached result from previous scan of %s", d.baseDir) + return nil + } + log.Printf("[TRACE] providercache.fillMetaCache: scanning directory %s", d.baseDir) + + allData, err := getproviders.SearchLocalDirectory(d.baseDir) + if err != nil { + log.Printf("[TRACE] providercache.fillMetaCache: error while scanning directory %s: %s", d.baseDir, err) + return err + } + + // The getproviders package just returns everything it found, but we're + // interested only in a subset of the results: + // - those that are for the current platform + // - those that are in the "unpacked" form, ready to execute + // ...so we'll filter in these ways while we're constructing our final + // map to save as the cache. + // + // We intentionally always make a non-nil map, even if it might ultimately + // be empty, because we use that to recognize that the cache is populated. + data := make(map[addrs.Provider][]CachedProvider) + + for providerAddr, metas := range allData { + for _, meta := range metas { + if meta.TargetPlatform != d.targetPlatform { + log.Printf("[TRACE] providercache.fillMetaCache: ignoring %s because it is for %s, not %s", meta.Location, meta.TargetPlatform, d.targetPlatform) + continue + } + if _, ok := meta.Location.(getproviders.PackageLocalDir); !ok { + // PackageLocalDir indicates an unpacked provider package ready + // to execute. 
+ log.Printf("[TRACE] providercache.fillMetaCache: ignoring %s because it is not an unpacked directory", meta.Location) + continue + } + + packageDir := filepath.Clean(string(meta.Location.(getproviders.PackageLocalDir))) + + log.Printf("[TRACE] providercache.fillMetaCache: including %s as a candidate package for %s %s", meta.Location, providerAddr, meta.Version) + data[providerAddr] = append(data[providerAddr], CachedProvider{ + Provider: providerAddr, + Version: meta.Version, + PackageDir: filepath.ToSlash(packageDir), + }) + } + } + + // After we've built our lists per provider, we'll also sort them by + // version precedence so that the newest available version is always at + // index zero. If there are two versions that differ only in build metadata + // then it's undefined but deterministic which one we will select here, + // because we're preserving the order returned by SearchLocalDirectory + // in that case. + for _, entries := range data { + sort.SliceStable(entries, func(i, j int) bool { + // We're using GreaterThan rather than LessThan here because we + // want these in _decreasing_ order of precedence. + return entries[i].Version.GreaterThan(entries[j].Version) + }) + } + + d.metaCache = data + return nil +} diff --git a/pkg/providercache/dir_modify.go b/pkg/providercache/dir_modify.go new file mode 100644 index 00000000000..0e3a90135c1 --- /dev/null +++ b/pkg/providercache/dir_modify.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "context" + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// InstallPackage takes a metadata object describing a package available for +// installation, retrieves that package, and installs it into the receiving +// cache directory. 
+// +// If the allowedHashes set has non-zero length then at least one of the hashes +// in the set must match the package that "entry" refers to. If none of the +// hashes match then the returned error message assumes that the hashes came +// from a lock file. +func (d *Dir) InstallPackage(ctx context.Context, meta getproviders.PackageMeta, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) { + if meta.TargetPlatform != d.targetPlatform { + return nil, fmt.Errorf("can't install %s package into cache directory expecting %s", meta.TargetPlatform, d.targetPlatform) + } + newPath := getproviders.UnpackedDirectoryPathForPackage( + d.baseDir, meta.Provider, meta.Version, d.targetPlatform, + ) + + // Invalidate our metaCache so that subsequent read calls will re-scan to + // incorporate any changes we make here. + d.metaCache = nil + + log.Printf("[TRACE] providercache.Dir.InstallPackage: installing %s v%s from %s", meta.Provider, meta.Version, meta.Location) + switch meta.Location.(type) { + case getproviders.PackageHTTPURL: + return installFromHTTPURL(ctx, meta, newPath, allowedHashes) + case getproviders.PackageLocalArchive: + return installFromLocalArchive(ctx, meta, newPath, allowedHashes) + case getproviders.PackageLocalDir: + return installFromLocalDir(ctx, meta, newPath, allowedHashes) + default: + // Should not get here, because the above should be exhaustive for + // all implementations of getproviders.Location. + return nil, fmt.Errorf("don't know how to install from a %T location", meta.Location) + } +} + +// LinkFromOtherCache takes a CachedProvider value produced from another Dir +// and links it into the cache represented by the receiver Dir. +// +// This is used to implement tiered caching, where new providers are first +// populated into a system-wide shared cache and then linked from there into +// a configuration-specific local cache. 
+// +// It's invalid to link a CachedProvider from a particular Dir into that same +// Dir, because that would otherwise potentially replace a real package +// directory with a circular link back to itself. +// +// If the allowedHashes set has non-zero length then at least one of the hashes +// in the set must match the package that "entry" refers to. If none of the +// hashes match then the returned error message assumes that the hashes came +// from a lock file. +func (d *Dir) LinkFromOtherCache(entry *CachedProvider, allowedHashes []getproviders.Hash) error { + if len(allowedHashes) > 0 { + if matches, err := entry.MatchesAnyHash(allowedHashes); err != nil { + return fmt.Errorf( + "failed to calculate checksum for cached copy of %s %s in %s: %w", + entry.Provider, entry.Version, d.baseDir, err, + ) + } else if !matches { + return fmt.Errorf( + "the provider cache at %s has a copy of %s %s that doesn't match any of the checksums recorded in the dependency lock file", + d.baseDir, entry.Provider, entry.Version, + ) + } + } + + newPath := getproviders.UnpackedDirectoryPathForPackage( + d.baseDir, entry.Provider, entry.Version, d.targetPlatform, + ) + currentPath := entry.PackageDir + log.Printf("[TRACE] providercache.Dir.LinkFromOtherCache: linking %s v%s from existing cache %s to %s", entry.Provider, entry.Version, currentPath, newPath) + + // Invalidate our metaCache so that subsequent read calls will re-scan to + // incorporate any changes we make here. + d.metaCache = nil + + // We re-use the process of installing from a local directory here, because + // the two operations are fundamentally the same: symlink if possible, + // deep-copy otherwise. + meta := getproviders.PackageMeta{ + Provider: entry.Provider, + Version: entry.Version, + + // FIXME: How do we populate this? + ProtocolVersions: nil, + TargetPlatform: d.targetPlatform, + + // Because this is already unpacked, the filename is synthetic + // based on the standard naming scheme. 
+ Filename: fmt.Sprintf("terraform-provider-%s_%s_%s.zip", + entry.Provider.Type, entry.Version, d.targetPlatform), + Location: getproviders.PackageLocalDir(currentPath), + } + // No further hash check here because we already checked the hash + // of the source directory above. + _, err := installFromLocalDir(context.TODO(), meta, newPath, nil) + return err +} diff --git a/pkg/providercache/dir_modify_test.go b/pkg/providercache/dir_modify_test.go new file mode 100644 index 00000000000..3389ef988e3 --- /dev/null +++ b/pkg/providercache/dir_modify_test.go @@ -0,0 +1,148 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "context" + "path/filepath" + "testing" + + "github.com/apparentlymart/go-versions/versions" + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestInstallPackage(t *testing.T) { + tmpDirPath, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + linuxPlatform := getproviders.Platform{ + OS: "linux", + Arch: "amd64", + } + nullProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "hashicorp", "null", + ) + + tmpDir := NewDirWithPlatform(tmpDirPath, linuxPlatform) + + meta := getproviders.PackageMeta{ + Provider: nullProvider, + Version: versions.MustParseVersion("2.1.0"), + + ProtocolVersions: getproviders.VersionList{versions.MustParseVersion("5.0.0")}, + TargetPlatform: linuxPlatform, + + Filename: "provider-null_2.1.0_linux_amd64.zip", + Location: getproviders.PackageLocalArchive("testdata/provider-null_2.1.0_linux_amd64.zip"), + } + + result, err := tmpDir.InstallPackage(context.TODO(), meta, nil) + if err != nil { + t.Fatalf("InstallPackage failed: %s", err) + } + if result != nil { + t.Errorf("unexpected result %#v, wanted nil", result) + } + + // Now we should see the same 
version reflected in the temporary directory. + got := tmpDir.AllAvailablePackages() + want := map[addrs.Provider][]CachedProvider{ + nullProvider: { + CachedProvider{ + Provider: nullProvider, + + Version: versions.MustParseVersion("2.1.0"), + + PackageDir: tmpDirPath + "/registry.opentofu.org/hashicorp/null/2.1.0/linux_amd64", + }, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong cache contents after install\n%s", diff) + } +} + +func TestLinkFromOtherCache(t *testing.T) { + srcDirPath := "testdata/cachedir" + tmpDirPath, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + + windowsPlatform := getproviders.Platform{ + OS: "windows", + Arch: "amd64", + } + nullProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "hashicorp", "null", + ) + + srcDir := NewDirWithPlatform(srcDirPath, windowsPlatform) + tmpDir := NewDirWithPlatform(tmpDirPath, windowsPlatform) + + // First we'll check our preconditions: srcDir should have only the + // null provider version 2.0.0 in it, because we're faking that we're on + // Windows, and tmpDir should have no providers in it at all. + + gotSrcInitial := srcDir.AllAvailablePackages() + wantSrcInitial := map[addrs.Provider][]CachedProvider{ + nullProvider: { + CachedProvider{ + Provider: nullProvider, + + // We want 2.0.0 rather than 2.1.0 because the 2.1.0 package is + // still packed and thus not considered to be a cache member. 
+ Version: versions.MustParseVersion("2.0.0"), + + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64", + }, + }, + } + if diff := cmp.Diff(wantSrcInitial, gotSrcInitial); diff != "" { + t.Fatalf("incorrect initial source directory contents\n%s", diff) + } + + gotTmpInitial := tmpDir.AllAvailablePackages() + wantTmpInitial := map[addrs.Provider][]CachedProvider{} + if diff := cmp.Diff(wantTmpInitial, gotTmpInitial); diff != "" { + t.Fatalf("incorrect initial temp directory contents\n%s", diff) + } + + cacheEntry := srcDir.ProviderLatestVersion(nullProvider) + if cacheEntry == nil { + // This is weird because we just checked for the presence of this above + t.Fatalf("null provider has no latest version in source directory") + } + + err = tmpDir.LinkFromOtherCache(cacheEntry, nil) + if err != nil { + t.Fatalf("LinkFromOtherCache failed: %s", err) + } + + // Now we should see the same version reflected in the temporary directory. + got := tmpDir.AllAvailablePackages() + want := map[addrs.Provider][]CachedProvider{ + nullProvider: { + CachedProvider{ + Provider: nullProvider, + + // We want 2.0.0 rather than 2.1.0 because the 2.1.0 package is + // still packed and thus not considered to be a cache member. + Version: versions.MustParseVersion("2.0.0"), + + PackageDir: tmpDirPath + "/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64", + }, + }, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong cache contents after link\n%s", diff) + } +} diff --git a/pkg/providercache/dir_test.go b/pkg/providercache/dir_test.go new file mode 100644 index 00000000000..2ebf9d2ba38 --- /dev/null +++ b/pkg/providercache/dir_test.go @@ -0,0 +1,189 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "testing" + + "github.com/apparentlymart/go-versions/versions" + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestDirReading(t *testing.T) { + testDir := "testdata/cachedir" + + // We'll force using particular platforms for unit testing purposes, + // so that we'll get consistent results on all platforms. + windowsPlatform := getproviders.Platform{ // only null 2.0.0 is cached + OS: "windows", + Arch: "amd64", + } + linuxPlatform := getproviders.Platform{ // various provider versions are cached + OS: "linux", + Arch: "amd64", + } + + nullProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "hashicorp", "null", + ) + randomProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "hashicorp", "random", + ) + randomBetaProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "hashicorp", "random-beta", + ) + nonExistProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "bloop", "nonexist", + ) + legacyProvider := addrs.NewLegacyProvider("legacy") + missingExecutableProvider := addrs.NewProvider( + addrs.DefaultProviderRegistryHost, "missing", "executable", + ) + + t.Run("ProviderLatestVersion", func(t *testing.T) { + t.Run("exists", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + got := dir.ProviderLatestVersion(nullProvider) + want := &CachedProvider{ + Provider: nullProvider, + + // We want 2.0.0 rather than 2.1.0 because the 2.1.0 package is + // still packed and thus not considered to be a cache member. 
+ Version: versions.MustParseVersion("2.0.0"), + + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64", + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("no package for current platform", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + // random provider is only cached for linux_amd64 in our fixtures dir + got := dir.ProviderLatestVersion(randomProvider) + var want *CachedProvider + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("no versions available at all", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + // nonexist provider is not present in our fixtures dir at all + got := dir.ProviderLatestVersion(nonExistProvider) + var want *CachedProvider + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + }) + + t.Run("ProviderVersion", func(t *testing.T) { + t.Run("exists", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + got := dir.ProviderVersion(nullProvider, versions.MustParseVersion("2.0.0")) + want := &CachedProvider{ + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/windows_amd64", + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("specified version is not cached", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + // there is no v5.0.0 package in our fixtures dir + got := dir.ProviderVersion(nullProvider, versions.MustParseVersion("5.0.0")) + var want *CachedProvider + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("no package for current platform", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + // random provider 
1.2.0 is only cached for linux_amd64 in our fixtures dir + got := dir.ProviderVersion(randomProvider, versions.MustParseVersion("1.2.0")) + var want *CachedProvider + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + t.Run("no versions available at all", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, windowsPlatform) + + // nonexist provider is not present in our fixtures dir at all + got := dir.ProviderVersion(nonExistProvider, versions.MustParseVersion("1.0.0")) + var want *CachedProvider + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) + }) + + t.Run("AllAvailablePackages", func(t *testing.T) { + dir := NewDirWithPlatform(testDir, linuxPlatform) + + got := dir.AllAvailablePackages() + want := map[addrs.Provider][]CachedProvider{ + legacyProvider: { + { + Provider: legacyProvider, + Version: versions.MustParseVersion("1.0.0"), + PackageDir: "testdata/cachedir/registry.opentofu.org/-/legacy/1.0.0/linux_amd64", + }, + }, + nullProvider: { + { + Provider: nullProvider, + Version: versions.MustParseVersion("2.0.0"), + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64", + }, + }, + randomProvider: { + { + Provider: randomProvider, + Version: versions.MustParseVersion("1.2.0"), + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64", + }, + }, + randomBetaProvider: { + { + Provider: randomBetaProvider, + Version: versions.MustParseVersion("1.2.0"), + PackageDir: "testdata/cachedir/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64", + }, + }, + missingExecutableProvider: { + { + Provider: missingExecutableProvider, + Version: versions.MustParseVersion("2.0.0"), + PackageDir: "testdata/cachedir/registry.opentofu.org/missing/executable/2.0.0/linux_amd64", + }, + }, + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + }) +} diff --git 
a/pkg/providercache/doc.go b/pkg/providercache/doc.go new file mode 100644 index 00000000000..a9341c8ed6a --- /dev/null +++ b/pkg/providercache/doc.go @@ -0,0 +1,15 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package providercache contains the logic for auto-installing providers from +// packages obtained elsewhere, and for managing the local directories that +// serve as global or single-configuration caches of those auto-installed +// providers. +// +// It builds on the lower-level provider source functionality provided by +// the pkg/getproviders package, adding the additional behaviors around +// obtaining the discovered providers and placing them in the cache +// directories for subsequent use. +package providercache diff --git a/pkg/providercache/installer.go b/pkg/providercache/installer.go new file mode 100644 index 00000000000..79b3d6b1c6d --- /dev/null +++ b/pkg/providercache/installer.go @@ -0,0 +1,794 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "context" + "fmt" + "log" + "sort" + "strings" + + "github.com/apparentlymart/go-versions/versions" + + "github.com/kubegems/opentofu/pkg/addrs" + copydir "github.com/kubegems/opentofu/pkg/copy" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// Installer is the main type in this package, representing a provider installer +// with a particular configuration-specific cache directory and an optional +// global cache directory. +type Installer struct { + // targetDir is the cache directory we're ultimately aiming to get the + // requested providers installed into. 
+ targetDir *Dir + + // source is the provider source that the installer will use to discover + // what provider versions are available for installation and to + // find the source locations for any versions that are not already + // available via one of the cache directories. + source getproviders.Source + + // globalCacheDir is an optional additional directory that will, if + // provided, be treated as a read-through cache when retrieving new + // provider versions. That is, new packages are fetched into this + // directory first and then linked into targetDir, which allows sharing + // both the disk space and the download time for a particular provider + // version between different configurations on the same system. + globalCacheDir *Dir + + // globalCacheDirMayBreakDependencyLockFile allows a temporary exception to + // the rule that an entry in globalCacheDir can normally only be used if + // its validity is already confirmed by an entry in the dependency lock + // file. + globalCacheDirMayBreakDependencyLockFile bool + + // builtInProviderTypes is an optional set of types that should be + // considered valid to appear in the special terraform.io/builtin/... + // namespace, which we use for providers that are built in to OpenTofu + // and thus do not need any separate installation step. + builtInProviderTypes []string + + // unmanagedProviderTypes is a set of provider addresses that should be + // considered implemented, but that OpenTofu does not manage the + // lifecycle for, and therefore does not need to worry about the + // installation of. + unmanagedProviderTypes map[addrs.Provider]struct{} +} + +// NewInstaller constructs and returns a new installer with the given target +// directory and provider source. +// +// A newly-created installer does not have a global cache directory configured, +// but a caller can make a follow-up call to SetGlobalCacheDir to provide +// one prior to taking any installation actions. 
+// +// The target directory MUST NOT also be an input consulted by the given source, +// or the result is undefined. +func NewInstaller(targetDir *Dir, source getproviders.Source) *Installer { + return &Installer{ + targetDir: targetDir, + source: source, + } +} + +// Clone returns a new Installer which has a new target directory but +// the same optional global cache directory, the same installation sources, +// and the same built-in/unmanaged providers. The result can be mutated further +// using the various setter methods without affecting the original. +func (i *Installer) Clone(targetDir *Dir) *Installer { + // For now all of our setter methods just overwrite field values in + // their entirety, rather than mutating things on the other side of + // the shared pointers, and so we can safely just shallow-copy the + // root. We might need to be more careful here if in future we add + // methods that allow deeper mutations through the stored pointers. + ret := *i + ret.targetDir = targetDir + return &ret +} + +// ProviderSource returns the getproviders.Source that the installer would +// use for installing any new providers. +func (i *Installer) ProviderSource() getproviders.Source { + return i.source +} + +// SetGlobalCacheDir activates a second tier of caching for the receiving +// installer, with the given directory used as a read-through cache for +// installation operations that need to retrieve new packages. +// +// The global cache directory for an installer must never be the same as its +// target directory, and must not be used as one of its provider sources. +// If these overlap then undefined behavior will result. +func (i *Installer) SetGlobalCacheDir(cacheDir *Dir) { + // A little safety check to catch straightforward mistakes where the + // directories overlap. Better to panic early than to do + // possibly-destructive actions on the cache directory downstream. 
+ if same, err := copydir.SameFile(i.targetDir.baseDir, cacheDir.baseDir); err == nil && same { + panic(fmt.Sprintf("global cache directory %s must not match the installation target directory %s", cacheDir.baseDir, i.targetDir.baseDir)) + } + i.globalCacheDir = cacheDir +} + +// SetGlobalCacheDirMayBreakDependencyLockFile activates or deactivates our +// temporary exception to the rule that the global cache directory can be used +// only when entries are confirmed by existing entries in the dependency lock +// file. +// +// If this is set then if we install a provider for the first time from the +// cache then the dependency lock file will include only the checksum from +// the package in the global cache, which means the lock file won't be portable +// to OpenTofu running on another operating system or CPU architecture. +func (i *Installer) SetGlobalCacheDirMayBreakDependencyLockFile(mayBreak bool) { + i.globalCacheDirMayBreakDependencyLockFile = mayBreak +} + +// HasGlobalCacheDir returns true if someone has previously called +// SetGlobalCacheDir to configure a global cache directory for this installer. +func (i *Installer) HasGlobalCacheDir() bool { + return i.globalCacheDir != nil +} + +// SetBuiltInProviderTypes tells the receiver to consider the type names in the +// given slice to be valid as providers in the special +// terraform.io/builtin/... namespace that we use for providers that are +// built in to OpenTofu and thus do not need a separate installation step. +// +// If a caller requests installation of a provider in that namespace, the +// installer will treat it as a no-op if its name exists in this list, but +// will produce an error if it does not. +// +// The default, if this method isn't called, is for there to be no valid +// builtin providers. +// +// Do not modify the buffer under the given slice after passing it to this +// method. 
+func (i *Installer) SetBuiltInProviderTypes(types []string) { + i.builtInProviderTypes = types +} + +// SetUnmanagedProviderTypes tells the receiver to consider the providers +// indicated by the passed addrs.Providers as unmanaged. OpenTofu does not +// need to control the lifecycle of these providers, and they are assumed to be +// running already when OpenTofu is started. Because these are essentially +// processes, not binaries, OpenTofu will not do any work to ensure presence +// or versioning of these binaries. +func (i *Installer) SetUnmanagedProviderTypes(types map[addrs.Provider]struct{}) { + i.unmanagedProviderTypes = types +} + +// EnsureProviderVersions compares the given provider requirements with what +// is already available in the installer's target directory and then takes +// appropriate installation actions to ensure that suitable packages +// are available in the target cache directory. +// +// The given mode modifies how the operation will treat providers that already +// have acceptable versions available in the target cache directory. See the +// documentation for InstallMode and the InstallMode values for more +// information. +// +// The given context can be used to cancel the overall installation operation +// (causing any operations in progress to fail with an error), and can also +// include an InstallerEvents value for optional intermediate progress +// notifications. +// +// If a given InstallerEvents subscribes to notifications about installation +// failures then those notifications will be redundant with the ones included +// in the final returned error value so callers should show either one or the +// other, and not both. 
+func (i *Installer) EnsureProviderVersions(ctx context.Context, locks *depsfile.Locks, reqs getproviders.Requirements, mode InstallMode) (*depsfile.Locks, error) { + errs := map[addrs.Provider]error{} + evts := installerEventsForContext(ctx) + + // We'll work with a copy of the given locks, so we can modify it and + // return the updated locks without affecting the caller's object. + // We'll add, replace, or remove locks in here during our work so that the + // final locks file reflects what the installer has selected. + locks = locks.DeepCopy() + + if cb := evts.PendingProviders; cb != nil { + cb(reqs) + } + + // Step 1: Which providers might we need to fetch a new version of? + // This produces the subset of requirements we need to ask the provider + // source about. If we're in the normal (non-upgrade) mode then we'll + // just ask the source to confirm the continued existence of what + // was locked, or otherwise we'll find the newest version matching the + // configured version constraint. + mightNeed := map[addrs.Provider]getproviders.VersionSet{} + locked := map[addrs.Provider]bool{} + for provider, versionConstraints := range reqs { + if provider.IsBuiltIn() { + // Built in providers do not require installation but we'll still + // verify that the requested provider name is valid. + valid := false + for _, name := range i.builtInProviderTypes { + if name == provider.Type { + valid = true + break + } + } + var err error + if valid { + if len(versionConstraints) == 0 { + // Other than reporting an event for the outcome of this + // provider, we'll do nothing else with it: it's just + // automatically available for use. + if cb := evts.BuiltInProviderAvailable; cb != nil { + cb(provider) + } + } else { + // A built-in provider is not permitted to have an explicit + // version constraint, because we can only use the version + // that is built in to the current OpenTofu release. 
+ err = fmt.Errorf("built-in providers do not support explicit version constraints") + } + } else { + err = fmt.Errorf("this OpenTofu release has no built-in provider named %q", provider.Type) + } + if err != nil { + errs[provider] = err + if cb := evts.BuiltInProviderFailure; cb != nil { + cb(provider, err) + } + } + continue + } + if _, ok := i.unmanagedProviderTypes[provider]; ok { + // unmanaged providers do not require installation + continue + } + acceptableVersions := versions.MeetingConstraints(versionConstraints) + if !mode.forceQueryAllProviders() { + // If we're not forcing potential changes of version then an + // existing selection from the lock file takes priority over + // the currently-configured version constraints. + if lock := locks.Provider(provider); lock != nil { + if !acceptableVersions.Has(lock.Version()) { + err := fmt.Errorf( + "locked provider %s %s does not match configured version constraint %s; must use tofu init -upgrade to allow selection of new versions", + provider, lock.Version(), getproviders.VersionConstraintsString(versionConstraints), + ) + errs[provider] = err + // This is a funny case where we're returning an error + // before we do any querying at all. To keep the event + // stream consistent without introducing an extra event + // type, we'll emit an artificial QueryPackagesBegin for + // this provider before we indicate that it failed using + // QueryPackagesFailure. + if cb := evts.QueryPackagesBegin; cb != nil { + cb(provider, versionConstraints, true) + } + if cb := evts.QueryPackagesFailure; cb != nil { + cb(provider, err) + } + continue + } + acceptableVersions = versions.Only(lock.Version()) + locked[provider] = true + } + } + mightNeed[provider] = acceptableVersions + } + + // Step 2: Query the provider source for each of the providers we selected + // in the first step and select the latest available version that is + // in the set of acceptable versions. 
+ // + // This produces a set of packages to install to our cache in the next step. + need := map[addrs.Provider]getproviders.Version{} +NeedProvider: + for provider, acceptableVersions := range mightNeed { + if err := ctx.Err(); err != nil { + // If our context has been cancelled or reached a timeout then + // we'll abort early, because subsequent operations against + // that context will fail immediately anyway. + return nil, err + } + + if cb := evts.QueryPackagesBegin; cb != nil { + cb(provider, reqs[provider], locked[provider]) + } + available, warnings, err := i.source.AvailableVersions(ctx, provider) + if err != nil { + errs[provider] = err + if cb := evts.QueryPackagesFailure; cb != nil { + cb(provider, err) + } + // We will take no further actions for this provider. + continue + } + if len(warnings) > 0 { + if cb := evts.QueryPackagesWarning; cb != nil { + cb(provider, warnings) + } + } + available.Sort() // put the versions in increasing order of precedence + for i := len(available) - 1; i >= 0; i-- { // walk backwards to consider newer versions first + if acceptableVersions.Has(available[i]) { + need[provider] = available[i] + if cb := evts.QueryPackagesSuccess; cb != nil { + cb(provider, available[i]) + } + continue NeedProvider + } + } + // If we get here then the source has no packages that meet the given + // version constraint, which we model as a query error. + if locked[provider] { + // This situation should be a rare one: it suggests that a + // version was previously available but was yanked for some + // reason. 
+ lock := locks.Provider(provider) + err = fmt.Errorf("the previously-selected version %s is no longer available", lock.Version()) + } else { + err = fmt.Errorf("no available releases match the given constraints %s", getproviders.VersionConstraintsString(reqs[provider])) + log.Printf("[DEBUG] %s", err.Error()) + log.Printf("[DEBUG] Available releases: %s", available) + } + errs[provider] = err + if cb := evts.QueryPackagesFailure; cb != nil { + cb(provider, err) + } + } + + // Step 3: For each provider version we've decided we need to install, + // install its package into our target cache (possibly via the global cache). + authResults := map[addrs.Provider]*getproviders.PackageAuthenticationResult{} // record auth results for all successfully fetched providers + targetPlatform := i.targetDir.targetPlatform // we inherit this to behave correctly in unit tests + for provider, version := range need { + if err := ctx.Err(); err != nil { + // If our context has been cancelled or reached a timeout then + // we'll abort early, because subsequent operations against + // that context will fail immediately anyway. + return nil, err + } + + lock := locks.Provider(provider) + var preferredHashes []getproviders.Hash + if lock != nil && lock.Version() == version { // hash changes are expected if the version is also changing + preferredHashes = lock.PreferredHashes() + } + + // If our target directory already has the provider version that fulfills the lock file, carry on + if installed := i.targetDir.ProviderVersion(provider, version); installed != nil { + if len(preferredHashes) > 0 { + if matches, _ := installed.MatchesAnyHash(preferredHashes); matches { + if cb := evts.ProviderAlreadyInstalled; cb != nil { + cb(provider, version) + } + continue + } + } + } + + if i.globalCacheDir != nil { + // Step 3a: If our global cache already has this version available then + // we'll just link it in. 
+ if cached := i.globalCacheDir.ProviderVersion(provider, version); cached != nil { + // An existing cache entry is only an acceptable choice + // if there is already a lock file entry for this provider + // and the cache entry matches its checksums. + // + // If there was no lock file entry at all then we need to + // install the package for real so that we can lock as complete + // as possible a set of checksums for all of this provider's + // packages. + // + // If there was a lock file entry but the cache doesn't match + // it then we assume that the lock file checksums were only + // partially populated (e.g. from a local mirror where we can + // only see one package to checksum it) and so we'll fetch + // from upstream to see if the origin can give us a package + // that _does_ match. This might still not work out, but if + // it does then it allows us to avoid returning a checksum + // mismatch error. + acceptablePackage := false + if len(preferredHashes) != 0 { + var err error + acceptablePackage, err = cached.MatchesAnyHash(preferredHashes) + if err != nil { + // If we can't calculate the checksum for the cached + // package then we'll just treat it as a checksum failure. + acceptablePackage = false + } + } + + if !acceptablePackage && i.globalCacheDirMayBreakDependencyLockFile { + // The "may break dependency lock file" setting effectively + // means that we'll accept any matching package that's + // already in the cache, regardless of whether it matches + // what's in the dependency lock file. + // + // That means two less-ideal situations might occur: + // - If this provider is not currently tracked in the lock + // file at all then after installation the lock file will + // only accept the package that was already present in + // the cache as a valid checksum. That means the generated + // lock file won't be portable to other operating systems + // or CPU architectures. 
+ // - If the provider _is_ currently tracked in the lock file + // but the checksums there don't match what was in the + // cache then the LinkFromOtherCache call below will + // fail with a checksum error, and the user will need to + // either manually remove the entry from the lock file + // or remove the mismatching item from the cache, + // depending on which of these they prefer to use as the + // source of truth for the expected contents of the + // package. + // + // If the lock file already includes this provider and the + // cache entry matches one of the locked checksums then + // there's no problem, but in that case we wouldn't enter + // this branch because acceptablePackage would already be + // true from the check above. + log.Printf( + "[WARN] plugin_cache_may_break_dependency_lock_file: Using global cache dir package for %s v%s even though it doesn't match this configuration's dependency lock file", + provider.String(), version.String(), + ) + acceptablePackage = true + } + + // TODO: Should we emit an event through the events object + // for "there was an entry in the cache but we ignored it + // because the checksum didn't match"? We can't use + // LinkFromCacheFailure in that case because this isn't a + // failure. For now we'll just be quiet about it. + + if acceptablePackage { + if cb := evts.LinkFromCacheBegin; cb != nil { + cb(provider, version, i.globalCacheDir.baseDir) + } + if _, err := cached.ExecutableFile(); err != nil { + err := fmt.Errorf("provider binary not found: %w", err) + errs[provider] = err + if cb := evts.LinkFromCacheFailure; cb != nil { + cb(provider, version, err) + } + continue + } + + err := i.targetDir.LinkFromOtherCache(cached, preferredHashes) + if err != nil { + errs[provider] = err + if cb := evts.LinkFromCacheFailure; cb != nil { + cb(provider, version, err) + } + continue + } + // We'll fetch what we just linked to make sure it actually + // did show up there. 
+ new := i.targetDir.ProviderVersion(provider, version) + if new == nil { + err := fmt.Errorf("after linking %s from provider cache at %s it is still not detected in the target directory; this is a bug in OpenTofu", provider, i.globalCacheDir.baseDir) + errs[provider] = err + if cb := evts.LinkFromCacheFailure; cb != nil { + cb(provider, version, err) + } + continue + } + + // The LinkFromOtherCache call above should've verified that + // the package matches one of the hashes previously recorded, + // if any. We'll now augment those hashes with one freshly + // calculated from the package we just linked, which allows + // the lock file to gradually transition to recording newer hash + // schemes when they become available. + var priorHashes []getproviders.Hash + if lock != nil && lock.Version() == version { + // If the version we're installing is identical to the + // one we previously locked then we'll keep all of the + // hashes we saved previously and add to it. Otherwise + // we'll be starting fresh, because each version has its + // own set of packages and thus its own hashes. + priorHashes = append(priorHashes, preferredHashes...) + + // NOTE: The behavior here is unfortunate when a particular + // provider version was already cached on the first time + // the current configuration requested it, because that + // means we don't currently get the opportunity to fetch + // and verify the checksums for the new package from + // upstream. That's currently unavoidable because upstream + // checksums are in the "ziphash" format and so we can't + // verify them against our cache directory's unpacked + // packages: we'd need to go fetch the package from the + // origin and compare against it, which would defeat the + // purpose of the global cache. + // + // If we fetch from upstream on the first encounter with + // a particular provider then we'll end up in the other + // codepath below where we're able to also include the + // checksums from the origin registry. 
+ } + newHash, err := cached.Hash() + if err != nil { + err := fmt.Errorf("after linking %s from provider cache at %s, failed to compute a checksum for it: %w", provider, i.globalCacheDir.baseDir, err) + errs[provider] = err + if cb := evts.LinkFromCacheFailure; cb != nil { + cb(provider, version, err) + } + continue + } + // The hashes slice gets deduplicated in the lock file + // implementation, so we don't worry about potentially + // creating a duplicate here. + var newHashes []getproviders.Hash + newHashes = append(newHashes, priorHashes...) + newHashes = append(newHashes, newHash) + locks.SetProvider(provider, version, reqs[provider], newHashes) + if cb := evts.ProvidersLockUpdated; cb != nil { + // We want to ensure that newHash and priorHashes are + // sorted. newHash is a single value, so it's definitely + // sorted. priorHashes are pulled from the lock file, so + // are also already sorted. + cb(provider, version, []getproviders.Hash{newHash}, nil, priorHashes) + } + + if cb := evts.LinkFromCacheSuccess; cb != nil { + cb(provider, version, new.PackageDir) + } + continue // Don't need to do full install, then. + } + } + } + + // Step 3b: Get the package metadata for the selected version from our + // provider source. + // + // This is the step where we might detect and report that the provider + // isn't available for the current platform. + if cb := evts.FetchPackageMeta; cb != nil { + cb(provider, version) + } + meta, err := i.source.PackageMeta(ctx, provider, version, targetPlatform) + if err != nil { + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + + // Step 3c: Retrieve the package indicated by the metadata we received, + // either directly into our target directory or via the global cache + // directory. 
+ if cb := evts.FetchPackageBegin; cb != nil { + cb(provider, version, meta.Location) + } + var installTo, linkTo *Dir + if i.globalCacheDir != nil { + installTo = i.globalCacheDir + linkTo = i.targetDir + } else { + installTo = i.targetDir + linkTo = nil // no linking needed + } + + allowedHashes := preferredHashes + if mode.forceInstallChecksums() { + allowedHashes = []getproviders.Hash{} + } + + authResult, err := installTo.InstallPackage(ctx, meta, allowedHashes) + if err != nil { + // TODO: Consider retrying for certain kinds of error that seem + // likely to be transient. For now, we just treat all errors equally. + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + new := installTo.ProviderVersion(provider, version) + if new == nil { + err := fmt.Errorf("after installing %s it is still not detected in %s; this is a bug in OpenTofu", provider, installTo.BasePath()) + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + if _, err := new.ExecutableFile(); err != nil { + err := fmt.Errorf("provider binary not found: %w", err) + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + if linkTo != nil { + // We skip emitting the "LinkFromCache..." events here because + // it's simpler for the caller to treat them as mutually exclusive. + // We can just subsume the linking step under the "FetchPackage..." + // series here (and that's why we use FetchPackageFailure below). + // We also don't do a hash check here because we already did that + // as part of the installTo.InstallPackage call above. 
+ err := linkTo.LinkFromOtherCache(new, nil) + if err != nil { + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + + // We should now also find the package in the linkTo dir, which + // gives us the final value of "new" where the path points in to + // the true target directory, rather than possibly the global + // cache directory. + new = linkTo.ProviderVersion(provider, version) + if new == nil { + err := fmt.Errorf("after installing %s it is still not detected in %s; this is a bug in OpenTofu", provider, linkTo.BasePath()) + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + if _, err := new.ExecutableFile(); err != nil { + err := fmt.Errorf("provider binary not found: %w", err) + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + } + authResults[provider] = authResult + + // The InstallPackage call above should've verified that + // the package matches one of the hashes previously recorded, + // if any. We'll now augment those hashes with a new set populated + // with the hashes returned by the upstream source and from the + // package we've just installed, which allows the lock file to + // gradually transition to newer hash schemes when they become + // available. + // + // This is assuming that if a package matches both a hash we saw before + // _and_ a new hash then the new hash is a valid substitute for + // the previous hash. + // + // The hashes slice gets deduplicated in the lock file + // implementation, so we don't worry about potentially + // creating duplicates here. + var priorHashes []getproviders.Hash + if lock != nil && lock.Version() == version { + // If the version we're installing is identical to the + // one we previously locked then we'll keep all of the + // hashes we saved previously and add to it. 
Otherwise + // we'll be starting fresh, because each version has its + // own set of packages and thus its own hashes. + priorHashes = append(priorHashes, preferredHashes...) + } + newHash, err := new.Hash() + if err != nil { + err := fmt.Errorf("after installing %s, failed to compute a checksum for it: %w", provider, err) + errs[provider] = err + if cb := evts.FetchPackageFailure; cb != nil { + cb(provider, version, err) + } + continue + } + + var signedHashes []getproviders.Hash + // For now, we will temporarily trust the hashes returned by the + // installation process that are "SigningSkipped" or "Signed". + // This is only intended to be temporary, see https://github.com/kubegems/opentofu/issues/266 for more information + if authResult.Signed() || authResult.SigningSkipped() { + // We'll trust new hashes from upstream only if they were verified + // as signed by a suitable key or if the signing validation was skipped. + // Otherwise, we'd record only + // a new hash we just calculated ourselves from the bytes on disk, + // and so the hashes would cover only the current platform. + signedHashes = append(signedHashes, meta.AcceptableHashes()...) + } + + var newHashes []getproviders.Hash + newHashes = append(newHashes, newHash) + newHashes = append(newHashes, priorHashes...) + newHashes = append(newHashes, signedHashes...) + + locks.SetProvider(provider, version, reqs[provider], newHashes) + if cb := evts.ProvidersLockUpdated; cb != nil { + // newHash and priorHashes are already sorted. + // But we do need to sort signedHashes so we can reason about it + // sensibly. 
+ sort.Slice(signedHashes, func(i, j int) bool { + return string(signedHashes[i]) < string(signedHashes[j]) + }) + + cb(provider, version, []getproviders.Hash{newHash}, signedHashes, priorHashes) + } + + if cb := evts.FetchPackageSuccess; cb != nil { + cb(provider, version, new.PackageDir, authResult) + } + } + + // Emit final event for fetching if any were successfully fetched + if cb := evts.ProvidersFetched; cb != nil && len(authResults) > 0 { + cb(authResults) + } + + // Finally, if the lock structure contains locks for any providers that + // are no longer needed by this configuration, we'll remove them. This + // is important because we will not have installed those providers + // above and so a lock file still containing them would make the working + // directory invalid: not every provider in the lock file is available + // for use. + for providerAddr := range locks.AllProviders() { + if _, ok := reqs[providerAddr]; !ok { + locks.RemoveProvider(providerAddr) + } + } + + if len(errs) > 0 { + return locks, InstallerError{ + ProviderErrors: errs, + } + } + return locks, nil +} + +// InstallMode customizes the details of how an install operation treats +// providers that have versions already cached in the target directory. +type InstallMode rune + +const ( + // InstallNewProvidersOnly is an InstallMode that causes the installer + // to accept any existing version of a requested provider that is already + // cached as long as it's in the given version sets, without checking + // whether new versions are available that are also in the given version + // sets. + InstallNewProvidersOnly InstallMode = 'N' + + // InstallNewProvidersForce is an InstallMode that follows the same + // logic as InstallNewProvidersOnly except it does not verify existing + // checksums but force installs new checksums for all given providers. 
+ InstallNewProvidersForce InstallMode = 'F' + + // InstallUpgrades is an InstallMode that causes the installer to check + // all requested providers to see if new versions are available that + // are also in the given version sets, even if a suitable version of + // a given provider is already available. + InstallUpgrades InstallMode = 'U' +) + +func (m InstallMode) forceQueryAllProviders() bool { + return m == InstallUpgrades +} + +func (m InstallMode) forceInstallChecksums() bool { + return m == InstallNewProvidersForce +} + +// InstallerError is an error type that may be returned (but is not guaranteed) +// from Installer.EnsureProviderVersions to indicate potentially several +// separate failed installation outcomes for different providers included in +// the overall request. +type InstallerError struct { + ProviderErrors map[addrs.Provider]error +} + +func (err InstallerError) Error() string { + addrs := make([]addrs.Provider, 0, len(err.ProviderErrors)) + for addr := range err.ProviderErrors { + addrs = append(addrs, addr) + } + sort.Slice(addrs, func(i, j int) bool { + return addrs[i].LessThan(addrs[j]) + }) + var b strings.Builder + b.WriteString("some providers could not be installed:\n") + for _, addr := range addrs { + providerErr := err.ProviderErrors[addr] + fmt.Fprintf(&b, "- %s: %s\n", addr, providerErr) + } + return strings.TrimSpace(b.String()) +} diff --git a/pkg/providercache/installer_events.go b/pkg/providercache/installer_events.go new file mode 100644 index 00000000000..34288dafe83 --- /dev/null +++ b/pkg/providercache/installer_events.go @@ -0,0 +1,165 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "context" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// InstallerEvents is a collection of function references that can be +// associated with an Installer object in order to be notified about various +// installation lifecycle events during an install operation. +// +// The set of supported events is primarily motivated by allowing ongoing +// progress reports in the UI of the command running provider installation, +// and so this only exposes information interesting to display and does not +// allow the recipient of the events to influence the ongoing process. +// +// Any of the fields may be left as nil to signal that the caller is not +// interested in the associated event. It's better to leave a field set to +// nil than to assign a do-nothing function into it because the installer +// may choose to skip preparing certain temporary data structures if it can see +// that a particular event is not used. +type InstallerEvents struct { + // The PendingProviders event is called prior to other events to give + // the recipient prior notice of the full set of distinct provider + // addresses it can expect to see mentioned in the other events. + // + // A recipient driving a UI might, for example, use this to pre-allocate + // UI space for status reports for all of the providers and then update + // those positions in-place as other events arrive. + PendingProviders func(reqs map[addrs.Provider]getproviders.VersionConstraints) + + // ProviderAlreadyInstalled is called for any provider that was included + // in PendingProviders but requires no further action because a suitable + // version is already present in the local provider cache directory. + // + // This event can also appear after the QueryPackages... series if + // querying determines that a version already available is the newest + // available version. 
+	ProviderAlreadyInstalled func(provider addrs.Provider, selectedVersion getproviders.Version)
+
+	// The BuiltInProvider... family of events describe the outcome for any
+	// requested providers that are built in to OpenTofu. Only one of these
+	// methods will be called for each such provider, and no other method
+	// will be called for them except that they are included in the
+	// aggregate PendingProviders map.
+	//
+	// The "Available" event reports that the requested builtin provider is
+	// available in this release of OpenTofu. The "Failure" event reports
+	// either that the provider is unavailable or that the request for it
+	// is invalid somehow.
+	BuiltInProviderAvailable func(provider addrs.Provider)
+	BuiltInProviderFailure   func(provider addrs.Provider, err error)
+
+	// The QueryPackages... family of events delimit the operation of querying
+	// a provider source for information about available packages matching
+	// a particular version constraint, prior to selecting a single version
+	// to install.
+	//
+	// A particular install operation includes only one query per distinct
+	// provider, so a caller can use the provider argument as a unique
+	// identifier to correlate between successive events.
+	//
+	// The Begin, Success, and Failure events will each occur only once per
+	// distinct provider.
+	//
+	// The Warning event is unique to the registry source.
+	QueryPackagesBegin   func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool)
+	QueryPackagesSuccess func(provider addrs.Provider, selectedVersion getproviders.Version)
+	QueryPackagesFailure func(provider addrs.Provider, err error)
+	QueryPackagesWarning func(provider addrs.Provider, warn []string)
+
+	// The LinkFromCache... family of events delimit the operation of linking
+	// a selected provider package from the system-wide shared cache into the
+	// current configuration's local cache.
+	//
+	// This sequence occurs instead of the FetchPackage... sequence if the
+	// QueryPackages... sequence selects a version that is already in the
+	// system-wide cache, and thus we will skip fetching it from the
+	// originating provider source and take it from the shared cache instead.
+	//
+	// Linking should, in most cases, be a much faster operation than
+	// fetching. However, it could still potentially be slow in some unusual
+	// cases like a particularly large source package on a system where symlinks
+	// are impossible, or when either of the cache directories are on a network
+	// filesystem accessed over a slow link.
+	LinkFromCacheBegin   func(provider addrs.Provider, version getproviders.Version, cacheRoot string)
+	LinkFromCacheSuccess func(provider addrs.Provider, version getproviders.Version, localDir string)
+	LinkFromCacheFailure func(provider addrs.Provider, version getproviders.Version, err error)
+
+	// The FetchPackage... family of events delimit the operation of retrieving
+	// a package from a particular source location.
+	//
+	// A particular install operation includes only one fetch per distinct
+	// provider, so a caller can use the provider argument as a unique
+	// identifier to correlate between successive events.
+	//
+	// A particular provider will either notify the LinkFromCache... events
+	// or the FetchPackage... events, never both in the same install operation.
+	//
+	// The Query, Begin, Success, and Failure events will each occur only once
+	// per distinct provider.
+	FetchPackageMeta    func(provider addrs.Provider, version getproviders.Version) // fetching metadata prior to real download
+	FetchPackageBegin   func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation)
+	FetchPackageSuccess func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult)
+	FetchPackageFailure func(provider addrs.Provider, version getproviders.Version, err error)
+
+	// The ProvidersLockUpdated event is called whenever the lock file will be
+	// updated. It provides the following information:
+	//
+	// - `localHashes`: Hashes computed on the local system by analyzing
+	//   files on disk.
+	// - `signedHashes`: Hashes signed by the private key that the origin
+	//   registry claims is the owner of this provider.
+	// - `priorHashes`: Hashes already present in the lock file before we
+	//   made any changes.
+	//
+	// The final lock file will be updated with a union of all the provided
+	// hashes. It is not just likely, but expected that there will be duplicates
+	// shared between all three collections of hashes, i.e. the local hash and
+	// remote hashes could already be in the cached hashes.
+	//
+	// In addition, we place a guarantee that the hash slices will be ordered
+	// in the same manner enforced by the lock file within NewProviderLock.
+	ProvidersLockUpdated func(provider addrs.Provider, version getproviders.Version, localHashes []getproviders.Hash, signedHashes []getproviders.Hash, priorHashes []getproviders.Hash)
+
+	// The ProvidersFetched event is called after all fetch operations if at
+	// least one provider was fetched successfully.
+	ProvidersFetched func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult)
+}
+
+// OnContext produces a context with all of the same behaviors as the given
+// context except that it will additionally carry the receiving
+// InstallerEvents.
+// +// Passing the resulting context to an installer request will cause the +// installer to send event notifications via the callbacks inside. +func (e *InstallerEvents) OnContext(ctx context.Context) context.Context { + return context.WithValue(ctx, ctxInstallerEvents, e) +} + +// installerEventsForContext looks on the given context for a registered +// InstallerEvents and returns a pointer to it if so. +// +// For caller convenience, if there is no events object attached to the +// given context this function will construct one that has all of its +// fields set to nil and return that, freeing the caller from having to +// do a nil check on the result before dereferencing it. +func installerEventsForContext(ctx context.Context) *InstallerEvents { + v := ctx.Value(ctxInstallerEvents) + if v != nil { + return v.(*InstallerEvents) + } + return &InstallerEvents{} +} + +type ctxInstallerEventsType int + +const ctxInstallerEvents = ctxInstallerEventsType(0) diff --git a/pkg/providercache/installer_events_test.go b/pkg/providercache/installer_events_test.go new file mode 100644 index 00000000000..ead9ddb8b68 --- /dev/null +++ b/pkg/providercache/installer_events_test.go @@ -0,0 +1,191 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +type testInstallerEventLogItem struct { + // The name of the event that occurred, using the same names as the + // fields of InstallerEvents. + Event string + + // Most events relate to a specific provider. For the few event types + // that don't, this will be a zero-value Provider. + Provider addrs.Provider + + // The type of Args will vary by event, but it should always be something + // that can be deterministically compared using the go-cmp package. 
+ Args interface{} +} + +// installerLogEventsForTests is a test helper that produces an InstallerEvents +// that writes event notifications (*testInstallerEventLogItem values) to +// the given channel as they occur. +// +// The caller must keep reading from the read side of the given channel +// throughout any installer operation using the returned InstallerEvents. +// It's the caller's responsibility to close the channel if needed and +// clean up any goroutines it started to process the events. +// +// The exact sequence of events emitted for an installer operation might +// change in future, if e.g. we introduce new event callbacks to the +// InstallerEvents struct. Tests using this mechanism may therefore need to +// be updated to reflect such changes. +// +// (The channel-based approach here is so that the control flow for event +// processing will belong to the caller and thus it can safely use its +// testing.T object(s) to emit log lines without non-test-case frames in the +// call stack.) 
+func installerLogEventsForTests(into chan<- *testInstallerEventLogItem) *InstallerEvents { + return &InstallerEvents{ + PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { + into <- &testInstallerEventLogItem{ + Event: "PendingProviders", + Args: reqs, + } + }, + ProviderAlreadyInstalled: func(provider addrs.Provider, selectedVersion getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "ProviderAlreadyInstalled", + Provider: provider, + Args: selectedVersion, + } + }, + BuiltInProviderAvailable: func(provider addrs.Provider) { + into <- &testInstallerEventLogItem{ + Event: "BuiltInProviderAvailable", + Provider: provider, + } + }, + BuiltInProviderFailure: func(provider addrs.Provider, err error) { + into <- &testInstallerEventLogItem{ + Event: "BuiltInProviderFailure", + Provider: provider, + Args: err.Error(), // stringified to guarantee cmp-ability + } + }, + QueryPackagesBegin: func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesBegin", + Provider: provider, + Args: struct { + Constraints string + Locked bool + }{getproviders.VersionConstraintsString(versionConstraints), locked}, + } + }, + QueryPackagesSuccess: func(provider addrs.Provider, selectedVersion getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesSuccess", + Provider: provider, + Args: selectedVersion.String(), + } + }, + QueryPackagesFailure: func(provider addrs.Provider, err error) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesFailure", + Provider: provider, + Args: err.Error(), // stringified to guarantee cmp-ability + } + }, + QueryPackagesWarning: func(provider addrs.Provider, warns []string) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesWarning", + Provider: provider, + Args: warns, + } + }, + LinkFromCacheBegin: func(provider addrs.Provider, version getproviders.Version, 
cacheRoot string) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheBegin", + Provider: provider, + Args: struct { + Version string + CacheRoot string + }{version.String(), cacheRoot}, + } + }, + LinkFromCacheSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheSuccess", + Provider: provider, + Args: struct { + Version string + LocalDir string + }{version.String(), localDir}, + } + }, + LinkFromCacheFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheFailure", + Provider: provider, + Args: struct { + Version string + Error string + }{version.String(), err.Error()}, + } + }, + FetchPackageMeta: func(provider addrs.Provider, version getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageMeta", + Provider: provider, + Args: version.String(), + } + }, + FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageBegin", + Provider: provider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{version.String(), location}, + } + }, + FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageSuccess", + Provider: provider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{version.String(), localDir, authResult.String()}, + } + }, + FetchPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageFailure", + Provider: provider, + Args: struct { + Version string + Error string + }{version.String(), err.Error()}, + } + }, + 
ProvidersLockUpdated: func(provider addrs.Provider, version getproviders.Version, localHashes []getproviders.Hash, signedHashes []getproviders.Hash, priorHashes []getproviders.Hash) { + into <- &testInstallerEventLogItem{ + Event: "ProvidersLockUpdated", + Provider: provider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{version.String(), localHashes, signedHashes, priorHashes}, + } + }, + ProvidersFetched: func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult) { + into <- &testInstallerEventLogItem{ + Event: "ProvidersFetched", + Args: authResults, + } + }, + } +} diff --git a/pkg/providercache/installer_test.go b/pkg/providercache/installer_test.go new file mode 100644 index 00000000000..306281f1533 --- /dev/null +++ b/pkg/providercache/installer_test.go @@ -0,0 +1,2696 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providercache + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + + "github.com/apparentlymart/go-versions/versions" + "github.com/apparentlymart/go-versions/versions/constraints" + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/depsfile" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +func TestEnsureProviderVersions(t *testing.T) { + // This is a sort of hybrid between table-driven and imperative-style + // testing, because the overall sequence of steps is the same for all + // of the test cases but the setup and verification have enough different + // permutations that it ends up being more concise to express them as + // normal code. 
+ type Test struct { + Source getproviders.Source + Prepare func(*testing.T, *Installer, *Dir) + LockFile string + Reqs getproviders.Requirements + Mode InstallMode + Check func(*testing.T, *Dir, *depsfile.Locks) + WantErr string + WantEvents func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem + } + + // noProvider is just the zero value of addrs.Provider, which we're + // using in this test as the key for installer events that are not + // specific to a particular provider. + var noProvider addrs.Provider + beepProvider := addrs.MustParseProviderSourceString("example.com/foo/beep") + beepProviderDir := getproviders.PackageLocalDir("testdata/beep-provider") + fakePlatform := getproviders.Platform{OS: "bleep", Arch: "bloop"} + wrongPlatform := getproviders.Platform{OS: "wrong", Arch: "wrong"} + beepProviderHash := getproviders.HashScheme1.New("2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=") + terraformProvider := addrs.MustParseProviderSourceString("terraform.io/builtin/terraform") + + tests := map[string]Test{ + "no dependencies": { + Mode: InstallNewProvidersOnly, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("unexpected cache directory entries\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 0 { + t.Errorf("unexpected provider lock entries\n%s", spew.Sdump(allLocked)) + } + }, + WantEvents: func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints(nil), + }, + }, + } + }, + }, + "successful initial install of one provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, 
+ }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: 
map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + nil, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a cold global cache": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t 
*testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + 
Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + nil, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a warm global cache but without a lock file entry": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache 
directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + // Existing cache entry is ineligible for linking because + // we have no lock file checksums to compare it to. + // Instead, we install from upstream and lock with + // whatever checksums we learn in that process. 
+ { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{ + "2.1.0", + beepProviderDir, + }, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + nil, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a warm global cache and correct locked checksum": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + # The existing cache entry is valid only if it matches a + # checksum already recorded in the lock file. 
+ provider "example.com/foo/beep" { + version = "2.1.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst 
*Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "LinkFromCacheBegin", + Provider: beepProvider, + Args: struct { + Version string + CacheRoot string + }{ + "2.1.0", + inst.globalCacheDir.BasePath(), + }, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + }, + }, + { + Event: "LinkFromCacheSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"), + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a warm global cache with an incompatible checksum": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + # This is approximating the awkward situation where the lock + # file was populated by someone who installed from a location + # other than the origin 
registry and so the set of checksums + # is incomplete. In this case we can't prove that our cache + # entry is valid and so we silently ignore the cache entry + # and try to install from upstream anyway, in the hope that + # this will give us an opportunity to access the origin + # registry and get a checksum that works for the current + # platform. + provider "example.com/foo/beep" { + version = "2.1.0" + constraints = ">= 1.0.0" + hashes = [ + # NOTE: This is the correct checksum for the + # beepProviderDir package, but we're going to + # intentionally install from a different directory + # below so that the entry in the cache will not + # match this checksum. + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + // This is another "beep provider" package directory that + // has a different checksum than the one in beepProviderDir. + // We're mimicking the situation where the lock file was + // originally built from beepProviderDir but the local system + // is running on a different platform and so its existing + // cache entry doesn't match the checksum. 
+ beepProviderOtherPlatformDir := getproviders.PackageLocalDir("testdata/beep-provider-other-platform") + + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderOtherPlatformDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return 
map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{ + "2.1.0", + beepProviderDir, + }, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a warm global cache without a lock file entry but allowing the cache to break the lock file": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + 
Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + # (intentionally empty) + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + inst.SetGlobalCacheDirMayBreakDependencyLockFile(true) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, 
dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "LinkFromCacheBegin", + Provider: beepProvider, + Args: struct { + Version string + CacheRoot string + }{ + "2.1.0", + inst.globalCacheDir.BasePath(), + }, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + nil, + }, + }, + { + Event: "LinkFromCacheSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"), + }, + }, + }, + } + }, + }, + "failing install of one provider through a warm global cache with an incorrect locked checksum while allowing the cache to break the lock file": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + # The existing cache entry is valid only if it matches a + # checksum already recorded in the lock file, but this + # test is overriding that rule using a special setting. 
+ provider "example.com/foo/beep" { + version = "2.1.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:wrong-not-matchy", + ] + } + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + inst.SetGlobalCacheDirMayBreakDependencyLockFile(true) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + // The lock file entry hasn't changed because the cache + // entry didn't match the existing lock file entry. + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:wrong-not-matchy"}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + // The provider wasn't installed into the local cache directory + // because that would make the local cache mismatch the + // lock file. 
+ gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := (*CachedProvider)(nil) + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantErr: `doesn't match any of the checksums`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "LinkFromCacheBegin", + Provider: beepProvider, + Args: struct { + Version string + CacheRoot string + }{ + "2.1.0", + inst.globalCacheDir.BasePath(), + }, + }, + { + Event: "LinkFromCacheFailure", + Provider: beepProvider, + Args: struct { + Version string + Error string + }{ + "2.1.0", + fmt.Sprintf( + "the provider cache at %s has a copy of example.com/foo/beep 2.1.0 that doesn't match any of the checksums recorded in the dependency lock file", + dir.BasePath(), + ), + }, + }, + }, + } + }, + }, + "successful reinstall of one previously-locked provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + 
version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.0.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: 
struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.0.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.0.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.0.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "skipped install of one previously-locked and installed provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + _, err := dir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("installation to the test dir failed: %s", err) + } + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: 
getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.0.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "ProviderAlreadyInstalled", + Provider: beepProvider, + Args: versions.Version{Major: 2, Minor: 0, Patch: 0}, + }, + }, + } + }, + }, + "successful upgrade of one 
previously-locked provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallUpgrades, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong 
cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "2.1.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + nil, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful install of a built-in provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + inst.SetBuiltInProviderTypes([]string{"terraform"}) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: nil, + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + // 
Built-in providers are neither included in the cache + // directory nor mentioned in the lock file, because they + // are compiled directly into the OpenTofu executable. + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 0 { + t.Errorf("wrong number of provider lock entries; want none\n%s", spew.Sdump(allLocked)) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: constraints.IntersectionSpec(nil), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderAvailable", + Provider: terraformProvider, + }, + }, + } + }, + }, + "remove no-longer-needed provider from lock file": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + provider "example.com/foo/obsolete" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "no:irrelevant", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number 
of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.0.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + // Note: intentionally no entries for example.com/foo/obsolete + // here, because it's no longer needed and therefore not + // installed. 
+ beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"1.0.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "1.0.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "1.0.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "failed install of a non-existing built-in provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + // NOTE: We're intentionally not calling + // inst.SetBuiltInProviderTypes to make the "terraform" + // built-in provider available here, so requests for it + // should fail. 
+ }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: nil, + }, + WantErr: `some providers could not be installed: +- terraform.io/builtin/terraform: this OpenTofu release has no built-in provider named "terraform"`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: constraints.IntersectionSpec(nil), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderFailure", + Provider: terraformProvider, + Args: `this OpenTofu release has no built-in provider named "terraform"`, + }, + }, + } + }, + }, + "failed install when a built-in provider has a version constraint": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + inst.SetBuiltInProviderTypes([]string{"terraform"}) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- terraform.io/builtin/terraform: built-in providers do not support explicit version constraints`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderFailure", + Provider: terraformProvider, + Args: `built-in providers do not support explicit version constraints`, + }, + }, + } + }, + }, + "locked version is excluded by new version constraint": { + Source: getproviders.NewMockSource( + 
[]getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.0.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use tofu init -upgrade to allow selection of new versions`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: 
getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use tofu init -upgrade to allow selection of new versions`, + }, + }, + } + }, + }, + "locked version is no longer available": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.2.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.2.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong 
lock entry\n%s", diff) + } + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: the previously-selected version 1.2.0 is no longer available`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `the previously-selected version 1.2.0 is no longer available`, + }, + }, + } + }, + }, + "no versions match the version constraint": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: no available releases match the given constraints >= 2.0.0`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `no available releases 
match the given constraints >= 2.0.0`, + }, + }, + } + }, + }, + "version exists but doesn't support the current platform": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: wrongPlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: provider example.com/foo/beep 1.0.0 is not available for bleep_bloop`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageFailure", + Provider: beepProvider, + Args: struct { + Version string + Error string + }{ + "1.0.0", + "provider example.com/foo/beep 1.0.0 is not available for bleep_bloop", + }, + }, + }, + } + }, + }, + "available package doesn't match locked hash": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:does-not-match", + ] + } + `, + Mode: 
InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https://opentofu.org/docs/language/files/dependency-lock/#checksum-verification`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"1.0.0", beepProviderDir}, + }, + { + Event: "FetchPackageFailure", + Provider: beepProvider, + Args: struct { + Version string + Error string + }{ + "1.0.0", + `the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https://opentofu.org/docs/language/files/dependency-lock/#checksum-verification`, + }, + }, + }, + } + }, + }, + "force mode ignores hashes": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: 
beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:does-not-match", + ] + } + `, + Mode: InstallNewProvidersForce, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.0.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{beepProviderHash, "h1:does-not-match"}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: 
map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"1.0.0", beepProviderDir}, + }, + { + Event: "ProvidersLockUpdated", + Provider: beepProvider, + Args: struct { + Version string + Local []getproviders.Hash + Signed []getproviders.Hash + Prior []getproviders.Hash + }{ + "1.0.0", + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + nil, + []getproviders.Hash{"h1:does-not-match"}, + }, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "1.0.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/1.0.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + } + + ctx := context.Background() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.Check == nil && test.WantEvents == nil && test.WantErr == "" { + t.Fatalf("invalid test: must set at least one of Check, WantEvents, or WantErr") + } + + outputDir := NewDirWithPlatform(tmpDir(t), fakePlatform) + source := test.Source + if source == nil { + source = getproviders.NewMockSource(nil, nil) + } + inst := NewInstaller(outputDir, source) + if test.Prepare != nil { + test.Prepare(t, inst, outputDir) + } /* boop */ + + locks, lockDiags := depsfile.LoadLocksFromBytes([]byte(test.LockFile), "test.lock.hcl") + if lockDiags.HasErrors() { + t.Fatalf("invalid lock file: %s", lockDiags.Err().Error()) + } + + providerEvents := 
make(map[addrs.Provider][]*testInstallerEventLogItem) + eventsCh := make(chan *testInstallerEventLogItem) + var newLocks *depsfile.Locks + var instErr error + go func(ch chan *testInstallerEventLogItem) { + events := installerLogEventsForTests(ch) + ctx := events.OnContext(ctx) + newLocks, instErr = inst.EnsureProviderVersions(ctx, locks, test.Reqs, test.Mode) + close(eventsCh) // exits the event loop below + }(eventsCh) + for evt := range eventsCh { + // We do the event collection in the main goroutine, rather than + // running the installer itself in the main goroutine, so that + // we can safely t.Log in here without violating the testing.T + // usage rules. + if evt.Provider == (addrs.Provider{}) { + t.Logf("%s(%s)", evt.Event, spew.Sdump(evt.Args)) + } else { + t.Logf("%s: %s(%s)", evt.Provider, evt.Event, spew.Sdump(evt.Args)) + } + providerEvents[evt.Provider] = append(providerEvents[evt.Provider], evt) + } + + if test.WantErr != "" { + if instErr == nil { + t.Errorf("succeeded; want error\nwant: %s", test.WantErr) + } else if got, want := instErr.Error(), test.WantErr; !strings.Contains(got, want) { + t.Errorf("wrong error\ngot: %s\nwant substring: %s", got, want) + } + } else if instErr != nil { + t.Errorf("unexpected error\ngot: %s", instErr.Error()) + } + + if test.Check != nil { + test.Check(t, outputDir, newLocks) + } + + if test.WantEvents != nil { + wantEvents := test.WantEvents(inst, outputDir) + if diff := cmp.Diff(wantEvents, providerEvents); diff != "" { + t.Errorf("wrong installer events\n%s", diff) + } + } + }) + } +} + +func TestEnsureProviderVersions_local_source(t *testing.T) { + // create filesystem source using the test provider cache dir + source := getproviders.NewFilesystemMirrorSource("testdata/cachedir") + + // create a temporary workdir + tmpDirPath := t.TempDir() + + // set up the installer using the temporary directory and filesystem source + platform := getproviders.Platform{OS: "linux", Arch: "amd64"} + dir := 
NewDirWithPlatform(tmpDirPath, platform) + installer := NewInstaller(dir, source) + + tests := map[string]struct { + provider string + version string + wantHash getproviders.Hash // getproviders.NilHash if not expected to be installed + err string + }{ + "install-unpacked": { + provider: "null", + version: "2.0.0", + wantHash: getproviders.HashScheme1.New("qjsREM4DqEWECD43FcPqddZ9oxCG+IaMTxvWPciS05g="), + }, + "invalid-zip-file": { + provider: "null", + version: "2.1.0", + wantHash: getproviders.NilHash, + err: "zip: not a valid zip file", + }, + "version-constraint-unmet": { + provider: "null", + version: "2.2.0", + wantHash: getproviders.NilHash, + err: "no available releases match the given constraints 2.2.0", + }, + "missing-executable": { + provider: "missing/executable", + version: "2.0.0", + wantHash: getproviders.NilHash, // installation fails for a provider with no executable + err: "provider binary not found: could not find executable file starting with terraform-provider-executable", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := context.TODO() + + provider := addrs.MustParseProviderSourceString(test.provider) + versionConstraint := getproviders.MustParseVersionConstraints(test.version) + version := getproviders.MustParseVersion(test.version) + reqs := getproviders.Requirements{ + provider: versionConstraint, + } + + newLocks, err := installer.EnsureProviderVersions(ctx, depsfile.NewLocks(), reqs, InstallNewProvidersOnly) + gotProviderlocks := newLocks.AllProviders() + wantProviderLocks := map[addrs.Provider]*depsfile.ProviderLock{ + provider: depsfile.NewProviderLock( + provider, + version, + getproviders.MustParseVersionConstraints("= 2.0.0"), + []getproviders.Hash{ + test.wantHash, + }, + ), + } + if test.wantHash == getproviders.NilHash { + wantProviderLocks = map[addrs.Provider]*depsfile.ProviderLock{} + } + + if diff := cmp.Diff(wantProviderLocks, gotProviderlocks, depsfile.ProviderLockComparer); diff != "" 
{ + t.Errorf("wrong selected\n%s", diff) + } + + if test.err == "" && err == nil { + return + } + + switch err := err.(type) { + case InstallerError: + providerError, ok := err.ProviderErrors[provider] + if !ok { + t.Fatalf("did not get error for provider %s", provider) + } + + if got := providerError.Error(); got != test.err { + t.Fatalf("wrong result\ngot: %s\nwant: %s\n", got, test.err) + } + default: + t.Fatalf("wrong error type. Expected InstallerError, got %T", err) + } + }) + } +} + +// This test only verifies protocol errors and does not try for successful +// installation (at the time of writing, the test files aren't signed so the +// signature verification fails); that's left to the e2e tests. +func TestEnsureProviderVersions_protocol_errors(t *testing.T) { + source, _, close := testRegistrySource(t) + defer close() + + // create a temporary workdir + tmpDirPath := t.TempDir() + + version0 := getproviders.MustParseVersionConstraints("0.1.0") // supports protocol version 1.0 + version1 := getproviders.MustParseVersion("1.2.0") // this is the expected result in tests with a match + version2 := getproviders.MustParseVersionConstraints("2.0") // supports protocol version 99 + + // set up the installer using the temporary directory and mock source + platform := getproviders.Platform{OS: "gameboy", Arch: "lr35902"} + dir := NewDirWithPlatform(tmpDirPath, platform) + installer := NewInstaller(dir, source) + + tests := map[string]struct { + provider addrs.Provider + inputVersion getproviders.VersionConstraints + wantVersion getproviders.Version + }{ + "too old": { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + version0, + version1, + }, + "too new": { + addrs.MustParseProviderSourceString("example.com/awesomesauce/happycloud"), + version2, + version1, + }, + "unsupported": { + addrs.MustParseProviderSourceString("example.com/weaksauce/unsupported-protocol"), + version0, + getproviders.UnspecifiedVersion, + }, + } + + for name, 
test := range tests { + t.Run(name, func(t *testing.T) { + reqs := getproviders.Requirements{ + test.provider: test.inputVersion, + } + ctx := context.TODO() + _, err := installer.EnsureProviderVersions(ctx, depsfile.NewLocks(), reqs, InstallNewProvidersOnly) + + switch err := err.(type) { + case nil: + t.Fatalf("expected error, got success") + case InstallerError: + providerError, ok := err.ProviderErrors[test.provider] + if !ok { + t.Fatalf("did not get error for provider %s", test.provider) + } + + switch providerError := providerError.(type) { + case getproviders.ErrProtocolNotSupported: + if !providerError.Suggestion.Same(test.wantVersion) { + t.Fatalf("wrong result\ngot: %s\nwant: %s\n", providerError.Suggestion, test.wantVersion) + } + default: + t.Fatalf("wrong error type. Expected ErrProtocolNotSupported, got %T", err) + } + default: + t.Fatalf("wrong error type. Expected InstallerError, got %T", err) + } + }) + } +} + +// testServices starts up a local HTTP server running a fake provider registry +// service and returns a service discovery object pre-configured to consider +// the host "example.com" to be served by the fake registry service. +// +// The returned discovery object also knows the hostname "not.example.com" +// which does not have a provider registry at all and "too-new.example.com" +// which has a "providers.v99" service that is inoperable but could be useful +// to test the error reporting for detecting an unsupported protocol version. +// It also knows fails.example.com but it refers to an endpoint that doesn't +// correctly speak HTTP, to simulate a protocol error. +// +// The second return value is a function to call at the end of a test function +// to shut down the test server. After you call that function, the discovery +// object becomes useless. 
+func testServices(t *testing.T) (services *disco.Disco, baseURL string, cleanup func()) { + server := httptest.NewServer(http.HandlerFunc(fakeRegistryHandler)) + + services = disco.New() + services.ForceHostServices(svchost.Hostname("example.com"), map[string]interface{}{ + "providers.v1": server.URL + "/providers/v1/", + }) + services.ForceHostServices(svchost.Hostname("not.example.com"), map[string]interface{}{}) + services.ForceHostServices(svchost.Hostname("too-new.example.com"), map[string]interface{}{ + // This service doesn't actually work; it's here only to be + // detected as "too new" by the discovery logic. + "providers.v99": server.URL + "/providers/v99/", + }) + services.ForceHostServices(svchost.Hostname("fails.example.com"), map[string]interface{}{ + "providers.v1": server.URL + "/fails-immediately/", + }) + + // We'll also permit registry.opentofu.org here just because it's our + // default and has some unique features that are not allowed on any other + // hostname. It behaves the same as example.com, which should be preferred + // if you're not testing something specific to the default registry in order + // to ensure that most things are hostname-agnostic. + services.ForceHostServices(svchost.Hostname("registry.opentofu.org"), map[string]interface{}{ + "providers.v1": server.URL + "/providers/v1/", + }) + + return services, server.URL, func() { + server.Close() + } +} + +// testRegistrySource is a wrapper around testServices that uses the created +// discovery object to produce a Source instance that is ready to use with the +// fake registry services. +// +// As with testServices, the second return value is a function to call at the end +// of your test in order to shut down the test server. 
+func testRegistrySource(t *testing.T) (source *getproviders.RegistrySource, baseURL string, cleanup func()) { + services, baseURL, close := testServices(t) + source = getproviders.NewRegistrySource(services) + return source, baseURL, close +} + +func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { + path := req.URL.EscapedPath() + if strings.HasPrefix(path, "/fails-immediately/") { + // Here we take over the socket and just close it immediately, to + // simulate one possible way a server might not be an HTTP server. + hijacker, ok := resp.(http.Hijacker) + if !ok { + // Not hijackable, so we'll just fail normally. + // If this happens, tests relying on this will fail. + resp.WriteHeader(500) + resp.Write([]byte(`cannot hijack`)) + return + } + conn, _, err := hijacker.Hijack() + if err != nil { + resp.WriteHeader(500) + resp.Write([]byte(`hijack failed`)) + return + } + conn.Close() + return + } + + if strings.HasPrefix(path, "/pkg/") { + switch path { + case "/pkg/awesomesauce/happycloud_1.2.0.zip": + resp.Write([]byte("some zip file")) + case "/pkg/awesomesauce/happycloud_1.2.0_SHA256SUMS": + resp.Write([]byte("000000000000000000000000000000000000000000000000000000000000f00d happycloud_1.2.0.zip\n")) + case "/pkg/awesomesauce/happycloud_1.2.0_SHA256SUMS.sig": + resp.Write([]byte("GPG signature")) + default: + resp.WriteHeader(404) + resp.Write([]byte("unknown package file download")) + } + return + } + + if !strings.HasPrefix(path, "/providers/v1/") { + resp.WriteHeader(404) + resp.Write([]byte(`not a provider registry endpoint`)) + return + } + + pathParts := strings.Split(path, "/")[3:] + if len(pathParts) < 2 { + resp.WriteHeader(404) + resp.Write([]byte(`unexpected number of path parts`)) + return + } + log.Printf("[TRACE] fake provider registry request for %#v", pathParts) + if len(pathParts) == 2 { + switch pathParts[0] + "/" + pathParts[1] { + + case "-/legacy": + // NOTE: This legacy lookup endpoint is specific to + // 
registry.opentofu.org and not expected to work on any other + // registry host. + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"namespace":"legacycorp"}`)) + + default: + resp.WriteHeader(404) + resp.Write([]byte(`unknown namespace or provider type for direct lookup`)) + } + } + + if len(pathParts) < 3 { + resp.WriteHeader(404) + resp.Write([]byte(`unexpected number of path parts`)) + return + } + + if pathParts[2] == "versions" { + if len(pathParts) != 3 { + resp.WriteHeader(404) + resp.Write([]byte(`extraneous path parts`)) + return + } + + switch pathParts[0] + "/" + pathParts[1] { + case "awesomesauce/happycloud": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + // Note that these version numbers are intentionally misordered + // so we can test that the client-side code places them in the + // correct order (lowest precedence first). + resp.Write([]byte(`{"versions":[{"version":"0.1.0","protocols":["1.0"]},{"version":"2.0.0","protocols":["99.0"]},{"version":"1.2.0","protocols":["5.0"]}, {"version":"1.0.0","protocols":["5.0"]}]}`)) + case "weaksauce/unsupported-protocol": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[{"version":"0.1.0","protocols":["0.1"]}]}`)) + case "weaksauce/no-versions": + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write([]byte(`{"versions":[]}`)) + default: + resp.WriteHeader(404) + resp.Write([]byte(`unknown namespace or provider type`)) + } + return + } + + if len(pathParts) == 6 && pathParts[3] == "download" { + switch pathParts[0] + "/" + pathParts[1] { + case "awesomesauce/happycloud": + if pathParts[4] == "nonexist" { + resp.WriteHeader(404) + resp.Write([]byte(`unsupported OS`)) + return + } + version := pathParts[2] + body := map[string]interface{}{ + "protocols": []string{"99.0"}, + "os": pathParts[4], + "arch": pathParts[5], + 
"filename": "happycloud_" + version + ".zip", + "shasum": "000000000000000000000000000000000000000000000000000000000000f00d", + "download_url": "/pkg/awesomesauce/happycloud_" + version + ".zip", + "shasums_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS", + "shasums_signature_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS.sig", + "signing_keys": map[string]interface{}{ + "gpg_public_keys": []map[string]interface{}{ + { + "ascii_armor": getproviders.TestingPublicKey, + }, + }, + }, + } + enc, err := json.Marshal(body) + if err != nil { + resp.WriteHeader(500) + resp.Write([]byte("failed to encode body")) + } + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write(enc) + case "weaksauce/unsupported-protocol": + var protocols []string + version := pathParts[2] + switch version { + case "0.1.0": + protocols = []string{"1.0"} + case "2.0.0": + protocols = []string{"99.0"} + default: + protocols = []string{"5.0"} + } + + body := map[string]interface{}{ + "protocols": protocols, + "os": pathParts[4], + "arch": pathParts[5], + "filename": "happycloud_" + version + ".zip", + "shasum": "000000000000000000000000000000000000000000000000000000000000f00d", + "download_url": "/pkg/awesomesauce/happycloud_" + version + ".zip", + "shasums_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS", + "shasums_signature_url": "/pkg/awesomesauce/happycloud_" + version + "_SHA256SUMS.sig", + "signing_keys": map[string]interface{}{ + "gpg_public_keys": []map[string]interface{}{ + { + "ascii_armor": getproviders.TestingPublicKey, + }, + }, + }, + } + enc, err := json.Marshal(body) + if err != nil { + resp.WriteHeader(500) + resp.Write([]byte("failed to encode body")) + } + resp.Header().Set("Content-Type", "application/json") + resp.WriteHeader(200) + resp.Write(enc) + default: + resp.WriteHeader(404) + resp.Write([]byte(`unknown namespace/provider/version/architecture`)) + } + return + } + + resp.WriteHeader(404) 
+	resp.Write([]byte(`unrecognized path scheme`))
+}
+
+// In order to be able to compare the recorded temp dir paths, we need to
+// normalize the path to match what the installer would report.
+func tmpDir(t *testing.T) string {
+	unlinked, err := filepath.EvalSymlinks(t.TempDir())
+	if err != nil {
+		t.Fatal(err)
+	}
+	return filepath.Clean(unlinked)
+}
diff --git a/pkg/providercache/package_install.go b/pkg/providercache/package_install.go
new file mode 100644
index 00000000000..939e8cb0631
--- /dev/null
+++ b/pkg/providercache/package_install.go
@@ -0,0 +1,307 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package providercache
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	getter "github.com/hashicorp/go-getter"
+	"github.com/hashicorp/go-retryablehttp"
+
+	"github.com/kubegems/opentofu/pkg/copy"
+	"github.com/kubegems/opentofu/pkg/getproviders"
+	"github.com/kubegems/opentofu/pkg/httpclient"
+	"github.com/kubegems/opentofu/pkg/logging"
+)
+
+// We borrow the "unpack a zip file into a target directory" logic from
+// go-getter, even though we're not otherwise using go-getter here.
+// (We don't need the same flexibility as we have for modules, because
+// providers _always_ come from provider registries, which have a very
+// specific protocol and set of expectations.)
+var unzip = getter.ZipDecompressor{}
+
+const (
+	// httpClientRetryCountEnvName is the environment variable name used to customize
+	// the HTTP retry count for provider downloads.
+ httpClientRetryCountEnvName = "TF_PROVIDER_DOWNLOAD_RETRY" + + defaultRetry = 2 +) + +func init() { + configureProviderDownloadRetry() +} + +var ( + maxRetryCount int +) + +// will attempt for requests with retryable errors, like 502 status codes +func configureProviderDownloadRetry() { + maxRetryCount = defaultRetry + if v := os.Getenv(httpClientRetryCountEnvName); v != "" { + retry, err := strconv.Atoi(v) + if err == nil && retry > 0 { + maxRetryCount = retry + } + } +} + +func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { + if i > 0 { + logger.Printf("[INFO] Previous request to the provider install failed, attempting retry.") + } +} + +func installFromHTTPURL(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) { + url := meta.Location.String() + + // When we're installing from an HTTP URL we expect the URL to refer to + // a zip file. We'll fetch that into a temporary file here and then + // delegate to installFromLocalArchive below to actually extract it. + // (We're not using go-getter here because its HTTP getter has a bunch + // of extraneous functionality we don't need or want, like indirection + // through X-Terraform-Get header, attempting partial fetches for + // files that already exist, etc.) + + retryableClient := retryablehttp.NewClient() + retryableClient.HTTPClient = httpclient.New() + retryableClient.RetryMax = maxRetryCount + retryableClient.RequestLogHook = requestLogHook + retryableClient.Logger = log.New(logging.LogOutput(), "", log.Flags()) + + req, err := retryablehttp.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, fmt.Errorf("invalid provider download request: %w", err) + } + resp, err := retryableClient.Do(req) + if err != nil { + if ctx.Err() == context.Canceled { + // "context canceled" is not a user-friendly error message, + // so we'll return a more appropriate one here. 
+			return nil, fmt.Errorf("provider download was interrupted")
+		}
+		return nil, fmt.Errorf("%s: %w", getproviders.HostFromRequest(req.Request), err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("unsuccessful request to %s: %s", url, resp.Status)
+	}
+
+	f, err := os.CreateTemp("", "terraform-provider")
+	if err != nil {
+		return nil, fmt.Errorf("failed to open temporary file to download from %s: %w", url, err)
+	}
+	defer f.Close()
+	defer os.Remove(f.Name())
+
+	// We'll borrow go-getter's "cancelable copy" implementation here so that
+	// the download can potentially be interrupted partway through.
+	n, err := getter.Copy(ctx, f, resp.Body)
+	if err == nil && n < resp.ContentLength {
+		err = fmt.Errorf("incorrect response size: expected %d bytes, but got %d bytes", resp.ContentLength, n)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	archiveFilename := f.Name()
+	localLocation := getproviders.PackageLocalArchive(archiveFilename)
+
+	var authResult *getproviders.PackageAuthenticationResult
+	if meta.Authentication != nil {
+		if authResult, err = meta.Authentication.AuthenticatePackage(localLocation); err != nil {
+			return authResult, err
+		}
+	}
+
+	// We can now delegate to installFromLocalArchive for extraction. To do so,
+	// we construct a new package meta description using the local archive
+	// path as the location, and skipping authentication. installFromLocalArchive
+	// is responsible for verifying that the archive matches the allowedHashes,
+	// though.
+ localMeta := getproviders.PackageMeta{ + Provider: meta.Provider, + Version: meta.Version, + ProtocolVersions: meta.ProtocolVersions, + TargetPlatform: meta.TargetPlatform, + Filename: meta.Filename, + Location: localLocation, + Authentication: nil, + } + if _, err := installFromLocalArchive(ctx, localMeta, targetDir, allowedHashes); err != nil { + return nil, err + } + return authResult, nil +} + +func installFromLocalArchive(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) { + var authResult *getproviders.PackageAuthenticationResult + if meta.Authentication != nil { + var err error + if authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil { + return nil, err + } + } + + if len(allowedHashes) > 0 { + if matches, err := meta.MatchesAnyHash(allowedHashes); err != nil { + return authResult, fmt.Errorf( + "failed to calculate checksum for %s %s package at %s: %w", + meta.Provider, meta.Version, meta.Location, err, + ) + } else if !matches { + return authResult, fmt.Errorf( + "the current package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file; for more information: https://opentofu.org/docs/language/files/dependency-lock/#checksum-verification", + meta.Provider, meta.Version, + ) + } + } + + filename := meta.Location.String() + + // NOTE: We're not checking whether there's already a directory at + // targetDir with some files in it. Packages are supposed to be immutable + // and therefore we'll just be overwriting all of the existing files with + // their same contents unless something unusual is happening. If something + // unusual _is_ happening then this will produce something that doesn't + // match the allowed hashes and so our caller should catch that after + // we return if so. 
+ + err := unzip.Decompress(targetDir, filename, true, 0000) + if err != nil { + return authResult, err + } + + return authResult, nil +} + +// installFromLocalDir is the implementation of both installing a package from +// a local directory source _and_ of linking a package from another cache +// in LinkFromOtherCache, because they both do fundamentally the same +// operation: symlink if possible, or deep-copy otherwise. +func installFromLocalDir(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) { + sourceDir := meta.Location.String() + + absNew, err := filepath.Abs(targetDir) + if err != nil { + return nil, fmt.Errorf("failed to make target path %s absolute: %w", targetDir, err) + } + absCurrent, err := filepath.Abs(sourceDir) + if err != nil { + return nil, fmt.Errorf("failed to make source path %s absolute: %w", sourceDir, err) + } + + // Before we do anything else, we'll do a quick check to make sure that + // these two paths are not pointing at the same physical directory on + // disk. This compares the files by their OS-level device and directory + // entry identifiers, not by their virtual filesystem paths. + if same, err := copy.SameFile(absNew, absCurrent); same { + return nil, fmt.Errorf("cannot install existing provider directory %s to itself", targetDir) + } else if err != nil { + return nil, fmt.Errorf("failed to determine if %s and %s are the same: %w", sourceDir, targetDir, err) + } + + var authResult *getproviders.PackageAuthenticationResult + if meta.Authentication != nil { + // (we have this here for completeness but note that local filesystem + // mirrors typically don't include enough information for package + // authentication and so we'll rarely get in here in practice.) 
+ var err error + if authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil { + return nil, err + } + } + + // If the caller provided at least one hash in allowedHashes then at + // least one of those hashes ought to match. However, for local directories + // in particular we can't actually verify the legacy "zh:" hash scheme + // because it requires access to the original .zip archive, and so as a + // measure of pragmatism we'll treat a set of hashes where all are "zh:" + // the same as no hashes at all, and let anything pass. This is definitely + // non-ideal but accepted for two reasons: + // - Packages we find on local disk can be considered a little more trusted + // than packages coming from over the network, because we assume that + // they were either placed intentionally by an operator or they were + // automatically installed by a previous network operation that would've + // itself verified the hashes. + // - Our installer makes a concerted effort to record at least one new-style + // hash for each lock entry, so we should very rarely end up in this + // situation anyway. 
+	suitableHashCount := 0
+	for _, hash := range allowedHashes {
+		if !hash.HasScheme(getproviders.HashSchemeZip) {
+			suitableHashCount++
+		}
+	}
+	if suitableHashCount > 0 {
+		if matches, err := meta.MatchesAnyHash(allowedHashes); err != nil {
+			return authResult, fmt.Errorf(
+				"failed to calculate checksum for %s %s package at %s: %w",
+				meta.Provider, meta.Version, meta.Location, err,
+			)
+		} else if !matches {
+			return authResult, fmt.Errorf(
+				"the local package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https://opentofu.org/docs/language/files/dependency-lock/#checksum-verification",
+				meta.Provider, meta.Version,
+			)
+		}
+	}
+
+	// Delete anything that's already present at this path first.
+	err = os.RemoveAll(targetDir)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, fmt.Errorf("failed to remove existing %s before linking it to %s: %w", sourceDir, targetDir, err)
+	}
+
+	// We'll prefer to create a symlink if possible, but we'll fall back to
+	// a recursive copy if symlink creation fails. It could fail for a number
+	// of reasons, including being on Windows 8 without administrator
+	// privileges or being on a legacy filesystem like FAT that has no way
+	// to represent a symlink. (Generalized symlink support for Windows was
+	// introduced in a Windows 10 minor update.)
+	//
+	// We use an absolute path for the symlink to reduce the risk of it being
+	// broken by moving things around later, since the source directory is
+	// likely to be a shared directory independent of any particular target
+	// and thus we can't assume that they will move around together.
+ linkTarget := absCurrent + + parentDir := filepath.Dir(absNew) + err = os.MkdirAll(parentDir, 0755) + if err != nil { + return nil, fmt.Errorf("failed to create parent directories leading to %s: %w", targetDir, err) + } + + err = os.Symlink(linkTarget, absNew) + if err == nil { + // Success, then! + return nil, nil + } + + // If we get down here then symlinking failed and we need a deep copy + // instead. To make a copy, we first need to create the target directory, + // which would otherwise be a symlink. + err = os.Mkdir(absNew, 0755) + if err != nil && os.IsExist(err) { + return nil, fmt.Errorf("failed to create directory %s: %w", absNew, err) + } + err = copy.CopyDir(absNew, absCurrent) + if err != nil { + return nil, fmt.Errorf("failed to either symlink or copy %s to %s: %w", absCurrent, absNew, err) + } + + // If we got here then apparently our copy succeeded, so we're done. + return nil, nil +} diff --git a/pkg/providercache/testdata/beep-provider-other-platform/terraform-provider-beep b/pkg/providercache/testdata/beep-provider-other-platform/terraform-provider-beep new file mode 100644 index 00000000000..18929cd34bf --- /dev/null +++ b/pkg/providercache/testdata/beep-provider-other-platform/terraform-provider-beep @@ -0,0 +1,7 @@ +This is not a real provider executable. It's just here to give the installer +something to copy in some of our installer test cases. + +This must be different than the file of the same name in the sibling directory +"beep-provider", because we're using this to stand in for a valid package +that was built for a different platform than the one whose checksum is recorded +in the lock file. diff --git a/pkg/providercache/testdata/beep-provider/terraform-provider-beep b/pkg/providercache/testdata/beep-provider/terraform-provider-beep new file mode 100644 index 00000000000..e0841fd8c19 --- /dev/null +++ b/pkg/providercache/testdata/beep-provider/terraform-provider-beep @@ -0,0 +1,2 @@ +This is not a real provider executable. 
It's just here to give the installer +something to copy in some of our installer test cases. diff --git a/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt b/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt new file mode 100644 index 00000000000..8a1c7c32741 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/extra-data.txt @@ -0,0 +1,6 @@ +Provider plugin packages are allowed to include other files such as any static +data they need to operate, or possibly source files if the provider is written +in an interpreted programming language. + +This extra file is here just to make sure that extra files don't cause any +misbehavior during local discovery. diff --git a/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud b/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/opentf.example.com/AwesomeCorp/happycloud/0.1.0-alpha.2/darwin_amd64/terraform-provider-happycloud @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy b/pkg/providercache/testdata/cachedir/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/-/legacy/1.0.0/linux_amd64/terraform-provider-legacy @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. 
diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/darwin_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/2.0.0/linux_amd64/terraform-provider-null @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/invalid b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/invalid new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/invalid @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip new file mode 100644 index 00000000000..68a5502719d --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_2.1.0_linux_amd64.zip @@ -0,0 +1,5 @@ +This is just a placeholder file for discovery testing, not a real provider package. + +This file is what we'd find for mirrors using the "packed" mirror layout, +where the mirror maintainer can just download the packages from upstream and +have Terraform unpack them automatically when installing. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip new file mode 100644 index 00000000000..289663a2ab3 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/null/terraform-provider-null_invalid_invalid_invalid.zip @@ -0,0 +1 @@ +This should be ignored because it doesn't follow the provider package naming scheme. 
diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random-beta/1.2.0/linux_amd64/terraform-provider-random-beta @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random new file mode 100644 index 00000000000..daa9e3509f6 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/hashicorp/random/1.2.0/linux_amd64/terraform-provider-random @@ -0,0 +1 @@ +# This is just a placeholder file for discovery testing, not a real provider plugin. diff --git a/pkg/providercache/testdata/cachedir/registry.opentofu.org/missing/executable/2.0.0/linux_amd64/executable b/pkg/providercache/testdata/cachedir/registry.opentofu.org/missing/executable/2.0.0/linux_amd64/executable new file mode 100755 index 00000000000..f7a5e529a32 --- /dev/null +++ b/pkg/providercache/testdata/cachedir/registry.opentofu.org/missing/executable/2.0.0/linux_amd64/executable @@ -0,0 +1 @@ +This file represents a misnamed provider executable. 
diff --git a/pkg/providercache/testdata/provider-null_2.1.0_linux_amd64.zip b/pkg/providercache/testdata/provider-null_2.1.0_linux_amd64.zip new file mode 100644 index 00000000000..4b243f2b6d0 Binary files /dev/null and b/pkg/providercache/testdata/provider-null_2.1.0_linux_amd64.zip differ diff --git a/pkg/providers/addressed_types.go b/pkg/providers/addressed_types.go new file mode 100644 index 00000000000..2365ace9e82 --- /dev/null +++ b/pkg/providers/addressed_types.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providers + +import ( + "sort" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// AddressedTypesAbs is a helper that extracts all of the distinct provider +// types from the given list of absolute provider configuration addresses. +func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []addrs.Provider { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]addrs.Provider{} + for _, addr := range providerAddrs { + m[addr.Provider.String()] = addr.Provider + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + + ret := make([]addrs.Provider, len(names)) + for i, name := range names { + ret[i] = m[name] + } + + return ret +} diff --git a/pkg/providers/addressed_types_test.go b/pkg/providers/addressed_types_test.go new file mode 100644 index 00000000000..910a2528a56 --- /dev/null +++ b/pkg/providers/addressed_types_test.go @@ -0,0 +1,50 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package providers + +import ( + "testing" + + "github.com/go-test/deep" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestAddressedTypesAbs(t *testing.T) { + providerAddrs := []addrs.AbsProviderConfig{ + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("aws"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("aws"), + Alias: "foo", + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("azure"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("null"), + }, + } + + got := AddressedTypesAbs(providerAddrs) + want := []addrs.Provider{ + addrs.NewDefaultProvider("aws"), + addrs.NewDefaultProvider("azure"), + addrs.NewDefaultProvider("null"), + } + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} diff --git a/pkg/providers/doc.go b/pkg/providers/doc.go new file mode 100644 index 00000000000..a1b9b0a026f --- /dev/null +++ b/pkg/providers/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package providers contains the interface and primary types required to +// implement a OpenTofu resource provider. +package providers diff --git a/pkg/providers/factory.go b/pkg/providers/factory.go new file mode 100644 index 00000000000..d116b9013b8 --- /dev/null +++ b/pkg/providers/factory.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package providers + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/pkg/providers/mock_schema_cache.go b/pkg/providers/mock_schema_cache.go new file mode 100644 index 00000000000..5766f7630c3 --- /dev/null +++ b/pkg/providers/mock_schema_cache.go @@ -0,0 +1,9 @@ +package providers + +import "github.com/kubegems/opentofu/pkg/addrs" + +func NewMockSchemaCache() *schemaCache { + return &schemaCache{ + m: make(map[addrs.Provider]ProviderSchema), + } +} diff --git a/pkg/providers/provider.go b/pkg/providers/provider.go new file mode 100644 index 00000000000..19d809aafe9 --- /dev/null +++ b/pkg/providers/provider.go @@ -0,0 +1,501 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetMetadata is not yet implemented or used at this time. 
It may + // be used in the future to avoid loading a provider's full schema + // for initial validation. This could result in some potential + // memory savings. + + // GetSchema returns the complete schema for the provider. + GetProviderSchema() GetProviderSchemaResponse + + // ValidateProviderConfig allows the provider to validate the configuration. + // The ValidateProviderConfigResponse.PreparedConfig field is unused. The + // final configuration is not stored in the state, and any modifications + // that need to be made must be made during the Configure method call. + ValidateProviderConfig(ValidateProviderConfigRequest) ValidateProviderConfigResponse + + // ValidateResourceConfig allows the provider to validate the resource + // configuration values. + ValidateResourceConfig(ValidateResourceConfigRequest) ValidateResourceConfigResponse + + // ValidateDataResourceConfig allows the provider to validate the data source + // configuration values. + ValidateDataResourceConfig(ValidateDataResourceConfigRequest) ValidateDataResourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initialized the provider. + ConfigureProvider(ConfigureProviderRequest) ConfigureProviderResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. OpenTofu will not make any further API + // calls to the provider after Stop is called. 
+ // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state. + PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. + ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // GetFunctions returns a full list of functions defined in this provider. It should be a super + // set of the functions returned in GetProviderSchema() + GetFunctions() GetFunctionsResponse + + // CallFunction requests that the given function is called and response returned. + CallFunction(CallFunctionRequest) CallFunctionResponse + + // Close shuts down the plugin process if applicable. + Close() error +} + +// GetProviderSchemaResponse is the return type for GetProviderSchema, and +// should only be used when handling a value for that method. The handling of +// of schemas in any other context should always use ProviderSchema, so that +// the in-memory representation can be more easily changed separately from the +// RCP protocol. +type GetProviderSchemaResponse struct { + // Provider is the schema for the provider itself. 
+ Provider Schema + + // ProviderMeta is the schema for the provider's meta info in a module + ProviderMeta Schema + + // ResourceTypes map the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // ServerCapabilities lists optional features supported by the provider. + ServerCapabilities ServerCapabilities + + // Functions lists all functions supported by this provider. + Functions map[string]FunctionSpec +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +// +// This describes the schema for a single object within a provider. Type +// "Schemas" (plural) instead represents the overall collection of schemas +// for everything within a particular provider. +type Schema struct { + Version int64 + Block *configschema.Block +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate availability +// of certain forward-compatible changes which may be optional in a major +// protocol version, but cannot be tested for directly. +type ServerCapabilities struct { + // PlanDestroy signals that this provider expects to receive a + // PlanResourceChange call for resources that are to be destroyed. + PlanDestroy bool + + // The GetProviderSchemaOptional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can used a cached copy of the provider's + // schema. + // In other words, the providers for which GetProviderSchemaOptional is false + // require their schema to be read after EVERY instantiation to function normally. 
+ GetProviderSchemaOptional bool +} + +type FunctionSpec struct { + // List of parameters required to call the function + Parameters []FunctionParameterSpec + // Optional Spec for variadic parameters + VariadicParameter *FunctionParameterSpec + // Type which the function will return + Return cty.Type + // Human-readable shortened documentation for the function + Summary string + // Human-readable documentation for the function + Description string + // Formatting type of the Description field + DescriptionFormat TextFormatting + // Human-readable message present if the function is deprecated + DeprecationMessage string +} + +type FunctionParameterSpec struct { + // Human-readable display name for the parameter + Name string + // Type constraint for the parameter + Type cty.Type + // Null values alowed for the parameter + AllowNullValue bool + // Unknown Values allowed for the parameter + // Implies the Return type of the function is also Unknown + AllowUnknownValues bool + // Human-readable documentation for the parameter + Description string + // Formatting type of the Description field + DescriptionFormat TextFormatting +} + +type TextFormatting string + +const TextFormattingPlain = TextFormatting("Plain") +const TextFormattingMarkdown = TextFormatting("Markdown") + +type ValidateProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type ValidateProviderConfigResponse struct { + // PreparedConfig is unused and will be removed with support for plugin protocol v5. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. 
+ Config cty.Value +} + +type ValidateResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateDataResourceConfigRequest struct { + // TypeName is the name of the data source type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateDataResourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type UpgradeResourceStateRequest struct { + // TypeName is the name of the resource type being upgraded + TypeName string + + // Version is version of the schema that created the current state. + Version int64 + + // RawStateJSON and RawStateFlatmap contiain the state that needs to be + // upgraded to match the current schema version. Because the schema is + // unknown, this contains only the raw data as stored in the state. + // RawStateJSON is the current json state encoding. + // RawStateFlatmap is the legacy flatmap encoding. + // Only on of these fields may be set for the upgrade request. + RawStateJSON []byte + RawStateFlatmap map[string]string +} + +type UpgradeResourceStateResponse struct { + // UpgradedState is the newly upgraded resource state. + UpgradedState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ConfigureProviderRequest struct { + // OpenTofu version is the version string from the running instance of + // tofu. Providers can use TerraformVersion to verify compatibility, + // and to store for informational purposes. + TerraformVersion string + + // Config is the complete configuration value for the provider. + Config cty.Value +} + +type ConfigureProviderResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} + +type ReadResourceRequest struct { + // TypeName is the name of the resource type being read. + TypeName string + + // PriorState contains the previously saved state value for this resource. + PriorState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadResourceResponse struct { + // NewState contains the current state of the resource. + NewState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type PlanResourceChangeRequest struct { + // TypeName is the name of the resource type to plan. + TypeName string + + // PriorState is the previously saved state value for this resource. + PriorState cty.Value + + // ProposedNewState is the expected state after the new configuration is + // applied. This is created by directly applying the configuration to the + // PriorState. The provider is then responsible for applying any further + // changes required to create the proposed final state. + ProposedNewState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the ProposedNewState in most circumstances. 
+ Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. + PriorPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of the attributes that are requiring + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by tofu + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the OpenTofu type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of resource. + PriorState cty.Value + + // Planned state is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes. + PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. 
Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. + Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the OpenTofu type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. 
It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into OpenTofu with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. 
+ TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value + + // ProviderMeta is the configuration for the provider_meta block for the + // module and provider this resource belongs to. Its use is defined by + // each provider, and it should not be used without coordination with + // HashiCorp. It is considered experimental and subject to change. + ProviderMeta cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type GetFunctionsResponse struct { + Functions map[string]FunctionSpec + + Diagnostics tfdiags.Diagnostics +} + +type CallFunctionRequest struct { + Name string + Arguments []cty.Value +} + +type CallFunctionResponse struct { + Result cty.Value + Error error +} + +type CallFunctionArgumentError struct { + Text string + FunctionArgument int +} + +func (err *CallFunctionArgumentError) Error() string { + return err.Text +} diff --git a/pkg/providers/schema_cache.go b/pkg/providers/schema_cache.go new file mode 100644 index 00000000000..7d5308d2235 --- /dev/null +++ b/pkg/providers/schema_cache.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providers + +import ( + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// SchemaCache is a global cache of Schemas. +// This will be accessed by both core and the provider clients to ensure that +// large schemas are stored in a single location. +var SchemaCache = &schemaCache{ + m: make(map[addrs.Provider]ProviderSchema), +} + +// Global cache for provider schemas +// Cache the entire response to ensure we capture any new fields, like +// ServerCapabilities. 
This also serves to capture errors so that multiple +// concurrent calls resulting in an error can be handled in the same manner. +type schemaCache struct { + mu sync.Mutex + m map[addrs.Provider]ProviderSchema +} + +func (c *schemaCache) Set(p addrs.Provider, s ProviderSchema) { + c.mu.Lock() + defer c.mu.Unlock() + + c.m[p] = s +} + +func (c *schemaCache) Get(p addrs.Provider) (ProviderSchema, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + s, ok := c.m[p] + return s, ok +} diff --git a/pkg/providers/schemas.go b/pkg/providers/schemas.go new file mode 100644 index 00000000000..a896ac2a50a --- /dev/null +++ b/pkg/providers/schemas.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package providers + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +// ProviderSchema is an overall container for all of the schemas for all +// configurable objects defined within a particular provider. All storage of +// provider schemas should use this type. +type ProviderSchema = GetProviderSchemaResponse + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ss ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + res := ss.ResourceTypes[typeName] + return res.Block, uint64(res.Version) + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ss.DataSources[typeName].Block, 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. 
Returns nil if no such schema is available. +func (ss ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ss.SchemaForResourceType(addr.Mode, addr.Type) +} diff --git a/pkg/provisioner-local-exec/main/main.go b/pkg/provisioner-local-exec/main/main.go new file mode 100644 index 00000000000..3cd67eef623 --- /dev/null +++ b/pkg/provisioner-local-exec/main/main.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + localexec "github.com/kubegems/opentofu/pkg/builtin/provisioners/local-exec" + "github.com/kubegems/opentofu/pkg/grpcwrap" + "github.com/kubegems/opentofu/pkg/plugin" + "github.com/kubegems/opentofu/pkg/tfplugin5" +) + +func main() { + // Provide a binary version of the internal terraform provider for testing + plugin.Serve(&plugin.ServeOpts{ + GRPCProvisionerFunc: func() tfplugin5.ProvisionerServer { + return grpcwrap.Provisioner(localexec.New()) + }, + }) +} diff --git a/pkg/provisioners/doc.go b/pkg/provisioners/doc.go new file mode 100644 index 00000000000..197e86ef7c2 --- /dev/null +++ b/pkg/provisioners/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package provisioners contains the interface and primary types to implement a +// OpenTofu resource provisioner. +package provisioners diff --git a/pkg/provisioners/factory.go b/pkg/provisioners/factory.go new file mode 100644 index 00000000000..f3987df1c04 --- /dev/null +++ b/pkg/provisioners/factory.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package provisioners + +// Factory is a function type that creates a new instance of a resource +// provisioner, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provisioner. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/pkg/provisioners/provisioner.go b/pkg/provisioners/provisioner.go new file mode 100644 index 00000000000..271d15a6cc7 --- /dev/null +++ b/pkg/provisioners/provisioner.go @@ -0,0 +1,87 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provisioners + +import ( + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. + GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. + // ProvisionResource blocks until the execution is complete. + // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. 
+ ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. OpenTofu will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. + UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} diff --git a/pkg/refactoring/move_execute.go b/pkg/refactoring/move_execute.go new file mode 100644 index 00000000000..e66f052f9b6 --- /dev/null +++ b/pkg/refactoring/move_execute.go @@ -0,0 +1,347 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/states" +) + +// ApplyMoves modifies in-place the given state object so that any existing +// objects that are matched by a "from" argument of one of the move statements +// will be moved to instead appear at the "to" argument of that statement. +// +// The result is a map from the unique key of each absolute address that was +// either the source or destination of a move to a MoveResult describing +// what happened at that address. +// +// ApplyMoves does not have any error situations itself, and will instead just +// ignore any unresolvable move statements. Validation of a set of moves is +// a separate concern applied to the configuration, because validity of +// moves is always dependent only on the configuration, not on the state. +// +// ApplyMoves expects exclusive access to the given state while it's running. +// Don't read or write any part of the state structure until ApplyMoves returns. +func ApplyMoves(stmts []MoveStatement, state *states.State) MoveResults { + ret := makeMoveResults() + + if len(stmts) == 0 { + return ret + } + + // The methodology here is to construct a small graph of all of the move + // statements where the edges represent where a particular statement + // is either chained from or nested inside the effect of another statement. 
+ // That then means we can traverse the graph in topological sort order + // to gradually move objects through potentially multiple moves each. + + g := buildMoveStatementGraph(stmts) + + // If the graph is not valid the we will not take any action at all. The + // separate validation step should detect this and return an error. + if diags := validateMoveStatementGraph(g); diags.HasErrors() { + log.Printf("[ERROR] ApplyMoves: %s", diags.ErrWithWarnings()) + return ret + } + + // The graph must be reduced in order for ReverseDepthFirstWalk to work + // correctly, since it is built from following edges and can skip over + // dependencies if there is a direct edge to a transitive dependency. + g.TransitiveReduction() + + // The starting nodes are the ones that don't depend on any other nodes. + startNodes := make(dag.Set, len(stmts)) + for _, v := range g.Vertices() { + if len(g.DownEdges(v)) == 0 { + startNodes.Add(v) + } + } + + if startNodes.Len() == 0 { + log.Println("[TRACE] refactoring.ApplyMoves: No 'moved' statements to consider in this configuration") + return ret + } + + log.Printf("[TRACE] refactoring.ApplyMoves: Processing 'moved' statements in the configuration\n%s", logging.Indent(g.String())) + + recordOldAddr := func(oldAddr, newAddr addrs.AbsResourceInstance) { + if prevMove, exists := ret.Changes.GetOk(oldAddr); exists { + // If the old address was _already_ the result of a move then + // we'll replace that entry so that our results summarize a chain + // of moves into a single entry. 
+ ret.Changes.Remove(oldAddr) + oldAddr = prevMove.From + } + ret.Changes.Put(newAddr, MoveSuccess{ + From: oldAddr, + To: newAddr, + }) + } + recordBlockage := func(newAddr, wantedAddr addrs.AbsMoveable) { + ret.Blocked.Put(newAddr, MoveBlocked{ + Wanted: wantedAddr, + Actual: newAddr, + }) + } + + for _, v := range g.ReverseTopologicalOrder() { + stmt := v.(*MoveStatement) + + for _, ms := range state.Modules { + modAddr := ms.Addr + + // We don't yet know that the current module is relevant, and + // we determine that differently for each the object kind. + switch kind := stmt.ObjectKind(); kind { + case addrs.MoveEndpointModule: + // For a module endpoint we just try the module address + // directly, and execute the moves if it matches. + if newAddr, matches := modAddr.MoveDestination(stmt.From, stmt.To); matches { + log.Printf("[TRACE] refactoring.ApplyMoves: %s has moved to %s", modAddr, newAddr) + + // If we already have a module at the new address then + // we'll skip this move and let the existing object take + // priority. + if ms := state.Module(newAddr); ms != nil { + log.Printf("[WARN] Skipped moving %s to %s, because there's already another module instance at the destination", modAddr, newAddr) + recordBlockage(modAddr, newAddr) + continue + } + + // We need to visit all of the resource instances in the + // module and record them individually as results. + for _, rs := range ms.Resources { + relAddr := rs.Addr.Resource + for key := range rs.Instances { + oldInst := relAddr.Instance(key).Absolute(modAddr) + newInst := relAddr.Instance(key).Absolute(newAddr) + recordOldAddr(oldInst, newInst) + } + } + + state.MoveModuleInstance(modAddr, newAddr) + continue + } + case addrs.MoveEndpointResource: + // For a resource endpoint we require an exact containing + // module match, because by definition a matching resource + // cannot be nested any deeper than that. 
+ if !stmt.From.SelectsModule(modAddr) { + continue + } + + // We then need to search each of the resources and resource + // instances in the module. + for _, rs := range ms.Resources { + rAddr := rs.Addr + if newAddr, matches := rAddr.MoveDestination(stmt.From, stmt.To); matches { + log.Printf("[TRACE] refactoring.ApplyMoves: resource %s has moved to %s", rAddr, newAddr) + + // If we already have a resource at the new address then + // we'll skip this move and let the existing object take + // priority. + if rs := state.Resource(newAddr); rs != nil { + log.Printf("[WARN] Skipped moving %s to %s, because there's already another resource at the destination", rAddr, newAddr) + recordBlockage(rAddr, newAddr) + continue + } + + for key := range rs.Instances { + oldInst := rAddr.Instance(key) + newInst := newAddr.Instance(key) + recordOldAddr(oldInst, newInst) + } + state.MoveAbsResource(rAddr, newAddr) + continue + } + for key := range rs.Instances { + iAddr := rAddr.Instance(key) + if newAddr, matches := iAddr.MoveDestination(stmt.From, stmt.To); matches { + log.Printf("[TRACE] refactoring.ApplyMoves: resource instance %s has moved to %s", iAddr, newAddr) + + // If we already have a resource instance at the new + // address then we'll skip this move and let the existing + // object take priority. + if is := state.ResourceInstance(newAddr); is != nil { + log.Printf("[WARN] Skipped moving %s to %s, because there's already another resource instance at the destination", iAddr, newAddr) + recordBlockage(iAddr, newAddr) + continue + } + + recordOldAddr(iAddr, newAddr) + + state.MoveAbsResourceInstance(iAddr, newAddr) + continue + } + } + } + default: + panic(fmt.Sprintf("unhandled move object kind %s", kind)) + } + } + } + + return ret +} + +// buildMoveStatementGraph constructs a dependency graph of the given move +// statements, where the nodes are all pointers to statements in the given +// slice and the edges represent either chaining or nesting relationships. 
+// +// buildMoveStatementGraph doesn't do any validation of the graph, so it +// may contain cycles and other sorts of invalidity. +func buildMoveStatementGraph(stmts []MoveStatement) *dag.AcyclicGraph { + g := &dag.AcyclicGraph{} + for i := range stmts { + // The graph nodes are pointers to the actual statements directly. + g.Add(&stmts[i]) + } + + // Now we'll add the edges representing chaining and nesting relationships. + // We assume that a reasonable configuration will have at most tens of + // move statements and thus this N*M algorithm is acceptable. + for dependerI := range stmts { + depender := &stmts[dependerI] + for dependeeI := range stmts { + if dependerI == dependeeI { + // skip comparing the statement to itself + continue + } + dependee := &stmts[dependeeI] + + if statementDependsOn(depender, dependee) { + g.Connect(dag.BasicEdge(depender, dependee)) + } + } + } + + return g +} + +// statementDependsOn returns true if statement a depends on statement b; +// i.e. statement b must be executed before statement a. +func statementDependsOn(a, b *MoveStatement) bool { + // chain-able moves are simple, as on the destination of one move could be + // equal to the source of another. + if a.From.CanChainFrom(b.To) { + return true + } + + // Statement nesting in more complex, as we have 8 possible combinations to + // assess. Here we list all combinations, along with the statement which + // must be executed first when one address is nested within another. + // A.From IsNestedWithin B.From => A + // A.From IsNestedWithin B.To => B + // A.To IsNestedWithin B.From => A + // A.To IsNestedWithin B.To => B + // B.From IsNestedWithin A.From => B + // B.From IsNestedWithin A.To => A + // B.To IsNestedWithin A.From => B + // B.To IsNestedWithin A.To => A + // + // Since we are only interested in checking if A depends on B, we only need + // to check the 4 possibilities above which result in B being executed + // first. 
If we're there's no dependency at all we can return immediately. + if !(a.From.NestedWithin(b.To) || a.To.NestedWithin(b.To) || + b.From.NestedWithin(a.From) || b.To.NestedWithin(a.From)) { + return false + } + + // If a nested move has a dependency, we need to rule out the possibility + // that this is a move inside a module only changing indexes. If an + // ancestor module is only changing the index of a nested module, any + // nested move statements are going to match both the From and To address + // when the base name is not changing, causing a cycle in the order of + // operations. + + // if A is not declared in an ancestor module, then we can't be nested + // within a module index change. + if len(a.To.Module()) >= len(b.To.Module()) { + return true + } + // We only want the nested move statement to depend on the outer module + // move, so we only test this in the reverse direction. + if a.From.IsModuleReIndex(a.To) { + return false + } + + return true +} + +// MoveResults describes the outcome of an ApplyMoves call. +type MoveResults struct { + // Changes is a map from the unique keys of the final new resource + // instance addresses to an object describing what changed. + // + // This includes one entry for each resource instance address that was + // the destination of a move statement. It doesn't include resource + // instances that were not affected by moves at all, but it does include + // resource instance addresses that were "blocked" (also recorded in + // BlockedAddrs) if and only if they were able to move at least + // partially along a chain before being blocked. + // + // In the return value from ApplyMoves, all of the keys are guaranteed to + // be unique keys derived from addrs.AbsResourceInstance values. 
+ Changes addrs.Map[addrs.AbsResourceInstance, MoveSuccess] + + // Blocked is a map from the unique keys of the final new + // resource instances addresses to information about where they "wanted" + // to move, but were blocked by a pre-existing object at the same address. + // + // "Blocking" can arise in unusual situations where multiple points along + // a move chain were already bound to objects, and thus only one of them + // can actually adopt the final position in the chain. It can also + // occur in other similar situations, such as if a configuration contains + // a move of an entire module and a move of an individual resource into + // that module, such that the individual resource would collide with a + // resource in the whole module that was moved. + // + // In the return value from ApplyMoves, all of the keys are guaranteed to + // be unique keys derived from values of addrs.AbsMoveable types. + Blocked addrs.Map[addrs.AbsMoveable, MoveBlocked] +} + +func makeMoveResults() MoveResults { + return MoveResults{ + Changes: addrs.MakeMap[addrs.AbsResourceInstance, MoveSuccess](), + Blocked: addrs.MakeMap[addrs.AbsMoveable, MoveBlocked](), + } +} + +type MoveSuccess struct { + From addrs.AbsResourceInstance + To addrs.AbsResourceInstance +} + +type MoveBlocked struct { + Wanted addrs.AbsMoveable + Actual addrs.AbsMoveable +} + +// AddrMoved returns true if and only if the given resource instance moved to +// a new address in the ApplyMoves call that the receiver is describing. +// +// If AddrMoved returns true, you can pass the same address to method OldAddr +// to find its original address prior to moving. +func (rs MoveResults) AddrMoved(newAddr addrs.AbsResourceInstance) bool { + return rs.Changes.Has(newAddr) +} + +// OldAddr returns the old address of the given resource instance address, or +// just returns back the same address if the given instance wasn't affected by +// any move statements. 
+func (rs MoveResults) OldAddr(newAddr addrs.AbsResourceInstance) addrs.AbsResourceInstance { + change, ok := rs.Changes.GetOk(newAddr) + if !ok { + return newAddr + } + return change.From +} diff --git a/pkg/refactoring/move_execute_test.go b/pkg/refactoring/move_execute_test.go new file mode 100644 index 00000000000..12be34b4ebe --- /dev/null +++ b/pkg/refactoring/move_execute_test.go @@ -0,0 +1,695 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "fmt" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestApplyMoves(t *testing.T) { + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("example.com/foo/bar"), + } + + mustParseInstAddr := func(s string) addrs.AbsResourceInstance { + addr, err := addrs.ParseAbsResourceInstanceStr(s) + if err != nil { + t.Fatal(err) + } + return addr + } + + emptyResults := makeMoveResults() + + tests := map[string]struct { + Stmts []MoveStatement + State *states.State + + WantResults MoveResults + WantInstanceAddrs []string + }{ + "no moves and empty state": { + []MoveStatement{}, + states.NewState(), + emptyResults, + nil, + }, + "no moves": { + []MoveStatement{}, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + emptyResults, + []string{ + `foo.from`, + }, + }, + "single move of whole singleton resource": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "foo.to"), + }, + states.BuildState(func(s 
*states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("foo.to"), MoveSuccess{ + From: mustParseInstAddr("foo.from"), + To: mustParseInstAddr("foo.to"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `foo.to`, + }, + }, + "single move of whole 'count' resource": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "foo.to"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("foo.to[0]"), MoveSuccess{ + From: mustParseInstAddr("foo.from[0]"), + To: mustParseInstAddr("foo.to[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `foo.to[0]`, + }, + }, + "chained move of whole singleton resource": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "foo.mid"), + testMoveStatement(t, "", "foo.mid", "foo.to"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("foo.to"), MoveSuccess{ + From: mustParseInstAddr("foo.from"), + To: mustParseInstAddr("foo.to"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `foo.to`, + }, + }, + + "move whole resource into module": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "module.boo.foo.to"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + 
mustParseInstAddr("foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.boo.foo.to[0]"), MoveSuccess{ + From: mustParseInstAddr("foo.from[0]"), + To: mustParseInstAddr("module.boo.foo.to[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.boo.foo.to[0]`, + }, + }, + + "move resource instance between modules": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo.foo.from[0]", "module.bar[0].foo.to[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.to[0]"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from[0]"), + To: mustParseInstAddr("module.bar[0].foo.to[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].foo.to[0]`, + }, + }, + + "module move with child module": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo", "module.bar"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.module.hoo.foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar.foo.from"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from"), + To: mustParseInstAddr("module.bar.foo.from"), + }), + 
addrs.MakeMapElem(mustParseInstAddr("module.bar.module.hoo.foo.from"), MoveSuccess{ + From: mustParseInstAddr("module.boo.module.hoo.foo.from"), + To: mustParseInstAddr("module.bar.module.hoo.foo.from"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar.foo.from`, + `module.bar.module.hoo.foo.from`, + }, + }, + + "move whole single module to indexed module": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo", "module.bar[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.from[0]"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from[0]"), + To: mustParseInstAddr("module.bar[0].foo.from[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].foo.from[0]`, + }, + }, + + "move whole module to indexed module and move instance chained": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo", "module.bar[0]"), + testMoveStatement(t, "bar", "foo.from[0]", "foo.to[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.to[0]"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from[0]"), + To: mustParseInstAddr("module.bar[0].foo.to[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].foo.to[0]`, + }, + }, + + "move instance to indexed module and instance chained": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo.foo.from[0]", 
"module.bar[0].foo.from[0]"), + testMoveStatement(t, "bar", "foo.from[0]", "foo.to[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.to[0]"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from[0]"), + To: mustParseInstAddr("module.bar[0].foo.to[0]"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].foo.to[0]`, + }, + }, + + "move module instance to already-existing module instance": { + []MoveStatement{ + testMoveStatement(t, "", "module.bar[0]", "module.boo"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.bar[0].foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.to[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + // Nothing moved, because the module.b address is already + // occupied by another module. 
+ Changes: emptyResults.Changes, + Blocked: addrs.MakeMap( + addrs.MakeMapElem[addrs.AbsMoveable]( + mustParseInstAddr("module.bar[0].foo.from").Module, + MoveBlocked{ + Wanted: mustParseInstAddr("module.boo.foo.to[0]").Module, + Actual: mustParseInstAddr("module.bar[0].foo.from").Module, + }, + ), + ), + }, + []string{ + `module.bar[0].foo.from`, + `module.boo.foo.to[0]`, + }, + }, + + "move resource to already-existing resource": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "foo.to"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.to"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + // Nothing moved, because the from.to address is already + // occupied by another resource. 
+ Changes: emptyResults.Changes, + Blocked: addrs.MakeMap( + addrs.MakeMapElem[addrs.AbsMoveable]( + mustParseInstAddr("foo.from").ContainingResource(), + MoveBlocked{ + Wanted: mustParseInstAddr("foo.to").ContainingResource(), + Actual: mustParseInstAddr("foo.from").ContainingResource(), + }, + ), + ), + }, + []string{ + `foo.from`, + `foo.to`, + }, + }, + + "move resource instance to already-existing resource instance": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "foo.to[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.to[0]"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + // Nothing moved, because the from.to[0] address is already + // occupied by another resource instance. 
+ Changes: emptyResults.Changes, + Blocked: addrs.MakeMap( + addrs.MakeMapElem[addrs.AbsMoveable]( + mustParseInstAddr("foo.from"), + MoveBlocked{ + Wanted: mustParseInstAddr("foo.to[0]"), + Actual: mustParseInstAddr("foo.from"), + }, + ), + ), + }, + []string{ + `foo.from`, + `foo.to[0]`, + }, + }, + "move resource and containing module": { + []MoveStatement{ + testMoveStatement(t, "", "module.boo", "module.bar[0]"), + testMoveStatement(t, "boo", "foo.from", "foo.to"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.boo.foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.to"), MoveSuccess{ + From: mustParseInstAddr("module.boo.foo.from"), + To: mustParseInstAddr("module.bar[0].foo.to"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].foo.to`, + }, + }, + + "move module and then move resource into it": { + []MoveStatement{ + testMoveStatement(t, "", "module.bar[0]", "module.boo"), + testMoveStatement(t, "", "foo.from", "module.boo.foo.from"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.bar[0].foo.to"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.boo.foo.from"), MoveSuccess{ + mustParseInstAddr("foo.from"), + mustParseInstAddr("module.boo.foo.from"), + }), + addrs.MakeMapElem(mustParseInstAddr("module.boo.foo.to"), MoveSuccess{ + mustParseInstAddr("module.bar[0].foo.to"), + 
mustParseInstAddr("module.boo.foo.to"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.boo.foo.from`, + `module.boo.foo.to`, + }, + }, + + "move resources into module and then move module": { + []MoveStatement{ + testMoveStatement(t, "", "foo.from", "module.boo.foo.to"), + testMoveStatement(t, "", "bar.from", "module.boo.bar.to"), + testMoveStatement(t, "", "module.boo", "module.bar[0]"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("bar.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].foo.to"), MoveSuccess{ + mustParseInstAddr("foo.from"), + mustParseInstAddr("module.bar[0].foo.to"), + }), + addrs.MakeMapElem(mustParseInstAddr("module.bar[0].bar.to"), MoveSuccess{ + mustParseInstAddr("bar.from"), + mustParseInstAddr("module.bar[0].bar.to"), + }), + ), + Blocked: emptyResults.Blocked, + }, + []string{ + `module.bar[0].bar.to`, + `module.bar[0].foo.to`, + }, + }, + + "module move collides with resource move": { + []MoveStatement{ + testMoveStatement(t, "", "module.bar[0]", "module.boo"), + testMoveStatement(t, "", "foo.from", "module.boo.foo.from"), + }, + states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("module.bar[0].foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("foo.from"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerAddr, + ) + }), + MoveResults{ + Changes: addrs.MakeMap( + 
addrs.MakeMapElem(mustParseInstAddr("module.boo.foo.from"), MoveSuccess{ + mustParseInstAddr("module.bar[0].foo.from"), + mustParseInstAddr("module.boo.foo.from"), + }), + ), + Blocked: addrs.MakeMap( + addrs.MakeMapElem[addrs.AbsMoveable]( + mustParseInstAddr("foo.from").ContainingResource(), + MoveBlocked{ + Actual: mustParseInstAddr("foo.from").ContainingResource(), + Wanted: mustParseInstAddr("module.boo.foo.from").ContainingResource(), + }, + ), + ), + }, + []string{ + `foo.from`, + `module.boo.foo.from`, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var stmtsBuf strings.Builder + for _, stmt := range test.Stmts { + fmt.Fprintf(&stmtsBuf, "• from: %s\n to: %s\n", stmt.From, stmt.To) + } + t.Logf("move statements:\n%s", stmtsBuf.String()) + + t.Logf("resource instances in prior state:\n%s", spew.Sdump(allResourceInstanceAddrsInState(test.State))) + + state := test.State.DeepCopy() // don't modify the test case in-place + gotResults := ApplyMoves(test.Stmts, state) + + if diff := cmp.Diff(test.WantResults, gotResults); diff != "" { + t.Errorf("wrong results\n%s", diff) + } + + gotInstAddrs := allResourceInstanceAddrsInState(state) + if diff := cmp.Diff(test.WantInstanceAddrs, gotInstAddrs); diff != "" { + t.Errorf("wrong resource instances in final state\n%s", diff) + } + }) + } +} + +func testMoveStatement(t *testing.T, module string, from string, to string) MoveStatement { + t.Helper() + + moduleAddr := addrs.RootModule + if len(module) != 0 { + moduleAddr = addrs.Module(strings.Split(module, ".")) + } + + fromTraversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(from), "from", hcl.InitialPos) + if hclDiags.HasErrors() { + t.Fatalf("invalid 'from' argument: %s", hclDiags.Error()) + } + fromAddr, diags := addrs.ParseMoveEndpoint(fromTraversal) + if diags.HasErrors() { + t.Fatalf("invalid 'from' argument: %s", diags.Err().Error()) + } + toTraversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(to), "to", 
hcl.InitialPos) + if diags.HasErrors() { + t.Fatalf("invalid 'to' argument: %s", hclDiags.Error()) + } + toAddr, diags := addrs.ParseMoveEndpoint(toTraversal) + if diags.HasErrors() { + t.Fatalf("invalid 'from' argument: %s", diags.Err().Error()) + } + + fromInModule, toInModule := addrs.UnifyMoveEndpoints(moduleAddr, fromAddr, toAddr) + if fromInModule == nil || toInModule == nil { + t.Fatalf("incompatible endpoints") + } + + return MoveStatement{ + From: fromInModule, + To: toInModule, + + // DeclRange not populated because it's unimportant for our tests + } +} + +func allResourceInstanceAddrsInState(state *states.State) []string { + var ret []string + for _, ms := range state.Modules { + for _, rs := range ms.Resources { + for key := range rs.Instances { + ret = append(ret, rs.Addr.Instance(key).String()) + } + } + } + sort.Strings(ret) + return ret +} diff --git a/pkg/refactoring/move_statement.go b/pkg/refactoring/move_statement.go new file mode 100644 index 00000000000..a49e5266ea7 --- /dev/null +++ b/pkg/refactoring/move_statement.go @@ -0,0 +1,191 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type MoveStatement struct { + From, To *addrs.MoveEndpointInModule + DeclRange tfdiags.SourceRange + + // Implied is true for statements produced by ImpliedMoveStatements, and + // false for statements produced by FindMoveStatements. + // + // An "implied" statement is one that has no explicit "moved" block in + // the configuration and was instead generated automatically based on a + // comparison between current configuration and previous run state. 
+ // For implied statements, the DeclRange field contains the source location + // of something in the source code that implied the statement, in which + // case it would probably be confusing to show that source range to the + // user, e.g. in an error message, without clearly mentioning that it's + // related to an implied move statement. + Implied bool +} + +// FindMoveStatements recurses through the modules of the given configuration +// and returns a flat set of all "moved" blocks defined within, in a +// deterministic but undefined order. +func FindMoveStatements(rootCfg *configs.Config) []MoveStatement { + return findMoveStatements(rootCfg, nil) +} + +func findMoveStatements(cfg *configs.Config, into []MoveStatement) []MoveStatement { + modAddr := cfg.Path + for _, mc := range cfg.Module.Moved { + fromAddr, toAddr := addrs.UnifyMoveEndpoints(modAddr, mc.From, mc.To) + if fromAddr == nil || toAddr == nil { + // Invalid combination should've been caught during original + // configuration decoding, in the configs package. + panic(fmt.Sprintf("incompatible move endpoints in %s", mc.DeclRange)) + } + + into = append(into, MoveStatement{ + From: fromAddr, + To: toAddr, + DeclRange: tfdiags.SourceRangeFromHCL(mc.DeclRange), + Implied: false, + }) + } + + for _, childCfg := range cfg.Children { + into = findMoveStatements(childCfg, into) + } + + return into +} + +// ImpliedMoveStatements compares addresses in the given state with addresses +// in the given configuration and potentially returns additional MoveStatement +// objects representing moves we infer automatically, even though they aren't +// explicitly recorded in the configuration. +// +// We do this primarily for backward compatibility with behaviors of Terraform +// versions prior to introducing explicit "moved" blocks. 
Specifically, this
+// function aims to achieve the same result as the "NodeCountBoundary"
+// heuristic from Terraform v1.0 and earlier, where adding or removing the
+// "count" meta-argument from an already-created resource can automatically
+// preserve the zeroth or the NoKey instance, depending on the direction of
+// the change. We do this only for resources that aren't mentioned already
+// in at least one explicit move statement.
+//
+// As with the previous-version heuristics it replaces, this is a best effort
+// and doesn't handle all situations. An explicit move statement is always
+// preferred, but our goal here is to match exactly the same cases that the
+// old heuristic would've matched, to retain compatibility for existing modules.
+//
+// We should think very hard before adding any _new_ implication rules for
+// moved statements.
+func ImpliedMoveStatements(rootCfg *configs.Config, prevRunState *states.State, explicitStmts []MoveStatement) []MoveStatement {
+	return impliedMoveStatements(rootCfg, prevRunState, explicitStmts, nil)
+}
+
+func impliedMoveStatements(cfg *configs.Config, prevRunState *states.State, explicitStmts []MoveStatement, into []MoveStatement) []MoveStatement {
+	modAddr := cfg.Path
+
+	// There can be potentially many instances of the module, so we need
+	// to consider each of them separately.
+	for _, modState := range prevRunState.ModuleInstances(modAddr) {
+		// What we're looking for here is either a no-key resource instance
+		// where the configuration has count set or a zero-key resource
+		// instance where the configuration _doesn't_ have count set.
+		// If so, we'll generate a statement replacing no-key with zero-key or
+		// vice-versa.
+		for _, rState := range modState.Resources {
+			rAddr := rState.Addr
+			rCfg := cfg.Module.ResourceByAddr(rAddr.Resource)
+			if rCfg == nil {
+				// If there's no configuration at all then there can't be any
+				// automatic move fixup to do.
+				continue
+			}
+			approxSrcRange := tfdiags.SourceRangeFromHCL(rCfg.DeclRange)
+
+			// NOTE: We're intentionally not checking to see whether the
+			// "to" addresses in our implied statements already have
+			// instances recorded in state, because ApplyMoves should
+			// deal with such conflicts in a deterministic way for both
+			// explicit and implicit moves, and we'd rather have that
+			// handled all in one place.
+
+			var fromKey, toKey addrs.InstanceKey
+
+			switch {
+			case rCfg.Count != nil:
+				// If we have a count expression then we'll use _that_ as
+				// a slightly-more-precise approximate source range.
+				approxSrcRange = tfdiags.SourceRangeFromHCL(rCfg.Count.Range())
+
+				if riState := rState.Instances[addrs.NoKey]; riState != nil {
+					fromKey = addrs.NoKey
+					toKey = addrs.IntKey(0)
+				}
+			case rCfg.Count == nil && rCfg.ForEach == nil: // no repetition at all
+				if riState := rState.Instances[addrs.IntKey(0)]; riState != nil {
+					fromKey = addrs.IntKey(0)
+					toKey = addrs.NoKey
+				}
+			}
+
+			if fromKey != toKey {
+				// We mustn't generate an implied statement if the user already
+				// wrote an explicit statement referring to this resource,
+				// because they may wish to select an instance key other than
+				// zero as the one to retain.
+				if !haveMoveStatementForResource(rAddr, explicitStmts) {
+					into = append(into, MoveStatement{
+						From:      addrs.ImpliedMoveStatementEndpoint(rAddr.Instance(fromKey), approxSrcRange),
+						To:        addrs.ImpliedMoveStatementEndpoint(rAddr.Instance(toKey), approxSrcRange),
+						DeclRange: approxSrcRange,
+						Implied:   true,
+					})
+				}
+			}
+		}
+	}
+
+	for _, childCfg := range cfg.Children {
+		into = impliedMoveStatements(childCfg, prevRunState, explicitStmts, into)
+	}
+
+	return into
+}
+
+func (s *MoveStatement) ObjectKind() addrs.MoveEndpointKind {
+	// addrs.UnifyMoveEndpoints guarantees that both of our addresses have
+	// the same kind, so we can just arbitrarily use From and assume To will
+	// match it.
+ return s.From.ObjectKind() +} + +// Name is used internally for displaying the statement graph +func (s *MoveStatement) Name() string { + return fmt.Sprintf("%s->%s", s.From, s.To) +} + +func haveMoveStatementForResource(addr addrs.AbsResource, stmts []MoveStatement) bool { + // This is not a particularly optimal way to answer this question, + // particularly since our caller calls this function in a loop already, + // but we expect the total number of explicit statements to be small + // in any reasonable OpenTofu configuration and so a more complicated + // approach wouldn't be justified here. + + for _, stmt := range stmts { + if stmt.From.SelectsResource(addr) { + return true + } + if stmt.To.SelectsResource(addr) { + return true + } + } + return false +} diff --git a/pkg/refactoring/move_statement_test.go b/pkg/refactoring/move_statement_test.go new file mode 100644 index 00000000000..f435711f78b --- /dev/null +++ b/pkg/refactoring/move_statement_test.go @@ -0,0 +1,247 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Changes line endings from Windows-style ('\r\n') to Unix-style ('\n'). 
+func normaliseLineEndings(filename string) ([]byte, error) { + originalContent, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading file %s: %w", filename, err) + } + + // Replace all occurrences of '\r\n' with '\n' + normalisedContent := bytes.ReplaceAll(originalContent, []byte("\r\n"), []byte("\n")) + + if !bytes.Equal(originalContent, normalisedContent) { + err = os.WriteFile(filename, normalisedContent, 0600) + if err != nil { + return nil, fmt.Errorf("error writing file %s: %w", filename, err) + } + } + + return originalContent, nil +} + +func TestImpliedMoveStatements(t *testing.T) { + // Normalise file content for cross-platform compatibility + if runtime.GOOS == "windows" { + file1 := "testdata/move-statement-implied/move-statement-implied.tf" + file2 := "testdata/move-statement-implied/child/move-statement-implied.tf" + originalContent, err := normaliseLineEndings(file1) + if err != nil { + t.Errorf("Error normalising line endings %v", err) + } + originalContentChild, err := normaliseLineEndings(file2) + if err != nil { + t.Errorf("Error normalising line endings %v", err) + } + + // Restore original file content after test completion + t.Cleanup(func() { + err1 := os.WriteFile(file1, originalContent, 0600) + if err1 != nil { + t.Error() + } + err = os.WriteFile(file2, originalContentChild, 0600) + if err != nil { + t.Error() + } + }, + ) + } + + resourceAddr := func(name string) addrs.AbsResource { + return addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "foo", + Name: name, + }.Absolute(addrs.RootModuleInstance) + } + + nestedResourceAddr := func(mod, name string) addrs.AbsResource { + return addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "foo", + Name: name, + }.Absolute(addrs.RootModuleInstance.Child(mod, addrs.NoKey)) + } + + instObjState := func() *states.ResourceInstanceObjectSrc { + return &states.ResourceInstanceObjectSrc{} + } + providerAddr := addrs.AbsProviderConfig{ + Module: 
addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("hashicorp/foo"), + } + + rootCfg, _ := loadRefactoringFixture(t, "testdata/move-statement-implied") + prevRunState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + resourceAddr("formerly_count").Instance(addrs.IntKey(0)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("formerly_count").Instance(addrs.IntKey(1)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("now_count").Instance(addrs.NoKey), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("formerly_count_explicit").Instance(addrs.IntKey(0)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("formerly_count_explicit").Instance(addrs.IntKey(1)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("now_count_explicit").Instance(addrs.NoKey), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("now_for_each_formerly_count").Instance(addrs.IntKey(0)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("now_for_each_formerly_no_count").Instance(addrs.NoKey), + instObjState(), + providerAddr, + ) + + // This "ambiguous" resource is representing a rare but possible + // situation where we end up having a mixture of different index + // types in the state at the same time. The main way to get into + // this state would be to remove "count = 1" and then have the + // provider fail to destroy the zero-key instance even though we + // already created the no-key instance. Users can also get here + // by using "tofu state mv" in weird ways. 
+ s.SetResourceInstanceCurrent( + resourceAddr("ambiguous").Instance(addrs.NoKey), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + resourceAddr("ambiguous").Instance(addrs.IntKey(0)), + instObjState(), + providerAddr, + ) + + // Add two resource nested in a module to ensure we find these + // recursively. + s.SetResourceInstanceCurrent( + nestedResourceAddr("child", "formerly_count").Instance(addrs.IntKey(0)), + instObjState(), + providerAddr, + ) + s.SetResourceInstanceCurrent( + nestedResourceAddr("child", "now_count").Instance(addrs.NoKey), + instObjState(), + providerAddr, + ) + }) + + explicitStmts := FindMoveStatements(rootCfg) + got := ImpliedMoveStatements(rootCfg, prevRunState, explicitStmts) + want := []MoveStatement{ + { + From: addrs.ImpliedMoveStatementEndpoint(resourceAddr("formerly_count").Instance(addrs.IntKey(0)), tfdiags.SourceRange{}), + To: addrs.ImpliedMoveStatementEndpoint(resourceAddr("formerly_count").Instance(addrs.NoKey), tfdiags.SourceRange{}), + Implied: true, + DeclRange: tfdiags.SourceRange{ + Filename: filepath.Join("testdata", "move-statement-implied", "move-statement-implied.tf"), + Start: tfdiags.SourcePos{Line: 5, Column: 1, Byte: 180}, + End: tfdiags.SourcePos{Line: 5, Column: 32, Byte: 211}, + }, + }, + + // Found implied moves in a nested module, ignoring the explicit moves + { + From: addrs.ImpliedMoveStatementEndpoint(nestedResourceAddr("child", "formerly_count").Instance(addrs.IntKey(0)), tfdiags.SourceRange{}), + To: addrs.ImpliedMoveStatementEndpoint(nestedResourceAddr("child", "formerly_count").Instance(addrs.NoKey), tfdiags.SourceRange{}), + Implied: true, + DeclRange: tfdiags.SourceRange{ + Filename: filepath.Join("testdata", "move-statement-implied", "child", "move-statement-implied.tf"), + Start: tfdiags.SourcePos{Line: 5, Column: 1, Byte: 180}, + End: tfdiags.SourcePos{Line: 5, Column: 32, Byte: 211}, + }, + }, + + { + From: 
addrs.ImpliedMoveStatementEndpoint(resourceAddr("now_count").Instance(addrs.NoKey), tfdiags.SourceRange{}), + To: addrs.ImpliedMoveStatementEndpoint(resourceAddr("now_count").Instance(addrs.IntKey(0)), tfdiags.SourceRange{}), + Implied: true, + DeclRange: tfdiags.SourceRange{ + Filename: filepath.Join("testdata", "move-statement-implied", "move-statement-implied.tf"), + Start: tfdiags.SourcePos{Line: 10, Column: 11, Byte: 282}, + End: tfdiags.SourcePos{Line: 10, Column: 12, Byte: 283}, + }, + }, + + // Found implied moves in a nested module, ignoring the explicit moves + { + From: addrs.ImpliedMoveStatementEndpoint(nestedResourceAddr("child", "now_count").Instance(addrs.NoKey), tfdiags.SourceRange{}), + To: addrs.ImpliedMoveStatementEndpoint(nestedResourceAddr("child", "now_count").Instance(addrs.IntKey(0)), tfdiags.SourceRange{}), + Implied: true, + DeclRange: tfdiags.SourceRange{ + Filename: filepath.Join("testdata", "move-statement-implied", "child", "move-statement-implied.tf"), + Start: tfdiags.SourcePos{Line: 10, Column: 11, Byte: 282}, + End: tfdiags.SourcePos{Line: 10, Column: 12, Byte: 283}, + }, + }, + + // We generate foo.ambiguous[0] to foo.ambiguous here, even though + // there's already a foo.ambiguous in the state, because it's the + // responsibility of the later ApplyMoves step to deal with the + // situation where an object wants to move into an address already + // occupied by another object. 
+ { + From: addrs.ImpliedMoveStatementEndpoint(resourceAddr("ambiguous").Instance(addrs.IntKey(0)), tfdiags.SourceRange{}), + To: addrs.ImpliedMoveStatementEndpoint(resourceAddr("ambiguous").Instance(addrs.NoKey), tfdiags.SourceRange{}), + Implied: true, + DeclRange: tfdiags.SourceRange{ + Filename: filepath.Join("testdata", "move-statement-implied", "move-statement-implied.tf"), + Start: tfdiags.SourcePos{Line: 46, Column: 1, Byte: 806}, + End: tfdiags.SourcePos{Line: 46, Column: 27, Byte: 832}, + }, + }, + } + + sort.Slice(got, func(i, j int) bool { + // This is just an arbitrary sort to make the result consistent + // regardless of what order the ImpliedMoveStatements function + // visits the entries in the state/config. + return got[i].DeclRange.Start.Line < got[j].DeclRange.Start.Line + }) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } +} diff --git a/pkg/refactoring/move_validate.go b/pkg/refactoring/move_validate.go new file mode 100644 index 00000000000..abc56eb9c7d --- /dev/null +++ b/pkg/refactoring/move_validate.go @@ -0,0 +1,345 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ValidateMoves tests whether all of the given move statements comply with +// both the single-statement validation rules and the "big picture" rules +// that constrain statements in relation to one another. 
+//
+// The validation rules are primarily in terms of the configuration, but
+// ValidateMoves also takes the expander that resulted from creating a plan
+// so that it can see which instances are defined for each module and resource,
+// to precisely validate move statements involving specific-instance addresses.
+//
+// Because validation depends on the planning result but move execution must
+// happen _before_ planning, we have the unusual situation where sibling
+// function ApplyMoves must run before ValidateMoves and must therefore
+// tolerate and ignore any invalid statements. The plan walk will then
+// construct an incorrect plan (because it'll be starting from the wrong
+// prior state) but ValidateMoves will block actually showing that invalid
+// plan to the user.
+func ValidateMoves(stmts []MoveStatement, rootCfg *configs.Config, declaredInsts instances.Set) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	if len(stmts) == 0 {
+		return diags
+	}
+
+	g := buildMoveStatementGraph(stmts)
+
+	// We need to track the absolute versions of our endpoint addresses in
+	// order to detect when there are ambiguous moves.
+	type AbsMoveEndpoint struct {
+		Other     addrs.AbsMoveable
+		StmtRange tfdiags.SourceRange
+	}
+	stmtFrom := addrs.MakeMap[addrs.AbsMoveable, AbsMoveEndpoint]()
+	stmtTo := addrs.MakeMap[addrs.AbsMoveable, AbsMoveEndpoint]()
+
+	for _, stmt := range stmts {
+		// Earlier code that constructs MoveStatement values should ensure that
+		// both stmt.From and stmt.To always belong to the same statement.
+ fromMod, _ := stmt.From.ModuleCallTraversals() + + for _, fromModInst := range declaredInsts.InstancesForModule(fromMod) { + absFrom := stmt.From.InModuleInstance(fromModInst) + + absTo := stmt.To.InModuleInstance(fromModInst) + + if addrs.Equivalent(absFrom, absTo) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Redundant move statement", + Detail: fmt.Sprintf( + "This statement declares a move from %s to the same address, which is the same as not declaring this move at all.", + absFrom, + ), + Subject: stmt.DeclRange.ToHCL().Ptr(), + }) + continue + } + + var noun string + var shortNoun string + switch absFrom.(type) { + case addrs.ModuleInstance: + noun = "module instance" + shortNoun = "instance" + case addrs.AbsModuleCall: + noun = "module call" + shortNoun = "call" + case addrs.AbsResourceInstance: + noun = "resource instance" + shortNoun = "instance" + case addrs.AbsResource: + noun = "resource" + shortNoun = "resource" + default: + // The above cases should cover all of the AbsMoveable types + panic("unsupported AbsMoveable address type") + } + + // It's invalid to have a move statement whose "from" address + // refers to something that is still declared in the configuration. + if moveableObjectExists(absFrom, declaredInsts) { + conflictRange, hasRange := movableObjectDeclRange(absFrom, rootCfg) + declaredAt := "" + if hasRange { + // NOTE: It'd be pretty weird to _not_ have a range, since + // we're only in this codepath because the plan phase + // thought this object existed in the configuration. 
+ declaredAt = fmt.Sprintf(" at %s", conflictRange.StartString()) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Moved object still exists", + Detail: fmt.Sprintf( + "This statement declares a move from %s, but that %s is still declared%s.\n\nChange your configuration so that this %s will be declared as %s instead.", + absFrom, noun, declaredAt, shortNoun, absTo, + ), + Subject: stmt.DeclRange.ToHCL().Ptr(), + }) + } + + // There can only be one destination for each source address. + if existing, exists := stmtFrom.GetOk(absFrom); exists { + if !addrs.Equivalent(existing.Other, absTo) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous move statements", + Detail: fmt.Sprintf( + "A statement at %s declared that %s moved to %s, but this statement instead declares that it moved to %s.\n\nEach %s can move to only one destination %s.", + existing.StmtRange.StartString(), absFrom, existing.Other, absTo, + noun, shortNoun, + ), + Subject: stmt.DeclRange.ToHCL().Ptr(), + }) + } + } else { + stmtFrom.Put(absFrom, AbsMoveEndpoint{ + Other: absTo, + StmtRange: stmt.DeclRange, + }) + } + + // There can only be one source for each destination address. + if existing, exists := stmtTo.GetOk(absTo); exists { + if !addrs.Equivalent(existing.Other, absFrom) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous move statements", + Detail: fmt.Sprintf( + "A statement at %s declared that %s moved to %s, but this statement instead declares that %s moved there.\n\nEach %s can have moved from only one source %s.", + existing.StmtRange.StartString(), existing.Other, absTo, absFrom, + noun, shortNoun, + ), + Subject: stmt.DeclRange.ToHCL().Ptr(), + }) + } + } else { + stmtTo.Put(absTo, AbsMoveEndpoint{ + Other: absFrom, + StmtRange: stmt.DeclRange, + }) + } + + // Resource types must match. 
+ if resourceTypesDiffer(absFrom, absTo) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Resource type mismatch", + Detail: fmt.Sprintf( + "This statement declares a move from %s to %s, which is a %s of a different type.", absFrom, absTo, noun, + ), + }) + } + + } + } + + // If we're not already returning other errors then we'll also check for + // and report cycles. + // + // Cycles alone are difficult to report in a helpful way because we don't + // have enough context to guess the user's intent. However, some particular + // mistakes that might lead to a cycle can also be caught by other + // validation rules above where we can make better suggestions, and so + // we'll use a cycle report only as a last resort. + if !diags.HasErrors() { + diags = diags.Append(validateMoveStatementGraph(g)) + } + + return diags +} + +func validateMoveStatementGraph(g *dag.AcyclicGraph) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, cycle := range g.Cycles() { + // Reporting cycles is awkward because there isn't any definitive + // way to decide which of the objects in the cycle is the cause of + // the problem. Therefore we'll just list them all out and leave + // the user to figure it out. 
:( + stmtStrs := make([]string, 0, len(cycle)) + for _, stmtI := range cycle { + // move statement graph nodes are pointers to move statements + stmt := stmtI.(*MoveStatement) + stmtStrs = append(stmtStrs, fmt.Sprintf( + "\n - %s: %s → %s", + stmt.DeclRange.StartString(), + stmt.From.String(), + stmt.To.String(), + )) + } + sort.Strings(stmtStrs) // just to make the order deterministic + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cyclic dependency in move statements", + fmt.Sprintf( + "The following chained move statements form a cycle, and so there is no final location to move objects to:%s\n\nA chain of move statements must end with an address that doesn't appear in any other statements, and which typically also refers to an object still declared in the configuration.", + strings.Join(stmtStrs, ""), + ), + )) + } + + // Look for cycles to self. + // A user shouldn't be able to create self-references, but we cannot + // correctly process a graph with them. + for _, e := range g.Edges() { + src := e.Source() + if src == e.Target() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Self reference in move statements", + fmt.Sprintf( + "The move statement %s refers to itself the move dependency graph, which is invalid. 
This is a bug in OpenTofu; please report it!", + src.(*MoveStatement).Name(), + ), + )) + } + } + + return diags +} + +func moveableObjectExists(addr addrs.AbsMoveable, in instances.Set) bool { + switch addr := addr.(type) { + case addrs.ModuleInstance: + return in.HasModuleInstance(addr) + case addrs.AbsModuleCall: + return in.HasModuleCall(addr) + case addrs.AbsResourceInstance: + return in.HasResourceInstance(addr) + case addrs.AbsResource: + return in.HasResource(addr) + default: + // The above cases should cover all of the AbsMoveable types + panic("unsupported AbsMoveable address type") + } +} + +func resourceTypesDiffer(absFrom, absTo addrs.AbsMoveable) bool { + switch absFrom := absFrom.(type) { + case addrs.AbsMoveableResource: + // addrs.UnifyMoveEndpoints guarantees that both addresses are of the + // same kind, so at this point we can assume that absTo is also an + // addrs.AbsResourceInstance or addrs.AbsResource. + absTo := absTo.(addrs.AbsMoveableResource) + return absFrom.AffectedAbsResource().Resource.Type != absTo.AffectedAbsResource().Resource.Type + default: + return false + } +} + +func movableObjectDeclRange(addr addrs.AbsMoveable, cfg *configs.Config) (tfdiags.SourceRange, bool) { + switch addr := addr.(type) { + case addrs.ModuleInstance: + // For a module instance we're actually looking for the call that + // declared it, which belongs to the parent module. + // (NOTE: This assumes "addr" can never be the root module instance, + // because the root module is never moveable.) + parentAddr, callAddr := addr.Call() + modCfg := cfg.DescendentForInstance(parentAddr) + if modCfg == nil { + return tfdiags.SourceRange{}, false + } + call := modCfg.Module.ModuleCalls[callAddr.Name] + if call == nil { + return tfdiags.SourceRange{}, false + } + + // If the call has either count or for_each set then we'll "blame" + // that expression, rather than the block as a whole, because it's + // the expression that decides which instances are available. 
+ switch { + case call.ForEach != nil: + return tfdiags.SourceRangeFromHCL(call.ForEach.Range()), true + case call.Count != nil: + return tfdiags.SourceRangeFromHCL(call.Count.Range()), true + default: + return tfdiags.SourceRangeFromHCL(call.DeclRange), true + } + case addrs.AbsModuleCall: + modCfg := cfg.DescendentForInstance(addr.Module) + if modCfg == nil { + return tfdiags.SourceRange{}, false + } + call := modCfg.Module.ModuleCalls[addr.Call.Name] + if call == nil { + return tfdiags.SourceRange{}, false + } + return tfdiags.SourceRangeFromHCL(call.DeclRange), true + case addrs.AbsResourceInstance: + modCfg := cfg.DescendentForInstance(addr.Module) + if modCfg == nil { + return tfdiags.SourceRange{}, false + } + rc := modCfg.Module.ResourceByAddr(addr.Resource.Resource) + if rc == nil { + return tfdiags.SourceRange{}, false + } + + // If the resource has either count or for_each set then we'll "blame" + // that expression, rather than the block as a whole, because it's + // the expression that decides which instances are available. 
+ switch { + case rc.ForEach != nil: + return tfdiags.SourceRangeFromHCL(rc.ForEach.Range()), true + case rc.Count != nil: + return tfdiags.SourceRangeFromHCL(rc.Count.Range()), true + default: + return tfdiags.SourceRangeFromHCL(rc.DeclRange), true + } + case addrs.AbsResource: + modCfg := cfg.DescendentForInstance(addr.Module) + if modCfg == nil { + return tfdiags.SourceRange{}, false + } + rc := modCfg.Module.ResourceByAddr(addr.Resource) + if rc == nil { + return tfdiags.SourceRange{}, false + } + return tfdiags.SourceRangeFromHCL(rc.DeclRange), true + default: + // The above cases should cover all of the AbsMoveable types + panic("unsupported AbsMoveable address type") + } +} diff --git a/pkg/refactoring/move_validate_test.go b/pkg/refactoring/move_validate_test.go new file mode 100644 index 00000000000..921114af0e5 --- /dev/null +++ b/pkg/refactoring/move_validate_test.go @@ -0,0 +1,714 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "context" + "path/filepath" + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestValidateMoves(t *testing.T) { + rootCfg, instances := loadRefactoringFixture(t, "testdata/move-validate-zoo") + + tests := map[string]struct { + Statements []MoveStatement + WantError string + }{ + "no move statements": { + Statements: nil, + WantError: ``, + }, + "some valid statements": { + Statements: []MoveStatement{ + // This is just a grab bag of various valid cases that don't + // generate any errors at all. 
+ makeTestMoveStmt(t, + ``, + `test.nonexist1`, + `test.target1`, + ), + makeTestMoveStmt(t, + `single`, + `test.nonexist1`, + `test.target1`, + ), + makeTestMoveStmt(t, + ``, + `test.nonexist2`, + `module.nonexist.test.nonexist2`, + ), + makeTestMoveStmt(t, + ``, + `module.single.test.nonexist3`, + `module.single.test.single`, + ), + makeTestMoveStmt(t, + ``, + `module.single.test.nonexist4`, + `test.target2`, + ), + makeTestMoveStmt(t, + ``, + `test.single[0]`, // valid because test.single doesn't have "count" set + `test.target3`, + ), + makeTestMoveStmt(t, + ``, + `test.zero_count[0]`, // valid because test.zero_count has count = 0 + `test.target4`, + ), + makeTestMoveStmt(t, + ``, + `test.zero_count[1]`, // valid because test.zero_count has count = 0 + `test.zero_count[0]`, + ), + makeTestMoveStmt(t, + ``, + `module.nonexist1`, + `module.target3`, + ), + makeTestMoveStmt(t, + ``, + `module.nonexist1[0]`, + `module.target4`, + ), + makeTestMoveStmt(t, + ``, + `module.single[0]`, // valid because module.single doesn't have "count" set + `module.target5`, + ), + makeTestMoveStmt(t, + ``, + `module.for_each["nonexist1"]`, + `module.for_each["a"]`, + ), + makeTestMoveStmt(t, + ``, + `module.for_each["nonexist2"]`, + `module.nonexist.module.nonexist`, + ), + makeTestMoveStmt(t, + ``, + `module.for_each["nonexist3"].test.single`, // valid because module.for_each doesn't currently have a "nonexist3" + `module.for_each["a"].test.single`, + ), + }, + WantError: ``, + }, + "two statements with the same endpoints": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.a`, + `module.b`, + ), + makeTestMoveStmt(t, + ``, + `module.a`, + `module.b`, + ), + }, + WantError: ``, + }, + "moving nowhere": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.a`, + `module.a`, + ), + }, + WantError: `Redundant move statement: This statement declares a move from module.a to the same address, which is the same as not declaring this move at all.`, + 
}, + "cyclic chain": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.a`, + `module.b`, + ), + makeTestMoveStmt(t, + ``, + `module.b`, + `module.c`, + ), + makeTestMoveStmt(t, + ``, + `module.c`, + `module.a`, + ), + }, + WantError: `Cyclic dependency in move statements: The following chained move statements form a cycle, and so there is no final location to move objects to: + - test:1,1: module.a[*] → module.b[*] + - test:1,1: module.b[*] → module.c[*] + - test:1,1: module.c[*] → module.a[*] + +A chain of move statements must end with an address that doesn't appear in any other statements, and which typically also refers to an object still declared in the configuration.`, + }, + "module.single as a call still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.single`, + `module.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.single, but that module call is still declared at testdata/move-validate-zoo/move-validate-root.tf:6,1. + +Change your configuration so that this call will be declared as module.other instead.`, + }, + "module.single as an instance still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.single`, + `module.other[0]`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.single, but that module instance is still declared at testdata/move-validate-zoo/move-validate-root.tf:6,1. + +Change your configuration so that this instance will be declared as module.other[0] instead.`, + }, + "module.count[0] still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.count[0]`, + `module.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.count[0], but that module instance is still declared at testdata/move-validate-zoo/move-validate-root.tf:12,12. 
+ +Change your configuration so that this instance will be declared as module.other instead.`, + }, + `module.for_each["a"] still exists in configuration`: { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.for_each["a"]`, + `module.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.for_each["a"], but that module instance is still declared at testdata/move-validate-zoo/move-validate-root.tf:22,14. + +Change your configuration so that this instance will be declared as module.other instead.`, + }, + "test.single as a resource still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.single`, + `test.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from test.single, but that resource is still declared at testdata/move-validate-zoo/move-validate-root.tf:27,1. + +Change your configuration so that this resource will be declared as test.other instead.`, + }, + "test.single as an instance still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.single`, + `test.other[0]`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from test.single, but that resource instance is still declared at testdata/move-validate-zoo/move-validate-root.tf:27,1. + +Change your configuration so that this instance will be declared as test.other[0] instead.`, + }, + "module.single.test.single as a resource still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.single.test.single`, + `test.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.single.test.single, but that resource is still declared at testdata/move-validate-zoo/child/move-validate-child.tf:6,1. 
+ +Change your configuration so that this resource will be declared as test.other instead.`, + }, + "module.single.test.single as a resource declared in module.single still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + `single`, + `test.single`, + `test.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.single.test.single, but that resource is still declared at testdata/move-validate-zoo/child/move-validate-child.tf:6,1. + +Change your configuration so that this resource will be declared as module.single.test.other instead.`, + }, + "module.single.test.single as an instance still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.single.test.single`, + `test.other[0]`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.single.test.single, but that resource instance is still declared at testdata/move-validate-zoo/child/move-validate-child.tf:6,1. + +Change your configuration so that this instance will be declared as test.other[0] instead.`, + }, + "module.count[0].test.single still exists in configuration": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.count[0].test.single`, + `test.other`, + ), + }, + WantError: `Moved object still exists: This statement declares a move from module.count[0].test.single, but that resource is still declared at testdata/move-validate-zoo/child/move-validate-child.tf:6,1. 
+ +Change your configuration so that this resource will be declared as test.other instead.`, + }, + "two different moves from test.nonexist": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.nonexist`, + `test.other1`, + ), + makeTestMoveStmt(t, + ``, + `test.nonexist`, + `test.other2`, + ), + }, + WantError: `Ambiguous move statements: A statement at test:1,1 declared that test.nonexist moved to test.other1, but this statement instead declares that it moved to test.other2. + +Each resource can move to only one destination resource.`, + }, + "two different moves to test.single": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.other1`, + `test.single`, + ), + makeTestMoveStmt(t, + ``, + `test.other2`, + `test.single`, + ), + }, + WantError: `Ambiguous move statements: A statement at test:1,1 declared that test.other1 moved to test.single, but this statement instead declares that test.other2 moved there. + +Each resource can have moved from only one source resource.`, + }, + "two different moves to module.count[0].test.single across two modules": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.other1`, + `module.count[0].test.single`, + ), + makeTestMoveStmt(t, + `count`, + `test.other2`, + `test.single`, + ), + }, + WantError: `Ambiguous move statements: A statement at test:1,1 declared that test.other1 moved to module.count[0].test.single, but this statement instead declares that module.count[0].test.other2 moved there. 
+ +Each resource can have moved from only one source resource.`, + }, + "move from resource in another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.fake_external.test.thing`, + `test.thing`, + ), + }, + WantError: ``, + }, + "move to resource in another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `test.thing`, + `module.fake_external.test.thing`, + ), + }, + WantError: ``, + }, + "move from module call in another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.fake_external.module.a`, + `module.b`, + ), + }, + WantError: ``, + }, + "move to module call in another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.a`, + `module.fake_external.module.b`, + ), + }, + WantError: ``, + }, + "implied move from resource in another module package": { + Statements: []MoveStatement{ + makeTestImpliedMoveStmt(t, + ``, + `module.fake_external.test.thing`, + `test.thing`, + ), + }, + // Implied move statements are not subject to the cross-package restriction + WantError: ``, + }, + "implied move to resource in another module package": { + Statements: []MoveStatement{ + makeTestImpliedMoveStmt(t, + ``, + `test.thing`, + `module.fake_external.test.thing`, + ), + }, + // Implied move statements are not subject to the cross-package restriction + WantError: ``, + }, + "implied move from module call in another module package": { + Statements: []MoveStatement{ + makeTestImpliedMoveStmt(t, + ``, + `module.fake_external.module.a`, + `module.b`, + ), + }, + // Implied move statements are not subject to the cross-package restriction + WantError: ``, + }, + "implied move to module call in another module package": { + Statements: []MoveStatement{ + makeTestImpliedMoveStmt(t, + ``, + `module.a`, + `module.fake_external.module.b`, + ), + }, + // Implied move statements are not subject to the cross-package restriction + WantError: ``, + }, + 
"move to a call that refers to another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.nonexist`, + `module.fake_external`, + ), + }, + WantError: ``, // This is okay because the call itself is not considered to be inside the package it refers to + }, + "move to instance of a call that refers to another module package": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, + ``, + `module.nonexist`, + `module.fake_external[0]`, + ), + }, + WantError: ``, // This is okay because the call itself is not considered to be inside the package it refers to + }, + "resource type mismatch": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, ``, + `test.nonexist1`, + `other.single`, + ), + }, + WantError: `Resource type mismatch: This statement declares a move from test.nonexist1 to other.single, which is a resource of a different type.`, + }, + "resource instance type mismatch": { + Statements: []MoveStatement{ + makeTestMoveStmt(t, ``, + `test.nonexist1[0]`, + `other.single`, + ), + }, + WantError: `Resource type mismatch: This statement declares a move from test.nonexist1[0] to other.single, which is a resource instance of a different type.`, + }, + "crossing nested statements": { + // overlapping nested moves will result in a cycle. 
+ Statements: []MoveStatement{ + makeTestMoveStmt(t, ``, + `module.nonexist.test.single`, + `module.count[0].test.count[0]`, + ), + makeTestMoveStmt(t, ``, + `module.nonexist`, + `module.count[0]`, + ), + }, + WantError: `Cyclic dependency in move statements: The following chained move statements form a cycle, and so there is no final location to move objects to: + - test:1,1: module.nonexist → module.count[0] + - test:1,1: module.nonexist.test.single → module.count[0].test.count[0] + +A chain of move statements must end with an address that doesn't appear in any other statements, and which typically also refers to an object still declared in the configuration.`, + }, + "fully contained nested statements": { + // we have to avoid a cycle because the nested moves appear in both + // the from and to address of the parent when only the module index + // is changing. + Statements: []MoveStatement{ + makeTestMoveStmt(t, `count`, + `test.count`, + `test.count[0]`, + ), + makeTestMoveStmt(t, ``, + `module.count`, + `module.count[0]`, + ), + }, + }, + "double fully contained nested statements": { + // we have to avoid a cycle because the nested moves appear in both + // the from and to address of the parent when only the module index + // is changing. 
+ Statements: []MoveStatement{ + makeTestMoveStmt(t, `count`, + `module.count`, + `module.count[0]`, + ), + makeTestMoveStmt(t, `count.count`, + `test.count`, + `test.count[0]`, + ), + makeTestMoveStmt(t, ``, + `module.count`, + `module.count[0]`, + ), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotDiags := ValidateMoves(test.Statements, rootCfg, instances) + + switch { + case test.WantError != "": + if !gotDiags.HasErrors() { + t.Fatalf("unexpected success\nwant error: %s", test.WantError) + } + normalisedErr := filepath.ToSlash(gotDiags.Err().Error()) + if got, want := normalisedErr, test.WantError; got != want { + t.Fatalf("wrong error\ngot error: %s\nwant error: %s", got, want) + } + default: + if gotDiags.HasErrors() { + t.Fatalf("unexpected error\ngot error: %s", gotDiags.Err().Error()) + } + } + }) + } +} + +// loadRefactoringFixture reads a configuration from the given directory and +// does some naive static processing on any count and for_each expressions +// inside, in order to get a realistic-looking instances.Set for what it +// declares without having to run a full OpenTofu plan. +func loadRefactoringFixture(t *testing.T, dir string) (*configs.Config, instances.Set) { + t.Helper() + + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + + inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + _, instDiags := inst.InstallModules(context.Background(), dir, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. 
	if err := loader.RefreshModules(); err != nil {
		t.Fatalf("failed to refresh modules after installation: %s", err)
	}

	rootCfg, diags := loader.LoadConfig(dir, configs.RootModuleCallForTesting())
	if diags.HasErrors() {
		t.Fatalf("failed to load root module: %s", diags.Error())
	}

	// Statically expand every module call and resource in the tree so the
	// caller gets a realistic instances.Set without running a full plan.
	expander := instances.NewExpander()
	staticPopulateExpanderModule(t, rootCfg, addrs.RootModuleInstance, expander)
	return rootCfg, expander.AllInstances()
}

// staticPopulateExpanderModule registers in expander all of the module and
// resource instances declared by the module at moduleAddr, recursing into its
// child module calls. count/for_each expressions are evaluated with no
// evaluation context, so fixtures must use literal values only.
func staticPopulateExpanderModule(t *testing.T, rootCfg *configs.Config, moduleAddr addrs.ModuleInstance, expander *instances.Expander) {
	t.Helper()

	modCfg := rootCfg.DescendentForInstance(moduleAddr)
	if modCfg == nil {
		t.Fatalf("no configuration for %s", moduleAddr)
	}

	if len(modCfg.Path) > 0 && modCfg.Path[len(modCfg.Path)-1] == "fake_external" {
		// As a funny special case we modify the source address of this
		// module to be something that counts as a separate package,
		// so we can test rules relating to crossing package boundaries
		// even though we really just loaded the module from a local path.
		modCfg.SourceAddr = fakeExternalModuleSource
	}

	for _, call := range modCfg.Module.ModuleCalls {
		callAddr := addrs.ModuleCall{Name: call.Name}

		if call.Name == "fake_external" {
			// As a funny special case we modify the source address of this
			// module to be something that counts as a separate package,
			// so we can test rules relating to crossing package boundaries
			// even though we really just loaded the module from a local path.
			call.SourceAddr = fakeExternalModuleSource
		}

		// In order to get a valid, useful set of instances here we're going
		// to just statically evaluate the count and for_each expressions.
		// Normally it's valid to use references and functions there, but for
		// our unit tests we'll just limit it to literal values to avoid
		// bringing all of the core evaluator complexity.
		switch {
		case call.ForEach != nil:
			val, diags := call.ForEach.Value(nil)
			if diags.HasErrors() {
				t.Fatalf("invalid for_each: %s", diags.Error())
			}
			expander.SetModuleForEach(moduleAddr, callAddr, val.AsValueMap())
		case call.Count != nil:
			val, diags := call.Count.Value(nil)
			if diags.HasErrors() {
				t.Fatalf("invalid count: %s", diags.Error())
			}
			var count int
			err := gocty.FromCtyValue(val, &count)
			if err != nil {
				t.Fatalf("invalid count at %s: %s", call.Count.Range(), err)
			}
			expander.SetModuleCount(moduleAddr, callAddr, count)
		default:
			// Neither count nor for_each: a single instance with no key.
			expander.SetModuleSingle(moduleAddr, callAddr)
		}

		// We need to recursively analyze the child modules too.
		calledMod := modCfg.Path.Child(call.Name)
		for _, inst := range expander.ExpandModule(calledMod) {
			staticPopulateExpanderModule(t, rootCfg, inst, expander)
		}
	}

	// Register this module's own resources (both managed and data).
	for _, rc := range modCfg.Module.ManagedResources {
		staticPopulateExpanderResource(t, moduleAddr, rc, expander)
	}
	for _, rc := range modCfg.Module.DataResources {
		staticPopulateExpanderResource(t, moduleAddr, rc, expander)
	}
}

// staticPopulateExpanderResource registers in expander the instances of the
// single resource rCfg within module instance moduleAddr, statically
// evaluating its count/for_each expression (literal values only, as above).
func staticPopulateExpanderResource(t *testing.T, moduleAddr addrs.ModuleInstance, rCfg *configs.Resource, expander *instances.Expander) {
	t.Helper()

	addr := rCfg.Addr()
	switch {
	case rCfg.ForEach != nil:
		val, diags := rCfg.ForEach.Value(nil)
		if diags.HasErrors() {
			t.Fatalf("invalid for_each: %s", diags.Error())
		}
		expander.SetResourceForEach(moduleAddr, addr, val.AsValueMap())
	case rCfg.Count != nil:
		val, diags := rCfg.Count.Value(nil)
		if diags.HasErrors() {
			t.Fatalf("invalid count: %s", diags.Error())
		}
		var count int
		err := gocty.FromCtyValue(val, &count)
		if err != nil {
			t.Fatalf("invalid count at %s: %s", rCfg.Count.Range(), err)
		}
		expander.SetResourceCount(moduleAddr, addr, count)
	default:
		expander.SetResourceSingle(moduleAddr, addr)
	}
}

// makeTestMoveStmt parses fromStr and toStr as move endpoints relative to the
// module named by moduleStr (empty means the root module) and returns the
// resulting explicit MoveStatement, failing the test on any invalid input.
func makeTestMoveStmt(t *testing.T, moduleStr, fromStr, toStr string) MoveStatement {
t.Helper() + + module := addrs.RootModule + if moduleStr != "" { + module = addrs.Module(strings.Split(moduleStr, ".")) + } + + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(fromStr), "", hcl.InitialPos) + if hclDiags.HasErrors() { + t.Fatalf("invalid from address: %s", hclDiags.Error()) + } + fromEP, diags := addrs.ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("invalid from address: %s", diags.Err().Error()) + } + + traversal, hclDiags = hclsyntax.ParseTraversalAbs([]byte(toStr), "", hcl.InitialPos) + if hclDiags.HasErrors() { + t.Fatalf("invalid to address: %s", hclDiags.Error()) + } + toEP, diags := addrs.ParseMoveEndpoint(traversal) + if diags.HasErrors() { + t.Fatalf("invalid to address: %s", diags.Err().Error()) + } + + fromInModule, toInModule := addrs.UnifyMoveEndpoints(module, fromEP, toEP) + if fromInModule == nil || toInModule == nil { + t.Fatalf("incompatible move endpoints") + } + + return MoveStatement{ + From: fromInModule, + To: toInModule, + DeclRange: tfdiags.SourceRange{ + Filename: "test", + Start: tfdiags.SourcePos{Line: 1, Column: 1}, + End: tfdiags.SourcePos{Line: 1, Column: 1}, + }, + } +} + +func makeTestImpliedMoveStmt(t *testing.T, moduleStr, fromStr, toStr string) MoveStatement { + t.Helper() + ret := makeTestMoveStmt(t, moduleStr, fromStr, toStr) + ret.Implied = true + return ret +} + +var fakeExternalModuleSource = addrs.ModuleSourceRemote{ + Package: addrs.ModulePackage("fake-external:///"), +} diff --git a/pkg/refactoring/remove_statement.go b/pkg/refactoring/remove_statement.go new file mode 100644 index 00000000000..10652d2d68e --- /dev/null +++ b/pkg/refactoring/remove_statement.go @@ -0,0 +1,124 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package refactoring

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// RemoveStatement represents a single "removed" block from the configuration,
// with its address resolved to root-relative (absolute config) form.
type RemoveStatement struct {
	// From is the root-relative address of the resource or module to remove.
	From addrs.ConfigRemovable
	// DeclRange is the source location of the "removed" block, for diagnostics.
	DeclRange tfdiags.SourceRange
}

// GetEndpointsToRemove recurses through the modules of the given configuration
// and returns an array of all "removed" addresses within, in a
// deterministic but undefined order.
// It also validates that the configuration blocks for the removed
// modules/resources no longer exist, reporting error diagnostics otherwise.
func GetEndpointsToRemove(rootCfg *configs.Config) ([]addrs.ConfigRemovable, tfdiags.Diagnostics) {
	rm := findRemoveStatements(rootCfg, nil)
	diags := validateRemoveStatements(rootCfg, rm)
	removedAddresses := make([]addrs.ConfigRemovable, len(rm))
	for i, rs := range rm {
		removedAddresses[i] = rs.From
	}
	return removedAddresses, diags
}

// findRemoveStatements appends to "into" one RemoveStatement for each
// "removed" block declared in cfg and, recursively, in all of its child
// modules, translating each module-relative address to root-relative form.
func findRemoveStatements(cfg *configs.Config, into []*RemoveStatement) []*RemoveStatement {
	modAddr := cfg.Path

	for _, rc := range cfg.Module.Removed {
		var removedEndpoint *RemoveStatement
		switch FromAddress := rc.From.RelSubject.(type) {
		case addrs.ConfigResource:
			// Get the absolute address of the resource by appending the module config address
			// to the resource's relative address
			absModule := make(addrs.Module, 0, len(modAddr)+len(FromAddress.Module))
			absModule = append(absModule, modAddr...)
			absModule = append(absModule, FromAddress.Module...)
+ + var absConfigResource addrs.ConfigRemovable = addrs.ConfigResource{ + Resource: FromAddress.Resource, + Module: absModule, + } + + removedEndpoint = &RemoveStatement{From: absConfigResource, DeclRange: tfdiags.SourceRangeFromHCL(rc.DeclRange)} + + case addrs.Module: + // Get the absolute address of the module by appending the module config address + // to the module itself + var absModule = make(addrs.Module, 0, len(modAddr)+len(FromAddress)) + absModule = append(absModule, modAddr...) + absModule = append(absModule, FromAddress...) + removedEndpoint = &RemoveStatement{From: absModule, DeclRange: tfdiags.SourceRangeFromHCL(rc.DeclRange)} + + default: + panic(fmt.Sprintf("unhandled address type %T", FromAddress)) + } + + into = append(into, removedEndpoint) + + } + + for _, childCfg := range cfg.Children { + into = findRemoveStatements(childCfg, into) + } + + return into +} + +// validateRemoveStatements validates that the removed modules/resources configuration blocks were removed. +func validateRemoveStatements(cfg *configs.Config, removeStatements []*RemoveStatement) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for _, rs := range removeStatements { + fromAddr := rs.From + if fromAddr == nil { + // Invalid value should've been caught during original + // configuration decoding, in the configs package. + panic(fmt.Sprintf("incompatible Remove endpoint in %s", rs.DeclRange.ToHCL())) + } + + // validate that a resource/module with this address doesn't exist in the config + switch fromAddr := fromAddr.(type) { + case addrs.ConfigResource: + moduleConfig := cfg.Descendent(fromAddr.Module) + if moduleConfig != nil && moduleConfig.Module.ResourceByAddr(fromAddr.Resource) != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Removed resource block still exists", + Detail: fmt.Sprintf( + "This statement declares a removal of the resource %s, but this resource block still exists in the configuration. 
Please remove the resource block.", + fromAddr, + ), + Subject: rs.DeclRange.ToHCL().Ptr(), + }) + } + case addrs.Module: + if cfg.Descendent(fromAddr) != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Removed module block still exists", + Detail: fmt.Sprintf( + "This statement declares a removal of the module %s, but this module block still exists in the configuration. Please remove the module block.", + fromAddr, + ), + Subject: rs.DeclRange.ToHCL().Ptr(), + }) + } + default: + panic(fmt.Sprintf("incompatible Remove endpoint address type in %s", rs.DeclRange.ToHCL())) + } + } + + return diags +} diff --git a/pkg/refactoring/remove_statement_test.go b/pkg/refactoring/remove_statement_test.go new file mode 100644 index 00000000000..04ffcfe362b --- /dev/null +++ b/pkg/refactoring/remove_statement_test.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package refactoring + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestGetEndpointsToRemove(t *testing.T) { + tests := []struct { + name string + fixtureName string + want []addrs.ConfigRemovable + wantError string + }{ + { + name: "Valid cases", + fixtureName: "testdata/remove-statement/valid-remove-statements", + want: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("foo.basic_resource")).(addrs.ConfigRemovable), + interface{}(addrs.Module{"basic_module"}).(addrs.ConfigRemovable), + interface{}(mustConfigResourceAddr("module.child.foo.removed_resource_from_root_module")).(addrs.ConfigRemovable), + interface{}(mustConfigResourceAddr("module.child.foo.removed_resource_from_child_module")).(addrs.ConfigRemovable), + interface{}(addrs.Module{"child", "removed_module_from_child_module"}).(addrs.ConfigRemovable), + 
interface{}(mustConfigResourceAddr("module.child.module.grandchild.foo.removed_resource_from_grandchild_module")).(addrs.ConfigRemovable), + interface{}(addrs.Module{"child", "grandchild", "removed_module_from_grandchild_module"}).(addrs.ConfigRemovable), + }, + wantError: ``, + }, + { + name: "Error - resource block still exist", + fixtureName: "testdata/remove-statement/not-valid-resource-block-still-exist", + want: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("foo.basic_resource")).(addrs.ConfigRemovable), + }, + wantError: `Removed resource block still exists: This statement declares a removal of the resource foo.basic_resource, but this resource block still exists in the configuration. Please remove the resource block.`, + }, + { + name: "Error - module block still exist", + fixtureName: "testdata/remove-statement/not-valid-module-block-still-exist", + want: []addrs.ConfigRemovable{}, + wantError: `Removed module block still exists: This statement declares a removal of the module module.child, but this module block still exists in the configuration. Please remove the module block.`, + }, + { + name: "Error - nested resource block still exist", + fixtureName: "testdata/remove-statement/not-valid-nested-resource-block-still-exist", + want: []addrs.ConfigRemovable{}, + wantError: `Removed resource block still exists: This statement declares a removal of the resource module.child.foo.basic_resource, but this resource block still exists in the configuration. 
Please remove the resource block.`, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rootCfg, _ := loadRefactoringFixture(t, tt.fixtureName) + got, diags := GetEndpointsToRemove(rootCfg) + + if tt.wantError != "" { + if !diags.HasErrors() { + t.Fatalf("missing expected error\ngot: \nwant: %s", tt.wantError) + } + errStr := diags.Err().Error() + if errStr != tt.wantError { + t.Fatalf("wrong error\ngot: %s\nwant: %s", errStr, tt.wantError) + } + } else { + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("wrong result\n%s", diff) + } + } + }) + } +} + +func mustConfigResourceAddr(s string) addrs.ConfigResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr.Config() +} diff --git a/pkg/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf b/pkg/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf new file mode 100644 index 00000000000..87d09c82797 --- /dev/null +++ b/pkg/refactoring/testdata/move-statement-implied/child/move-statement-implied.tf @@ -0,0 +1,16 @@ +# This fixture is useful only in conjunction with a previous run state that +# conforms to the statements encoded in the resource names. It's for +# TestImpliedMoveStatements only. + +resource "foo" "formerly_count" { + # but not count anymore +} + +resource "foo" "now_count" { + count = 1 +} + +moved { + from = foo.no_longer_present[1] + to = foo.no_longer_present +} diff --git a/pkg/refactoring/testdata/move-statement-implied/move-statement-implied.tf b/pkg/refactoring/testdata/move-statement-implied/move-statement-implied.tf new file mode 100644 index 00000000000..4ea628ea65f --- /dev/null +++ b/pkg/refactoring/testdata/move-statement-implied/move-statement-implied.tf @@ -0,0 +1,54 @@ +# This fixture is useful only in conjunction with a previous run state that +# conforms to the statements encoded in the resource names. It's for +# TestImpliedMoveStatements only. 
+ +resource "foo" "formerly_count" { + # but not count anymore +} + +resource "foo" "now_count" { + count = 2 +} + +resource "foo" "new_no_count" { +} + +resource "foo" "new_count" { + count = 2 +} + +resource "foo" "formerly_count_explicit" { + # but not count anymore +} + +moved { + from = foo.formerly_count_explicit[1] + to = foo.formerly_count_explicit +} + +resource "foo" "now_count_explicit" { + count = 2 +} + +moved { + from = foo.now_count_explicit + to = foo.now_count_explicit[1] +} + +resource "foo" "now_for_each_formerly_count" { + for_each = { a = 1 } +} + +resource "foo" "now_for_each_formerly_no_count" { + for_each = { a = 1 } +} + +resource "foo" "ambiguous" { + # this one doesn't have count in the config, but the test should + # set it up to have both no-key and zero-key instances in the + # state. +} + +module "child" { + source = "./child" +} diff --git a/pkg/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf b/pkg/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf new file mode 100644 index 00000000000..8f3c4f444bf --- /dev/null +++ b/pkg/refactoring/testdata/move-validate-zoo/child/move-validate-child.tf @@ -0,0 +1,21 @@ + +# NOTE: This fixture is used in a test that doesn't run a full Terraform plan +# operation, so the count and for_each expressions here can only be literal +# values and mustn't include any references or function calls. 
+ +resource "test" "single" { +} + +resource "test" "count" { + count = 2 +} + +resource "test" "zero_count" { + count = 0 +} + +resource "test" "for_each" { + for_each = { + a = "A" + } +} diff --git a/pkg/refactoring/testdata/move-validate-zoo/move-validate-root.tf b/pkg/refactoring/testdata/move-validate-zoo/move-validate-root.tf new file mode 100644 index 00000000000..3cc8504f9e2 --- /dev/null +++ b/pkg/refactoring/testdata/move-validate-zoo/move-validate-root.tf @@ -0,0 +1,53 @@ + +# NOTE: This fixture is used in a test that doesn't run a full Terraform plan +# operation, so the count and for_each expressions here can only be literal +# values and mustn't include any references or function calls. + +module "single" { + source = "./child" +} + +module "count" { + source = "./child" + count = 2 +} + +module "zero_count" { + source = "./child" + count = 0 +} + +module "for_each" { + source = "./child" + for_each = { + a = "A" + } +} + +resource "test" "single" { +} + +resource "test" "count" { + count = 2 +} + +resource "test" "zero_count" { + count = 0 +} + +resource "test" "for_each" { + for_each = { + a = "A" + } +} + +resource "other" "single" { +} + +module "fake_external" { + # Our configuration fixture loader has a special case for a module call + # named "fake_external" where it will mutate the source address after + # loading to instead be an external address, so we can test rules relating + # to crossing module boundaries. 
+ source = "./child" +} diff --git a/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/child/main.tf b/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/child/main.tf new file mode 100644 index 00000000000..6a5b9e86780 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/child/main.tf @@ -0,0 +1,2 @@ +resource "foo" "basic_resource" { +} diff --git a/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/main.tf b/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/main.tf new file mode 100644 index 00000000000..03346e02084 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/not-valid-module-block-still-exist/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +removed { + from = module.child +} \ No newline at end of file diff --git a/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/child/main.tf b/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/child/main.tf new file mode 100644 index 00000000000..6a5b9e86780 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/child/main.tf @@ -0,0 +1,2 @@ +resource "foo" "basic_resource" { +} diff --git a/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/main.tf b/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/main.tf new file mode 100644 index 00000000000..8de31420438 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/not-valid-nested-resource-block-still-exist/main.tf @@ -0,0 +1,7 @@ +removed { + from = module.child.foo.basic_resource +} + +module "child" { + source = "./child" +} diff --git a/pkg/refactoring/testdata/remove-statement/not-valid-resource-block-still-exist/main.tf 
b/pkg/refactoring/testdata/remove-statement/not-valid-resource-block-still-exist/main.tf new file mode 100644 index 00000000000..28b32b3a2a8 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/not-valid-resource-block-still-exist/main.tf @@ -0,0 +1,6 @@ +resource "foo" "basic_resource" { +} + +removed { + from = foo.basic_resource +} \ No newline at end of file diff --git a/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/grandchild/main.tf b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/grandchild/main.tf new file mode 100644 index 00000000000..40c599c639b --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/grandchild/main.tf @@ -0,0 +1,7 @@ +removed { + from = foo.removed_resource_from_grandchild_module +} + +removed { + from = module.removed_module_from_grandchild_module +} \ No newline at end of file diff --git a/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/main.tf b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/main.tf new file mode 100644 index 00000000000..250c8298dfa --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/child/main.tf @@ -0,0 +1,11 @@ +removed { + from = foo.removed_resource_from_child_module +} + +module "grandchild" { + source = "./grandchild" +} + +removed { + from = module.removed_module_from_child_module +} \ No newline at end of file diff --git a/pkg/refactoring/testdata/remove-statement/valid-remove-statements/main.tf b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/main.tf new file mode 100644 index 00000000000..a078fde5ce8 --- /dev/null +++ b/pkg/refactoring/testdata/remove-statement/valid-remove-statements/main.tf @@ -0,0 +1,15 @@ +removed { + from = foo.basic_resource +} + +removed { + from = module.basic_module +} + +removed { + from = module.child.foo.removed_resource_from_root_module +} + +module "child" { + source = "./child" 
+} diff --git a/pkg/registry/client.go b/pkg/registry/client.go new file mode 100644 index 00000000000..818b8387a83 --- /dev/null +++ b/pkg/registry/client.go @@ -0,0 +1,343 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package registry + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-retryablehttp" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/registry/regsrc" + "github.com/kubegems/opentofu/pkg/registry/response" + "github.com/kubegems/opentofu/version" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" + + // registryDiscoveryRetryEnvName is the name of the environment variable that + // can be configured to customize number of retries for module and provider + // discovery requests with the remote registry. + registryDiscoveryRetryEnvName = "TF_REGISTRY_DISCOVERY_RETRY" + defaultRetry = 1 + + // registryClientTimeoutEnvName is the name of the environment variable that + // can be configured to customize the timeout duration (seconds) for module + // and provider discovery with the remote registry. + registryClientTimeoutEnvName = "TF_REGISTRY_CLIENT_TIMEOUT" + + // defaultRequestTimeout is the default timeout duration for requests to the + // remote registry. + defaultRequestTimeout = 10 * time.Second +) + +var ( + tfVersion = version.String() + + discoveryRetry int + requestTimeout time.Duration +) + +func init() { + configureDiscoveryRetry() + configureRequestTimeout() +} + +// Client provides methods to query OpenTofu Registries. 
+type Client struct { + // this is the client to be used for all requests. + client *retryablehttp.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = httpclient.New() + client.Timeout = requestTimeout + } + retryableClient := retryablehttp.NewClient() + retryableClient.HTTPClient = client + retryableClient.RetryMax = discoveryRetry + retryableClient.RequestLogHook = requestLogHook + retryableClient.ErrorHandler = maxRetryErrorHandler + + logOutput := logging.LogOutput() + retryableClient.Logger = log.New(logOutput, "", log.Flags()) + + services.Transport = retryableClient.HTTPClient.Transport + + services.SetUserAgent(httpclient.OpenTofuUserAgent(version.String())) + + return &Client{ + client: retryableClient, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, &ServiceUnreachableError{err} + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// ModuleVersions queries the registry for a module, and returns the available versions. 
+func (c *Client) ModuleVersions(ctx context.Context, module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(module.Module(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching module versions from %q", service) + + req, err := retryablehttp.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + c.addRequestCreds(host, req.Request) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errModuleNotFound{addr: module} + default: + return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) + } + + var versions response.ModuleVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + for _, mod := range versions.Modules { + for _, v := range mod.Versions { + log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) + } + } + + return &versions, nil +} + +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + creds, err := c.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// ModuleLocation find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. 
+func (c *Client) ModuleLocation(ctx context.Context, module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return "", err + } + + var p *url.URL + if version == "" { + p, err = url.Parse(path.Join(module.Module(), "download")) + } else { + p, err = url.Parse(path.Join(module.Module(), version, "download")) + } + if err != nil { + return "", err + } + download := service.ResolveReference(p) + + log.Printf("[DEBUG] looking up module location from %q", download) + + req, err := retryablehttp.NewRequest("GET", download.String(), nil) + if err != nil { + return "", err + } + + req = req.WithContext(ctx) + + c.addRequestCreds(host, req.Request) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading response body from registry: %w", err) + } + + var location string + + switch resp.StatusCode { + case http.StatusOK: + var v response.ModuleLocationRegistryResp + if err := json.Unmarshal(body, &v); err != nil { + return "", fmt.Errorf("module %q version %q failed to deserialize response body %s: %w", + module, version, body, err) + } + + location = v.Location + + case http.StatusNoContent: + // FALLBACK: set the found location from the header + location = resp.Header.Get(xTerraformGet) + + case http.StatusNotFound: + return "", fmt.Errorf("module %q version %q not found", module, version) + + default: + // anything else is an error: + return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) + } + + if location == "" { + return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) + } + + // If location looks like it's trying to be a relative URL, treat it as + // 
one. + // + // We don't do this for just _any_ location, since the X-Terraform-Get + // header is a go-getter location rather than a URL, and so not all + // possible values will parse reasonably as URLs.) + // + // When used in conjunction with go-getter we normally require this header + // to be an absolute URL, but we are more liberal here because third-party + // registry implementations may not "know" their own absolute URLs if + // e.g. they are running behind a reverse proxy frontend, or such. + if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { + locationURL, err := url.Parse(location) + if err != nil { + return "", fmt.Errorf("invalid relative URL for %q: %w", module, err) + } + locationURL = download.ResolveReference(locationURL) + location = locationURL.String() + } + + return location, nil +} + +// configureDiscoveryRetry configures the number of retries the registry client +// will attempt for requests with retryable errors, like 502 status codes +func configureDiscoveryRetry() { + discoveryRetry = defaultRetry + + if v := os.Getenv(registryDiscoveryRetryEnvName); v != "" { + retry, err := strconv.Atoi(v) + if err == nil && retry > 0 { + discoveryRetry = retry + } + } +} + +func requestLogHook(logger retryablehttp.Logger, req *http.Request, i int) { + if i > 0 { + logger.Printf("[INFO] Previous request to the remote registry failed, attempting retry.") + } +} + +func maxRetryErrorHandler(resp *http.Response, err error, numTries int) (*http.Response, error) { + // Close the body per library instructions + if resp != nil { + resp.Body.Close() + } + + // Additional error detail: if we have a response, use the status code; + // if we have an error, use that; otherwise nothing. We will never have + // both response and error. 
+ var errMsg string + if resp != nil { + errMsg = fmt.Sprintf(": %s returned from %s", resp.Status, resp.Request.URL) + } else if err != nil { + errMsg = fmt.Sprintf(": %s", err) + } + + // This function is always called with numTries=RetryMax+1. If we made any + // retry attempts, include that in the error message. + if numTries > 1 { + return resp, fmt.Errorf("the request failed after %d attempts, please try again later%s", + numTries, errMsg) + } + return resp, fmt.Errorf("the request failed, please try again later%s", errMsg) +} + +// configureRequestTimeout configures the registry client request timeout from +// environment variables +func configureRequestTimeout() { + requestTimeout = defaultRequestTimeout + + if v := os.Getenv(registryClientTimeoutEnvName); v != "" { + timeout, err := strconv.Atoi(v) + if err == nil && timeout > 0 { + requestTimeout = time.Duration(timeout) * time.Second + } + } +} diff --git a/pkg/registry/client_test.go b/pkg/registry/client_test.go new file mode 100644 index 00000000000..808ae081065 --- /dev/null +++ b/pkg/registry/client_test.go @@ -0,0 +1,554 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package registry + +import ( + "context" + "errors" + "io" + "net/http" + "os" + "reflect" + "strings" + "testing" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/registry/regsrc" + "github.com/kubegems/opentofu/pkg/registry/test" + tfversion "github.com/kubegems/opentofu/version" +) + +func TestConfigureDiscoveryRetry(t *testing.T) { + t.Run("default retry", func(t *testing.T) { + if discoveryRetry != defaultRetry { + t.Fatalf("expected retry %q, got %q", defaultRetry, discoveryRetry) + } + + rc := NewClient(nil, nil) + if rc.client.RetryMax != defaultRetry { + t.Fatalf("expected client retry %q, got %q", + defaultRetry, rc.client.RetryMax) + } + }) + + t.Run("configured retry", func(t *testing.T) { + defer func() { + discoveryRetry = defaultRetry + }() + t.Setenv(registryDiscoveryRetryEnvName, "2") + + configureDiscoveryRetry() + expected := 2 + if discoveryRetry != expected { + t.Fatalf("expected retry %q, got %q", + expected, discoveryRetry) + } + + rc := NewClient(nil, nil) + if rc.client.RetryMax != expected { + t.Fatalf("expected client retry %q, got %q", + expected, rc.client.RetryMax) + } + }) +} + +func TestConfigureRegistryClientTimeout(t *testing.T) { + t.Run("default timeout", func(t *testing.T) { + if requestTimeout != defaultRequestTimeout { + t.Fatalf("expected timeout %q, got %q", + defaultRequestTimeout.String(), requestTimeout.String()) + } + + rc := NewClient(nil, nil) + if rc.client.HTTPClient.Timeout != defaultRequestTimeout { + t.Fatalf("expected client timeout %q, got %q", + defaultRequestTimeout.String(), rc.client.HTTPClient.Timeout.String()) + } + }) + + t.Run("configured timeout", func(t *testing.T) { + defer func() { + requestTimeout = defaultRequestTimeout + }() + t.Setenv(registryClientTimeoutEnvName, "20") + + configureRequestTimeout() + expected := 20 * 
time.Second + if requestTimeout != expected { + t.Fatalf("expected timeout %q, got %q", + expected, requestTimeout.String()) + } + + rc := NewClient(nil, nil) + if rc.client.HTTPClient.Timeout != expected { + t.Fatalf("expected client timeout %q, got %q", + expected, rc.client.HTTPClient.Timeout.String()) + } + }) +} + +func TestLookupModuleVersions(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + // test with and without a hostname + for _, src := range []string{ + "example.com/test-versions/name/provider", + "test-versions/name/provider", + } { + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err != nil { + t.Fatal(err) + } + + if len(resp.Modules) != 1 { + t.Fatal("expected 1 module, got", len(resp.Modules)) + } + + mod := resp.Modules[0] + name := "test-versions/name/provider" + if mod.Source != name { + t.Fatalf("expected module name %q, got %q", name, mod.Source) + } + + if len(mod.Versions) != 4 { + t.Fatal("expected 4 versions, got", len(mod.Versions)) + } + + for _, v := range mod.Versions { + _, err := version.NewVersion(v.Version) + if err != nil { + t.Fatalf("invalid version %q: %s", v.Version, err) + } + } + } +} + +func TestInvalidRegistry(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "non-existent.localhost.localdomain/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + if _, err := client.ModuleVersions(context.Background(), modsrc); err == nil { + t.Fatal("expected error") + } +} + +func TestRegistryAuth(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "private/name/provider" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) 
+ } + + _, err = client.ModuleVersions(context.Background(), mod) + if err != nil { + t.Fatal(err) + } + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") + if err != nil { + t.Fatal(err) + } + + // Also test without a credentials source + client.services.SetCredentialsSource(nil) + + // both should fail without auth + _, err = client.ModuleVersions(context.Background(), mod) + if err == nil { + t.Fatal("expected error") + } + _, err = client.ModuleLocation(context.Background(), mod, "1.0.0") + if err == nil { + t.Fatal("expected error") + } +} + +func TestLookupModuleLocationRelative(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "relative/foo/bar" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + got, err := client.ModuleLocation(context.Background(), mod, "0.2.0") + if err != nil { + t.Fatal(err) + } + + want := server.URL + "/relative-path" + if got != want { + t.Errorf("wrong location %s; want %s", got, want) + } +} + +func TestAccLookupModuleVersions(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip() + } + regDisco := disco.New() + regDisco.SetUserAgent(httpclient.OpenTofuUserAgent(tfversion.String())) + + // test with and without a hostname + for _, src := range []string{ + "terraform-aws-modules/vpc/aws", + regsrc.PublicRegistryHost.String() + "/terraform-aws-modules/vpc/aws", + } { + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + s := NewClient(regDisco, nil) + resp, err := s.ModuleVersions(context.Background(), modsrc) + if err != nil { + t.Fatal(err) + } + + if len(resp.Modules) != 1 { + t.Fatal("expected 1 module, got", len(resp.Modules)) + } + + mod := resp.Modules[0] + name := "terraform-aws-modules/vpc/aws" + if mod.Source != name { + t.Fatalf("expected module name %q, got %q", name, mod.Source) + } + + if len(mod.Versions) == 0 { + t.Fatal("expected multiple versions, got 
0") + } + + for _, v := range mod.Versions { + _, err := version.NewVersion(v.Version) + if err != nil { + t.Fatalf("invalid version %q: %s", v.Version, err) + } + } + } +} + +// the error should reference the config source exactly, not the discovered path. +func TestLookupLookupModuleError(t *testing.T) { + server := test.Registry() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + // this should not be found in the registry + src := "bad/local/path" + mod, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + + // Instrument CheckRetry to make sure 404s are not retried + retries := 0 + oldCheck := client.client.CheckRetry + client.client.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if retries > 0 { + t.Fatal("retried after module not found") + } + retries++ + return oldCheck(ctx, resp, err) + } + + _, err = client.ModuleLocation(context.Background(), mod, "0.2.0") + if err == nil { + t.Fatal("expected error") + } + + // check for the exact quoted string to ensure we didn't prepend a hostname. + if !strings.Contains(err.Error(), `"bad/local/path"`) { + t.Fatal("error should not include the hostname. 
got:", err) + } +} + +func TestLookupModuleRetryError(t *testing.T) { + server := test.RegistryRetryableErrorsServer() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "example.com/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected requests to exceed retry", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the error + if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} + +func TestLookupModuleNoRetryError(t *testing.T) { + // Disable retries + discoveryRetry = 0 + defer configureDiscoveryRetry() + + server := test.RegistryRetryableErrorsServer() + defer server.Close() + + client := NewClient(test.Disco(server), nil) + + src := "example.com/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected request to fail", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the error + if !strings.Contains(err.Error(), "the request failed, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} + +func TestLookupModuleNetworkError(t *testing.T) { + server := test.RegistryRetryableErrorsServer() + client := NewClient(test.Disco(server), nil) + + // Shut down the server to simulate network failure + server.Close() + + src := "example.com/test-versions/name/provider" + modsrc, err := regsrc.ParseModuleSource(src) + if err != nil { + t.Fatal(err) + } + resp, err := client.ModuleVersions(context.Background(), modsrc) + if err == nil { + t.Fatal("expected request to 
fail", err) + } + if resp != nil { + t.Fatal("unexpected response", *resp) + } + + // verify maxRetryErrorHandler handler returned the correct error + if !strings.Contains(err.Error(), "the request failed after 2 attempts, please try again later") { + t.Fatal("unexpected error, got:", err) + } +} + +func TestModuleLocation_readRegistryResponse(t *testing.T) { + cases := map[string]struct { + src string + httpClient *http.Client + registryFlags []uint8 + want string + wantErrorStr string + wantToReadFromHeader bool + wantStatusCode int + }{ + "shall find the module location in the registry response body": { + src: "exists-in-registry/identifier/provider", + want: "file:///registry/exists", + wantStatusCode: http.StatusOK, + httpClient: &http.Client{ + Transport: &mockRoundTripper{}, + }, + }, + "shall find the module location in the registry response header": { + src: "exists-in-registry/identifier/provider", + registryFlags: []uint8{test.WithModuleLocationInHeader}, + want: "file:///registry/exists", + wantToReadFromHeader: true, + wantStatusCode: http.StatusNoContent, + httpClient: &http.Client{ + Transport: &mockRoundTripper{}, + }, + }, + "shall read location from the registry response body even if the header with location address is also set": { + src: "exists-in-registry/identifier/provider", + want: "file:///registry/exists", + wantStatusCode: http.StatusOK, + wantToReadFromHeader: false, + registryFlags: []uint8{test.WithModuleLocationInBody, test.WithModuleLocationInHeader}, + httpClient: &http.Client{ + Transport: &mockRoundTripper{}, + }, + }, + "shall fail to find the module": { + src: "not-exist/identifier/provider", + // note that the version is fixed in the mock + // see: /internal/registry/test/mock_registry.go:testMods + wantErrorStr: `module "not-exist/identifier/provider" version "0.2.0" not found`, + wantStatusCode: http.StatusNotFound, + httpClient: &http.Client{ + Transport: &mockRoundTripper{}, + }, + }, + "shall fail because of reading 
response body error": { + src: "foo/bar/baz", + wantErrorStr: "error reading response body from registry: foo", + wantStatusCode: http.StatusOK, + httpClient: &http.Client{ + Transport: &mockRoundTripper{ + forwardResponse: &http.Response{ + StatusCode: http.StatusOK, + Body: mockErrorReadCloser{err: errors.New("foo")}, + }, + }, + }, + }, + "shall fail to deserialize JSON response": { + src: "foo/bar/baz", + wantErrorStr: `module "foo/bar/baz" version "0.2.0" failed to deserialize response body {: unexpected end of JSON input`, + wantStatusCode: http.StatusOK, + httpClient: &http.Client{ + Transport: &mockRoundTripper{ + forwardResponse: &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader("{")), + }, + }, + }, + }, + "shall fail because of unexpected protocol change - 422 http status": { + src: "foo/bar/baz", + wantErrorStr: `error getting download location for "foo/bar/baz": foo resp:bar`, + wantStatusCode: http.StatusUnprocessableEntity, + httpClient: &http.Client{ + Transport: &mockRoundTripper{ + forwardResponse: &http.Response{ + StatusCode: http.StatusUnprocessableEntity, + Status: "foo", + Body: io.NopCloser(strings.NewReader("bar")), + }, + }, + }, + }, + "shall fail because location is not found in the response": { + src: "foo/bar/baz", + wantErrorStr: `failed to get download URL for "foo/bar/baz": OK resp:{"foo":"git::https://github.com/foo/terraform-baz-bar?ref=v0.2.0"}`, + wantStatusCode: http.StatusOK, + httpClient: &http.Client{ + Transport: &mockRoundTripper{ + forwardResponse: &http.Response{ + StatusCode: http.StatusOK, + Status: "OK", + // note that the response emulates a contract change + Body: io.NopCloser(strings.NewReader(`{"foo":"git::https://github.com/foo/terraform-baz-bar?ref=v0.2.0"}`)), + }, + }, + }, + }, + } + + t.Parallel() + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + server := test.Registry(tc.registryFlags...) 
+ defer server.Close() + + client := NewClient(test.Disco(server), tc.httpClient) + + mod, err := regsrc.ParseModuleSource(tc.src) + if err != nil { + t.Fatal(err) + } + + got, err := client.ModuleLocation(context.Background(), mod, "0.2.0") + if err != nil && tc.wantErrorStr == "" { + t.Fatalf("unexpected error: %v", err) + } + if err != nil && err.Error() != tc.wantErrorStr { + t.Fatalf("unexpected error content: want=%s, got=%v", tc.wantErrorStr, err) + } + if got != tc.want { + t.Fatalf("unexpected location: want=%s, got=%v", tc.want, got) + } + + gotStatusCode := tc.httpClient.Transport.(*mockRoundTripper).reverseResponse.StatusCode + if tc.wantStatusCode != gotStatusCode { + t.Fatalf("unexpected response status code: want=%d, got=%d", tc.wantStatusCode, gotStatusCode) + } + + if tc.wantToReadFromHeader { + resp := tc.httpClient.Transport.(*mockRoundTripper).reverseResponse + if !reflect.DeepEqual(resp.Body, http.NoBody) { + t.Fatalf("expected no body") + } + } + }) + } +} + +type mockRoundTripper struct { + // response to return without calling the server + // SET TO USE AS A REVERSE PROXY + forwardResponse *http.Response + // the response from the server will be written here + // DO NOT SET + reverseResponse *http.Response + err error +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if m.err != nil { + return nil, m.err + } + if m.forwardResponse != nil { + m.reverseResponse = m.forwardResponse + return m.forwardResponse, nil + } + resp, err := http.DefaultTransport.RoundTrip(req) + m.reverseResponse = resp + return resp, err +} + +type mockErrorReadCloser struct { + err error +} + +func (m mockErrorReadCloser) Read(_ []byte) (n int, err error) { + return 0, m.err +} + +func (m mockErrorReadCloser) Close() error { + return m.err +} diff --git a/pkg/registry/errors.go b/pkg/registry/errors.go new file mode 100644 index 00000000000..8fce787ef25 --- /dev/null +++ b/pkg/registry/errors.go @@ -0,0 +1,52 @@ +// Copyright 
(c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package registry + +import ( + "fmt" + + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/registry/regsrc" +) + +type errModuleNotFound struct { + addr *regsrc.Module +} + +func (e *errModuleNotFound) Error() string { + return fmt.Sprintf("module %s not found", e.addr) +} + +// IsModuleNotFound returns true only if the given error is a "module not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsModuleNotFound(err error) bool { + _, ok := err.(*errModuleNotFound) + return ok +} + +// IsServiceNotProvided returns true only if the given error is a "service not provided" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsServiceNotProvided(err error) bool { + _, ok := err.(*disco.ErrServiceNotProvided) + return ok +} + +// ServiceUnreachableError Registry service is unreachable +type ServiceUnreachableError struct { + err error +} + +func (e *ServiceUnreachableError) Error() string { + return e.err.Error() +} + +// IsServiceUnreachable returns true if the registry/discovery service was unreachable +func IsServiceUnreachable(err error) bool { + _, ok := err.(*ServiceUnreachableError) + return ok +} diff --git a/pkg/registry/regsrc/friendly_host.go b/pkg/registry/regsrc/friendly_host.go new file mode 100644 index 00000000000..ce1fb44337d --- /dev/null +++ b/pkg/registry/regsrc/friendly_host.go @@ -0,0 +1,145 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package regsrc
+
+import (
+	"regexp"
+	"strings"
+
+	svchost "github.com/hashicorp/terraform-svchost"
+)
+
+var (
+	// InvalidHostString is a placeholder returned when a raw host can't be
+	// converted by IDNA spec. It will never be returned for any host for which
+	// Valid() is true.
+	InvalidHostString = ""
+
+	// urlLabelEndSubRe is a sub-expression that matches any character that's
+	// allowed at the start or end of a URL label according to RFC1123.
+	urlLabelEndSubRe = "[0-9A-Za-z]"
+
+	// urlLabelMidSubRe is a sub-expression that matches any character that's
+	// allowed in a non-start or non-end position of a URL label per RFC1123.
+	urlLabelMidSubRe = "[0-9A-Za-z-]"
+
+	// urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char
+	// in an IDN (Unicode) display URL. It's not strict - there are only ~15k
+	// valid Unicode points in IDN RFC (some with conditions). We are just going
+	// with being liberal with matching and then erroring if we fail to convert
+	// to punycode later (which validates chars fully). This at least ensures
+	// ascii chars disallowed by the RFC1123 parts above don't become legal
+	// again.
+	urlLabelUnicodeSubRe = "[^[:ascii:]]"
+
+	// hostLabelSubRe is the sub-expression that matches a valid hostname label.
+	// It does not anchor the start or end so it can be composed into more
+	// complex RegExps below. Note that for sanity we don't handle disallowing
+	// raw punycode in this regexp (esp. since re2 doesn't support negative
+	// lookbehind, but we can capture its presence here to check later). 
+ hostLabelSubRe = "" + + // Match valid initial char, or unicode char + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + // Optionally, match 0 to 61 valid URL or Unicode chars, + // followed by one valid end char or unicode char + "(?:" + + "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" + + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + ")?" + + // hostSubRe is the sub-expression that matches a valid host prefix. + // Allows custom port. + hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?" + + // hostRe is a regexp that matches a valid host prefix. Additional + // validation of unicode strings is needed for matches. + hostRe = regexp.MustCompile("^" + hostSubRe + "$") +) + +// FriendlyHost describes a registry instance identified in source strings by a +// simple bare hostname like registry.opentofu.org. +type FriendlyHost struct { + Raw string +} + +func NewFriendlyHost(host string) *FriendlyHost { + return &FriendlyHost{Raw: host} +} + +// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the +// given string. If no valid prefix is found, host will be nil and rest will +// contain the full source string. The host prefix must terminate at the end of +// the input or at the first / character. If one or more characters exist after +// the first /, they will be returned as rest (without the / delimiter). +// Hostnames containing punycode WILL be parsed successfully since they may have +// come from an internal normalized source string, however should be considered +// invalid if the string came from a user directly. This must be checked +// explicitly for user-input strings by calling Valid() on the +// returned host. 
+func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) { + parts := strings.SplitN(source, "/", 2) + + if hostRe.MatchString(parts[0]) { + host = &FriendlyHost{Raw: parts[0]} + if len(parts) == 2 { + rest = parts[1] + } + return + } + + // No match, return whole string as rest along with nil host + rest = source + return +} + +// Valid returns whether the host prefix is considered valid in any case. +// Example of invalid prefixes might include ones that don't conform to the host +// name specifications. Not that IDN prefixes containing punycode are not valid +// input which we expect to always be in user-input or normalised display form. +func (h *FriendlyHost) Valid() bool { + return svchost.IsValid(h.Raw) +} + +// Display returns the host formatted for display to the user in CLI or web +// output. +func (h *FriendlyHost) Display() string { + return svchost.ForDisplay(h.Raw) +} + +// Normalized returns the host formatted for internal reference or comparison. +func (h *FriendlyHost) Normalized() string { + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return InvalidHostString + } + return string(host) +} + +// String returns the host formatted as the user originally typed it assuming it +// was parsed from user input. +func (h *FriendlyHost) String() string { + return h.Raw +} + +// Equal compares the FriendlyHost against another instance taking normalization +// into account. Invalid hosts cannot be compared and will always return false. 
+func (h *FriendlyHost) Equal(other *FriendlyHost) bool { + if other == nil { + return false + } + + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false + } + + return otherHost == host +} diff --git a/pkg/registry/regsrc/friendly_host_test.go b/pkg/registry/regsrc/friendly_host_test.go new file mode 100644 index 00000000000..2f35f73cf19 --- /dev/null +++ b/pkg/registry/regsrc/friendly_host_test.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package regsrc + +import ( + "strings" + "testing" +) + +func TestFriendlyHost(t *testing.T) { + tests := []struct { + name string + source string + wantHost string + wantDisplay string + wantNorm string + wantValid bool + }{ + { + name: "simple ascii", + source: "registry.opentofu.org", + wantHost: "registry.opentofu.org", + wantDisplay: "registry.opentofu.org", + wantNorm: "registry.opentofu.org", + wantValid: true, + }, + { + name: "mixed-case ascii", + source: "Registry.OpenTofu.org", + wantHost: "Registry.OpenTofu.org", + wantDisplay: "registry.opentofu.org", // Display case folded + wantNorm: "registry.opentofu.org", + wantValid: true, + }, + { + name: "IDN", + source: "ʎɹʇsıƃǝɹ.ɯɹoɟɐɹɹǝʇ.io", + wantHost: "ʎɹʇsıƃǝɹ.ɯɹoɟɐɹɹǝʇ.io", + wantDisplay: "ʎɹʇsıƃǝɹ.ɯɹoɟɐɹɹǝʇ.io", + wantNorm: "xn--s-fka0wmm0zea7g8b.xn--o-8ta85a3b1dwcda1k.io", + wantValid: true, + }, + { + name: "IDN TLD", + source: "zhongwen.中国", + wantHost: "zhongwen.中国", + wantDisplay: "zhongwen.中国", + wantNorm: "zhongwen.xn--fiqs8s", + wantValid: true, + }, + { + name: "IDN Case Folding", + source: "Испытание.com", + wantHost: "Испытание.com", // Raw input retains case + wantDisplay: "испытание.com", // Display form is unicode but case-folded + wantNorm: "xn--80akhbyknj4f.com", + wantValid: true, + }, + { + name: "Punycode 
is invalid as an input format", + source: "xn--s-fka0wmm0zea7g8b.xn--o-8ta85a3b1dwcda1k.io", + wantHost: "xn--s-fka0wmm0zea7g8b.xn--o-8ta85a3b1dwcda1k.io", + wantDisplay: "ʎɹʇsıƃǝɹ.ɯɹoɟɐɹɹǝʇ.io", + wantNorm: InvalidHostString, + wantValid: false, + }, + { + name: "non-host prefix is left alone", + source: "foo/bar/baz", + wantHost: "", + wantDisplay: "", + wantNorm: "", + wantValid: false, + }, + } + for _, tt := range tests { + // Matrix each test with prefix and total match variants + for _, sfx := range []string{"", "/", "/foo/bar/baz"} { + t.Run(tt.name+" suffix:"+sfx, func(t *testing.T) { + gotHost, gotRest := ParseFriendlyHost(tt.source + sfx) + + if gotHost == nil { + if tt.wantHost != "" { + t.Fatalf("ParseFriendlyHost() gotHost = nil, want %v", tt.wantHost) + } + // If we return nil host, the whole input string should be in rest + if gotRest != (tt.source + sfx) { + t.Fatalf("ParseFriendlyHost() was nil rest = %s, want %v", gotRest, tt.source+sfx) + } + return + } + + if tt.wantHost == "" { + t.Fatalf("ParseFriendlyHost() gotHost.Raw = %v, want nil", gotHost.Raw) + } + + if v := gotHost.String(); v != tt.wantHost { + t.Fatalf("String() = %v, want %v", v, tt.wantHost) + } + if v := gotHost.Display(); v != tt.wantDisplay { + t.Fatalf("Display() = %v, want %v", v, tt.wantDisplay) + } + if v := gotHost.Normalized(); v != tt.wantNorm { + t.Fatalf("Normalized() = %v, want %v", v, tt.wantNorm) + } + if v := gotHost.Valid(); v != tt.wantValid { + t.Fatalf("Valid() = %v, want %v", v, tt.wantValid) + } + if gotRest != strings.TrimLeft(sfx, "/") { + t.Fatalf("ParseFriendlyHost() rest = %v, want %v", gotRest, strings.TrimLeft(sfx, "/")) + } + + // Also verify that host compares equal with all the variants. 
+ if gotHost.Valid() && !gotHost.Equal(&FriendlyHost{Raw: tt.wantDisplay}) { + t.Fatalf("Equal() should be true for %s and %s", tt.wantHost, tt.wantDisplay) + } + }) + } + } +} + +func TestInvalidHostEquals(t *testing.T) { + invalid := NewFriendlyHost("NOT_A_HOST_NAME") + valid := PublicRegistryHost + + // invalid hosts are not comparable + if invalid.Equal(invalid) { + t.Fatal("invalid host names are not comparable") + } + + if valid.Equal(invalid) { + t.Fatalf("%q is not equal to %q", valid, invalid) + } + + puny := NewFriendlyHost("xn--s-fka0wmm0zea7g8b.xn--o-8ta85a3b1dwcda1k.io") + display := NewFriendlyHost("ʎɹʇsıƃǝɹ.ɯɹoɟɐɹɹǝʇ.io") + + // The pre-normalized host is not a valid source, and therefore not + // comparable to the display version. + if display.Equal(puny) { + t.Fatalf("invalid host %q should not be comparable", puny) + } +} diff --git a/pkg/registry/regsrc/module.go b/pkg/registry/regsrc/module.go new file mode 100644 index 00000000000..2dfa131d10e --- /dev/null +++ b/pkg/registry/regsrc/module.go @@ -0,0 +1,250 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/kubegems/opentofu/pkg/addrs" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. 
(GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. + providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. + ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in tofu. + disallowed = map[string]bool{ + "github.com": true, + "bitbucket.org": true, + } +) + +// Module describes a OpenTofu Registry Module source. +type Module struct { + // RawHost is the friendly host prefix if one was present. It might be nil + // if the original source had no host prefix which implies + // PublicRegistryHost but is distinct from having an actual pointer to + // PublicRegistryHost since it encodes the fact the original string didn't + // include a host prefix at all which is significant for recovering actual + // input not just normalized form. Most callers should access it with Host() + // which will return public registry host instance if it's nil. 
+ RawHost *FriendlyHost + RawNamespace string + RawName string + RawProvider string + RawSubmodule string +} + +// NewModule construct a new module source from separate parts. Pass empty +// string if host or submodule are not needed. +func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { + m := &Module{ + RawNamespace: namespace, + RawName: name, + RawProvider: provider, + RawSubmodule: submodule, + } + if host != "" { + h := NewFriendlyHost(host) + if h != nil { + fmt.Println("HOST:", h) + if !h.Valid() || disallowed[h.Display()] { + return nil, ErrInvalidModuleSource + } + } + m.RawHost = h + } + return m, nil +} + +// ModuleFromModuleSourceAddr is an adapter to automatically transform the +// modern representation of registry module addresses, +// addrs.ModuleSourceRegistry, into the legacy representation regsrc.Module. +// +// Note that the new-style model always does normalization during parsing and +// does not preserve the raw user input at all, and so although the fields +// of regsrc.Module are all called "Raw...", initializing a Module indirectly +// through an addrs.ModuleSourceRegistry will cause those values to be the +// normalized ones, not the raw user input. +// +// Use this only for temporary shims to call into existing code that still +// uses regsrc.Module. Eventually all other subsystems should be updated to +// use addrs.ModuleSourceRegistry instead, and then package regsrc can be +// removed altogether. +func ModuleFromModuleSourceAddr(addr addrs.ModuleSourceRegistry) *Module { + ret := ModuleFromRegistryPackageAddr(addr.Package) + ret.RawSubmodule = addr.Subdir + return ret +} + +// ModuleFromRegistryPackageAddr is similar to ModuleFromModuleSourceAddr, but +// it works with just the isolated registry package address, and not the +// full source address. 
+// +// The practical implication of that is that RawSubmodule will always be +// the empty string in results from this function, because "Submodule" maps +// to "Subdir" and that's a module source address concept, not a module +// package concept. In practice this typically doesn't matter because the +// registry client ignores the RawSubmodule field anyway; that's a concern +// for the higher-level module installer to deal with. +func ModuleFromRegistryPackageAddr(addr addrs.ModuleRegistryPackage) *Module { + return &Module{ + RawHost: NewFriendlyHost(addr.Host.String()), + RawNamespace: addr.Namespace, + RawName: addr.Name, + RawProvider: addr.TargetSystem, // this field was never actually enforced to be a provider address, so now has a more general name + } +} + +// ParseModuleSource attempts to parse source as a OpenTofu registry module +// source. If the string is not found to be in a valid format, +// ErrInvalidModuleSource is returned. Note that this can only be used on +// "input" strings, e.g. either ones supplied by the user or potentially +// normalised but in Display form (unicode). It will fail to parse a source with +// a punycoded domain since this is not permitted input from a user. If you have +// an already normalized string internally, you can compare it without parsing +// by comparing with the normalized version of the subject with the normal +// string equality operator. +func ParseModuleSource(source string) (*Module, error) { + // See if there is a friendly host prefix. 
+ host, rest := ParseFriendlyHost(source) + if host != nil { + if !host.Valid() || disallowed[host.Display()] { + return nil, ErrInvalidModuleSource + } + } + + matches := moduleSourceRe.FindStringSubmatch(rest) + if len(matches) < 4 { + return nil, ErrInvalidModuleSource + } + + m := &Module{ + RawHost: host, + RawNamespace: matches[1], + RawName: matches[2], + RawProvider: matches[3], + } + + if len(matches) == 5 { + m.RawSubmodule = matches[4] + } + + return m, nil +} + +// Display returns the source formatted for display to the user in CLI or web +// output. +func (m *Module) Display() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) +} + +// Normalized returns the source formatted for internal reference or comparison. +func (m *Module) Normalized() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) +} + +// String returns the source formatted as the user originally typed it assuming +// it was parsed from user input. +func (m *Module) String() string { + // Don't normalize public registry hostname - leave it exactly like the user + // input it. + hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had not host component this will return the +// PublicRegistryHost. 
+func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. +func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/pkg/registry/regsrc/module_test.go b/pkg/registry/regsrc/module_test.go new file mode 100644 index 00000000000..199b28a1c53 --- /dev/null +++ b/pkg/registry/regsrc/module_test.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package regsrc + +import ( + "testing" +) + +func TestModule(t *testing.T) { + tests := []struct { + name string + source string + wantString string + wantDisplay string + wantNorm string + wantErr bool + }{ + { + name: "public registry", + source: "hashicorp/consul/aws", + wantString: "hashicorp/consul/aws", + wantDisplay: "hashicorp/consul/aws", + wantNorm: "hashicorp/consul/aws", + wantErr: false, + }, + { + name: "public registry, submodule", + source: "hashicorp/consul/aws//foo", + wantString: "hashicorp/consul/aws//foo", + wantDisplay: "hashicorp/consul/aws//foo", + wantNorm: "hashicorp/consul/aws//foo", + wantErr: false, + }, + { + name: "public registry, explicit host", + source: "registry.opentofu.org/hashicorp/consul/aws", + wantString: "registry.opentofu.org/hashicorp/consul/aws", + wantDisplay: "hashicorp/consul/aws", + wantNorm: "hashicorp/consul/aws", + wantErr: false, + }, + { + name: "public registry, mixed case", + source: "HashiCorp/Consul/aws", + wantString: "HashiCorp/Consul/aws", + wantDisplay: "hashicorp/consul/aws", + wantNorm: "hashicorp/consul/aws", + wantErr: false, + }, + { + name: "private registry, custom port", + source: "Example.com:1234/HashiCorp/Consul/aws", + wantString: "Example.com:1234/HashiCorp/Consul/aws", + wantDisplay: "example.com:1234/hashicorp/consul/aws", + wantNorm: "example.com:1234/hashicorp/consul/aws", + wantErr: false, + }, + { + name: "IDN registry", + source: "Испытание.com/HashiCorp/Consul/aws", + wantString: "Испытание.com/HashiCorp/Consul/aws", + wantDisplay: "испытание.com/hashicorp/consul/aws", + wantNorm: "xn--80akhbyknj4f.com/hashicorp/consul/aws", + wantErr: false, + }, + { + name: "IDN registry, submodule, custom port", + source: "Испытание.com:1234/HashiCorp/Consul/aws//Foo", + wantString: "Испытание.com:1234/HashiCorp/Consul/aws//Foo", + // Note we DO lowercase submodule names. This might causes issues on + // some filesystems (e.g. 
HFS+) that are case-sensitive where + ////modules/Foo and //modules/foo describe different paths, but + // it's less confusing in general just to not support that. Any user + // with a module with submodules in both cases is already asking for + // portability issues, and tofu can ensure it does + // case-insensitive search for the dir in those cases. + wantDisplay: "испытание.com:1234/hashicorp/consul/aws//foo", + wantNorm: "xn--80akhbyknj4f.com:1234/hashicorp/consul/aws//foo", + wantErr: false, + }, + { + name: "invalid host", + source: "---.com/HashiCorp/Consul/aws", + wantErr: true, + }, + { + name: "invalid format", + source: "foo/var/baz/qux", + wantErr: true, + }, + { + name: "invalid suffix", + source: "foo/var/baz?otherthing", + wantErr: true, + }, + { + name: "valid host, invalid format", + source: "foo.com/var/baz?otherthing", + wantErr: true, + }, + { + name: "disallow github", + source: "github.com/HashiCorp/Consul/aws", + wantErr: true, + }, + { + name: "disallow bitbucket", + source: "bitbucket.org/HashiCorp/Consul/aws", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseModuleSource(tt.source) + + if (err != nil) != tt.wantErr { + t.Fatalf("ParseModuleSource() error = %v, wantErr %v", err, tt.wantErr) + } + + if err != nil { + return + } + + if v := got.String(); v != tt.wantString { + t.Fatalf("String() = %v, want %v", v, tt.wantString) + } + if v := got.Display(); v != tt.wantDisplay { + t.Fatalf("Display() = %v, want %v", v, tt.wantDisplay) + } + if v := got.Normalized(); v != tt.wantNorm { + t.Fatalf("Normalized() = %v, want %v", v, tt.wantNorm) + } + + gotDisplay, err := ParseModuleSource(tt.wantDisplay) + if err != nil { + t.Fatalf("ParseModuleSource(wantDisplay) error = %v", err) + } + if !got.Equal(gotDisplay) { + t.Fatalf("Equal() failed for %s and %s", tt.source, tt.wantDisplay) + } + }) + } +} diff --git a/pkg/registry/regsrc/regsrc.go b/pkg/registry/regsrc/regsrc.go new file 
mode 100644 index 00000000000..eefa2f654b6 --- /dev/null +++ b/pkg/registry/regsrc/regsrc.go @@ -0,0 +1,13 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package regsrc provides helpers for working with source strings that identify +// resources within a OpenTofu registry. +package regsrc + +var ( + // PublicRegistryHost is a FriendlyHost that represents the public registry. + PublicRegistryHost = NewFriendlyHost("registry.opentofu.org") +) diff --git a/pkg/registry/response/module.go b/pkg/registry/response/module.go new file mode 100644 index 00000000000..93907f5724f --- /dev/null +++ b/pkg/registry/response/module.go @@ -0,0 +1,98 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +import ( + "time" +) + +// Module is the response structure with the data for a single module version. +type Module struct { + ID string `json:"id"` + + //--------------------------------------------------------------- + // Metadata about the overall module. + + Owner string `json:"owner"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Version string `json:"version"` + Provider string `json:"provider"` + Description string `json:"description"` + Source string `json:"source"` + PublishedAt time.Time `json:"published_at"` + Downloads int `json:"downloads"` + Verified bool `json:"verified"` +} + +// ModuleDetail represents a module in full detail. +type ModuleDetail struct { + Module + + //--------------------------------------------------------------- + // Metadata about the overall module. This is only available when + // requesting the specific module (not in list responses). + + // Root is the root module. 
	Root *ModuleSubmodule `json:"root"`

	// Submodules are the other submodules that are available within
	// this module.
	Submodules []*ModuleSubmodule `json:"submodules"`

	//---------------------------------------------------------------
	// The fields below are only set when requesting this specific
	// module. They are available to easily know all available versions
	// and providers without multiple API calls.

	Providers []string `json:"providers"` // All available providers
	Versions  []string `json:"versions"`  // All versions
}

// ModuleSubmodule is the metadata about a specific submodule within
// a module. This includes the root module as a special case.
type ModuleSubmodule struct {
	Path   string `json:"path"`
	Readme string `json:"readme"`
	Empty  bool   `json:"empty"`

	Inputs       []*ModuleInput    `json:"inputs"`
	Outputs      []*ModuleOutput   `json:"outputs"`
	Dependencies []*ModuleDep      `json:"dependencies"`
	Resources    []*ModuleResource `json:"resources"`
}

// ModuleInput is a single input variable declared by a module.
type ModuleInput struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	Default     string `json:"default"`
}

// ModuleOutput is a single output value declared by a module.
type ModuleOutput struct {
	Name        string `json:"name"`
	Description string `json:"description"`
}

// ModuleDep describes a dependency of a module on another module.
type ModuleDep struct {
	Name    string `json:"name"`
	Source  string `json:"source"`
	Version string `json:"version"`
}

// ModuleProviderDep describes a dependency of a module on a provider.
type ModuleProviderDep struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

// ModuleResource is a single resource declared by a module.
+type ModuleResource struct { + Name string `json:"name"` + Type string `json:"type"` +} diff --git a/pkg/registry/response/module_download.go b/pkg/registry/response/module_download.go new file mode 100644 index 00000000000..d1a44e1b8d9 --- /dev/null +++ b/pkg/registry/response/module_download.go @@ -0,0 +1,11 @@ +// Copyright (c) OpenTofu +// SPDX-License-Identifier: MPL-2.0 + +package response + +// ModuleLocationRegistryResp defines the OpenTofu registry response +// returned when calling the endpoint /v1/modules/:namespace/:name/:system/:version/download +type ModuleLocationRegistryResp struct { + // The URL to download the module from. + Location string `json:"location"` +} diff --git a/pkg/registry/response/module_list.go b/pkg/registry/response/module_list.go new file mode 100644 index 00000000000..a7e11036880 --- /dev/null +++ b/pkg/registry/response/module_list.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +// ModuleList is the response structure for a pageable list of modules. +type ModuleList struct { + Meta PaginationMeta `json:"meta"` + Modules []*Module `json:"modules"` +} diff --git a/pkg/registry/response/module_provider.go b/pkg/registry/response/module_provider.go new file mode 100644 index 00000000000..90d0c651a69 --- /dev/null +++ b/pkg/registry/response/module_provider.go @@ -0,0 +1,19 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +// ModuleProvider represents a single provider for modules. +type ModuleProvider struct { + Name string `json:"name"` + Downloads int `json:"downloads"` + ModuleCount int `json:"module_count"` +} + +// ModuleProviderList is the response structure for a pageable list of ModuleProviders. 
+type ModuleProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*ModuleProvider `json:"providers"` +} diff --git a/pkg/registry/response/module_versions.go b/pkg/registry/response/module_versions.go new file mode 100644 index 00000000000..1790e25ebe8 --- /dev/null +++ b/pkg/registry/response/module_versions.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +// ModuleVersions is the response format that contains all metadata about module +// versions needed for tofu CLI to resolve version constraints. See RFC +// TF-042 for details on this format. +type ModuleVersions struct { + Modules []*ModuleProviderVersions `json:"modules"` +} + +// ModuleProviderVersions is the response format for a single module instance, +// containing metadata about all versions and their dependencies. +type ModuleProviderVersions struct { + Source string `json:"source"` + Versions []*ModuleVersion `json:"versions"` +} + +// ModuleVersion is the output metadata for a given version needed by CLI to +// resolve candidate versions to satisfy requirements. +type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. 
+type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/pkg/registry/response/pagination.go b/pkg/registry/response/pagination.go new file mode 100644 index 00000000000..40cf27a1286 --- /dev/null +++ b/pkg/registry/response/pagination.go @@ -0,0 +1,70 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. +type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). 
+ if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/pkg/registry/response/pagination_test.go b/pkg/registry/response/pagination_test.go new file mode 100644 index 00000000000..4d1e720a09c --- /dev/null +++ b/pkg/registry/response/pagination_test.go @@ -0,0 +1,123 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package response + +import ( + "encoding/json" + "testing" +) + +func prettyJSON(o interface{}) (string, error) { + bytes, err := json.MarshalIndent(o, "", "\t") + if err != nil { + return "", err + } + return string(bytes), nil +} + +func TestNewPaginationMeta(t *testing.T) { + type args struct { + offset int + limit int + hasMore bool + currentURL string + } + tests := []struct { + name string + args args + wantJSON string + }{ + { + name: "first page", + args: args{0, 10, true, "http://foo.com/v1/bar"}, + wantJSON: `{ + "limit": 10, + "current_offset": 0, + "next_offset": 10, + "next_url": "http://foo.com/v1/bar?offset=10" +}`, + }, + { + name: "second page", + args: args{10, 10, true, "http://foo.com/v1/bar"}, + wantJSON: `{ + "limit": 10, + "current_offset": 10, + "next_offset": 20, + "prev_offset": 0, + "next_url": "http://foo.com/v1/bar?offset=20", + "prev_url": "http://foo.com/v1/bar" +}`, + }, + { + name: "last page", + args: args{40, 10, false, "http://foo.com/v1/bar"}, + 
wantJSON: `{ + "limit": 10, + "current_offset": 40, + "prev_offset": 30, + "prev_url": "http://foo.com/v1/bar?offset=30" +}`, + }, + { + name: "misaligned start ending exactly on boundary", + args: args{32, 10, false, "http://foo.com/v1/bar"}, + wantJSON: `{ + "limit": 10, + "current_offset": 32, + "prev_offset": 22, + "prev_url": "http://foo.com/v1/bar?offset=22" +}`, + }, + { + name: "misaligned start partially through first page", + args: args{5, 12, true, "http://foo.com/v1/bar"}, + wantJSON: `{ + "limit": 12, + "current_offset": 5, + "next_offset": 17, + "prev_offset": 0, + "next_url": "http://foo.com/v1/bar?offset=17", + "prev_url": "http://foo.com/v1/bar" +}`, + }, + { + name: "no current URL", + args: args{10, 10, true, ""}, + wantJSON: `{ + "limit": 10, + "current_offset": 10, + "next_offset": 20, + "prev_offset": 0 +}`, + }, + { + name: "#58 regression test", + args: args{1, 3, true, ""}, + wantJSON: `{ + "limit": 3, + "current_offset": 1, + "next_offset": 4, + "prev_offset": 0 +}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := NewPaginationMeta(tt.args.offset, tt.args.limit, tt.args.hasMore, + tt.args.currentURL) + gotJSON, err := prettyJSON(got) + if err != nil { + t.Fatalf("failed to marshal PaginationMeta to JSON: %s", err) + } + if gotJSON != tt.wantJSON { + // prettyJSON makes debugging easier due to the annoying pointer-to-ints, but it + // also implicitly tests JSON marshalling as we can see if it's omitting fields etc. + t.Fatalf("NewPaginationMeta() =\n%s\n want:\n%s\n", gotJSON, tt.wantJSON) + } + }) + } +} diff --git a/pkg/registry/response/redirect.go b/pkg/registry/response/redirect.go new file mode 100644 index 00000000000..49cb0be0d44 --- /dev/null +++ b/pkg/registry/response/redirect.go @@ -0,0 +1,11 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package response + +// Redirect causes the frontend to perform a window redirect. +type Redirect struct { + URL string `json:"url"` +} diff --git a/pkg/registry/test/mock_registry.go b/pkg/registry/test/mock_registry.go new file mode 100644 index 00000000000..3a9fad31e72 --- /dev/null +++ b/pkg/registry/test/mock_registry.go @@ -0,0 +1,300 @@ +package test + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/kubegems/opentofu/pkg/httpclient" + "github.com/kubegems/opentofu/pkg/registry/regsrc" + "github.com/kubegems/opentofu/pkg/registry/response" + tfversion "github.com/kubegems/opentofu/version" +) + +// Disco return a *disco.Disco mapping registry.opentofu.org, localhost, +// localhost.localdomain, and example.com to the test server. +func Disco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + // Note that both with and without trailing slashes are supported behaviours + // TODO: add specific tests to enumerate both possibilities. + "modules.v1": fmt.Sprintf("%s/v1/modules", s.URL), + "providers.v1": fmt.Sprintf("%s/v1/providers", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + d.SetUserAgent(httpclient.OpenTofuUserAgent(tfversion.String())) + + d.ForceHostServices(svchost.Hostname("registry.opentofu.org"), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + d.ForceHostServices(svchost.Hostname("localhost.localdomain"), services) + d.ForceHostServices(svchost.Hostname("example.com"), services) + return d +} + +// Map of module names and location of test modules. +// Only one version for now, as we only lookup latest from the registry. 
+type testMod struct { + location string + version string +} + +// Map of provider names and location of test providers. +// Only one version for now, as we only lookup latest from the registry. +type testProvider struct { + version string + url string +} + +const ( + testCred = "test-auth-token" +) + +var ( + regHost = svchost.Hostname(regsrc.PublicRegistryHost.Normalized()) + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + regHost: {"token": testCred}, + }) +) + +// All the locationes from the mockRegistry start with a file:// scheme. If +// the location string here doesn't have a scheme, the mockRegistry will +// find the absolute path and return a complete URL. +var testMods = map[string][]testMod{ + "registry/foo/bar": {{ + location: "file:///download/registry/foo/bar/0.2.3//*?archive=tar.gz", + version: "0.2.3", + }}, + "registry/foo/baz": {{ + location: "file:///download/registry/foo/baz/1.10.0//*?archive=tar.gz", + version: "1.10.0", + }}, + "registry/local/sub": {{ + location: "testdata/registry-tar-subdir/foo.tgz//*?archive=tar.gz", + version: "0.1.2", + }}, + "exists-in-registry/identifier/provider": {{ + location: "file:///registry/exists", + version: "0.2.0", + }}, + "relative/foo/bar": {{ // There is an exception for the "relative/" prefix in the test registry server + location: "/relative-path", + version: "0.2.0", + }}, + "test-versions/name/provider": { + {version: "2.2.0"}, + {version: "2.1.1"}, + {version: "1.2.2"}, + {version: "1.2.1"}, + }, + "private/name/provider": { + {version: "1.0.0"}, + }, +} + +var testProviders = map[string][]testProvider{ + "-/foo": { + { + version: "0.2.3", + url: "https://releases.hashicorp.com/terraform-provider-foo/0.2.3/terraform-provider-foo.zip", + }, + {version: "0.3.0"}, + }, + "-/bar": { + { + version: "0.1.1", + url: "https://releases.hashicorp.com/terraform-provider-bar/0.1.1/terraform-provider-bar.zip", + }, + {version: "0.1.2"}, + }, +} + +func providerAlias(provider 
string) string { + re := regexp.MustCompile("^-/") + if re.MatchString(provider) { + return re.ReplaceAllString(provider, "terraform-providers/") + } + return provider +} + +func init() { + // Add provider aliases + for provider, info := range testProviders { + alias := providerAlias(provider) + testProviders[alias] = info + } +} + +func mockRegHandler(config map[uint8]struct{}) http.Handler { + mux := http.NewServeMux() + + moduleDownload := func(w http.ResponseWriter, r *http.Request) { + p := strings.TrimLeft(r.URL.Path, "/") + // handle download request + re := regexp.MustCompile(`^([-a-z]+/\w+/\w+).*/download$`) + // download lookup + matches := re.FindStringSubmatch(p) + if len(matches) != 2 { + w.WriteHeader(http.StatusBadRequest) + return + } + + // check for auth + if strings.Contains(matches[0], "private/") { + if !strings.Contains(r.Header.Get("Authorization"), testCred) { + http.Error(w, "", http.StatusForbidden) + return + } + } + + versions, ok := testMods[matches[1]] + if !ok { + http.NotFound(w, r) + return + } + mod := versions[0] + + location := mod.location + if !strings.HasPrefix(matches[0], "relative/") && !strings.HasPrefix(location, "file:///") { + // we can't use filepath.Abs because it will clean `//` + wd, _ := os.Getwd() + location = fmt.Sprintf("file://%s/%s", wd, location) + } + + // the location will be returned in the response header + _, inHeader := config[WithModuleLocationInHeader] + // the location will be returned in the response body + _, inBody := config[WithModuleLocationInBody] + + if inHeader { + w.Header().Set("X-Terraform-Get", location) + } + + if inBody { + w.WriteHeader(http.StatusOK) + o, err := json.Marshal(response.ModuleLocationRegistryResp{Location: location}) + if err != nil { + panic("mock error: " + err.Error()) + } + _, _ = w.Write(o) + return + } + + w.WriteHeader(http.StatusNoContent) + } + + moduleVersions := func(w http.ResponseWriter, r *http.Request) { + p := strings.TrimLeft(r.URL.Path, "/") + re := 
regexp.MustCompile(`^([-a-z]+/\w+/\w+)/versions$`) + matches := re.FindStringSubmatch(p) + if len(matches) != 2 { + w.WriteHeader(http.StatusBadRequest) + return + } + + // check for auth + if strings.Contains(matches[1], "private/") { + if !strings.Contains(r.Header.Get("Authorization"), testCred) { + http.Error(w, "", http.StatusForbidden) + } + } + + name := matches[1] + versions, ok := testMods[name] + if !ok { + http.NotFound(w, r) + return + } + + // only adding the single requested module for now + // this is the minimal that any regisry is epected to support + mpvs := &response.ModuleProviderVersions{ + Source: name, + } + + for _, v := range versions { + mv := &response.ModuleVersion{ + Version: v.version, + } + mpvs.Versions = append(mpvs.Versions, mv) + } + + resp := response.ModuleVersions{ + Modules: []*response.ModuleProviderVersions{mpvs}, + } + + js, err := json.Marshal(resp) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(js) + } + + mux.Handle("/v1/modules/", + http.StripPrefix("/v1/modules/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/download") { + moduleDownload(w, r) + return + } + + if strings.HasSuffix(r.URL.Path, "/versions") { + moduleVersions(w, r) + return + } + + http.NotFound(w, r) + })), + ) + + mux.HandleFunc("/.well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"modules.v1":"http://localhost/v1/modules/", "providers.v1":"http://localhost/v1/providers/"}`) + }) + return mux +} + +const ( + // WithModuleLocationInBody sets to return the module's location in the response body + WithModuleLocationInBody uint8 = iota + // WithModuleLocationInHeader sets to return the module's location in the response header + WithModuleLocationInHeader +) + +// Registry returns an httptest 
server that mocks out some registry functionality. +func Registry(flags ...uint8) *httptest.Server { + if len(flags) == 0 { + return httptest.NewServer(mockRegHandler( + map[uint8]struct{}{ + // default setting + WithModuleLocationInBody: {}, + }, + )) + } + + cfg := map[uint8]struct{}{} + for _, flag := range flags { + cfg[flag] = struct{}{} + } + return httptest.NewServer(mockRegHandler(cfg)) +} + +// RegistryRetryableErrorsServer returns an httptest server that mocks out the +// registry API to return 502 errors. +func RegistryRetryableErrorsServer() *httptest.Server { + mux := http.NewServeMux() + mux.HandleFunc("/v1/modules/", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "mocked server error", http.StatusBadGateway) + }) + mux.HandleFunc("/v1/providers/", func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "mocked server error", http.StatusBadGateway) + }) + return httptest.NewServer(mux) +} diff --git a/pkg/repl/format.go b/pkg/repl/format.go new file mode 100644 index 00000000000..1d35c786e27 --- /dev/null +++ b/pkg/repl/format.go @@ -0,0 +1,178 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package repl + +import ( + "fmt" + "strconv" + "strings" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +// FormatValue formats a value in a way that resembles OpenTofu language syntax +// and uses the type conversion functions where necessary to indicate exactly +// what type it is given, so that equality test failures can be quickly +// understood. 
// FormatValue formats a value in a way that resembles OpenTofu language syntax
// and uses the type conversion functions where necessary to indicate exactly
// what type it is given, so that equality test failures can be quickly
// understood.
func FormatValue(v cty.Value, indent int) string {
	// Unknown and sensitive values render as opaque placeholders, before any
	// type-specific handling.
	if !v.IsKnown() {
		return "(known after apply)"
	}
	if v.HasMark(marks.Sensitive) {
		return "(sensitive value)"
	}
	if v.IsNull() {
		// Wrap null in the matching conversion function so the intended type
		// is still visible; a bare "null" is used only for the fully dynamic
		// type. Collection element types are noted in a trailing comment.
		ty := v.Type()
		switch {
		case ty == cty.DynamicPseudoType:
			return "null"
		case ty == cty.String:
			return "tostring(null)"
		case ty == cty.Number:
			return "tonumber(null)"
		case ty == cty.Bool:
			return "tobool(null)"
		case ty.IsListType():
			return fmt.Sprintf("tolist(null) /* of %s */", ty.ElementType().FriendlyName())
		case ty.IsSetType():
			return fmt.Sprintf("toset(null) /* of %s */", ty.ElementType().FriendlyName())
		case ty.IsMapType():
			return fmt.Sprintf("tomap(null) /* of %s */", ty.ElementType().FriendlyName())
		default:
			return fmt.Sprintf("null /* %s */", ty.FriendlyName())
		}
	}

	ty := v.Type()
	switch {
	case ty.IsPrimitiveType():
		switch ty {
		case cty.String:
			// Strings containing newlines render as heredocs; everything
			// else as a quoted literal.
			if formatted, isMultiline := formatMultilineString(v, indent); isMultiline {
				return formatted
			}
			return strconv.Quote(v.AsString())
		case cty.Number:
			bf := v.AsBigFloat()
			// 'f' with precision -1 gives the shortest decimal form that
			// round-trips the value exactly.
			return bf.Text('f', -1)
		case cty.Bool:
			if v.True() {
				return "true"
			} else {
				return "false"
			}
		}
	case ty.IsObjectType():
		return formatMappingValue(v, indent)
	case ty.IsTupleType():
		return formatSequenceValue(v, indent)
	case ty.IsListType():
		// Lists/sets/maps are wrapped in their conversion function to
		// distinguish them from tuples/objects, mirroring the null handling
		// above.
		return fmt.Sprintf("tolist(%s)", formatSequenceValue(v, indent))
	case ty.IsSetType():
		return fmt.Sprintf("toset(%s)", formatSequenceValue(v, indent))
	case ty.IsMapType():
		return fmt.Sprintf("tomap(%s)", formatMappingValue(v, indent))
	}

	// Should never get here because there are no other types
	return fmt.Sprintf("%#v", v)
}
+ operator := "<<" + if indent > 0 { + operator = "<<-" + } + + // Default delimiter is "End Of Text" by convention + delimiter := "EOT" + +OUTER: + for { + // Check if any of the lines are in conflict with the delimiter. The + // parser allows leading and trailing whitespace, so we must remove it + // before comparison. + for _, line := range lines { + // If the delimiter matches a line, extend it and start again + if strings.TrimSpace(line) == delimiter { + delimiter = delimiter + "_" + continue OUTER + } + } + + // None of the lines match the delimiter, so we're ready + break + } + + // Write the heredoc, with indentation as appropriate. + var buf strings.Builder + + buf.WriteString(operator) + buf.WriteString(delimiter) + for _, line := range lines { + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(line) + } + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(delimiter) + + return buf.String(), true +} + +func formatMappingValue(v cty.Value, indent int) string { + var buf strings.Builder + count := 0 + buf.WriteByte('{') + indent += 2 + for it := v.ElementIterator(); it.Next(); { + count++ + k, v := it.Element() + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(FormatValue(k, indent)) + buf.WriteString(" = ") + buf.WriteString(FormatValue(v, indent)) + } + indent -= 2 + if count > 0 { + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + } + buf.WriteByte('}') + return buf.String() +} + +func formatSequenceValue(v cty.Value, indent int) string { + var buf strings.Builder + count := 0 + buf.WriteByte('[') + indent += 2 + for it := v.ElementIterator(); it.Next(); { + count++ + _, v := it.Element() + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(FormatValue(v, indent)) + buf.WriteByte(',') + } + indent -= 2 + if count > 0 { + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + } + 
buf.WriteByte(']') + return buf.String() +} diff --git a/pkg/repl/format_test.go b/pkg/repl/format_test.go new file mode 100644 index 00000000000..b2f88e5f657 --- /dev/null +++ b/pkg/repl/format_test.go @@ -0,0 +1,192 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package repl + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestFormatValue(t *testing.T) { + tests := []struct { + Val cty.Value + Want string + }{ + { + cty.NullVal(cty.DynamicPseudoType), + `null`, + }, + { + cty.NullVal(cty.String), + `tostring(null)`, + }, + { + cty.NullVal(cty.Number), + `tonumber(null)`, + }, + { + cty.NullVal(cty.Bool), + `tobool(null)`, + }, + { + cty.NullVal(cty.List(cty.String)), + `tolist(null) /* of string */`, + }, + { + cty.NullVal(cty.Set(cty.Number)), + `toset(null) /* of number */`, + }, + { + cty.NullVal(cty.Map(cty.Bool)), + `tomap(null) /* of bool */`, + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{"a": cty.Bool})), + `null /* object */`, // Ideally this would display the full object type, including its attributes + }, + { + cty.UnknownVal(cty.DynamicPseudoType), + `(known after apply)`, + }, + { + cty.StringVal(""), + `""`, + }, + { + cty.StringVal("hello"), + `"hello"`, + }, + { + cty.StringVal("hello\nworld"), + `<", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return "", diags + } + + val, valDiags := s.Scope.EvalExpr(expr, cty.DynamicPseudoType) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return "", diags + } + + // The TypeType mark is used only by the console-only `type` function, in + // order to smuggle the type of a given value back here. We can then + // display a representation of the type directly. 
+ if marks.Contains(val, marks.TypeType) { + val, _ = val.UnmarkDeep() + + valType := val.Type() + switch { + case valType.Equals(types.TypeType): + // An encapsulated type value, which should be displayed directly. + valType := val.EncapsulatedValue().(*cty.Type) + return typeString(*valType), diags + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid use of type function", + "The console-only \"type\" function cannot be used as part of an expression.", + )) + return "", diags + } + } + + return FormatValue(val, 0), diags +} + +func (s *Session) handleHelp() (string, tfdiags.Diagnostics) { + text := ` +The OpenTofu console allows you to experiment with OpenTofu interpolations. +You may access resources in the state (if you have one) just as you would +from a configuration. For example: "aws_instance.foo.id" would evaluate +to the ID of "aws_instance.foo" if it exists in your state. + +Type in the interpolation to test and hit to see the result. + +To exit the console, type "exit" and hit , or use Control-C or +Control-D. +` + + return strings.TrimSpace(text), nil +} + +// Modified copy of TypeString from go-cty: +// https://github.com/zclconf/go-cty-debug/blob/master/ctydebug/type_string.go +// +// TypeString returns a string representation of a given type that is +// reminiscent of Go syntax calling into the cty package but is mainly +// intended for easy human inspection of values in tests, debug output, etc. +// +// The resulting string will include newlines and indentation in order to +// increase the readability of complex structures. It always ends with a +// newline, so you can print this result directly to your output. 
// Modified copy of TypeString from go-cty:
// https://github.com/zclconf/go-cty-debug/blob/master/ctydebug/type_string.go
//
// TypeString returns a string representation of a given type that is
// reminiscent of Go syntax calling into the cty package but is mainly
// intended for easy human inspection of values in tests, debug output, etc.
//
// The resulting string will include newlines and indentation in order to
// increase the readability of complex structures. It always ends with a
// newline, so you can print this result directly to your output.
func typeString(ty cty.Type) string {
	var b strings.Builder
	writeType(ty, &b, 0)
	return b.String()
}

// writeType appends the rendering of ty to b. indent is the current nesting
// level, translated into leading whitespace by indentSpaces. Object and
// tuple types render across multiple lines; everything else is a single
// token or a one-line wrapper.
func writeType(ty cty.Type, b *strings.Builder, indent int) {
	switch {
	case ty == cty.NilType:
		b.WriteString("nil")
		return
	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		if len(atys) == 0 {
			b.WriteString("object({})")
			return
		}
		// Sort attribute names so the rendering is deterministic; map
		// iteration order is random.
		attrNames := make([]string, 0, len(atys))
		for name := range atys {
			attrNames = append(attrNames, name)
		}
		sort.Strings(attrNames)
		b.WriteString("object({\n")
		indent++
		for _, name := range attrNames {
			aty := atys[name]
			b.WriteString(indentSpaces(indent))
			fmt.Fprintf(b, "%s: ", name)
			writeType(aty, b, indent)
			b.WriteString(",\n")
		}
		indent--
		b.WriteString(indentSpaces(indent))
		b.WriteString("})")
	case ty.IsTupleType():
		etys := ty.TupleElementTypes()
		if len(etys) == 0 {
			b.WriteString("tuple([])")
			return
		}
		b.WriteString("tuple([\n")
		indent++
		for _, ety := range etys {
			b.WriteString(indentSpaces(indent))
			writeType(ety, b, indent)
			b.WriteString(",\n")
		}
		indent--
		b.WriteString(indentSpaces(indent))
		b.WriteString("])")
	case ty.IsCollectionType():
		ety := ty.ElementType()
		switch {
		case ty.IsListType():
			b.WriteString("list(")
		case ty.IsMapType():
			b.WriteString("map(")
		case ty.IsSetType():
			b.WriteString("set(")
		default:
			// At the time of writing there are no other collection types,
			// but we'll be robust here and just pass through the GoString
			// of anything we don't recognize.
			b.WriteString(ty.FriendlyName())
			return
		}
		// Because object and tuple types render split over multiple
		// lines, a collection type container around them can end up
		// being hard to see when scanning, so we'll generate some extra
		// indentation to make a collection of structural type more visually
		// distinct from the structural type alone.
		complexElem := ety.IsObjectType() || ety.IsTupleType()
		if complexElem {
			indent++
			b.WriteString("\n")
			b.WriteString(indentSpaces(indent))
		}
		writeType(ty.ElementType(), b, indent)
		if complexElem {
			indent--
			b.WriteString(",\n")
			b.WriteString(indentSpaces(indent))
		}
		b.WriteString(")")
	default:
		// For any other type we'll just use its GoString and assume it'll
		// follow the usual GoString conventions.
		b.WriteString(ty.FriendlyName())
	}
}

// indentSpaces returns the leading whitespace for the given nesting level.
func indentSpaces(level int) string {
	return strings.Repeat(" ", level)
}
addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("module", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + t.Run("basic", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "test_instance.foo.id", + Output: `"bar"`, + }, + }, + }) + }) + + t.Run("missing resource", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "test_instance.bar.id", + Error: true, + ErrorContains: `A managed resource "test_instance" "bar" has not been declared`, + }, + }, + }) + }) + + t.Run("missing module", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "module.child", + Error: true, + ErrorContains: `No module call named "child" is declared in the root module.`, + }, + }, + }) + }) + + t.Run("missing module referencing just one output", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "module.child.foo", + Error: true, + ErrorContains: `No module call named "child" is declared in the root module.`, + }, + }, + }) + }) + + t.Run("missing module output", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "module.module.foo", + Error: true, + ErrorContains: `Unsupported attribute: This object does not have an attribute named "foo"`, + }, + }, + }) + }) + + t.Run("type function", func(t *testing.T) { + testSession(t, testSessionTest{ + State: state, + Inputs: []testSessionInput{ + { + Input: "type(test_instance.foo)", + Output: `object({ + id: string, +})`, + }, + }, + }) + }) +} + +func 
TestSession_stateless(t *testing.T) { + t.Run("exit", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: "exit", + Exit: true, + }, + }, + }) + }) + + t.Run("help", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: "help", + OutputContains: "allows you to", + }, + }, + }) + }) + + t.Run("help with spaces", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: "help ", + OutputContains: "allows you to", + }, + }, + }) + }) + + t.Run("basic math", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: "1 + 5", + Output: "6", + }, + }, + }) + }) + + t.Run("missing resource", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: "test_instance.bar.id", + Error: true, + ErrorContains: `resource "test_instance" "bar" has not been declared`, + }, + }, + }) + }) + + t.Run("type function", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: `type("foo")`, + Output: "string", + }, + }, + }) + }) + + t.Run("type type is type", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: `type(type("foo"))`, + Output: "type", + }, + }, + }) + }) + + t.Run("interpolating type with strings is not possible", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: `"quin${type([])}"`, + Error: true, + ErrorContains: "Invalid template interpolation value", + }, + }, + }) + }) + + t.Run("type function cannot be used in expressions", func(t *testing.T) { + testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: `[for i in [1, "two", true]: type(i)]`, + Output: "", + Error: true, + ErrorContains: "Invalid use of type function", + }, + }, + }) + }) + + t.Run("type equality checks are not permitted", func(t *testing.T) { + 
testSession(t, testSessionTest{ + Inputs: []testSessionInput{ + { + Input: `type("foo") == type("bar")`, + Output: "", + Error: true, + ErrorContains: "Invalid use of type function", + }, + }, + }) + }) +} + +func testSession(t *testing.T, test testSessionTest) { + t.Helper() + + p := &tofu.MockProvider{} + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + + config, _, cleanup, configDiags := initwd.LoadConfigForTests(t, "testdata/config-fixture", "tests") + defer cleanup() + if configDiags.HasErrors() { + t.Fatalf("unexpected problems loading config: %s", configDiags.Err()) + } + + // Build the TF context + ctx, diags := tofu.NewContext(&tofu.ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): providers.FactoryFixed(p), + }, + }) + if diags.HasErrors() { + t.Fatalf("failed to create context: %s", diags.Err()) + } + + state := test.State + if state == nil { + state = states.NewState() + } + scope, diags := ctx.Eval(config, state, addrs.RootModuleInstance, &tofu.EvalOpts{}) + if diags.HasErrors() { + t.Fatalf("failed to create scope: %s", diags.Err()) + } + + // Ensure that any console-only functions are available + scope.ConsoleMode = true + + // Build the session + s := &Session{ + Scope: scope, + } + + // Test the inputs. We purposely don't use subtests here because + // the inputs don't represent subtests, but a sequence of stateful + // operations. 
+ for _, input := range test.Inputs { + result, exit, diags := s.Handle(input.Input) + if exit != input.Exit { + t.Fatalf("incorrect 'exit' result %t; want %t", exit, input.Exit) + } + if (diags.HasErrors()) != input.Error { + t.Fatalf("%q: unexpected errors: %s", input.Input, diags.Err()) + } + if diags.HasErrors() { + if input.ErrorContains != "" { + if !strings.Contains(diags.Err().Error(), input.ErrorContains) { + t.Fatalf( + "%q: diagnostics should contain: %q\n\n%s", + input.Input, input.ErrorContains, diags.Err(), + ) + } + } + + continue + } + + if input.Output != "" && result != input.Output { + t.Fatalf( + "%q: expected:\n\n%s\n\ngot:\n\n%s", + input.Input, input.Output, result) + } + + if input.OutputContains != "" && !strings.Contains(result, input.OutputContains) { + t.Fatalf( + "%q: expected contains:\n\n%s\n\ngot:\n\n%s", + input.Input, input.OutputContains, result) + } + } +} + +type testSessionTest struct { + State *states.State // State to use + Module string // Module name in testdata to load + + // Inputs are the list of test inputs that are run in order. + // Each input can test the output of each step. + Inputs []testSessionInput +} + +// testSessionInput is a single input to test for a session. 
+type testSessionInput struct { + Input string // Input string + Output string // Exact output string to check + OutputContains string + Error bool // Error is true if error is expected + Exit bool // Exit is true if exiting is expected + ErrorContains string +} + +func TestTypeString(t *testing.T) { + tests := []struct { + Input cty.Value + Want string + }{ + // Primititves + { + cty.StringVal("a"), + "string", + }, + { + cty.NumberIntVal(42), + "number", + }, + { + cty.BoolVal(true), + "bool", + }, + // Collections + { + cty.EmptyObjectVal, + `object({})`, + }, + { + cty.EmptyTupleVal, + `tuple([])`, + }, + { + cty.ListValEmpty(cty.String), + `list(string)`, + }, + { + cty.MapValEmpty(cty.String), + `map(string)`, + }, + { + cty.SetValEmpty(cty.String), + `set(string)`, + }, + { + cty.ListVal([]cty.Value{cty.StringVal("a")}), + `list(string)`, + }, + { + cty.ListVal([]cty.Value{cty.ListVal([]cty.Value{cty.NumberIntVal(42)})}), + `list(list(number))`, + }, + { + cty.ListVal([]cty.Value{cty.MapValEmpty(cty.String)}), + `list(map(string))`, + }, + { + cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + })}), + "list(\n object({\n foo: string,\n }),\n)", + }, + // Unknowns and Nulls + { + cty.UnknownVal(cty.String), + "string", + }, + { + cty.NullVal(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + "object({\n foo: string,\n})", + }, + { // irrelevant marks do nothing + cty.ListVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar").Mark("ignore me"), + })}), + "list(\n object({\n foo: string,\n }),\n)", + }, + } + for _, test := range tests { + got := typeString(test.Input.Type()) + if got != test.Want { + t.Errorf("wrong result:\n%s", cmp.Diff(got, test.Want)) + } + } +} diff --git a/pkg/repl/testdata/config-fixture/child/empty.tf b/pkg/repl/testdata/config-fixture/child/empty.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/pkg/repl/testdata/config-fixture/repl_test.tf b/pkg/repl/testdata/config-fixture/repl_test.tf new file mode 100644 index 00000000000..09120849947 --- /dev/null +++ b/pkg/repl/testdata/config-fixture/repl_test.tf @@ -0,0 +1,11 @@ + +# This configuration is just here to allow the tests in session_test to +# evaluate expressions without getting errors about things not being declared. +# Therefore it's intended to just be the minimum config to make those +# expressions work against the equally-minimal mock provider. +resource "test_instance" "foo" { +} + +module "module" { + source = "./child" +} diff --git a/pkg/replacefile/doc.go b/pkg/replacefile/doc.go new file mode 100644 index 00000000000..db7eeeee68d --- /dev/null +++ b/pkg/replacefile/doc.go @@ -0,0 +1,17 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package replacefile is a small helper package focused directly at the +// problem of atomically "renaming" one file over another one. +// +// On Unix systems this is the standard behavior of the rename function, but +// the equivalent operation on Windows requires some specific operation flags +// which this package encapsulates. +// +// This package uses conditional compilation to select a different +// implementation for Windows vs. all other platforms. It may therefore +// require further fiddling in future if OpenTofu is ported to another +// OS that is neither Unix-like nor Windows. +package replacefile diff --git a/pkg/replacefile/replacefile_unix.go b/pkg/replacefile/replacefile_unix.go new file mode 100644 index 00000000000..34720e6a845 --- /dev/null +++ b/pkg/replacefile/replacefile_unix.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows +// +build !windows + +package replacefile + +import ( + "os" +) + +// AtomicRename renames from the source path to the destination path, +// atomically replacing any file that might already exist at the destination. +// +// Typically this operation can succeed only if the source and destination +// are within the same physical filesystem, so this function is best reserved +// for cases where the source and destination exist in the same directory and +// only the local filename differs between them. +// +// The Unix implementation of AtomicRename relies on the atomicity of renaming +// that is required by the ISO C standard, which in turn assumes that Go's +// implementation of rename is calling into a system call that preserves that +// guarantee. +func AtomicRename(source, destination string) error { + // On Unix systems, a rename is sufficiently atomic. + return os.Rename(source, destination) +} diff --git a/pkg/replacefile/replacefile_windows.go b/pkg/replacefile/replacefile_windows.go new file mode 100644 index 00000000000..e77c1c17f64 --- /dev/null +++ b/pkg/replacefile/replacefile_windows.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package replacefile + +import ( + "os" + "syscall" + + "golang.org/x/sys/windows" +) + +// AtomicRename renames from the source path to the destination path, +// atomically replacing any file that might already exist at the destination. +// +// Typically this operation can succeed only if the source and destination +// are within the same physical filesystem, so this function is best reserved +// for cases where the source and destination exist in the same directory and +// only the local filename differs between them. 
+func AtomicRename(source, destination string) error { + // On Windows, renaming one file over another is not atomic and certain + // error conditions can result in having only the source file and nothing + // at the destination file. Instead, we need to call into the MoveFileEx + // Windows API function, setting two flags to opt in to replacing an + // existing file. + srcPtr, err := syscall.UTF16PtrFromString(source) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + destPtr, err := syscall.UTF16PtrFromString(destination) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + + flags := uint32(windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH) + err = windows.MoveFileEx(srcPtr, destPtr, flags) + if err != nil { + return &os.LinkError{"replace", source, destination, err} + } + return nil +} diff --git a/pkg/replacefile/writefile.go b/pkg/replacefile/writefile.go new file mode 100644 index 00000000000..7d47ab4f546 --- /dev/null +++ b/pkg/replacefile/writefile.go @@ -0,0 +1,81 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package replacefile + +import ( + "fmt" + "os" + "path/filepath" +) + +// AtomicWriteFile uses a temporary file along with this package's AtomicRename +// function in order to provide a replacement for ioutil.WriteFile that +// writes the given file into place as atomically as the underlying operating +// system can support. +// +// The sense of "atomic" meant by this function is that the file at the +// given filename will either contain the entirety of the previous contents +// or the entirety of the given data array if opened and read at any point +// during the execution of the function. +// +// On some platforms attempting to overwrite a file that has at least one +// open filehandle will produce an error. 
On other platforms, the overwriting +// will succeed but existing open handles will still refer to the old file, +// even though its directory entry is no longer present. +// +// Although AtomicWriteFile tries its best to avoid leaving behind its +// temporary file on error, some particularly messy error cases may result +// in a leftover temporary file. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + dir, file := filepath.Split(filename) + if dir == "" { + // If the file is in the current working directory then dir will + // end up being "", but that's not right here because TempFile + // treats an empty dir as meaning "use the TMPDIR environment variable". + dir = "." + } + f, err := os.CreateTemp(dir, file) // alongside target file and with a similar name + if err != nil { + return fmt.Errorf("cannot create temporary file to update %s: %w", filename, err) + } + tmpName := f.Name() + moved := false + defer func(f *os.File, name string) { + // Remove the temporary file if it hasn't been moved yet. We're + // ignoring errors here because there's nothing we can do about + // them anyway. + if !moved { + os.Remove(name) + } + }(f, tmpName) + + // We'll try to apply the requested permissions. This may + // not be effective on all platforms, but should at least work on + // Unix-like targets and should be harmless elsewhere. + if err := os.Chmod(tmpName, perm); err != nil { + return fmt.Errorf("cannot set mode for temporary file %s: %w", tmpName, err) + } + + // Write the credentials to the temporary file, then immediately close + // it, whether or not the write succeeds. Note that closing the file here + // is required because on Windows we can't move a file while it's open. + _, err = f.Write(data) + f.Close() + if err != nil { + return fmt.Errorf("cannot write to temporary file %s: %w", tmpName, err) + } + + // Temporary file now replaces the original file, as atomically as + // possible. 
(At the very least, we should not end up with a file
+	// containing only a partial JSON object.)
+	err = AtomicRename(tmpName, filename)
+	if err != nil {
+		return fmt.Errorf("failed to replace %s with temporary file %s: %w", filename, tmpName, err)
+	}
+
+	moved = true
+	return nil
+}
diff --git a/pkg/states/checks.go b/pkg/states/checks.go new file mode 100644 index 00000000000..dcd0bfaeb77 --- /dev/null +++ b/pkg/states/checks.go @@ -0,0 +1,187 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package states
+
+import (
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/checks"
+)
+
+// CheckResults represents a summary snapshot of the status of a set of checks
+// declared in configuration, updated after each OpenTofu Core run that
+// changes the state or remote system in a way that might impact the check
+// results.
+//
+// Unlike a checks.State, this type only tracks the overall results for
+// each checkable object and doesn't aim to preserve the identity of individual
+// checks in the configuration. For our UI reporting purposes, it is entire
+// objects that pass or fail based on their declared checks; the individual
+// checks have no durable identity between runs, and so are only a language
+// design convenience to help authors describe various independent conditions
+// with different failure messages each.
+//
+// CheckResults should typically be considered immutable once constructed:
+// instead of updating it in place, instead construct an entirely new
+// CheckResults object based on a fresh checks.State.
+type CheckResults struct {
+	// ConfigResults has all of the individual check results grouped by the
+	// configuration object they relate to.
+ // + // The top-level map here will always have a key for every configuration + // object that includes checks at the time of evaluating the results, + // even if there turned out to be no instances of that object and + // therefore no individual check results. + ConfigResults addrs.Map[addrs.ConfigCheckable, *CheckResultAggregate] +} + +// CheckResultAggregate represents both the overall result for a particular +// configured object that has checks and the individual checkable objects +// it declared, if any. +type CheckResultAggregate struct { + // Status is the aggregate status across all objects. + // + // Sometimes an error or check failure during planning will prevent + // OpenTofu Core from even determining the individual checkable objects + // associated with a downstream configuration object, and that situation is + // described here by this Status being checks.StatusUnknown and there being + // no elements in the ObjectResults field. + // + // That's different than OpenTofu Core explicitly reporting that there are + // no instances of the config object (e.g. a resource with count = 0), + // which leads to the aggregate status being checks.StatusPass while + // ObjectResults is still empty. + Status checks.Status + + ObjectResults addrs.Map[addrs.Checkable, *CheckResultObject] +} + +// CheckResultObject is the check status for a single checkable object. +// +// This aggregates together all of the checks associated with a particular +// object into a single pass/fail/error/unknown result, because checkable +// objects have durable addresses that can survive between runs, but their +// individual checks do not. (Module authors are free to reorder their checks +// for a particular object in the configuration with no change in meaning.) +type CheckResultObject struct { + // Status is the check status of the checkable object, derived from the + // results of all of its individual checks. 
+ Status checks.Status + + // FailureMessages is an optional set of module-author-defined messages + // describing the problems that the checks detected, for objects whose + // status is checks.StatusFail. + // + // (checks.StatusError problems get reported as normal diagnostics during + // evaluation instead, and so will not appear here.) + FailureMessages []string +} + +// NewCheckResults constructs a new states.CheckResults object that is a +// snapshot of the check statuses recorded in the given checks.State object. +// +// This should be called only after a OpenTofu Core run has completed and +// recorded any results from running the checks in the given object. +func NewCheckResults(source *checks.State) *CheckResults { + ret := &CheckResults{ + ConfigResults: addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate](), + } + + for _, configAddr := range source.AllConfigAddrs() { + aggr := &CheckResultAggregate{ + Status: source.AggregateCheckStatus(configAddr), + ObjectResults: addrs.MakeMap[addrs.Checkable, *CheckResultObject](), + } + + for _, objectAddr := range source.ObjectAddrs(configAddr) { + obj := &CheckResultObject{ + Status: source.ObjectCheckStatus(objectAddr), + FailureMessages: source.ObjectFailureMessages(objectAddr), + } + aggr.ObjectResults.Put(objectAddr, obj) + } + + ret.ConfigResults.Put(configAddr, aggr) + } + + // If there aren't actually any configuration objects then we'll just + // leave the map as a whole nil, because having it be zero-value makes + // life easier for deep comparisons in unit tests elsewhere. + if ret.ConfigResults.Len() == 0 { + ret.ConfigResults.Elems = nil + } + + return ret +} + +// GetObjectResult looks up the result for a single object, or nil if there +// is no such object. 
+// +// In main code we shouldn't typically need to look up individual objects +// like this, since we'll usually be reporting check results in an aggregate +// form, but determining the result of a particular object is useful in our +// internal unit tests, and so this is here primarily for that purpose. +func (r *CheckResults) GetObjectResult(objectAddr addrs.Checkable) *CheckResultObject { + configAddr := objectAddr.ConfigCheckable() + + aggr := r.ConfigResults.Get(configAddr) + if aggr == nil { + return nil + } + + return aggr.ObjectResults.Get(objectAddr) +} + +func (r *CheckResults) DeepCopy() *CheckResults { + if r == nil { + return nil + } + ret := &CheckResults{} + if r.ConfigResults.Elems == nil { + return ret + } + + ret.ConfigResults = addrs.MakeMap[addrs.ConfigCheckable, *CheckResultAggregate]() + + for _, configElem := range r.ConfigResults.Elems { + aggr := &CheckResultAggregate{ + Status: configElem.Value.Status, + } + + if configElem.Value.ObjectResults.Elems != nil { + aggr.ObjectResults = addrs.MakeMap[addrs.Checkable, *CheckResultObject]() + + for _, objectElem := range configElem.Value.ObjectResults.Elems { + result := &CheckResultObject{ + Status: objectElem.Value.Status, + + // NOTE: We don't deep-copy this slice because it's + // immutable once constructed by convention. + FailureMessages: objectElem.Value.FailureMessages, + } + aggr.ObjectResults.Put(objectElem.Key, result) + } + } + + ret.ConfigResults.Put(configElem.Key, aggr) + } + + return ret +} + +// ObjectAddrsKnown determines whether the set of objects recorded in this +// aggregate is accurate (true) or if it's incomplete as a result of the +// run being interrupted before instance expansion. +func (r *CheckResultAggregate) ObjectAddrsKnown() bool { + if r.ObjectResults.Len() != 0 { + // If there are any object results at all then we definitely know. 
+ return true + } + + // If we don't have any object addresses then we distinguish a known + // empty set of objects from an unknown set of objects by the aggregate + // status being unknown. + return r.Status != checks.StatusUnknown +} diff --git a/pkg/states/doc.go b/pkg/states/doc.go new file mode 100644 index 00000000000..daef408cfae --- /dev/null +++ b/pkg/states/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package states contains the types that are used to represent OpenTofu +// states. +package states diff --git a/pkg/states/instance_generation.go b/pkg/states/instance_generation.go new file mode 100644 index 00000000000..775323f2185 --- /dev/null +++ b/pkg/states/instance_generation.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +// Generation is used to represent multiple objects in a succession of objects +// represented by a single resource instance address. A resource instance can +// have multiple generations over its lifetime due to object replacement +// (when a change can't be applied without destroying and re-creating), and +// multiple generations can exist at the same time when create_before_destroy +// is used. +// +// A Generation value can either be the value of the variable "CurrentGen" or +// a value of type DeposedKey. Generation values can be compared for equality +// using "==" and used as map keys. The zero value of Generation (nil) is not +// a valid generation and must not be used. +type Generation interface { + generation() +} + +// CurrentGen is the Generation representing the currently-active object for +// a resource instance. 
+var CurrentGen Generation
diff --git a/pkg/states/instance_object.go b/pkg/states/instance_object.go new file mode 100644 index 00000000000..02fdf4c1f43 --- /dev/null +++ b/pkg/states/instance_object.go @@ -0,0 +1,153 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package states
+
+import (
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+)
+
+// ResourceInstanceObject is the local representation of a specific remote
+// object associated with a resource instance. In practice not all remote
+// objects are actually remote in the sense of being accessed over the network,
+// but this is the most common case.
+//
+// It is not valid to mutate a ResourceInstanceObject once it has been created.
+// Instead, create a new object and replace the existing one.
+type ResourceInstanceObject struct {
+	// Value is the object-typed value representing the remote object within
+	// OpenTofu.
+	Value cty.Value
+
+	// Private is an opaque value set by the provider when this object was
+	// last created or updated. OpenTofu Core does not use this value in
+	// any way and it is not exposed anywhere in the user interface, so
+	// a provider can use it for retaining any necessary private state.
+	Private []byte
+
+	// Status represents the "readiness" of the object as of the last time
+	// it was updated.
+	Status ObjectStatus
+
+	// Dependencies is a set of absolute addresses of other resources this
+	// instance depended on when it was applied. This is used to construct
+	// the dependency relationships for an object whose configuration is no
+	// longer available, such as if it has been removed from configuration
+	// altogether, or is now deposed.
+	Dependencies []addrs.ConfigResource
+
+	// CreateBeforeDestroy reflects the status of the lifecycle
+	// create_before_destroy option when this instance was last updated.
+	// Because create_before_destroy also affects the overall ordering of the
+	// destroy operations, we need to record the status to ensure a resource
+	// removed from the config will still be destroyed in the same manner.
+	CreateBeforeDestroy bool
+}
+
+// ObjectStatus represents the status of a RemoteObject.
+type ObjectStatus rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus
+
+const (
+	// ObjectReady is an object status for an object that is ready to use.
+	ObjectReady ObjectStatus = 'R'
+
+	// ObjectTainted is an object status representing an object that is in
+	// an unrecoverable bad state due to a partial failure during a create,
+	// update, or delete operation. Since it cannot be moved into the
+	// ObjectReady state, a tainted object must be replaced.
+	ObjectTainted ObjectStatus = 'T'
+
+	// ObjectPlanned is a special object status used only for the transient
+	// placeholder objects we place into state during the refresh and plan
+	// walks to stand in for objects that will be created during apply.
+	//
+	// Any object of this status must have a corresponding change recorded
+	// in the current plan, whose value must then be used in preference to
+	// the value stored in state when evaluating expressions. A planned
+	// object stored in state will be incomplete if any of its attributes are
+	// not yet known, and the plan must be consulted in order to "see" those
+	// unknown values, because the state is not able to represent them.
+	ObjectPlanned ObjectStatus = 'P'
+)
+
+// Encode marshals the value within the receiver to produce a
+// ResourceInstanceObjectSrc ready to be written to a state file.
+//
+// The given type must be the implied type of the resource type schema, and
+// the given value must conform to it.
It is important to pass the schema
+// type and not the object's own type so that dynamically-typed attributes
+// will be stored correctly. The caller must also provide the version number
+// of the schema that the given type was derived from, which will be recorded
+// in the source object so it can be used to detect when schema migration is
+// required on read.
+//
+// The returned object may share internal references with the receiver and
+// so the caller must not mutate the receiver any further once this
+// method is called.
+func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
+	// If it contains marks, remove these marks before traversing the
+	// structure with UnknownAsNull, and save the PathValueMarks
+	// so we can save them in state.
+	val, pvm := o.Value.UnmarkDeepWithPaths()
+
+	// Our state serialization can't represent unknown values, so we convert
+	// them to nulls here. This is lossy, but nobody should be writing unknown
+	// values here and expecting to get them out again later.
+	//
+	// We get unknown values here while we're building out a "planned state"
+	// during the plan phase, but the value stored in the plan takes precedence
+	// for expression evaluation. The apply step should never produce unknown
+	// values, but if it does it's the responsibility of the caller to detect
+	// and raise an error about that.
+	val = cty.UnknownAsNull(val)
+
+	src, err := ctyjson.Marshal(val, ty)
+	if err != nil {
+		return nil, err
+	}
+
+	// Dependencies are collected and merged in an unordered format (using map
+	// keys as a set), then later changed to a slice (in random ordering) to be
+	// stored in state as an array. To avoid pointless thrashing of state in
+	// refresh-only runs, we can either override comparison of dependency lists
+	// (more desirable, but tricky for Reasons) or just sort when encoding.
+ // Encoding of instances can happen concurrently, so we must copy the + // dependencies to avoid mutating what may be a shared array of values. + dependencies := make([]addrs.ConfigResource, len(o.Dependencies)) + copy(dependencies, o.Dependencies) + + sort.Slice(dependencies, func(i, j int) bool { return dependencies[i].String() < dependencies[j].String() }) + + return &ResourceInstanceObjectSrc{ + SchemaVersion: schemaVersion, + AttrsJSON: src, + AttrSensitivePaths: pvm, + Private: o.Private, + Status: o.Status, + Dependencies: dependencies, + CreateBeforeDestroy: o.CreateBeforeDestroy, + }, nil +} + +// AsTainted returns a deep copy of the receiver with the status updated to +// ObjectTainted. +func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { + if o == nil { + // A nil object can't be tainted, but we'll allow this anyway to + // avoid a crash, since we presumably intend to eventually record + // the object has having been deleted anyway. + return nil + } + ret := o.DeepCopy() + ret.Status = ObjectTainted + return ret +} diff --git a/pkg/states/instance_object_src.go b/pkg/states/instance_object_src.go new file mode 100644 index 00000000000..617afa452e2 --- /dev/null +++ b/pkg/states/instance_object_src.go @@ -0,0 +1,128 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" +) + +// ResourceInstanceObjectSrc is a not-fully-decoded version of +// ResourceInstanceObject. Decoding of it can be completed by first handling +// any schema migration steps to get to the latest schema version and then +// calling method Decode with the implied type of the latest schema. 
+type ResourceInstanceObjectSrc struct { + // SchemaVersion is the resource-type-specific schema version number that + // was current when either AttrsJSON or AttrsFlat was encoded. Migration + // steps are required if this is less than the current version number + // reported by the corresponding provider. + SchemaVersion uint64 + + // AttrsJSON is a JSON-encoded representation of the object attributes, + // encoding the value (of the object type implied by the associated resource + // type schema) that represents this remote object in OpenTofu Language + // expressions, and is compared with configuration when producing a diff. + // + // This is retained in JSON format here because it may require preprocessing + // before decoding if, for example, the stored attributes are for an older + // schema version which the provider must upgrade before use. If the + // version is current, it is valid to simply decode this using the + // type implied by the current schema, without the need for the provider + // to perform an upgrade first. + // + // When writing a ResourceInstanceObject into the state, AttrsJSON should + // always be conformant to the current schema version and the current + // schema version should be recorded in the SchemaVersion field. + AttrsJSON []byte + + // AttrsFlat is a legacy form of attributes used in older state file + // formats, and in the new state format for objects that haven't yet been + // upgraded. This attribute is mutually exclusive with Attrs: for any + // ResourceInstanceObject, only one of these attributes may be populated + // and the other must be nil. + // + // An instance object with this field populated should be upgraded to use + // Attrs at the earliest opportunity, since this legacy flatmap-based + // format will be phased out over time. AttrsFlat should not be used when + // writing new or updated objects to state; instead, callers must follow + // the recommendations in the AttrsJSON documentation above. 
+	AttrsFlat map[string]string
+
+	// AttrSensitivePaths is an array of paths to mark as sensitive coming out of
+	// state, or to save as sensitive paths when saving state
+	AttrSensitivePaths []cty.PathValueMarks
+
+	// These fields all correspond to the fields of the same name on
+	// ResourceInstanceObject.
+	Private             []byte
+	Status              ObjectStatus
+	Dependencies        []addrs.ConfigResource
+	CreateBeforeDestroy bool
+}
+
+// Decode unmarshals the raw representation of the object attributes. Pass the
+// implied type of the corresponding resource type schema for correct operation.
+//
+// Before calling Decode, the caller must check that the SchemaVersion field
+// exactly equals the version number of the schema whose implied type is being
+// passed, or else the result is undefined.
+//
+// The returned object may share internal references with the receiver and
+// so the caller must not mutate the receiver any further once this
+// method is called.
+func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
+	var val cty.Value
+	var err error
+	if os.AttrsFlat != nil {
+		// Legacy mode. We'll do our best to unpick this from the flatmap.
+		val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		val, err = ctyjson.Unmarshal(os.AttrsJSON, ty)
+		// Mark the value with paths if applicable. NOTE(review): this runs before the err check below — presumably val is cty.NilVal on error; confirm MarkWithPaths tolerates that.
+		if os.AttrSensitivePaths != nil {
+			val = val.MarkWithPaths(os.AttrSensitivePaths)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &ResourceInstanceObject{
+		Value:               val,
+		Status:              os.Status,
+		Dependencies:        os.Dependencies,
+		Private:             os.Private,
+		CreateBeforeDestroy: os.CreateBeforeDestroy,
+	}, nil
+}
+
+// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the
+// metadata from the receiver and writing in the given new schema version
+// and attribute value that are presumed to have resulted from upgrading
+// from an older schema version.
+func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + new := os.DeepCopy() + new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap + + // This is the same principle as ResourceInstanceObject.Encode, but + // avoiding a decode/re-encode cycle because we don't have type info + // available for the "old" attributes. + newAttrs = cty.UnknownAsNull(newAttrs) + src, err := ctyjson.Marshal(newAttrs, newType) + if err != nil { + return nil, err + } + + new.AttrsJSON = src + new.SchemaVersion = newSchemaVersion + return new, nil +} diff --git a/pkg/states/instance_object_test.go b/pkg/states/instance_object_test.go new file mode 100644 index 00000000000..3212f596cef --- /dev/null +++ b/pkg/states/instance_object_test.go @@ -0,0 +1,88 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +func TestResourceInstanceObject_encode(t *testing.T) { + value := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.True, + }) + // The in-memory order of resource dependencies is random, since they're an + // unordered set. 
+ depsOne := []addrs.ConfigResource{ + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "honk"), + addrs.RootModule.Child("child").Resource(addrs.ManagedResourceMode, "test", "flub"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "boop"), + } + depsTwo := []addrs.ConfigResource{ + addrs.RootModule.Child("child").Resource(addrs.ManagedResourceMode, "test", "flub"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "boop"), + addrs.RootModule.Resource(addrs.ManagedResourceMode, "test", "honk"), + } + + // multiple instances may have been assigned the same deps slice + objs := []*ResourceInstanceObject{ + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsTwo, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + &ResourceInstanceObject{ + Value: value, + Status: ObjectPlanned, + Dependencies: depsOne, + }, + } + + var encoded []*ResourceInstanceObjectSrc + + // Encoding can happen concurrently, so we need to make sure the shared + // Dependencies are safely handled + var wg sync.WaitGroup + var mu sync.Mutex + + for _, obj := range objs { + obj := obj + wg.Add(1) + go func() { + defer wg.Done() + rios, err := obj.Encode(value.Type(), 0) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + mu.Lock() + encoded = append(encoded, rios) + mu.Unlock() + }() + } + wg.Wait() + + // However, identical sets of dependencies should always be written to state + // in an identical order, so we don't do meaningless state updates on refresh. 
+ for i := 0; i < len(encoded)-1; i++ { + if diff := cmp.Diff(encoded[i].Dependencies, encoded[i+1].Dependencies); diff != "" { + t.Errorf("identical dependencies got encoded in different orders:\n%s", diff) + } + } +} diff --git a/pkg/states/module.go b/pkg/states/module.go new file mode 100644 index 00000000000..b4025e13c18 --- /dev/null +++ b/pkg/states/module.go @@ -0,0 +1,326 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Module is a container for the states of objects within a particular module. +type Module struct { + Addr addrs.ModuleInstance + + // Resources contains the state for each resource. The keys in this map are + // an implementation detail and must not be used by outside callers. + Resources map[string]*Resource + + // OutputValues contains the state for each output value. The keys in this + // map are output value names. + OutputValues map[string]*OutputValue + + // LocalValues contains the value for each named output value. The keys + // in this map are local value names. + LocalValues map[string]cty.Value +} + +// NewModule constructs an empty module state for the given module address. +func NewModule(addr addrs.ModuleInstance) *Module { + return &Module{ + Addr: addr, + Resources: map[string]*Resource{}, + OutputValues: map[string]*OutputValue{}, + LocalValues: map[string]cty.Value{}, + } +} + +// Resource returns the state for the resource with the given address within +// the receiving module state, or nil if the requested resource is not tracked +// in the state. 
+func (ms *Module) Resource(addr addrs.Resource) *Resource { + return ms.Resources[addr.String()] +} + +// ResourceInstance returns the state for the resource instance with the given +// address within the receiving module state, or nil if the requested instance +// is not tracked in the state. +func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { + rs := ms.Resource(addr.Resource) + if rs == nil { + return nil + } + return rs.Instance(addr.Key) +} + +// SetResourceProvider updates the resource-level metadata for the resource +// with the given address, creating the resource state for it if it doesn't +// already exist. +func (ms *Module) SetResourceProvider(addr addrs.Resource, provider addrs.AbsProviderConfig) { + rs := ms.Resource(addr) + if rs == nil { + rs = &Resource{ + Addr: addr.Absolute(ms.Addr), + Instances: map[addrs.InstanceKey]*ResourceInstance{}, + } + ms.Resources[addr.String()] = rs + } + + rs.ProviderConfig = provider +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed. +func (ms *Module) RemoveResource(addr addrs.Resource) { + delete(ms.Resources, addr.String()) +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address and dependencies. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance will be removed altogether. +// +// The provider address is a resource-wide setting and is updated for all other +// instances of the same resource as a side-effect of this call. 
func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
	rs := ms.Resource(addr.Resource)
	// if the resource is nil and the object is nil, don't do anything!
	// you'll probably just cause issues
	if obj == nil && rs == nil {
		return
	}
	if obj == nil && rs != nil {
		// does the resource have any other objects?
		// if not then delete the whole resource
		if len(rs.Instances) == 0 {
			delete(ms.Resources, addr.Resource.String())
			return
		}
		// check for an existing resource, now that we've ensured that rs.Instances is more than 0/not nil
		is := rs.Instance(addr.Key)
		if is == nil {
			// if there is no instance on the resource with this address and obj is nil, return and change nothing
			return
		}
		// if we have an instance, update the current
		// (obj is nil here, so this clears the instance's current object
		// before we check whether any deposed objects remain)
		is.Current = obj
		if !is.HasObjects() {
			// If we have no objects at all then we'll clean up.
			delete(rs.Instances, addr.Key)
			// Delete the resource if it has no instances, but only if NoEach
			if len(rs.Instances) == 0 {
				delete(ms.Resources, addr.Resource.String())
				return
			}
		}
		// Nothing more to do here, so return!
		return
	}
	if rs == nil && obj != nil {
		// We don't have a resource so make one, which is a side effect of setResourceMeta
		ms.SetResourceProvider(addr.Resource, provider)
		// now we have a resource! so update the rs value to point to it
		rs = ms.Resource(addr.Resource)
	}
	// Get our instance from the resource; it could be there or not at this point
	is := rs.Instance(addr.Key)
	if is == nil {
		// if we don't have a resource, create one and add to the instances
		is = rs.CreateInstance(addr.Key)
		// update the resource meta because we have a new
		ms.SetResourceProvider(addr.Resource, provider)
	}
	// Update the resource's ProviderConfig, in case the provider has updated
	rs.ProviderConfig = provider
	is.Current = obj
}

// SetResourceInstanceDeposed saves the given instance object as a deposed
// generation of the resource instance with the given address and deposed key.
//
// Call this method only for pre-existing deposed objects that already have
// a known DeposedKey. For example, this method is useful if reloading objects
// that were persisted to a state file. To mark the current object as deposed,
// use DeposeResourceInstanceObject instead.
//
// The resource that contains the given instance must already exist in the
// state, or this method will panic. Use Resource to check first if its
// presence is not already guaranteed.
//
// Any existing current instance object for the given resource and deposed key
// is overwritten. Set obj to nil to remove the deposed object altogether. If
// the instance is left with no objects after this operation then it will
// be removed from its containing resource altogether.
func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
	ms.SetResourceProvider(addr.Resource, provider)

	rs := ms.Resource(addr.Resource)
	is := rs.EnsureInstance(addr.Key)
	if obj != nil {
		is.Deposed[key] = obj
	} else {
		delete(is.Deposed, key)
	}

	if !is.HasObjects() {
		// If we have no objects at all then we'll clean up.
		delete(rs.Instances, addr.Key)
	}
	if len(rs.Instances) == 0 {
		// Also clean up if we only expect to have one instance anyway
		// and there are none. We leave the resource behind if an each mode
		// is active because an empty list or map of instances is a valid state.
		delete(ms.Resources, addr.Resource.String())
	}
}

// ForgetResourceInstanceAll removes the record of all objects associated with
// the specified resource instance, if present. If not present, this is a no-op.
func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) {
	rs := ms.Resource(addr.Resource)
	if rs == nil {
		return
	}
	delete(rs.Instances, addr.Key)

	if len(rs.Instances) == 0 {
		// Also clean up if we only expect to have one instance anyway
		// and there are none. We leave the resource behind if an each mode
		// is active because an empty list or map of instances is a valid state.
		delete(ms.Resources, addr.Resource.String())
	}
}

// ForgetResourceInstanceDeposed removes the record of the deposed object with
// the given address and key, if present. If not present, this is a no-op.
func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) {
	rs := ms.Resource(addr.Resource)
	if rs == nil {
		return
	}
	is := rs.Instance(addr.Key)
	if is == nil {
		return
	}
	delete(is.Deposed, key)

	if !is.HasObjects() {
		// If we have no objects at all then we'll clean up.
		delete(rs.Instances, addr.Key)
	}
	if len(rs.Instances) == 0 {
		// Also clean up if we only expect to have one instance anyway
		// and there are none. We leave the resource behind if an each mode
		// is active because an empty list or map of instances is a valid state.
		delete(ms.Resources, addr.Resource.String())
	}
}

// deposeResourceInstanceObject is the real implementation of
// SyncState.DeposeResourceInstanceObject.
+func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { + is := ms.ResourceInstance(addr) + if is == nil { + return NotDeposed + } + return is.deposeCurrentObject(forceKey) +} + +// maybeRestoreResourceInstanceDeposed is the real implementation of +// SyncState.MaybeRestoreResourceInstanceDeposed. +func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { + rs := ms.Resource(addr.Resource) + if rs == nil { + return false + } + is := rs.Instance(addr.Key) + if is == nil { + return false + } + if is.Current != nil { + return false + } + if len(is.Deposed) == 0 { + return false + } + is.Current = is.Deposed[key] + delete(is.Deposed, key) + return true +} + +// SetOutputValue writes an output value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { + os := &OutputValue{ + Addr: addrs.AbsOutputValue{ + Module: ms.Addr, + OutputValue: addrs.OutputValue{ + Name: name, + }, + }, + Value: value, + Sensitive: sensitive, + } + ms.OutputValues[name] = os + return os +} + +// RemoveOutputValue removes the output value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveOutputValue(name string) { + delete(ms.OutputValues, name) +} + +// SetLocalValue writes a local value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetLocalValue(name string, value cty.Value) { + ms.LocalValues[name] = value +} + +// RemoveLocalValue removes the local value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. 
func (ms *Module) RemoveLocalValue(name string) {
	delete(ms.LocalValues, name)
}

// PruneResourceHusks is a specialized method that will remove any Resource
// objects that do not contain any instances, even if they have an EachMode.
//
// You probably shouldn't call this! See the method of the same name on
// type State for more information on what this is for and the rare situations
// where it is safe to use.
func (ms *Module) PruneResourceHusks() {
	for _, rs := range ms.Resources {
		if len(rs.Instances) == 0 {
			ms.RemoveResource(rs.Addr.Resource)
		}
	}
}

// empty returns true if the receiving module state is contributing nothing
// to the state. In other words, it returns true if the module could be
// removed from the state altogether without changing the meaning of the state.
//
// In practice a module containing no objects is the same as a non-existent
// module, and so we can opportunistically clean up once a module becomes
// empty on the assumption that it will be re-added if needed later.
func (ms *Module) empty() bool {
	// A nil receiver is treated as empty so callers can test without a
	// preceding nil check.
	if ms == nil {
		return true
	}

	// This must be updated to cover any new collections added to Module
	// in future.
	return (len(ms.Resources) == 0 &&
		len(ms.OutputValues) == 0 &&
		len(ms.LocalValues) == 0)
}
diff --git a/pkg/states/objectstatus_string.go b/pkg/states/objectstatus_string.go
new file mode 100644
index 00000000000..96a6db2f4c4
--- /dev/null
+++ b/pkg/states/objectstatus_string.go
@@ -0,0 +1,33 @@
// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT.

package states

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[ObjectReady-82] + _ = x[ObjectTainted-84] + _ = x[ObjectPlanned-80] +} + +const ( + _ObjectStatus_name_0 = "ObjectPlanned" + _ObjectStatus_name_1 = "ObjectReady" + _ObjectStatus_name_2 = "ObjectTainted" +) + +func (i ObjectStatus) String() string { + switch { + case i == 80: + return _ObjectStatus_name_0 + case i == 82: + return _ObjectStatus_name_1 + case i == 84: + return _ObjectStatus_name_2 + default: + return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/states/output_value.go b/pkg/states/output_value.go new file mode 100644 index 00000000000..3ace0d8f907 --- /dev/null +++ b/pkg/states/output_value.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Addr addrs.AbsOutputValue + Value cty.Value + Sensitive bool +} diff --git a/pkg/states/remote/remote.go b/pkg/states/remote/remote.go new file mode 100644 index 00000000000..4add7404c04 --- /dev/null +++ b/pkg/states/remote/remote.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// Client is the interface that must be implemented for a remote state +// driver. It supports dumb put/get/delete, and the higher level structs +// handle persisting the state properly here. 
+type Client interface { + Get() (*Payload, error) + Put([]byte) error + Delete() error +} + +// ClientForcePusher is an optional interface that allows a remote +// state to force push by managing a flag on the client that is +// toggled on by a call to EnableForcePush. +type ClientForcePusher interface { + Client + EnableForcePush() +} + +// ClientLocker is an optional interface that allows a remote state +// backend to enable state lock/unlock. +type ClientLocker interface { + Client + statemgr.Locker +} + +// Payload is the return value from the remote state storage. +type Payload struct { + MD5 []byte + Data []byte +} + +// Factory is the factory function to create a remote client. +type Factory func(map[string]string) (Client, error) diff --git a/pkg/states/remote/remote_test.go b/pkg/states/remote/remote_test.go new file mode 100644 index 00000000000..75859eaf3b1 --- /dev/null +++ b/pkg/states/remote/remote_test.go @@ -0,0 +1,135 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "crypto/md5" + "encoding/json" + "testing" +) + +func TestRemoteClient_noPayload(t *testing.T) { + s := &State{ + Client: nilClient{}, + } + if err := s.RefreshState(); err != nil { + t.Fatal("error refreshing empty remote state") + } +} + +// nilClient returns nil for everything +type nilClient struct{} + +func (nilClient) Get() (*Payload, error) { return nil, nil } + +func (c nilClient) Put([]byte) error { return nil } + +func (c nilClient) Delete() error { return nil } + +// mockClient is a client that tracks persisted state snapshots only in +// memory and also logs what it has been asked to do for use in test +// assertions. 
+type mockClient struct { + current []byte + log []mockClientRequest +} + +type mockClientRequest struct { + Method string + Content map[string]interface{} +} + +func (c *mockClient) Get() (*Payload, error) { + c.appendLog("Get", c.current) + if c.current == nil { + return nil, nil + } + checksum := md5.Sum(c.current) + return &Payload{ + Data: c.current, + MD5: checksum[:], + }, nil +} + +func (c *mockClient) Put(data []byte) error { + c.appendLog("Put", data) + c.current = data + return nil +} + +func (c *mockClient) Delete() error { + c.appendLog("Delete", c.current) + c.current = nil + return nil +} + +func (c *mockClient) appendLog(method string, content []byte) { + // For easier test assertions, we actually log the result of decoding + // the content JSON rather than the raw bytes. Callers are in principle + // allowed to provide any arbitrary bytes here, but we know we're only + // using this to test our own State implementation here and that always + // uses the JSON state format, so this is fine. 
+ + var contentVal map[string]interface{} + if content != nil { + err := json.Unmarshal(content, &contentVal) + if err != nil { + panic(err) // should never happen because our tests control this input + } + } + c.log = append(c.log, mockClientRequest{method, contentVal}) +} + +// mockClientForcePusher is like mockClient, but also implements +// EnableForcePush, allowing testing for this behavior +type mockClientForcePusher struct { + current []byte + force bool + log []mockClientRequest +} + +func (c *mockClientForcePusher) Get() (*Payload, error) { + c.appendLog("Get", c.current) + if c.current == nil { + return nil, nil + } + checksum := md5.Sum(c.current) + return &Payload{ + Data: c.current, + MD5: checksum[:], + }, nil +} + +func (c *mockClientForcePusher) Put(data []byte) error { + if c.force { + c.appendLog("Force Put", data) + } else { + c.appendLog("Put", data) + } + c.current = data + return nil +} + +// Implements remote.ClientForcePusher +func (c *mockClientForcePusher) EnableForcePush() { + c.force = true +} + +func (c *mockClientForcePusher) Delete() error { + c.appendLog("Delete", c.current) + c.current = nil + return nil +} +func (c *mockClientForcePusher) appendLog(method string, content []byte) { + var contentVal map[string]interface{} + if content != nil { + err := json.Unmarshal(content, &contentVal) + if err != nil { + panic(err) // should never happen because our tests control this input + } + } + c.log = append(c.log, mockClientRequest{method, contentVal}) +} diff --git a/pkg/states/remote/state.go b/pkg/states/remote/state.go new file mode 100644 index 00000000000..209365c79e4 --- /dev/null +++ b/pkg/states/remote/state.go @@ -0,0 +1,299 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bytes" + "fmt" + "log" + "sync" + + uuid "github.com/hashicorp/go-uuid" + + "github.com/kubegems/opentofu/pkg/backend/local" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state. This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. +type State struct { + Client Client + + encryption encryption.StateEncryption + + // We track two pieces of meta data in addition to the state itself: + // + // lineage - the state's unique ID + // serial - the monotonic counter of "versions" of the state + // + // Both of these (along with state) have a sister field + // that represents the values read in from an existing source. + // All three of these values are used to determine if the new + // state has changed from an existing state we read in. + lineage, readLineage string + serial, readSerial uint64 + mu sync.Mutex + state, readState *states.State + disableLocks bool + + // If this is set then the state manager will decline to store intermediate + // state snapshots created while a OpenTofu Core apply operation is in + // progress. Otherwise (by default) it will accept persistent snapshots + // using the default rules defined in the local backend. 
+ disableIntermediateSnapshots bool +} + +var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) +var _ local.IntermediateStateConditionalPersister = (*State)(nil) + +func NewState(client Client, enc encryption.StateEncryption) *State { + return &State{ + Client: client, + encryption: enc, + } +} + +func (s *State) DisableIntermediateSnapshots() { + s.disableIntermediateSnapshots = true +} + +// statemgr.Reader impl. +func (s *State) State() *states.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +func (s *State) GetRootOutputValues() (map[string]*states.OutputValue, error) { + if err := s.RefreshState(); err != nil { + return nil, fmt.Errorf("Failed to load state: %w", err) + } + + state := s.State() + if state == nil { + state = states.NewState() + } + + return state.RootModule().OutputValues, nil +} + +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + +// statemgr.Writer impl. +func (s *State) WriteState(state *states.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = state.DeepCopy() + + return nil +} + +// WriteStateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !force { + checkFile := statefile.New(s.state, s.lineage, s.serial) + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // The remote backend needs to pass the `force` flag through to its client. 
+ // For backends that support such operations, inform the client + // that a force push has been requested + c, isForcePusher := s.Client.(ClientForcePusher) + if force && isForcePusher { + c.EnableForcePush() + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + + return nil +} + +// statemgr.Refresher impl. +func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.refreshState() +} + +// refreshState is the main implementation of RefreshState, but split out so +// that we can make internal calls to it from methods that are already holding +// the s.mu lock. +func (s *State) refreshState() error { + payload, err := s.Client.Get() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + s.readState = nil + s.lineage = "" + s.serial = 0 + return nil + } + + stateFile, err := statefile.Read(bytes.NewReader(payload.Data), s.encryption) + if err != nil { + return err + } + + s.lineage = stateFile.Lineage + s.serial = stateFile.Serial + s.state = stateFile.State + + // Properties from the remote must be separate so we can + // track changes as lineage, serial and/or state are mutated + s.readLineage = stateFile.Lineage + s.readSerial = stateFile.Serial + s.readState = s.state.DeepCopy() + return nil +} + +// statemgr.Persister impl. 
+func (s *State) PersistState(schemas *tofu.Schemas) error { + s.mu.Lock() + defer s.mu.Unlock() + + log.Printf("[DEBUG] states/remote: state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] states/remote: state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + + if s.readState != nil { + lineageUnchanged := s.readLineage != "" && s.lineage == s.readLineage + serialUnchanged := s.readSerial != 0 && s.serial == s.readSerial + stateUnchanged := statefile.StatesMarshalEqual(s.state, s.readState) + if stateUnchanged && lineageUnchanged && serialUnchanged { + // If the state, lineage or serial haven't changed at all then we have nothing to do. + return nil + } + s.serial++ + } else { + // We might be writing a new state altogether, but before we do that + // we'll check to make sure there isn't already a snapshot present + // that we ought to be updating. + err := s.refreshState() + if err != nil { + return fmt.Errorf("failed checking for existing remote state: %w", err) + } + log.Printf("[DEBUG] states/remote: after refresh, state read serial is: %d; serial is: %d", s.readSerial, s.serial) + log.Printf("[DEBUG] states/remote: after refresh, state read lineage is: %s; lineage is: %s", s.readLineage, s.lineage) + if s.lineage == "" { // indicates that no state snapshot is present yet + lineage, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate initial lineage: %w", err) + } + s.lineage = lineage + s.serial++ + } + } + + f := statefile.New(s.state, s.lineage, s.serial) + + var buf bytes.Buffer + err := statefile.Write(f, &buf, s.encryption) + if err != nil { + return err + } + + err = s.Client.Put(buf.Bytes()) + if err != nil { + return err + } + + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. 
+ // We've potentially overwritten (via force) the state, lineage + // and / or serial (and serial was incremented) so we copy over all + // three fields so everything matches the new state and a subsequent + // operation would correctly detect no changes to the lineage, serial or state. + s.readState = s.state.DeepCopy() + s.readLineage = s.lineage + s.readSerial = s.serial + return nil +} + +// ShouldPersistIntermediateState implements local.IntermediateStateConditionalPersister +func (s *State) ShouldPersistIntermediateState(info *local.IntermediateStatePersistInfo) bool { + if s.disableIntermediateSnapshots { + return false + } + return local.DefaultIntermediateStatePersistRule(info) +} + +// Lock calls the Client's Lock method if it's implemented. +func (s *State) Lock(info *statemgr.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return "", nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Lock(info) + } + return "", nil +} + +// Unlock calls the Client's Unlock method if it's implemented. +func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Unlock(id) + } + return nil +} + +// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended +// to be called during initialization of a state manager and should not be +// called after any of the statemgr.Full interface methods have been called. +func (s *State) DisableLocks() { + s.disableLocks = true +} + +// StateSnapshotMeta returns the metadata from the most recently persisted +// or refreshed persistent state snapshot. +// +// This is an implementation of statemgr.PersistentMeta. 
+func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { + return statemgr.SnapshotMeta{ + Lineage: s.lineage, + Serial: s.serial, + } +} diff --git a/pkg/states/remote/state_test.go b/pkg/states/remote/state_test.go new file mode 100644 index 00000000000..296b76a1c3d --- /dev/null +++ b/pkg/states/remote/state_test.go @@ -0,0 +1,748 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "log" + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" + "github.com/kubegems/opentofu/version" + tfaddr "github.com/opentofu/registry-address" +) + +func TestState_impl(t *testing.T) { + var _ statemgr.Reader = new(State) + var _ statemgr.Writer = new(State) + var _ statemgr.Persister = new(State) + var _ statemgr.Refresher = new(State) + var _ statemgr.OutputReader = new(State) + var _ statemgr.Locker = new(State) +} + +func TestStateRace(t *testing.T) { + s := NewState(nilClient{}, encryption.StateEncryptionDisabled()) + + current := states.NewState() + + var wg sync.WaitGroup + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s.WriteState(current) + s.PersistState(nil) + s.RefreshState() + }() + } + wg.Wait() +} + +// testCase encapsulates a test state test +type testCase struct { + name string + // A function to mutate state and return a cleanup function + mutationFunc func(*State) (*states.State, func()) + // The expected requests to have taken place + expectedRequests []mockClientRequest + // Mark this case as not having a request + noRequest bool +} + +// isRequested ensures a test that is 
specified as not having +// a request doesn't have one by checking if a method exists +// on the expectedRequest. +func (tc testCase) isRequested(t *testing.T) bool { + for _, expectedMethod := range tc.expectedRequests { + hasMethod := expectedMethod.Method != "" + if tc.noRequest && hasMethod { + t.Fatalf("expected no content for %q but got: %v", tc.name, expectedMethod) + } + } + return !tc.noRequest +} + +func TestStatePersist(t *testing.T) { + testCases := []testCase{ + { + name: "first state persistence", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.state = &states.State{ + Modules: map[string]*states.Module{"": {}}, + } + s := mgr.State() + s.RootModule().SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Name: "myfile", + Type: "local_file", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "filename": "file.txt", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: tfaddr.Provider{Namespace: "local"}, + }, + ) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + // Expect an initial refresh, which returns nothing since there is no remote state. 
+ { + Method: "Get", + Content: nil, + }, + // Expect a second refresh, since the read state is nil + { + Method: "Get", + Content: nil, + }, + // Expect an initial push with values and a serial of 1 + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "some meaningless value", + "serial": 1.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{ + map[string]interface{}{ + "instances": []interface{}{ + map[string]interface{}{ + "attributes_flat": map[string]interface{}{ + "filename": "file.txt", + }, + "schema_version": 0.0, + "sensitive_attributes": []interface{}{}, + }, + }, + "mode": "managed", + "name": "myfile", + "provider": `provider["/local/"]`, + "type": "local_file", + }, + }, + "check_results": nil, + }, + }, + }, + }, + // If lineage changes, expect the serial to increment + { + name: "change lineage", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.lineage = "mock-lineage" + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 2.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{ + map[string]interface{}{ + "instances": []interface{}{ + map[string]interface{}{ + "attributes_flat": map[string]interface{}{ + "filename": "file.txt", + }, + "schema_version": 0.0, + "sensitive_attributes": []interface{}{}, + }, + }, + "mode": "managed", + "name": "myfile", + "provider": `provider["/local/"]`, + "type": "local_file", + }, + }, + "check_results": nil, + }, + }, + }, + }, + // removing resources should increment the serial + { + name: "remove resources", + 
mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.state.RootModule().Resources = map[string]*states.Resource{} + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 3.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // If the remote serial is incremented, then we increment it once more. + { + name: "change serial", + mutationFunc: func(mgr *State) (*states.State, func()) { + originalSerial := mgr.serial + mgr.serial++ + return mgr.State(), func() { + mgr.serial = originalSerial + } + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 5.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // Adding an output should cause the serial to increment as well. 
+ { + name: "add output to state", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + s.RootModule().SetOutputValue("foo", cty.StringVal("bar"), false) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 4.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "bar", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + // ...as should changing an output + { + name: "mutate state bar -> baz", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + s.RootModule().SetOutputValue("foo", cty.StringVal("baz"), false) + return s, func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 5.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "baz", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + { + name: "nothing changed", + mutationFunc: func(mgr *State) (*states.State, func()) { + s := mgr.State() + return s, func() {} + }, + noRequest: true, + }, + // If the remote state's serial is less (force push), then we + // increment it once from there. 
+ { + name: "reset serial (force push style)", + mutationFunc: func(mgr *State) (*states.State, func()) { + mgr.serial = 2 + return mgr.State(), func() {} + }, + expectedRequests: []mockClientRequest{ + { + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, // encoding/json decodes this as float64 by default + "lineage": "mock-lineage", + "serial": 3.0, // encoding/json decodes this as float64 by default + "terraform_version": version.Version, + "outputs": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + "value": "baz", + }, + }, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + }, + }, + } + + // Initial setup of state just to give us a fixed starting point for our + // test assertions below, or else we'd need to deal with + // random lineage. + mgr := NewState( + &mockClient{}, + encryption.StateEncryptionDisabled(), + ) + + // In normal use (during a OpenTofu operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. + // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + // Our client is a mockClient which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClient) + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + // Run tests in order. 
+ for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s, cleanup := tc.mutationFunc(mgr) + + if err := mgr.WriteState(s); err != nil { + t.Fatalf("failed to WriteState for %q: %s", tc.name, err) + } + if err := mgr.PersistState(nil); err != nil { + t.Fatalf("failed to PersistState for %q: %s", tc.name, err) + } + + if tc.isRequested(t) { + // Get captured request from the mock client log + // based on the index of the current test + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + for expectedRequestIdx := 0; expectedRequestIdx < len(tc.expectedRequests); expectedRequestIdx++ { + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequests[expectedRequestIdx], loggedRequest, cmpopts.IgnoreMapEntries(func(key string, value interface{}) bool { + // This is required since the initial state creation causes the lineage to be a UUID that is not known at test time. + return tc.name == "first state persistence" && key == "lineage" + })); len(diff) > 0 { + t.Logf("incorrect client requests for %q:\n%s", tc.name, diff) + t.Fail() + } + } + } + cleanup() + }) + } + logCnt := len(mockClient.log) + if logIdx != logCnt { + t.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} + +func TestState_GetRootOutputValues(t *testing.T) { + // Initial setup of state with outputs already defined + mgr := NewState( + &mockClient{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 1, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + encryption.StateEncryptionDisabled(), + ) + + outputs, err := mgr.GetRootOutputValues() + if err != nil { + t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) + } + + if len(outputs) != 1 { + t.Errorf("Expected %d outputs, but received %d", 1, len(outputs)) + } +} + +type migrationTestCase struct { + name string + // A function to generate a statefile + stateFile func(*State) *statefile.File + // The expected request to have taken place + expectedRequest mockClientRequest + // Mark this case as not having a request + expectedError string + // force flag passed to client + force bool +} + +func TestWriteStateForMigration(t *testing.T) { + mgr := NewState( + &mockClient{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 3, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + encryption.StateEncryptionDisabled(), + ) + + testCases := []migrationTestCase{ + // Refreshing state before we run the test loop causes a GET + { + name: "refresh state", + stateFile: func(mgr *State) *statefile.File { + return mgr.StateForMigration() + }, + expectedRequest: mockClientRequest{ + Method: "Get", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 3.0, + "terraform_version": "0.0.0", + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + }, + }, + }, + { + name: "cannot import lesser serial without 
force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedError: "cannot import state with serial 1 over newer state with serial 3", + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, + }, + { + name: "can import lesser serial with force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedRequest: mockClientRequest{ + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 2.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedRequest: mockClientRequest{ + Method: "Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "different-lineage", + "serial": 3.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + } + + // In normal use (during a OpenTofu operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. 
+ // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + if err := mgr.WriteState(mgr.State()); err != nil { + t.Fatalf("failed to write initial state: %s", err) + } + + // Our client is a mockClient which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClient) + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sf := tc.stateFile(mgr) + err := mgr.WriteStateForMigration(sf, tc.force) + shouldError := tc.expectedError != "" + + // If we are expecting and error check it and move on + if shouldError { + if err == nil { + t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) + } else if err.Error() != tc.expectedError { + t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) + } + return + } + + if err != nil { + t.Fatalf("test case %q failed: %v", tc.name, err) + } + + // At this point we should just do a normal write and persist + // as would happen from the CLI + mgr.WriteState(mgr.State()) + mgr.PersistState(nil) + + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { + t.Fatalf("incorrect client requests for %q:\n%s", tc.name, diff) + } + }) + } + + logCnt := len(mockClient.log) + if logIdx != logCnt { + log.Fatalf("not all requests were read. 
Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} + +// This test runs the same test cases as above, but with +// a client that implements EnableForcePush -- this allows +// us to test that -force continues to work for backends without +// this interface, but that this interface works for those that do. +func TestWriteStateForMigrationWithForcePushClient(t *testing.T) { + mgr := NewState( + &mockClientForcePusher{ + current: []byte(` + { + "version": 4, + "lineage": "mock-lineage", + "serial": 3, + "terraform_version":"0.0.0", + "outputs": {"foo": {"value":"bar", "type": "string"}}, + "resources": [] + } + `), + }, + encryption.StateEncryptionDisabled(), + ) + + testCases := []migrationTestCase{ + // Refreshing state before we run the test loop causes a GET + { + name: "refresh state", + stateFile: func(mgr *State) *statefile.File { + return mgr.StateForMigration() + }, + expectedRequest: mockClientRequest{ + Method: "Get", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 3.0, + "terraform_version": "0.0.0", + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + }, + }, + }, + { + name: "cannot import lesser serial without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedError: "cannot import state with serial 1 over newer state with serial 3", + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedError: `cannot import state with lineage "different-lineage" over unrelated state with lineage "mock-lineage"`, + }, + { + name: "can import lesser serial with force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, mgr.lineage, 1) + }, + expectedRequest: mockClientRequest{ + 
Method: "Force Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "mock-lineage", + "serial": 2.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + { + name: "cannot import differing lineage without force", + stateFile: func(mgr *State) *statefile.File { + return statefile.New(mgr.state, "different-lineage", mgr.serial) + }, + expectedRequest: mockClientRequest{ + Method: "Force Put", + Content: map[string]interface{}{ + "version": 4.0, + "lineage": "different-lineage", + "serial": 3.0, + "terraform_version": version.Version, + "outputs": map[string]interface{}{"foo": map[string]interface{}{"type": string("string"), "value": string("bar")}}, + "resources": []interface{}{}, + "check_results": nil, + }, + }, + force: true, + }, + } + + // In normal use (during a OpenTofu operation) we always refresh and read + // before any writes would happen, so we'll mimic that here for realism. 
+ // NB This causes a GET to be logged so the first item in the test cases + // must account for this + if err := mgr.RefreshState(); err != nil { + t.Fatalf("failed to RefreshState: %s", err) + } + + if err := mgr.WriteState(mgr.State()); err != nil { + t.Fatalf("failed to write initial state: %s", err) + } + + // Our client is a mockClientForcePusher which has a log we + // use to check that operations generate expected requests + mockClient := mgr.Client.(*mockClientForcePusher) + + if mockClient.force { + t.Fatalf("client should not default to force") + } + + // logIdx tracks the current index of the log separate from + // the loop iteration so we can check operations that don't + // cause any requests to be generated + logIdx := 0 + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Always reset client to not be force pushing + mockClient.force = false + sf := tc.stateFile(mgr) + err := mgr.WriteStateForMigration(sf, tc.force) + shouldError := tc.expectedError != "" + + // If we are expecting and error check it and move on + if shouldError { + if err == nil { + t.Fatalf("test case %q should have failed with error %q", tc.name, tc.expectedError) + } else if err.Error() != tc.expectedError { + t.Fatalf("test case %q expected error %q but got %q", tc.name, tc.expectedError, err) + } + return + } + + if err != nil { + t.Fatalf("test case %q failed: %v", tc.name, err) + } + + if tc.force && !mockClient.force { + t.Fatalf("test case %q should have enabled force push", tc.name) + } + + // At this point we should just do a normal write and persist + // as would happen from the CLI + mgr.WriteState(mgr.State()) + mgr.PersistState(nil) + + if logIdx >= len(mockClient.log) { + t.Fatalf("request lock and index are out of sync on %q: idx=%d len=%d", tc.name, logIdx, len(mockClient.log)) + } + loggedRequest := mockClient.log[logIdx] + logIdx++ + if diff := cmp.Diff(tc.expectedRequest, loggedRequest); len(diff) > 0 { + t.Fatalf("incorrect client 
requests for %q:\n%s", tc.name, diff) + } + }) + } + + logCnt := len(mockClient.log) + if logIdx != logCnt { + log.Fatalf("not all requests were read. Expected logIdx to be %d but got %d", logCnt, logIdx) + } +} diff --git a/pkg/states/remote/testing.go b/pkg/states/remote/testing.go new file mode 100644 index 00000000000..d447eb462e4 --- /dev/null +++ b/pkg/states/remote/testing.go @@ -0,0 +1,108 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package remote + +import ( + "bytes" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// TestClient is a generic function to test any client. +func TestClient(t *testing.T, c Client) { + var buf bytes.Buffer + s := statemgr.TestFullInitialState() + sf := statefile.New(s, "stub-lineage", 2) + err := statefile.Write(sf, &buf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + if err := c.Put(data); err != nil { + t.Fatalf("put: %s", err) + } + + p, err := c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(p.Data, data) { + t.Fatalf("expected full state %q\n\ngot: %q", string(p.Data), string(data)) + } + + if err := c.Delete(); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err = c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("expected empty state, got: %q", string(p.Data)) + } +} + +// Test the lock implementation for a remote.Client. +// This test requires 2 client instances, in oder to have multiple remote +// clients since some implementations may tie the client to the lock, or may +// have reentrant locks. 
+func TestRemoteLocks(t *testing.T, a, b Client) { + lockerA, ok := a.(statemgr.Locker) + if !ok { + t.Fatal("client A not a statemgr.Locker") + } + + lockerB, ok := b.(statemgr.Locker) + if !ok { + t.Fatal("client B not a statemgr.Locker") + } + + infoA := statemgr.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := statemgr.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + if _, ok := err.(*statemgr.LockError); !ok { + t.Errorf("expected a LockError, but was %t: %s", err, err) + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // TODO: Should we enforce that Unlock requires the correct ID? +} diff --git a/pkg/states/resource.go b/pkg/states/resource.go new file mode 100644 index 00000000000..1c26d47ec2b --- /dev/null +++ b/pkg/states/resource.go @@ -0,0 +1,220 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the absolute address for the resource this state object + // belongs to. + Addr addrs.AbsResource + + // Instances contains the potentially-multiple instances associated with + // this resource. 
This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. + Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. + ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. +func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { + return rs.Instances[key] +} + +// CreateInstance creates an instance and adds it to the resource +func (rs *Resource) CreateInstance(key addrs.InstanceKey) *ResourceInstance { + is := NewResourceInstance() + rs.Instances[key] = is + return is +} + +// EnsureInstance returns the state for the instance with the given key, +// creating a new empty state for it if one doesn't already exist. +// +// Because this may create and save a new state, it is considered to be +// a write operation. +func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { + ret := rs.Instance(key) + if ret == nil { + ret = NewResourceInstance() + rs.Instances[key] = ret + } + return ret +} + +// ResourceInstance represents the state of a particular instance of a resource. +type ResourceInstance struct { + // Current, if non-nil, is the remote object that is currently represented + // by the corresponding resource instance. + Current *ResourceInstanceObjectSrc + + // Deposed, if len > 0, contains any remote objects that were previously + // represented by the corresponding resource instance but have been + // replaced and are pending destruction due to the create_before_destroy + // lifecycle mode. 
+ Deposed map[DeposedKey]*ResourceInstanceObjectSrc +} + +// NewResourceInstance constructs and returns a new ResourceInstance, ready to +// use. +func NewResourceInstance() *ResourceInstance { + return &ResourceInstance{ + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + } +} + +// HasCurrent returns true if this resource instance has a "current"-generation +// object. Most instances do, but this can briefly be false during a +// create-before-destroy replace operation when the current has been deposed +// but its replacement has not yet been created. +func (i *ResourceInstance) HasCurrent() bool { + return i != nil && i.Current != nil +} + +// HasDeposed returns true if this resource instance has a deposed object +// with the given key. +func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { + return i != nil && i.Deposed[key] != nil +} + +// HasAnyDeposed returns true if this resource instance has one or more +// deposed objects. +func (i *ResourceInstance) HasAnyDeposed() bool { + return i != nil && len(i.Deposed) > 0 +} + +// HasObjects returns true if this resource has any objects at all, whether +// current or deposed. +func (i *ResourceInstance) HasObjects() bool { + return i.Current != nil || len(i.Deposed) != 0 +} + +// deposeCurrentObject is part of the real implementation of +// SyncState.DeposeResourceInstanceObject. The exported method uses a lock +// to ensure that we can safely allocate an unused deposed key without +// collision. 
+func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { + if !i.HasCurrent() { + return NotDeposed + } + + key := forceKey + if key == NotDeposed { + key = i.findUnusedDeposedKey() + } else { + if _, exists := i.Deposed[key]; exists { + panic(fmt.Sprintf("forced key %s is already in use", forceKey)) + } + } + i.Deposed[key] = i.Current + i.Current = nil + return key +} + +// GetGeneration retrieves the object of the given generation from the +// ResourceInstance, or returns nil if there is no such object. +// +// If the given generation is nil or invalid, this method will panic. +func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { + if gen == CurrentGen { + return i.Current + } + if dk, ok := gen.(DeposedKey); ok { + return i.Deposed[dk] + } + if gen == nil { + panic("get with nil Generation") + } + // Should never fall out here, since the above covers all possible + // Generation values. + panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. To avoid this risk, instead use the +// DeposeResourceInstanceObject method on the SyncState wrapper type, which +// allocates a key and uses it atomically. +func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { + return i.findUnusedDeposedKey() +} + +// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance. +func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { + for { + key := NewDeposedKey() + if _, exists := i.Deposed[key]; !exists { + return key + } + // Spin until we find a unique one. This shouldn't take long, because + // we have a 32-bit keyspace and there's rarely more than one deposed + // instance. 
+ } +} + +// DeposedKey is a 8-character hex string used to uniquely identify deposed +// instance objects in the state. +type DeposedKey string + +// NotDeposed is a special invalid value of DeposedKey that is used to represent +// the absense of a deposed key. It must not be used as an actual deposed key. +const NotDeposed = DeposedKey("") + +var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) + +// NewDeposedKey generates a pseudo-random deposed key. Because of the short +// length of these keys, uniqueness is not a natural consequence and so the +// caller should test to see if the generated key is already in use and generate +// another if so, until a unique key is found. +func NewDeposedKey() DeposedKey { + v := deposedKeyRand.Uint32() + return DeposedKey(fmt.Sprintf("%08x", v)) +} + +func (k DeposedKey) String() string { + return string(k) +} + +func (k DeposedKey) GoString() string { + ks := string(k) + switch { + case ks == "": + return "states.NotDeposed" + default: + return fmt.Sprintf("states.DeposedKey(%s)", ks) + } +} + +// Generation is a helper method to convert a DeposedKey into a Generation. +// If the reciever is anything other than NotDeposed then the result is +// just the same value as a Generation. If the receiver is NotDeposed then +// the result is CurrentGen. +func (k DeposedKey) Generation() Generation { + if k == NotDeposed { + return CurrentGen + } + return k +} + +// generation is an implementation of Generation. +func (k DeposedKey) generation() {} diff --git a/pkg/states/resource_test.go b/pkg/states/resource_test.go new file mode 100644 index 00000000000..12f873fe128 --- /dev/null +++ b/pkg/states/resource_test.go @@ -0,0 +1,61 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "testing" +) + +func TestResourceInstanceDeposeCurrentObject(t *testing.T) { + obj := &ResourceInstanceObjectSrc{ + // Empty for the sake of this test, because we're just going to + // compare by pointer below anyway. + } + + is := NewResourceInstance() + is.Current = obj + var dk DeposedKey + + t.Run("first depose", func(t *testing.T) { + dk = is.deposeCurrentObject(NotDeposed) // dk is randomly-generated but should be eight characters long + t.Logf("deposedKey is %q", dk) + + if got := is.Current; got != nil { + t.Errorf("current is %#v; want nil", got) + } + if got, want := is.Deposed[dk], obj; got != want { + t.Errorf("deposed object pointer is %#v; want %#v", got, want) + } + if got, want := len(is.Deposed), 1; got != want { + t.Errorf("wrong len(is.Deposed) %d; want %d", got, want) + } + if got, want := len(dk), 8; got != want { + t.Errorf("wrong len(deposedkey) %d; want %d", got, want) + } + }) + + t.Run("second depose", func(t *testing.T) { + notDK := is.deposeCurrentObject(NotDeposed) + if notDK != NotDeposed { + t.Errorf("got deposedKey %q; want NotDeposed", notDK) + } + + // Make sure we really did abort early, and haven't corrupted the + // state somehow. + if got := is.Current; got != nil { + t.Errorf("current is %#v; want nil", got) + } + if got, want := is.Deposed[dk], obj; got != want { + t.Errorf("deposed object pointer is %#v; want %#v", got, want) + } + if got, want := len(is.Deposed), 1; got != want { + t.Errorf("wrong len(is.Deposed) %d; want %d", got, want) + } + if got, want := len(dk), 8; got != want { + t.Errorf("wrong len(deposedkey) %d; want %d", got, want) + } + }) +} diff --git a/pkg/states/state.go b/pkg/states/state.go new file mode 100644 index 00000000000..a1dd37c9f42 --- /dev/null +++ b/pkg/states/state.go @@ -0,0 +1,639 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/getproviders" +) + +// State is the top-level type of OpenTofu state. +// +// A state should be mutated only via its accessor methods, to ensure that +// invariants are preserved. +// +// Access to State and the nested values within it is not concurrency-safe, +// so when accessing a State object concurrently it is the caller's +// responsibility to ensure that only one write is in progress at a time +// and that reads only occur when no write is in progress. The most common +// way to achieve this is to wrap the State in a SyncState and use the +// higher-level atomic operations supported by that type. +type State struct { + // Modules contains the state for each module. The keys in this map are + // an implementation detail and must not be used by outside callers. + Modules map[string]*Module + + // CheckResults contains a snapshot of the statuses of checks at the + // end of the most recent update to the state. Callers might compare + // checks between runs to see if e.g. a previously-failing check has + // been fixed since the last run, or similar. + // + // CheckResults can be nil to indicate that there are no check results + // from the previous run at all, which is subtly different than the + // previous run having affirmatively recorded that there are no checks + // to run. For example, if this object was created from a state snapshot + // created by a version of OpenTofu that didn't yet support checks + // then this field will be nil. + CheckResults *CheckResults +} + +// NewState constructs a minimal empty state, containing an empty root module. 
+func NewState() *State { + modules := map[string]*Module{} + modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) + return &State{ + Modules: modules, + } +} + +// BuildState is a helper -- primarily intended for tests -- to build a state +// using imperative code against the StateSync type while still acting as +// an expression of type *State to assign into a containing struct. +func BuildState(cb func(*SyncState)) *State { + s := NewState() + cb(s.SyncWrapper()) + return s +} + +// Empty returns true if there are no resources or populated output values +// in the receiver. In other words, if this state could be safely replaced +// with the return value of NewState and be functionally equivalent. +func (s *State) Empty() bool { + if s == nil { + return true + } + for _, ms := range s.Modules { + if len(ms.Resources) != 0 { + return false + } + if len(ms.OutputValues) != 0 { + return false + } + } + return true +} + +// Module returns the state for the module with the given address, or nil if +// the requested module is not tracked in the state. +func (s *State) Module(addr addrs.ModuleInstance) *Module { + if s == nil { + panic("State.Module on nil *State") + } + return s.Modules[addr.String()] +} + +// ModuleInstances returns the set of Module states that matches the given path. +func (s *State) ModuleInstances(addr addrs.Module) []*Module { + var ms []*Module + for _, m := range s.Modules { + if m.Addr.Module().Equal(addr) { + ms = append(ms, m) + } + } + return ms +} + +// ModuleOutputs returns all outputs for the given module call under the +// parentAddr instance. 
+func (s *State) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue { + var os []*OutputValue + for _, m := range s.Modules { + // can't get outputs from the root module + if m.Addr.IsRoot() { + continue + } + + parent, call := m.Addr.Call() + // make sure this is a descendent in the correct path + if !parentAddr.Equal(parent) { + continue + } + + // and check if this is the correct child + if call.Name != module.Name { + continue + } + + for _, o := range m.OutputValues { + os = append(os, o) + } + } + + return os +} + +// RemoveModule removes the module with the given address from the state, +// unless it is the root module. The root module cannot be deleted, and so +// this method will panic if that is attempted. +// +// Removing a module implicitly discards all of the resources, outputs and +// local values within it, and so this should usually be done only for empty +// modules. For callers accessing the state through a SyncState wrapper, modules +// are automatically pruned if they are empty after one of their contained +// elements is removed. +func (s *State) RemoveModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + panic("attempted to remove root module") + } + + delete(s.Modules, addr.String()) +} + +// RootModule is a convenient alias for Module(addrs.RootModuleInstance). +func (s *State) RootModule() *Module { + if s == nil { + panic("RootModule called on nil State") + } + return s.Modules[addrs.RootModuleInstance.String()] +} + +// EnsureModule returns the state for the module with the given address, +// creating and adding a new one if necessary. +// +// Since this might modify the state to add a new instance, it is considered +// to be a write operation. 
+func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { + ms := s.Module(addr) + if ms == nil { + ms = NewModule(addr) + s.Modules[addr.String()] = ms + } + return ms +} + +// HasManagedResourceInstanceObjects returns true if there is at least one +// resource instance object (current or deposed) associated with a managed +// resource in the receiving state. +// +// A true result would suggest that just discarding this state without first +// destroying these objects could leave "dangling" objects in remote systems, +// no longer tracked by any OpenTofu state. +func (s *State) HasManagedResourceInstanceObjects() bool { + if s == nil { + return false + } + for _, ms := range s.Modules { + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + for _, is := range rs.Instances { + if is.Current != nil || len(is.Deposed) != 0 { + return true + } + } + } + } + return false +} + +// Resource returns the state for the resource with the given address, or nil +// if no such resource is tracked in the state. +func (s *State) Resource(addr addrs.AbsResource) *Resource { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.Resource(addr.Resource) +} + +// Resources returns the set of resources that match the given configuration path. +func (s *State) Resources(addr addrs.ConfigResource) []*Resource { + var ret []*Resource + for _, m := range s.ModuleInstances(addr.Module) { + r := m.Resource(addr.Resource) + if r != nil { + ret = append(ret, r) + } + } + return ret +} + +// AllManagedResourceInstanceObjectAddrs returns a set of addresses for all of +// the leaf resource instance objects associated with managed resources that +// are tracked in this state. +// +// This result is the set of objects that would be effectively "forgotten" +// (like "tofu state rm") if this state were totally discarded, such as +// by deleting a workspace. 
This function is intended only for reporting +// context in error messages, such as when we reject deleting a "non-empty" +// workspace as detected by s.HasManagedResourceInstanceObjects. +// +// The ordering of the result is meaningless but consistent. DeposedKey will +// be NotDeposed (the zero value of DeposedKey) for any "current" objects. +// This method is guaranteed to return at least one item if +// s.HasManagedResourceInstanceObjects returns true for the same state, and +// to return a zero-length slice if it returns false. +func (s *State) AllResourceInstanceObjectAddrs() []struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey +} { + if s == nil { + return nil + } + + // We use an unnamed return type here just because we currently have no + // general need to return pairs of instance address and deposed key aside + // from this method, and this method itself is only of marginal value + // when producing some error messages. + // + // If that need ends up arising more in future then it might make sense to + // name this as addrs.AbsResourceInstanceObject, although that would require + // moving DeposedKey into the addrs package too. 
+ type ResourceInstanceObject = struct { + Instance addrs.AbsResourceInstance + DeposedKey DeposedKey + } + var ret []ResourceInstanceObject + + for _, ms := range s.Modules { + for _, rs := range ms.Resources { + if rs.Addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + + for instKey, is := range rs.Instances { + instAddr := rs.Addr.Instance(instKey) + if is.Current != nil { + ret = append(ret, ResourceInstanceObject{instAddr, NotDeposed}) + } + for deposedKey := range is.Deposed { + ret = append(ret, ResourceInstanceObject{instAddr, deposedKey}) + } + } + } + } + + sort.SliceStable(ret, func(i, j int) bool { + objI, objJ := ret[i], ret[j] + switch { + case !objI.Instance.Equal(objJ.Instance): + return objI.Instance.Less(objJ.Instance) + default: + return objI.DeposedKey < objJ.DeposedKey + } + }) + + return ret +} + +// ResourceInstance returns the state for the resource instance with the given +// address, or nil if no such resource is tracked in the state. +func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + if s == nil { + panic("State.ResourceInstance on nil *State") + } + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.ResourceInstance(addr.Resource) +} + +// OutputValue returns the state for the output value with the given address, +// or nil if no such output value is tracked in the state. +func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.OutputValues[addr.OutputValue.Name] +} + +// LocalValue returns the value of the named local value with the given address, +// or cty.NilVal if no such value is tracked in the state. 
+func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { + ms := s.Module(addr.Module) + if ms == nil { + return cty.NilVal + } + return ms.LocalValues[addr.LocalValue.Name] +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving state. +// +// The result is de-duplicated so that each distinct address appears only once. +func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { + if s == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, ms := range s.Modules { + for _, rc := range ms.Resources { + m[rc.ProviderConfig.String()] = rc.ProviderConfig + } + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// ProviderRequirements returns a description of all of the providers that +// are required to work with the receiving state. +// +// Because the state does not track specific version information for providers, +// the requirements returned by this method will always be unconstrained. +// The result should usually be merged with a Requirements derived from the +// current configuration in order to apply some constraints. +func (s *State) ProviderRequirements() getproviders.Requirements { + configAddrs := s.ProviderAddrs() + ret := make(getproviders.Requirements, len(configAddrs)) + for _, configAddr := range configAddrs { + ret[configAddr.Provider] = nil // unconstrained dependency + } + return ret +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. 
+// +// This should generally be used only after a "tofu destroy" operation, +// to finalize the cleanup of the state. It is not correct to use this after +// other operations because if a resource has "count = 0" or "for_each" over +// an empty collection then we want to retain it in the state so that references +// to it, particularly in "strange" contexts like "tofu console", can be +// properly resolved. +// +// This method MUST NOT be called concurrently with other readers and writers +// of the receiving state. +func (s *State) PruneResourceHusks() { + for _, m := range s.Modules { + m.PruneResourceHusks() + if len(m.Resources) == 0 && !m.Addr.IsRoot() { + s.RemoveModule(m.Addr) + } + } +} + +// SyncWrapper returns a SyncState object wrapping the receiver. +func (s *State) SyncWrapper() *SyncState { + return &SyncState{ + state: s, + } +} + +// MoveAbsResource moves the given src AbsResource's current state to the new +// dst address. This will panic if the src AbsResource does not exist in state, +// or if there is already a resource at the dst address. It is the caller's +// responsibility to verify the validity of the move (for example, that the src +// and dst are compatible types). +func (s *State) MoveAbsResource(src, dst addrs.AbsResource) { + // verify that the src address exists and the dst address does not + rs := s.Resource(src) + if rs == nil { + panic(fmt.Sprintf("no state for src address %s", src.String())) + } + + ds := s.Resource(dst) + if ds != nil { + panic(fmt.Sprintf("dst resource %s already exists", dst.String())) + } + + ms := s.Module(src.Module) + ms.RemoveResource(src.Resource) + + // Remove the module if it is empty (and not root) after removing the + // resource. 
+	if !ms.Addr.IsRoot() && ms.empty() {
+		s.RemoveModule(src.Module)
+	}
+
+	// Update the address before adding it to the state
+	rs.Addr = dst
+	s.EnsureModule(dst.Module).Resources[dst.Resource.String()] = rs
+}
+
+// MaybeMoveAbsResource moves the given src AbsResource's current state to the
+// new dst address. This function will succeed if the src address exists in
+// state and the dst address does not; the return value indicates whether or
+// not the move occurred. This function will panic if both the src and the dst
+// exist, or if neither does.
+func (s *State) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool {
+	// Get the source and destination addresses from state.
+	rs := s.Resource(src)
+	ds := s.Resource(dst)
+
+	// Normal case: the src exists in state, dst does not
+	if rs != nil && ds == nil {
+		s.MoveAbsResource(src, dst)
+		return true
+	}
+
+	if rs == nil && ds != nil {
+		// The source is not in state, the destination is. This is not
+		// guaranteed to be idempotent since we aren't tracking exact moves, but
+		// it's useful information for the caller.
+		return false
+	} else {
+		panic("invalid move")
+	}
+}
+
+// MoveAbsResourceInstance moves the given src AbsResourceInstance's current state to
+// the new dst address. This will panic if the src AbsResourceInstance does not
+// exist in state, or if there is already a resource at the dst address. It is
+// the caller's responsibility to verify the validity of the move (for example,
+// that the src and dst are compatible types). 
+func (s *State) MoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) {
+	srcInstanceState := s.ResourceInstance(src)
+	if srcInstanceState == nil {
+		panic(fmt.Sprintf("no state for src address %s", src.String()))
+	}
+
+	dstInstanceState := s.ResourceInstance(dst)
+	if dstInstanceState != nil {
+		panic(fmt.Sprintf("dst resource %s already exists", dst.String()))
+	}
+
+	srcResourceState := s.Resource(src.ContainingResource())
+	srcProviderAddr := srcResourceState.ProviderConfig
+	dstResourceAddr := dst.ContainingResource()
+
+	// Remove the source resource instance from the module's state, and then the
+	// module if empty.
+	ms := s.Module(src.Module)
+	ms.ForgetResourceInstanceAll(src.Resource)
+	if !ms.Addr.IsRoot() && ms.empty() {
+		s.RemoveModule(src.Module)
+	}
+
+	dstModule := s.EnsureModule(dst.Module)
+
+	// See if there is already a resource we can add this instance to.
+	dstResourceState := s.Resource(dstResourceAddr)
+	if dstResourceState == nil {
+		// If we're moving to an address without an index then that
+		// suggests the user's intent is to establish both the
+		// resource and the instance at the same time (since the
+		// address covers both). If there's an index in the
+		// target then allow creating the new instance here.
+		dstModule.SetResourceProvider(
+			dstResourceAddr.Resource,
+			srcProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource
+		)
+		dstResourceState = dstModule.Resource(dstResourceAddr.Resource)
+	}
+
+	dstResourceState.Instances[dst.Resource.Key] = srcInstanceState
+}
+
+// MaybeMoveAbsResourceInstance moves the given src AbsResourceInstance's
+// current state to the new dst address. This function will succeed if the src
+// address exists in state and the dst address does not; the return value
+// indicates whether or not the move occurred. This function will panic if
+// both the src and the dst exist, or if neither does. 
+func (s *State) MaybeMoveAbsResourceInstance(src, dst addrs.AbsResourceInstance) bool {
+	// get the src and dst resource instances from state
+	rs := s.ResourceInstance(src)
+	ds := s.ResourceInstance(dst)
+
+	// Normal case: the src exists in state, dst does not
+	if rs != nil && ds == nil {
+		s.MoveAbsResourceInstance(src, dst)
+		return true
+	}
+
+	if rs == nil && ds != nil {
+		// The source is not in state, the destination is. This is not
+		// guaranteed to be idempotent since we aren't tracking exact moves, but
+		// it's useful information.
+		return false
+	} else {
+		panic("invalid move")
+	}
+}
+
+// MoveModuleInstance moves the given src ModuleInstance's current state to the
+// new dst address. This will panic if the src ModuleInstance does not
+// exist in state, or if there is already a module at the dst address. It is
+// the caller's responsibility to verify the validity of the move.
+func (s *State) MoveModuleInstance(src, dst addrs.ModuleInstance) {
+	if src.IsRoot() || dst.IsRoot() {
+		panic("cannot move to or from root module")
+	}
+
+	srcMod := s.Module(src)
+	if srcMod == nil {
+		panic(fmt.Sprintf("no state for src module %s", src.String()))
+	}
+
+	dstMod := s.Module(dst)
+	if dstMod != nil {
+		panic(fmt.Sprintf("dst module %s already exists in state", dst.String()))
+	}
+
+	s.RemoveModule(src)
+
+	srcMod.Addr = dst
+	s.EnsureModule(dst)
+	s.Modules[dst.String()] = srcMod
+
+	// Update any Resource's addresses.
+	if srcMod.Resources != nil {
+		for _, r := range srcMod.Resources {
+			r.Addr.Module = dst
+		}
+	}
+
+	// Update any OutputValues's addresses.
+	if srcMod.OutputValues != nil {
+		for _, ov := range srcMod.OutputValues {
+			ov.Addr.Module = dst
+		}
+	}
+}
+
+// MaybeMoveModuleInstance moves the given src ModuleInstance's current state to
+// the new dst address. This function will succeed if the src address exists in
+// state and the dst address does not; the return value indicates +// whether or not the move occurred. 
This function will panic if either the src +// does not exist or the dst does exist (but not both). +func (s *State) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool { + if src.IsRoot() || dst.IsRoot() { + panic("cannot move to or from root module") + } + + srcMod := s.Module(src) + dstMod := s.Module(dst) + + // Normal case: the src exists in state, dst does not + if srcMod != nil && dstMod == nil { + s.MoveModuleInstance(src, dst) + return true + } + + if srcMod == nil || src.IsRoot() && dstMod != nil { + // The source is not in state, the destination is. This is not + // guaranteed to be idempotent since we aren't tracking exact moves, but + // it's useful information. + return false + } else { + panic("invalid move") + } +} + +// MoveModule takes a source and destination addrs.Module address, and moves all +// state Modules which are contained by the src address to the new address. +func (s *State) MoveModule(src, dst addrs.AbsModuleCall) { + if src.Module.IsRoot() || dst.Module.IsRoot() { + panic("cannot move to or from root module") + } + + // Modules only exist as ModuleInstances in state, so we need to check each + // state Module and see if it is contained by the src address to get a full + // list of modules to move. + var srcMIs []*Module + for _, module := range s.Modules { + if !module.Addr.IsRoot() { + if src.Module.TargetContains(module.Addr) { + srcMIs = append(srcMIs, module) + } + } + } + + if len(srcMIs) == 0 { + panic(fmt.Sprintf("no matching module instances found for src module %s", src.String())) + } + + for _, ms := range srcMIs { + newInst := make(addrs.ModuleInstance, len(ms.Addr)) + copy(newInst, ms.Addr) + if ms.Addr.IsDeclaredByCall(src) { + // Easy case: we just need to update the last step with the new name + newInst[len(newInst)-1].Name = dst.Call.Name + } else { + // Trickier: this Module is a submodule. 
we need to find and update + // only that appropriate step + for s := range newInst { + if newInst[s].Name == src.Call.Name { + newInst[s].Name = dst.Call.Name + } + } + } + s.MoveModuleInstance(ms.Addr, newInst) + } +} diff --git a/pkg/states/state_deepcopy.go b/pkg/states/state_deepcopy.go new file mode 100644 index 00000000000..8682ab8177d --- /dev/null +++ b/pkg/states/state_deepcopy.go @@ -0,0 +1,239 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty" +) + +// Taking deep copies of states is an important operation because state is +// otherwise a mutable data structure that is challenging to share across +// many separate callers. It is important that the DeepCopy implementations +// in this file comprehensively copy all parts of the state data structure +// that could be mutated via pointers. + +// DeepCopy returns a new state that contains equivalent data to the reciever +// but shares no backing memory in common. +// +// As with all methods on State, this method is not safe to use concurrently +// with writing to any portion of the recieving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + modules := make(map[string]*Module, len(s.Modules)) + for k, m := range s.Modules { + modules[k] = m.DeepCopy() + } + return &State{ + Modules: modules, + CheckResults: s.CheckResults.DeepCopy(), + } +} + +// DeepCopy returns a new module state that contains equivalent data to the +// receiver but shares no backing memory in common. 
+// +// As with all methods on Module, this method is not safe to use concurrently +// with writing to any portion of the recieving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (ms *Module) DeepCopy() *Module { + if ms == nil { + return nil + } + + resources := make(map[string]*Resource, len(ms.Resources)) + for k, r := range ms.Resources { + resources[k] = r.DeepCopy() + } + outputValues := make(map[string]*OutputValue, len(ms.OutputValues)) + for k, v := range ms.OutputValues { + outputValues[k] = v.DeepCopy() + } + localValues := make(map[string]cty.Value, len(ms.LocalValues)) + for k, v := range ms.LocalValues { + // cty.Value is immutable, so we don't need to copy these. + localValues[k] = v + } + + return &Module{ + Addr: ms.Addr, // technically mutable, but immutable by convention + Resources: resources, + OutputValues: outputValues, + LocalValues: localValues, + } +} + +// DeepCopy returns a new resource state that contains equivalent data to the +// receiver but shares no backing memory in common. +// +// As with all methods on Resource, this method is not safe to use concurrently +// with writing to any portion of the recieving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. 
+func (rs *Resource) DeepCopy() *Resource { + if rs == nil { + return nil + } + + instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances)) + for k, i := range rs.Instances { + instances[k] = i.DeepCopy() + } + + return &Resource{ + Addr: rs.Addr, + Instances: instances, + ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention + } +} + +// DeepCopy returns a new resource instance state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstance, this method is not safe to use +// concurrently with writing to any portion of the recieving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (i *ResourceInstance) DeepCopy() *ResourceInstance { + if i == nil { + return nil + } + + deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(i.Deposed)) + for k, obj := range i.Deposed { + deposed[k] = obj.DeepCopy() + } + + return &ResourceInstance{ + Current: i.Current.DeepCopy(), + Deposed: deposed, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObjectSrc, this method is not safe to +// use concurrently with writing to any portion of the recieving data structure. +// It is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. 
+func (os *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { + if os == nil { + return nil + } + + var attrsFlat map[string]string + if os.AttrsFlat != nil { + attrsFlat = make(map[string]string, len(os.AttrsFlat)) + for k, v := range os.AttrsFlat { + attrsFlat[k] = v + } + } + + var attrsJSON []byte + if os.AttrsJSON != nil { + attrsJSON = make([]byte, len(os.AttrsJSON)) + copy(attrsJSON, os.AttrsJSON) + } + + var attrPaths []cty.PathValueMarks + if os.AttrSensitivePaths != nil { + attrPaths = make([]cty.PathValueMarks, len(os.AttrSensitivePaths)) + copy(attrPaths, os.AttrSensitivePaths) + } + + var private []byte + if os.Private != nil { + private = make([]byte, len(os.Private)) + copy(private, os.Private) + } + + // Some addrs.Referencable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here. + var dependencies []addrs.ConfigResource + if os.Dependencies != nil { + dependencies = make([]addrs.ConfigResource, len(os.Dependencies)) + copy(dependencies, os.Dependencies) + } + + return &ResourceInstanceObjectSrc{ + Status: os.Status, + SchemaVersion: os.SchemaVersion, + Private: private, + AttrsFlat: attrsFlat, + AttrsJSON: attrsJSON, + AttrSensitivePaths: attrPaths, + Dependencies: dependencies, + CreateBeforeDestroy: os.CreateBeforeDestroy, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObject, this method is not safe to use +// concurrently with writing to any portion of the recieving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. 
+func (o *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { + if o == nil { + return nil + } + + var private []byte + if o.Private != nil { + private = make([]byte, len(o.Private)) + copy(private, o.Private) + } + + // Some addrs.Referenceable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here. + var dependencies []addrs.ConfigResource + if o.Dependencies != nil { + dependencies = make([]addrs.ConfigResource, len(o.Dependencies)) + copy(dependencies, o.Dependencies) + } + + return &ResourceInstanceObject{ + Value: o.Value, + Status: o.Status, + Private: private, + Dependencies: dependencies, + CreateBeforeDestroy: o.CreateBeforeDestroy, + } +} + +// DeepCopy returns a new output value state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on OutputValue, this method is not safe to use +// concurrently with writing to any portion of the recieving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (os *OutputValue) DeepCopy() *OutputValue { + if os == nil { + return nil + } + + return &OutputValue{ + Addr: os.Addr, + Value: os.Value, + Sensitive: os.Sensitive, + } +} diff --git a/pkg/states/state_equal.go b/pkg/states/state_equal.go new file mode 100644 index 00000000000..b5c9b85b67e --- /dev/null +++ b/pkg/states/state_equal.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package states
+
+import (
+	"reflect"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+)
+
+// Equal returns true if the receiver is functionally equivalent to other,
+// including any ephemeral portions of the state that would not be included
+// if the state were saved to files.
+//
+// To test only the persistent portions of two states for equality, instead
+// use statefile.StatesMarshalEqual.
+func (s *State) Equal(other *State) bool {
+	// For the moment this is sufficient, but we may need to do something
+	// more elaborate in future if we have any portions of state that require
+	// more sophisticated comparisons.
+	return reflect.DeepEqual(s, other)
+}
+
+// ManagedResourcesEqual returns true if all of the managed resources tracked
+// in the receiver are functionally equivalent to the same tracked in the
+// other given state.
+//
+// This is a more constrained version of Equal that disregards other
+// differences, including but not limited to changes to data resources and
+// changes to output values.
+func (s *State) ManagedResourcesEqual(other *State) bool {
+	// First, some accommodations for situations where one of the objects is
+	// nil, for robustness since we sometimes use a nil state to represent
+	// a prior state being entirely absent.
+	if s == nil && other == nil {
+		return true
+	}
+	if s == nil {
+		return !other.HasManagedResourceInstanceObjects()
+	}
+	if other == nil {
+		return !s.HasManagedResourceInstanceObjects()
+	}
+
+	// If we get here then both states are non-nil.
+
+	// sameManagedResources tests that its second argument has all the
+	// resources that the first one does, so we'll call it twice with the
+	// arguments inverted to ensure that we'll also catch situations where
+	// the second has resources that the first does not. 
+ return sameManagedResources(s, other) && sameManagedResources(other, s) +} + +func sameManagedResources(s1, s2 *State) bool { + for _, ms := range s1.Modules { + for _, rs := range ms.Resources { + addr := rs.Addr + if addr.Resource.Mode != addrs.ManagedResourceMode { + continue + } + otherRS := s2.Resource(addr) + if !reflect.DeepEqual(rs, otherRS) { + return false + } + } + } + + return true + +} diff --git a/pkg/states/state_string.go b/pkg/states/state_string.go new file mode 100644 index 00000000000..fb0121ad4b6 --- /dev/null +++ b/pkg/states/state_string.go @@ -0,0 +1,277 @@ +package states + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" +) + +// String returns a rather-odd string representation of the entire state. +// +// This is intended to match the behavior of the older tofu.State.String +// method that is used in lots of existing tests. It should not be used in +// new tests: instead, use "cmp" to directly compare the state data structures +// and print out a diff if they do not match. +// +// This method should never be used in non-test code, whether directly by call +// or indirectly via a %s or %q verb in package fmt. +func (s *State) String() string { + if s == nil { + return "" + } + + // sort the modules by name for consistent output + modules := make([]string, 0, len(s.Modules)) + for m := range s.Modules { + modules = append(modules, m) + } + sort.Strings(modules) + + var buf bytes.Buffer + for _, name := range modules { + m := s.Modules[name] + mStr := m.testString() + + // If we're the root module, we just write the output directly. + if m.Addr.IsRoot() { + buf.WriteString(mStr + "\n") + continue + } + + // We need to build out a string that resembles the not-quite-standard + // format that tofu.State.String used to use, where there's a + // "module." 
prefix but then just a chain of all of the module names + // without any further "module." portions. + buf.WriteString("module") + for _, step := range m.Addr { + buf.WriteByte('.') + buf.WriteString(step.Name) + if step.InstanceKey != addrs.NoKey { + buf.WriteString(step.InstanceKey.String()) + } + } + buf.WriteString(":\n") + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// testString is used to produce part of the output of State.String. It should +// never be used directly. +func (ms *Module) testString() string { + var buf bytes.Buffer + + if len(ms.Resources) == 0 { + buf.WriteString("") + } + + // We use AbsResourceInstance here, even though everything belongs to + // the same module, just because we have a sorting behavior defined + // for those but not for just ResourceInstance. + addrsOrder := make([]addrs.AbsResourceInstance, 0, len(ms.Resources)) + for _, rs := range ms.Resources { + for ik := range rs.Instances { + addrsOrder = append(addrsOrder, rs.Addr.Instance(ik)) + } + } + + sort.Slice(addrsOrder, func(i, j int) bool { + return addrsOrder[i].Less(addrsOrder[j]) + }) + + for _, fakeAbsAddr := range addrsOrder { + addr := fakeAbsAddr.Resource + rs := ms.Resource(addr.ContainingResource()) + is := ms.ResourceInstance(addr) + + // Here we need to fake up a legacy-style address as the old state + // types would've used, since that's what our tests against those + // old types expect. The significant difference is that instancekey + // is dot-separated rather than using index brackets. + k := addr.ContainingResource().String() + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + k = fmt.Sprintf("%s.%d", k, tk) + default: + // No other key types existed for the legacy types, so we + // can do whatever we want here. 
We'll just use our standard + // syntax for these. + k = k + tk.String() + } + } + + id := LegacyInstanceObjectID(is.Current) + + taintStr := "" + if is.Current != nil && is.Current.Status == ObjectTainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(is.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) + + // Attributes were a flatmap before, but are not anymore. To preserve + // our old output as closely as possible we need to do a conversion + // to flatmap. Normally we'd want to do this with schema for + // accuracy, but for our purposes here it only needs to be approximate. + // This should produce an identical result for most cases, though + // in particular will differ in a few cases: + // - The keys used for elements in a set will be different + // - Values for attributes of type cty.DynamicPseudoType will be + // misinterpreted (but these weren't possible in old world anyway) + var attributes map[string]string + if obj := is.Current; obj != nil { + switch { + case obj.AttrsFlat != nil: + // Easy (but increasingly unlikely) case: the state hasn't + // actually been upgraded to the new form yet. 
+ attributes = obj.AttrsFlat + case obj.AttrsJSON != nil: + ty, err := ctyjson.ImpliedType(obj.AttrsJSON) + if err == nil { + val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) + if err == nil { + attributes = hcl2shim.FlatmapValueFromHCL2(val) + } + } + } + } + attrKeys := make([]string, 0, len(attributes)) + for ak, val := range attributes { + if ak == "id" { + continue + } + + // don't show empty containers in the output + if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + // CAUTION: Since deposed keys are now random strings instead of + // incrementing integers, this result will not be deterministic + // if there is more than one deposed object. + i := 1 + for _, t := range is.Deposed { + id := LegacyInstanceObjectID(t) + taintStr := "" + if t.Status == ObjectTainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) + i++ + } + + if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { + buf.WriteString("\n Dependencies:\n") + for _, dep := range obj.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) + } + } + } + + if len(ms.OutputValues) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(ms.OutputValues)) + for k := range ms.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + v := ms.OutputValues[k] + lv := hcl2shim.ConfigValueFromHCL2(v.Value) + switch vTyped := lv.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + 
mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + default: + buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) + } + } + } + + return buf.String() +} + +// LegacyInstanceObjectID is a helper for extracting an object id value from +// an instance object in a way that approximates how we used to do this +// for the old state types. ID is no longer first-class, so this is preserved +// only for compatibility with old tests that include the id as part of their +// expected value. +func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { + if obj == nil { + return "" + } + + if obj.AttrsJSON != nil { + type WithID struct { + ID string `json:"id"` + } + var withID WithID + err := json.Unmarshal(obj.AttrsJSON, &withID) + if err == nil { + return withID.ID + } + } else if obj.AttrsFlat != nil { + if flatID, exists := obj.AttrsFlat["id"]; exists { + return flatID + } + } + + // For resource types created after we removed id as special there may + // not actually be one at all. This is okay because older tests won't + // encounter this, and new tests shouldn't be using ids. + return "" +} diff --git a/pkg/states/state_test.go b/pkg/states/state_test.go new file mode 100644 index 00000000000..d4ec353751e --- /dev/null +++ b/pkg/states/state_test.go @@ -0,0 +1,1013 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-test/deep" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang/marks" +) + +func TestState(t *testing.T) { + // This basic tests exercises the main mutation methods to construct + // a state. 
It is not fully comprehensive, so other tests should visit + // more esoteric codepaths. + + state := NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) + multiModA := state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("a"))) + multiModA.SetOutputValue("pizza", cty.StringVal("cheese"), false) + multiModB := state.EnsureModule(addrs.RootModuleInstance.Child("multi", addrs.StringKey("b"))) + multiModB.SetOutputValue("pizza", cty.StringVal("sausage"), false) + + want := &State{ + Modules: map[string]*Module{ + "": { + Addr: addrs.RootModuleInstance, + LocalValues: map[string]cty.Value{ + "foo": cty.StringVal("foo value"), + }, + OutputValues: map[string]*OutputValue{ + "bar": { + Addr: addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{ + Name: "bar", + }, + }, + Value: cty.StringVal("bar value"), + Sensitive: false, + }, + "secret": { + Addr: addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{ + Name: "secret", + }, + }, + Value: cty.StringVal("secret value"), + Sensitive: true, + }, + }, + Resources: map[string]*Resource{ + "test_thing.baz": { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, 
+ Type: "test_thing", + Name: "baz", + }.Absolute(addrs.RootModuleInstance), + + Instances: map[addrs.InstanceKey]*ResourceInstance{ + addrs.IntKey(0): { + Current: &ResourceInstanceObjectSrc{ + SchemaVersion: 1, + Status: ObjectReady, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + }, + }, + ProviderConfig: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + }, + }, + "module.child": { + Addr: addrs.RootModuleInstance.Child("child", addrs.NoKey), + LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("child", addrs.NoKey), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("hawaiian"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + `module.multi["a"]`: { + Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), + LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("multi", addrs.StringKey("a")), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("cheese"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + `module.multi["b"]`: { + Addr: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), + LocalValues: map[string]cty.Value{}, + OutputValues: map[string]*OutputValue{ + "pizza": { + Addr: addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("multi", addrs.StringKey("b")), + OutputValue: addrs.OutputValue{ + Name: "pizza", + }, + }, + Value: cty.StringVal("sausage"), + Sensitive: false, + }, + }, + Resources: map[string]*Resource{}, + }, + }, + } + + { + // Our structure goes deep, so we need to temporarily override the + // deep package settings to ensure that we visit the full 
structure. + oldDeepDepth := deep.MaxDepth + oldDeepCompareUnexp := deep.CompareUnexportedFields + deep.MaxDepth = 50 + deep.CompareUnexportedFields = true + defer func() { + deep.MaxDepth = oldDeepDepth + deep.CompareUnexportedFields = oldDeepCompareUnexp + }() + } + + for _, problem := range deep.Equal(state, want) { + t.Error(problem) + } + + expectedOutputs := map[string]string{ + `module.multi["a"].output.pizza`: "cheese", + `module.multi["b"].output.pizza`: "sausage", + } + + for _, o := range state.ModuleOutputs(addrs.RootModuleInstance, addrs.ModuleCall{Name: "multi"}) { + addr := o.Addr.String() + expected := expectedOutputs[addr] + delete(expectedOutputs, addr) + + if expected != o.Value.AsString() { + t.Fatalf("expected %q:%q, got %q", addr, expected, o.Value.AsString()) + } + } + + for addr, o := range expectedOutputs { + t.Fatalf("missing output %q:%q", addr, o) + } +} + +func TestStateDeepCopyObject(t *testing.T) { + obj := &ResourceInstanceObject{ + Value: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("id"), + }), + Private: []byte("private"), + Status: ObjectReady, + Dependencies: []addrs.ConfigResource{ + { + Module: addrs.RootModule, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }, + }, + }, + CreateBeforeDestroy: true, + } + + objCopy := obj.DeepCopy() + if !reflect.DeepEqual(obj, objCopy) { + t.Fatalf("not equal\n%#v\n%#v", obj, objCopy) + } +} + +func TestStateDeepCopy(t *testing.T) { + state := NewState() + + rootModule := state.RootModule() + if rootModule == nil { + t.Errorf("root module is nil; want valid object") + } + + rootModule.SetLocalValue("foo", cty.StringVal("foo value")) + rootModule.SetOutputValue("bar", cty.StringVal("bar value"), false) + rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + 
}.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + Private: []byte("private data"), + Dependencies: []addrs.ConfigResource{}, + CreateBeforeDestroy: true, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + // Sensitive path at "woozles" + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "woozles"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Private: []byte("private data"), + Dependencies: []addrs.ConfigResource{ + { + Module: addrs.RootModule, + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "baz", + }, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + childModule := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + childModule.SetOutputValue("pizza", cty.StringVal("hawaiian"), false) + + stateCopy := state.DeepCopy() + if !state.Equal(stateCopy) { + t.Fatalf("\nexpected:\n%q\ngot:\n%q\n", state, stateCopy) + } +} + +func TestStateHasResourceInstanceObjects(t *testing.T) { + providerConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("test/test"), + } + childModuleProviderConfig := addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("child"), + Provider: addrs.MustParseProviderSourceString("test/test"), + } + + tests := map[string]struct { + Setup func(ss *SyncState) + Want bool + }{ + "empty": { + func(ss *SyncState) {}, + false, + }, + "one current, ready object 
in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + true, + }, + "one current, ready object in child module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("module.child.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + childModuleProviderConfig, + ) + }, + true, + }, + "one current, tainted object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one deposed, ready object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceDeposed( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + DeposedKey("uhoh"), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + }, + true, + }, + "one empty resource husk in root module": { + func(ss *SyncState) { + // Current OpenTofu doesn't actually create resource husks + // as part of its everyday work, so this is a "should never + // happen" case but we'll test to make sure we're robust to + // it anyway, because this was a historical bug blocking + // "tofu workspace delete" and similar. 
+ ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectTainted, + }, + providerConfig, + ) + s := ss.Lock() + delete(s.Modules[""].Resources["test.foo"].Instances, addrs.NoKey) + ss.Unlock() + }, + false, + }, + "one current data resource object in root module": { + func(ss *SyncState) { + ss.SetResourceInstanceCurrent( + mustAbsResourceAddr("data.test.foo").Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: ObjectReady, + }, + providerConfig, + ) + }, + false, // data resources aren't managed resources, so they don't count + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + state := BuildState(test.Setup) + got := state.HasManagedResourceInstanceObjects() + if got != test.Want { + t.Errorf("wrong result\nstate content: (using legacy state string format; might not be comprehensive)\n%s\n\ngot: %t\nwant: %t", state, got, test.Want) + } + }) + } + +} + +func TestState_MoveAbsResource(t *testing.T) { + // Set up a starter state for the embedded tests, which should start from a copy of this state. 
+ state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + + t.Run("basic move", func(t *testing.T) { + s := state.DeepCopy() + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("one")) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(dstModule) + + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if s.Module(dstModule) == nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("from a child module to root", func(t *testing.T) { + s := state.DeepCopy() + srcModule := 
addrs.RootModuleInstance.Child("kinder", addrs.NoKey)
+ cm := s.EnsureModule(srcModule)
+ cm.SetResourceInstanceCurrent(
+ addrs.Resource{
+ Mode: addrs.ManagedResourceMode,
+ Type: "test_thing",
+ Name: "child",
+ }.Instance(addrs.IntKey(0)), // Moving the AbsResource moves all instances
+ &ResourceInstanceObjectSrc{
+ Status: ObjectReady,
+ SchemaVersion: 1,
+ AttrsJSON: []byte(`{"woozles":"confuzles"}`),
+ },
+ addrs.AbsProviderConfig{
+ Provider: addrs.NewDefaultProvider("test"),
+ Module: addrs.RootModule,
+ },
+ )
+ cm.SetResourceInstanceCurrent(
+ addrs.Resource{
+ Mode: addrs.ManagedResourceMode,
+ Type: "test_thing",
+ Name: "child",
+ }.Instance(addrs.IntKey(1)), // Moving the AbsResource moves all instances
+ &ResourceInstanceObjectSrc{
+ Status: ObjectReady,
+ SchemaVersion: 1,
+ AttrsJSON: []byte(`{"woozles":"confuzles"}`),
+ },
+ addrs.AbsProviderConfig{
+ Provider: addrs.NewDefaultProvider("test"),
+ Module: addrs.RootModule,
+ },
+ )
+
+ src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule)
+ dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(addrs.RootModuleInstance)
+ s.MoveAbsResource(src, dst)
+
+ if s.Empty() {
+ t.Fatal("unexpected empty state")
+ }
+
+ // The child module should have been removed after removing its only resource
+ if s.Module(srcModule) != nil {
+ t.Fatalf("child module %s was not removed from state after mv", srcModule.String())
+ }
+
+ if len(s.RootModule().Resources) != 2 {
+ t.Fatalf("wrong number of resources in state; expected 2, found %d", len(s.RootModule().Resources))
+ }
+
+ if len(s.Resource(dst).Instances) != 2 {
+ t.Fatalf("wrong number of resource instances for dst, got %d expected 2", len(s.Resource(dst).Instances))
+ }
+
+ got := s.Resource(dst)
+ if got.Addr.Resource != dst.Resource {
+ t.Fatalf("dst resource not in state")
+ }
+ })
+
+ t.Run("module to new module", func(t *testing.T) {
+ s := NewState()
+ 
srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("module to new module", func(t *testing.T) { + s := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("exists")) + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("new")) + cm := s.EnsureModule(srcModule) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "child", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: 
addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(srcModule) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "child"}.Absolute(dstModule) + s.MoveAbsResource(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + // The child module should have been removed after removing its only resource + if s.Module(srcModule) != nil { + t.Fatalf("child module %s was not removed from state after mv", srcModule.String()) + } + + gotMod := s.Module(dstModule) + if len(gotMod.Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(gotMod.Resources)) + } + + got := s.Resource(dst) + if got.Addr.Resource != dst.Resource { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResource(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.IntKey(0)), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "bar"}.Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Trying to move a resource that doesn't exist in state to a resource which does exist should be a noop. 
+ t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResource(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + // src resource from the state above + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + t.Run("resource to resource instance", func(t *testing.T) { + s := state.DeepCopy() + // For a little extra fun, move a resource to a resource instance: test_thing.foo to test_thing.foo[1] + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if len(s.RootModule().Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(state.RootModule().Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) + + t.Run("move to new module", func(t *testing.T) { + s := state.DeepCopy() + // test_thing.foo to module.kinder.test_thing.foo["baz"] + dstModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(dstModule) + + s.MoveAbsResourceInstance(src, dst) + + if s.Empty() { + t.Fatal("unexpected empty state") + } + + if s.Module(dstModule) == 
nil { + t.Fatalf("child module %s not in state", dstModule.String()) + } + + if len(s.Module(dstModule).Resources) != 1 { + t.Fatalf("wrong number of resources in state; expected 1, found %d", len(s.Module(dstModule).Resources)) + } + + got := s.ResourceInstance(dst) + if got == nil { + t.Fatalf("dst resource not in state") + } + }) +} + +func TestState_MaybeMoveAbsResourceInstance(t *testing.T) { + state := NewState() + rootModule := state.RootModule() + rootModule.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + // For a little extra fun, let's go from a resource to a resource instance: test_thing.foo to test_thing.bar[1] + src := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + dst := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "test_thing", Name: "foo"}.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + got := state.ResourceInstance(dst) + if got == nil { + t.Fatal("destination resource instance not in state") + } + }) + + // Moving a resource instance that doesn't exist in state to a resource which does exist should be a noop. 
+ t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveAbsResourceInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModuleInstance(t *testing.T) { + state := NewState() + srcModule := addrs.RootModuleInstance.Child("kinder", addrs.NoKey) + m := state.EnsureModule(srcModule) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dstModule := addrs.RootModuleInstance.Child("child", addrs.IntKey(3)) + state.MoveModuleInstance(srcModule, dstModule) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(state.Modules) != 2 { // kinder[3] and root + t.Fatalf("wrong number of modules in state. 
Expected 2, got %d", len(state.Modules)) + } + + got := state.Module(dstModule) + if got == nil { + t.Fatal("dstModule not found") + } + + gone := state.Module(srcModule) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + + r := got.Resource(mustAbsResourceAddr("test_thing.foo").Resource) + if r.Addr.Module.String() != dstModule.String() { + fmt.Println(r.Addr.Module.String()) + t.Fatal("resource address was not updated") + } + +} + +func TestState_MaybeMoveModuleInstance(t *testing.T) { + state := NewState() + src := addrs.RootModuleInstance.Child("child", addrs.StringKey("a")) + cm := state.EnsureModule(src) + cm.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + dst := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("b")) + + // First move, success + t.Run("first move", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if !moved { + t.Fatal("wrong result") + } + }) + + // Second move, should be a noop + t.Run("noop", func(t *testing.T) { + moved := state.MaybeMoveModuleInstance(src, dst) + if moved { + t.Fatal("wrong result") + } + }) +} + +func TestState_MoveModule(t *testing.T) { + // For this test, add two module instances (kinder and kinder["a"]). + // MoveModule(kinder) should move both instances. + state := NewState() // starter state, should be copied by the subtests. 
+ srcModule := addrs.RootModule.Child("kinder") + m := state.EnsureModule(srcModule.UnkeyedInstanceShim()) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + moduleInstance := addrs.RootModuleInstance.Child("kinder", addrs.StringKey("a")) + mi := state.EnsureModule(moduleInstance) + mi.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, mc := srcModule.Call() + src := mc.Absolute(addrs.RootModuleInstance.Child("kinder", addrs.NoKey)) + + t.Run("basic", func(t *testing.T) { + s := state.DeepCopy() + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + // srcModule should have been removed, dstModule should exist and have one resource + if len(s.Modules) != 3 { // child, child["a"] and root + t.Fatalf("wrong number of modules in state. 
Expected 3, got %d", len(s.Modules)) + } + + got := s.Module(dst.Module) + if got == nil { + t.Fatal("dstModule not found") + } + + got = s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if got == nil { + t.Fatal("dstModule instance \"a\" not found") + } + + gone := s.Module(srcModule.UnkeyedInstanceShim()) + if gone != nil { + t.Fatal("srcModule not removed from state") + } + }) + + t.Run("nested modules", func(t *testing.T) { + s := state.DeepCopy() + + // add a child module to module.kinder + mi := mustParseModuleInstanceStr(`module.kinder.module.grand[1]`) + m := s.EnsureModule(mi) + m.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }.Instance(addrs.NoKey), + &ResourceInstanceObjectSrc{ + Status: ObjectReady, + SchemaVersion: 1, + AttrsJSON: []byte(`{"woozles":"confuzles"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + _, dstMC := addrs.RootModule.Child("child").Call() + dst := dstMC.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + s.MoveModule(src, dst) + + moved := s.Module(addrs.RootModuleInstance.Child("child", addrs.StringKey("a"))) + if moved == nil { + t.Fatal("dstModule not found") + } + + // The nested module's relative address should also have been updated + nested := s.Module(mustParseModuleInstanceStr(`module.child.module.grand[1]`)) + if nested == nil { + t.Fatal("nested child module of src wasn't moved") + } + }) +} + +func mustParseModuleInstanceStr(str string) addrs.ModuleInstance { + addr, diags := addrs.ParseModuleInstanceStr(str) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +func mustAbsResourceAddr(s string) addrs.AbsResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} diff --git a/pkg/states/statefile/diagnostics.go b/pkg/states/statefile/diagnostics.go new file 
mode 100644 index 00000000000..7162a1ffc9a --- /dev/null +++ b/pkg/states/statefile/diagnostics.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +const invalidFormat = "Invalid state file format" + +// jsonUnmarshalDiags is a helper that translates errors returned from +// json.Unmarshal into hopefully-more-helpful diagnostics messages. +func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if err == nil { + return diags + } + + switch tErr := err.(type) { + case *json.SyntaxError: + // We've usually already successfully parsed a source file as JSON at + // least once before we'd use jsonUnmarshalDiags with it (to sniff + // the version number) so this particular error should not appear much + // in practice. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + // This is likely to be the most common area, describing a + // non-conformance between the file and the expected file format + // at a semantic level. + if tErr.Field != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), + )) + break + } else { + // Without a field name, we can't really say anything helpful. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + "The state file does not conform to the expected JSON data structure.", + )) + } + default: + // Fallback for all other types of errors. This can happen only for + // custom UnmarshalJSON implementations, so should be encountered + // only rarely. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), + )) + } + + return diags +} diff --git a/pkg/states/statefile/doc.go b/pkg/states/statefile/doc.go new file mode 100644 index 00000000000..b38841ed03c --- /dev/null +++ b/pkg/states/statefile/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package statefile deals with the file format used to serialize states for +// persistent storage and then deserialize them into memory again later. +package statefile diff --git a/pkg/states/statefile/file.go b/pkg/states/statefile/file.go new file mode 100644 index 00000000000..c8ac998e36e --- /dev/null +++ b/pkg/states/statefile/file.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + version "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/states" + tfversion "github.com/kubegems/opentofu/version" +) + +// File is the in-memory representation of a state file. It includes the state +// itself along with various metadata used to track changing state files for +// the same configuration over time. +type File struct { + // TerraformVersion is the version of OpenTofu that wrote this state file. + TerraformVersion *version.Version + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial uint64 + + // Lineage is set when a new, blank state file is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. 
+ // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string + + // State is the actual state represented by this file. + State *states.State +} + +func New(state *states.State, lineage string, serial uint64) *File { + // To make life easier on callers, we'll accept a nil state here and just + // allocate an empty one, which is required for this file to be successfully + // written out. + if state == nil { + state = states.NewState() + } + + return &File{ + TerraformVersion: tfversion.SemVer, + State: state, + Lineage: lineage, + Serial: serial, + } +} + +// DeepCopy is a convenience method to create a new File object whose state +// is a deep copy of the receiver's, as implemented by states.State.DeepCopy. +func (f *File) DeepCopy() *File { + if f == nil { + return nil + } + return &File{ + TerraformVersion: f.TerraformVersion, + Serial: f.Serial, + Lineage: f.Lineage, + State: f.State.DeepCopy(), + } +} diff --git a/pkg/states/statefile/marshal_equal.go b/pkg/states/statefile/marshal_equal.go new file mode 100644 index 00000000000..84691950438 --- /dev/null +++ b/pkg/states/statefile/marshal_equal.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "bytes" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" +) + +// StatesMarshalEqual returns true if and only if the two given states have +// an identical (byte-for-byte) statefile representation. +// +// This function compares only the portions of the state that are persisted +// in state files, so for example it will not return false if the only +// differences between the two states are local values or descendent module +// outputs. 
+func StatesMarshalEqual(a, b *states.State) bool { + var aBuf bytes.Buffer + var bBuf bytes.Buffer + + // nil states are not valid states, and so they can never marshal equal. + if a == nil || b == nil { + return false + } + + // We write here some temporary files that have no header information + // populated, thus ensuring that we're only comparing the state itself + // and not any metadata. + err := Write(&File{State: a}, &aBuf, encryption.StateEncryptionDisabled()) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + err = Write(&File{State: b}, &bBuf, encryption.StateEncryptionDisabled()) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + + return bytes.Equal(aBuf.Bytes(), bBuf.Bytes()) +} diff --git a/pkg/states/statefile/read.go b/pkg/states/statefile/read.go new file mode 100644 index 00000000000..d1f08b18e72 --- /dev/null +++ b/pkg/states/statefile/read.go @@ -0,0 +1,258 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os" + + version "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" +) + +// ErrNoState is returned by ReadState when the state file is empty. +var ErrNoState = errors.New("no state") + +// ErrUnusableState is an error wrapper to indicate that we *think* the input +// represents state data, but can't use it for some reason (as explained in the +// error text). Callers can check against this type with errors.As() if they +// need to distinguish between corrupt state and more fundamental problems like +// an empty file. 
+type ErrUnusableState struct { + inner error +} + +func errUnusable(err error) *ErrUnusableState { + return &ErrUnusableState{inner: err} +} +func (e *ErrUnusableState) Error() string { + return e.inner.Error() +} +func (e *ErrUnusableState) Unwrap() error { + return e.inner +} + +// Read reads a state from the given reader. +// +// Legacy state format versions 1 through 3 are supported, but the result will +// contain object attributes in the deprecated "flatmap" format and so must +// be upgraded by the caller before use. +// +// If the state file is empty, the special error value ErrNoState is returned. +// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics +// potentially describing multiple errors. +func Read(r io.Reader, enc encryption.StateEncryption) (*File, error) { + // Some callers provide us a "typed nil" *os.File here, which would + // cause us to panic below if we tried to use it. + if f, ok := r.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + var diags tfdiags.Diagnostics + + // We actually just buffer the whole thing in memory, because states are + // generally not huge and we need to be able to sniff for a version + // number before full parsing. 
+ src, err := io.ReadAll(r) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read state file", + fmt.Sprintf("The state file could not be read: %s", err), + )) + return nil, diags.Err() + } + + if len(src) == 0 { + return nil, ErrNoState + } + + decrypted, err := enc.DecryptState(src) + if err != nil { + return nil, err + } + + state, err := readState(decrypted) + if err != nil { + return nil, err + } + + if state == nil { + // Should never happen + panic("readState returned nil state with no errors") + } + + return state, diags.Err() +} + +func readState(src []byte) (*File, error) { + var diags tfdiags.Diagnostics + + if looksLikeVersion0(src) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + // This is a user-facing usage of OpenTofu but refers to a very old historical version of OpenTofu + // which has no corresponding OpenTofu version, and is unlikely to get one. + // If we ever get OpenTofu 0.6.16 and 0.7.x, we should update this message to mention OpenTofu instead. + "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", + )) + return nil, errUnusable(diags.Err()) + } + + version, versionDiags := sniffJSONStateVersion(src) + diags = diags.Append(versionDiags) + if versionDiags.HasErrors() { + // This is the last point where there's a really good chance it's not a + // state file at all. Past here, we'll assume errors mean it's state but + // we can't use it. + return nil, diags.Err() + } + + var result *File + var err error + switch version { + case 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file uses JSON syntax but has a version number of zero. 
There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", + )) + case 1: + result, diags = readStateV1(src) + case 2: + result, diags = readStateV2(src) + case 3: + result, diags = readStateV3(src) + case 4: + result, diags = readStateV4(src) + default: + thisVersion := tfversion.SemVer.String() + creatingVersion := sniffJSONStateTerraformVersion(src) + switch { + case creatingVersion != "": + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by OpenTofu %s. This state file was created by OpenTofu %s.", version, thisVersion, creatingVersion), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by OpenTofu %s. This state file may have been created by a newer version of OpenTofu.", version, thisVersion), + )) + } + } + + if diags.HasErrors() { + err = errUnusable(diags.Err()) + } + + return result, err +} + +func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + type VersionSniff struct { + Version *uint64 `json:"version"` + } + var sniff VersionSniff + err := json.Unmarshal(src, &sniff) + if err != nil { + switch tErr := err.(type) { + case *json.SyntaxError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The version in the state file is %s. 
A positive whole number is required.", tErr.Value), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file could not be parsed as JSON.", + )) + } + } + + if sniff.Version == nil { + encrypted, err := encryption.IsEncryptionPayload(src) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file can not be checked for presense of encryption: %s", err.Error()), + )) + return 0, diags + } + if encrypted { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "This state file is encrypted and can not be read without an encryption configuration", + )) + return 0, diags + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file does not have a \"version\" attribute, which is required to identify the format version.", + )) + return 0, diags + } + + return *sniff.Version, diags +} + +// sniffJSONStateTerraformVersion attempts to sniff the OpenTofu version +// specification from the given state file source code. The result is either +// a version string or an empty string if no version number could be extracted. +// +// This is a best-effort function intended to produce nicer error messages. It +// should not be used for any real processing. +func sniffJSONStateTerraformVersion(src []byte) string { + type VersionSniff struct { + Version string `json:"terraform_version"` + } + var sniff VersionSniff + + err := json.Unmarshal(src, &sniff) + if err != nil { + return "" + } + + // Attempt to parse the string as a version so we won't report garbage + // as a version number. + _, err = version.NewVersion(sniff.Version) + if err != nil { + return "" + } + + return sniff.Version +} + +// unsupportedFormat is a diagnostic summary message for when the state file +// seems to not be a state file at all, or is not a supported version. 
+// +// Use invalidFormat instead for the subtly-different case of "this looks like +// it's intended to be a state file but it's not structured correctly". +const unsupportedFormat = "Unsupported state file format" + +const upgradeFailed = "State format upgrade failed" diff --git a/pkg/states/statefile/read_test.go b/pkg/states/statefile/read_test.go new file mode 100644 index 00000000000..9779abbbb51 --- /dev/null +++ b/pkg/states/statefile/read_test.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "bytes" + "errors" + "os" + "testing" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/enctest" +) + +func TestReadErrNoState_emptyFile(t *testing.T) { + emptyFile, err := os.Open("testdata/read/empty") + if err != nil { + t.Fatal(err) + } + defer emptyFile.Close() + + _, err = Read(emptyFile, encryption.StateEncryptionDisabled()) + if !errors.Is(err, ErrNoState) { + t.Fatalf("expected ErrNoState, got %T", err) + } +} + +func TestReadErrNoState_nilFile(t *testing.T) { + nilFile, err := os.Open("") + if err == nil { + t.Fatal("wrongly succeeded in opening non-existent file") + } + + _, err = Read(nilFile, encryption.StateEncryptionDisabled()) + if !errors.Is(err, ErrNoState) { + t.Fatalf("expected ErrNoState, got %T", err) + } +} +func TestReadEmptyWithEncryption(t *testing.T) { + payload := bytes.NewBufferString("") + + _, err := Read(payload, enctest.EncryptionRequired().State()) + if !errors.Is(err, ErrNoState) { + t.Fatalf("expected ErrNoState, got %T", err) + } +} +func TestReadEmptyJsonWithEncryption(t *testing.T) { + payload := bytes.NewBufferString("{}") + + _, err := Read(payload, enctest.EncryptionRequired().State()) + + if err == nil || err.Error() != "unable to determine data structure during decryption: Given payload is not a state file" { + t.Fatalf("expected encryption error, got %v", err) + } +} diff --git a/pkg/states/statefile/roundtrip_test.go 
b/pkg/states/statefile/roundtrip_test.go new file mode 100644 index 00000000000..9281aa80c00 --- /dev/null +++ b/pkg/states/statefile/roundtrip_test.go @@ -0,0 +1,127 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "bytes" + "os" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/encryption/enctest" +) + +func TestRoundtrip(t *testing.T) { + const dir = "testdata/roundtrip" + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + for _, info := range entries { + const inSuffix = ".in.tfstate" + const outSuffix = ".out.tfstate" + + if info.IsDir() { + continue + } + inName := info.Name() + if !strings.HasSuffix(inName, inSuffix) { + continue + } + name := inName[:len(inName)-len(inSuffix)] + outName := name + outSuffix + + t.Run(name, func(t *testing.T) { + oSrcWant, err := os.ReadFile(filepath.Join(dir, outName)) + if err != nil { + t.Fatal(err) + } + oWant, diags := readStateV4(oSrcWant) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + ir, err := os.Open(filepath.Join(dir, inName)) + if err != nil { + t.Fatal(err) + } + defer ir.Close() + + f, err := Read(ir, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var buf bytes.Buffer + err = Write(f, &buf, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatal(err) + } + oSrcWritten := buf.Bytes() + + oGot, diags := readStateV4(oSrcWritten) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + problems := deep.Equal(oGot, oWant) + sort.Strings(problems) + for _, problem := range problems { + t.Error(problem) + } + }) + } +} + +func TestRoundtripEncryption(t *testing.T) { + const path = "testdata/roundtrip/v4-modules.out.tfstate" + + enc := 
enctest.EncryptionWithFallback().State() + + unencryptedInput, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer unencryptedInput.Close() + + // Read unencrypted using fallback + originalState, err := Read(unencryptedInput, enc) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Write encrypted + var encrypted bytes.Buffer + err = Write(originalState, &encrypted, enc) + if err != nil { + t.Fatal(err) + } + + // Make sure it is encrypted / not readable + encryptedCopy := encrypted + _, err = Read(&encryptedCopy, encryption.StateEncryptionDisabled()) + if err == nil || err.Error() != "Unsupported state file format: This state file is encrypted and can not be read without an encryption configuration" { + t.Fatalf("expected written state file to be encrypted!") + } + + // Read encrypted + newState, err := Read(&encrypted, enc) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Compare before/after encryption workflow + problems := deep.Equal(newState, originalState) + sort.Strings(problems) + for _, problem := range problems { + t.Error(problem) + } +} diff --git a/pkg/states/statefile/testdata/read/empty b/pkg/states/statefile/testdata/read/empty new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/states/statefile/testdata/roundtrip/v1-simple.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v1-simple.in.tfstate new file mode 100644 index 00000000000..c1777cce7eb --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v1-simple.in.tfstate @@ -0,0 +1 @@ 
+{"version":1,"serial":1,"modules":[{"path":["root"],"outputs":{"numbers":"0,1"},"resources":{"null_resource.bar":{"type":"null_resource","depends_on":["null_resource.foo"],"primary":{"id":"6456912646020570139","attributes":{"id":"6456912646020570139","triggers.#":"1","triggers.whaaat":"0,1"}}},"null_resource.foo.0":{"type":"null_resource","primary":{"id":"3597404161631769617","attributes":{"id":"3597404161631769617","triggers.#":"1","triggers.what":"0"}}},"null_resource.foo.1":{"type":"null_resource","primary":{"id":"3214385801340650197","attributes":{"id":"3214385801340650197","triggers.#":"1","triggers.what":"1"}}}}}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v1-simple.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v1-simple.out.tfstate new file mode 100644 index 00000000000..a1f7b2afc00 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v1-simple.out.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":1,"lineage":"","terraform_version":"0.0.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"6456912646020570139","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider[\"registry.opentofu.org/-/null\"]","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"3597404161631769617","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"3214385801340650197","triggers.%":"1","triggers.what":"1"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate new file mode 100644 index 00000000000..ba6d95c44f7 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-bigint.in.tfstate @@ -0,0 +1 @@ 
+{"version":3,"terraform_version":"0.11.1","serial":8,"lineage":"0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb","backend":{"hash":10669755453527594976},"modules":[{"path":["root"],"outputs":{"results":{"sensitive":false,"type":"map","value":{"aws_region":"us-west-2","list":"[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]","list_item_0":"{\"triggers\":{\"index\":\"0\"}}","list_item_1":"{\"triggers\":{\"index\":\"1\"}}","list_triggers":"[{\"index\":\"0\"},{\"index\":\"1\"}]","list_triggers_item":"{\"index\":\"0\"}","module_object":"{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}","module_output":"hello us-west-2","single":"{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"}}},"resources":{"null_resource.bar.0":{"type":"null_resource","depends_on":["null_resource.baz"],"primary":{"id":"604776346677326098","attributes":{"id":"604776346677326098","triggers.%":"1","triggers.index":"0"},"meta":{"schema_version":"1"},"tainted":false},"deposed":[],"provider":"provider.null"},"null_resource.bar.1":{"type":"null_resource","depends_on":["null_resource.baz"],"primary":{"id":"4776432143683449212","attributes":{"id":"4776432143683449212","triggers.%":"1","triggers.index":"1"},"meta":{},"tainted":false},"deposed":[],"provider":"provider.null"},"null_resource.baz":{"type":"null_resource","depends_on":[],"primary":{"id":"1361331090091665738","attributes":{"id":"1361331090091665738","triggers.%":"9","triggers.baz":"BAR","triggers.cwd_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.foo":"bar","triggers.format":"Hello 
12","triggers.json":"{\"foo\":\"bar\",\"wonk\":[]}","triggers.module_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.root_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.woot":"us-west-2","triggers.workspace":"default"},"meta":{"foo":"bar"},"tainted":false},"deposed":[],"provider":"provider.null"}},"depends_on":[]},{"path":["root","child"],"outputs":{"test":{"sensitive":false,"type":"string","value":"hello us-west-2"},"test2":{"sensitive":false,"type":"string","value":"hello world 2"}},"resources":{"null_resource.foo":{"type":"null_resource","depends_on":[],"primary":{"id":"1361","attributes":{"id":"1361","triggers.%":"0"},"meta":{},"tainted":false},"deposed":[],"provider":"provider.null"}},"depends_on":[]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate new file mode 100644 index 00000000000..f66c102a594 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-bigint.out.tfstate @@ -0,0 +1 @@ +{"version":4,"terraform_version":"0.11.1","serial":8,"lineage":"0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb","outputs":{"results":{"type":["map","string"],"value":{"aws_region":"us-west-2","list":"[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]","list_item_0":"{\"triggers\":{\"index\":\"0\"}}","list_item_1":"{\"triggers\":{\"index\":\"1\"}}","list_triggers":"[{\"index\":\"0\"},{\"index\":\"1\"}]","list_triggers_item":"{\"index\":\"0\"}","module_object":"{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}","module_output":"hello us-west-2","single":"{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 
12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"}}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","each":"list","provider":"provider.null","instances":[{"attributes_flat":{"id":"604776346677326098","triggers.%":"1","triggers.index":"0"},"depends_on":["null_resource.baz"],"index_key":0,"schema_version":1},{"attributes_flat":{"id":"4776432143683449212","triggers.%":"1","triggers.index":"1"},"depends_on":["null_resource.baz"],"index_key":1,"schema_version":0}]},{"mode":"managed","type":"null_resource","name":"baz","provider":"provider.null","instances":[{"attributes_flat":{"id":"1361331090091665738","triggers.%":"9","triggers.baz":"BAR","triggers.cwd_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.foo":"bar","triggers.format":"Hello 12","triggers.json":"{\"foo\":\"bar\",\"wonk\":[]}","triggers.module_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.root_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.woot":"us-west-2","triggers.workspace":"default"},"schema_version":0,"private":"eyJmb28iOiJiYXIifQ=="}]},{"module":"module.child","mode":"managed","type":"null_resource","name":"foo","provider":"provider.null","instances":[{"attributes_flat":{"id":"1361","triggers.%":"0"},"schema_version":0}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate new file mode 100644 index 00000000000..36d29e3824a --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-builtin.in.tfstate @@ -0,0 +1 @@ 
+{"version":3,"terraform_version":"0.11.14","serial":1,"lineage":"b707851e-4209-9792-e752-bc0dd6c81fcf","modules":[{"path":["root"],"outputs":{},"resources":{"data.terraform_remote_state.test":{"type":"terraform_remote_state","depends_on":[],"primary":{"id":"2020-08-14 19:13:36.875081 +0000 UTC","attributes":{"backend":"remote","config.#":"1","config.345861710.organization":"hashicorp","config.345861710.workspaces.#":"1","config.345861710.workspaces.0.%":"1","config.345861710.workspaces.0.name":"test","environment":"default","id":"2020-08-14 19:13:36.875081 +0000 UTC","workspace":"default"},"meta":{},"tainted":false},"deposed":[],"provider":"provider.terraform"}},"depends_on":[]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate new file mode 100644 index 00000000000..372e236973c --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-builtin.out.tfstate @@ -0,0 +1 @@ +{"version":4,"terraform_version":"0.12.28","serial":1,"lineage":"b707851e-4209-9792-e752-bc0dd6c81fcf","outputs":{},"resources":[{"mode":"data","type":"terraform_remote_state","name":"test","provider":"provider.terraform","instances":[{"schema_version":0,"attributes_flat":{"backend":"remote","config.#":"1","config.345861710.organization":"hashicorp","config.345861710.workspaces.#":"1","config.345861710.workspaces.0.%":"1","config.345861710.workspaces.0.name":"test","environment":"default","id":"2020-08-14 19:13:36.875081 +0000 UTC","workspace":"default"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate new file mode 100644 index 00000000000..c1da48d6bf2 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-grabbag.in.tfstate @@ -0,0 +1 @@ 
+{"version":3,"terraform_version":"0.11.1","serial":8,"lineage":"0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb","modules":[{"path":["root"],"outputs":{"results":{"sensitive":false,"type":"map","value":{"aws_region":"us-west-2","list":"[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]","list_item_0":"{\"triggers\":{\"index\":\"0\"}}","list_item_1":"{\"triggers\":{\"index\":\"1\"}}","list_triggers":"[{\"index\":\"0\"},{\"index\":\"1\"}]","list_triggers_item":"{\"index\":\"0\"}","module_object":"{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}","module_output":"hello us-west-2","single":"{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"}}},"resources":{"null_resource.bar.0":{"type":"null_resource","depends_on":["null_resource.baz"],"primary":{"id":"604776346677326098","attributes":{"id":"604776346677326098","triggers.%":"1","triggers.index":"0"},"meta":{"schema_version":"1"},"tainted":false},"deposed":[],"provider":"provider.null"},"null_resource.bar.1":{"type":"null_resource","depends_on":["null_resource.baz"],"primary":{"id":"4776432143683449212","attributes":{"id":"4776432143683449212","triggers.%":"1","triggers.index":"1"},"meta":{},"tainted":false},"deposed":[],"provider":"provider.null"},"null_resource.baz":{"type":"null_resource","depends_on":[],"primary":{"id":"1361331090091665738","attributes":{"id":"1361331090091665738","triggers.%":"9","triggers.baz":"BAR","triggers.cwd_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.foo":"bar","triggers.format":"Hello 
12","triggers.json":"{\"foo\":\"bar\",\"wonk\":[]}","triggers.module_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.root_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.woot":"us-west-2","triggers.workspace":"default"},"meta":{"foo":"bar"},"tainted":false},"deposed":[],"provider":"provider.null"}},"depends_on":[]},{"path":["root","child"],"outputs":{"test":{"sensitive":false,"type":"string","value":"hello us-west-2"},"test2":{"sensitive":false,"type":"string","value":"hello world 2"}},"resources":{"null_resource.foo":{"type":"null_resource","depends_on":[],"primary":{"id":"1361","attributes":{"id":"1361","triggers.%":"0"},"meta":{},"tainted":false},"deposed":[],"provider":"provider.null"}},"depends_on":[]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate new file mode 100644 index 00000000000..f66c102a594 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-grabbag.out.tfstate @@ -0,0 +1 @@ +{"version":4,"terraform_version":"0.11.1","serial":8,"lineage":"0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb","outputs":{"results":{"type":["map","string"],"value":{"aws_region":"us-west-2","list":"[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]","list_item_0":"{\"triggers\":{\"index\":\"0\"}}","list_item_1":"{\"triggers\":{\"index\":\"1\"}}","list_triggers":"[{\"index\":\"0\"},{\"index\":\"1\"}]","list_triggers_item":"{\"index\":\"0\"}","module_object":"{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}","module_output":"hello us-west-2","single":"{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 
12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"}}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","each":"list","provider":"provider.null","instances":[{"attributes_flat":{"id":"604776346677326098","triggers.%":"1","triggers.index":"0"},"depends_on":["null_resource.baz"],"index_key":0,"schema_version":1},{"attributes_flat":{"id":"4776432143683449212","triggers.%":"1","triggers.index":"1"},"depends_on":["null_resource.baz"],"index_key":1,"schema_version":0}]},{"mode":"managed","type":"null_resource","name":"baz","provider":"provider.null","instances":[{"attributes_flat":{"id":"1361331090091665738","triggers.%":"9","triggers.baz":"BAR","triggers.cwd_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.foo":"bar","triggers.format":"Hello 12","triggers.json":"{\"foo\":\"bar\",\"wonk\":[]}","triggers.module_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.root_path":"/home/mart/Devel/terraform/tmp/hcl2-simple","triggers.woot":"us-west-2","triggers.workspace":"default"},"schema_version":0,"private":"eyJmb28iOiJiYXIifQ=="}]},{"module":"module.child","mode":"managed","type":"null_resource","name":"foo","provider":"provider.null","instances":[{"attributes_flat":{"id":"1361","triggers.%":"0"},"schema_version":0}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate new file mode 100644 index 00000000000..78c4da28f72 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.in.tfstate @@ -0,0 +1 @@ 
+{"version":3,"terraform_version":"0.7.13","serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","modules":[{"path":["root"],"outputs":{"numbers":{"sensitive":false,"type":"string","value":"0,1"}},"resources":{"null_resource.bar":{"type":"null_resource","depends_on":["null_resource.valid","null_resource.1invalid"],"primary":{"id":"5388490630832483079","attributes":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"meta":{},"tainted":false},"deposed":[],"provider":""}},"depends_on":[]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate new file mode 100644 index 00000000000..97711d27ab9 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-invalid-depends.out.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.7.13","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider.null","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.valid"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-simple.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-simple.in.tfstate new file mode 100644 index 00000000000..530284629a1 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-simple.in.tfstate @@ -0,0 +1 @@ 
+{"version":3,"terraform_version":"0.7.13","serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","modules":[{"path":["root"],"outputs":{"numbers":{"sensitive":false,"type":"string","value":"0,1"}},"resources":{"null_resource.bar":{"type":"null_resource","depends_on":["null_resource.foo.*","null_resource.foobar","null_resource.foobar.1"],"primary":{"id":"5388490630832483079","attributes":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"meta":{},"tainted":false},"deposed":[],"provider":""},"null_resource.foo.0":{"type":"null_resource","depends_on":[],"primary":{"id":"8212585058302700791","attributes":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"},"meta":{},"tainted":false},"deposed":[],"provider":""},"null_resource.foo.1":{"type":"null_resource","depends_on":[],"primary":{"id":"1523897709610803586","attributes":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"1"},"meta":{},"tainted":false},"deposed":[],"provider":""},"null_resource.foobar":{"type":"null_resource","depends_on":[],"primary":{"id":"7388490630832483079","attributes":{"id":"7388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"meta":{},"tainted":false},"deposed":[],"provider":""}},"depends_on":[]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v3-simple.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v3-simple.out.tfstate new file mode 100644 index 00000000000..2bf1510a10a --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v3-simple.out.tfstate @@ -0,0 +1 @@ 
+{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.7.13","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider.null","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo","null_resource.foobar","null_resource.foobar[1]"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider.null","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"1"}}]},{"mode":"managed","type":"null_resource","name":"foobar","provider":"provider.null","instances":[{"schema_version":0,"attributes_flat":{"id":"7388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate new file mode 100644 index 00000000000..61db1f51f7a --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-cbd.in.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"create_before_destroy":true,"private":"bnVsbA=="}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate new file mode 120000 index 00000000000..3831d381af0 --- /dev/null +++ 
b/pkg/states/statefile/testdata/roundtrip/v4-cbd.out.tfstate @@ -0,0 +1 @@ +v4-cbd.in.tfstate \ No newline at end of file diff --git a/pkg/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate new file mode 100644 index 00000000000..47bb9c74d1a --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-foreach.in.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"private":"bnVsbA==","depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate new file mode 120000 index 00000000000..d35986e2e51 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-foreach.out.tfstate @@ -0,0 +1 @@ +v4-foreach.in.tfstate \ No newline at end of file diff --git a/pkg/states/statefile/testdata/roundtrip/v4-future.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-future.in.tfstate new file mode 100644 index 00000000000..6c581b0be62 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-future.in.tfstate @@ -0,0 +1 @@ 
+{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"999.0.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider[\"registry.opentofu.org/-/null\"]","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"0"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-future.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-future.out.tfstate new file mode 120000 index 00000000000..b4037372eeb --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-future.out.tfstate @@ -0,0 +1 @@ +v4-future.in.tfstate \ No newline at end of file diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate new file mode 100644 index 00000000000..1b77f5e3b31 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.in.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"private":"bnVsbA==","depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate 
b/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate new file mode 100644 index 00000000000..47bb9c74d1a --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-foreach.out.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"private":"bnVsbA==","depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate new file mode 100644 index 00000000000..22dcc3e8d61 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.in.tfstate @@ -0,0 +1 @@ +{"version":4,"terraform_version":"0.12.0","serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","outputs":{"numbers":{"value":"0,1","type":"string"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider.null","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"module":"module.modB","mode":"managed","type":"null_resource","name":"bar","each":"map","provider":"provider. 
null","instances":[{"index_key":"a","schema_version":0,"attributes_flat":{"id":"8212585058302700791"},"dependencies":["module.modA.null_resource.resource"]},{"index_key":"b","schema_version":0,"attributes_flat":{"id":"1523897709610803586"},"dependencies":["module.modA.null_resource.resource"]}]},{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider.null","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"private":"bnVsbA==","dependencies":["null_resource.bar"],"depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate new file mode 100644 index 00000000000..2f8cdd1e1b0 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-modules.out.tfstate @@ -0,0 +1 @@ +{"version":4,"terraform_version":"0.12.0","serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","outputs":{"numbers":{"value":"0,1","type":"string"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"module":"module.modB","mode":"managed","type":"null_resource","name":"bar","each":"map","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"index_key":"a","schema_version":0,"attributes_flat":{"id":"8212585058302700791"},"dependencies":["module.modA.null_resource.resource"]},{"index_key":"b","schema_version":0,"attributes_flat":{"id":"1523897709610803586"},"dependencies":["module.modA.null_resource.resource"]}]},{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","t
riggers":{"input":"test"}},"private":"bnVsbA==","dependencies":["null_resource.bar"],"depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate new file mode 100644 index 00000000000..7847d2f610e --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.in.tfstate @@ -0,0 +1 @@ +{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"data","type":"terraform_remote_state","name":"random","provider":"provider.terraform","instances":[{"schema_version":0,"attributes_flat":{"backend":"remote"}}]},{"mode":"managed","type":"null_resource","name":"bar","provider":"provider.null","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider.null","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"0"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate new file mode 100644 index 00000000000..41649871502 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-legacy-simple.out.tfstate @@ -0,0 +1 @@ 
+{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"data","type":"terraform_remote_state","name":"random","provider":"provider[\"terraform.io/builtin/terraform\"]","instances":[{"schema_version":0,"attributes_flat":{"backend":"remote"}}]},{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider.null","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"0"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-modules.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-modules.in.tfstate new file mode 100644 index 00000000000..2f8cdd1e1b0 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-modules.in.tfstate @@ -0,0 +1 @@ 
+{"version":4,"terraform_version":"0.12.0","serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","outputs":{"numbers":{"value":"0,1","type":"string"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"module":"module.modB","mode":"managed","type":"null_resource","name":"bar","each":"map","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"index_key":"a","schema_version":0,"attributes_flat":{"id":"8212585058302700791"},"dependencies":["module.modA.null_resource.resource"]},{"index_key":"b","schema_version":0,"attributes_flat":{"id":"1523897709610803586"},"dependencies":["module.modA.null_resource.resource"]}]},{"module":"module.modA","mode":"managed","type":"null_resource","name":"resource","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes":{"id":"4639265839606265182","triggers":{"input":"test"}},"private":"bnVsbA==","dependencies":["null_resource.bar"],"depends_on":["var.input"]}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-modules.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-modules.out.tfstate new file mode 120000 index 00000000000..009f759edc7 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-modules.out.tfstate @@ -0,0 +1 @@ +v4-modules.in.tfstate \ No newline at end of file diff --git a/pkg/states/statefile/testdata/roundtrip/v4-simple.in.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-simple.in.tfstate new file mode 100644 index 00000000000..c06f44a5542 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-simple.in.tfstate @@ -0,0 +1 @@ 
+{"version":4,"serial":0,"lineage":"f2968801-fa14-41ab-a044-224f3a4adf04","terraform_version":"0.12.0","outputs":{"numbers":{"type":"string","value":"0,1"}},"resources":[{"mode":"managed","type":"null_resource","name":"bar","provider":"provider[\"registry.opentofu.org/-/null\"]","instances":[{"schema_version":0,"attributes_flat":{"id":"5388490630832483079","triggers.%":"1","triggers.whaaat":"0,1"},"depends_on":["null_resource.foo"]}]},{"mode":"managed","type":"null_resource","name":"foo","provider":"provider[\"registry.opentofu.org/-/null\"]","each":"list","instances":[{"index_key":0,"schema_version":0,"attributes_flat":{"id":"8212585058302700791","triggers.%":"1","triggers.what":"0"}},{"index_key":1,"schema_version":0,"attributes_flat":{"id":"1523897709610803586","triggers.%":"1","triggers.what":"0"}}]}]} diff --git a/pkg/states/statefile/testdata/roundtrip/v4-simple.out.tfstate b/pkg/states/statefile/testdata/roundtrip/v4-simple.out.tfstate new file mode 120000 index 00000000000..d0e79c30a10 --- /dev/null +++ b/pkg/states/statefile/testdata/roundtrip/v4-simple.out.tfstate @@ -0,0 +1 @@ +v4-simple.in.tfstate \ No newline at end of file diff --git a/pkg/states/statefile/version0.go b/pkg/states/statefile/version0.go new file mode 100644 index 00000000000..3c307631aab --- /dev/null +++ b/pkg/states/statefile/version0.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +// looksLikeVersion0 sniffs for the signature indicating a version 0 state +// file. +// +// Version 0 was the number retroactively assigned to OpenTofu's initial +// (unversioned) binary state file format, which was later superseded by the +// version 1 format in JSON. +// +// Version 0 is no longer supported, so this is used only to detect it and +// return a nice error to the user. 
+func looksLikeVersion0(src []byte) bool { + // Version 0 files begin with the magic prefix "tfstate". + const magic = "tfstate" + if len(src) < len(magic) { + // Not even long enough to have the magic prefix + return false + } + if string(src[0:len(magic)]) == magic { + return true + } + return false +} diff --git a/pkg/states/statefile/version1.go b/pkg/states/statefile/version1.go new file mode 100644 index 00000000000..d6d4dba8e83 --- /dev/null +++ b/pkg/states/statefile/version1.go @@ -0,0 +1,172 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV1 := &stateV1{} + err := json.Unmarshal(src, sV1) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV1(sV1) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2, err := upgradeStateV1ToV2(sV1) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV1 is a representation of the legacy JSON state format version 1. +// +// It is only used to read version 1 JSON files prior to upgrading them to +// the current format. +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. 
+ Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Module imports are + // always disjoint, so the path represents a module tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. 
+ Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by OpenTofu, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. 
Functionally, Deposed instances are very + // similar to Tainted instances in that OpenTofu is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to OpenTofu + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within OpenTofu configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by OpenTofu core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} diff --git a/pkg/states/statefile/version1_upgrade.go b/pkg/states/statefile/version1_upgrade.go new file mode 100644 index 00000000000..7e15ab6e0a9 --- /dev/null +++ b/pkg/states/statefile/version1_upgrade.go @@ -0,0 +1,177 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { + log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %w", err) + } + + modules := make([]*moduleStateV2, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %w", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &stateV2{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %w", err) + } + + return &remoteStateV2{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root. 
+ path = []string{"root"} + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*outputStateV2) + for key, output := range old.Outputs { + outputs[key] = &outputStateV2{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*resourceStateV2) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %w", err) + } + + return &moduleStateV2{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + + deposed := make([]*instanceStateV2, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %w", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &resourceStateV2{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %w", err) + } + + meta, err := copystructure.Copy(old.Meta) + 
if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %w", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &instanceStateV2{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Meta: newMeta, + }, nil +} diff --git a/pkg/states/statefile/version2.go b/pkg/states/statefile/version2.go new file mode 100644 index 00000000000..2b52d16f063 --- /dev/null +++ b/pkg/states/statefile/version2.go @@ -0,0 +1,209 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2 := &stateV2{} + err := json.Unmarshal(src, sV2) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3, err := upgradeStateV2ToV3(sV2) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 2. +// +// It is only used to read version 2 JSON files prior to upgrading them to +// the current format. +type stateV2 struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of OpenTofu that wrote this state. 
+ TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV2 `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *backendStateV2 `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV2 `json:"modules"` +} + +type remoteStateV2 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type outputStateV2 struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` +} + +type moduleStateV2 struct { + // Path is the import path from the root module. 
Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*outputStateV2 `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV2 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` +} + +type resourceStateV2 struct { + // This is filled in and managed by OpenTofu, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). 
+ // + // OpenTofu uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // OpenTofu. If OpenTofu doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV2 `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*instanceStateV2 `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` +} + +type instanceStateV2 struct { + // A unique ID for this resource. This is opaque to OpenTofu + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. 
Any keys here + // are accessible in variable format within OpenTofu configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by OpenTofu core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` +} + +type backendStateV2 struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} diff --git a/pkg/states/statefile/version2_upgrade.go b/pkg/states/statefile/version2_upgrade.go new file mode 100644 index 00000000000..9816738ee9c --- /dev/null +++ b/pkg/states/statefile/version2_upgrade.go @@ -0,0 +1,150 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" +) + +func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { + if old == nil { + return (*stateV3)(nil), nil + } + + var new *stateV3 + { + copy, err := copystructure.Config{Lock: true}.Copy(old) + if err != nil { + panic(err) + } + newWrongType := copy.(*stateV2) + newRightType := (stateV3)(*newWrongType) + new = &newRightType + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. 
+ for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. 
There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/pkg/states/statefile/version3.go b/pkg/states/statefile/version3.go new file mode 100644 index 00000000000..82e5eaa2c0f --- /dev/null +++ b/pkg/states/statefile/version3.go @@ -0,0 +1,55 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3 := &stateV3{} + err := json.Unmarshal(src, sV3) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4, err := upgradeStateV3ToV4(sV3) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 3. +// +// It is only used to read version 3 JSON files prior to upgrading them to +// the current format. +// +// The differences between version 2 and version 3 are only in the data and +// not in the structure, so stateV3 actually shares the same structs as +// stateV2. Type stateV3 represents that the data within is formatted as +// expected by the V3 format, rather than the V2 format. +type stateV3 stateV2 diff --git a/pkg/states/statefile/version3_upgrade.go b/pkg/states/statefile/version3_upgrade.go new file mode 100644 index 00000000000..32113d6a251 --- /dev/null +++ b/pkg/states/statefile/version3_upgrade.go @@ -0,0 +1,468 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { + + if old.Serial < 0 { + // The new format is using uint64 here, which should be fine for any + // real state (we only used positive integers in practice) but we'll + // catch this explicitly here to avoid weird behavior if a state file + // has been tampered with in some way. + return nil, fmt.Errorf("state has serial less than zero, which is invalid") + } + + new := &stateV4{ + TerraformVersion: old.TFVersion, + Serial: uint64(old.Serial), + Lineage: old.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + if new.TerraformVersion == "" { + // Older formats considered this to be optional, but now it's required + // and so we'll stub it out with something that's definitely older + // than the version that really created this state. + new.TerraformVersion = "0.0.0" + } + + for _, msOld := range old.Modules { + if len(msOld.Path) < 1 || msOld.Path[0] != "root" { + return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) + } + + // Convert legacy-style module address into our newer address type. + // Since these old formats are only generated by versions of OpenTofu + // that don't support count and for_each on modules, we can just assume + // all of the modules are unkeyed. 
+ moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) + for i, name := range msOld.Path[1:] { + if !hclsyntax.ValidIdentifier(name) { + // If we don't fail here then we'll produce an invalid state + // version 4 which subsequent operations will reject, so we'll + // fail early here for safety to make sure we can never + // inadvertently commit an invalid snapshot to a backend. + // + // This is a user-facing usage of Terraform but refers to a very + // old historical version of Terraform which has no corresponding + // OpenTofu version yet. + // If we ever get OpenTofu 0.11.x and 0.12.x, we should update this + // message to mention OpenTofu instead. + return nil, fmt.Errorf("state contains invalid module path %#v: %q is not a valid identifier; rename it in Terraform 0.11 before upgrading to Terraform 0.12", msOld.Path, name) + } + moduleAddr[i] = addrs.ModuleInstanceStep{ + Name: name, + InstanceKey: addrs.NoKey, + } + } + + // In a v3 state file, a "resource state" is actually an instance + // state, so we need to fill in a missing level of hierarchy here + // by lazily creating resource states as we encounter them. + // We'll track them in here, keyed on the string representation of + // the resource address. 
+ resourceStates := map[string]*resourceStateV4{} + + for legacyAddr, rsOld := range msOld.Resources { + instAddr, err := parseLegacyResourceAddress(legacyAddr) + if err != nil { + return nil, err + } + + resAddr := instAddr.Resource + rs, exists := resourceStates[resAddr.String()] + if !exists { + var modeStr string + switch resAddr.Mode { + case addrs.ManagedResourceMode: + modeStr = "managed" + case addrs.DataResourceMode: + modeStr = "data" + default: + return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) + } + + // In state versions prior to 4 we allowed each instance of a + // resource to have its own provider configuration address, + // which makes no real sense in practice because providers + // are associated with resources in the configuration. We + // elevate that to the resource level during this upgrade, + // implicitly taking the provider address of the first instance + // we encounter for each resource. While this is lossy in + // theory, in practice there is no reason for these values to + // differ between instances. + var providerAddr addrs.AbsProviderConfig + oldProviderAddr := rsOld.Provider + if strings.Contains(oldProviderAddr, "provider.") { + // Smells like a new-style provider address, but we'll test it. + var diags tfdiags.Diagnostics + providerAddr, diags = addrs.ParseLegacyAbsProviderConfigStr(oldProviderAddr) + if diags.HasErrors() { + if strings.Contains(oldProviderAddr, "${") { + // There seems to be a common misconception that + // interpolation was valid in provider aliases + // in 0.11, so we'll use a specialized error + // message for that case. + // + // This is a user-facing usage of Terraform but refers + // to a very old historical version of Terraform + // which has no corresponding OpenTofu version. + // If we ever get OpenTofu 0.11.x and 0.12.x, we should + // update this message to mention OpenTofu instead. 
+ return nil, fmt.Errorf("invalid provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) + } + return nil, fmt.Errorf("invalid provider config reference %q for %s: %w", oldProviderAddr, instAddr, diags.Err()) + } + } else { + // Smells like an old-style module-local provider address, + // which we'll need to migrate. We'll assume it's referring + // to the same module the resource is in, which might be + // incorrect but it'll get fixed up next time any updates + // are made to an instance. + if oldProviderAddr != "" { + localAddr, diags := configs.ParseProviderConfigCompactStr(oldProviderAddr) + if diags.HasErrors() { + if strings.Contains(oldProviderAddr, "${") { + // There seems to be a common misconception that + // interpolation was valid in provider aliases + // in 0.11, so we'll use a specialized error + // message for that case. + // + // This is a user-facing usage of Terraform but refers + // to a very old historical version of Terraform + // which has no corresponding OpenTofu version. + // If we ever get OpenTofu 0.11.x and 0.12.x, we should + // update this message to mention OpenTofu instead. + return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: this alias seems to contain a template interpolation sequence, which was not supported but also not error-checked in Terraform 0.11. 
To proceed, rename the associated provider alias to a valid identifier and apply the change with Terraform 0.11 before upgrading to Terraform 0.12", oldProviderAddr, instAddr) + } + return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %w", oldProviderAddr, instAddr, diags.Err()) + } + providerAddr = addrs.AbsProviderConfig{ + Module: moduleAddr.Module(), + // We use NewLegacyProvider here so we can use + // LegacyString() below to get the appropriate + // legacy-style provider string. + Provider: addrs.NewLegacyProvider(localAddr.LocalName), + Alias: localAddr.Alias, + } + } else { + providerAddr = addrs.AbsProviderConfig{ + Module: moduleAddr.Module(), + // We use NewLegacyProvider here so we can use + // LegacyString() below to get the appropriate + // legacy-style provider string. + Provider: addrs.NewLegacyProvider(resAddr.ImpliedProvider()), + } + } + } + + rs = &resourceStateV4{ + Module: moduleAddr.String(), + Mode: modeStr, + Type: resAddr.Type, + Name: resAddr.Name, + Instances: []instanceObjectStateV4{}, + ProviderConfig: providerAddr.LegacyString(), + } + resourceStates[resAddr.String()] = rs + } + + // Now we'll deal with the instance itself, which may either be + // the first instance in a resource we just created or an additional + // instance for a resource added on a prior loop. + instKey := instAddr.Key + if isOld := rsOld.Primary; isOld != nil { + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) + if err != nil { + return nil, fmt.Errorf("failed to migrate primary generation of %s: %w", instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + for i, isOld := range rsOld.Deposed { + // When we migrate old instances we'll use sequential deposed + // keys just so that the upgrade result is deterministic. New + // deposed keys allocated moving forward will be pseudorandomly + // selected, but we check for collisions and so these + // non-random ones won't hurt. 
+ deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) + if err != nil { + return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %w", i, instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + + if instKey != addrs.NoKey && rs.EachMode == "" { + rs.EachMode = "list" + } + } + + for _, rs := range resourceStates { + new.Resources = append(new.Resources, *rs) + } + + if len(msOld.Path) == 1 && msOld.Path[0] == "root" { + // We'll migrate the outputs for this module too, then. + for name, oldOS := range msOld.Outputs { + newOS := outputStateV4{ + Sensitive: oldOS.Sensitive, + } + + valRaw := oldOS.Value + valSrc, err := json.Marshal(valRaw) + if err != nil { + // Should never happen, because this value came from JSON + // in the first place and so we're just round-tripping here. + return nil, fmt.Errorf("failed to serialize output %q value as JSON: %w", name, err) + } + + // The "type" field in state V2 wasn't really that useful + // since it was only able to capture string vs. list vs. map. + // For this reason, during upgrade we'll just discard it + // altogether and use cty's idea of the implied type of + // turning our old value into JSON. + ty, err := ctyjson.ImpliedType(valSrc) + if err != nil { + // REALLY should never happen, because we literally just + // encoded this as JSON above! + return nil, fmt.Errorf("failed to parse output %q value from JSON: %w", name, err) + } + + // ImpliedType tends to produce structural types, but since older + // version of Terraform didn't support those a collection type + // is probably what was intended, so we'll see if we can + // interpret our value as one. 
+ ty = simplifyImpliedValueType(ty) + + tySrc, err := ctyjson.MarshalType(ty) + if err != nil { + return nil, fmt.Errorf("failed to serialize output %q type as JSON: %w", name, err) + } + + newOS.ValueRaw = json.RawMessage(valSrc) + newOS.ValueTypeRaw = json.RawMessage(tySrc) + + new.RootOutputs[name] = newOS + } + } + } + + new.normalize() + + return new, nil +} + +func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { + + // Schema versions were, in prior formats, a private concern of the provider + // SDK, and not a first-class concept in the state format. Here we're + // sniffing for the pre-0.12 SDK's way of representing schema versions + // and promoting it to our first-class field if we find it. We'll ignore + // it if it doesn't look like what the SDK would've written. If this + // sniffing fails then we'll assume schema version 0. + var schemaVersion uint64 + migratedSchemaVersion := false + if raw, exists := isOld.Meta["schema_version"]; exists { + switch tv := raw.(type) { + case string: + v, err := strconv.ParseUint(tv, 10, 64) + if err == nil { + schemaVersion = v + migratedSchemaVersion = true + } + case int: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + case float64: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + } + } + + private := map[string]interface{}{} + for k, v := range isOld.Meta { + if k == "schema_version" && migratedSchemaVersion { + // We're gonna promote this into our first-class schema version field + continue + } + private[k] = v + } + var privateJSON []byte + if len(private) != 0 { + var err error + privateJSON, err = json.Marshal(private) + if err != nil { + // This shouldn't happen, because the Meta values all came from JSON + // originally anyway. 
+ return nil, fmt.Errorf("cannot serialize private instance object data: %w", err) + } + } + + var status string + if isOld.Tainted { + status = "tainted" + } + + var instKeyRaw interface{} + switch tk := instKey.(type) { + case addrs.IntKey: + instKeyRaw = int(tk) + case addrs.StringKey: + instKeyRaw = string(tk) + default: + if instKeyRaw != nil { + return nil, fmt.Errorf("unsupported instance key: %#v", instKey) + } + } + + var attributes map[string]string + if isOld.Attributes != nil { + attributes = make(map[string]string, len(isOld.Attributes)) + for k, v := range isOld.Attributes { + attributes[k] = v + } + } + if isOld.ID != "" { + // As a special case, if we don't already have an "id" attribute and + // yet there's a non-empty first-class ID on the old object then we'll + // create a synthetic id attribute to avoid losing that first-class id. + // In practice this generally arises only in tests where state literals + // are hand-written in a non-standard way; real code prior to 0.12 + // would always force the first-class ID to be copied into the + // id attribute before storing. + if attributes == nil { + attributes = make(map[string]string, len(isOld.Attributes)) + } + if idVal := attributes["id"]; idVal == "" { + attributes["id"] = isOld.ID + } + } + + return &instanceObjectStateV4{ + IndexKey: instKeyRaw, + Status: status, + Deposed: string(deposedKey), + AttributesFlat: attributes, + SchemaVersion: schemaVersion, + PrivateRaw: privateJSON, + }, nil +} + +// parseLegacyResourceAddress parses the different identifier format used +// state formats before version 4, like "instance.name.0". +func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { + var ret addrs.ResourceInstance + + // Split based on ".". Every resource address should have at least two + // elements (type and name). 
+ parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + ret.Resource.Mode = addrs.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + ret.Resource.Mode = addrs.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + ret.Resource.Type = parts[0] + ret.Resource.Name = parts[1] + ret.Key = addrs.NoKey + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return ret, fmt.Errorf("error parsing resource address %q: %w", s, err) + } + + ret.Key = addrs.IntKey(idx) + } + + return ret, nil +} + +// simplifyImpliedValueType attempts to heuristically simplify a value type +// derived from a legacy stored output value into something simpler that +// is closer to what would've fitted into the pre-v0.12 value type system. +func simplifyImpliedValueType(ty cty.Type) cty.Type { + switch { + case ty.IsTupleType(): + // If all of the element types are the same then we'll make this + // a list instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyTuple) { + // Don't know what the element type would be, then. 
+ return ty + } + + etys := ty.TupleElementTypes() + ety := etys[0] + for _, other := range etys[1:] { + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.List(ety) + + case ty.IsObjectType(): + // If all of the attribute types are the same then we'll make this + // a map instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyObject) { + // Don't know what the element type would be, then. + return ty + } + + atys := ty.AttributeTypes() + var ety cty.Type + for _, other := range atys { + if ety == cty.NilType { + ety = other + continue + } + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.Map(ety) + + default: + // No other normalizations are possible + return ty + } +} diff --git a/pkg/states/statefile/version4.go b/pkg/states/statefile/version4.go new file mode 100644 index 00000000000..8ec765cf301 --- /dev/null +++ b/pkg/states/statefile/version4.go @@ -0,0 +1,939 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4 := &stateV4{} + err := json.Unmarshal(src, sV4) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var tfVersion *version.Version + if sV4.TerraformVersion != "" { + var err error + tfVersion, err = version.NewVersion(sV4.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid OpenTofu version string", + fmt.Sprintf("State file claims to have been written by OpenTofu version %q, which is not a valid version string.", sV4.TerraformVersion), + )) + } + } + + file := &File{ + TerraformVersion: tfVersion, + Serial: sV4.Serial, + Lineage: sV4.Lineage, + } + + state := states.NewState() + + for _, rsV4 := range sV4.Resources { + rAddr := addrs.Resource{ + Type: rsV4.Type, + Name: rsV4.Name, + } + switch rsV4.Mode { + case "managed": + rAddr.Mode = addrs.ManagedResourceMode + case "data": + rAddr.Mode = addrs.DataResourceMode + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource mode in state", + fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), + )) + continue + } + + 
moduleAddr := addrs.RootModuleInstance + if rsV4.Module != "" { + var addrDiags tfdiags.Diagnostics + moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + } + + providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) + diags.Append(addrDiags) + if addrDiags.HasErrors() { + // If ParseAbsProviderConfigStr returns an error, the state may have + // been written before Provider FQNs were introduced and the + // AbsProviderConfig string format will need normalization. If so, + // we treat it like a legacy provider (namespace "-") and let the + // provider installer handle detecting the FQN. + var legacyAddrDiags tfdiags.Diagnostics + providerAddr, legacyAddrDiags = addrs.ParseLegacyAbsProviderConfigStr(rsV4.ProviderConfig) + if legacyAddrDiags.HasErrors() { + continue + } + } + + ms := state.EnsureModule(moduleAddr) + + // Ensure the resource container object is present in the state. + ms.SetResourceProvider(rAddr, providerAddr) + + for _, isV4 := range rsV4.Instances { + keyRaw := isV4.IndexKey + var key addrs.InstanceKey + switch tk := keyRaw.(type) { + case int: + key = addrs.IntKey(tk) + case float64: + // Since JSON only has one number type, reading from encoding/json + // gives us a float64 here even if the number is whole. + // float64 has a smaller integer range than int, but in practice + // we rarely have more than a few tens of instances and so + // it's unlikely that we'll exhaust the 52 bits in a float64. 
+ key = addrs.IntKey(int(tk)) + case string: + key = addrs.StringKey(tk) + default: + if keyRaw != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), + )) + continue + } + key = addrs.NoKey + } + + instAddr := rAddr.Instance(key) + + obj := &states.ResourceInstanceObjectSrc{ + SchemaVersion: isV4.SchemaVersion, + CreateBeforeDestroy: isV4.CreateBeforeDestroy, + } + + { + // Instance attributes + switch { + case isV4.AttributesRaw != nil: + obj.AttrsJSON = isV4.AttributesRaw + case isV4.AttributesFlat != nil: + obj.AttrsFlat = isV4.AttributesFlat + default: + // This is odd, but we'll accept it and just treat the + // object has being empty. In practice this should arise + // only from the contrived sort of state objects we tend + // to hand-write inline in tests. + obj.AttrsJSON = []byte{'{', '}'} + } + } + + // Sensitive paths + if isV4.AttributeSensitivePaths != nil { + paths, pathsDiags := unmarshalPaths([]byte(isV4.AttributeSensitivePaths)) + diags = diags.Append(pathsDiags) + if pathsDiags.HasErrors() { + continue + } + + var pvm []cty.PathValueMarks + for _, path := range paths { + pvm = append(pvm, cty.PathValueMarks{ + Path: path, + Marks: cty.NewValueMarks(marks.Sensitive), + }) + } + obj.AttrSensitivePaths = pvm + } + + { + // Status + raw := isV4.Status + switch raw { + case "": + obj.Status = states.ObjectReady + case "tainted": + obj.Status = states.ObjectTainted + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), + )) + continue + } + } + + if raw := isV4.PrivateRaw; len(raw) > 0 { + obj.Private = raw + } + + { + depsRaw := isV4.Dependencies + deps := make([]addrs.ConfigResource, 0, len(depsRaw)) + for _, depRaw := 
range depsRaw { + addr, addrDiags := addrs.ParseAbsResourceStr(depRaw) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + deps = append(deps, addr.Config()) + } + obj.Dependencies = deps + } + + switch { + case isV4.Deposed != "": + dk := states.DeposedKey(isV4.Deposed) + if len(dk) != 8 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), + )) + continue + } + is := ms.ResourceInstance(instAddr) + if is.HasDeposed(dk) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), + )) + continue + } + + ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) + default: + is := ms.ResourceInstance(instAddr) + if is.HasCurrent() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), + )) + continue + } + + ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) + } + } + + // We repeat this after creating the instances because + // SetResourceInstanceCurrent automatically resets this metadata based + // on the incoming objects. That behavior is useful when we're making + // piecemeal updates to the state during an apply, but when we're + // reading the state file we want to reflect its contents exactly. + ms.SetResourceProvider(rAddr, providerAddr) + } + + // The root module is special in that we persist its attributes and thus + // need to reload them now. (For descendent modules we just re-calculate + // them based on the latest configuration on each run.) 
+ { + rootModule := state.RootModule() + for name, fos := range sV4.RootOutputs { + os := &states.OutputValue{ + Addr: addrs.AbsOutputValue{ + OutputValue: addrs.OutputValue{ + Name: name, + }, + }, + } + os.Sensitive = fos.Sensitive + + ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value type in state", + fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), + )) + continue + } + + val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value saved in state", + fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), + )) + continue + } + + os.Value = val + rootModule.OutputValues[name] = os + } + } + + // Saved check results from the previous run, if any. + // We differentiate absence from an empty array here so that we can + // recognize if the previous run was with a version of OpenTofu that + // didn't support checks yet, or if there just weren't any checkable + // objects to record, in case that's important for certain messaging. + if sV4.CheckResults != nil { + var moreDiags tfdiags.Diagnostics + state.CheckResults, moreDiags = decodeCheckResultsV4(sV4.CheckResults) + diags = diags.Append(moreDiags) + } + + file.State = state + return file, diags +} + +func writeStateV4(file *File, w io.Writer, enc encryption.StateEncryption) tfdiags.Diagnostics { + // Here we'll convert back from the "File" representation to our + // stateV4 struct representation and write that. + // + // While we support legacy state formats for reading, we only support the + // latest for writing and so if a V5 is added in future then this function + // should be deleted and replaced with a writeStateV5, even though the + // read/prepare V4 functions above would stick around. 
+ + var diags tfdiags.Diagnostics + if file == nil || file.State == nil { + panic("attempt to write nil state to file") + } + + var terraformVersion string + if file.TerraformVersion != nil { + terraformVersion = file.TerraformVersion.String() + } + + sV4 := &stateV4{ + TerraformVersion: terraformVersion, + Serial: file.Serial, + Lineage: file.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + for name, os := range file.State.RootModule().OutputValues { + src, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occurred while serializing output value %q: %s.", name, err), + )) + continue + } + + typeSrc, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occurred while serializing the type of output value %q: %s.", name, err), + )) + continue + } + + sV4.RootOutputs[name] = outputStateV4{ + Sensitive: os.Sensitive, + ValueRaw: json.RawMessage(src), + ValueTypeRaw: json.RawMessage(typeSrc), + } + } + + for _, ms := range file.State.Modules { + moduleAddr := ms.Addr + for _, rs := range ms.Resources { + resourceAddr := rs.Addr.Resource + + var mode string + switch resourceAddr.Mode { + case addrs.ManagedResourceMode: + mode = "managed" + case addrs.DataResourceMode: + mode = "data" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), + )) + continue + } + + sV4.Resources = append(sV4.Resources, resourceStateV4{ + Module: moduleAddr.String(), + Mode: mode, + Type: resourceAddr.Type, + Name: resourceAddr.Name, + ProviderConfig: 
rs.ProviderConfig.String(), + Instances: []instanceObjectStateV4{}, + }) + rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) + + for key, is := range rs.Instances { + if is.HasCurrent() { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, is.Current, states.NotDeposed, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + for dk, obj := range is.Deposed { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, obj, dk, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + } + } + } + + sV4.CheckResults = encodeCheckResultsV4(file.State.CheckResults) + + sV4.normalize() + + src, err := json.Marshal(sV4) + if err != nil { + // Shouldn't happen if we do our conversion to *stateV4 correctly above. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf("An error occurred while serializing the state to save it. This is a bug in OpenTofu and should be reported: %s.", err), + )) + return diags + } + src = append(src, '\n') + + encrypted, encDiags := enc.EncryptState(src) + diags = diags.Append(encDiags) + + _, err = w.Write(encrypted) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write state", + fmt.Sprintf("An error occurred while writing the serialized state: %s.", err), + )) + return diags + } + + return diags +} + +func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var status string + switch obj.Status { + case states.ObjectReady: + status = "" + case states.ObjectTainted: + status = "tainted" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + 
fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), + )) + } + + var privateRaw []byte + if len(obj.Private) > 0 { + privateRaw = obj.Private + } + + deps := make([]string, len(obj.Dependencies)) + for i, depAddr := range obj.Dependencies { + deps[i] = depAddr.String() + } + + var rawKey interface{} + switch tk := key.(type) { + case addrs.IntKey: + rawKey = int(tk) + case addrs.StringKey: + rawKey = string(tk) + default: + if key != addrs.NoKey { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), + )) + } + } + + // Extract paths from path value marks + var paths []cty.Path + for _, vm := range obj.AttrSensitivePaths { + paths = append(paths, vm.Path) + } + + // Marshal paths to JSON + attributeSensitivePaths, pathsDiags := marshalPaths(paths) + diags = diags.Append(pathsDiags) + + return append(isV4s, instanceObjectStateV4{ + IndexKey: rawKey, + Deposed: string(deposed), + Status: status, + SchemaVersion: obj.SchemaVersion, + AttributesFlat: obj.AttrsFlat, + AttributesRaw: obj.AttrsJSON, + AttributeSensitivePaths: attributeSensitivePaths, + PrivateRaw: privateRaw, + Dependencies: deps, + CreateBeforeDestroy: obj.CreateBeforeDestroy, + }), diags +} + +func decodeCheckResultsV4(in []checkResultsV4) (*states.CheckResults, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + ret := &states.CheckResults{} + if len(in) == 0 { + return ret, diags + } + + ret.ConfigResults = addrs.MakeMap[addrs.ConfigCheckable, *states.CheckResultAggregate]() + for _, aggrIn := range in { + objectKind := decodeCheckableObjectKindV4(aggrIn.ObjectKind) + if objectKind == addrs.CheckableKindInvalid { + // We cannot decode a future unknown check result kind, but + // for forwards compatibility we need not treat this as an + // error. 
Eliding unknown check results will not result in + // significant data loss and allows us to maintain state file + // interoperability in the 1.x series. + continue + } + + // Some trickiness here: we only have an address parser for + // addrs.Checkable and not for addrs.ConfigCheckable, but that's okay + // because once we have an addrs.Checkable we can always derive an + // addrs.ConfigCheckable from it, and a ConfigCheckable should always + // be the same syntax as a Checkable with no index information and + // thus we can reuse the same parser for both here. + configAddrProxy, moreDiags := addrs.ParseCheckableStr(objectKind, aggrIn.ConfigAddr) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + continue + } + configAddr := configAddrProxy.ConfigCheckable() + if configAddr.String() != configAddrProxy.String() { + // This is how we catch if the config address included index + // information that would be allowed in a Checkable but not + // in a ConfigCheckable. + diags = diags.Append(fmt.Errorf("invalid checkable config address %s", aggrIn.ConfigAddr)) + continue + } + + aggr := &states.CheckResultAggregate{ + Status: decodeCheckStatusV4(aggrIn.Status), + } + + if len(aggrIn.Objects) != 0 { + aggr.ObjectResults = addrs.MakeMap[addrs.Checkable, *states.CheckResultObject]() + for _, objectIn := range aggrIn.Objects { + objectAddr, moreDiags := addrs.ParseCheckableStr(objectKind, objectIn.ObjectAddr) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + continue + } + + obj := &states.CheckResultObject{ + Status: decodeCheckStatusV4(objectIn.Status), + FailureMessages: objectIn.FailureMessages, + } + aggr.ObjectResults.Put(objectAddr, obj) + } + } + + ret.ConfigResults.Put(configAddr, aggr) + } + + return ret, diags +} + +func encodeCheckResultsV4(in *states.CheckResults) []checkResultsV4 { + // normalize empty and nil sets in the serialized state + if in == nil || in.ConfigResults.Len() == 0 { + return nil + } + + ret := 
make([]checkResultsV4, 0, in.ConfigResults.Len()) + + for _, configElem := range in.ConfigResults.Elems { + configResultsOut := checkResultsV4{ + ObjectKind: encodeCheckableObjectKindV4(configElem.Key.CheckableKind()), + ConfigAddr: configElem.Key.String(), + Status: encodeCheckStatusV4(configElem.Value.Status), + } + for _, objectElem := range configElem.Value.ObjectResults.Elems { + configResultsOut.Objects = append(configResultsOut.Objects, checkResultsObjectV4{ + ObjectAddr: objectElem.Key.String(), + Status: encodeCheckStatusV4(objectElem.Value.Status), + FailureMessages: objectElem.Value.FailureMessages, + }) + } + + ret = append(ret, configResultsOut) + } + + return ret +} + +func decodeCheckStatusV4(in string) checks.Status { + switch in { + case "pass": + return checks.StatusPass + case "fail": + return checks.StatusFail + case "error": + return checks.StatusError + default: + // We'll treat anything else as unknown just as a concession to + // forward-compatible parsing, in case a later version of OpenTofu + // introduces a new status. + return checks.StatusUnknown + } +} + +func encodeCheckStatusV4(in checks.Status) string { + switch in { + case checks.StatusPass: + return "pass" + case checks.StatusFail: + return "fail" + case checks.StatusError: + return "error" + case checks.StatusUnknown: + return "unknown" + default: + panic(fmt.Sprintf("unsupported check status %s", in)) + } +} + +func decodeCheckableObjectKindV4(in string) addrs.CheckableKind { + switch in { + case "resource": + return addrs.CheckableResource + case "output": + return addrs.CheckableOutputValue + case "check": + return addrs.CheckableCheck + case "var": + return addrs.CheckableInputVariable + default: + // We'll treat anything else as invalid just as a concession to + // forward-compatible parsing, in case a later version of OpenTofu + // introduces a new status. 
+ return addrs.CheckableKindInvalid + } +} + +func encodeCheckableObjectKindV4(in addrs.CheckableKind) string { + switch in { + case addrs.CheckableResource: + return "resource" + case addrs.CheckableOutputValue: + return "output" + case addrs.CheckableCheck: + return "check" + case addrs.CheckableInputVariable: + return "var" + default: + panic(fmt.Sprintf("unsupported checkable object kind %s", in)) + } +} + +type stateV4 struct { + Version stateVersionV4 `json:"version"` + TerraformVersion string `json:"terraform_version"` + Serial uint64 `json:"serial"` + Lineage string `json:"lineage"` + RootOutputs map[string]outputStateV4 `json:"outputs"` + Resources []resourceStateV4 `json:"resources"` + CheckResults []checkResultsV4 `json:"check_results"` +} + +// normalize makes some in-place changes to normalize the way items are +// stored to ensure that two functionally-equivalent states will be stored +// identically. +func (s *stateV4) normalize() { + sort.Stable(sortResourcesV4(s.Resources)) + for _, rs := range s.Resources { + sort.Stable(sortInstancesV4(rs.Instances)) + } +} + +type outputStateV4 struct { + ValueRaw json.RawMessage `json:"value"` + ValueTypeRaw json.RawMessage `json:"type"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type resourceStateV4 struct { + Module string `json:"module,omitempty"` + Mode string `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + EachMode string `json:"each,omitempty"` + ProviderConfig string `json:"provider"` + Instances []instanceObjectStateV4 `json:"instances"` +} + +type instanceObjectStateV4 struct { + IndexKey interface{} `json:"index_key,omitempty"` + Status string `json:"status,omitempty"` + Deposed string `json:"deposed,omitempty"` + + SchemaVersion uint64 `json:"schema_version"` + AttributesRaw json.RawMessage `json:"attributes,omitempty"` + AttributesFlat map[string]string `json:"attributes_flat,omitempty"` + AttributeSensitivePaths json.RawMessage 
`json:"sensitive_attributes,omitempty"` + + PrivateRaw []byte `json:"private,omitempty"` + + Dependencies []string `json:"dependencies,omitempty"` + + CreateBeforeDestroy bool `json:"create_before_destroy,omitempty"` +} + +type checkResultsV4 struct { + ObjectKind string `json:"object_kind"` + ConfigAddr string `json:"config_addr"` + Status string `json:"status"` + Objects []checkResultsObjectV4 `json:"objects"` +} + +type checkResultsObjectV4 struct { + ObjectAddr string `json:"object_addr"` + Status string `json:"status"` + FailureMessages []string `json:"failure_messages,omitempty"` +} + +// stateVersionV4 is a weird special type we use to produce our hard-coded +// "version": 4 in the JSON serialization. +type stateVersionV4 struct{} + +func (sv stateVersionV4) MarshalJSON() ([]byte, error) { + return []byte{'4'}, nil +} + +func (sv stateVersionV4) UnmarshalJSON([]byte) error { + // Nothing to do: we already know we're version 4 + return nil +} + +type sortResourcesV4 []resourceStateV4 + +func (sr sortResourcesV4) Len() int { return len(sr) } +func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } +func (sr sortResourcesV4) Less(i, j int) bool { + switch { + case sr[i].Module != sr[j].Module: + return sr[i].Module < sr[j].Module + case sr[i].Mode != sr[j].Mode: + return sr[i].Mode < sr[j].Mode + case sr[i].Type != sr[j].Type: + return sr[i].Type < sr[j].Type + case sr[i].Name != sr[j].Name: + return sr[i].Name < sr[j].Name + default: + return false + } +} + +type sortInstancesV4 []instanceObjectStateV4 + +func (si sortInstancesV4) Len() int { return len(si) } +func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } +func (si sortInstancesV4) Less(i, j int) bool { + ki := si[i].IndexKey + kj := si[j].IndexKey + if ki != kj { + if (ki == nil) != (kj == nil) { + return ki == nil + } + if kii, isInt := ki.(int); isInt { + if kji, isInt := kj.(int); isInt { + return kii < kji + } + return true + } + if kis, isStr := ki.(string); 
isStr { + if kjs, isStr := kj.(string); isStr { + return kis < kjs + } + return true + } + } + if si[i].Deposed != si[j].Deposed { + return si[i].Deposed < si[j].Deposed + } + return false +} + +// pathStep is an intermediate representation of a cty.PathStep to facilitate +// consistent JSON serialization. The Value field can either be a cty.Value of +// dynamic type (for index steps), or a string (for get attr steps). +type pathStep struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} + +const ( + indexPathStepType = "index" + getAttrPathStepType = "get_attr" +) + +func unmarshalPaths(buf []byte) ([]cty.Path, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var jsonPaths [][]pathStep + + err := json.Unmarshal(buf, &jsonPaths) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error unmarshaling path steps", + err.Error(), + )) + } + + paths := make([]cty.Path, 0, len(jsonPaths)) + +unmarshalOuter: + for _, jsonPath := range jsonPaths { + var path cty.Path + for _, jsonStep := range jsonPath { + switch jsonStep.Type { + case indexPathStepType: + key, err := ctyjson.Unmarshal(jsonStep.Value, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error unmarshaling path step", + fmt.Sprintf("Failed to unmarshal index step key: %s", err), + )) + continue unmarshalOuter + } + path = append(path, cty.IndexStep{Key: key}) + case getAttrPathStepType: + var name string + if err := json.Unmarshal(jsonStep.Value, &name); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error unmarshaling path step", + fmt.Sprintf("Failed to unmarshal get attr step name: %s", err), + )) + continue unmarshalOuter + } + path = append(path, cty.GetAttrStep{Name: name}) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported path step", + fmt.Sprintf("Unsupported path step type %q", jsonStep.Type), + )) + continue unmarshalOuter 
+ } + } + paths = append(paths, path) + } + + return paths, diags +} + +func marshalPaths(paths []cty.Path) ([]byte, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // cty.Path is a slice of cty.PathSteps, so our representation of a slice + // of paths is a nested slice of our intermediate pathStep struct + jsonPaths := make([][]pathStep, 0, len(paths)) + +marshalOuter: + for _, path := range paths { + jsonPath := make([]pathStep, 0, len(path)) + for _, step := range path { + var jsonStep pathStep + switch s := step.(type) { + case cty.IndexStep: + key, err := ctyjson.Marshal(s.Key, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error marshaling path step", + fmt.Sprintf("Failed to marshal index step key %#v: %s", s.Key, err), + )) + continue marshalOuter + } + jsonStep.Type = indexPathStepType + jsonStep.Value = key + case cty.GetAttrStep: + name, err := json.Marshal(s.Name) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error marshaling path step", + fmt.Sprintf("Failed to marshal get attr step name %s: %s", s.Name, err), + )) + continue marshalOuter + } + jsonStep.Type = getAttrPathStepType + jsonStep.Value = name + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported path step", + fmt.Sprintf("Unsupported path step %#v (%T)", step, step), + )) + continue marshalOuter + } + jsonPath = append(jsonPath, jsonStep) + } + jsonPaths = append(jsonPaths, jsonPath) + } + + buf, err := json.Marshal(jsonPaths) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error marshaling path steps", + fmt.Sprintf("Failed to marshal path steps: %s", err), + )) + } + + return buf, diags +} diff --git a/pkg/states/statefile/version4_test.go b/pkg/states/statefile/version4_test.go new file mode 100644 index 00000000000..ed929ac05d1 --- /dev/null +++ b/pkg/states/statefile/version4_test.go @@ -0,0 +1,263 @@ +// Copyright (c) 
The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "sort" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// This test verifies that modules are sorted before resources: +// https://github.com/hashicorp/terraform/issues/21552 +func TestVersion4_sort(t *testing.T) { + resources := sortResourcesV4{ + { + Module: "module.child", + Type: "test_instance", + Name: "foo", + }, + { + Type: "test_instance", + Name: "foo", + }, + { + Module: "module.kinder", + Type: "test_instance", + Name: "foo", + }, + { + Module: "module.child.grandchild", + Type: "test_instance", + Name: "foo", + }, + } + sort.Stable(resources) + + moduleOrder := []string{"", "module.child", "module.child.grandchild", "module.kinder"} + + for i, resource := range resources { + if resource.Module != moduleOrder[i] { + t.Errorf("wrong sort order: expected %q, got %q\n", moduleOrder[i], resource.Module) + } + } +} + +func TestVersion4_unmarshalPaths(t *testing.T) { + testCases := map[string]struct { + json string + paths []cty.Path + diags []string + }{ + "no paths": { + json: `[]`, + paths: []cty.Path{}, + }, + "attribute path": { + json: `[ + [ + { + "type": "get_attr", + "value": "password" + } + ] +]`, + paths: []cty.Path{cty.GetAttrPath("password")}, + }, + "attribute and string index": { + json: `[ + [ + { + "type": "get_attr", + "value": "triggers" + }, + { + "type": "index", + "value": { + "value": "secret", + "type": "string" + } + } + ] +]`, + paths: []cty.Path{cty.GetAttrPath("triggers").IndexString("secret")}, + }, + "attribute, number index, attribute": { + json: `[ + [ + { + "type": "get_attr", + "value": "identities" + }, + { + "type": "index", + "value": { + "value": 2, + "type": "number" + } + }, + { + "type": "get_attr", + "value": "private_key" + } + ] +]`, + paths: 
[]cty.Path{cty.GetAttrPath("identities").IndexInt(2).GetAttr("private_key")}, + }, + "multiple paths": { + json: `[ + [ + { + "type": "get_attr", + "value": "alpha" + } + ], + [ + { + "type": "get_attr", + "value": "beta" + } + ], + [ + { + "type": "get_attr", + "value": "gamma" + } + ] +]`, + paths: []cty.Path{cty.GetAttrPath("alpha"), cty.GetAttrPath("beta"), cty.GetAttrPath("gamma")}, + }, + "errors": { + json: `[ + [ + { + "type": "get_attr", + "value": 5 + } + ], + [ + { + "type": "index", + "value": "test" + } + ], + [ + { + "type": "invalid_type", + "value": ["this is invalid too"] + } + ] +]`, + paths: []cty.Path{}, + diags: []string{ + "Failed to unmarshal get attr step name", + "Failed to unmarshal index step key", + "Unsupported path step", + }, + }, + "one invalid path, others valid": { + json: `[ + [ + { + "type": "get_attr", + "value": "alpha" + } + ], + [ + { + "type": "invalid_type", + "value": ["this is invalid too"] + } + ], + [ + { + "type": "get_attr", + "value": "gamma" + } + ] +]`, + paths: []cty.Path{cty.GetAttrPath("alpha"), cty.GetAttrPath("gamma")}, + diags: []string{"Unsupported path step"}, + }, + "invalid structure": { + json: `{}`, + paths: []cty.Path{}, + diags: []string{"Error unmarshaling path steps"}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + paths, diags := unmarshalPaths([]byte(tc.json)) + + if len(tc.diags) == 0 { + if len(diags) != 0 { + t.Errorf("expected no diags, got: %#v", diags) + } + } else { + if got, want := len(diags), len(tc.diags); got != want { + t.Fatalf("got %d diags, want %d\n%s", got, want, diags.Err()) + } + for i := range tc.diags { + got := tfdiags.Diagnostics{diags[i]}.Err().Error() + if !strings.Contains(got, tc.diags[i]) { + t.Errorf("expected diag %d to contain %q, but was:\n%s", i, tc.diags[i], got) + } + } + } + + if len(paths) != len(tc.paths) { + t.Fatalf("got %d paths, want %d", len(paths), len(tc.paths)) + } + for i, path := range paths { + if 
!path.Equals(tc.paths[i]) { + t.Errorf("wrong paths\n got: %#v\nwant: %#v", path, tc.paths[i]) + } + } + }) + } +} + +func TestVersion4_marshalPaths(t *testing.T) { + testCases := map[string]struct { + paths []cty.Path + json string + }{ + "no paths": { + paths: []cty.Path{}, + json: `[]`, + }, + "attribute path": { + paths: []cty.Path{cty.GetAttrPath("password")}, + json: `[[{"type":"get_attr","value":"password"}]]`, + }, + "attribute, number index, attribute": { + paths: []cty.Path{cty.GetAttrPath("identities").IndexInt(2).GetAttr("private_key")}, + json: `[[{"type":"get_attr","value":"identities"},{"type":"index","value":{"value":2,"type":"number"}},{"type":"get_attr","value":"private_key"}]]`, + }, + "multiple paths": { + paths: []cty.Path{cty.GetAttrPath("a"), cty.GetAttrPath("b"), cty.GetAttrPath("c")}, + json: `[[{"type":"get_attr","value":"a"}],[{"type":"get_attr","value":"b"}],[{"type":"get_attr","value":"c"}]]`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + json, diags := marshalPaths(tc.paths) + + if len(diags) != 0 { + t.Fatalf("expected no diags, got: %#v", diags) + } + + if got, want := string(json), tc.json; got != want { + t.Fatalf("wrong JSON output\n got: %s\nwant: %s\n", got, want) + } + }) + } +} diff --git a/pkg/states/statefile/write.go b/pkg/states/statefile/write.go new file mode 100644 index 00000000000..226a3d745f7 --- /dev/null +++ b/pkg/states/statefile/write.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statefile + +import ( + "io" + + "github.com/kubegems/opentofu/pkg/encryption" + tfversion "github.com/kubegems/opentofu/version" +) + +// Write writes the given state to the given writer in the current state +// serialization format. +func Write(s *File, w io.Writer, enc encryption.StateEncryption) error { + // Always record the current tofu version in the state. 
+ s.TerraformVersion = tfversion.SemVer + + diags := writeStateV4(s, w, enc) + return diags.Err() +} + +// WriteForTest writes the given state to the given writer in the current state +// serialization format without recording the current tofu version. This is +// intended for use in tests that need to override the current tofu +// version. +func WriteForTest(s *File, w io.Writer) error { + diags := writeStateV4(s, w, encryption.StateEncryptionDisabled()) + return diags.Err() +} diff --git a/pkg/states/statemgr/doc.go b/pkg/states/statemgr/doc.go new file mode 100644 index 00000000000..643ef0c1300 --- /dev/null +++ b/pkg/states/statemgr/doc.go @@ -0,0 +1,26 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package statemgr defines the interfaces and some supporting functionality +// for "state managers", which are components responsible for writing state +// to some persistent storage and then later retrieving it. +// +// State managers will usually (but not necessarily) use the state file formats +// implemented in the sibling directory "statefile" to serialize the persistent +// parts of state for storage. +// +// State managers are responsible for ensuring that stored state can be updated +// safely across multiple, possibly-concurrent OpenTofu runs (with reasonable +// constraints and limitations). The rest of OpenTofu considers state to be +// a mutable data structure, with state managers preserving that illusion +// by creating snapshots of the state and updating them over time. +// +// From the perspective of callers of the general state manager API, a state +// manager is able to return the latest snapshot and to replace that snapshot +// with a new one. 
Some state managers may also preserve historical snapshots +// using facilities offered by their storage backend, but this is always an +// implementation detail: the historical versions are not visible to a user +// of these interfaces. +package statemgr diff --git a/pkg/states/statemgr/filesystem.go b/pkg/states/statemgr/filesystem.go new file mode 100644 index 00000000000..8d5689e7ab5 --- /dev/null +++ b/pkg/states/statemgr/filesystem.go @@ -0,0 +1,559 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sync" + "time" + + multierror "github.com/hashicorp/go-multierror" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Filesystem is a full state manager that uses a file in the local filesystem +// for persistent storage. +// +// The transient storage for Filesystem is always in-memory. +type Filesystem struct { + // path is the location where a file will be created or replaced for + // each persistent snapshot. + path string + + // readPath is read by RefreshState instead of "path" until the first + // call to PersistState, after which it is ignored. + // + // The file at readPath must never be written to by this manager. + readPath string + + // backupPath is an optional extra path which, if non-empty, will be + // created or overwritten with the first state snapshot we read if there + // is a subsequent call to write a different state. + backupPath string + + // the file handle corresponding to PathOut + stateFileOut *os.File + + // While the stateFileOut will correspond to the lock directly, + // store and check the lock ID to maintain a strict statemgr.Locker + // implementation. 
+ lockID string + + // created is set to true if stateFileOut didn't exist before we created it. + // This is mostly so we can clean up empty files during tests, but doesn't + // hurt to remove file we never wrote to. + created bool + + mu sync.Mutex + file, readFile *statefile.File + backupFile *statefile.File + writtenBackup bool + + encryption encryption.StateEncryption +} + +var ( + _ Full = (*Filesystem)(nil) + _ PersistentMeta = (*Filesystem)(nil) + _ Migrator = (*Filesystem)(nil) +) + +// NewFilesystem creates a filesystem-based state manager that reads and writes +// state snapshots at the given filesystem path. +// +// This is equivalent to calling NewFilesystemBetweenPaths with statePath as +// both of the path arguments. +func NewFilesystem(statePath string, enc encryption.StateEncryption) *Filesystem { + return &Filesystem{ + path: statePath, + readPath: statePath, + encryption: enc, + } +} + +// NewFilesystemBetweenPaths creates a filesystem-based state manager that +// reads an initial snapshot from readPath and then writes all new snapshots to +// writePath. +func NewFilesystemBetweenPaths(readPath, writePath string, enc encryption.StateEncryption) *Filesystem { + return &Filesystem{ + path: writePath, + readPath: readPath, + encryption: enc, + } +} + +// SetBackupPath configures the receiver so that it will create a local +// backup file of the next state snapshot it reads (in State) if a different +// snapshot is subsequently written (in WriteState). Only one backup is +// written for the lifetime of the object, unless reset as described below. +// +// For correct operation, this must be called before any other state methods +// are called. If called multiple times, each call resets the backup +// function so that the next read will become the backup snapshot and a +// following write will save a backup of it. 
+func (s *Filesystem) SetBackupPath(path string) {
+	s.backupPath = path
+	s.backupFile = nil
+	s.writtenBackup = false
+}
+
+// BackupPath returns the manager's backup path if backup files are enabled,
+// or an empty string otherwise.
+func (s *Filesystem) BackupPath() string {
+	return s.backupPath
+}
+
+// State is an implementation of Reader.
+func (s *Filesystem) State() *states.State {
+	defer s.mutex()()
+	if s.file == nil {
+		return nil
+	}
+	return s.file.DeepCopy().State
+}
+
+// WriteState is an implementation of Writer.
+func (s *Filesystem) WriteState(state *states.State) error {
+	defer s.mutex()()
+
+	if s.readFile == nil {
+		err := s.refreshState()
+		if err != nil {
+			return err
+		}
+	}
+
+	return s.writeState(state, nil)
+}
+
+func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error {
+	s.file = s.file.DeepCopy()
+	if s.file == nil {
+		s.file = NewStateFile()
+	}
+	s.file.State = state.DeepCopy()
+
+	if meta != nil {
+		// Force new metadata
+		s.file.Lineage = meta.Lineage
+		s.file.Serial = meta.Serial
+		log.Printf("[TRACE] statemgr.Filesystem: forcing lineage %q serial %d for migration/import", s.file.Lineage, s.file.Serial)
+	}
+
+	return nil
+}
+
+// PersistState writes state to a tfstate file.
+func (s *Filesystem) PersistState(schemas *tofu.Schemas) error {
+	defer s.mutex()()
+
+	return s.persistState(schemas)
+}
+
+func (s *Filesystem) persistState(schemas *tofu.Schemas) error {
+	// TODO: this should use a more robust method of writing state, by first
+	// writing to a temp file on the same filesystem, and renaming the file over
+	// the original.
+	if s.stateFileOut == nil {
+		if err := s.createStateFiles(); err != nil {
+			// Propagate the error: previously this returned nil, which
+			// silently dropped the failure and left s.stateFileOut nil for
+			// the rest of this function.
+			return err
+		}
+	}
+	defer s.stateFileOut.Sync()
+
+	if s.file == nil {
+		s.file = NewStateFile()
+	}
+	state := s.file.State
+
+	// We'll try to write our backup first, so we can be sure we've created
+	// it successfully before clobbering the original file it came from.
+ if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" { + if !statefile.StatesMarshalEqual(state, s.backupFile.State) { + log.Printf("[TRACE] statemgr.Filesystem: creating backup snapshot at %s", s.backupPath) + bfh, err := os.Create(s.backupPath) + if err != nil { + return fmt.Errorf("failed to create local state backup file: %w", err) + } + defer bfh.Close() + + err = statefile.Write(s.backupFile, bfh, s.encryption) + if err != nil { + return fmt.Errorf("failed to write to local state backup file: %w", err) + } + + s.writtenBackup = true + } else { + log.Print("[TRACE] statemgr.Filesystem: not making a backup, because the new snapshot is identical to the old") + } + } else { + // This branch is all just logging, to help understand why we didn't make a backup. + switch { + case s.backupPath == "": + log.Print("[TRACE] statemgr.Filesystem: state file backups are disabled") + case s.writtenBackup: + log.Printf("[TRACE] statemgr.Filesystem: have already backed up original %s to %s on a previous write", s.path, s.backupPath) + case s.backupFile == nil: + log.Printf("[TRACE] statemgr.Filesystem: no original state snapshot to back up") + default: + log.Printf("[TRACE] statemgr.Filesystem: not creating a backup for an unknown reason") + } + } + + if _, err := s.stateFileOut.Seek(0, io.SeekStart); err != nil { + return err + } + if err := s.stateFileOut.Truncate(0); err != nil { + return err + } + + if state == nil { + // if we have no state, don't write anything else. 
+ log.Print("[TRACE] statemgr.Filesystem: state is nil, so leaving the file empty") + return nil + } + + if s.readFile == nil || !statefile.StatesMarshalEqual(s.file.State, s.readFile.State) { + s.file.Serial++ + log.Printf("[TRACE] statemgr.Filesystem: state has changed since last snapshot, so incrementing serial to %d", s.file.Serial) + } else { + log.Print("[TRACE] statemgr.Filesystem: no state changes since last snapshot") + } + + log.Printf("[TRACE] statemgr.Filesystem: writing snapshot at %s", s.path) + if err := statefile.Write(s.file, s.stateFileOut, s.encryption); err != nil { + return err + } + + // Any future reads must come from the file we've now updated + s.readPath = s.path + return nil +} + +// RefreshState is an implementation of Refresher. +func (s *Filesystem) RefreshState() error { + defer s.mutex()() + return s.refreshState() +} + +func (s *Filesystem) GetRootOutputValues() (map[string]*states.OutputValue, error) { + err := s.RefreshState() + if err != nil { + return nil, err + } + + state := s.State() + if state == nil { + state = states.NewState() + } + + return state.RootModule().OutputValues, nil +} + +func (s *Filesystem) refreshState() error { + var reader io.Reader + + // The s.readPath file is only OK to read if we have not written any state out + // (in which case the same state needs to be read in), and no state output file + // has been opened (possibly via a lock) or the input path is different + // than the output path. + // This is important for Windows, as if the input file is the same as the + // output file, and the output file has been locked already, we can't open + // the file again. + if s.stateFileOut == nil || s.readPath != s.path { + // we haven't written a state file yet, so load from readPath + log.Printf("[TRACE] statemgr.Filesystem: reading initial snapshot from %s", s.readPath) + f, err := os.Open(s.readPath) + if err != nil { + // It is okay if the file doesn't exist; we'll treat that as a nil state. 
+ if !os.IsNotExist(err) { + return err + } + + // we need a non-nil reader for ReadState and an empty buffer works + // to return EOF immediately + reader = bytes.NewBuffer(nil) + + } else { + defer f.Close() + reader = f + } + } else { + log.Printf("[TRACE] statemgr.Filesystem: reading latest snapshot from %s", s.path) + // no state to refresh + if s.stateFileOut == nil { + return nil + } + + // we have a state file, make sure we're at the start + _, err := s.stateFileOut.Seek(0, io.SeekStart) + if err != nil { + return err + } + reader = s.stateFileOut + } + + f, err := statefile.Read(reader, s.encryption) + // if there's no state then a nil file is fine + if err != nil { + if err != statefile.ErrNoState { + return err + } + log.Printf("[TRACE] statemgr.Filesystem: snapshot file has nil snapshot, but that's okay") + } + + s.file = f + s.readFile = s.file.DeepCopy() + if s.file != nil { + log.Printf("[TRACE] statemgr.Filesystem: read snapshot with lineage %q serial %d", s.file.Lineage, s.file.Serial) + } else { + log.Print("[TRACE] statemgr.Filesystem: read nil snapshot") + } + return nil +} + +// Lock implements Locker using filesystem discretionary locks. +func (s *Filesystem) Lock(info *LockInfo) (string, error) { + defer s.mutex()() + + if s.stateFileOut == nil { + if err := s.createStateFiles(); err != nil { + return "", err + } + } + + if s.lockID != "" { + return "", fmt.Errorf("state %q already locked", s.stateFileOut.Name()) + } + + if err := s.lock(); err != nil { + info, infoErr := s.lockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + lockErr := &LockError{ + Info: info, + Err: err, + } + + return "", lockErr + } + + s.lockID = info.ID + return s.lockID, s.writeLockInfo(info) +} + +// Unlock is the companion to Lock, completing the implemention of Locker. 
+func (s *Filesystem) Unlock(id string) error {
+	defer s.mutex()()
+
+	if s.lockID == "" {
+		return fmt.Errorf("LocalState not locked")
+	}
+
+	if id != s.lockID {
+		idErr := fmt.Errorf("invalid lock id: %q. current id: %q", id, s.lockID)
+		info, err := s.lockInfo()
+		if err != nil {
+			idErr = multierror.Append(idErr, err)
+		}
+
+		return &LockError{
+			Err:  idErr,
+			Info: info,
+		}
+	}
+
+	lockInfoPath := s.lockInfoPath()
+	err := os.Remove(lockInfoPath)
+	if err != nil {
+		log.Printf(
+			"[ERROR] statemgr.Filesystem: error removing lock metadata file %q: %s",
+			lockInfoPath,
+			err,
+		)
+	} else {
+		log.Printf("[TRACE] statemgr.Filesystem: removed lock metadata file %s", lockInfoPath)
+	}
+	fileName := s.stateFileOut.Name()
+
+	unlockErr := s.unlock()
+
+	s.stateFileOut.Close()
+	s.stateFileOut = nil
+	s.lockID = ""
+
+	// clean up the state file if we created it and never wrote to it
+	stat, err := os.Stat(fileName)
+	if err == nil && stat.Size() == 0 && s.created {
+		err = os.Remove(fileName)
+		if err != nil {
+			// fixed typo: log prefix previously said "stagemgr.Filesystem"
+			log.Printf("[ERROR] statemgr.Filesystem: error removing empty state file %q: %s", fileName, err)
+		}
+	}
+
+	return unlockErr
+}
+
+// StateSnapshotMeta returns the metadata from the most recently persisted
+// or refreshed persistent state snapshot.
+//
+// This is an implementation of PersistentMeta.
+func (s *Filesystem) StateSnapshotMeta() SnapshotMeta {
+	if s.file == nil {
+		return SnapshotMeta{} // placeholder
+	}
+
+	return SnapshotMeta{
+		Lineage: s.file.Lineage,
+		Serial:  s.file.Serial,
+
+		TerraformVersion: s.file.TerraformVersion,
+	}
+}
+
+// StateForMigration is part of our implementation of Migrator.
+func (s *Filesystem) StateForMigration() *statefile.File {
+	return s.file.DeepCopy()
+}
+
+// WriteStateForMigration is part of our implementation of Migrator.
+func (s *Filesystem) WriteStateForMigration(f *statefile.File, force bool) error { + defer s.mutex()() + + if s.readFile == nil { + err := s.refreshState() + if err != nil { + return err + } + } + + if !force { + err := CheckValidImport(f, s.readFile) + if err != nil { + return err + } + } + + if s.readFile != nil { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d over snapshot with lineage %q serial %d at %s", + f.Lineage, f.Serial, + s.readFile.Lineage, s.readFile.Serial, + s.path, + ) + } else { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d as the initial state snapshot at %s", + f.Lineage, f.Serial, + s.path, + ) + } + + err := s.writeState(f.State, &SnapshotMeta{Lineage: f.Lineage, Serial: f.Serial}) + if err != nil { + return err + } + + return s.persistState(nil) +} + +// Open the state file, creating the directories and file as needed. +func (s *Filesystem) createStateFiles() error { + log.Printf("[TRACE] statemgr.Filesystem: preparing to manage state snapshots at %s", s.path) + + // This could race, but we only use it to clean up empty files + if _, err := os.Stat(s.path); os.IsNotExist(err) { + s.created = true + } + + // Create all the directories + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + + f, err := os.OpenFile(s.path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return err + } + + s.stateFileOut = f + + // If the file already existed with content then that'll be the content + // of our backup file if we write a change later. 
+ s.backupFile, err = statefile.Read(s.stateFileOut, s.encryption) + if err != nil { + if err != statefile.ErrNoState { + return err + } + log.Printf("[TRACE] statemgr.Filesystem: no previously-stored snapshot exists") + } else { + log.Printf("[TRACE] statemgr.Filesystem: existing snapshot has lineage %q serial %d", s.backupFile.Lineage, s.backupFile.Serial) + } + + // Refresh now, to load in the snapshot if the file already existed + return nil +} + +// return the path for the lockInfo metadata. +func (s *Filesystem) lockInfoPath() string { + stateDir, stateName := filepath.Split(s.path) + if stateName == "" { + panic("empty state file path") + } + + if stateName[0] == '.' { + stateName = stateName[1:] + } + + return filepath.Join(stateDir, fmt.Sprintf(".%s.lock.info", stateName)) +} + +// lockInfo returns the data in a lock info file +func (s *Filesystem) lockInfo() (*LockInfo, error) { + path := s.lockInfoPath() + infoData, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + info := LockInfo{} + err = json.Unmarshal(infoData, &info) + if err != nil { + return nil, fmt.Errorf("state file %q locked, but could not unmarshal lock info: %w", s.readPath, err) + } + return &info, nil +} + +// write a new lock info file +func (s *Filesystem) writeLockInfo(info *LockInfo) error { + path := s.lockInfoPath() + info.Path = s.readPath + info.Created = time.Now().UTC() + + log.Printf("[TRACE] statemgr.Filesystem: writing lock metadata to %s", path) + err := os.WriteFile(path, info.Marshal(), 0600) + if err != nil { + return fmt.Errorf("could not write lock info for %q: %w", s.readPath, err) + } + return nil +} + +func (s *Filesystem) mutex() func() { + s.mu.Lock() + return s.mu.Unlock +} diff --git a/pkg/states/statemgr/filesystem_lock_unix.go b/pkg/states/statemgr/filesystem_lock_unix.go new file mode 100644 index 00000000000..59652d6e32a --- /dev/null +++ b/pkg/states/statemgr/filesystem_lock_unix.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors 
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !windows
+// +build !windows
+
+package statemgr
+
+import (
+	"io"
+	"log"
+	"syscall"
+)
+
+// lock acquires an advisory lock on the open state file handle.
+//
+// use fcntl POSIX locks for the most consistent behavior across platforms, and
+// hopefully some compatibility over NFS and CIFS.
+func (s *Filesystem) lock() error {
+	log.Printf("[TRACE] statemgr.Filesystem: locking %s using fcntl flock", s.path)
+	flock := &syscall.Flock_t{
+		Type:   syscall.F_RDLCK | syscall.F_WRLCK,
+		Whence: int16(io.SeekStart),
+		Start:  0,
+		Len:    0, // Len of zero locks the entire file
+	}
+
+	fd := s.stateFileOut.Fd()
+	return syscall.FcntlFlock(fd, syscall.F_SETLK, flock)
+}
+
+// unlock releases the fcntl lock previously taken by lock on the same
+// file descriptor.
+func (s *Filesystem) unlock() error {
+	log.Printf("[TRACE] statemgr.Filesystem: unlocking %s using fcntl flock", s.path)
+	flock := &syscall.Flock_t{
+		Type:   syscall.F_UNLCK,
+		Whence: int16(io.SeekStart),
+		Start:  0,
+		Len:    0,
+	}
+
+	fd := s.stateFileOut.Fd()
+	return syscall.FcntlFlock(fd, syscall.F_SETLK, flock)
+}
diff --git a/pkg/states/statemgr/filesystem_lock_windows.go b/pkg/states/statemgr/filesystem_lock_windows.go
new file mode 100644
index 00000000000..f04641ddf88
--- /dev/null
+++ b/pkg/states/statemgr/filesystem_lock_windows.go
@@ -0,0 +1,119 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +//go:build windows +// +build windows + +package statemgr + +import ( + "log" + "math" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procCreateEventW = modkernel32.NewProc("CreateEventW") +) + +const ( + // dwFlags defined for LockFileEx + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + _LOCKFILE_FAIL_IMMEDIATELY = 1 + _LOCKFILE_EXCLUSIVE_LOCK = 2 +) + +func (s *Filesystem) lock() error { + log.Printf("[TRACE] statemgr.Filesystem: locking %s using LockFileEx", s.path) + + // even though we're failing immediately, an overlapped event structure is + // required + ol, err := newOverlapped() + if err != nil { + return err + } + defer syscall.CloseHandle(ol.HEvent) + + return lockFileEx( + syscall.Handle(s.stateFileOut.Fd()), + _LOCKFILE_EXCLUSIVE_LOCK|_LOCKFILE_FAIL_IMMEDIATELY, + 0, // reserved + 0, // bytes low + math.MaxUint32, // bytes high + ol, + ) +} + +func (s *Filesystem) unlock() error { + log.Printf("[TRACE] statemgr.Filesystem: unlocked by closing %s", s.path) + + // the file is closed in Unlock + return nil +} + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6( + procLockFileEx.Addr(), + 6, + uintptr(h), + uintptr(flags), + uintptr(reserved), + uintptr(locklow), + uintptr(lockhigh), + uintptr(unsafe.Pointer(ol)), + ) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +// newOverlapped creates a structure used to track asynchronous +// I/O requests that have been issued. 
+func newOverlapped() (*syscall.Overlapped, error) { + event, err := createEvent(nil, true, false, nil) + if err != nil { + return nil, err + } + return &syscall.Overlapped{HEvent: event}, nil +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } + + r0, _, e1 := syscall.Syscall6( + procCreateEventW.Addr(), + 4, + uintptr(unsafe.Pointer(sa)), + uintptr(_p0), + uintptr(_p1), + uintptr(unsafe.Pointer(name)), + 0, + 0, + ) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/pkg/states/statemgr/filesystem_test.go b/pkg/states/statemgr/filesystem_test.go new file mode 100644 index 00000000000..95ffc3ab161 --- /dev/null +++ b/pkg/states/statemgr/filesystem_test.go @@ -0,0 +1,470 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/go-test/deep" + version "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + tfversion "github.com/kubegems/opentofu/version" +) + +func TestFilesystem(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + ls := testFilesystem(t) + defer os.Remove(ls.readPath) + TestFull(t, ls) +} + +func TestFilesystemRace(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + ls := testFilesystem(t) + defer os.Remove(ls.readPath) + + current := TestFullInitialState() + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + ls.WriteState(current) + }() + } + wg.Wait() +} + +func TestFilesystemLocks(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + s := testFilesystem(t) + defer os.Remove(s.readPath) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + + out, err := exec.Command("go", "run", "testdata/lockstate.go", s.path).CombinedOutput() + if err != nil { + t.Fatal("unexpected lock failure", err, string(out)) + } + + if !strings.Contains(string(out), "lock failed") { + t.Fatal("expected 'locked failed', got", string(out)) + } + + // check our lock info + lockInfo, err := s.lockInfo() + if err != nil { + t.Fatal(err) + } + + if lockInfo.Operation != "test" { + t.Fatalf("invalid lock info %#v\n", lockInfo) + } + + // a noop, since we unlock on exit + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + + // local locks can re-lock + lockID, err = s.Lock(info) + if err != nil { + t.Fatal(err) + } + + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + + // we 
should not be able to unlock the same lock twice + if err := s.Unlock(lockID); err == nil { + t.Fatal("unlocking an unlocked state should fail") + } + + // make sure lock info is gone + lockInfoPath := s.lockInfoPath() + if _, err := os.Stat(lockInfoPath); !os.IsNotExist(err) { + t.Fatal("lock info not removed") + } +} + +// Verify that we can write to the state file, as Windows' mandatory locking +// will prevent writing to a handle different than the one that hold the lock. +func TestFilesystem_writeWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + s := testFilesystem(t) + defer os.Remove(s.readPath) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + }() + + if err := s.WriteState(TestFullInitialState()); err != nil { + t.Fatal(err) + } +} + +func TestFilesystem_pathOut(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := os.CreateTemp("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + defer os.Remove(f.Name()) + + ls := testFilesystem(t) + ls.path = f.Name() + defer os.Remove(ls.path) + + TestFull(t, ls) +} + +func TestFilesystem_backup(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := os.CreateTemp("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + defer os.Remove(f.Name()) + + ls := testFilesystem(t) + backupPath := f.Name() + ls.SetBackupPath(backupPath) + + TestFull(t, ls) + + // The backup functionality should've saved a copy of the original state + // prior to all of the modifications that TestFull does. 
+ bfh, err := os.Open(backupPath) + if err != nil { + t.Fatal(err) + } + bf, err := statefile.Read(bfh, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatal(err) + } + origState := TestFullInitialState() + if !bf.State.Equal(origState) { + for _, problem := range deep.Equal(origState, bf.State) { + t.Error(problem) + } + } +} + +// This test verifies a particularly tricky behavior where the input file +// is overridden and backups are enabled at the same time. This combination +// requires special care because we must ensure that when we create a backup +// it is of the original contents of the output file (which we're overwriting), +// not the contents of the input file (which is left unchanged). +func TestFilesystem_backupAndReadPath(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + info := NewLockInfo() + info.Operation = "test" + + workDir := t.TempDir() + markerOutput := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) + + outState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-output-state"), + false, // not sensitive + ) + }) + outFile, err := os.Create(filepath.Join(workDir, "output.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary outFile %s", err) + } + defer outFile.Close() + err = statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: outState, + }, outFile, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to write initial outfile state to %s: %s", outFile.Name(), err) + } + + inState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-input-state"), + false, // not sensitive + ) + }) + inFile, err := os.Create(filepath.Join(workDir, "input.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary inFile %s", err) + } + defer inFile.Close() + err = 
statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: inState, + }, inFile, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to write initial infile state to %s: %s", inFile.Name(), err) + } + + backupPath := outFile.Name() + ".backup" + + ls := NewFilesystemBetweenPaths(inFile.Name(), outFile.Name(), encryption.StateEncryptionDisabled()) + ls.SetBackupPath(backupPath) + + newState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-new-state"), + false, // not sensitive + ) + }) + err = WriteAndPersist(ls, newState, nil) + if err != nil { + t.Fatalf("failed to write new state: %s", err) + } + + lockID, err := ls.Lock(info) + if err != nil { + t.Fatal(err) + } + + if err := ls.Unlock(lockID); err != nil { + t.Fatal(err) + } + + // The backup functionality should've saved a copy of the original contents + // of the _output_ file, even though the first snapshot was read from + // the _input_ file. 
+ t.Run("backup file", func(t *testing.T) { + bfh, err := os.Open(backupPath) + if err != nil { + t.Fatal(err) + } + defer bfh.Close() + bf, err := statefile.Read(bfh, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatal(err) + } + os := bf.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-output-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) + t.Run("output file", func(t *testing.T) { + ofh, err := os.Open(outFile.Name()) + if err != nil { + t.Fatal(err) + } + defer ofh.Close() + of, err := statefile.Read(ofh, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatal(err) + } + os := of.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-new-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) +} + +func TestFilesystem_nonExist(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + ls := NewFilesystem("ishouldntexist", encryption.StateEncryptionDisabled()) + if err := ls.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + if state := ls.State(); state != nil { + t.Fatalf("bad: %#v", state) + } +} + +func TestFilesystem_lockUnlockWithoutWrite(t *testing.T) { + info := NewLockInfo() + info.Operation = "test" + + ls := testFilesystem(t) + + // Delete the just-created tempfile so that Lock recreates it + os.Remove(ls.path) + + // Lock the state, and in doing so recreate the tempfile + lockID, err := ls.Lock(info) + if err != nil { + t.Fatal(err) + } + + if !ls.created { + t.Fatal("should have marked state as created") + } + + if err := ls.Unlock(lockID); err != nil { + t.Fatal(err) + } + + _, err = os.Stat(ls.path) + if os.IsNotExist(err) { + // Success! 
Unlocking the state successfully deleted the tempfile + return + } else if err != nil { + t.Fatalf("unexpected error from os.Stat: %s", err) + } else { + os.Remove(ls.readPath) + t.Fatal("should have removed path, but exists") + } +} + +func TestFilesystem_impl(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + var _ Reader = new(Filesystem) + var _ Writer = new(Filesystem) + var _ Persister = new(Filesystem) + var _ Refresher = new(Filesystem) + var _ OutputReader = new(Filesystem) + var _ Locker = new(Filesystem) +} + +func testFilesystem(t *testing.T) *Filesystem { + f, err := os.CreateTemp("", "tf") + if err != nil { + t.Fatalf("failed to create temporary file %s", err) + } + t.Logf("temporary state file at %s", f.Name()) + + err = statefile.Write(&statefile.File{ + Lineage: "test-lineage", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: TestFullInitialState(), + }, f, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("failed to write initial state to %s: %s", f.Name(), err) + } + f.Close() + + ls := NewFilesystem(f.Name(), encryption.StateEncryptionDisabled()) + if err := ls.RefreshState(); err != nil { + t.Fatalf("initial refresh failed: %s", err) + } + + return ls +} + +// Make sure we can refresh while the state is locked +func TestFilesystem_refreshWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + f, err := os.CreateTemp("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = statefile.Write(&statefile.File{ + Lineage: "test-lineage", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: TestFullInitialState(), + }, f, encryption.StateEncryptionDisabled()) + if err != nil { + t.Fatalf("err: %s", err) + } + f.Close() + + s := NewFilesystem(f.Name(), encryption.StateEncryptionDisabled()) + defer os.Remove(s.path) + + // lock first + info := NewLockInfo() + info.Operation = "test" + lockID, err := s.Lock(info) + if err != nil { + 
t.Fatal(err) + } + defer func() { + if err := s.Unlock(lockID); err != nil { + t.Fatal(err) + } + }() + + if err := s.RefreshState(); err != nil { + t.Fatal(err) + } + + readState := s.State() + if readState == nil { + t.Fatal("missing state") + } +} + +func TestFilesystem_GetRootOutputValues(t *testing.T) { + fs := testFilesystem(t) + + outputs, err := fs.GetRootOutputValues() + if err != nil { + t.Errorf("Expected GetRootOutputValues to not return an error, but it returned %v", err) + } + + if len(outputs) != 2 { + t.Errorf("Expected %d outputs, but received %d", 2, len(outputs)) + } +} + +func testOverrideVersion(t *testing.T, v string) func() { + oldVersionStr := tfversion.Version + oldPrereleaseStr := tfversion.Prerelease + oldSemVer := tfversion.SemVer + + var newPrereleaseStr string + if dash := strings.Index(v, "-"); dash != -1 { + newPrereleaseStr = v[dash+1:] + v = v[:dash] + } + + newSemVer, err := version.NewVersion(v) + if err != nil { + t.Errorf("invalid override version %q: %s", v, err) + } + newVersionStr := newSemVer.String() + + tfversion.Version = newVersionStr + tfversion.Prerelease = newPrereleaseStr + tfversion.SemVer = newSemVer + + return func() { // reset function + tfversion.Version = oldVersionStr + tfversion.Prerelease = oldPrereleaseStr + tfversion.SemVer = oldSemVer + } +} diff --git a/pkg/states/statemgr/helper.go b/pkg/states/statemgr/helper.go new file mode 100644 index 00000000000..d6bdf3f4148 --- /dev/null +++ b/pkg/states/statemgr/helper.go @@ -0,0 +1,59 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +// The functions in this file are helper wrappers for common sequences of +// operations done against full state managers. 
+ +import ( + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tofu" + "github.com/kubegems/opentofu/version" +) + +// NewStateFile creates a new statefile.File object, with a newly-minted +// lineage identifier and serial 0, and returns a pointer to it. +func NewStateFile() *statefile.File { + return &statefile.File{ + Lineage: NewLineage(), + TerraformVersion: version.SemVer, + State: states.NewState(), + } +} + +// RefreshAndRead refreshes the persistent snapshot in the given state manager +// and then returns it. +// +// This is a wrapper around calling RefreshState and then State on the given +// manager. +func RefreshAndRead(mgr Storage) (*states.State, error) { + err := mgr.RefreshState() + if err != nil { + return nil, err + } + return mgr.State(), nil +} + +// WriteAndPersist writes a snapshot of the given state to the given state +// manager's transient store and then immediately persists it. +// +// The caller must ensure that the given state is not concurrently modified +// while this function is running, but it is safe to modify it after this +// function has returned. +// +// If an error is returned, it is undefined whether the state has been saved +// to the transient store or not, and so the only safe response is to bail +// out quickly with a user-facing error. In situations where more control +// is required, call WriteState and PersistState on the state manager directly +// and handle their errors. 
+func WriteAndPersist(mgr Storage, state *states.State, schemas *tofu.Schemas) error { + err := mgr.WriteState(state) + if err != nil { + return err + } + return mgr.PersistState(schemas) +} diff --git a/pkg/states/statemgr/lineage.go b/pkg/states/statemgr/lineage.go new file mode 100644 index 00000000000..de084169678 --- /dev/null +++ b/pkg/states/statemgr/lineage.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "fmt" + + "github.com/hashicorp/go-uuid" +) + +// NewLineage generates a new lineage identifier string. A lineage identifier +// is an opaque string that is intended to be unique in space and time, chosen +// when state is recorded at a location for the first time and then preserved +// afterwards to allow OpenTofu to recognize when one state snapshot is a +// predecessor or successor of another. +func NewLineage() string { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %w", err)) + } + return lineage +} diff --git a/pkg/states/statemgr/lock.go b/pkg/states/statemgr/lock.go new file mode 100644 index 00000000000..da7159f5c2b --- /dev/null +++ b/pkg/states/statemgr/lock.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// LockDisabled implements State and Locker but disables state locking. +// If State doesn't support locking, this is a no-op. This is useful for +// easily disabling locking of an existing state or for tests. 
+type LockDisabled struct { + // We can't embed State directly since Go dislikes that a field is + // State and State interface has a method State + Inner Full +} + +func (s *LockDisabled) State() *states.State { + return s.Inner.State() +} + +func (s *LockDisabled) GetRootOutputValues() (map[string]*states.OutputValue, error) { + return s.Inner.GetRootOutputValues() +} + +func (s *LockDisabled) WriteState(v *states.State) error { + return s.Inner.WriteState(v) +} + +func (s *LockDisabled) RefreshState() error { + return s.Inner.RefreshState() +} + +func (s *LockDisabled) PersistState(schemas *tofu.Schemas) error { + return s.Inner.PersistState(schemas) +} + +func (s *LockDisabled) Lock(info *LockInfo) (string, error) { + return "", nil +} + +func (s *LockDisabled) Unlock(id string) error { + return nil +} diff --git a/pkg/states/statemgr/lock_test.go b/pkg/states/statemgr/lock_test.go new file mode 100644 index 00000000000..07262d229c7 --- /dev/null +++ b/pkg/states/statemgr/lock_test.go @@ -0,0 +1,15 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "testing" +) + +func TestLockDisabled_impl(t *testing.T) { + var _ Full = new(LockDisabled) + var _ Locker = new(LockDisabled) +} diff --git a/pkg/states/statemgr/locker.go b/pkg/states/statemgr/locker.go new file mode 100644 index 00000000000..1eb60c10ca7 --- /dev/null +++ b/pkg/states/statemgr/locker.go @@ -0,0 +1,252 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "os" + "os/user" + "strings" + "text/template" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/kubegems/opentofu/version" +) + +var rngSource = rand.New(rand.NewSource(time.Now().UnixNano())) + +// Locker is the interface for state managers that are able to manage +// mutual-exclusion locks for state. +// +// Implementing Locker alongside Persistent relaxes some of the usual +// implementation constraints for implementations of Refresher and Persister, +// under the assumption that the locking mechanism effectively prevents +// multiple OpenTofu processes from reading and writing state concurrently. +// In particular, a type that implements both Locker and Persistent is only +// required to ensure that the Persistent implementation is concurrency-safe within +// a single OpenTofu process. +// +// A Locker implementation must ensure that another process with a +// similarly-configured state manager cannot successfully obtain a lock while +// the current process is holding it, or vice-versa, assuming that both +// processes agree on the locking mechanism. +// +// A Locker is not required to prevent non-cooperating processes from +// concurrently modifying the state, but is free to do so as an extra +// protection. If a mandatory locking mechanism of this sort is implemented, +// the state manager must ensure that RefreshState and PersistState calls +// can succeed if made through the same manager instance that is holding the +// lock, such as by retaining some sort of lock token that the Persistent +// methods can then use. +type Locker interface { + // Lock attempts to obtain a lock, using the given lock information. + // + // The result is an opaque id that can be passed to Unlock to release + // the lock, or an error if the lock cannot be acquired. 
Lock returns + // an instance of LockError immediately if the lock is already held, + // and the helper function LockWithContext uses this to automatically + // retry lock acquisition periodically until a timeout is reached. + Lock(info *LockInfo) (string, error) + + // Unlock releases a lock previously acquired by Lock. + // + // If the lock cannot be released -- for example, if it was stolen by + // another user with some sort of administrative override privilege -- + // then an error is returned explaining the situation in a way that + // is suitable for returning to an end-user. + Unlock(id string) error +} + +// test hook to verify that LockWithContext has attempted a lock +var postLockHook func() + +// LockWithContext locks the given state manager using the provided context +// for both timeout and cancellation. +// +// This method has a built-in retry/backoff behavior up to the context's +// timeout. +func LockWithContext(ctx context.Context, s Locker, info *LockInfo) (string, error) { + delay := time.Second + maxDelay := 16 * time.Second + for { + id, err := s.Lock(info) + if err == nil { + return id, nil + } + + le, ok := err.(*LockError) + if !ok { + // not a lock error, so we can't retry + return "", err + } + + if !le.Retriable() { + return "", err + } + + if postLockHook != nil { + postLockHook() + } + + // Lock() can be repeated without sleep + if le.RetriableWithoutDelay() { + continue + } + + // there's an existing lock, wait and try again + select { + case <-ctx.Done(): + // return the last lock error with the info + return "", err + case <-time.After(delay): + if delay < maxDelay { + delay *= 2 + } + } + } +} + +// LockInfo stores lock metadata. +// +// Only Operation and Info are required to be set by the caller of Lock. +// Most callers should use NewLockInfo to create a LockInfo value with many +// of the fields populated with suitable default values. +type LockInfo struct { + // Unique ID for the lock. 
NewLockInfo provides a random ID, but this may + // be overridden by the lock implementation. The final value of ID will be + // returned by the call to Lock. + ID string + + // OpenTofu operation, provided by the caller. + Operation string + + // Extra information to store with the lock, provided by the caller. + Info string + + // user@hostname when available + Who string + + // OpenTofu version + Version string + + // Time that the lock was taken. + Created time.Time + + // Path to the state file when applicable. Set by the Lock implementation. + Path string +} + +// NewLockInfo creates a LockInfo object and populates many of its fields +// with suitable default values. +func NewLockInfo() *LockInfo { + // this doesn't need to be cryptographically secure, just unique. + // Using math/rand alleviates the need to check handle the read error. + // Use a uuid format to match other IDs used throughout OpenTofu. + buf := make([]byte, 16) + rngSource.Read(buf) + + id, err := uuid.FormatUUID(buf) + if err != nil { + // this of course shouldn't happen + panic(err) + } + + // don't error out on user and hostname, as we don't require them + userName := "" + if userInfo, err := user.Current(); err == nil { + userName = userInfo.Username + } + host, _ := os.Hostname() + + info := &LockInfo{ + ID: id, + Who: fmt.Sprintf("%s@%s", userName, host), + Version: version.Version, + Created: time.Now().UTC(), + } + return info +} + +// Err returns the lock info formatted in an error +func (l *LockInfo) Err() error { + return errors.New(l.String()) +} + +// Marshal returns a string json representation of the LockInfo +func (l *LockInfo) Marshal() []byte { + js, err := json.Marshal(l) + if err != nil { + panic(err) + } + return js +} + +// String return a multi-line string representation of LockInfo +func (l *LockInfo) String() string { + tmpl := `Lock Info: + ID: {{.ID}} + Path: {{.Path}} + Operation: {{.Operation}} + Who: {{.Who}} + Version: {{.Version}} + Created: {{.Created}} + 
Info: {{.Info}} +` + + t := template.Must(template.New("LockInfo").Parse(tmpl)) + var out bytes.Buffer + if err := t.Execute(&out, l); err != nil { + panic(err) + } + return out.String() +} + +// LockError is a specialization of type error that is returned by Locker.Lock +// to indicate that the lock is already held by another process and that +// retrying may be productive to take the lock once the other process releases +// it. +type LockError struct { + Info *LockInfo + Err error + + // Set when writing of lock file fails because of conflict and + // then reading fails because file doesn't exist (removed by other process) + InconsistentRead bool +} + +func (e *LockError) Error() string { + var out []string + if e.Err != nil { + out = append(out, e.Err.Error()) + } + + if e.Info != nil { + out = append(out, e.Info.String()) + } + return strings.Join(out, "\n") +} + +// Retriable returns true when locking should be retried +func (e *LockError) Retriable() bool { + // If we don't have a complete LockError then there's something + // wrong with the lock. + if e == nil { + return false + } + + return e.InconsistentRead || (e.Info != nil && e.Info.ID != "") +} + +// RetriableWithoutDelay returns true when delaying can be avoided +func (e *LockError) RetriableWithoutDelay() bool { + return e != nil && e.InconsistentRead +} diff --git a/pkg/states/statemgr/migrate.go b/pkg/states/statemgr/migrate.go new file mode 100644 index 00000000000..483bd4233ff --- /dev/null +++ b/pkg/states/statemgr/migrate.go @@ -0,0 +1,217 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +// Migrator is an optional interface implemented by state managers that +// are capable of direct migration of state snapshots with their associated +// metadata unchanged. 
+// +// This interface is used when available by function Migrate. See that +// function for more information on how it is used. +type Migrator interface { + PersistentMeta + + // StateForMigration returns a full statefile representing the latest + // snapshot (as would be returned by Reader.State) and the associated + // snapshot metadata (as would be returned by + // PersistentMeta.StateSnapshotMeta). + // + // Just as with Reader.State, this must not fail. + StateForMigration() *statefile.File + + // WriteStateForMigration accepts a full statefile including associated + // snapshot metadata, and atomically updates the stored file (as with + // Writer.WriteState) and the metadata. + // + // If "force" is not set, the manager must call CheckValidImport with + // the given file and the current file and complete the update only if + // that function returns nil. If force is set this may override such + // checks, but some backends do not support forcing and so will act + // as if force is always false. + WriteStateForMigration(f *statefile.File, force bool) error +} + +// Migrate writes the latest transient state snapshot from src into dest, +// preserving snapshot metadata (serial and lineage) where possible. +// +// If both managers implement the optional interface Migrator then it will +// be used to copy the snapshot and its associated metadata. Otherwise, +// the normal Reader and Writer interfaces will be used instead. +// +// If the destination manager refuses the new state or fails to write it then +// its error is returned directly. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to persist the newly-written state after a successful result, +// just as with calls to Writer.WriteState. +// +// This function doesn't do any locking of its own, so if the state managers +// also implement Locker the caller should hold a lock on both managers +// for the duration of this call. 
+func Migrate(dst, src Transient) error { + if dstM, ok := dst.(Migrator); ok { + if srcM, ok := src.(Migrator); ok { + // Full-fidelity migration, then. + s := srcM.StateForMigration() + return dstM.WriteStateForMigration(s, true) + } + } + + // Managers do not support full-fidelity migration, so migration will not + // preserve serial/lineage. + s := src.State() + return dst.WriteState(s) +} + +// Import loads the given state snapshot into the given manager, preserving +// its metadata (serial and lineage) if the target manager supports metadata. +// +// A state manager must implement the optional interface Migrator to get +// access to the full metadata. +// +// Unless "force" is true, Import will check first that the metadata given +// in the file matches the current snapshot metadata for the manager, if the +// manager supports metadata. Some managers do not support forcing, so a +// write with an unsuitable lineage or serial may still be rejected even if +// "force" is set. "force" has no effect for managers that do not support +// snapshot metadata. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to persist the newly-written state after a successful result, +// just as with calls to Writer.WriteState. +// +// This function doesn't do any locking of its own, so if the state manager +// also implements Locker the caller should hold a lock on it for the +// duration of this call. +func Import(f *statefile.File, mgr Transient, force bool) error { + if mgrM, ok := mgr.(Migrator); ok { + return mgrM.WriteStateForMigration(f, force) + } + + // For managers that don't implement Migrator, this is just a normal write + // of the state contained in the given file. + return mgr.WriteState(f.State) +} + +// Export retrieves the latest state snapshot from the given manager, including +// its metadata (serial and lineage) where possible. 
+// +// A state manager must also implement either Migrator or PersistentMeta +// for the metadata to be included. Otherwise, the relevant fields will have +// zero value in the returned object. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to refresh from persistent storage first if needed. +// +// This function doesn't do any locking of its own, so if the state manager +// also implements Locker the caller should hold a lock on it for the +// duration of this call. +func Export(mgr Reader) *statefile.File { + switch mgrT := mgr.(type) { + case Migrator: + return mgrT.StateForMigration() + case PersistentMeta: + s := mgr.State() + meta := mgrT.StateSnapshotMeta() + return statefile.New(s, meta.Lineage, meta.Serial) + default: + s := mgr.State() + return statefile.New(s, "", 0) + } +} + +// SnapshotMetaRel describes a relationship between two SnapshotMeta values, +// returned from the SnapshotMeta.Compare method where the "first" value +// is the receiver of that method and the "second" is the given argument. +type SnapshotMetaRel rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=SnapshotMetaRel + +const ( + // SnapshotOlder indicates that two snapshots have a common lineage and + // that the first has a lower serial value. + SnapshotOlder SnapshotMetaRel = '<' + + // SnapshotNewer indicates that two snapshots have a common lineage and + // that the first has a higher serial value. + SnapshotNewer SnapshotMetaRel = '>' + + // SnapshotEqual indicates that two snapshots have a common lineage and + // the same serial value. + SnapshotEqual SnapshotMetaRel = '=' + + // SnapshotUnrelated indicates that two snapshots have different lineage + // and thus cannot be meaningfully compared. + SnapshotUnrelated SnapshotMetaRel = '!' + + // SnapshotLegacy indicates that one or both of the snapshots + // does not have a lineage at all, and thus no comparison is possible. + SnapshotLegacy SnapshotMetaRel = '?' 
+) + +// Compare determines the relationship, if any, between the given existing +// SnapshotMeta and the potential "new" SnapshotMeta that is the receiver. +func (m SnapshotMeta) Compare(existing SnapshotMeta) SnapshotMetaRel { + switch { + case m.Lineage == "" || existing.Lineage == "": + return SnapshotLegacy + case m.Lineage != existing.Lineage: + return SnapshotUnrelated + case m.Serial > existing.Serial: + return SnapshotNewer + case m.Serial < existing.Serial: + return SnapshotOlder + default: + // both serials are equal, by elimination + return SnapshotEqual + } +} + +// CheckValidImport returns nil if the "new" snapshot can be imported as a +// successor of the "existing" snapshot without forcing. +// +// If not, an error is returned describing why. +func CheckValidImport(newFile, existingFile *statefile.File) error { + if existingFile == nil || existingFile.State.Empty() { + // It's always okay to overwrite an empty state, regardless of + // its lineage/serial. + return nil + } + new := SnapshotMeta{ + Lineage: newFile.Lineage, + Serial: newFile.Serial, + } + existing := SnapshotMeta{ + Lineage: existingFile.Lineage, + Serial: existingFile.Serial, + } + rel := new.Compare(existing) + switch rel { + case SnapshotNewer: + return nil // a newer snapshot is fine + case SnapshotLegacy: + return nil // anything goes for a legacy state + case SnapshotUnrelated: + return fmt.Errorf("cannot import state with lineage %q over unrelated state with lineage %q", new.Lineage, existing.Lineage) + case SnapshotEqual: + if statefile.StatesMarshalEqual(newFile.State, existingFile.State) { + // If lineage, serial, and state all match then this is fine. 
+ return nil + } + return fmt.Errorf("cannot overwrite existing state with serial %d with a different state that has the same serial", new.Serial) + case SnapshotOlder: + return fmt.Errorf("cannot import state with serial %d over newer state with serial %d", new.Serial, existing.Serial) + default: + // Should never happen, but we'll check to make sure for safety + return fmt.Errorf("unsupported state snapshot relationship %s", rel) + } +} diff --git a/pkg/states/statemgr/migrate_test.go b/pkg/states/statemgr/migrate_test.go new file mode 100644 index 00000000000..ec6326bba8a --- /dev/null +++ b/pkg/states/statemgr/migrate_test.go @@ -0,0 +1,107 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +func TestCheckValidImport(t *testing.T) { + barState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), false, + ) + }) + notBarState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("not bar"), false, + ) + }) + emptyState := states.NewState() + + tests := map[string]struct { + New *statefile.File + Existing *statefile.File + WantErr string + }{ + "exact match": { + New: statefile.New(barState, "lineage", 1), + Existing: statefile.New(barState, "lineage", 1), + WantErr: ``, + }, + "overwrite unrelated empty state": { + New: statefile.New(barState, "lineage1", 1), + Existing: statefile.New(emptyState, "lineage2", 1), + WantErr: ``, + }, + "different state with same serial": { + New: statefile.New(barState, "lineage", 1), + Existing: 
statefile.New(notBarState, "lineage", 1), + WantErr: `cannot overwrite existing state with serial 1 with a different state that has the same serial`, + }, + "different state with newer serial": { + New: statefile.New(barState, "lineage", 2), + Existing: statefile.New(notBarState, "lineage", 1), + WantErr: ``, + }, + "different state with older serial": { + New: statefile.New(barState, "lineage", 1), + Existing: statefile.New(notBarState, "lineage", 2), + WantErr: `cannot import state with serial 1 over newer state with serial 2`, + }, + "different lineage with same serial": { + New: statefile.New(barState, "lineage1", 2), + Existing: statefile.New(notBarState, "lineage2", 2), + WantErr: `cannot import state with lineage "lineage1" over unrelated state with lineage "lineage2"`, + }, + "different lineage with different serial": { + New: statefile.New(barState, "lineage1", 3), + Existing: statefile.New(notBarState, "lineage2", 2), + WantErr: `cannot import state with lineage "lineage1" over unrelated state with lineage "lineage2"`, + }, + "new state is legacy": { + New: statefile.New(barState, "", 2), + Existing: statefile.New(notBarState, "lineage", 2), + WantErr: ``, + }, + "old state is legacy": { + New: statefile.New(barState, "lineage", 2), + Existing: statefile.New(notBarState, "", 2), + WantErr: ``, + }, + "both states are legacy": { + New: statefile.New(barState, "", 2), + Existing: statefile.New(notBarState, "", 2), + WantErr: ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotErr := CheckValidImport(test.New, test.Existing) + + if test.WantErr == "" { + if gotErr != nil { + t.Errorf("unexpected error: %s", gotErr) + } + } else { + if gotErr == nil { + t.Errorf("succeeded, but want error: %s", test.WantErr) + } else if got, want := gotErr.Error(), test.WantErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } + }) + } +} diff --git a/pkg/states/statemgr/persistent.go 
b/pkg/states/statemgr/persistent.go new file mode 100644 index 00000000000..f0be0828faf --- /dev/null +++ b/pkg/states/statemgr/persistent.go @@ -0,0 +1,126 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + version "github.com/hashicorp/go-version" + + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// Persistent is a union of the Refresher, Persister, and OutputReader interfaces, for types +// that deal with persistent snapshots. +// +// Persistent snapshots are ones that are retained in storage that will +// outlive a particular OpenTofu process, and are shared with other OpenTofu +// processes that have a similarly-configured state manager. +// +// A manager may also choose to retain historical persistent snapshots, but +// that is an implementation detail and not visible via this API. +type Persistent interface { + Refresher + Persister + OutputReader +} + +// OutputReader is the interface for managers that fetch output values from state +// or another source. This is a refinement of fetching the entire state and digging +// the output values from it because enhanced backends can apply special permissions +// to differentiate reading the state and reading the outputs within the state. +type OutputReader interface { + // GetRootOutputValues fetches the root module output values from state or another source + GetRootOutputValues() (map[string]*states.OutputValue, error) +} + +// Refresher is the interface for managers that can read snapshots from +// persistent storage. +// +// Refresher is usually implemented in conjunction with Reader, with +// RefreshState copying the latest persistent snapshot into the latest +// transient snapshot. 
+// +// For a type that implements both Refresher and Persister, RefreshState must +// return the result of the most recently completed successful call to +// PersistState, unless another concurrently-running process has persisted +// another snapshot in the mean time. +// +// The Refresher implementation must guarantee that the snapshot is read +// from persistent storage in a way that is safe under concurrent calls to +// PersistState that may be happening in other processes. +type Refresher interface { + // RefreshState retrieves a snapshot of state from persistent storage, + // returning an error if this is not possible. + // + // Types that implement RefreshState generally also implement a State + // method that returns the result of the latest successful refresh. + // + // Since only a subset of the data in a state is included when persisting, + // a round-trip through PersistState and then RefreshState will often + // return only a subset of what was written. Callers must assume that + // ephemeral portions of the state may be unpopulated after calling + // RefreshState. + RefreshState() error +} + +// Persister is the interface for managers that can write snapshots to +// persistent storage. +// +// Persister is usually implemented in conjunction with Writer, with +// PersistState copying the latest transient snapshot to be the new latest +// persistent snapshot. +// +// A Persister implementation must detect updates made by other processes +// that may be running concurrently and avoid destroying those changes. This +// is most commonly achieved by making use of atomic write capabilities on +// the remote storage backend in conjunction with book-keeping with the +// Serial and Lineage fields in the standard state file formats. +// +// Some implementations may optionally utilize config schema to persist +// state. For example, when representing state in an external JSON +// representation. 
+type Persister interface { + PersistState(*tofu.Schemas) error +} + +// PersistentMeta is an optional extension to Persistent that allows inspecting +// the metadata associated with the snapshot that was most recently either +// read by RefreshState or written by PersistState. +type PersistentMeta interface { + // StateSnapshotMeta returns metadata about the state snapshot most + // recently created either by a call to PersistState or read by a call + // to RefreshState. + // + // If no persistent snapshot is yet available in the manager then + // the return value is meaningless. This method is primarily available + // for testing and logging purposes, and is of little use otherwise. + StateSnapshotMeta() SnapshotMeta +} + +// SnapshotMeta contains metadata about a persisted state snapshot. +// +// This metadata is usually (but not necessarily) included as part of the +// "header" of a state file, which is then written to a raw blob storage medium +// by a persistent state manager. +// +// Not all state managers will have useful values for all fields in this +// struct, so SnapshotMeta values are of little use beyond testing and logging +// use-cases. +type SnapshotMeta struct { + // Lineage and Serial can be used to understand the relationships between + // snapshots. + // + // If two snapshots both have an identical, non-empty Lineage + // then the one with the higher Serial is newer than the other. + // If the Lineage values are different or empty then the two snapshots + // are unrelated and cannot be compared for relative age. + Lineage string + Serial uint64 + + // TerraformVersion is the number of the version of OpenTofu that created + // the snapshot. 
+ TerraformVersion *version.Version +} diff --git a/pkg/states/statemgr/plan.go b/pkg/states/statemgr/plan.go new file mode 100644 index 00000000000..54d5ef5caeb --- /dev/null +++ b/pkg/states/statemgr/plan.go @@ -0,0 +1,76 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +// PlannedStateUpdate is a special helper to obtain a statefile representation +// of a not-yet-written state snapshot that can be written later by a call +// to the companion function WritePlannedStateUpdate. +// +// The statefile object returned here has an unusual interpretation of its +// metadata that is understood only by WritePlannedStateUpdate, and so the +// returned object should not be used for any other purpose. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. +// It is not safe to modify the given state concurrently while +// PlannedStateUpdate is running. +func PlannedStateUpdate(mgr Transient, planned *states.State) *statefile.File { + ret := &statefile.File{ + State: planned.DeepCopy(), + } + + // If the given manager uses snapshot metadata then we'll save that + // in our file so we can check it again during WritePlannedStateUpdate. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + ret.Lineage = m.Lineage + ret.Serial = m.Serial + } + + return ret +} + +// WritePlannedStateUpdate is a companion to PlannedStateUpdate that attempts +// to apply a state update that was planned earlier to the given state +// manager. +// +// An error is returned if this function detects that a new state snapshot +// has been written to the backend since the update was planned, since that +// invalidates the plan. 
An error is returned also if the manager itself +// rejects the given state when asked to store it. +// +// If the returned error is nil, the given manager's transient state snapshot +// is updated to match what was planned. It is the caller's responsibility +// to then persist that state if the manager also implements Persistent and +// the snapshot should be written to the persistent store. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. +func WritePlannedStateUpdate(mgr Transient, planned *statefile.File) error { + // If the given manager uses snapshot metadata then we'll check to make + // sure no new snapshots have been created since we planned to write + // the given state file. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + if planned.Lineage != "" { + if planned.Lineage != m.Lineage { + return fmt.Errorf("planned state update is from an unrelated state lineage than the current state") + } + if planned.Serial != m.Serial { + return fmt.Errorf("stored state has been changed by another operation since the given update was planned") + } + } + } + + return mgr.WriteState(planned.State) +} diff --git a/pkg/states/statemgr/snapshotmetarel_string.go b/pkg/states/statemgr/snapshotmetarel_string.go new file mode 100644 index 00000000000..a5b66138cfe --- /dev/null +++ b/pkg/states/statemgr/snapshotmetarel_string.go @@ -0,0 +1,37 @@ +// Code generated by "stringer -type=SnapshotMetaRel"; DO NOT EDIT. + +package statemgr + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[SnapshotOlder-60] + _ = x[SnapshotNewer-62] + _ = x[SnapshotEqual-61] + _ = x[SnapshotUnrelated-33] + _ = x[SnapshotLegacy-63] +} + +const ( + _SnapshotMetaRel_name_0 = "SnapshotUnrelated" + _SnapshotMetaRel_name_1 = "SnapshotOlderSnapshotEqualSnapshotNewerSnapshotLegacy" +) + +var ( + _SnapshotMetaRel_index_1 = [...]uint8{0, 13, 26, 39, 53} +) + +func (i SnapshotMetaRel) String() string { + switch { + case i == 33: + return _SnapshotMetaRel_name_0 + case 60 <= i && i <= 63: + i -= 60 + return _SnapshotMetaRel_name_1[_SnapshotMetaRel_index_1[i]:_SnapshotMetaRel_index_1[i+1]] + default: + return "SnapshotMetaRel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/states/statemgr/statemgr.go b/pkg/states/statemgr/statemgr.go new file mode 100644 index 00000000000..034d260b78a --- /dev/null +++ b/pkg/states/statemgr/statemgr.go @@ -0,0 +1,31 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +// Storage is the union of Transient and Persistent, for state managers that +// have both transient and persistent storage. +// +// Types implementing this interface coordinate between their Transient +// and Persistent implementations so that the persistent operations read +// or write the transient store. +type Storage interface { + Transient + Persistent +} + +// Full is the union of all of the more-specific state interfaces. +// +// This interface may grow over time, so state implementations aiming to +// implement it may need to be modified for future changes. 
To ensure that +// this need can be detected, always include a statement nearby the declaration +// of the implementing type that will fail at compile time if the interface +// isn't satisfied, such as: +// +// var _ statemgr.Full = (*ImplementingType)(nil) +type Full interface { + Storage + Locker +} diff --git a/pkg/states/statemgr/statemgr_fake.go b/pkg/states/statemgr/statemgr_fake.go new file mode 100644 index 00000000000..beb28ddab90 --- /dev/null +++ b/pkg/states/statemgr/statemgr_fake.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "errors" + "sync" + + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tofu" +) + +// NewFullFake returns a full state manager that really only supports transient +// snapshots. This is primarily intended for testing and is not suitable for +// general use. +// +// The persistent part of the interface is stubbed out as an in-memory store, +// and so its snapshots are effectively also transient. +// +// The given Transient implementation is used to implement the transient +// portion of the interface. If nil is given, NewTransientInMemory is +// automatically called to create an in-memory transient manager with no +// initial transient snapshot. +// +// If the given initial state is non-nil then a copy of it will be used as +// the initial persistent snapshot. +// +// The Locker portion of the returned manager uses a local mutex to simulate +// mutually-exclusive access to the fake persistent portion of the object. +func NewFullFake(t Transient, initial *states.State) Full { + if t == nil { + t = NewTransientInMemory(nil) + } + + // The "persistent" part of our manager is actually just another in-memory + // transient used to fake a secondary storage layer. 
+ fakeP := NewTransientInMemory(initial.DeepCopy()) + + return &fakeFull{ + t: t, + fakeP: fakeP, + } +} + +type fakeFull struct { + t Transient + fakeP Transient + + lockLock sync.Mutex + locked bool +} + +var _ Full = (*fakeFull)(nil) + +func (m *fakeFull) State() *states.State { + return m.t.State() +} + +func (m *fakeFull) WriteState(s *states.State) error { + return m.t.WriteState(s) +} + +func (m *fakeFull) RefreshState() error { + return m.t.WriteState(m.fakeP.State()) +} + +func (m *fakeFull) PersistState(schemas *tofu.Schemas) error { + return m.fakeP.WriteState(m.t.State()) +} + +func (m *fakeFull) GetRootOutputValues() (map[string]*states.OutputValue, error) { + return m.State().RootModule().OutputValues, nil +} + +func (m *fakeFull) Lock(info *LockInfo) (string, error) { + m.lockLock.Lock() + defer m.lockLock.Unlock() + + if m.locked { + return "", &LockError{ + Err: errors.New("fake state manager is locked"), + Info: info, + } + } + + m.locked = true + return "placeholder", nil +} + +func (m *fakeFull) Unlock(id string) error { + m.lockLock.Lock() + defer m.lockLock.Unlock() + + if !m.locked { + return errors.New("fake state manager is not locked") + } + if id != "placeholder" { + return errors.New("wrong lock id for fake state manager") + } + + m.locked = false + return nil +} + +// NewUnlockErrorFull returns a state manager that is useful for testing errors +// (mostly Unlock errors) when used with the clistate.Locker interface. Lock() +// does not return an error because clistate.Locker Lock()s the state at the +// start of Unlock(), so Lock() must succeed for Unlock() to get called. 
+func NewUnlockErrorFull(t Transient, initial *states.State) Full { + return &fakeErrorFull{} +} + +type fakeErrorFull struct{} + +var _ Full = (*fakeErrorFull)(nil) + +func (m *fakeErrorFull) State() *states.State { + return nil +} + +func (m *fakeErrorFull) GetRootOutputValues() (map[string]*states.OutputValue, error) { + return nil, errors.New("fake state manager error") +} + +func (m *fakeErrorFull) WriteState(s *states.State) error { + return errors.New("fake state manager error") +} + +func (m *fakeErrorFull) RefreshState() error { + return errors.New("fake state manager error") +} + +func (m *fakeErrorFull) PersistState(schemas *tofu.Schemas) error { + return errors.New("fake state manager error") +} + +func (m *fakeErrorFull) Lock(info *LockInfo) (string, error) { + return "placeholder", nil +} + +func (m *fakeErrorFull) Unlock(id string) error { + return errors.New("fake state manager error") +} diff --git a/pkg/states/statemgr/statemgr_test.go b/pkg/states/statemgr/statemgr_test.go new file mode 100644 index 00000000000..4f2bdfe755e --- /dev/null +++ b/pkg/states/statemgr/statemgr_test.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "context" + "encoding/json" + "flag" + "os" + "testing" + "time" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +func TestNewLockInfo(t *testing.T) { + info1 := NewLockInfo() + info2 := NewLockInfo() + + if info1.ID == "" { + t.Fatal("LockInfo missing ID") + } + + if info1.Version == "" { + t.Fatal("LockInfo missing version") + } + + if info1.Created.IsZero() { + t.Fatal("LockInfo missing Created") + } + + if info1.ID == info2.ID { + t.Fatal("multiple LockInfo with identical IDs") + } + + // test the JSON output is valid + newInfo := &LockInfo{} + err := json.Unmarshal(info1.Marshal(), newInfo) + if err != nil { + t.Fatal(err) + } +} + +func TestLockWithContext(t *testing.T) { + s := NewFullFake(nil, TestFullInitialState()) + + id, err := s.Lock(NewLockInfo()) + if err != nil { + t.Fatal(err) + } + + // use a cancelled context for an immediate timeout + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + info := NewLockInfo() + info.Info = "lock with context" + _, err = LockWithContext(ctx, s, info) + if err == nil { + t.Fatal("lock should have failed immediately") + } + + // block until LockwithContext has made a first attempt + attempted := make(chan struct{}) + postLockHook = func() { + close(attempted) + postLockHook = nil + } + + // unlock the state during LockWithContext + unlocked := make(chan struct{}) + var unlockErr error + go func() { + defer close(unlocked) + <-attempted + unlockErr = s.Unlock(id) + }() + + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + id, err = LockWithContext(ctx, s, info) + if err != nil { + t.Fatal("lock should have completed within 2s:", err) + } + + // ensure the goruotine completes + <-unlocked + if unlockErr != nil { + t.Fatal(unlockErr) + } +} + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} diff --git a/pkg/states/statemgr/testdata/lockstate.go 
b/pkg/states/statemgr/testdata/lockstate.go new file mode 100644 index 00000000000..4ee674ee3b9 --- /dev/null +++ b/pkg/states/statemgr/testdata/lockstate.go @@ -0,0 +1,29 @@ +package main + +import ( + "io" + "log" + "os" + + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/states/statemgr" +) + +// Attempt to open and lock a tofu state file. +// Lock failure exits with 0 and writes "lock failed" to stderr. +func main() { + if len(os.Args) != 2 { + log.Fatal(os.Args[0], "statefile") + } + + s := statemgr.NewFilesystem(os.Args[1], encryption.StateEncryptionDisabled()) + + info := statemgr.NewLockInfo() + info.Operation = "test" + info.Info = "state locker" + + _, err := s.Lock(info) + if err != nil { + io.WriteString(os.Stderr, "lock failed") + } +} diff --git a/pkg/states/statemgr/testing.go b/pkg/states/statemgr/testing.go new file mode 100644 index 00000000000..475caa85a64 --- /dev/null +++ b/pkg/states/statemgr/testing.go @@ -0,0 +1,168 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" +) + +// TestFull is a helper for testing full state manager implementations. It +// expects that the given implementation is pre-loaded with a snapshot of the +// result from TestFullInitialState. +// +// If the given state manager also implements PersistentMeta, this function +// will test that the snapshot metadata changes as expected between calls +// to the methods of Persistent. +func TestFull(t *testing.T, s Full) { + t.Helper() + + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that the initial state is correct. 
+ // These do have different Lineages, but we will replace current below. + initial := TestFullInitialState() + if state := s.State(); !state.Equal(initial) { + t.Fatalf("state does not match expected initial state\n\ngot:\n%s\nwant:\n%s", spew.Sdump(state), spew.Sdump(initial)) + } + + var initialMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + initialMeta = sm.StateSnapshotMeta() + } + + // Now we've proven that the state we're starting with is an initial + // state, we'll complete our work here with that state, since otherwise + // further writes would violate the invariant that we only try to write + // states that share the same lineage as what was initially written. + current := s.State() + + // Write a new state and verify that we have it + current.RootModule().SetOutputValue("bar", cty.StringVal("baz"), false) + + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + + if actual := s.State(); !actual.Equal(current) { + t.Fatalf("bad:\n%#v\n\n%#v", actual, current) + } + + // Test persistence + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + // Refresh if we got it + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + var newMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if got, want := newMeta.Lineage, initialMeta.Lineage; got != want { + t.Errorf("Lineage changed from %q to %q", want, got) + } + if after, before := newMeta.Serial, initialMeta.Serial; after == before { + t.Errorf("Serial didn't change from %d after new module added", before) + } + } + + // Same serial + serial := newMeta.Serial + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if newMeta.Serial != serial { + t.Fatalf("serial changed after persisting with no changes: got 
%d, want %d", newMeta.Serial, serial) + } + } + + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + } + + // Change the serial + current = current.DeepCopy() + current.EnsureModule(addrs.RootModuleInstance).SetOutputValue( + "serialCheck", cty.StringVal("true"), false, + ) + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(nil); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + oldMeta := newMeta + newMeta = sm.StateSnapshotMeta() + + if newMeta.Serial <= serial { + t.Fatalf("serial incorrect after persisting with changes: got %d, want > %d", newMeta.Serial, serial) + } + + if newMeta.TerraformVersion != oldMeta.TerraformVersion { + t.Fatalf("TFVersion changed from %s to %s", oldMeta.TerraformVersion, newMeta.TerraformVersion) + } + + // verify that Lineage doesn't change along with Serial, or during copying. + if newMeta.Lineage != oldMeta.Lineage { + t.Fatalf("Lineage changed from %q to %q", oldMeta.Lineage, newMeta.Lineage) + } + } + + // Check that State() returns a copy by modifying the copy and comparing + // to the current state. + stateCopy := s.State() + stateCopy.EnsureModule(addrs.RootModuleInstance.Child("another", addrs.NoKey)) + if reflect.DeepEqual(stateCopy, s.State()) { + t.Fatal("State() should return a copy") + } + + // our current expected state should also marshal identically to the persisted state + if !statefile.StatesMarshalEqual(current, s.State()) { + t.Fatalf("Persisted state altered unexpectedly.\n\ngot:\n%s\nwant:\n%s", spew.Sdump(s.State()), spew.Sdump(current)) + } +} + +// TestFullInitialState is a state that should be snapshotted into a +// full state manager before passing it into TestFull. 
+func TestFullInitialState() *states.State { + state := states.NewState() + childMod := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + rAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "foo", + } + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider(rAddr.ImpliedProvider()), + Module: addrs.RootModule, + } + childMod.SetResourceProvider(rAddr, providerAddr) + + state.RootModule().SetOutputValue("sensitive_output", cty.StringVal("it's a secret"), true) + state.RootModule().SetOutputValue("nonsensitive_output", cty.StringVal("hello, world!"), false) + + return state +} diff --git a/pkg/states/statemgr/transient.go b/pkg/states/statemgr/transient.go new file mode 100644 index 00000000000..8330d2b0896 --- /dev/null +++ b/pkg/states/statemgr/transient.go @@ -0,0 +1,71 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import "github.com/kubegems/opentofu/pkg/states" + +// Transient is a union of the Reader and Writer interfaces, for types that +// deal with transient snapshots. +// +// Transient snapshots are ones that are generally retained only locally and +// to not create any historical version record when updated. Transient +// snapshots are not expected to outlive a particular OpenTofu process, +// and are not shared with any other process. +// +// A state manager type that is primarily concerned with persistent storage +// may embed type Transient and then call State from its PersistState and +// WriteState from its RefreshState in order to build on any existing +// Transient implementation, such as the one returned by NewTransientInMemory. +type Transient interface { + Reader + Writer +} + +// Reader is the interface for managers that can return transient snapshots +// of state. 
+// +// Retrieving the snapshot must not fail, so retrieving a snapshot from remote +// storage (for example) should be dealt with elsewhere, often in an +// implementation of Refresher. For a type that implements both Reader +// and Refresher, it is okay for State to return nil if called before +// a RefreshState call has completed. +// +// For a type that implements both Reader and Writer, State must return the +// result of the most recently completed call to WriteState, and the state +// manager must accept concurrent calls to both State and WriteState. +// +// Each caller of this function must get a distinct copy of the state, and +// it must also be distinct from any instance cached inside the reader, to +// ensure that mutations of the returned state will not affect the values +// returned to other callers. +type Reader interface { + // State returns the latest state. + // + // Each call to State returns an entirely-distinct copy of the state, with + // no storage shared with any other call, so the caller may freely mutate + // the returned object via the state APIs. + State() *states.State +} + +// Writer is the interface for managers that can create transient snapshots +// from state. +// +// Writer is the opposite of Reader, and so it must update whatever the State +// method reads from. It does not write the state to any persistent +// storage, and (for managers that support historical versions) must not +// be recorded as a persistent new version of state. +// +// Implementations that cache the state in memory must take a deep copy of it, +// since the caller may continue to modify the given state object after +// WriteState returns. +type Writer interface { + // WriteState saves a transient snapshot of the given state. + // + // The caller must ensure that the given state object is not concurrently + // modified while a WriteState call is in progress. WriteState itself + // will never modify the given state. 
+ WriteState(*states.State) error +} diff --git a/pkg/states/statemgr/transient_inmem.go b/pkg/states/statemgr/transient_inmem.go new file mode 100644 index 00000000000..5ea85868010 --- /dev/null +++ b/pkg/states/statemgr/transient_inmem.go @@ -0,0 +1,46 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statemgr + +import ( + "sync" + + "github.com/kubegems/opentofu/pkg/states" +) + +// NewTransientInMemory returns a Transient implementation that retains +// transient snapshots only in memory, as part of the object. +// +// The given initial state, if any, must not be modified concurrently while +// this function is running, but may be freely modified once this function +// returns without affecting the stored transient snapshot. +func NewTransientInMemory(initial *states.State) Transient { + return &transientInMemory{ + current: initial.DeepCopy(), + } +} + +type transientInMemory struct { + lock sync.RWMutex + current *states.State +} + +var _ Transient = (*transientInMemory)(nil) + +func (m *transientInMemory) State() *states.State { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.current.DeepCopy() +} + +func (m *transientInMemory) WriteState(new *states.State) error { + m.lock.Lock() + defer m.lock.Unlock() + + m.current = new.DeepCopy() + return nil +} diff --git a/pkg/states/sync.go b/pkg/states/sync.go new file mode 100644 index 00000000000..e29dc550fbc --- /dev/null +++ b/pkg/states/sync.go @@ -0,0 +1,578 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package states + +import ( + "log" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/zclconf/go-cty/cty" +) + +// SyncState is a wrapper around State that provides concurrency-safe access to +// various common operations that occur during an OpenTofu graph walk, or other +// similar concurrent contexts. +// +// When a SyncState wrapper is in use, no concurrent direct access to the +// underlying objects is permitted unless the caller first acquires an explicit +// lock, using the Lock and Unlock methods. Most callers should _not_ +// explicitly lock, and should instead use the other methods of this type that +// handle locking automatically. +// +// Since SyncState is able to safely consolidate multiple updates into a single +// atomic operation, many of its methods are at a higher level than those +// of the underlying types, and operate on the state as a whole rather than +// on individual sub-structures of the state. +// +// SyncState can only protect against races within its own methods. It cannot +// provide any guarantees about the order in which concurrent operations will +// be processed, so callers may still need to employ higher-level techniques +// for ensuring correct operation sequencing, such as building and walking +// a dependency graph. +type SyncState struct { + state *State + lock sync.RWMutex +} + +// Module returns a snapshot of the state of the module instance with the given +// address, or nil if no such module is tracked. +// +// The return value is a pointer to a copy of the module state, which the +// caller may then freely access and mutate. However, since the module state +// tends to be a large data structure with many child objects, where possible +// callers should prefer to use a more granular accessor to access a child +// module directly, and thus reduce the amount of copying required. 
+func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { + s.lock.RLock() + ret := s.state.Module(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ModuleOutputs returns the set of OutputValues that matches the given path. +func (s *SyncState) ModuleOutputs(parentAddr addrs.ModuleInstance, module addrs.ModuleCall) []*OutputValue { + s.lock.RLock() + defer s.lock.RUnlock() + var os []*OutputValue + + for _, o := range s.state.ModuleOutputs(parentAddr, module) { + os = append(os, o.DeepCopy()) + } + return os +} + +// RemoveModule removes the entire state for the given module, taking with +// it any resources associated with the module. This should generally be +// called only for modules whose resources have all been destroyed, but +// that is not enforced by this method. +func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + s.state.RemoveModule(addr) +} + +// OutputValue returns a snapshot of the state of the output value with the +// given address, or nil if no such output value is tracked. +// +// The return value is a pointer to a copy of the output value state, which the +// caller may then freely access and mutate. +func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + s.lock.RLock() + ret := s.state.OutputValue(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// SetOutputValue writes a given output value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the output is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) +} + +// RemoveOutputValue removes the stored value for the output value with the +// given address. 
+// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveOutputValue(addr.OutputValue.Name) + s.maybePruneModule(addr.Module) +} + +// LocalValue returns the current value associated with the given local value +// address. +func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { + s.lock.RLock() + // cty.Value is immutable, so we don't need any extra copying here. + ret := s.state.LocalValue(addr) + s.lock.RUnlock() + return ret +} + +// SetLocalValue writes a given output value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the local value is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetLocalValue(addr.LocalValue.Name, value) +} + +// RemoveLocalValue removes the stored value for the local value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveLocalValue(addr.LocalValue.Name) + s.maybePruneModule(addr.Module) +} + +// Resource returns a snapshot of the state of the resource with the given +// address, or nil if no such resource is tracked. +// +// The return value is a pointer to a copy of the resource state, which the +// caller may then freely access and mutate. 
+func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { + s.lock.RLock() + ret := s.state.Resource(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstance returns a snapshot of the state of the resource instance with +// the given address, or nil if no such instance is tracked. +// +// The return value is a pointer to a copy of the instance state, which the +// caller may then freely access and mutate. +func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + s.lock.RLock() + ret := s.state.ResourceInstance(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstanceObject returns a snapshot of the current instance object +// of the given generation belonging to the instance with the given address, +// or nil if no such object is tracked. +// +// The return value is a pointer to a copy of the object, which the caller may +// then freely access and mutate. +func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { + s.lock.RLock() + defer s.lock.RUnlock() + + inst := s.state.ResourceInstance(addr) + if inst == nil { + return nil + } + return inst.GetGeneration(gen).DeepCopy() +} + +// SetResourceProvider updates the resource-level metadata for the resource at +// the given address, creating the containing module state and resource state +// as a side-effect if not already present. +func (s *SyncState) SetResourceProvider(addr addrs.AbsResource, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceProvider(addr.Resource, provider) +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed, +// but that is not enforced by this method. 
(Use RemoveResourceIfEmpty instead +// to safely check first.) +func (s *SyncState) RemoveResource(addr addrs.AbsResource) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// RemoveResourceIfEmpty is similar to RemoveResource but first checks to +// make sure there are no instances or objects left in the resource. +// +// Returns true if the resource was removed, or false if remaining child +// objects prevented its removal. Returns true also if the resource was +// already absent, and thus no action needed to be taken. +func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return true // nothing to do + } + rs := ms.Resource(addr.Resource) + if rs == nil { + return true // nothing to do + } + if len(rs.Instances) != 0 { + // We don't check here for the possibility of instances that exist + // but don't have any objects because it's the responsibility of the + // instance-mutation methods to prune those away automatically. + return false + } + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) + return true +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance as a whole will be removed, which +// may in turn also remove the containing module if it becomes empty. 
+// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The provider address is a resource-wide settings and is updated +// for all other instances of the same resource as a side-effect of this call. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. 
+// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// DeposeResourceInstanceObject moves the current instance object for the +// given resource instance address into the deposed set, leaving the instance +// without a current object. +// +// The return value is the newly-allocated deposed key, or NotDeposed if the +// given instance is already lacking a current object. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then there cannot be a current object for the +// given instance, and so NotDeposed will be returned without modifying the +// state at all. +func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return NotDeposed + } + + return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) +} + +// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject +// but uses a pre-allocated key. It's the caller's responsibility to ensure +// that there aren't any races to use a particular key; this method will panic +// if the given key is already in use. 
+func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + if forcedKey == NotDeposed { + // Usage error: should use DeposeResourceInstanceObject in this case + panic("DeposeResourceInstanceObjectForceKey called without forced key") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + return // Nothing to do, since there can't be any current object either. + } + + ms.deposeResourceInstanceObject(addr.Resource, forcedKey) +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceAll(addr.Resource) + s.maybePruneModule(addr.Module) +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.ForgetResourceInstanceDeposed(addr.Resource, key) + s.maybePruneModule(addr.Module) +} + +// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the +// given key on the specified resource as the current object for that instance +// if and only if that would not cause us to forget an existing current +// object for that instance. +// +// Returns true if the object was restored to current, or false if no change +// was made at all. 
+func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool { + s.lock.Lock() + defer s.lock.Unlock() + + if key == NotDeposed { + panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey") + } + + ms := s.state.Module(addr.Module) + if ms == nil { + // Nothing to do, since the specified deposed object cannot exist. + return false + } + + return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key) +} + +// RemovePlannedResourceInstanceObjects removes from the state any resource +// instance objects that have the status ObjectPlanned, indicating that they +// are just transient placeholders created during planning. +// +// Note that this does not restore any "ready" or "tainted" object that might +// have been present before the planned object was written. The only real use +// for this method is in preparing the state created during a refresh walk, +// where we run the planning step for certain instances just to create enough +// information to allow correct expression evaluation within provider and +// data resource blocks. Discarding planned instances in that case is okay +// because the refresh phase only creates planned objects to stand in for +// objects that don't exist yet, and thus the planned object must have been +// absent before by definition. +func (s *SyncState) RemovePlannedResourceInstanceObjects() { + // TODO: Merge together the refresh and plan phases into a single walk, + // so we can remove the need to create this "partial plan" during refresh + // that we then need to clean up before proceeding. 
+ + s.lock.Lock() + defer s.lock.Unlock() + + for _, ms := range s.state.Modules { + moduleAddr := ms.Addr + + for _, rs := range ms.Resources { + resAddr := rs.Addr.Resource + + for ik, is := range rs.Instances { + instAddr := resAddr.Instance(ik) + + if is.Current != nil && is.Current.Status == ObjectPlanned { + // Setting the current instance to nil removes it from the + // state altogether if there are not also deposed instances. + ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) + } + + for dk, obj := range is.Deposed { + // Deposed objects should never be "planned", but we'll + // do this anyway for the sake of completeness. + if obj.Status == ObjectPlanned { + ms.ForgetResourceInstanceDeposed(instAddr, dk) + } + } + } + } + + // We may have deleted some objects, which means that we may have + // left a module empty, and so we must prune to preserve the invariant + // that only the root module is allowed to be empty. + s.maybePruneModule(moduleAddr) + } +} + +// DiscardCheckResults discards any previously-recorded check results, with +// the intent of preventing any references to them after they have become +// stale due to starting (but possibly not completing) an update. +func (s *SyncState) DiscardCheckResults() { + s.lock.Lock() + s.state.CheckResults = nil + s.lock.Unlock() +} + +// RecordCheckResults replaces any check results already recorded in the state +// with a new set taken from the given check state object. +func (s *SyncState) RecordCheckResults(checkState *checks.State) { + newResults := NewCheckResults(checkState) + s.lock.Lock() + s.state.CheckResults = newResults + s.lock.Unlock() +} + +// Lock acquires an explicit lock on the state, allowing direct read and write +// access to the returned state object. The caller must call Unlock once +// access is no longer needed, and then immediately discard the state pointer +// pointer. +// +// Most callers should not use this. 
Instead, use the concurrency-safe
// accessors and mutators provided directly on SyncState.
func (s *SyncState) Lock() *State {
	s.lock.Lock()
	return s.state
}

// Unlock releases a lock previously acquired by Lock, at which point the
// caller must cease all use of the state pointer that was returned.
//
// Do not call this method except to end an explicit lock acquired by
// Lock. If a caller calls Unlock without first holding the lock, behavior
// is undefined.
func (s *SyncState) Unlock() {
	s.lock.Unlock()
}

// Close extracts the underlying state from inside this wrapper, making the
// wrapper invalid for any future operations.
func (s *SyncState) Close() *State {
	s.lock.Lock()
	ret := s.state
	// Nil out the inner pointer so future operations through this wrapper
	// can't still modify the extracted state.
	s.state = nil
	s.lock.Unlock()
	return ret
}

// maybePruneModule will remove a module from the state altogether if it is
// empty, unless it's the root module which must always be present.
//
// This helper method is not concurrency-safe on its own, so must only be
// called while the caller is already holding the lock for writing.
func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) {
	if addr.IsRoot() {
		// The root module is always retained, even when empty.
		return
	}

	mod := s.state.Module(addr)
	if mod == nil || !mod.empty() {
		return
	}

	log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr)
	s.state.RemoveModule(addr)
}

// MoveAbsResource moves a resource (and all of its instances) within the
// state, holding the lock for the duration of the underlying operation.
func (s *SyncState) MoveAbsResource(src, dst addrs.AbsResource) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.state.MoveAbsResource(src, dst)
}

// MaybeMoveAbsResource is the conditional variant of MoveAbsResource,
// reporting whether a move actually occurred.
func (s *SyncState) MaybeMoveAbsResource(src, dst addrs.AbsResource) bool {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.state.MaybeMoveAbsResource(src, dst)
}

// MoveResourceInstance moves a single resource instance within the state.
func (s *SyncState) MoveResourceInstance(src, dst addrs.AbsResourceInstance) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.state.MoveAbsResourceInstance(src, dst)
}

// MaybeMoveResourceInstance is the conditional variant of
// MoveResourceInstance, reporting whether a move actually occurred.
func (s *SyncState) MaybeMoveResourceInstance(src, dst addrs.AbsResourceInstance) bool {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.state.MaybeMoveAbsResourceInstance(src, dst)
}

// MoveModuleInstance moves a whole module instance within the state.
func (s *SyncState) MoveModuleInstance(src, dst addrs.ModuleInstance) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.state.MoveModuleInstance(src, dst)
}

// MaybeMoveModuleInstance is the conditional variant of MoveModuleInstance,
// reporting whether a move actually occurred.
func (s *SyncState) MaybeMoveModuleInstance(src, dst addrs.ModuleInstance) bool {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.state.MaybeMoveModuleInstance(src, dst)
}
diff --git a/pkg/terminal/impl_others.go b/pkg/terminal/impl_others.go
new file mode 100644
index 00000000000..19d8715fa7e
--- /dev/null
+++ b/pkg/terminal/impl_others.go
@@ -0,0 +1,59 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !windows
+// +build !windows
+
+package terminal
+
+import (
+	"os"
+
+	"golang.org/x/term"
+)
+
+// This is the implementation for all operating systems except Windows, where
+// we don't expect to need to do any special initialization to get a working
+// Virtual Terminal.
+// +// For this implementation we just delegate everything upstream to +// golang.org/x/term, since it already has a variety of different +// implementations for quirks of more esoteric operating systems like plan9, +// and will hopefully grow to include others as Go is ported to other platforms +// in future. +// +// For operating systems that golang.org/x/term doesn't support either, it +// defaults to indicating that nothing is a terminal and returns an error when +// asked for a size, which we'll handle below. + +func configureOutputHandle(f *os.File) (*OutputStream, error) { + return &OutputStream{ + File: f, + isTerminal: isTerminalGolangXTerm, + getColumns: getColumnsGolangXTerm, + }, nil +} + +func configureInputHandle(f *os.File) (*InputStream, error) { + return &InputStream{ + File: f, + isTerminal: isTerminalGolangXTerm, + }, nil +} + +func isTerminalGolangXTerm(f *os.File) bool { + return term.IsTerminal(int(f.Fd())) +} + +func getColumnsGolangXTerm(f *os.File) int { + width, _, err := term.GetSize(int(f.Fd())) + if err != nil { + // Suggests that it's either not a terminal at all or that we're on + // a platform that golang.org/x/term doesn't support. In both cases + // we'll just return the placeholder default value. + return defaultColumns + } + return width +} diff --git a/pkg/terminal/impl_windows.go b/pkg/terminal/impl_windows.go new file mode 100644 index 00000000000..8a5d167b032 --- /dev/null +++ b/pkg/terminal/impl_windows.go @@ -0,0 +1,167 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

//go:build windows
// +build windows

package terminal

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/windows"

	// We're continuing to use this third-party library on Windows because it
	// has the additional IsCygwinTerminal function, which includes some useful
	// heuristics for recognizing when a pipe seems to be connected to a
	// legacy terminal emulator on Windows versions that lack true pty support.
	// We now use golang.org/x/term's functionality on other platforms.
	isatty "github.com/mattn/go-isatty"
)

// configureOutputHandle prepares an output file for use as an OutputStream on
// Windows, switching the console to UTF-8 output and, where possible,
// enabling virtual-terminal (VT) processing.
//
// It returns an error only when the console refuses UTF-8 mode; failure to
// enable VT processing is treated as success without VT support.
func configureOutputHandle(f *os.File) (*OutputStream, error) {
	ret := &OutputStream{
		File: f,
	}

	if fd := f.Fd(); isatty.IsTerminal(fd) {
		// We have a few things to deal with here:
		// - Activating UTF-8 output support (mandatory)
		// - Activating virtual terminal support (optional)
		// These will not succeed on Windows 8 or early versions of Windows 10.

		// UTF-8 support means switching the console "code page" to CP_UTF8.
		// Notice that this doesn't take the specific file descriptor, because
		// the console is just ambiently associated with our process.
		err := SetConsoleOutputCP(CP_UTF8)
		if err != nil {
			return nil, fmt.Errorf("failed to set the console to UTF-8 mode; you may need to use a newer version of Windows: %w", err)
		}

		// If the console also allows us to turn on
		// ENABLE_VIRTUAL_TERMINAL_PROCESSING then we can potentially use VT
		// output, although the methods of Settings will make the final
		// determination on that because we might have some handles pointing at
		// terminals and other handles pointing at files/pipes.
		ret.getColumns = getColumnsWindowsConsole
		var mode uint32
		err = windows.GetConsoleMode(windows.Handle(fd), &mode)
		if err != nil {
			return ret, nil // We'll treat this as success but without VT support
		}
		mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
		err = windows.SetConsoleMode(windows.Handle(fd), mode)
		if err != nil {
			return ret, nil // We'll treat this as success but without VT support
		}

		// If we get here then we've successfully turned on VT processing, so
		// we can return an OutputStream that answers true when asked if it
		// is a Terminal.
		ret.isTerminal = staticTrue
		return ret, nil

	} else if isatty.IsCygwinTerminal(fd) {
		// Cygwin terminals -- and other VT100 "fakers" for older versions of
		// Windows -- are not really terminals in the usual sense, but rather
		// are pipes between the child process (OpenTofu) and the terminal
		// emulator. isatty.IsCygwinTerminal uses some heuristics to
		// distinguish those pipes from other pipes we might see if the user
		// were, for example, using the | operator on the command line.
		// If we get in here then we'll assume that we can send VT100 sequences
		// to this stream, even though it isn't a terminal in the usual sense.

		ret.isTerminal = staticTrue
		// TODO: Is it possible to detect the width of these fake terminals?
		return ret, nil
	}

	// If we fall out here then we have a non-terminal filehandle, so we'll
	// just accept all of the default OutputStream behaviors
	return ret, nil
}

// configureInputHandle prepares an input file for use as an InputStream on
// Windows, switching the console to UTF-8 input where applicable.
func configureInputHandle(f *os.File) (*InputStream, error) {
	ret := &InputStream{
		File: f,
	}

	if fd := f.Fd(); isatty.IsTerminal(fd) {
		// We have to activate UTF-8 input, or else we fail. This will not
		// succeed on Windows 8 or early versions of Windows 10.
		// Notice that this doesn't take the specific file descriptor, because
		// the console is just ambiently associated with our process.
		err := SetConsoleCP(CP_UTF8)
		if err != nil {
			return nil, fmt.Errorf("failed to set the console to UTF-8 mode; you may need to use a newer version of Windows: %w", err)
		}
		ret.isTerminal = staticTrue
		return ret, nil
	} else if isatty.IsCygwinTerminal(fd) {
		// As with the output handles above, we'll use isatty's heuristic to
		// pretend that a pipe from mintty or a similar userspace terminal
		// emulator is actually a terminal.
		ret.isTerminal = staticTrue
		return ret, nil
	}

	// If we fall out here then we have a non-terminal filehandle, so we'll
	// just accept all of the default InputStream behaviors
	return ret, nil
}

// getColumnsWindowsConsole returns the width of the console buffer behind f,
// or defaultColumns if the buffer info can't be queried.
func getColumnsWindowsConsole(f *os.File) int {
	// We'll just unconditionally ask the given file for its console buffer
	// info here, and let it fail if the file isn't actually a console.
	// (In practice, the init functions above only hook up this function
	// if the handle looks like a console, so this should succeed.)
	var info windows.ConsoleScreenBufferInfo
	err := windows.GetConsoleScreenBufferInfo(windows.Handle(f.Fd()), &info)
	if err != nil {
		return defaultColumns
	}
	return int(info.Size.X)
}

// Unfortunately not all of the Windows kernel functions we need are in
// x/sys/windows at the time of writing, so we need to call some of them
// directly. (If you're maintaining this in future and have the capacity to
// test it well, consider checking if these functions have been added upstream
// yet and switch to their wrapper stubs if so.)
var modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
var procSetConsoleCP = modkernel32.NewProc("SetConsoleCP")
var procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP")

// CP_UTF8 is the Windows code page identifier for UTF-8.
const CP_UTF8 = 65001

// (These are written in the style of the stubs in x/sys/windows, which is
// a little non-idiomatic just due to the awkwardness of the low-level syscall
// interface.)
+ +func SetConsoleCP(codepageID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(codepageID), 0, 0) + if r1 == 0 { + err = e1 + } + return +} + +func SetConsoleOutputCP(codepageID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(codepageID), 0, 0) + if r1 == 0 { + err = e1 + } + return +} + +func staticTrue(f *os.File) bool { + return true +} + +func staticFalse(f *os.File) bool { + return false +} diff --git a/pkg/terminal/stream.go b/pkg/terminal/stream.go new file mode 100644 index 00000000000..2b23b123b21 --- /dev/null +++ b/pkg/terminal/stream.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package terminal + +import ( + "os" +) + +const defaultColumns int = 78 +const defaultIsTerminal bool = false + +// OutputStream represents an output stream that might or might not be connected +// to a terminal. +// +// There are typically two instances of this: one representing stdout and one +// representing stderr. +type OutputStream struct { + File *os.File + + // Interacting with a terminal is typically platform-specific, so we + // factor out these into virtual functions, although we have default + // behaviors suitable for non-Terminal output if any of these isn't + // set. (We're using function pointers rather than interfaces for this + // because it allows us to mix both normal methods and virtual methods + // on the same type, without a bunch of extra complexity.) + isTerminal func(*os.File) bool + getColumns func(*os.File) int +} + +// Columns returns a number of character cell columns that we expect will +// fill the width of the terminal that stdout is connected to, or a reasonable +// placeholder value of 78 if the output doesn't seem to be a terminal. 
+// +// This is a best-effort sort of function which may give an inaccurate result +// in various cases. For example, callers storing the result will not react +// to subsequent changes in the terminal width, and indeed this function itself +// may not be able to either, depending on the constraints of the current +// execution context. +func (s *OutputStream) Columns() int { + if s.getColumns == nil { + return defaultColumns + } + return s.getColumns(s.File) +} + +// IsTerminal returns true if we expect that the stream is connected to a +// terminal which supports VT100-style formatting and cursor control sequences. +func (s *OutputStream) IsTerminal() bool { + if s.isTerminal == nil { + return defaultIsTerminal + } + return s.isTerminal(s.File) +} + +// InputStream represents an input stream that might or might not be a terminal. +// +// There is typically only one instance of this type, representing stdin. +type InputStream struct { + File *os.File + + // Interacting with a terminal is typically platform-specific, so we + // factor out these into virtual functions, although we have default + // behaviors suitable for non-Terminal output if any of these isn't + // set. (We're using function pointers rather than interfaces for this + // because it allows us to mix both normal methods and virtual methods + // on the same type, without a bunch of extra complexity.) + isTerminal func(*os.File) bool +} + +// IsTerminal returns true if we expect that the stream is connected to a +// terminal which can support interactive input. +// +// If this returns false, callers might prefer to skip elaborate input prompt +// functionality like tab completion and instead just treat the input as a +// raw byte stream, or perhaps skip prompting for input at all depending on the +// situation. 
+func (s *InputStream) IsTerminal() bool { + if s.isTerminal == nil { + return defaultIsTerminal + } + return s.isTerminal(s.File) +} diff --git a/pkg/terminal/streams.go b/pkg/terminal/streams.go new file mode 100644 index 00000000000..24433b7f055 --- /dev/null +++ b/pkg/terminal/streams.go @@ -0,0 +1,110 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package terminal encapsulates some platform-specific logic for detecting +// if we're running in a terminal and, if so, properly configuring that +// terminal to meet the assumptions that the rest of OpenTofu makes. +// +// Specifically, OpenTofu requires a Terminal which supports virtual terminal +// sequences and which accepts UTF-8-encoded text. +// +// This is an abstraction only over the platform-specific detection of and +// possibly initialization of terminals. It's not intended to provide +// higher-level abstractions of the sort provided by packages like termcap or +// curses; ultimately we just assume that terminals are "standard" VT100-like +// terminals and use a subset of control codes that works across the various +// platforms we support. Our approximate target is "xterm-compatible" +// virtual terminals. +package terminal + +import ( + "fmt" + "os" +) + +// Streams represents a collection of three streams that each may or may not +// be connected to a terminal. +// +// If a stream is connected to a terminal then there are more possibilities +// available, such as detecting the current terminal width. If we're connected +// to something else, such as a pipe or a file on disk, the stream will +// typically provide placeholder values or do-nothing stubs for +// terminal-requiring operatons. +// +// Note that it's possible for only a subset of the streams to be connected +// to a terminal. 
For example, this happens if the user runs OpenTofu with +// I/O redirection where Stdout might refer to a regular disk file while Stderr +// refers to a terminal, or various other similar combinations. +type Streams struct { + Stdout *OutputStream + Stderr *OutputStream + Stdin *InputStream +} + +// Init tries to initialize a terminal, if OpenTofu is running in one, and +// returns an object describing what it was able to set up. +// +// An error for this function indicates that the current execution context +// can't meet OpenTofu's assumptions. For example, on Windows Init will return +// an error if OpenTofu is running in a Windows Console that refuses to +// activate UTF-8 mode, which can happen if we're running on an unsupported old +// version of Windows. +// +// Note that the success of this function doesn't mean that we're actually +// running in a terminal. It could also represent successfully detecting that +// one or more of the input/output streams is not a terminal. +func Init() (*Streams, error) { + // These configure* functions are platform-specific functions in other + // files that use //+build constraints to vary based on target OS. + + stderr, err := configureOutputHandle(os.Stderr) + if err != nil { + return nil, err + } + stdout, err := configureOutputHandle(os.Stdout) + if err != nil { + return nil, err + } + stdin, err := configureInputHandle(os.Stdin) + if err != nil { + return nil, err + } + + return &Streams{ + Stdout: stdout, + Stderr: stderr, + Stdin: stdin, + }, nil +} + +// Print is a helper for conveniently calling fmt.Fprint on the Stdout stream. +func (s *Streams) Print(a ...interface{}) (n int, err error) { + return fmt.Fprint(s.Stdout.File, a...) +} + +// Printf is a helper for conveniently calling fmt.Fprintf on the Stdout stream. +func (s *Streams) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(s.Stdout.File, format, a...) 
+} + +// Println is a helper for conveniently calling fmt.Fprintln on the Stdout stream. +func (s *Streams) Println(a ...interface{}) (n int, err error) { + return fmt.Fprintln(s.Stdout.File, a...) +} + +// Eprint is a helper for conveniently calling fmt.Fprint on the Stderr stream. +func (s *Streams) Eprint(a ...interface{}) (n int, err error) { + return fmt.Fprint(s.Stderr.File, a...) +} + +// Eprintf is a helper for conveniently calling fmt.Fprintf on the Stderr stream. +func (s *Streams) Eprintf(format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(s.Stderr.File, format, a...) +} + +// Eprintln is a helper for conveniently calling fmt.Fprintln on the Stderr stream. +func (s *Streams) Eprintln(a ...interface{}) (n int, err error) { + return fmt.Fprintln(s.Stderr.File, a...) +} diff --git a/pkg/terminal/streams_test.go b/pkg/terminal/streams_test.go new file mode 100644 index 00000000000..12bcf632783 --- /dev/null +++ b/pkg/terminal/streams_test.go @@ -0,0 +1,43 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package terminal

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// TestStreamsFmtHelpers verifies that the Print*/Eprint* helper methods on
// Streams route output to the correct stream (stdout vs stderr) with the
// expected fmt-style formatting.
func TestStreamsFmtHelpers(t *testing.T) {
	streams, close := StreamsForTesting(t)

	// Interleave stdout and stderr writes so the test also confirms the two
	// streams are captured independently.
	streams.Print("stdout print ", 5, "\n")
	streams.Eprint("stderr print ", 6, "\n")
	streams.Println("stdout println", 7)
	streams.Eprintln("stderr println", 8)
	streams.Printf("stdout printf %d\n", 9)
	streams.Eprintf("stderr printf %d\n", 10)

	outp := close(t)

	gotOut := outp.Stdout()
	wantOut := `stdout print 5
stdout println 7
stdout printf 9
`
	if diff := cmp.Diff(wantOut, gotOut); diff != "" {
		t.Errorf("wrong stdout\n%s", diff)
	}

	gotErr := outp.Stderr()
	wantErr := `stderr print 6
stderr println 8
stderr printf 10
`
	if diff := cmp.Diff(wantErr, gotErr); diff != "" {
		t.Errorf("wrong stderr\n%s", diff)
	}
}
diff --git a/pkg/terminal/testing.go b/pkg/terminal/testing.go
new file mode 100644
index 00000000000..7188092b6d3
--- /dev/null
+++ b/pkg/terminal/testing.go
@@ -0,0 +1,196 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package terminal
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+)
+
+// StreamsForTesting is a helper for test code that is aiming to test functions
+// that interact with the input and output streams.
+//
+// This particular function is for the simple case of a function that only
+// produces output: the returned input stream is connected to the system's
+// "null device", as if a user had run OpenTofu with I/O redirection like 1 {
		body = traversePathSteps(traverse, body)
	}

	// Default is to indicate a missing item in the deepest body we reached
	// while traversing.
	subject := SourceRangeFromHCL(body.MissingItemRange())
	ret.subject = &subject

	// Once we get here, "final" should be a GetAttr step that maps to an
	// attribute in our current body.
	finalStep, isAttr := final.(cty.GetAttrStep)
	if !isAttr {
		// The final step isn't an attribute reference, so the default
		// missing-item subject computed above stands.
		return &ret
	}

	// Decode just the one attribute we're interested in from the body.
	content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
		Attributes: []hcl.AttributeSchema{
			{
				Name:     finalStep.Name,
				Required: true,
			},
		},
	})
	if contentDiags.HasErrors() {
		return &ret
	}

	if attr, ok := content.Attributes[finalStep.Name]; ok {
		hclRange := attr.Expr.Range()
		if hasIdx {
			// Try to be more precise by finding index range
			hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr)
		}
		subject = SourceRangeFromHCL(hclRange)
		ret.subject = &subject
	}

	return &ret
}

// traversePathSteps walks as far as it can through the given path steps,
// treating each GetAttr step (optionally followed by an index step) as a
// nested block, and returns the deepest hcl.Body it managed to reach. When a
// step can't be followed, the most recently reached body is returned so the
// caller can report a missing item within it.
func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body {
	for i := 0; i < len(traverse); i++ {
		step := traverse[i]

		switch tStep := step.(type) {
		case cty.GetAttrStep:

			var next cty.PathStep
			if i < (len(traverse) - 1) {
				next = traverse[i+1]
			}

			// Will be indexing into our result here?
			var indexType cty.Type
			var indexVal cty.Value
			if nextIndex, ok := next.(cty.IndexStep); ok {
				indexVal = nextIndex.Key
				indexType = indexVal.Type()
				i++ // skip over the index on subsequent iterations
			}

			var blockLabelNames []string
			if indexType == cty.String {
				// Map traversal means we expect one label for the key.
				blockLabelNames = []string{"key"}
			}

			// For intermediate steps we expect to be referring to a child
			// block, so we'll attempt decoding under that assumption.
			content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
				Blocks: []hcl.BlockHeaderSchema{
					{
						Type:       tStep.Name,
						LabelNames: blockLabelNames,
					},
				},
			})
			if contentDiags.HasErrors() {
				return body
			}
			filtered := make([]*hcl.Block, 0, len(content.Blocks))
			for _, block := range content.Blocks {
				if block.Type == tStep.Name {
					filtered = append(filtered, block)
				}
			}
			if len(filtered) == 0 {
				// Step doesn't refer to a block
				// NOTE(review): this continues to the next step rather than
				// returning; confirm that skipping an unmatched step (with
				// its index already consumed above) is intended.
				continue
			}

			switch indexType {
			case cty.NilType: // no index at all
				if len(filtered) != 1 {
					return body
				}
				body = filtered[0].Body
			case cty.Number:
				var idx int
				err := gocty.FromCtyValue(indexVal, &idx)
				if err != nil || idx >= len(filtered) {
					return body
				}
				body = filtered[idx].Body
			case cty.String:
				key := indexVal.AsString()
				var block *hcl.Block
				for _, candidate := range filtered {
					if candidate.Labels[0] == key {
						block = candidate
						break
					}
				}
				if block == nil {
					// No block with this key, so we'll just indicate a
					// missing item in the containing block.
					return body
				}
				body = block.Body
			default:
				// Should never happen, because only string and numeric indices
				// are supported by cty collections.
				return body
			}

		default:
			// For any other kind of step, we'll just return our current body
			// as the subject and accept that this is a little inaccurate.
			return body
		}
	}
	return body
}

// hclRangeFromIndexStepAndAttribute narrows attr's range to the element or
// map entry selected by idxStep, falling back to the attribute's expression
// range (or name range for out-of-range indices/unknown keys) when the
// expression isn't a static list/map.
func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range {
	switch idxStep.Key.Type() {
	case cty.Number:
		var idx int
		err := gocty.FromCtyValue(idxStep.Key, &idx)
		items, diags := hcl.ExprList(attr.Expr)
		if diags.HasErrors() {
			return attr.Expr.Range()
		}
		if err != nil || idx >= len(items) {
			return attr.NameRange
		}
		return items[idx].Range()
	case cty.String:
		pairs, diags := hcl.ExprMap(attr.Expr)
		if diags.HasErrors() {
			return attr.Expr.Range()
		}
		stepKey := idxStep.Key.AsString()
		for _, kvPair := range pairs {
			key, diags := kvPair.Key.Value(nil)
			if diags.HasErrors() {
				return attr.Expr.Range()
			}
			if key.AsString() == stepKey {
				startRng := kvPair.Value.StartRange()
				return startRng
			}
		}
		return attr.NameRange
	}
	return attr.Expr.Range()
}

// Source returns the source location recorded for this diagnostic, if any.
func (d *attributeDiagnostic) Source() Source {
	return Source{
		Subject: d.subject,
	}
}

// WholeContainingBody returns a diagnostic about the body that is an implied
// current configuration context. This should be returned only from
// functions whose interface specifies a clear configuration context that this
// will be resolved in.
//
// The returned attribute will not have source location information until
// context is applied to the containing diagnostics using diags.InConfigBody.
// After context is applied, the source location is currently the missing item
// range of the body. In future, this may change to some other suitable
// part of the containing body.
+func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body, addr string) Diagnostic { + // don't change an existing address + if d.address == "" { + d.address = addr + } + + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + rng := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &rng + return &ret +} + +func (d *wholeBodyDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} diff --git a/pkg/tfdiags/contextual_test.go b/pkg/tfdiags/contextual_test.go new file mode 100644 index 00000000000..5256041836a --- /dev/null +++ b/pkg/tfdiags/contextual_test.go @@ -0,0 +1,586 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +func TestAttributeValue(t *testing.T) { + testConfig := ` +foo { + bar = "hi" +} +foo { + bar = "bar" +} +bar { + bar = "woot" +} +baz "a" { + bar = "beep" +} +baz "b" { + bar = "boop" +} +parent { + nested_str = "hello" + nested_str_tuple = ["aa", "bbb", "cccc"] + nested_num_tuple = [1, 9863, 22] + nested_map = { + first_key = "first_value" + second_key = "2nd value" + } +} +tuple_of_one = ["one"] +tuple_of_two = ["first", "22222"] +root_map = { + first = "1st" + second = "2nd" +} +simple_attr = "val" +` + // TODO: Test ConditionalExpr + // TODO: Test ForExpr + // TODO: Test FunctionCallExpr + // TODO: Test IndexExpr + // TODO: Test interpolation + // TODO: Test SplatExpr + + f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Fatal(parseDiags) + } + emptySrcRng := &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 1, Column: 1, Byte: 0}, + End: SourcePos{Line: 1, Column: 1, Byte: 0}, + } + + testCases := []struct { + Diag Diagnostic + ExpectedRange *SourceRange + }{ + { + AttributeValue( + Error, + "foo[0].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 3, Column: 9, Byte: 15}, + End: SourcePos{Line: 3, Column: 13, Byte: 19}, + }, + }, + { + AttributeValue( + Error, + "foo[1].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 6, Column: 9, Byte: 36}, + End: SourcePos{Line: 6, Column: 14, Byte: 41}, + }, + }, + { + 
AttributeValue( + Error, + "foo[99].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + emptySrcRng, + }, + { + AttributeValue( + Error, + "bar.bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "bar"}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 9, Column: 9, Byte: 58}, + End: SourcePos{Line: 9, Column: 15, Byte: 64}, + }, + }, + { + AttributeValue( + Error, + `baz["a"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("a")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 12, Column: 9, Byte: 85}, + End: SourcePos{Line: 12, Column: 15, Byte: 91}, + }, + }, + { + AttributeValue( + Error, + `baz["b"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("b")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 15, Column: 9, Byte: 112}, + End: SourcePos{Line: 15, Column: 15, Byte: 118}, + }, + }, + { + AttributeValue( + Error, + `baz["not_exists"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("not_exists")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + emptySrcRng, + }, + { + // Attribute value with subject already populated should not be disturbed. + // (in a real case, this might've been passed through from a deeper function + // in the call stack, for example.) 
+ &attributeDiagnostic{ + attrPath: cty.Path{cty.GetAttrStep{Name: "foo"}}, + diagnosticBase: diagnosticBase{ + summary: "preexisting", + detail: "detail", + address: "original", + }, + subject: &SourceRange{ + Filename: "somewhere_else.tf", + }, + }, + &SourceRange{ + Filename: "somewhere_else.tf", + }, + }, + { + // Missing path + &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + summary: "missing path", + }, + }, + nil, + }, + + // Nested attributes + { + AttributeValue( + Error, + "parent.nested_str", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 18, Column: 16, Byte: 145}, + End: SourcePos{Line: 18, Column: 23, Byte: 152}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[99]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 3, Byte: 155}, + End: SourcePos{Line: 19, Column: 19, Byte: 171}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 23, Byte: 175}, + End: SourcePos{Line: 19, Column: 27, Byte: 179}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[2]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(2)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 36, Byte: 188}, + End: SourcePos{Line: 19, Column: 42, Byte: 194}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_num_tuple[0]", + "detail", + cty.Path{ + 
cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_num_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 20, Column: 23, Byte: 218}, + End: SourcePos{Line: 20, Column: 24, Byte: 219}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_num_tuple[1]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_num_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 20, Column: 26, Byte: 221}, + End: SourcePos{Line: 20, Column: 30, Byte: 225}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.first_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("first_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 22, Column: 19, Byte: 266}, + End: SourcePos{Line: 22, Column: 30, Byte: 277}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.second_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("second_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 23, Column: 19, Byte: 297}, + End: SourcePos{Line: 23, Column: 28, Byte: 306}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.undefined_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("undefined_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 21, Column: 3, Byte: 233}, + End: SourcePos{Line: 21, Column: 13, Byte: 243}, + }, + }, + + // Root attributes of complex types + { + AttributeValue( + Error, + "tuple_of_one[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_one"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + 
), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 26, Column: 17, Byte: 330}, + End: SourcePos{Line: 26, Column: 22, Byte: 335}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_two[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 17, Byte: 353}, + End: SourcePos{Line: 27, Column: 24, Byte: 360}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_two[1]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 26, Byte: 362}, + End: SourcePos{Line: 27, Column: 33, Byte: 369}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_one[null]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_one"}, + cty.IndexStep{Key: cty.NullVal(cty.Number)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 26, Column: 1, Byte: 314}, + End: SourcePos{Line: 26, Column: 13, Byte: 326}, + }, + }, + { + // index out of range + AttributeValue( + Error, + "tuple_of_two[99]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 1, Byte: 337}, + End: SourcePos{Line: 27, Column: 13, Byte: 349}, + }, + }, + { + AttributeValue( + Error, + "root_map.first", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("first")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 29, Column: 13, Byte: 396}, + End: SourcePos{Line: 29, Column: 16, Byte: 399}, + }, + }, + { + AttributeValue( + Error, + "root_map.second", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("second")}, + }, + ), + &SourceRange{ + Filename: 
"test.tf", + Start: SourcePos{Line: 30, Column: 13, Byte: 413}, + End: SourcePos{Line: 30, Column: 16, Byte: 416}, + }, + }, + { + AttributeValue( + Error, + "root_map.undefined_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("undefined_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 28, Column: 1, Byte: 371}, + End: SourcePos{Line: 28, Column: 9, Byte: 379}, + }, + }, + { + AttributeValue( + Error, + "simple_attr", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "simple_attr"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 32, Column: 15, Byte: 434}, + End: SourcePos{Line: 32, Column: 20, Byte: 439}, + }, + }, + { + // This should never happen as error should always point to an attribute + // or index of an attribute, but we should not crash if it does + AttributeValue( + Error, + "key", + "index_step", + cty.Path{ + cty.IndexStep{Key: cty.StringVal("key")}, + }, + ), + emptySrcRng, + }, + { + // This should never happen as error should always point to an attribute + // or index of an attribute, but we should not crash if it does + AttributeValue( + Error, + "key.another", + "index_step", + cty.Path{ + cty.IndexStep{Key: cty.StringVal("key")}, + cty.IndexStep{Key: cty.StringVal("another")}, + }, + ), + emptySrcRng, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d:%s", i, tc.Diag.Description()), func(t *testing.T) { + var diags Diagnostics + + origAddr := tc.Diag.Description().Address + diags = diags.Append(tc.Diag) + + gotDiags := diags.InConfigBody(f.Body, "test.addr") + gotRange := gotDiags[0].Source().Subject + gotAddr := gotDiags[0].Description().Address + + switch { + case origAddr != "": + if gotAddr != origAddr { + t.Errorf("original diagnostic address modified from %s to %s", origAddr, gotAddr) + } + case gotAddr != "test.addr": + t.Error("missing detail address") + } + + for _, problem := range deep.Equal(gotRange, 
tc.ExpectedRange) { + t.Error(problem) + } + }) + } +} + +func TestGetAttribute(t *testing.T) { + path := cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "bar"}, + } + + d := AttributeValue( + Error, + "foo[0].bar", + "detail", + path, + ) + + p := GetAttribute(d) + if !reflect.DeepEqual(path, p) { + t.Fatalf("paths don't match:\nexpected: %#v\ngot: %#v", path, p) + } +} diff --git a/pkg/tfdiags/diagnostic.go b/pkg/tfdiags/diagnostic.go new file mode 100644 index 00000000000..47ef35871c8 --- /dev/null +++ b/pkg/tfdiags/diagnostic.go @@ -0,0 +1,69 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" +) + +type Diagnostic interface { + Severity() Severity + Description() Description + Source() Source + + // FromExpr returns the expression-related context for the diagnostic, if + // available. Returns nil if the diagnostic is not related to an + // expression evaluation. + FromExpr() *FromExpr + + // ExtraInfo returns the raw extra information value. This is a low-level + // API which requires some work on the part of the caller to properly + // access associated information, so in most cases it'll be more convienient + // to use the package-level ExtraInfo function to try to unpack a particular + // specialized interface from this value. + ExtraInfo() interface{} +} + +type Severity rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +// ToHCL converts a Severity to the equivalent HCL diagnostic severity. 
+func (s Severity) ToHCL() hcl.DiagnosticSeverity { + switch s { + case Warning: + return hcl.DiagWarning + case Error: + return hcl.DiagError + default: + // The above should always be exhaustive for all of the valid + // Severity values in this package. + panic(fmt.Sprintf("unknown diagnostic severity %s", s)) + } +} + +type Description struct { + Address string + Summary string + Detail string +} + +type Source struct { + Subject *SourceRange + Context *SourceRange +} + +type FromExpr struct { + Expression hcl.Expression + EvalContext *hcl.EvalContext +} diff --git a/pkg/tfdiags/diagnostic_base.go b/pkg/tfdiags/diagnostic_base.go new file mode 100644 index 00000000000..863a1202fbe --- /dev/null +++ b/pkg/tfdiags/diagnostic_base.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// diagnosticBase can be embedded in other diagnostic structs to get +// default implementations of Severity and Description. This type also +// has default implementations of Source and FromExpr that return no source +// location or expression-related information, so embedders should generally +// override those method to return more useful results where possible. 
+type diagnosticBase struct { + severity Severity + summary string + detail string + address string +} + +func (d diagnosticBase) Severity() Severity { + return d.severity +} + +func (d diagnosticBase) Description() Description { + return Description{ + Summary: d.summary, + Detail: d.detail, + Address: d.address, + } +} + +func (d diagnosticBase) Source() Source { + return Source{} +} + +func (d diagnosticBase) FromExpr() *FromExpr { + return nil +} + +func (d diagnosticBase) ExtraInfo() interface{} { + return nil +} diff --git a/pkg/tfdiags/diagnostic_extra.go b/pkg/tfdiags/diagnostic_extra.go new file mode 100644 index 00000000000..b849b990c42 --- /dev/null +++ b/pkg/tfdiags/diagnostic_extra.go @@ -0,0 +1,203 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// This "Extra" idea is something we've inherited from HCL's diagnostic model, +// and so it's primarily to expose that functionality from wrapped HCL +// diagnostics but other diagnostic types could potentially implement this +// protocol too, if needed. + +// ExtraInfo tries to retrieve extra information of interface type T from +// the given diagnostic. +// +// "Extra information" is situation-specific additional contextual data which +// might allow for some special tailored reporting of particular +// diagnostics in the UI. Conventionally the extra information is provided +// as a hidden type that implements one or more interfaces which a caller +// can pass as type parameter T to retrieve a value of that type when the +// diagnostic has such an implementation. +// +// If the given diagnostic's extra value has an implementation of interface T +// then ExtraInfo returns a non-nil interface value. If there is no such +// implementation, ExtraInfo returns a nil T. 
+// +// Although the signature of this function does not constrain T to be an +// interface type, our convention is to only use interface types to access +// extra info in order to allow for alternative or wrapping implementations +// of the interface. +func ExtraInfo[T any](diag Diagnostic) T { + extra := diag.ExtraInfo() + if ret, ok := extra.(T); ok { + return ret + } + + // If "extra" doesn't implement T directly then we'll delegate to + // our ExtraInfoNext helper to try iteratively unwrapping it. + return ExtraInfoNext[T](extra) +} + +// ExtraInfoNext takes a value previously returned by ExtraInfo and attempts +// to find an implementation of interface T wrapped inside of it. The return +// value meaning is the same as for ExtraInfo. +// +// This is to help with the less common situation where a particular "extra" +// value might be wrapping another value implementing the same interface, +// and so callers can peel away one layer at a time until there are no more +// nested layers. +// +// Because this function is intended for searching for _nested_ implementations +// of T, ExtraInfoNext does not consider whether value "previous" directly +// implements interface T, on the assumption that the previous call to ExtraInfo +// with the same T caused "previous" to already be that result. +func ExtraInfoNext[T any](previous interface{}) T { + // As long as T is an interface type as documented, zero will always be + // a nil interface value for us to return in the non-matching case. + var zero T + + unwrapper, ok := previous.(DiagnosticExtraUnwrapper) + // If the given value isn't unwrappable then it can't possibly have + // any other info nested inside of it. + if !ok { + return zero + } + + extra := unwrapper.UnwrapDiagnosticExtra() + + // We'll keep unwrapping until we either find the interface we're + // looking for or we run out of layers of unwrapper. 
+ for { + if ret, ok := extra.(T); ok { + return ret + } + + if unwrapper, ok := extra.(DiagnosticExtraUnwrapper); ok { + extra = unwrapper.UnwrapDiagnosticExtra() + } else { + return zero + } + } +} + +// DiagnosticExtraUnwrapper is an interface implemented by values in the +// Extra field of Diagnostic when they are wrapping another "Extra" value that +// was generated downstream. +// +// Diagnostic recipients which want to examine "Extra" values to sniff for +// particular types of extra data can either type-assert this interface +// directly and repeatedly unwrap until they recieve nil, or can use the +// helper function DiagnosticExtra. +// +// This interface intentionally matches hcl.DiagnosticExtraUnwrapper, so that +// wrapping extra values implemented using HCL's API will also work with the +// tfdiags API, but that non-HCL uses of this will not need to implement HCL +// just to get this interface. +type DiagnosticExtraUnwrapper interface { + // If the reciever is wrapping another "diagnostic extra" value, returns + // that value. Otherwise returns nil to indicate dynamically that nothing + // is wrapped. + // + // The "nothing is wrapped" condition can be signalled either by this + // method returning nil or by a type not implementing this interface at all. + // + // Implementers should never create unwrap "cycles" where a nested extra + // value returns a value that was also wrapping it. + UnwrapDiagnosticExtra() interface{} +} + +// DiagnosticExtraWrapper is an interface implemented by values that can be +// dynamically updated to wrap other extra info. +type DiagnosticExtraWrapper interface { + // WrapDiagnosticExtra accepts an ExtraInfo that it should add within the + // current ExtraInfo. + WrapDiagnosticExtra(inner interface{}) +} + +// DiagnosticExtraBecauseUnknown is an interface implemented by values in +// the Extra field of Diagnostic when the diagnostic is potentially caused by +// the presence of unknown values in an expression evaluation. 
+// +// Just implementing this interface is not sufficient signal, though. Callers +// must also call the DiagnosticCausedByUnknown method in order to confirm +// the result, or use the package-level function DiagnosticCausedByUnknown +// as a convenient wrapper. +type DiagnosticExtraBecauseUnknown interface { + // DiagnosticCausedByUnknown returns true if the associated diagnostic + // was caused by the presence of unknown values during an expression + // evaluation, or false otherwise. + // + // Callers might use this to tailor what contextual information they show + // alongside an error report in the UI, to avoid potential confusion + // caused by talking about the presence of unknown values if that was + // immaterial to the error. + DiagnosticCausedByUnknown() bool +} + +// DiagnosticCausedByUnknown returns true if the given diagnostic has an +// indication that it was caused by the presence of unknown values during +// an expression evaluation. +// +// This is a wrapper around checking if the diagnostic's extra info implements +// interface DiagnosticExtraBecauseUnknown and then calling its method if so. +func DiagnosticCausedByUnknown(diag Diagnostic) bool { + maybe := ExtraInfo[DiagnosticExtraBecauseUnknown](diag) + if maybe == nil { + return false + } + return maybe.DiagnosticCausedByUnknown() +} + +// DiagnosticExtraBecauseSensitive is an interface implemented by values in +// the Extra field of Diagnostic when the diagnostic is potentially caused by +// the presence of sensitive values in an expression evaluation. +// +// Just implementing this interface is not sufficient signal, though. Callers +// must also call the DiagnosticCausedBySensitive method in order to confirm +// the result, or use the package-level function DiagnosticCausedBySensitive +// as a convenient wrapper. 
+type DiagnosticExtraBecauseSensitive interface { + // DiagnosticCausedBySensitive returns true if the associated diagnostic + // was caused by the presence of sensitive values during an expression + // evaluation, or false otherwise. + // + // Callers might use this to tailor what contextual information they show + // alongside an error report in the UI, to avoid potential confusion + // caused by talking about the presence of sensitive values if that was + // immaterial to the error. + DiagnosticCausedBySensitive() bool +} + +// DiagnosticCausedBySensitive returns true if the given diagnostic has an +// indication that it was caused by the presence of sensitive values during +// an expression evaluation. +// +// This is a wrapper around checking if the diagnostic's extra info implements +// interface DiagnosticExtraBecauseSensitive and then calling its method if so. +func DiagnosticCausedBySensitive(diag Diagnostic) bool { + maybe := ExtraInfo[DiagnosticExtraBecauseSensitive](diag) + if maybe == nil { + return false + } + return maybe.DiagnosticCausedBySensitive() +} + +// DiagnosticExtraDoNotConsolidate tells the Diagnostics.ConsolidateWarnings +// function not to consolidate this diagnostic if it otherwise would. +type DiagnosticExtraDoNotConsolidate interface { + // DoNotConsolidateDiagnostic returns true if the associated diagnostic + // should not be consolidated by the Diagnostics.ConsolidateWarnings + // function. + DoNotConsolidateDiagnostic() bool +} + +// DoNotConsolidateDiagnostic returns true if the given diagnostic should not +// be consolidated by the Diagnostics.ConsolidateWarnings function. 
+func DoNotConsolidateDiagnostic(diag Diagnostic) bool { + maybe := ExtraInfo[DiagnosticExtraDoNotConsolidate](diag) + if maybe == nil { + return false + } + return maybe.DoNotConsolidateDiagnostic() +} diff --git a/pkg/tfdiags/diagnostics.go b/pkg/tfdiags/diagnostics.go new file mode 100644 index 00000000000..4c2331b0621 --- /dev/null +++ b/pkg/tfdiags/diagnostics.go @@ -0,0 +1,339 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "bytes" + "fmt" + "path/filepath" + "sort" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/v2" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. +type Diagnostics []Diagnostic + +// Append is the main interface for constructing Diagnostics lists, taking +// an existing list (which may be nil) and appending the new objects to it +// after normalizing them to be implementations of Diagnostic. +// +// The usual pattern for a function that natively "speaks" diagnostics is: +// +// // Create a nil Diagnostics at the start of the function +// var diags diag.Diagnostics +// +// // At later points, build on it if errors / warnings occur: +// foo, err := DoSomethingRisky() +// if err != nil { +// diags = diags.Append(err) +// } +// +// // Eventually return the result and diagnostics in place of error +// return result, diags +// +// Append accepts a variety of different diagnostic-like types, including +// native Go errors and HCL diagnostics. 
It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) // flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case NonFatalError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. 
+func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose any warnings that aren't +// accompanied by at least one error since such APIs have no mechanism through +// which to report those. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. +// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. 
+func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +// Sort applies an ordering to the diagnostics in the receiver in-place. +// +// The ordering is: warnings before errors, sourceless before sourced, +// short source paths before long source paths, and then ordering by +// position within each file. +// +// Diagnostics that do not differ by any of these sortable characteristics +// will remain in the same relative order after this method returns. +func (diags Diagnostics) Sort() { + sort.Stable(sortDiagnostics(diags)) +} + +func (diags Diagnostics) TrimDuplicated() { + sort.Stable(sortDiagnostics(diags)) +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + iSrc, jSrc := iD.Source(), jD.Source() + + switch { + + case iSev != jSev: + return iSev == Warning + + case (iSrc.Subject == nil) != (jSrc.Subject == nil): + return iSrc.Subject == nil + + case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: + iSubj := iSrc.Subject + jSubj := jSrc.Subject + switch { + case iSubj.Filename != jSubj.Filename: + // Path with fewer segments goes first if they are different lengths + sep := string(filepath.Separator) + iCount := strings.Count(iSubj.Filename, sep) + jCount := strings.Count(jSubj.Filename, sep) + if iCount != jCount { + return iCount < jCount + } + return iSubj.Filename < jSubj.Filename + case iSubj.Start.Byte != jSubj.Start.Byte: + return iSubj.Start.Byte < jSubj.Start.Byte + case iSubj.End.Byte != jSubj.End.Byte: + return iSubj.End.Byte < jSubj.End.Byte + } + fallthrough + + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. 
Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/pkg/tfdiags/diagnostics_test.go b/pkg/tfdiags/diagnostics_test.go new file mode 100644 index 00000000000..d7256de231a --- /dev/null +++ b/pkg/tfdiags/diagnostics_test.go @@ -0,0 +1,444 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/go-multierror" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" +) + +func TestBuild(t *testing.T) { + type diagFlat struct { + Severity Severity + Summary string + Detail string + Subject *SourceRange + Context *SourceRange + } + + tests := map[string]struct { + Cons func(Diagnostics) Diagnostics + Want []diagFlat + }{ + "nil": { + func(diags Diagnostics) Diagnostics { + return diags + }, + nil, + }, + "fmt.Errorf": { + func(diags Diagnostics) Diagnostics { + diags = diags.Append(fmt.Errorf("oh no bad")) + return diags + }, + []diagFlat{ + { + Severity: Error, + Summary: "oh no bad", + }, + }, + }, + "errors.New": { + func(diags Diagnostics) Diagnostics { + diags = diags.Append(errors.New("oh no bad")) + return diags + }, + []diagFlat{ + { + Severity: Error, + Summary: "oh no bad", + }, + }, + }, + "hcl.Diagnostic": { + func(diags Diagnostics) Diagnostics { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Something bad happened", + Detail: "It was really, really bad.", + Subject: &hcl.Range{ + Filename: "foo.tf", + Start: hcl.Pos{Line: 1, Column: 10, Byte: 9}, + End: hcl.Pos{Line: 2, Column: 3, Byte: 25}, + }, + Context: &hcl.Range{ + Filename: "foo.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 3, Column: 1, Byte: 30}, + }, + 
}) + return diags + }, + []diagFlat{ + { + Severity: Error, + Summary: "Something bad happened", + Detail: "It was really, really bad.", + Subject: &SourceRange{ + Filename: "foo.tf", + Start: SourcePos{Line: 1, Column: 10, Byte: 9}, + End: SourcePos{Line: 2, Column: 3, Byte: 25}, + }, + Context: &SourceRange{ + Filename: "foo.tf", + Start: SourcePos{Line: 1, Column: 1, Byte: 0}, + End: SourcePos{Line: 3, Column: 1, Byte: 30}, + }, + }, + }, + }, + "hcl.Diagnostics": { + func(diags Diagnostics) Diagnostics { + diags = diags.Append(hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Something bad happened", + Detail: "It was really, really bad.", + }, + { + Severity: hcl.DiagWarning, + Summary: "Also, somebody sneezed", + Detail: "How rude!", + }, + }) + return diags + }, + []diagFlat{ + { + Severity: Error, + Summary: "Something bad happened", + Detail: "It was really, really bad.", + }, + { + Severity: Warning, + Summary: "Also, somebody sneezed", + Detail: "How rude!", + }, + }, + }, + "multierror.Error": { + func(diags Diagnostics) Diagnostics { + err := multierror.Append(nil, errors.New("bad thing A")) + err = multierror.Append(err, errors.New("bad thing B")) + diags = diags.Append(err) + return diags + }, + []diagFlat{ + { + Severity: Error, + Summary: "bad thing A", + }, + { + Severity: Error, + Summary: "bad thing B", + }, + }, + }, + "concat Diagnostics": { + func(diags Diagnostics) Diagnostics { + var moreDiags Diagnostics + moreDiags = moreDiags.Append(errors.New("bad thing A")) + moreDiags = moreDiags.Append(errors.New("bad thing B")) + return diags.Append(moreDiags) + }, + []diagFlat{ + { + Severity: Error, + Summary: "bad thing A", + }, + { + Severity: Error, + Summary: "bad thing B", + }, + }, + }, + "single Diagnostic": { + func(diags Diagnostics) Diagnostics { + return diags.Append(SimpleWarning("Don't forget your toothbrush!")) + }, + []diagFlat{ + { + Severity: Warning, + Summary: "Don't forget your toothbrush!", + }, + }, + }, + "multiple 
appends": { + func(diags Diagnostics) Diagnostics { + diags = diags.Append(SimpleWarning("Don't forget your toothbrush!")) + diags = diags.Append(fmt.Errorf("exploded")) + return diags + }, + []diagFlat{ + { + Severity: Warning, + Summary: "Don't forget your toothbrush!", + }, + { + Severity: Error, + Summary: "exploded", + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotDiags := test.Cons(nil) + var got []diagFlat + for _, item := range gotDiags { + desc := item.Description() + source := item.Source() + got = append(got, diagFlat{ + Severity: item.Severity(), + Summary: desc.Summary, + Detail: desc.Detail, + Subject: source.Subject, + Context: source.Context, + }) + } + + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(test.Want)) + } + }) + } +} + +func TestDiagnosticsErr(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var diags Diagnostics + err := diags.Err() + if err != nil { + t.Errorf("got non-nil error %#v; want nil", err) + } + }) + t.Run("warning only", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(SimpleWarning("bad")) + err := diags.Err() + if err != nil { + t.Errorf("got non-nil error %#v; want nil", err) + } + }) + t.Run("one error", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + err := diags.Err() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + if got, want := err.Error(), "didn't work"; got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("two errors", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(errors.New("didn't work either")) + err := diags.Err() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := 
err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("error and warning", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(SimpleWarning("didn't work either")) + err := diags.Err() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + // Since this "as error" mode is just a fallback for + // non-diagnostics-aware situations like tests, we don't actually + // distinguish warnings and errors here since the point is to just + // get the messages rendered. User-facing code should be printing + // each diagnostic separately, so won't enter this codepath, + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestDiagnosticsErrWithWarnings(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var diags Diagnostics + err := diags.ErrWithWarnings() + if err != nil { + t.Errorf("got non-nil error %#v; want nil", err) + } + }) + t.Run("warning only", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(SimpleWarning("bad")) + err := diags.ErrWithWarnings() + if err == nil { + t.Errorf("got nil error; want NonFatalError") + return + } + if _, ok := err.(NonFatalError); !ok { + t.Errorf("got %T; want NonFatalError", err) + } + }) + t.Run("one error", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + err := diags.ErrWithWarnings() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + if got, want := err.Error(), "didn't work"; got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("two errors", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(errors.New("didn't work either")) + err := 
diags.ErrWithWarnings() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("error and warning", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(SimpleWarning("didn't work either")) + err := diags.ErrWithWarnings() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + // Since this "as error" mode is just a fallback for + // non-diagnostics-aware situations like tests, we don't actually + // distinguish warnings and errors here since the point is to just + // get the messages rendered. User-facing code should be printing + // each diagnostic separately, so won't enter this codepath, + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestDiagnosticsNonFatalErr(t *testing.T) { + t.Run("empty", func(t *testing.T) { + var diags Diagnostics + err := diags.NonFatalErr() + if err != nil { + t.Errorf("got non-nil error %#v; want nil", err) + } + }) + t.Run("warning only", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(SimpleWarning("bad")) + err := diags.NonFatalErr() + if err == nil { + t.Errorf("got nil error; want NonFatalError") + return + } + if _, ok := err.(NonFatalError); !ok { + t.Errorf("got %T; want NonFatalError", err) + } + }) + t.Run("one error", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + err := diags.NonFatalErr() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + if got, want := err.Error(), "didn't work"; got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + if 
_, ok := err.(NonFatalError); !ok { + t.Errorf("got %T; want NonFatalError", err) + } + }) + t.Run("two errors", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(errors.New("didn't work either")) + err := diags.NonFatalErr() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + if _, ok := err.(NonFatalError); !ok { + t.Errorf("got %T; want NonFatalError", err) + } + }) + t.Run("error and warning", func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(errors.New("didn't work")) + diags = diags.Append(SimpleWarning("didn't work either")) + err := diags.NonFatalErr() + if err == nil { + t.Fatalf("got nil error %#v; want non-nil", err) + } + // Since this "as error" mode is just a fallback for + // non-diagnostics-aware situations like tests, we don't actually + // distinguish warnings and errors here since the point is to just + // get the messages rendered. User-facing code should be printing + // each diagnostic separately, so won't enter this codepath, + want := strings.TrimSpace(` +2 problems: + +- didn't work +- didn't work either +`) + if got := err.Error(); got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + if _, ok := err.(NonFatalError); !ok { + t.Errorf("got %T; want NonFatalError", err) + } + }) +} diff --git a/pkg/tfdiags/doc.go b/pkg/tfdiags/doc.go new file mode 100644 index 00000000000..9a4902c0dd2 --- /dev/null +++ b/pkg/tfdiags/doc.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+// Package tfdiags is a utility package for representing errors and
+// warnings in a manner that allows us to produce good messages for the
+// user.
+//
+// "diag" is short for "diagnostics", and is meant as a general word for
+// feedback to a user about potential or actual problems.
+//
+// A design goal for this package is for it to be able to provide rich
+// messaging where possible but to also be pragmatic about dealing with
+// generic errors produced by system components that _can't_ provide
+// such rich messaging. As a consequence, the main types in this package --
+// Diagnostics and Diagnostic -- are designed so that they can be "smuggled"
+// over an error channel and then be unpacked at the other end, so that
+// error diagnostics (at least) can transit through APIs that are not
+// aware of this package.
+package tfdiags
diff --git a/pkg/tfdiags/error.go b/pkg/tfdiags/error.go
new file mode 100644
index 00000000000..796748140fd
--- /dev/null
+++ b/pkg/tfdiags/error.go
@@ -0,0 +1,42 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+// nativeError is a Diagnostic implementation that wraps a normal Go error.
+//
+// It always reports Error severity, carries no source or expression
+// information, and derives its summary from the wrapped error via
+// FormatError.
+type nativeError struct {
+	err error
+}
+
+var _ Diagnostic = nativeError{}
+
+func (e nativeError) Severity() Severity {
+	return Error
+}
+
+func (e nativeError) Description() Description {
+	return Description{
+		Summary: FormatError(e.err),
+	}
+}
+
+func (e nativeError) Source() Source {
+	// No source information available for a native error
+	return Source{}
+}
+
+func (e nativeError) FromExpr() *FromExpr {
+	// Native errors are not expression-related
+	return nil
+}
+
+func (e nativeError) ExtraInfo() interface{} {
+	// Native errors don't carry any "extra information".
+ return nil +} diff --git a/pkg/tfdiags/hcl.go b/pkg/tfdiags/hcl.go new file mode 100644 index 00000000000..8211b231095 --- /dev/null +++ b/pkg/tfdiags/hcl.go @@ -0,0 +1,138 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "github.com/hashicorp/hcl/v2" +) + +// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +func (d hclDiagnostic) FromExpr() *FromExpr { + if d.diag.Expression == nil || d.diag.EvalContext == nil { + return nil + } + return &FromExpr{ + Expression: d.diag.Expression, + EvalContext: d.diag.EvalContext, + } +} + +func (d hclDiagnostic) ExtraInfo() interface{} { + return d.diag.Extra +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs a HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. 
+func (r SourceRange) ToHCL() hcl.Range { + return hcl.Range{ + Filename: r.Filename, + Start: hcl.Pos{ + Line: r.Start.Line, + Column: r.Start.Column, + Byte: r.Start.Byte, + }, + End: hcl.Pos{ + Line: r.End.Line, + Column: r.End.Column, + Byte: r.End.Byte, + }, + } +} + +// ToHCL constructs a hcl.Diagnostics containing the same diagnostic messages +// as the receiving tfdiags.Diagnostics. +// +// This conversion preserves the data that HCL diagnostics are able to +// preserve but would be lossy in a round trip from tfdiags to HCL and then +// back to tfdiags, because it will lose the specific type information of +// the source diagnostics. In most cases this will not be a significant +// problem, but could produce an awkward result in some special cases such +// as converting the result of ConsolidateWarnings, which will force the +// resulting warning groups to be flattened early. +func (diags Diagnostics) ToHCL() hcl.Diagnostics { + if len(diags) == 0 { + return nil + } + ret := make(hcl.Diagnostics, len(diags)) + for i, diag := range diags { + severity := diag.Severity() + desc := diag.Description() + source := diag.Source() + fromExpr := diag.FromExpr() + + hclDiag := &hcl.Diagnostic{ + Summary: desc.Summary, + Detail: desc.Detail, + Severity: severity.ToHCL(), + } + if source.Subject != nil { + hclDiag.Subject = source.Subject.ToHCL().Ptr() + } + if source.Context != nil { + hclDiag.Context = source.Context.ToHCL().Ptr() + } + if fromExpr != nil { + hclDiag.Expression = fromExpr.Expression + hclDiag.EvalContext = fromExpr.EvalContext + } + + ret[i] = hclDiag + } + return ret +} diff --git a/pkg/tfdiags/hcl_test.go b/pkg/tfdiags/hcl_test.go new file mode 100644 index 00000000000..a851cd9a7c5 --- /dev/null +++ b/pkg/tfdiags/hcl_test.go @@ -0,0 +1,108 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func TestDiagnosticsToHCL(t *testing.T) { + var diags Diagnostics + diags = diags.Append(Sourceless( + Error, + "A sourceless diagnostic", + "...that has a detail", + )) + diags = diags.Append(fmt.Errorf("a diagnostic promoted from an error")) + diags = diags.Append(SimpleWarning("A diagnostic from a simple warning")) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "A diagnostic from HCL", + Detail: "...that has a detail and source information", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 2, Byte: 1}, + End: hcl.Pos{Line: 1, Column: 3, Byte: 2}, + }, + Context: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 1, Column: 4, Byte: 3}, + }, + EvalContext: &hcl.EvalContext{}, + Expression: &fakeHCLExpression{}, + }) + + got := diags.ToHCL() + want := hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "A sourceless diagnostic", + Detail: "...that has a detail", + }, + { + Severity: hcl.DiagError, + Summary: "a diagnostic promoted from an error", + }, + { + Severity: hcl.DiagWarning, + Summary: "A diagnostic from a simple warning", + }, + { + Severity: hcl.DiagWarning, + Summary: "A diagnostic from HCL", + Detail: "...that has a detail and source information", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 2, Byte: 1}, + End: hcl.Pos{Line: 1, Column: 3, Byte: 2}, + }, + Context: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 1, Byte: 0}, + End: hcl.Pos{Line: 1, Column: 4, Byte: 3}, + }, + EvalContext: &hcl.EvalContext{}, + Expression: &fakeHCLExpression{}, + }, + } + + if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(hcl.EvalContext{})); diff != "" { 
+ t.Errorf("incorrect result\n%s", diff) + } +} + +// We have this here just to give us something easy to compare in the test +// above, because we only care that the expression passes through, not about +// how exactly it is shaped. +type fakeHCLExpression struct { +} + +func (e *fakeHCLExpression) Range() hcl.Range { + return hcl.Range{} +} + +func (e *fakeHCLExpression) StartRange() hcl.Range { + return hcl.Range{} +} + +func (e *fakeHCLExpression) Variables() []hcl.Traversal { + return nil +} + +func (e *fakeHCLExpression) Functions() []hcl.Traversal { + return nil +} + +func (e *fakeHCLExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return cty.DynamicVal, nil +} diff --git a/pkg/tfdiags/override.go b/pkg/tfdiags/override.go new file mode 100644 index 00000000000..d1f43838ef7 --- /dev/null +++ b/pkg/tfdiags/override.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +// overriddenDiagnostic implements the Diagnostic interface by wrapping another +// Diagnostic while overriding the severity of the original Diagnostic. +type overriddenDiagnostic struct { + original Diagnostic + severity Severity + extra interface{} +} + +var _ Diagnostic = overriddenDiagnostic{} + +// OverrideAll accepts a set of Diagnostics and wraps them with a new severity +// and, optionally, a new ExtraInfo. +func OverrideAll(originals Diagnostics, severity Severity, createExtra func() DiagnosticExtraWrapper) Diagnostics { + var diags Diagnostics + for _, diag := range originals { + diags = diags.Append(Override(diag, severity, createExtra)) + } + return diags +} + +// Override matches OverrideAll except it operates over a single Diagnostic +// rather than multiple Diagnostics. 
+func Override(original Diagnostic, severity Severity, createExtra func() DiagnosticExtraWrapper) Diagnostic { + extra := original.ExtraInfo() + if createExtra != nil { + nw := createExtra() + nw.WrapDiagnosticExtra(extra) + extra = nw + } + + return overriddenDiagnostic{ + original: original, + severity: severity, + extra: extra, + } +} + +// UndoOverride will return the original diagnostic that was overridden within +// the OverrideAll function. +// +// If the provided Diagnostic was never overridden then it is simply returned +// unchanged. +func UndoOverride(diag Diagnostic) Diagnostic { + if override, ok := diag.(overriddenDiagnostic); ok { + return override.original + } + + // Then it wasn't overridden, so we'll just return the diag unchanged. + return diag +} + +func (o overriddenDiagnostic) Severity() Severity { + return o.severity +} + +func (o overriddenDiagnostic) Description() Description { + return o.original.Description() +} + +func (o overriddenDiagnostic) Source() Source { + return o.original.Source() +} + +func (o overriddenDiagnostic) FromExpr() *FromExpr { + return o.original.FromExpr() +} + +func (o overriddenDiagnostic) ExtraInfo() interface{} { + return o.extra +} diff --git a/pkg/tfdiags/override_test.go b/pkg/tfdiags/override_test.go new file mode 100644 index 00000000000..f344721af90 --- /dev/null +++ b/pkg/tfdiags/override_test.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" +) + +func TestOverride_UpdatesSeverity(t *testing.T) { + original := Sourceless(Error, "summary", "detail") + override := Override(original, Warning, nil) + + if override.Severity() != Warning { + t.Errorf("expected warning but was %s", override.Severity()) + } +} + +func TestOverride_MaintainsExtra(t *testing.T) { + original := hclDiagnostic{&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "summary", + Detail: "detail", + Extra: "extra", + }} + override := Override(original, Warning, nil) + + if override.ExtraInfo().(string) != "extra" { + t.Errorf("invalid extra info %v", override.ExtraInfo()) + } +} + +func TestOverride_WrapsExtra(t *testing.T) { + original := hclDiagnostic{&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "summary", + Detail: "detail", + Extra: "extra", + }} + override := Override(original, Warning, func() DiagnosticExtraWrapper { + return &extraWrapper{ + mine: "mine", + } + }) + + wrapper := override.ExtraInfo().(*extraWrapper) + if wrapper.mine != "mine" { + t.Errorf("invalid extra info %v", override.ExtraInfo()) + } + if wrapper.original.(string) != "extra" { + t.Errorf("invalid wrapped extra info %v", override.ExtraInfo()) + } +} + +func TestUndoOverride(t *testing.T) { + original := Sourceless(Error, "summary", "detail") + override := Override(original, Warning, nil) + restored := UndoOverride(override) + + if restored.Severity() != Error { + t.Errorf("expected warning but was %s", restored.Severity()) + } +} + +func TestUndoOverride_NotOverridden(t *testing.T) { + original := Sourceless(Error, "summary", "detail") + restored := UndoOverride(original) // Shouldn't do anything bad. 
+
+	if restored.Severity() != Error {
+		t.Errorf("expected warning but was %s", restored.Severity())
+	}
+}
+
+type extraWrapper struct {
+	mine     string
+	original interface{}
+}
+
+func (e *extraWrapper) WrapDiagnosticExtra(inner interface{}) {
+	e.original = inner
+}
diff --git a/pkg/tfdiags/rpc_friendly.go b/pkg/tfdiags/rpc_friendly.go
new file mode 100644
index 00000000000..73828624c26
--- /dev/null
+++ b/pkg/tfdiags/rpc_friendly.go
@@ -0,0 +1,73 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+import (
+	"encoding/gob"
+)
+
+// rpcFriendlyDiag is a gob-serializable implementation of Diagnostic.
+//
+// Its fields are exported (with a trailing underscore so they don't collide
+// with the interface method names) so that encoding/gob can serialize them.
+type rpcFriendlyDiag struct {
+	Severity_ Severity
+	Summary_  string
+	Detail_   string
+	Subject_  *SourceRange
+	Context_  *SourceRange
+}
+
+// makeRPCFriendlyDiag transforms a given diagnostic so that it is more
+// friendly to RPC.
+//
+// In particular, it currently returns an object that can be serialized and
+// later re-inflated using gob. This definition may grow to include other
+// serializations later.
+func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic {
+	desc := diag.Description()
+	source := diag.Source()
+	return &rpcFriendlyDiag{
+		Severity_: diag.Severity(),
+		Summary_:  desc.Summary,
+		Detail_:   desc.Detail,
+		Subject_:  source.Subject,
+		Context_:  source.Context,
+	}
+}
+
+func (d *rpcFriendlyDiag) Severity() Severity {
+	return d.Severity_
+}
+
+func (d *rpcFriendlyDiag) Description() Description {
+	return Description{
+		Summary: d.Summary_,
+		Detail:  d.Detail_,
+	}
+}
+
+func (d *rpcFriendlyDiag) Source() Source {
+	return Source{
+		Subject: d.Subject_,
+		Context: d.Context_,
+	}
+}
+
+func (d rpcFriendlyDiag) FromExpr() *FromExpr {
+	// RPC-friendly diagnostics cannot preserve expression information because
+	// expressions themselves are not RPC-friendly.
+	return nil
+}
+
+func (d rpcFriendlyDiag) ExtraInfo() interface{} {
+	// RPC-friendly diagnostics always discard any "extra information".
+ return nil +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/pkg/tfdiags/rpc_friendly_test.go b/pkg/tfdiags/rpc_friendly_test.go new file mode 100644 index 00000000000..31f1fdc69f9 --- /dev/null +++ b/pkg/tfdiags/rpc_friendly_test.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +import ( + "bytes" + "encoding/gob" + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + "github.com/hashicorp/hcl/v2" +) + +func TestDiagnosticsForRPC(t *testing.T) { + var diags Diagnostics + diags = diags.Append(fmt.Errorf("bad")) + diags = diags.Append(SimpleWarning("less bad")) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "bad bad bad", + Detail: "badily bad bad", + Subject: &hcl.Range{ + Filename: "foo", + }, + Context: &hcl.Range{ + Filename: "bar", + }, + }) + + buf := bytes.Buffer{} + enc := gob.NewEncoder(&buf) + dec := gob.NewDecoder(&buf) + + rpcDiags := diags.ForRPC() + err := enc.Encode(rpcDiags) + if err != nil { + t.Fatalf("error on Encode: %s", err) + } + + var got Diagnostics + err = dec.Decode(&got) + if err != nil { + t.Fatalf("error on Decode: %s", err) + } + + want := Diagnostics{ + &rpcFriendlyDiag{ + Severity_: Error, + Summary_: "bad", + }, + &rpcFriendlyDiag{ + Severity_: Warning, + Summary_: "less bad", + }, + &rpcFriendlyDiag{ + Severity_: Error, + Summary_: "bad bad bad", + Detail_: "badily bad bad", + Subject_: &SourceRange{ + Filename: "foo", + }, + Context_: &SourceRange{ + Filename: "bar", + }, + }, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} diff --git a/pkg/tfdiags/severity_string.go b/pkg/tfdiags/severity_string.go new file mode 100644 index 00000000000..78a721068c3 --- /dev/null +++ b/pkg/tfdiags/severity_string.go @@ -0,0 +1,29 @@ +// Code 
generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/tfdiags/simple_warning.go b/pkg/tfdiags/simple_warning.go new file mode 100644 index 00000000000..62ec31e51ec --- /dev/null +++ b/pkg/tfdiags/simple_warning.go @@ -0,0 +1,40 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a simple warning + return Source{} +} + +func (e simpleWarning) FromExpr() *FromExpr { + // Simple warnings are not expression-related + return nil +} + +func (e simpleWarning) ExtraInfo() interface{} { + // Simple warnings cannot carry extra information. 
+	return nil
+}
diff --git a/pkg/tfdiags/source_range.go b/pkg/tfdiags/source_range.go
new file mode 100644
index 00000000000..955efcab4ff
--- /dev/null
+++ b/pkg/tfdiags/source_range.go
@@ -0,0 +1,46 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// SourceRange represents a span of characters within a particular source
+// file, for use in diagnostic messages.
+type SourceRange struct {
+	Filename   string
+	Start, End SourcePos
+}
+
+// SourcePos is a single position within a source file.
+//
+// NOTE(review): the positions used elsewhere in this package treat Line and
+// Column as 1-based and Byte as a 0-based offset -- confirm before relying
+// on this.
+type SourcePos struct {
+	Line, Column, Byte int
+}
+
+// StartString returns a string representation of the start of the range,
+// including the filename and the line and column numbers.
+func (r SourceRange) StartString() string {
+	filename := r.Filename
+
+	// We'll try to relative-ize our filename here so it's less verbose
+	// in the common case of being in the current working directory. If not,
+	// we'll just show the full path.
+	wd, err := os.Getwd()
+	if err == nil {
+		relFn, err := filepath.Rel(wd, filename)
+		if err == nil {
+			filename = relFn
+		}
+	}
+
+	return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column)
+}
diff --git a/pkg/tfdiags/sourceless.go b/pkg/tfdiags/sourceless.go
new file mode 100644
index 00000000000..036dbdb349e
--- /dev/null
+++ b/pkg/tfdiags/sourceless.go
@@ -0,0 +1,18 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tfdiags
+
+// Sourceless creates and returns a diagnostic with no source location
+// information. This is generally used for operational-type errors that are
+// caused by or relate to the environment where OpenTofu is running rather
+// than to the provided configuration.
+func Sourceless(severity Severity, summary, detail string) Diagnostic { + return diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + } +} diff --git a/pkg/tfplugin5/tfplugin5.pb.go b/pkg/tfplugin5/tfplugin5.pb.go new file mode 100644 index 00000000000..e71e9dd689a --- /dev/null +++ b/pkg/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,7147 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 5.5 +// +// This file defines version 5.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaing backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v3.15.6 +// source: tfplugin5.proto + +package tfplugin5 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StringKind int32 + +const ( + StringKind_PLAIN StringKind = 0 + StringKind_MARKDOWN StringKind = 1 +) + +// Enum value maps for StringKind. +var ( + StringKind_name = map[int32]string{ + 0: "PLAIN", + 1: "MARKDOWN", + } + StringKind_value = map[string]int32{ + "PLAIN": 0, + "MARKDOWN": 1, + } +) + +func (x StringKind) Enum() *StringKind { + p := new(StringKind) + *p = x + return p +} + +func (x StringKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StringKind) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[0].Descriptor() +} + +func (StringKind) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[0] +} + +func (x StringKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StringKind.Descriptor instead. +func (StringKind) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +// Enum value maps for Diagnostic_Severity. 
+var ( + Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", + } + Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, + } +) + +func (x Diagnostic_Severity) Enum() *Diagnostic_Severity { + p := new(Diagnostic_Severity) + *p = x + return p +} + +func (x Diagnostic_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[1].Descriptor() +} + +func (Diagnostic_Severity) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[1] +} + +func (x Diagnostic_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Diagnostic_Severity.Descriptor instead. +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +// Enum value maps for Schema_NestedBlock_NestingMode. 
+var ( + Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", + } + Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, + } +) + +func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode { + p := new(Schema_NestedBlock_NestingMode) + *p = x + return p +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin5_proto_enumTypes[2].Descriptor() +} + +func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin5_proto_enumTypes[2] +} + +func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead. +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. 
+func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{0} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +func (x *DynamicValue) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +type Diagnostic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` +} + +func (x *Diagnostic) Reset() { + *x = Diagnostic{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostic) ProtoMessage() {} + +func (x *Diagnostic) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead. 
+func (*Diagnostic) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{1} +} + +func (x *Diagnostic) GetSeverity() Diagnostic_Severity { + if x != nil { + return x.Severity + } + return Diagnostic_INVALID +} + +func (x *Diagnostic) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Diagnostic) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +func (x *Diagnostic) GetAttribute() *AttributePath { + if x != nil { + return x.Attribute + } + return nil +} + +type FunctionError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + // The optional function_argument records the index position of the + // argument which caused the error. + FunctionArgument *int64 `protobuf:"varint,2,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` +} + +func (x *FunctionError) Reset() { + *x = FunctionError{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FunctionError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FunctionError) ProtoMessage() {} + +func (x *FunctionError) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FunctionError.ProtoReflect.Descriptor instead. 
+func (*FunctionError) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{2} +} + +func (x *FunctionError) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *FunctionError) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + +type AttributePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *AttributePath) Reset() { + *x = AttributePath{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath) ProtoMessage() {} + +func (x *AttributePath) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead. 
+func (*AttributePath) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributePath) GetSteps() []*AttributePath_Step { + if x != nil { + return x.Steps + } + return nil +} + +type Stop struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop) Reset() { + *x = Stop{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop) ProtoMessage() {} + +func (x *Stop) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop.ProtoReflect.Descriptor instead. +func (*Stop) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4} +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
+type RawState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RawState) Reset() { + *x = RawState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawState) ProtoMessage() {} + +func (x *RawState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawState.ProtoReflect.Descriptor instead. +func (*RawState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{5} +} + +func (x *RawState) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +func (x *RawState) GetFlatmap() map[string]string { + if x != nil { + return x.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6} +} + +func (x *Schema) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +type ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"` + // The get_provider_schema_optional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can used a cached copy of the provider's + // schema. 
+ GetProviderSchemaOptional bool `protobuf:"varint,2,opt,name=get_provider_schema_optional,json=getProviderSchemaOptional,proto3" json:"get_provider_schema_optional,omitempty"` + // The move_resource_state capability signals that a provider supports the + // MoveResourceState RPC. + MoveResourceState bool `protobuf:"varint,3,opt,name=move_resource_state,json=moveResourceState,proto3" json:"move_resource_state,omitempty"` +} + +func (x *ServerCapabilities) Reset() { + *x = ServerCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerCapabilities) ProtoMessage() {} + +func (x *ServerCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead. +func (*ServerCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7} +} + +func (x *ServerCapabilities) GetPlanDestroy() bool { + if x != nil { + return x.PlanDestroy + } + return false +} + +func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool { + if x != nil { + return x.GetProviderSchemaOptional + } + return false +} + +func (x *ServerCapabilities) GetMoveResourceState() bool { + if x != nil { + return x.MoveResourceState + } + return false +} + +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. 
+ Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. + Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. 
+ DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. +func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + +type GetMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata) Reset() { + *x = GetMetadata{} + if protoimpl.UnsafeEnabled 
{ + mi := &file_tfplugin5_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata) ProtoMessage() {} + +func (x *GetMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9} +} + +type GetProviderSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema) Reset() { + *x = GetProviderSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema) ProtoMessage() {} + +func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10} +} + +type PrepareProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PrepareProviderConfig) Reset() { + *x = PrepareProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig) ProtoMessage() {} + +func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead. 
+func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11} +} + +type UpgradeResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpgradeResourceState) Reset() { + *x = UpgradeResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState) ProtoMessage() {} + +func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12} +} + +type ValidateResourceTypeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateResourceTypeConfig) Reset() { + *x = ValidateResourceTypeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13} +} + +type ValidateDataSourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateDataSourceConfig) Reset() { + *x = ValidateDataSourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig) ProtoMessage() {} + +func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14} +} + +type Configure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Configure) Reset() { + *x = Configure{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure) ProtoMessage() {} + +func (x *Configure) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure.ProtoReflect.Descriptor instead. +func (*Configure) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15} +} + +type ReadResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadResource) Reset() { + *x = ReadResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource) ProtoMessage() {} + +func (x *ReadResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. 
+func (*ReadResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16} +} + +type PlanResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PlanResourceChange) Reset() { + *x = PlanResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange) ProtoMessage() {} + +func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17} +} + +type ApplyResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyResourceChange) Reset() { + *x = ApplyResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange) ProtoMessage() {} + +func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18} +} + +type ImportResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ImportResourceState) Reset() { + *x = ImportResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState) ProtoMessage() {} + +func (x *ImportResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19} +} + +type MoveResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MoveResourceState) Reset() { + *x = MoveResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState) ProtoMessage() {} + +func (x *MoveResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. 
+func (*MoveResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20} +} + +type ReadDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadDataSource) Reset() { + *x = ReadDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource) ProtoMessage() {} + +func (x *ReadDataSource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21} +} + +type GetProvisionerSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema) Reset() { + *x = GetProvisionerSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema) ProtoMessage() {} + +func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
GetProvisionerSchema.ProtoReflect.Descriptor instead. +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22} +} + +type ValidateProvisionerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateProvisionerConfig) Reset() { + *x = ValidateProvisionerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig) ProtoMessage() {} + +func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23} +} + +type ProvisionResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProvisionResource) Reset() { + *x = ProvisionResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource) ProtoMessage() {} + +func (x *ProvisionResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. +func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. 
+func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26} +} + +type AttributePath_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *AttributePath_Step) Reset() { + *x = AttributePath_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath_Step) ProtoMessage() {} + +func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead. 
+func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *AttributePath_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyString() string { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. 
+ ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +type Stop_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Stop_Request) Reset() { + *x = Stop_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Request) ProtoMessage() {} + +func (x *Stop_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Request.ProtoReflect.Descriptor instead. 
+func (*Stop_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4, 0} +} + +type Stop_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *Stop_Response) Reset() { + *x = Stop_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stop_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stop_Response) ProtoMessage() {} + +func (x *Stop_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stop_Response.ProtoReflect.Descriptor instead. 
+func (*Stop_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *Stop_Response) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type Schema_Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Block) Reset() { + *x = Schema_Block{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Block) ProtoMessage() {} + +func (x *Schema_Block) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead. 
+func (*Schema_Block) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *Schema_Block) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema_Block) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if x != nil { + return x.BlockTypes + } + return nil +} + +func (x *Schema_Block) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Block) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Block) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Attribute) Reset() { + *x = Schema_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[32] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Attribute) ProtoMessage() {} + +func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead. +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *Schema_Attribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Schema_Attribute) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema_Attribute) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Attribute) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Schema_Attribute) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + +func (x *Schema_Attribute) GetComputed() bool { + if x != nil { + return x.Computed + } + return false +} + +func (x *Schema_Attribute) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +func (x *Schema_Attribute) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Attribute) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block 
*Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_NestedBlock) Reset() { + *x = Schema_NestedBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_NestedBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_NestedBlock) ProtoMessage() {} + +func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead. 
+func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *Schema_NestedBlock) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *Schema_NestedBlock) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (x *Schema_NestedBlock) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema_NestedBlock) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Function_Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the human-readable display name for the parameter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // type is the type constraint for the parameter. + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"` + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"` + // description is human-readable documentation for the parameter. 
+ Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` +} + +func (x *Function_Parameter) Reset() { + *x = Function_Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Parameter) ProtoMessage() {} + +func (x *Function_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. 
+func (*Function_Parameter) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Function_Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Function_Parameter) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Function_Parameter) GetAllowNullValue() bool { + if x != nil { + return x.AllowNullValue + } + return false +} + +func (x *Function_Parameter) GetAllowUnknownValues() bool { + if x != nil { + return x.AllowUnknownValues + } + return false +} + +func (x *Function_Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function_Parameter) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +type Function_Return struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the type constraint for the function result. + Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. 
+func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. +func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` + Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. 
+ Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` +} + +func (x *GetMetadata_Response) Reset() { + *x = GetMetadata_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Response) ProtoMessage() {} + +func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. +func (*GetMetadata_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetMetadata_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetMetadata_Response) GetDataSources() []*GetMetadata_DataSourceMetadata { + if x != nil { + return x.DataSources + } + return nil +} + +func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { + if x != nil { + return x.Resources + } + return nil +} + +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetMetadata_DataSourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_DataSourceMetadata) Reset() { + *x = GetMetadata_DataSourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_DataSourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetMetadata_ResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_ResourceMetadata) Reset() { + *x = GetMetadata_ResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_ResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_ResourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *GetMetadata_ResourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetProviderSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema_Request) Reset() { + *x = GetProviderSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Request) ProtoMessage() {} + +func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} +} + +type GetProviderSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. 
+ Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetProviderSchema_Response) Reset() { + *x = GetProviderSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Response) ProtoMessage() {} + +func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} +} + +func (x *GetProviderSchema_Response) GetProvider() *Schema { + if x != nil { + return x.Provider + } + return nil +} + +func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if x != nil { + return x.ResourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if x != nil { + return x.DataSourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { + if x != nil { + return x.ProviderMeta + } + return nil +} + +func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +type PrepareProviderConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *PrepareProviderConfig_Request) Reset() { + *x = PrepareProviderConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig_Request) ProtoMessage() {} + +func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead. +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *PrepareProviderConfig_Response) Reset() { + *x = PrepareProviderConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareProviderConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareProviderConfig_Response) ProtoMessage() {} + +func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead. 
+func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if x != nil { + return x.PreparedConfig + } + return nil +} + +func (x *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// UpgradeResourceState RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to exist (in the case of resource destruction), be wholly +// known, nor match the given prior state, which could lead to unexpected +// provider behaviors for practitioners. +type UpgradeResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. 
+ RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` +} + +func (x *UpgradeResourceState_Request) Reset() { + *x = UpgradeResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Request) ProtoMessage() {} + +func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *UpgradeResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *UpgradeResourceState_Request) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *UpgradeResourceState_Request) GetRawState() *RawState { + if x != nil { + return x.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *UpgradeResourceState_Response) Reset() { + *x = UpgradeResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Response) ProtoMessage() {} + +func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if x != nil { + return x.UpgradedState + } + return nil +} + +func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Request) Reset() { + *x = ValidateResourceTypeConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *ValidateResourceTypeConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateResourceTypeConfig_Response) Reset() { + *x = ValidateResourceTypeConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceTypeConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} + +func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateDataSourceConfig_Request) Reset() { + *x = ValidateDataSourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ValidateDataSourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateDataSourceConfig_Response) Reset() { + *x = ValidateDataSourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataSourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} + +func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type Configure_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *Configure_Request) Reset() { + *x = Configure_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Request) ProtoMessage() {} + +func (x *Configure_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead. 
+func (*Configure_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *Configure_Request) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *Configure_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type Configure_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *Configure_Response) Reset() { + *x = Configure_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configure_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configure_Response) ProtoMessage() {} + +func (x *Configure_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead. +func (*Configure_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} +} + +func (x *Configure_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// ReadResource RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. 
Additionally, the configuration is +// not guaranteed to be wholly known nor match the given prior state, which +// could lead to unexpected provider behaviors for practitioners. +type ReadResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadResource_Request) Reset() { + *x = ReadResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Request) ProtoMessage() {} + +func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ReadResource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadResource_Request) GetCurrentState() *DynamicValue { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *ReadResource_Request) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ReadResource_Response) Reset() { + *x = ReadResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Response) ProtoMessage() {} + +func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} +} + +func (x *ReadResource_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ReadResource_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type PlanResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *PlanResourceChange_Request) Reset() { + *x = PlanResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Request) ProtoMessage() {} + +func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} +} + +func (x *PlanResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if x != nil { + return x.ProposedNewState + } + return nil +} + +func (x *PlanResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *PlanResourceChange_Request) GetPriorPrivate() []byte { + if x != nil { + return x.PriorPrivate + } + return nil +} + +func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type PlanResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to 
the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *PlanResourceChange_Response) Reset() { + *x = PlanResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Response) ProtoMessage() {} + +func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} +} + +func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if x != nil { + return x.RequiresReplace + } + return nil +} + +func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ApplyResourceChange_Request) Reset() { + *x = ApplyResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Request) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Request) ProtoMessage() {} + +func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *ApplyResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ApplyResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 
permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *ApplyResourceChange_Response) Reset() { + *x = ApplyResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Response) ProtoMessage() {} + +func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ApplyResourceChange_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ImportResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ImportResourceState_Request) Reset() { + *x = ImportResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Request) ProtoMessage() {} + +func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ImportResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_Request) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ImportResourceState_ImportedResource) Reset() { + *x = ImportResourceState_ImportedResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_ImportedResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_ImportedResource) ProtoMessage() {} + +func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} +} + +func (x *ImportResourceState_ImportedResource) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ImportResourceState_ImportedResource) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type ImportResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ImportResourceState_Response) Reset() { + *x = ImportResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Response) ProtoMessage() {} + +func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 2} +} + +func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if x != nil { + return x.ImportedResources + } + return nil +} + +func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type MoveResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address of the provider the resource is being moved from. + SourceProviderAddress string `protobuf:"bytes,1,opt,name=source_provider_address,json=sourceProviderAddress,proto3" json:"source_provider_address,omitempty"` + // The resource type that the resource is being moved from. + SourceTypeName string `protobuf:"bytes,2,opt,name=source_type_name,json=sourceTypeName,proto3" json:"source_type_name,omitempty"` + // The schema version of the resource type that the resource is being + // moved from. + SourceSchemaVersion int64 `protobuf:"varint,3,opt,name=source_schema_version,json=sourceSchemaVersion,proto3" json:"source_schema_version,omitempty"` + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + SourceState *RawState `protobuf:"bytes,4,opt,name=source_state,json=sourceState,proto3" json:"source_state,omitempty"` + // The resource type that the resource is being moved to. + TargetTypeName string `protobuf:"bytes,5,opt,name=target_type_name,json=targetTypeName,proto3" json:"target_type_name,omitempty"` + // The private state of the resource being moved. 
+ SourcePrivate []byte `protobuf:"bytes,6,opt,name=source_private,json=sourcePrivate,proto3" json:"source_private,omitempty"` +} + +func (x *MoveResourceState_Request) Reset() { + *x = MoveResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Request) ProtoMessage() {} + +func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. +func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *MoveResourceState_Request) GetSourceProviderAddress() string { + if x != nil { + return x.SourceProviderAddress + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceTypeName() string { + if x != nil { + return x.SourceTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceSchemaVersion() int64 { + if x != nil { + return x.SourceSchemaVersion + } + return 0 +} + +func (x *MoveResourceState_Request) GetSourceState() *RawState { + if x != nil { + return x.SourceState + } + return nil +} + +func (x *MoveResourceState_Request) GetTargetTypeName() string { + if x != nil { + return x.TargetTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourcePrivate() []byte { + if x != nil { + return x.SourcePrivate + } + return nil +} + +type MoveResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + // The state of the resource after it has been moved. + TargetState *DynamicValue `protobuf:"bytes,1,opt,name=target_state,json=targetState,proto3" json:"target_state,omitempty"` + // Any diagnostics that occurred during the move. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // The private state of the resource after it has been moved. + TargetPrivate []byte `protobuf:"bytes,3,opt,name=target_private,json=targetPrivate,proto3" json:"target_private,omitempty"` +} + +func (x *MoveResourceState_Response) Reset() { + *x = MoveResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Response) ProtoMessage() {} + +func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { + if x != nil { + return x.TargetState + } + return nil +} + +func (x *MoveResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *MoveResourceState_Response) GetTargetPrivate() []byte { + if x != nil { + return x.TargetPrivate + } + return nil +} + +type ReadDataSource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadDataSource_Request) Reset() { + *x = ReadDataSource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Request) ProtoMessage() {} + +func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *ReadDataSource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadDataSource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ReadDataSource_Response) Reset() { + *x = ReadDataSource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Response) ProtoMessage() {} + +func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *ReadDataSource_Response) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetProvisionerSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProvisionerSchema_Request) Reset() { + *x = GetProvisionerSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Request) ProtoMessage() {} + +func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead. 
+func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0} +} + +type GetProvisionerSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetProvisionerSchema_Response) Reset() { + *x = GetProvisionerSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProvisionerSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProvisionerSchema_Response) ProtoMessage() {} + +func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1} +} + +func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if x != nil { + return x.Provisioner + } + return nil +} + +func (x *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateProvisionerConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateProvisionerConfig_Request) Reset() { + *x = ValidateProvisionerConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0} +} + +func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateProvisionerConfig_Response) Reset() { + *x = ValidateProvisionerConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProvisionerConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} + +func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1} +} + +func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ProvisionResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (x *ProvisionResource_Request) Reset() { + *x = ProvisionResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Request) ProtoMessage() {} + +func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0} +} + +func (x *ProvisionResource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ProvisionResource_Request) GetConnection() *DynamicValue { + if x != nil { + return x.Connection + } + return nil +} + +type ProvisionResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ProvisionResource_Response) Reset() { + *x = ProvisionResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProvisionResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProvisionResource_Response) ProtoMessage() {} + +func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead. 
+func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1} +} + +func (x *ProvisionResource_Response) GetOutput() string { + if x != nil { + return x.Output + } + return "" +} + +func (x *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{25, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+ Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` +} + +func (x *CallFunction_Request) Reset() { + *x = CallFunction_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Request) ProtoMessage() {} + +func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. +func (*CallFunction_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 0} +} + +func (x *CallFunction_Request) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CallFunction_Request) GetArguments() []*DynamicValue { + if x != nil { + return x.Arguments + } + return nil +} + +type CallFunction_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // result is result value after running the function logic. + Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // error is any error from the function logic. 
+ Error *FunctionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *CallFunction_Response) Reset() { + *x = CallFunction_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Response) ProtoMessage() {} + +func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. +func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{26, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetError() *FunctionError { + if x != nil { + return x.Error + } + return nil +} + +var File_tfplugin5_proto protoreflect.FileDescriptor + +var file_tfplugin5_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x22, 0x3c, 0x0a, 0x0c, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, + 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 
0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, + 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, + 0x22, 0x6b, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, + 0x0a, 
0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, + 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, + 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, + 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x33, 0x0a, 0x04, + 0x53, 0x74, 0x6f, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, + 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 
0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, + 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, 0x0a, 0x06, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, + 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 
0x64, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, + 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x12, + 0x1c, 0x0a, 0x09, 
0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, + 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 
0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, + 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, + 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xa8, 0x01, 0x0a, 0x12, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, + 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 
0x6d, + 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, + 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, + 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, + 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x1a, 0xf3, 0x01, 0x0a, 
0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 
0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, + 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, + 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 
0x74, 0x69, 0x65, + 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 
0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, + 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 
0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, + 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, + 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, + 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x34, 0x0a, 0x09, 0x6e, 0x65, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, + 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, + 0x74, 
0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 
0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, + 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 
0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, + 0x0a, 0x13, 0x49, 
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, + 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe7, 0x03, + 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 
0x63, + 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 
0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 
0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 
0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xef, 0x0b, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6c, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7b, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 
0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 
0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x66, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x43, 0x61, 0x6c, 
0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, + 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 
0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, + 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_tfplugin5_proto_rawDescOnce sync.Once + file_tfplugin5_proto_rawDescData = file_tfplugin5_proto_rawDesc +) + +func file_tfplugin5_proto_rawDescGZIP() []byte { + file_tfplugin5_proto_rawDescOnce.Do(func() { + file_tfplugin5_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin5_proto_rawDescData) + }) + return file_tfplugin5_proto_rawDescData +} + +var 
file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 80) +var file_tfplugin5_proto_goTypes = []interface{}{ + (StringKind)(0), // 0: tfplugin5.StringKind + (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity + (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin5.Schema.NestedBlock.NestingMode + (*DynamicValue)(nil), // 3: tfplugin5.DynamicValue + (*Diagnostic)(nil), // 4: tfplugin5.Diagnostic + (*FunctionError)(nil), // 5: tfplugin5.FunctionError + (*AttributePath)(nil), // 6: tfplugin5.AttributePath + (*Stop)(nil), // 7: tfplugin5.Stop + (*RawState)(nil), // 8: tfplugin5.RawState + (*Schema)(nil), // 9: tfplugin5.Schema + (*ServerCapabilities)(nil), // 10: tfplugin5.ServerCapabilities + (*Function)(nil), // 11: tfplugin5.Function + (*GetMetadata)(nil), // 12: tfplugin5.GetMetadata + (*GetProviderSchema)(nil), // 13: tfplugin5.GetProviderSchema + (*PrepareProviderConfig)(nil), // 14: tfplugin5.PrepareProviderConfig + (*UpgradeResourceState)(nil), // 15: tfplugin5.UpgradeResourceState + (*ValidateResourceTypeConfig)(nil), // 16: tfplugin5.ValidateResourceTypeConfig + (*ValidateDataSourceConfig)(nil), // 17: tfplugin5.ValidateDataSourceConfig + (*Configure)(nil), // 18: tfplugin5.Configure + (*ReadResource)(nil), // 19: tfplugin5.ReadResource + (*PlanResourceChange)(nil), // 20: tfplugin5.PlanResourceChange + (*ApplyResourceChange)(nil), // 21: tfplugin5.ApplyResourceChange + (*ImportResourceState)(nil), // 22: tfplugin5.ImportResourceState + (*MoveResourceState)(nil), // 23: tfplugin5.MoveResourceState + (*ReadDataSource)(nil), // 24: tfplugin5.ReadDataSource + (*GetProvisionerSchema)(nil), // 25: tfplugin5.GetProvisionerSchema + (*ValidateProvisionerConfig)(nil), // 26: tfplugin5.ValidateProvisionerConfig + (*ProvisionResource)(nil), // 27: tfplugin5.ProvisionResource + (*GetFunctions)(nil), // 28: tfplugin5.GetFunctions + (*CallFunction)(nil), // 29: tfplugin5.CallFunction + 
(*AttributePath_Step)(nil), // 30: tfplugin5.AttributePath.Step + (*Stop_Request)(nil), // 31: tfplugin5.Stop.Request + (*Stop_Response)(nil), // 32: tfplugin5.Stop.Response + nil, // 33: tfplugin5.RawState.FlatmapEntry + (*Schema_Block)(nil), // 34: tfplugin5.Schema.Block + (*Schema_Attribute)(nil), // 35: tfplugin5.Schema.Attribute + (*Schema_NestedBlock)(nil), // 36: tfplugin5.Schema.NestedBlock + (*Function_Parameter)(nil), // 37: tfplugin5.Function.Parameter + (*Function_Return)(nil), // 38: tfplugin5.Function.Return + (*GetMetadata_Request)(nil), // 39: tfplugin5.GetMetadata.Request + (*GetMetadata_Response)(nil), // 40: tfplugin5.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 41: tfplugin5.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 42: tfplugin5.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 43: tfplugin5.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 44: tfplugin5.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 45: tfplugin5.GetProviderSchema.Response + nil, // 46: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 47: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 48: tfplugin5.GetProviderSchema.Response.FunctionsEntry + (*PrepareProviderConfig_Request)(nil), // 49: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 50: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 51: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 52: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 53: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 54: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 55: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 56: 
tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 57: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 58: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 59: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 60: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 61: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 62: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 63: tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 64: tfplugin5.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 65: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 66: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 67: tfplugin5.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 68: tfplugin5.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 69: tfplugin5.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 70: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 71: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 72: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 73: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 74: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 75: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 76: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 77: tfplugin5.ProvisionResource.Response + (*GetFunctions_Request)(nil), // 78: tfplugin5.GetFunctions.Request + (*GetFunctions_Response)(nil), // 79: tfplugin5.GetFunctions.Response + nil, // 80: tfplugin5.GetFunctions.Response.FunctionsEntry + 
(*CallFunction_Request)(nil), // 81: tfplugin5.CallFunction.Request + (*CallFunction_Response)(nil), // 82: tfplugin5.CallFunction.Response +} +var file_tfplugin5_proto_depIdxs = []int32{ + 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity + 6, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath + 30, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 33, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 34, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 37, // 5: tfplugin5.Function.parameters:type_name -> tfplugin5.Function.Parameter + 37, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter + 38, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return + 0, // 8: tfplugin5.Function.description_kind:type_name -> tfplugin5.StringKind + 35, // 9: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 36, // 10: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 11: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 12: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 34, // 13: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 14: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 0, // 15: tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind + 10, // 16: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 4, // 17: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 42, // 18: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata + 43, // 19: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata + 41, // 20: 
tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata + 9, // 21: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 46, // 22: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 47, // 23: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 4, // 24: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 9, // 25: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 10, // 26: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 48, // 27: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry + 9, // 28: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 9, // 29: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 11, // 30: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 31: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 34: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 35: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 36: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 37: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 38: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> 
tfplugin5.Diagnostic + 3, // 39: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 40: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 41: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 42: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 43: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 46: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 47: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 48: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 49: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 50: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 51: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 6, // 52: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 53: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 54: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 55: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 56: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 57: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 58: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 59: 
tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 60: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 66, // 61: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 62: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 63: tfplugin5.MoveResourceState.Request.source_state:type_name -> tfplugin5.RawState + 3, // 64: tfplugin5.MoveResourceState.Response.target_state:type_name -> tfplugin5.DynamicValue + 4, // 65: tfplugin5.MoveResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 66: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 67: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 68: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 69: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 9, // 70: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 71: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 72: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 73: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 74: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 75: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 76: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 80, // 77: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry + 4, // 78: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 11, // 79: 
tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 80: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue + 3, // 81: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue + 5, // 82: tfplugin5.CallFunction.Response.error:type_name -> tfplugin5.FunctionError + 39, // 83: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request + 44, // 84: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 49, // 85: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 53, // 86: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 55, // 87: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 51, // 88: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 57, // 89: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 59, // 90: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 61, // 91: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 63, // 92: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 65, // 93: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 68, // 94: tfplugin5.Provider.MoveResourceState:input_type -> tfplugin5.MoveResourceState.Request + 70, // 95: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 78, // 96: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request + 81, // 97: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request + 31, // 98: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 72, // 99: tfplugin5.Provisioner.GetSchema:input_type -> 
tfplugin5.GetProvisionerSchema.Request + 74, // 100: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 76, // 101: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 31, // 102: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 40, // 103: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response + 45, // 104: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 50, // 105: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 54, // 106: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 56, // 107: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 52, // 108: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 58, // 109: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 60, // 110: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 62, // 111: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 64, // 112: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 67, // 113: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 69, // 114: tfplugin5.Provider.MoveResourceState:output_type -> tfplugin5.MoveResourceState.Response + 71, // 115: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 79, // 116: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response + 82, // 117: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response + 32, // 118: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 73, // 119: 
tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 75, // 120: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 77, // 121: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 32, // 122: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response + 103, // [103:123] is the sub-list for method output_type + 83, // [83:103] is the sub-list for method input_type + 83, // [83:83] is the sub-list for extension type_name + 83, // [83:83] is the sub-list for extension extendee + 0, // [0:83] is the sub-list for field type_name +} + +func init() { file_tfplugin5_proto_init() } +func file_tfplugin5_proto_init() { + if File_tfplugin5_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin5_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FunctionError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop); i { + case 0: + return &v.state + case 
1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[20].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*MoveResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_DataSourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_ResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[47].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Request); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetProvisionerSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin5_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_tfplugin5_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin5_proto_rawDesc, + NumEnums: 3, + NumMessages: 80, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_tfplugin5_proto_goTypes, + DependencyIndexes: file_tfplugin5_proto_depIdxs, + EnumInfos: file_tfplugin5_proto_enumTypes, + MessageInfos: file_tfplugin5_proto_msgTypes, + }.Build() + File_tfplugin5_proto = out.File + file_tfplugin5_proto_rawDesc = nil + file_tfplugin5_proto_goTypes = nil + file_tfplugin5_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ProviderClient interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetSchema RPC as a fallback. + GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx 
context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) + // ////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) { + out := new(GetMetadata_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) { + out := new(MoveResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/MoveResourceState", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetFunctions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/CallFunction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetSchema RPC as a fallback. + GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. 
	GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error)
	PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error)
	ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error)
	ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error)
	UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error)
	// ////// One-time initialization, called before other functions below
	Configure(context.Context, *Configure_Request) (*Configure_Response, error)
	// ////// Managed Resource Lifecycle
	ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error)
	PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error)
	ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error)
	ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error)
	MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error)
	ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error)
	// GetFunctions returns the definitions of all functions.
	GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error)
	// CallFunction runs the provider-defined function logic and returns
	// the result with any diagnostics.
	CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error)
	// ////// Graceful Shutdown
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}

// UnimplementedProviderServer can be embedded to have forward compatible implementations.
// Every stub below returns a codes.Unimplemented gRPC status, so embedders
// only need to override the RPCs they actually support.
type UnimplementedProviderServer struct {
}

func (*UnimplementedProviderServer) GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented")
}
func (*UnimplementedProviderServer) GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented")
}
func (*UnimplementedProviderServer) PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented")
}
func (*UnimplementedProviderServer) ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented")
}
func (*UnimplementedProviderServer) ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented")
}
func (*UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented")
}
func (*UnimplementedProviderServer) Configure(context.Context, *Configure_Request) (*Configure_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented")
}
func (*UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented")
}
func (*UnimplementedProviderServer)
PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") +} +func (*UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (*UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (*UnimplementedProviderServer) MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveResourceState not implemented") +} +func (*UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (*UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (*UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not implemented") +} +func (*UnimplementedProviderServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetadata_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetMetadata(ctx, req.(*GetMetadata_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } 
+ return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_MoveResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).MoveResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/MoveResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ProviderServer).MoveResourceState(ctx, req.(*MoveResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetFunctions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/CallFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetadata", + Handler: _Provider_GetMetadata_Handler, + }, + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "MoveResourceState", + Handler: _Provider_MoveResourceState_Handler, + }, + { + 
MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc grpc.ClientConnInterface +} + +func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) 
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ValidateProvisionerConfig performs the unary
// /tfplugin5.Provisioner/ValidateProvisionerConfig call.
func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) {
	out := new(ValidateProvisionerConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ProvisionResource opens the server-streaming
// /tfplugin5.Provisioner/ProvisionResource RPC: it sends the single request,
// closes the send side, and returns a stream the caller Recv()s responses from.
func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...)
	if err != nil {
		return nil, err
	}
	x := &provisionerProvisionResourceClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// Provisioner_ProvisionResourceClient is the receive side of the
// ProvisionResource server stream.
type Provisioner_ProvisionResourceClient interface {
	Recv() (*ProvisionResource_Response, error)
	grpc.ClientStream
}

// provisionerProvisionResourceClient adds a typed Recv over the raw
// grpc.ClientStream.
type provisionerProvisionResourceClient struct {
	grpc.ClientStream
}

// Recv blocks for the next streamed ProvisionResource_Response.
func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) {
	m := new(ProvisionResource_Response)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// Stop performs the unary /tfplugin5.Provisioner/Stop call.
func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) {
	out := new(Stop_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ProvisionerServer is the server API for Provisioner service.
// ProvisionerServer is implemented by provisioner plugins; ProvisionResource
// is server-streaming (writes responses to the supplied stream), the other
// three RPCs are unary.
type ProvisionerServer interface {
	GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error)
	ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error)
	ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}

// UnimplementedProvisionerServer can be embedded to have forward compatible implementations.
// Each stub returns a codes.Unimplemented gRPC status.
type UnimplementedProvisionerServer struct {
}

func (*UnimplementedProvisionerServer) GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented")
}
func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented")
}
func (*UnimplementedProvisionerServer) ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error {
	return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented")
}
func (*UnimplementedProvisionerServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented")
}

// RegisterProvisionerServer registers srv's handlers with the gRPC server
// under the service descriptor defined at the bottom of this file.
func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) {
	s.RegisterService(&_Provisioner_serviceDesc, srv)
}

// _Provisioner_GetSchema_Handler decodes the request and dispatches to
// ProvisionerServer.GetSchema, routing through the interceptor when present.
func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetProvisionerSchema_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).GetSchema(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:
srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/pkg/tfplugin5/tfplugin5.proto b/pkg/tfplugin5/tfplugin5.proto new file mode 120000 index 00000000000..0d938c65a82 --- /dev/null +++ b/pkg/tfplugin5/tfplugin5.proto @@ -0,0 +1 @@ +../../docs/plugin-protocol/tfplugin5.5.proto \ No newline at end of file diff --git a/pkg/tfplugin6/tfplugin6.pb.go b/pkg/tfplugin6/tfplugin6.pb.go new file mode 100644 index 00000000000..6866c1fbc0f --- /dev/null +++ b/pkg/tfplugin6/tfplugin6.pb.go @@ -0,0 +1,6507 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Terraform Plugin RPC protocol version 6.5 +// +// This file defines version 6.5 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 6 to follow +// should copy this file and modify the copy while maintaing backwards +// compatibility. 
Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the main +// branch or any other development branch. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v3.15.6 +// source: tfplugin6.proto + +package tfplugin6 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StringKind int32 + +const ( + StringKind_PLAIN StringKind = 0 + StringKind_MARKDOWN StringKind = 1 +) + +// Enum value maps for StringKind. 
+var ( + StringKind_name = map[int32]string{ + 0: "PLAIN", + 1: "MARKDOWN", + } + StringKind_value = map[string]int32{ + "PLAIN": 0, + "MARKDOWN": 1, + } +) + +func (x StringKind) Enum() *StringKind { + p := new(StringKind) + *p = x + return p +} + +func (x StringKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StringKind) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[0].Descriptor() +} + +func (StringKind) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[0] +} + +func (x StringKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StringKind.Descriptor instead. +func (StringKind) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +// Enum value maps for Diagnostic_Severity. +var ( + Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", + } + Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, + } +) + +func (x Diagnostic_Severity) Enum() *Diagnostic_Severity { + p := new(Diagnostic_Severity) + *p = x + return p +} + +func (x Diagnostic_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[1].Descriptor() +} + +func (Diagnostic_Severity) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[1] +} + +func (x Diagnostic_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Diagnostic_Severity.Descriptor instead. 
+func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +// Enum value maps for Schema_NestedBlock_NestingMode. +var ( + Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", + } + Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, + } +) + +func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode { + p := new(Schema_NestedBlock_NestingMode) + *p = x + return p +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[2].Descriptor() +} + +func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[2] +} + +func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead. 
+func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 2, 0} +} + +type Schema_Object_NestingMode int32 + +const ( + Schema_Object_INVALID Schema_Object_NestingMode = 0 + Schema_Object_SINGLE Schema_Object_NestingMode = 1 + Schema_Object_LIST Schema_Object_NestingMode = 2 + Schema_Object_SET Schema_Object_NestingMode = 3 + Schema_Object_MAP Schema_Object_NestingMode = 4 +) + +// Enum value maps for Schema_Object_NestingMode. +var ( + Schema_Object_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + } + Schema_Object_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + } +) + +func (x Schema_Object_NestingMode) Enum() *Schema_Object_NestingMode { + p := new(Schema_Object_NestingMode) + *p = x + return p +} + +func (x Schema_Object_NestingMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Schema_Object_NestingMode) Descriptor() protoreflect.EnumDescriptor { + return file_tfplugin6_proto_enumTypes[3].Descriptor() +} + +func (Schema_Object_NestingMode) Type() protoreflect.EnumType { + return &file_tfplugin6_proto_enumTypes[3] +} + +func (x Schema_Object_NestingMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Schema_Object_NestingMode.Descriptor instead. +func (Schema_Object_NestingMode) EnumDescriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 3, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+type DynamicValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` +} + +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. 
+func (*DynamicValue) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{0} +} + +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack + } + return nil +} + +func (x *DynamicValue) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +type Diagnostic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin6.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` +} + +func (x *Diagnostic) Reset() { + *x = Diagnostic{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostic) ProtoMessage() {} + +func (x *Diagnostic) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead. 
+func (*Diagnostic) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{1} +} + +func (x *Diagnostic) GetSeverity() Diagnostic_Severity { + if x != nil { + return x.Severity + } + return Diagnostic_INVALID +} + +func (x *Diagnostic) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Diagnostic) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +func (x *Diagnostic) GetAttribute() *AttributePath { + if x != nil { + return x.Attribute + } + return nil +} + +type FunctionError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` + // The optional function_argument records the index position of the + // argument which caused the error. + FunctionArgument *int64 `protobuf:"varint,2,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` +} + +func (x *FunctionError) Reset() { + *x = FunctionError{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FunctionError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FunctionError) ProtoMessage() {} + +func (x *FunctionError) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FunctionError.ProtoReflect.Descriptor instead. 
+func (*FunctionError) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{2} +} + +func (x *FunctionError) GetText() string { + if x != nil { + return x.Text + } + return "" +} + +func (x *FunctionError) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + +type AttributePath struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` +} + +func (x *AttributePath) Reset() { + *x = AttributePath{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath) ProtoMessage() {} + +func (x *AttributePath) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead. 
+func (*AttributePath) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributePath) GetSteps() []*AttributePath_Step { + if x != nil { + return x.Steps + } + return nil +} + +type StopProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopProvider) Reset() { + *x = StopProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider) ProtoMessage() {} + +func (x *StopProvider) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider.ProtoReflect.Descriptor instead. +func (*StopProvider) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4} +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
+type RawState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RawState) Reset() { + *x = RawState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawState) ProtoMessage() {} + +func (x *RawState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawState.ProtoReflect.Descriptor instead. +func (*RawState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{5} +} + +func (x *RawState) GetJson() []byte { + if x != nil { + return x.Json + } + return nil +} + +func (x *RawState) GetFlatmap() map[string]string { + if x != nil { + return x.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource or Provider. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6} +} + +func (x *Schema) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. + Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. 
+ Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. + DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. 
+func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + +// ServerCapabilities allows providers to communicate extra information +// regarding supported protocol features. This is used to indicate +// availability of certain forward-compatible changes which may be optional +// in a major protocol version, but cannot be tested for directly. +type ServerCapabilities struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The plan_destroy capability signals that a provider expects a call + // to PlanResourceChange when a resource is going to be destroyed. + PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"` + // The get_provider_schema_optional capability indicates that this + // provider does not require calling GetProviderSchema to operate + // normally, and the caller can used a cached copy of the provider's + // schema. 
+ GetProviderSchemaOptional bool `protobuf:"varint,2,opt,name=get_provider_schema_optional,json=getProviderSchemaOptional,proto3" json:"get_provider_schema_optional,omitempty"` + // The move_resource_state capability signals that a provider supports the + // MoveResourceState RPC. + MoveResourceState bool `protobuf:"varint,3,opt,name=move_resource_state,json=moveResourceState,proto3" json:"move_resource_state,omitempty"` +} + +func (x *ServerCapabilities) Reset() { + *x = ServerCapabilities{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerCapabilities) ProtoMessage() {} + +func (x *ServerCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead. 
+func (*ServerCapabilities) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{8} +} + +func (x *ServerCapabilities) GetPlanDestroy() bool { + if x != nil { + return x.PlanDestroy + } + return false +} + +func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool { + if x != nil { + return x.GetProviderSchemaOptional + } + return false +} + +func (x *ServerCapabilities) GetMoveResourceState() bool { + if x != nil { + return x.MoveResourceState + } + return false +} + +type GetMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata) Reset() { + *x = GetMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata) ProtoMessage() {} + +func (x *GetMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9} +} + +type GetProviderSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema) Reset() { + *x = GetProviderSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema) ProtoMessage() {} + +func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10} +} + +type ValidateProviderConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateProviderConfig) Reset() { + *x = ValidateProviderConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig) ProtoMessage() {} + +func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateProviderConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11} +} + +type UpgradeResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpgradeResourceState) Reset() { + *x = UpgradeResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState) ProtoMessage() {} + +func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12} +} + +type ValidateResourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateResourceConfig) Reset() { + *x = ValidateResourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig) ProtoMessage() {} + +func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13} +} + +type ValidateDataResourceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidateDataResourceConfig) Reset() { + *x = ValidateDataResourceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig) ProtoMessage() {} + +func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig.ProtoReflect.Descriptor instead. 
+func (*ValidateDataResourceConfig) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14} +} + +type ConfigureProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureProvider) Reset() { + *x = ConfigureProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider) ProtoMessage() {} + +func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider.ProtoReflect.Descriptor instead. 
+func (*ConfigureProvider) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15} +} + +type ReadResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadResource) Reset() { + *x = ReadResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource) ProtoMessage() {} + +func (x *ReadResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. +func (*ReadResource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16} +} + +type PlanResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PlanResourceChange) Reset() { + *x = PlanResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange) ProtoMessage() {} + +func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
PlanResourceChange.ProtoReflect.Descriptor instead. +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17} +} + +type ApplyResourceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyResourceChange) Reset() { + *x = ApplyResourceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange) ProtoMessage() {} + +func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{18} +} + +type ImportResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ImportResourceState) Reset() { + *x = ImportResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState) ProtoMessage() {} + +func (x *ImportResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19} +} + +type MoveResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MoveResourceState) Reset() { + *x = MoveResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState) ProtoMessage() {} + +func (x *MoveResourceState) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead. 
+func (*MoveResourceState) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20} +} + +type ReadDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadDataSource) Reset() { + *x = ReadDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource) ProtoMessage() {} + +func (x *ReadDataSource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. 
+func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. +func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{23} +} + +type AttributePath_Step struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` +} + +func (x *AttributePath_Step) Reset() { + *x = AttributePath_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributePath_Step) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributePath_Step) ProtoMessage() {} + +func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead. +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *AttributePath_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyString() string { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (x *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. 
+ ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +type StopProvider_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopProvider_Request) Reset() { + *x = StopProvider_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider_Request) ProtoMessage() {} + +func (x *StopProvider_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider_Request.ProtoReflect.Descriptor instead. 
+func (*StopProvider_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4, 0} +} + +type StopProvider_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *StopProvider_Response) Reset() { + *x = StopProvider_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopProvider_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopProvider_Response) ProtoMessage() {} + +func (x *StopProvider_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopProvider_Response.ProtoReflect.Descriptor instead. 
+func (*StopProvider_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *StopProvider_Response) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type Schema_Block struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Block) Reset() { + *x = Schema_Block{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Block) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Block) ProtoMessage() {} + +func (x *Schema_Block) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead. 
+func (*Schema_Block) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *Schema_Block) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Schema_Block) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if x != nil { + return x.BlockTypes + } + return nil +} + +func (x *Schema_Block) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Block) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Block) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + NestedType *Schema_Object `protobuf:"bytes,10,opt,name=nested_type,json=nestedType,proto3" json:"nested_type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` +} + +func (x *Schema_Attribute) Reset() { + *x = 
Schema_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Attribute) ProtoMessage() {} + +func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead. +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *Schema_Attribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Schema_Attribute) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema_Attribute) GetNestedType() *Schema_Object { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *Schema_Attribute) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema_Attribute) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Schema_Attribute) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + +func (x *Schema_Attribute) GetComputed() bool { + if x != nil { + return x.Computed + } + return false +} + +func (x *Schema_Attribute) GetSensitive() bool { + if x != nil { + return x.Sensitive + } + return false +} + +func (x *Schema_Attribute) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Schema_Attribute) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + state 
protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_NestedBlock) Reset() { + *x = Schema_NestedBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_NestedBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_NestedBlock) ProtoMessage() {} + +func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead. 
+func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 2} +} + +func (x *Schema_NestedBlock) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *Schema_NestedBlock) GetBlock() *Schema_Block { + if x != nil { + return x.Block + } + return nil +} + +func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (x *Schema_NestedBlock) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema_NestedBlock) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Schema_Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Attributes []*Schema_Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + Nesting Schema_Object_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin6.Schema_Object_NestingMode" json:"nesting,omitempty"` + // MinItems and MaxItems were never used in the protocol, and have no + // effect on validation. + // + // Deprecated: Marked as deprecated in tfplugin6.proto. + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + // Deprecated: Marked as deprecated in tfplugin6.proto. 
+ MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` +} + +func (x *Schema_Object) Reset() { + *x = Schema_Object{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_Object) ProtoMessage() {} + +func (x *Schema_Object) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_Object.ProtoReflect.Descriptor instead. +func (*Schema_Object) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 3} +} + +func (x *Schema_Object) GetAttributes() []*Schema_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Schema_Object) GetNesting() Schema_Object_NestingMode { + if x != nil { + return x.Nesting + } + return Schema_Object_INVALID +} + +// Deprecated: Marked as deprecated in tfplugin6.proto. +func (x *Schema_Object) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +// Deprecated: Marked as deprecated in tfplugin6.proto. +func (x *Schema_Object) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +type Function_Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the human-readable display name for the parameter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // type is the type constraint for the parameter. 
+ Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"` + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"` + // description is human-readable documentation for the parameter. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` +} + +func (x *Function_Parameter) Reset() { + *x = Function_Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Parameter) ProtoMessage() {} + +func (x *Function_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. 
+func (*Function_Parameter) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *Function_Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Function_Parameter) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Function_Parameter) GetAllowNullValue() bool { + if x != nil { + return x.AllowNullValue + } + return false +} + +func (x *Function_Parameter) GetAllowUnknownValues() bool { + if x != nil { + return x.AllowUnknownValues + } + return false +} + +func (x *Function_Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function_Parameter) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +type Function_Return struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the type constraint for the function result. + Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. 
+func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. +func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` + Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. 
+ Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` +} + +func (x *GetMetadata_Response) Reset() { + *x = GetMetadata_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Response) ProtoMessage() {} + +func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. +func (*GetMetadata_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetMetadata_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetMetadata_Response) GetDataSources() []*GetMetadata_DataSourceMetadata { + if x != nil { + return x.DataSources + } + return nil +} + +func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { + if x != nil { + return x.Resources + } + return nil +} + +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type GetMetadata_DataSourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_DataSourceMetadata) Reset() { + *x = GetMetadata_DataSourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_DataSourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. +func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetMetadata_ResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *GetMetadata_ResourceMetadata) Reset() { + *x = GetMetadata_ResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_ResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_ResourceMetadata) ProtoMessage() {} + +func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *GetMetadata_ResourceMetadata) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +type GetProviderSchema_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetProviderSchema_Request) Reset() { + *x = GetProviderSchema_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Request) ProtoMessage() {} + +func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} +} + +type GetProviderSchema_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. 
+ Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetProviderSchema_Response) Reset() { + *x = GetProviderSchema_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProviderSchema_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProviderSchema_Response) ProtoMessage() {} + +func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
+func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 1} +} + +func (x *GetProviderSchema_Response) GetProvider() *Schema { + if x != nil { + return x.Provider + } + return nil +} + +func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if x != nil { + return x.ResourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if x != nil { + return x.DataSourceSchemas + } + return nil +} + +func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *GetProviderSchema_Response) GetProviderMeta() *Schema { + if x != nil { + return x.ProviderMeta + } + return nil +} + +func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities { + if x != nil { + return x.ServerCapabilities + } + return nil +} + +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +type ValidateProviderConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateProviderConfig_Request) Reset() { + *x = ValidateProviderConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig_Request) ProtoMessage() {} + +func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig_Request.ProtoReflect.Descriptor instead. +func (*ValidateProviderConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *ValidateProviderConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateProviderConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateProviderConfig_Response) Reset() { + *x = ValidateProviderConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateProviderConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateProviderConfig_Response) ProtoMessage() {} + +func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateProviderConfig_Response.ProtoReflect.Descriptor instead. +func (*ValidateProviderConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} +} + +func (x *ValidateProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// UpgradeResourceState RPC. 
+// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. Additionally, the configuration is +// not guaranteed to exist (in the case of resource destruction), be wholly +// known, nor match the given prior state, which could lead to unexpected +// provider behaviors for practitioners. +type UpgradeResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. 
+ RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` +} + +func (x *UpgradeResourceState_Request) Reset() { + *x = UpgradeResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Request) ProtoMessage() {} + +func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *UpgradeResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *UpgradeResourceState_Request) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *UpgradeResourceState_Request) GetRawState() *RawState { + if x != nil { + return x.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *UpgradeResourceState_Response) Reset() { + *x = UpgradeResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpgradeResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpgradeResourceState_Response) ProtoMessage() {} + +func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if x != nil { + return x.UpgradedState + } + return nil +} + +func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateResourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateResourceConfig_Request) Reset() { + *x = ValidateResourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig_Request) ProtoMessage() {} + +func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} +} + +func (x *ValidateResourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateResourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateResourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateResourceConfig_Response) Reset() { + *x = ValidateResourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResourceConfig_Response) ProtoMessage() {} + +func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResourceConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateResourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} +} + +func (x *ValidateResourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ValidateDataResourceConfig_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ValidateDataResourceConfig_Request) Reset() { + *x = ValidateDataResourceConfig_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig_Request) ProtoMessage() {} + +func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig_Request.ProtoReflect.Descriptor instead. 
+func (*ValidateDataResourceConfig_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *ValidateDataResourceConfig_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ValidateDataResourceConfig_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ValidateDataResourceConfig_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ValidateDataResourceConfig_Response) Reset() { + *x = ValidateDataResourceConfig_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateDataResourceConfig_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateDataResourceConfig_Response) ProtoMessage() {} + +func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateDataResourceConfig_Response.ProtoReflect.Descriptor instead. 
+func (*ValidateDataResourceConfig_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *ValidateDataResourceConfig_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type ConfigureProvider_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *ConfigureProvider_Request) Reset() { + *x = ConfigureProvider_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider_Request) ProtoMessage() {} + +func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider_Request.ProtoReflect.Descriptor instead. 
+func (*ConfigureProvider_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *ConfigureProvider_Request) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion + } + return "" +} + +func (x *ConfigureProvider_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +type ConfigureProvider_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ConfigureProvider_Response) Reset() { + *x = ConfigureProvider_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureProvider_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureProvider_Response) ProtoMessage() {} + +func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureProvider_Response.ProtoReflect.Descriptor instead. +func (*ConfigureProvider_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} +} + +func (x *ConfigureProvider_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Request is the message that is sent to the provider during the +// ReadResource RPC. +// +// This message intentionally does not include configuration data as any +// configuration-based or configuration-conditional changes should occur +// during the PlanResourceChange RPC. 
Additionally, the configuration is +// not guaranteed to be wholly known nor match the given prior state, which +// could lead to unexpected provider behaviors for practitioners. +type ReadResource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadResource_Request) Reset() { + *x = ReadResource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Request) ProtoMessage() {} + +func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} +} + +func (x *ReadResource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadResource_Request) GetCurrentState() *DynamicValue { + if x != nil { + return x.CurrentState + } + return nil +} + +func (x *ReadResource_Request) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ReadResource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ReadResource_Response) Reset() { + *x = ReadResource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadResource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadResource_Response) ProtoMessage() {} + +func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} +} + +func (x *ReadResource_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ReadResource_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type PlanResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *PlanResourceChange_Request) Reset() { + *x = PlanResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Request) ProtoMessage() {} + +func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} +} + +func (x *PlanResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if x != nil { + return x.ProposedNewState + } + return nil +} + +func (x *PlanResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *PlanResourceChange_Request) GetPriorPrivate() []byte { + if x != nil { + return x.PriorPrivate + } + return nil +} + +func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type PlanResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to 
the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *PlanResourceChange_Response) Reset() { + *x = PlanResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlanResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlanResourceChange_Response) ProtoMessage() {} + +func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} +} + +func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if x != nil { + return x.RequiresReplace + } + return nil +} + +func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ApplyResourceChange_Request) Reset() { + *x = ApplyResourceChange_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Request) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Request) ProtoMessage() {} + +func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *ApplyResourceChange_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if x != nil { + return x.PriorState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if x != nil { + return x.PlannedState + } + return nil +} + +func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if x != nil { + return x.PlannedPrivate + } + return nil +} + +func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ApplyResourceChange_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 
permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` +} + +func (x *ApplyResourceChange_Response) Reset() { + *x = ApplyResourceChange_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyResourceChange_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyResourceChange_Response) ProtoMessage() {} + +func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. 
+func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if x != nil { + return x.NewState + } + return nil +} + +func (x *ApplyResourceChange_Response) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if x != nil { + return x.LegacyTypeSystem + } + return false +} + +type ImportResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ImportResourceState_Request) Reset() { + *x = ImportResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Request) ProtoMessage() {} + +func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *ImportResourceState_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_Request) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` +} + +func (x *ImportResourceState_ImportedResource) Reset() { + *x = ImportResourceState_ImportedResource{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_ImportedResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_ImportedResource) ProtoMessage() {} + +func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 1} +} + +func (x *ImportResourceState_ImportedResource) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ImportResourceState_ImportedResource) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +type ImportResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ImportResourceState_Response) Reset() { + *x = ImportResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportResourceState_Response) ProtoMessage() {} + +func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 2} +} + +func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if x != nil { + return x.ImportedResources + } + return nil +} + +func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type MoveResourceState_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address of the provider the resource is being moved from. + SourceProviderAddress string `protobuf:"bytes,1,opt,name=source_provider_address,json=sourceProviderAddress,proto3" json:"source_provider_address,omitempty"` + // The resource type that the resource is being moved from. + SourceTypeName string `protobuf:"bytes,2,opt,name=source_type_name,json=sourceTypeName,proto3" json:"source_type_name,omitempty"` + // The schema version of the resource type that the resource is being + // moved from. + SourceSchemaVersion int64 `protobuf:"varint,3,opt,name=source_schema_version,json=sourceSchemaVersion,proto3" json:"source_schema_version,omitempty"` + // The raw state of the resource being moved. Only the json field is + // populated, as there should be no legacy providers using the flatmap + // format that support newly introduced RPCs. + SourceState *RawState `protobuf:"bytes,4,opt,name=source_state,json=sourceState,proto3" json:"source_state,omitempty"` + // The resource type that the resource is being moved to. + TargetTypeName string `protobuf:"bytes,5,opt,name=target_type_name,json=targetTypeName,proto3" json:"target_type_name,omitempty"` + // The private state of the resource being moved. 
+ SourcePrivate []byte `protobuf:"bytes,6,opt,name=source_private,json=sourcePrivate,proto3" json:"source_private,omitempty"` +} + +func (x *MoveResourceState_Request) Reset() { + *x = MoveResourceState_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Request) ProtoMessage() {} + +func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead. +func (*MoveResourceState_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *MoveResourceState_Request) GetSourceProviderAddress() string { + if x != nil { + return x.SourceProviderAddress + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceTypeName() string { + if x != nil { + return x.SourceTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourceSchemaVersion() int64 { + if x != nil { + return x.SourceSchemaVersion + } + return 0 +} + +func (x *MoveResourceState_Request) GetSourceState() *RawState { + if x != nil { + return x.SourceState + } + return nil +} + +func (x *MoveResourceState_Request) GetTargetTypeName() string { + if x != nil { + return x.TargetTypeName + } + return "" +} + +func (x *MoveResourceState_Request) GetSourcePrivate() []byte { + if x != nil { + return x.SourcePrivate + } + return nil +} + +type MoveResourceState_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + // The state of the resource after it has been moved. + TargetState *DynamicValue `protobuf:"bytes,1,opt,name=target_state,json=targetState,proto3" json:"target_state,omitempty"` + // Any diagnostics that occurred during the move. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // The private state of the resource after it has been moved. + TargetPrivate []byte `protobuf:"bytes,3,opt,name=target_private,json=targetPrivate,proto3" json:"target_private,omitempty"` +} + +func (x *MoveResourceState_Response) Reset() { + *x = MoveResourceState_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResourceState_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResourceState_Response) ProtoMessage() {} + +func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead. 
+func (*MoveResourceState_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *MoveResourceState_Response) GetTargetState() *DynamicValue { + if x != nil { + return x.TargetState + } + return nil +} + +func (x *MoveResourceState_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +func (x *MoveResourceState_Response) GetTargetPrivate() []byte { + if x != nil { + return x.TargetPrivate + } + return nil +} + +type ReadDataSource_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` +} + +func (x *ReadDataSource_Request) Reset() { + *x = ReadDataSource_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Request) ProtoMessage() {} + +func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *ReadDataSource_Request) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +func (x *ReadDataSource_Request) GetConfig() *DynamicValue { + if x != nil { + return x.Config + } + return nil +} + +func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if x != nil { + return x.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *ReadDataSource_Response) Reset() { + *x = ReadDataSource_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadDataSource_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDataSource_Response) ProtoMessage() {} + +func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
+func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *ReadDataSource_Response) GetState() *DynamicValue { + if x != nil { + return x.State + } + return nil +} + +func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{22, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+ Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` +} + +func (x *CallFunction_Request) Reset() { + *x = CallFunction_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Request) ProtoMessage() {} + +func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. +func (*CallFunction_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{23, 0} +} + +func (x *CallFunction_Request) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CallFunction_Request) GetArguments() []*DynamicValue { + if x != nil { + return x.Arguments + } + return nil +} + +type CallFunction_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // result is result value after running the function logic. + Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // error is any errors from the function logic. 
+ Error *FunctionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *CallFunction_Response) Reset() { + *x = CallFunction_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Response) ProtoMessage() {} + +func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. +func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{23, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetError() *FunctionError { + if x != nil { + return x.Error + } + return nil +} + +var File_tfplugin6_proto protoreflect.FileDescriptor + +var file_tfplugin6_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x22, 0x3c, 0x0a, 0x0c, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, + 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 
0x0a, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, + 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, + 0x22, 0x6b, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, + 0x0a, 
0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, + 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, + 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, + 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x0c, + 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, + 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, + 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, + 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x95, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x2e, + 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xe4, 0x02, 0x0a, 0x09, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 
0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, + 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 
0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, + 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, + 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, 0x8b, 0x02, 0x0a, + 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x69, 
0x6e, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x61, + 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, + 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, + 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, + 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, + 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, + 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x12, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, + 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, + 0x0a, 
0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, + 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, + 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, + 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, + 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 
0x61, + 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x22, 0xba, 0x01, 0x0a, 
0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, + 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, + 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 
0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, + 0x53, 
0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, + 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, + 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 
0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, + 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, + 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x70, 
0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, + 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, + 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x22, 0xe7, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 
0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xa8, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3a, 0x0a, 0x0c, 0x74, 
0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0x9c, 0x02, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, + 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 
0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 
0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xd1, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xa4, 0x0c, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 
0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 
0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 
0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, + 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, + 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 
0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, + 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, 0x66, 0x75, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x6f, 0x66, + 0x75, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_tfplugin6_proto_rawDescOnce sync.Once + file_tfplugin6_proto_rawDescData = file_tfplugin6_proto_rawDesc +) + +func 
file_tfplugin6_proto_rawDescGZIP() []byte { + file_tfplugin6_proto_rawDescOnce.Do(func() { + file_tfplugin6_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin6_proto_rawDescData) + }) + return file_tfplugin6_proto_rawDescData +} + +var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 72) +var file_tfplugin6_proto_goTypes = []interface{}{ + (StringKind)(0), // 0: tfplugin6.StringKind + (Diagnostic_Severity)(0), // 1: tfplugin6.Diagnostic.Severity + (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin6.Schema.NestedBlock.NestingMode + (Schema_Object_NestingMode)(0), // 3: tfplugin6.Schema.Object.NestingMode + (*DynamicValue)(nil), // 4: tfplugin6.DynamicValue + (*Diagnostic)(nil), // 5: tfplugin6.Diagnostic + (*FunctionError)(nil), // 6: tfplugin6.FunctionError + (*AttributePath)(nil), // 7: tfplugin6.AttributePath + (*StopProvider)(nil), // 8: tfplugin6.StopProvider + (*RawState)(nil), // 9: tfplugin6.RawState + (*Schema)(nil), // 10: tfplugin6.Schema + (*Function)(nil), // 11: tfplugin6.Function + (*ServerCapabilities)(nil), // 12: tfplugin6.ServerCapabilities + (*GetMetadata)(nil), // 13: tfplugin6.GetMetadata + (*GetProviderSchema)(nil), // 14: tfplugin6.GetProviderSchema + (*ValidateProviderConfig)(nil), // 15: tfplugin6.ValidateProviderConfig + (*UpgradeResourceState)(nil), // 16: tfplugin6.UpgradeResourceState + (*ValidateResourceConfig)(nil), // 17: tfplugin6.ValidateResourceConfig + (*ValidateDataResourceConfig)(nil), // 18: tfplugin6.ValidateDataResourceConfig + (*ConfigureProvider)(nil), // 19: tfplugin6.ConfigureProvider + (*ReadResource)(nil), // 20: tfplugin6.ReadResource + (*PlanResourceChange)(nil), // 21: tfplugin6.PlanResourceChange + (*ApplyResourceChange)(nil), // 22: tfplugin6.ApplyResourceChange + (*ImportResourceState)(nil), // 23: tfplugin6.ImportResourceState + (*MoveResourceState)(nil), // 24: tfplugin6.MoveResourceState + (*ReadDataSource)(nil), // 25: 
tfplugin6.ReadDataSource + (*GetFunctions)(nil), // 26: tfplugin6.GetFunctions + (*CallFunction)(nil), // 27: tfplugin6.CallFunction + (*AttributePath_Step)(nil), // 28: tfplugin6.AttributePath.Step + (*StopProvider_Request)(nil), // 29: tfplugin6.StopProvider.Request + (*StopProvider_Response)(nil), // 30: tfplugin6.StopProvider.Response + nil, // 31: tfplugin6.RawState.FlatmapEntry + (*Schema_Block)(nil), // 32: tfplugin6.Schema.Block + (*Schema_Attribute)(nil), // 33: tfplugin6.Schema.Attribute + (*Schema_NestedBlock)(nil), // 34: tfplugin6.Schema.NestedBlock + (*Schema_Object)(nil), // 35: tfplugin6.Schema.Object + (*Function_Parameter)(nil), // 36: tfplugin6.Function.Parameter + (*Function_Return)(nil), // 37: tfplugin6.Function.Return + (*GetMetadata_Request)(nil), // 38: tfplugin6.GetMetadata.Request + (*GetMetadata_Response)(nil), // 39: tfplugin6.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 40: tfplugin6.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 41: tfplugin6.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 42: tfplugin6.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 43: tfplugin6.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 44: tfplugin6.GetProviderSchema.Response + nil, // 45: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 46: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 47: tfplugin6.GetProviderSchema.Response.FunctionsEntry + (*ValidateProviderConfig_Request)(nil), // 48: tfplugin6.ValidateProviderConfig.Request + (*ValidateProviderConfig_Response)(nil), // 49: tfplugin6.ValidateProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 50: tfplugin6.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 51: tfplugin6.UpgradeResourceState.Response + (*ValidateResourceConfig_Request)(nil), // 52: tfplugin6.ValidateResourceConfig.Request + 
(*ValidateResourceConfig_Response)(nil), // 53: tfplugin6.ValidateResourceConfig.Response + (*ValidateDataResourceConfig_Request)(nil), // 54: tfplugin6.ValidateDataResourceConfig.Request + (*ValidateDataResourceConfig_Response)(nil), // 55: tfplugin6.ValidateDataResourceConfig.Response + (*ConfigureProvider_Request)(nil), // 56: tfplugin6.ConfigureProvider.Request + (*ConfigureProvider_Response)(nil), // 57: tfplugin6.ConfigureProvider.Response + (*ReadResource_Request)(nil), // 58: tfplugin6.ReadResource.Request + (*ReadResource_Response)(nil), // 59: tfplugin6.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 60: tfplugin6.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 61: tfplugin6.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 62: tfplugin6.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 63: tfplugin6.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 64: tfplugin6.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 65: tfplugin6.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 66: tfplugin6.ImportResourceState.Response + (*MoveResourceState_Request)(nil), // 67: tfplugin6.MoveResourceState.Request + (*MoveResourceState_Response)(nil), // 68: tfplugin6.MoveResourceState.Response + (*ReadDataSource_Request)(nil), // 69: tfplugin6.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 70: tfplugin6.ReadDataSource.Response + (*GetFunctions_Request)(nil), // 71: tfplugin6.GetFunctions.Request + (*GetFunctions_Response)(nil), // 72: tfplugin6.GetFunctions.Response + nil, // 73: tfplugin6.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 74: tfplugin6.CallFunction.Request + (*CallFunction_Response)(nil), // 75: tfplugin6.CallFunction.Response +} +var file_tfplugin6_proto_depIdxs = []int32{ + 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity + 7, // 1: 
tfplugin6.Diagnostic.attribute:type_name -> tfplugin6.AttributePath + 28, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step + 31, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry + 32, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block + 36, // 5: tfplugin6.Function.parameters:type_name -> tfplugin6.Function.Parameter + 36, // 6: tfplugin6.Function.variadic_parameter:type_name -> tfplugin6.Function.Parameter + 37, // 7: tfplugin6.Function.return:type_name -> tfplugin6.Function.Return + 0, // 8: tfplugin6.Function.description_kind:type_name -> tfplugin6.StringKind + 33, // 9: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute + 34, // 10: tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock + 0, // 11: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind + 35, // 12: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object + 0, // 13: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind + 32, // 14: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block + 2, // 15: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode + 33, // 16: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute + 3, // 17: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode + 0, // 18: tfplugin6.Function.Parameter.description_kind:type_name -> tfplugin6.StringKind + 12, // 19: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 5, // 20: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 41, // 21: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata + 42, // 22: tfplugin6.GetMetadata.Response.resources:type_name -> tfplugin6.GetMetadata.ResourceMetadata + 40, // 23: 
tfplugin6.GetMetadata.Response.functions:type_name -> tfplugin6.GetMetadata.FunctionMetadata + 10, // 24: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema + 45, // 25: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + 46, // 26: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + 5, // 27: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 10, // 28: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema + 12, // 29: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 47, // 30: tfplugin6.GetProviderSchema.Response.functions:type_name -> tfplugin6.GetProviderSchema.Response.FunctionsEntry + 10, // 31: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema + 10, // 32: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema + 11, // 33: tfplugin6.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 34: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 35: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 9, // 36: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState + 4, // 37: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue + 5, // 38: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 39: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 40: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 41: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, 
// 42: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 43: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 44: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 45: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue + 4, // 46: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 47: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 48: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 49: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 50: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue + 4, // 51: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 52: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 53: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue + 7, // 54: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath + 5, // 55: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 56: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 57: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue + 4, // 58: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 59: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 60: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 61: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 62: 
tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue + 65, // 63: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource + 5, // 64: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 9, // 65: tfplugin6.MoveResourceState.Request.source_state:type_name -> tfplugin6.RawState + 4, // 66: tfplugin6.MoveResourceState.Response.target_state:type_name -> tfplugin6.DynamicValue + 5, // 67: tfplugin6.MoveResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 68: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 69: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 70: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue + 5, // 71: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 73, // 72: tfplugin6.GetFunctions.Response.functions:type_name -> tfplugin6.GetFunctions.Response.FunctionsEntry + 5, // 73: tfplugin6.GetFunctions.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 11, // 74: tfplugin6.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 75: tfplugin6.CallFunction.Request.arguments:type_name -> tfplugin6.DynamicValue + 4, // 76: tfplugin6.CallFunction.Response.result:type_name -> tfplugin6.DynamicValue + 6, // 77: tfplugin6.CallFunction.Response.error:type_name -> tfplugin6.FunctionError + 38, // 78: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request + 43, // 79: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request + 48, // 80: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request + 52, // 81: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request + 54, // 82: 
tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request + 50, // 83: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request + 56, // 84: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request + 58, // 85: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request + 60, // 86: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request + 62, // 87: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request + 64, // 88: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request + 67, // 89: tfplugin6.Provider.MoveResourceState:input_type -> tfplugin6.MoveResourceState.Request + 69, // 90: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request + 71, // 91: tfplugin6.Provider.GetFunctions:input_type -> tfplugin6.GetFunctions.Request + 74, // 92: tfplugin6.Provider.CallFunction:input_type -> tfplugin6.CallFunction.Request + 29, // 93: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request + 39, // 94: tfplugin6.Provider.GetMetadata:output_type -> tfplugin6.GetMetadata.Response + 44, // 95: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response + 49, // 96: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response + 53, // 97: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response + 55, // 98: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response + 51, // 99: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response + 57, // 100: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response + 59, // 101: tfplugin6.Provider.ReadResource:output_type -> 
tfplugin6.ReadResource.Response + 61, // 102: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response + 63, // 103: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response + 66, // 104: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response + 68, // 105: tfplugin6.Provider.MoveResourceState:output_type -> tfplugin6.MoveResourceState.Response + 70, // 106: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response + 72, // 107: tfplugin6.Provider.GetFunctions:output_type -> tfplugin6.GetFunctions.Response + 75, // 108: tfplugin6.Provider.CallFunction:output_type -> tfplugin6.CallFunction.Response + 30, // 109: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response + 94, // [94:110] is the sub-list for method output_type + 78, // [78:94] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name +} + +func init() { file_tfplugin6_proto_init() } +func file_tfplugin6_proto_init() { + if File_tfplugin6_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin6_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FunctionError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_tfplugin6_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataResourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin6_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: 
+ return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_DataSourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_ResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[45].Exporter = func(v interface{}, 
i int) interface{} { + switch v := v.(*ValidateProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataResourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataResourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider_Request); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin6_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin6_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_tfplugin6_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin6_proto_rawDesc, + NumEnums: 4, + NumMessages: 72, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_tfplugin6_proto_goTypes, + DependencyIndexes: file_tfplugin6_proto_depIdxs, + EnumInfos: file_tfplugin6_proto_enumTypes, + MessageInfos: file_tfplugin6_proto_msgTypes, + }.Build() + File_tfplugin6_proto = out.File + file_tfplugin6_proto_rawDesc = nil + file_tfplugin6_proto_goTypes = nil + file_tfplugin6_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProviderClient interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. 
+ GetProviderSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + ValidateProviderConfig(ctx context.Context, in *ValidateProviderConfig_Request, opts ...grpc.CallOption) (*ValidateProviderConfig_Response, error) + ValidateResourceConfig(ctx context.Context, in *ValidateResourceConfig_Request, opts ...grpc.CallOption) (*ValidateResourceConfig_Response, error) + ValidateDataResourceConfig(ctx context.Context, in *ValidateDataResourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataResourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + ConfigureProvider(ctx context.Context, in *ConfigureProvider_Request, opts ...grpc.CallOption) (*ConfigureProvider_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. 
+ GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) + // ////// Graceful Shutdown + StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetMetadata(ctx context.Context, in *GetMetadata_Request, opts ...grpc.CallOption) (*GetMetadata_Response, error) { + out := new(GetMetadata_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/GetMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetProviderSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/GetProviderSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateProviderConfig(ctx context.Context, in *ValidateProviderConfig_Request, opts ...grpc.CallOption) (*ValidateProviderConfig_Response, error) { + out := new(ValidateProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ValidateProviderConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceConfig(ctx context.Context, in *ValidateResourceConfig_Request, opts ...grpc.CallOption) (*ValidateResourceConfig_Response, error) { + out := new(ValidateResourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ValidateResourceConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataResourceConfig(ctx context.Context, in *ValidateDataResourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataResourceConfig_Response, error) { + out := new(ValidateDataResourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ValidateDataResourceConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ConfigureProvider(ctx context.Context, in *ConfigureProvider_Request, opts ...grpc.CallOption) (*ConfigureProvider_Response, error) { + out := new(ConfigureProvider_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ConfigureProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ReadResource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) MoveResourceState(ctx context.Context, in *MoveResourceState_Request, opts ...grpc.CallOption) (*MoveResourceState_Response, error) { + out := new(MoveResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/MoveResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/ReadDataSource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/GetFunctions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/CallFunction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) { + out := new(StopProvider_Response) + err := c.cc.Invoke(ctx, "/tfplugin6.Provider/StopProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. +type ProviderServer interface { + // GetMetadata returns upfront information about server capabilities and + // supported resource types without requiring the server to instantiate all + // schema information, which may be memory intensive. This RPC is optional, + // where clients may receive an unimplemented RPC error. Clients should + // ignore the error and call the GetProviderSchema RPC as a fallback. + GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) + // GetSchema returns schema information for the provider, data resources, + // and managed resources. 
+ GetProviderSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + ValidateProviderConfig(context.Context, *ValidateProviderConfig_Request) (*ValidateProviderConfig_Response, error) + ValidateResourceConfig(context.Context, *ValidateResourceConfig_Request) (*ValidateResourceConfig_Response, error) + ValidateDataResourceConfig(context.Context, *ValidateDataResourceConfig_Request) (*ValidateDataResourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + ConfigureProvider(context.Context, *ConfigureProvider_Request) (*ConfigureProvider_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) + // ////// Graceful Shutdown + StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) +} + +// UnimplementedProviderServer can be embedded to have forward compatible implementations. 
+type UnimplementedProviderServer struct { +} + +func (*UnimplementedProviderServer) GetMetadata(context.Context, *GetMetadata_Request) (*GetMetadata_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetadata not implemented") +} +func (*UnimplementedProviderServer) GetProviderSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetProviderSchema not implemented") +} +func (*UnimplementedProviderServer) ValidateProviderConfig(context.Context, *ValidateProviderConfig_Request) (*ValidateProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProviderConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateResourceConfig(context.Context, *ValidateResourceConfig_Request) (*ValidateResourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateDataResourceConfig(context.Context, *ValidateDataResourceConfig_Request) (*ValidateDataResourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataResourceConfig not implemented") +} +func (*UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (*UnimplementedProviderServer) ConfigureProvider(context.Context, *ConfigureProvider_Request) (*ConfigureProvider_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureProvider not implemented") +} +func (*UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func 
(*UnimplementedProviderServer) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") +} +func (*UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (*UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (*UnimplementedProviderServer) MoveResourceState(context.Context, *MoveResourceState_Request) (*MoveResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveResourceState not implemented") +} +func (*UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (*UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (*UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not implemented") +} +func (*UnimplementedProviderServer) StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopProvider not implemented") +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetMetadata_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetadata_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/GetMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetMetadata(ctx, req.(*GetMetadata_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetProviderSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetProviderSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/GetProviderSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetProviderSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ValidateProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateProviderConfig(ctx, req.(*ValidateProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Provider_ValidateResourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ValidateResourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceConfig(ctx, req.(*ValidateResourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataResourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataResourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataResourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ValidateDataResourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataResourceConfig(ctx, req.(*ValidateDataResourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ConfigureProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureProvider_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ConfigureProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ConfigureProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ConfigureProvider(ctx, req.(*ConfigureProvider_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_MoveResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).MoveResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tfplugin6.Provider/MoveResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).MoveResourceState(ctx, req.(*MoveResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/GetFunctions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tfplugin6.Provider/CallFunction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_StopProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopProvider_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).StopProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin6.Provider/StopProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).StopProvider(ctx, req.(*StopProvider_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin6.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetadata", + Handler: _Provider_GetMetadata_Handler, + }, + { + MethodName: "GetProviderSchema", + Handler: _Provider_GetProviderSchema_Handler, + }, + { + MethodName: "ValidateProviderConfig", + Handler: _Provider_ValidateProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceConfig", + Handler: _Provider_ValidateResourceConfig_Handler, + }, + { + MethodName: "ValidateDataResourceConfig", + Handler: _Provider_ValidateDataResourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "ConfigureProvider", + Handler: _Provider_ConfigureProvider_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + 
}, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "MoveResourceState", + Handler: _Provider_MoveResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, + { + MethodName: "StopProvider", + Handler: _Provider_StopProvider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin6.proto", +} diff --git a/pkg/tfplugin6/tfplugin6.proto b/pkg/tfplugin6/tfplugin6.proto new file mode 120000 index 00000000000..cb60ee615ab --- /dev/null +++ b/pkg/tfplugin6/tfplugin6.proto @@ -0,0 +1 @@ +../../docs/plugin-protocol/tfplugin6.5.proto \ No newline at end of file diff --git a/pkg/tofu/context.go b/pkg/tofu/context.go new file mode 100644 index 00000000000..5645d249c46 --- /dev/null +++ b/pkg/tofu/context.go @@ -0,0 +1,442 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "fmt" + "log" + "sort" + "sync" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// InputMode defines what sort of input will be asked for when Input +// is called on Context. +type InputMode byte + +const ( + // InputModeProvider asks for provider variables + InputModeProvider InputMode = 1 << iota + + // InputModeStd is the standard operating mode and asks for both variables + // and providers. 
+ InputModeStd = InputModeProvider +) + +// ContextOpts are the user-configurable options to create a context with +// NewContext. +type ContextOpts struct { + Meta *ContextMeta + Hooks []Hook + Parallelism int + Providers map[addrs.Provider]providers.Factory + Provisioners map[string]provisioners.Factory + Encryption encryption.Encryption + + UIInput UIInput +} + +// ContextMeta is metadata about the running context. This is information +// that this package or structure cannot determine on its own but exposes +// into OpenTofu in various ways. This must be provided by the Context +// initializer. +type ContextMeta struct { + Env string // Env is the state environment + + // OriginalWorkingDir is the working directory where the OpenTofu CLI + // was run from, which may no longer actually be the current working + // directory if the user included the -chdir=... option. + // + // If this string is empty then the original working directory is the same + // as the current working directory. + // + // In most cases we should respect the user's override by ignoring this + // path and just using the current working directory, but this is here + // for some exceptional cases where the original working directory is + // needed. + OriginalWorkingDir string +} + +// Context represents all the context that OpenTofu needs in order to +// perform operations on infrastructure. This structure is built using +// NewContext. +type Context struct { + // meta captures some misc. information about the working directory where + // we're taking these actions, and thus which should remain steady between + // operations. 
+ meta *ContextMeta + + plugins *contextPlugins + + hooks []Hook + sh *stopHook + uiInput UIInput + + parallelSem Semaphore + l sync.Mutex // Lock acquired during any task + providerInputConfig map[string]map[string]cty.Value + runCond *sync.Cond + runContext context.Context + runContextCancel context.CancelFunc + + encryption encryption.Encryption +} + +// (additional methods on Context can be found in context_*.go files.) + +// NewContext creates a new Context structure. +// +// Once a Context is created, the caller must not access or mutate any of +// the objects referenced (directly or indirectly) by the ContextOpts fields. +// +// If the returned diagnostics contains errors then the resulting context is +// invalid and must not be used. +func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] tofu.NewContext: starting") + + // Copy all the hooks and add our stop hook. We don't append directly + // to the Config so that we're not modifying that in-place. + sh := new(stopHook) + hooks := make([]Hook, len(opts.Hooks)+1) + copy(hooks, opts.Hooks) + hooks[len(opts.Hooks)] = sh + + // Determine parallelism, default to 10. We do this both to limit + // CPU pressure but also to have an extra guard against rate throttling + // from providers. + // We throw an error in case of negative parallelism + par := opts.Parallelism + if par < 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid parallelism value", + fmt.Sprintf("The parallelism must be a positive value. 
Not %d.", par), + )) + return nil, diags + } + + if par == 0 { + par = 10 + } + + plugins := newContextPlugins(opts.Providers, opts.Provisioners) + + log.Printf("[TRACE] tofu.NewContext: complete") + + return &Context{ + hooks: hooks, + meta: opts.Meta, + uiInput: opts.UIInput, + + plugins: plugins, + + parallelSem: NewSemaphore(par), + providerInputConfig: make(map[string]map[string]cty.Value), + sh: sh, + + encryption: opts.Encryption, + }, diags +} + +func (c *Context) Schemas(config *configs.Config, state *states.State) (*Schemas, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + ret, err := loadSchemas(config, state, c.plugins) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to load plugin schemas", + fmt.Sprintf("Error while loading schemas for plugin components: %s.", err), + )) + return nil, diags + } + return ret, diags +} + +type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). + Validate bool + + // Legacy graphs only: won't prune the graph + Verbose bool +} + +// Stop stops the running task. +// +// Stop will block until the task completes. +func (c *Context) Stop() { + log.Printf("[WARN] tofu: Stop called, initiating interrupt sequence") + + c.l.Lock() + defer c.l.Unlock() + + // If we're running, then stop + if c.runContextCancel != nil { + log.Printf("[WARN] tofu: run context exists, stopping") + + // Tell the hook we want to stop + c.sh.Stop() + + // Stop the context + c.runContextCancel() + c.runContextCancel = nil + } + + // Notify all of the hooks that we're stopping, in case they want to try + // to flush in-memory state to disk before a subsequent hard kill. 
+	for _, hook := range c.hooks {
+		hook.Stopping()
+	}
+
+	// Grab the condition var before we exit
+	if cond := c.runCond; cond != nil {
+		log.Printf("[INFO] tofu: waiting for graceful stop to complete")
+		cond.Wait()
+	}
+
+	log.Printf("[WARN] tofu: stop complete")
+}
+
+func (c *Context) acquireRun(phase string) func() {
+	// With the run lock held, grab the context lock to make changes
+	// to the run context.
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	// Wait until we're no longer running
+	for c.runCond != nil {
+		c.runCond.Wait()
+	}
+
+	// Build our lock
+	c.runCond = sync.NewCond(&c.l)
+
+	// Create a new run context
+	c.runContext, c.runContextCancel = context.WithCancel(context.Background())
+
+	// Reset the stop hook so we're not stopped
+	c.sh.Reset()
+
+	return c.releaseRun
+}
+
+func (c *Context) releaseRun() {
+	// Grab the context lock so that we can make modifications to fields
+	c.l.Lock()
+	defer c.l.Unlock()
+
+	// End our run. We check if runContext is non-nil because it can be
+	// set to nil if it was cancelled via Stop()
+	if c.runContextCancel != nil {
+		c.runContextCancel()
+	}
+
+	// Unlock all waiting on our condition
+	cond := c.runCond
+	c.runCond = nil
+	cond.Broadcast()
+
+	// Unset the context
+	c.runContext = nil
+}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly. When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important, because without synchronizing with the end of
+// the watchStop goroutine, the runContext may also be closed during the select
+// incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext which
+// can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { + stop := make(chan struct{}) + wait := make(chan struct{}) + + // get the runContext cancellation channel now, because releaseRun will + // write to the runContext field. + done := c.runContext.Done() + + panicHandler := logging.PanicHandlerWithTraceFn() + go func() { + defer panicHandler() + + defer close(wait) + // Wait for a stop or completion + select { + case <-done: + // done means the context was canceled, so we need to try and stop + // providers. + case <-stop: + // our own stop channel was closed. + return + } + + // If we're here, we're stopped, trigger the call. + log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") + + { + // Copy the providers so that a misbehaved blocking Stop doesn't + // completely hang OpenTofu. + walker.providerLock.Lock() + ps := make([]providers.Interface, 0, len(walker.providerCache)) + for _, p := range walker.providerCache { + ps = append(ps, p) + } + defer walker.providerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: OpenTofu will exit once the graph node completes. + p.Stop() + } + } + + { + // Call stop on all the provisioners + walker.provisionerLock.Lock() + ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) + for _, p := range walker.provisionerCache { + ps = append(ps, p) + } + defer walker.provisionerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: OpenTofu will exit once the graph node completes. 
+				p.Stop()
+			}
+		}
+	}()
+
+	return stop, wait
+}
+
+// checkConfigDependencies checks whether the receiving context is able to
+// support the given configuration, returning error diagnostics if not.
+//
+// Currently this function checks whether the current OpenTofu CLI version
+// matches the version requirements of all of the modules, and whether our
+// plugin library contains all of the plugin names/addresses needed.
+//
+// This function does *not* check that external modules are installed (that's
+// the responsibility of the configuration loader) and doesn't check that the
+// plugins are of suitable versions to match any version constraints (which is
+// the responsibility of the code which installed the plugins and then
+// constructed the Providers/Provisioners maps passed in to NewContext).
+//
+// In most cases we should typically catch the problems this function detects
+// before we reach this point, but this function can come into play in some
+// unusual cases outside of the main workflow, and can avoid some
+// potentially-more-confusing errors from later operations.
+func (c *Context) checkConfigDependencies(config *configs.Config) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	// This checks the OpenTofu CLI version constraints specified in all of
+	// the modules.
+	diags = diags.Append(CheckCoreVersionRequirements(config))
+
+	// We only check that we have a factory for each required provider, and
+	// assume the caller already assured that any separately-installed
+	// plugins are of a suitable version, match expected checksums, etc.
+ providerReqs, hclDiags := config.ProviderRequirements() + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return diags + } + for providerAddr := range providerReqs { + if !c.plugins.HasProvider(providerAddr) { + if !providerAddr.IsBuiltIn() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + fmt.Sprintf( + "This configuration requires provider %s, but that provider isn't available. You may be able to install it automatically by running:\n tofu init", + providerAddr, + ), + )) + } else { + // Built-in providers can never be installed by "tofu init", + // so no point in confusing the user by suggesting that. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + fmt.Sprintf( + "This configuration requires built-in provider %s, but that provider isn't available in this OpenTofu version.", + providerAddr, + ), + )) + } + } + } + + // Our handling of provisioners is much less sophisticated than providers + // because they are in many ways a legacy system. We need to go hunting + // for them more directly in the configuration. + config.DeepEach(func(modCfg *configs.Config) { + if modCfg == nil || modCfg.Module == nil { + return // should not happen, but we'll be robust + } + for _, rc := range modCfg.Module.ManagedResources { + if rc.Managed == nil { + continue // should not happen, but we'll be robust + } + for _, pc := range rc.Managed.Provisioners { + if !c.plugins.HasProvisioner(pc.Type) { + // This is not a very high-quality error, because really + // the caller of tofu.NewContext should've already + // done equivalent checks when doing plugin discovery. + // This is just to make sure we return a predictable + // error in a central place, rather than failing somewhere + // later in the non-deterministically-ordered graph walk. 
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Missing required provisioner plugin",
+					fmt.Sprintf(
+						"This configuration requires provisioner plugin %q, which isn't available. If you're intending to use an external provisioner plugin, you must install it manually into one of the plugin search directories before running OpenTofu.",
+						pc.Type,
+					),
+				))
+				}
+			}
+		}
+	})
+
+	// Because we were doing a lot of map iteration above, and we're only
+	// generating sourceless diagnostics anyway, our diagnostics will not be
+	// in a deterministic order. To ensure stable output when there are
+	// multiple errors to report, we'll sort these particular diagnostics
+	// so they are at least always consistent alone. This ordering is
+	// arbitrary and not a compatibility constraint.
+	sort.Slice(diags, func(i, j int) bool {
+		// Because these are sourceless diagnostics and we know they are all
+		// errors, we know they'll only differ in their description fields.
+		descI := diags[i].Description()
+		descJ := diags[j].Description()
+		switch {
+		case descI.Summary != descJ.Summary:
+			return descI.Summary < descJ.Summary
+		default:
+			return descI.Detail < descJ.Detail
+		}
+	})
+
+	return diags
+}
diff --git a/pkg/tofu/context_apply.go b/pkg/tofu/context_apply.go
new file mode 100644
index 00000000000..e4e7979348c
--- /dev/null
+++ b/pkg/tofu/context_apply.go
@@ -0,0 +1,210 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tofu
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/configs"
+	"github.com/kubegems/opentofu/pkg/plans"
+	"github.com/kubegems/opentofu/pkg/states"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// Apply performs the actions described by the given Plan object and returns
+// the resulting updated state.
+// +// The given configuration *must* be the same configuration that was passed +// earlier to Context.Plan in order to create this plan. +// +// Even if the returned diagnostics contains errors, Apply always returns the +// resulting state which is likely to have been partially-updated. +func (c *Context) Apply(plan *plans.Plan, config *configs.Config) (*states.State, tfdiags.Diagnostics) { + defer c.acquireRun("apply")() + + log.Printf("[DEBUG] Building and walking apply graph for %s plan", plan.UIMode) + + if plan.Errored { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot apply failed plan", + `The given plan is incomplete due to errors during planning, and so it cannot be applied.`, + )) + return nil, diags + } + + for _, rc := range plan.Changes.Resources { + // Import is a no-op change during an apply (all the real action happens during the plan) but we'd + // like to show some helpful output that mirrors the way we show other changes. + if rc.Importing != nil { + for _, h := range c.hooks { + // In future, we may need to call PostApplyImport separately elsewhere in the apply + // operation. For now, though, we'll call Pre and Post hooks together. + h.PreApplyImport(rc.Addr, *rc.Importing) + h.PostApplyImport(rc.Addr, *rc.Importing) + } + } + } + + graph, operation, diags := c.applyGraph(plan, config, true) + if diags.HasErrors() { + return nil, diags + } + + workingState := plan.PriorState.DeepCopy() + walker, walkDiags := c.walk(graph, operation, &graphWalkOpts{ + Config: config, + InputState: workingState, + Changes: plan.Changes, + + // We need to propagate the check results from the plan phase, + // because that will tell us which checkable objects we're expecting + // to see updated results from during the apply step. + PlanTimeCheckResults: plan.Checks, + + // We also want to propagate the timestamp from the plan file. 
+ PlanTimeTimestamp: plan.Timestamp, + }) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + + // After the walk is finished, we capture a simplified snapshot of the + // check result data as part of the new state. + walker.State.RecordCheckResults(walker.Checks) + + newState := walker.State.Close() + if plan.UIMode == plans.DestroyMode && !diags.HasErrors() { + // NOTE: This is a vestigial violation of the rule that we mustn't + // use plan.UIMode to affect apply-time behavior. + // We ideally ought to just call newState.PruneResourceHusks + // unconditionally here, but we historically didn't and haven't yet + // verified that it'd be safe to do so. + newState.PruneResourceHusks() + } + + if len(plan.TargetAddrs) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Applied changes may be incomplete", + `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: + tofu plan + +Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`, + )) + } + + // FIXME: we cannot check for an empty plan for refresh-only, because root + // outputs are always stored as changes. The final condition of the state + // also depends on some cleanup which happens during the apply walk. It + // would probably make more sense if applying a refresh-only plan were + // simply just returning the planned state and checks, but some extra + // cleanup is going to be needed to make the plan state match what apply + // would do. For now we can copy the checks over which were overwritten + // during the apply walk. 
+ // Despite the intent of UIMode, it must still be used for apply-time + // differences in destroy plans too, so we can make use of that here as + // well. + if plan.UIMode == plans.RefreshOnlyMode { + newState.CheckResults = plan.Checks.DeepCopy() + } + + return newState, diags +} + +func (c *Context) applyGraph(plan *plans.Plan, config *configs.Config, validate bool) (*Graph, walkOperation, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + variables := InputValues{} + for name, dyVal := range plan.VariableValues { + val, err := dyVal.Decode(cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid variable value in plan", + fmt.Sprintf("Invalid value for variable %q recorded in plan file: %s.", name, err), + )) + continue + } + + variables[name] = &InputValue{ + Value: val, + SourceType: ValueFromPlan, + } + } + if diags.HasErrors() { + return nil, walkApply, diags + } + + // The plan.VariableValues field only records variables that were actually + // set by the caller in the PlanOpts, so we may need to provide + // placeholders for any other variables that the user didn't set, in + // which case OpenTofu will once again use the default value from the + // configuration when we visit these variables during the graph walk. + for name := range config.Module.Variables { + if _, ok := variables[name]; ok { + continue + } + variables[name] = &InputValue{ + Value: cty.NilVal, + SourceType: ValueFromPlan, + } + } + + operation := walkApply + if plan.UIMode == plans.DestroyMode { + // FIXME: Due to differences in how objects must be handled in the + // graph and evaluated during a complete destroy, we must continue to + // use plans.DestroyMode to switch on this behavior. If all objects + // which require special destroy handling can be tracked in the plan, + // then this switch will no longer be needed and we can remove the + // walkDestroy operation mode. 
+		// TODO: Audit that and remove walkDestroy as an operation mode.
+		operation = walkDestroy
+	}
+
+	graph, moreDiags := (&ApplyGraphBuilder{
+		Config:             config,
+		Changes:            plan.Changes,
+		State:              plan.PriorState,
+		RootVariableValues: variables,
+		Plugins:            c.plugins,
+		Targets:            plan.TargetAddrs,
+		ForceReplace:       plan.ForceReplaceAddrs,
+		Operation:          operation,
+		ExternalReferences: plan.ExternalReferences,
+	}).Build(addrs.RootModuleInstance)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return nil, walkApply, diags
+	}
+
+	return graph, operation, diags
+}
+
+// ApplyGraphForUI is a last vestige of graphs in the public interface of
+// Context (as opposed to graphs as an implementation detail) intended only for
+// use by the "tofu graph" command when asked to render an apply-time
+// graph.
+//
+// The result of this is intended only for rendering to the user as a dot
+// graph, and so may change in future in order to make the result more useful
+// in that context, even if it drifts away from the physical graph that OpenTofu
+// Core currently uses as an implementation detail of planning.
+func (c *Context) ApplyGraphForUI(plan *plans.Plan, config *configs.Config) (*Graph, tfdiags.Diagnostics) {
+	// For now though, this really is just the internal graph, confusing
+	// implementation details and all.
+
+	var diags tfdiags.Diagnostics
+
+	graph, _, moreDiags := c.applyGraph(plan, config, false)
+	diags = diags.Append(moreDiags)
+	return graph, diags
+}
diff --git a/pkg/tofu/context_apply2_test.go b/pkg/tofu/context_apply2_test.go
new file mode 100644
index 00000000000..dce583f4589
--- /dev/null
+++ b/pkg/tofu/context_apply2_test.go
@@ -0,0 +1,2309 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "errors" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Test that the PreApply hook is called with the correct deposed key +func TestContext2Apply_createBeforeDestroy_deposedKeyPreApply(t *testing.T) { + m := testModule(t, "apply-cbd-deposed-only") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + deposedKey := states.NewDeposedKey() + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("aws_instance.bar").Resource, + deposedKey, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + 
t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + // Verify PreApply was called correctly + if !hook.PreApplyCalled { + t.Fatalf("PreApply hook not called") + } + if addr, wantAddr := hook.PreApplyAddr, mustResourceInstanceAddr("aws_instance.bar"); !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + if gen := hook.PreApplyGen; gen != deposedKey { + t.Errorf("expected gen to be %q, but was %q", deposedKey, gen) + } +} + +func TestContext2Apply_destroyWithDataSourceExpansion(t *testing.T) { + // While managed resources store their destroy-time dependencies, data + // sources do not. This means that if a provider were only included in a + // destroy graph because of data sources, it could have dependencies which + // are not correctly ordered. Here we verify that the provider is not + // included in the destroy operation, and all dependency evaluations + // succeed. + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" +} + +provider "other" { + foo = module.mod.data +} + +# this should not require the provider be present during destroy +data "other_data_source" "a" { +} +`, + + "mod/main.tf": ` +data "test_data_source" "a" { + count = 1 +} + +data "test_data_source" "b" { + count = data.test_data_source.a[0].foo == "ok" ? 1 : 0 +} + +output "data" { + value = data.test_data_source.a[0].foo == "ok" ? 
data.test_data_source.b[0].foo : "nope" +} +`, + }) + + testP := testProvider("test") + otherP := testProvider("other") + + readData := func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_source"), + "foo": cty.StringVal("ok"), + }), + } + } + + testP.ReadDataSourceFn = readData + otherP.ReadDataSourceFn = readData + + ps := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), + addrs.NewDefaultProvider("other"): testProviderFuncFixed(otherP), + } + + otherP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + foo := req.Config.GetAttr("foo") + if foo.IsNull() || foo.AsString() != "ok" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect config val: %#v\n", foo)) + } + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: ps, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // now destroy the whole thing + ctx = testContext2(t, &ContextOpts{ + Providers: ps, + }) + + plan, diags = ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + otherP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + // should not be used to destroy data sources + resp.Diagnostics = resp.Diagnostics.Append(errors.New("provider should not be used")) + return resp + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Apply_destroyThenUpdate(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + value = 
"udpated"
+}
+`,
+	})
+
+	p := testProvider("test")
+	p.PlanResourceChangeFn = testDiffFn
+
+	var orderMu sync.Mutex
+	var order []string
+	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
+		id := req.PriorState.GetAttr("id").AsString()
+		if id == "b" {
+			// slow down the b destroy, since a should wait for it
+			time.Sleep(100 * time.Millisecond)
+		}
+
+		orderMu.Lock()
+		order = append(order, id)
+		orderMu.Unlock()
+
+		resp.NewState = req.PlannedState
+		return resp
+	}
+
+	addrA := mustResourceInstanceAddr(`test_instance.a`)
+	addrB := mustResourceInstanceAddr(`test_instance.b`)
+
+	state := states.BuildState(func(s *states.SyncState) {
+		s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{
+			AttrsJSON: []byte(`{"id":"a","value":"old","type":"test"}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
+
+		// test_instance.b depended on test_instance.a, and therefore should be
+		// destroyed before any changes to test_instance.a
+		s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{
+			AttrsJSON:    []byte(`{"id":"b"}`),
+			Status:       states.ObjectReady,
+			Dependencies: []addrs.ConfigResource{addrA.ContainingResource().Config()},
+		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
+	})
+
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
+	assertNoErrors(t, diags)
+
+	_, diags = ctx.Apply(plan, m)
+	if diags.HasErrors() {
+		t.Fatal(diags.Err())
+	}
+
+	if order[0] != "b" {
+		t.Fatalf("expected apply order [b, a], got: %v\n", order)
+	}
+}
+
+// verify that dependencies are updated in the state during refresh and apply
+func TestApply_updateDependencies(t *testing.T) {
+	state := states.NewState()
+	root := 
state.EnsureModule(addrs.RootModuleInstance) + + fooAddr := mustResourceInstanceAddr("aws_instance.foo") + barAddr := mustResourceInstanceAddr("aws_instance.bar") + bazAddr := mustResourceInstanceAddr("aws_instance.baz") + bamAddr := mustResourceInstanceAddr("aws_instance.bam") + binAddr := mustResourceInstanceAddr("aws_instance.bin") + root.SetResourceInstanceCurrent( + fooAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Dependencies: []addrs.ConfigResource{ + bazAddr.ContainingResource().Config(), + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + binAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bin","type":"aws_instance","unknown":"ok"}`), + Dependencies: []addrs.ConfigResource{ + bazAddr.ContainingResource().Config(), + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + bazAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz"}`), + Dependencies: []addrs.ConfigResource{ + // Existing dependencies should not be removed from orphaned instances + bamAddr.ContainingResource().Config(), + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + barAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "bar" { + foo = aws_instance.foo.id +} + +resource "aws_instance" "foo" { +} + +resource "aws_instance" "bin" { +} +`, + }) + + p := testProvider("aws") + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + bar := plan.PriorState.ResourceInstance(barAddr) + if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) { + t.Fatalf("bar should depend on foo after refresh, but got %s", bar.Current.Dependencies) + } + + foo := plan.PriorState.ResourceInstance(fooAddr) + if len(foo.Current.Dependencies) == 0 || !foo.Current.Dependencies[0].Equal(bazAddr.ContainingResource().Config()) { + t.Fatalf("foo should depend on baz after refresh because of the update, but got %s", foo.Current.Dependencies) + } + + bin := plan.PriorState.ResourceInstance(binAddr) + if len(bin.Current.Dependencies) != 0 { + t.Fatalf("bin should depend on nothing after refresh because there is no change, but got %s", bin.Current.Dependencies) + } + + baz := plan.PriorState.ResourceInstance(bazAddr) + if len(baz.Current.Dependencies) == 0 || !baz.Current.Dependencies[0].Equal(bamAddr.ContainingResource().Config()) { + t.Fatalf("baz should depend on bam after refresh, but got %s", baz.Current.Dependencies) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + bar = state.ResourceInstance(barAddr) + if len(bar.Current.Dependencies) == 0 || !bar.Current.Dependencies[0].Equal(fooAddr.ContainingResource().Config()) { + t.Fatalf("bar should still depend on foo after apply, but got %s", bar.Current.Dependencies) + } + + foo = state.ResourceInstance(fooAddr) + if len(foo.Current.Dependencies) != 0 { + t.Fatalf("foo should have no deps after apply, but got %s", foo.Current.Dependencies) + } + +} + +func TestContext2Apply_additionalSensitiveFromState(t *testing.T) { + // Ensure we're not trying to double-mark values decoded from state + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "secret" { + 
sensitive = true + default = ["secret"] +} + +resource "test_resource" "a" { + sensitive_attr = var.secret +} + +resource "test_resource" "b" { + value = test_resource.a.id +} +`, + }) + + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + }, + "sensitive_attr": { + Type: cty.List(cty.String), + Optional: true, + Sensitive: true, + }, + }, + }, + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`test_resource.a`), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"a","sensitive_attr":["secret"]}`), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("sensitive_attr"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Apply_sensitiveOutputPassthrough(t *testing.T) { + // Ensure we're not trying to double-mark values decoded from state + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" +} + +resource "test_object" "a" { + test_string = module.mod.out +} +`, + + "mod/main.tf": ` +variable "in" { + sensitive = true + default = "foo" +} +output "out" { + value = var.in +} +`, + }) + + p := 
simpleMockProvider() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + obj := state.ResourceInstance(mustResourceInstanceAddr("test_object.a")) + if len(obj.Current.AttrSensitivePaths) != 1 { + t.Fatalf("Expected 1 sensitive mark for test_object.a, got %#v\n", obj.Current.AttrSensitivePaths) + } + + plan, diags = ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + // make sure the same marks are compared in the next plan as well + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Errorf("Unexpcetd %s change for %s", c.Action, c.Addr) + } + } +} + +func TestContext2Apply_ignoreImpureFunctionChanges(t *testing.T) { + // The impure function call should not cause a planned change with + // ignore_changes + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "pw" { + sensitive = true + default = "foo" +} + +resource "test_object" "x" { + test_map = { + string = "X${bcrypt(var.pw)}" + } + lifecycle { + ignore_changes = [ test_map["string"] ] + } +} + +resource "test_object" "y" { + test_map = { + string = "X${bcrypt(var.pw)}" + } + lifecycle { + ignore_changes = [ test_map ] + } +} + +`, + }) + + p := simpleMockProvider() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // FINAL PLAN: + plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, 
testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + // make sure the same marks are compared in the next plan as well + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Logf("marks before: %#v", c.BeforeValMarks) + t.Logf("marks after: %#v", c.AfterValMarks) + t.Errorf("Unexpcetd %s change for %s", c.Action, c.Addr) + } + } +} + +func TestContext2Apply_destroyWithDeposed(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "x" { + test_string = "ok" + lifecycle { + create_before_destroy = true + } +}`, + }) + + p := simpleMockProvider() + + deposedKey := states.NewDeposedKey() + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("test_object.x").Resource, + deposedKey, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"deposed"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("plan: %s", diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply: %s", diags.Err()) + } + +} + +func TestContext2Apply_nullableVariables(t *testing.T) { + m := testModule(t, "apply-nullable-variables") + state := states.NewState() + ctx := testContext2(t, &ContextOpts{}) + plan, diags := ctx.Plan(m, state, &PlanOpts{}) + if diags.HasErrors() { + t.Fatalf("plan: %s", diags.Err()) + } + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply: %s", diags.Err()) + } + + outputs := state.Module(addrs.RootModuleInstance).OutputValues + // we check for null outputs be seeing that 
they don't exists + if _, ok := outputs["nullable_null_default"]; ok { + t.Error("nullable_null_default: expected no output value") + } + if _, ok := outputs["nullable_non_null_default"]; ok { + t.Error("nullable_non_null_default: expected no output value") + } + if _, ok := outputs["nullable_no_default"]; ok { + t.Error("nullable_no_default: expected no output value") + } + + if v := outputs["non_nullable_default"].Value; v.AsString() != "ok" { + t.Fatalf("incorrect 'non_nullable_default' output value: %#v\n", v) + } + if v := outputs["non_nullable_no_default"].Value; v.AsString() != "ok" { + t.Fatalf("incorrect 'non_nullable_no_default' output value: %#v\n", v) + } +} + +func TestContext2Apply_targetedDestroyWithMoved(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "modb" { + source = "./mod" + for_each = toset(["a", "b"]) +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { +} + +module "sub" { + for_each = toset(["a", "b"]) + source = "./sub" +} + +moved { + from = module.old + to = module.sub +} +`, + "./mod/sub/main.tf": ` +resource "test_object" "s" { +} +`}) + + p := simpleMockProvider() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // destroy only a single instance not included in the moved statements + _, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{mustResourceInstanceAddr(`module.modb["a"].test_object.a`)}, + }) + assertNoErrors(t, diags) +} + +func TestContext2Apply_graphError(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "ok" +} + +resource "test_object" "b" { + test_string = test_object.a.test_string +} +`, 
+ }) + + p := simpleMockProvider() + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"ok"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"ok"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("plan: %s", diags.Err()) + } + + // We're going to corrupt the stored state so that the dependencies will + // cause a cycle when building the apply graph. + testObjA := plan.PriorState.Modules[""].Resources["test_object.a"].Instances[addrs.NoKey].Current + testObjA.Dependencies = append(testObjA.Dependencies, mustResourceInstanceAddr("test_object.b").ContainingResource().Config()) + + _, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("expected cycle error from apply") + } +} + +func TestContext2Apply_resourcePostcondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "boop" { + type = string +} + +resource "test_resource" "a" { + value = var.boop +} + +resource "test_resource" "b" { + value = test_resource.a.output + lifecycle { + postcondition { + condition = self.output != "" + error_message = "Output must not be blank." 
+ } + } +} + +resource "test_resource" "c" { + value = test_resource.b.output +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.UnknownVal(cty.String) + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("condition pass", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(plan.Changes.Resources) != 3 { + t.Fatalf("unexpected plan changes: %#v", plan.Changes) + } + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + m := req.PlannedState.AsValueMap() + m["output"] = cty.StringVal(fmt.Sprintf("new-%s", m["value"].AsString())) + + resp.NewState = cty.ObjectVal(m) + return resp + } + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + wantResourceAttrs := map[string]struct{ value, output string }{ + "a": {"boop", "new-boop"}, + "b": {"new-boop", "new-new-boop"}, + "c": {"new-new-boop", "new-new-new-boop"}, + } + for name, attrs := range wantResourceAttrs { + addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) + r := 
state.ResourceInstance(addr) + rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ + "value": cty.String, + "output": cty.String, + })) + if err != nil { + t.Fatalf("error decoding test_resource.a: %s", err) + } + want := cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal(attrs.value), + "output": cty.StringVal(attrs.output), + }) + if !cmp.Equal(want, rd.Value, valueComparer) { + t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) + } + } + }) + t.Run("condition fail", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(plan.Changes.Resources) != 3 { + t.Fatalf("unexpected plan changes: %#v", plan.Changes) + } + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + m := req.PlannedState.AsValueMap() + + // For the resource with a constraint, fudge the output to make the + // condition fail. 
+ if value := m["value"].AsString(); value == "new-boop" { + m["output"] = cty.StringVal("") + } else { + m["output"] = cty.StringVal(fmt.Sprintf("new-%s", value)) + } + + resp.NewState = cty.ObjectVal(m) + return resp + } + state, diags := ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + + // Resources a and b should still be recorded in state + wantResourceAttrs := map[string]struct{ value, output string }{ + "a": {"boop", "new-boop"}, + "b": {"new-boop", ""}, + } + for name, attrs := range wantResourceAttrs { + addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) + r := state.ResourceInstance(addr) + rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ + "value": cty.String, + "output": cty.String, + })) + if err != nil { + t.Fatalf("error decoding test_resource.a: %s", err) + } + want := cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal(attrs.value), + "output": cty.StringVal(attrs.output), + }) + if !cmp.Equal(want, rd.Value, valueComparer) { + t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) + } + } + + // Resource c should not be in state + if state.ResourceInstance(mustResourceInstanceAddr("test_resource.c")) != nil { + t.Error("test_resource.c should not exist in state, but is") + } + }) +} + +func TestContext2Apply_outputValuePrecondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + variable "input" { + type = string + } + + module "child" { + source = "./child" + + input = var.input + } + + output "result" { + value = module.child.result + + precondition { + condition = var.input != "" + error_message = "Input must not be empty." 
+ } + } + `, + "child/main.tf": ` + variable "input" { + type = string + } + + output "result" { + value = var.input + + precondition { + condition = var.input != "" + error_message = "Input must not be empty." + } + } + `, + }) + + checkableObjects := []addrs.Checkable{ + addrs.OutputValue{Name: "result"}.Absolute(addrs.RootModuleInstance), + addrs.OutputValue{Name: "result"}.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + } + + t.Run("pass", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{}) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal("beep"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoDiagnostics(t, diags) + + for _, addr := range checkableObjects { + result := plan.Checks.GetObjectResult(addr) + if result == nil { + t.Fatalf("no check result for %s in the plan", addr) + } + if got, want := result.Status, checks.StatusPass; got != want { + t.Fatalf("wrong check status for %s during planning\ngot: %s\nwant: %s", addr, got, want) + } + } + + state, diags := ctx.Apply(plan, m) + assertNoDiagnostics(t, diags) + for _, addr := range checkableObjects { + result := state.CheckResults.GetObjectResult(addr) + if result == nil { + t.Fatalf("no check result for %s in the final state", addr) + } + if got, want := result.Status, checks.StatusPass; got != want { + t.Errorf("wrong check status for %s after apply\ngot: %s\nwant: %s", addr, got, want) + } + } + }) + + t.Run("fail", func(t *testing.T) { + // NOTE: This test actually catches a failure during planning and so + // cannot proceed to apply, so it's really more of a plan test + // than an apply test but better to keep all of these + // thematically-related test cases together. 
+ ctx := testContext2(t, &ContextOpts{}) + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal(""), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + + const wantSummary = "Module output value precondition failed" + found := false + for _, diag := range diags { + if diag.Severity() == tfdiags.Error && diag.Description().Summary == wantSummary { + found = true + break + } + } + + if !found { + t.Fatalf("missing expected error\nwant summary: %s\ngot: %s", wantSummary, diags.Err().Error()) + } + }) +} + +func TestContext2Apply_resourceConditionApplyTimeFail(t *testing.T) { + // This tests the less common situation where a condition fails due to + // a change in a resource other than the one the condition is attached to, + // and the condition result is unknown during planning. + // + // This edge case is a tricky one because it relies on OpenTofu still + // visiting test_resource.b (in the configuration below) to evaluate + // its conditions even though there aren't any changes directly planned + // for it, so that we can consider whether changes to test_resource.a + // have changed the outcome. + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + variable "input" { + type = string + } + + resource "test_resource" "a" { + value = var.input + } + + resource "test_resource" "b" { + value = "beep" + + lifecycle { + postcondition { + condition = test_resource.a.output == self.output + error_message = "Outputs must match." 
+ } + } + } + `, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // Whenever "value" changes, "output" follows it during the apply step, + // but is initially unknown during the plan step. + + m := req.ProposedNewState.AsValueMap() + priorVal := cty.NullVal(cty.String) + if !req.PriorState.IsNull() { + priorVal = req.PriorState.GetAttr("value") + } + if m["output"].IsNull() || !priorVal.RawEquals(m["value"]) { + m["output"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + m := req.PlannedState.AsValueMap() + m["output"] = m["value"] + resp.NewState = cty.ObjectVal(m) + return resp + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + instA := mustResourceInstanceAddr("test_resource.a") + instB := mustResourceInstanceAddr("test_resource.b") + + // Preparation: an initial plan and apply with a correct input variable + // should succeed and give us a valid and complete state to use for the + // subsequent plan and apply that we'll expect to fail. 
+ var prevRunState *states.State + { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal("beep"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + planA := plan.Changes.ResourceInstance(instA) + if planA == nil || planA.Action != plans.Create { + t.Fatalf("incorrect initial plan for instance A\nwant a 'create' change\ngot: %s", spew.Sdump(planA)) + } + planB := plan.Changes.ResourceInstance(instB) + if planB == nil || planB.Action != plans.Create { + t.Fatalf("incorrect initial plan for instance B\nwant a 'create' change\ngot: %s", spew.Sdump(planB)) + } + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + stateA := state.ResourceInstance(instA) + if stateA == nil || stateA.Current == nil || !bytes.Contains(stateA.Current.AttrsJSON, []byte(`"beep"`)) { + t.Fatalf("incorrect initial state for instance A\ngot: %s", spew.Sdump(stateA)) + } + stateB := state.ResourceInstance(instB) + if stateB == nil || stateB.Current == nil || !bytes.Contains(stateB.Current.AttrsJSON, []byte(`"beep"`)) { + t.Fatalf("incorrect initial state for instance B\ngot: %s", spew.Sdump(stateB)) + } + prevRunState = state + } + + // Now we'll run another plan and apply with a different value for + // var.input that should cause the test_resource.b condition to be unknown + // during planning and then fail during apply. 
+ { + plan, diags := ctx.Plan(m, prevRunState, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal("boop"), // NOTE: This has changed + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + planA := plan.Changes.ResourceInstance(instA) + if planA == nil || planA.Action != plans.Update { + t.Fatalf("incorrect initial plan for instance A\nwant an 'update' change\ngot: %s", spew.Sdump(planA)) + } + planB := plan.Changes.ResourceInstance(instB) + if planB == nil || planB.Action != plans.NoOp { + t.Fatalf("incorrect initial plan for instance B\nwant a 'no-op' change\ngot: %s", spew.Sdump(planB)) + } + + _, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("final apply succeeded, but should've failed with a postcondition error") + } + if len(diags) != 1 { + t.Fatalf("expected exactly one diagnostic, but got: %s", diags.Err().Error()) + } + if got, want := diags[0].Description().Summary, "Resource postcondition failed"; got != want { + t.Fatalf("wrong diagnostic summary\ngot: %s\nwant: %s", got, want) + } + } +} + +// pass an input through some expanded values, and back to a provider to make +// sure we can fully evaluate a provider configuration during a destroy plan. 
+func TestContext2Apply_destroyWithConfiguredProvider(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "in" { + type = map(string) + default = { + "a" = "first" + "b" = "second" + } +} + +module "mod" { + source = "./mod" + for_each = var.in + in = each.value +} + +locals { + config = [for each in module.mod : each.out] +} + +provider "other" { + output = [for each in module.mod : each.out] + local = local.config + var = var.in +} + +resource "other_object" "other" { +} +`, + "./mod/main.tf": ` +variable "in" { + type = string +} + +data "test_object" "d" { + test_string = var.in +} + +resource "test_object" "a" { + test_string = var.in +} + +output "out" { + value = data.test_object.d.output +} +`}) + + testProvider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, + }, + DataSources: map[string]providers.Schema{ + "test_object": providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + } + + testProvider.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + cfg := req.Config.AsValueMap() + s := cfg["test_string"].AsString() + if !strings.Contains("firstsecond", s) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("expected 'first' or 'second', got %s", s)) + return resp + } + + cfg["output"] = cty.StringVal(s + "-ok") + resp.State = cty.ObjectVal(cfg) + return resp + } + + otherProvider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "output": 
{ + Type: cty.List(cty.String), + Optional: true, + }, + "local": { + Type: cty.List(cty.String), + Optional: true, + }, + "var": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "other_object": providers.Schema{Block: simpleTestSchema()}, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), + addrs.NewDefaultProvider("other"): testProviderFuncFixed(otherProvider), + }, + }) + + opts := SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)) + plan, diags := ctx.Plan(m, states.NewState(), opts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // Resource changes which have dependencies across providers which + // themselves depend on resources can result in cycles. + // Because other_object transitively depends on the module resources + // through its provider, we trigger changes on both sides of this boundary + // to ensure we can create a valid plan. + // + // Taint the object to make sure a replacement works in the plan. + otherObjAddr := mustResourceInstanceAddr("other_object.other") + otherObj := state.ResourceInstance(otherObjAddr) + otherObj.Current.Status = states.ObjectTainted + // Force a change which needs to be reverted. 
+	testObjAddr := mustResourceInstanceAddr(`module.mod["a"].test_object.a`)
+	testObjA := state.ResourceInstance(testObjAddr)
+	testObjA.Current.AttrsJSON = []byte(`{"test_bool":null,"test_list":null,"test_map":null,"test_number":null,"test_string":"changed"}`)
+
+	_, diags = ctx.Plan(m, state, opts)
+	assertNoErrors(t, diags)
+
+	otherProvider.ConfigureProviderCalled = false
+	otherProvider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
+		// check that our config is complete, even during a destroy plan
+		expected := cty.ObjectVal(map[string]cty.Value{
+			"local":  cty.ListVal([]cty.Value{cty.StringVal("first-ok"), cty.StringVal("second-ok")}),
+			"output": cty.ListVal([]cty.Value{cty.StringVal("first-ok"), cty.StringVal("second-ok")}),
+			"var": cty.MapVal(map[string]cty.Value{
+				"a": cty.StringVal("first"),
+				"b": cty.StringVal("second"),
+			}),
+		})
+
+		if !req.Config.RawEquals(expected) {
+			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf(
+				`incorrect provider config:
expected: %#v
got:      %#v`,
+				expected, req.Config))
+		}
+
+		return resp
+	}
+
+	opts.Mode = plans.DestroyMode
+	// skip refresh so that we don't configure the provider before the destroy plan
+	opts.SkipRefresh = true
+
+	// the destroy plan must fully evaluate the provider configuration,
+	// which is verified by otherProvider.ConfigureProviderFn above
+	_, diags = ctx.Plan(m, state, opts)
+	assertNoErrors(t, diags)
+
+	if !otherProvider.ConfigureProviderCalled {
+		t.Fatal("failed to configure provider during destroy plan")
+	}
+}
+
+// check that a provider can verify a planned destroy
+func TestContext2Apply_plannedDestroy(t *testing.T) {
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "x" {
+  test_string = "ok"
+}`,
+	})
+
+	p := simpleMockProvider()
+	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+		if !req.ProposedNewState.IsNull() {
+			// we should
only be destroying in this test + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unexpected plan with %#v", req.ProposedNewState)) + return resp + } + + resp.PlannedState = req.ProposedNewState + // we're going to verify the destroy plan by inserting private data required for destroy + resp.PlannedPrivate = append(resp.PlannedPrivate, []byte("planned")...) + return resp + } + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // if the value is nil, we return that directly to correspond to a delete + if !req.PlannedState.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unexpected apply with %#v", req.PlannedState)) + return resp + } + + resp.NewState = req.PlannedState + + // make sure we get our private data from the plan + private := string(req.PlannedPrivate) + if private != "planned" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("missing private data from plan, got %q", private)) + } + return resp + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.x").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"ok"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + // we don't want to refresh, because that actually runs a normal plan + SkipRefresh: true, + }) + if diags.HasErrors() { + t.Fatalf("plan: %s", diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply: %s", diags.Err()) + } +} + +func TestContext2Apply_missingOrphanedResource(t *testing.T) { + m := testModuleInline(t, 
map[string]string{ + "main.tf": ` +# changed resource address to create a new object +resource "test_object" "y" { + test_string = "y" +} +`, + }) + + p := simpleMockProvider() + + // report the prior value is missing + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = cty.NullVal(req.PriorState.Type()) + return resp + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.x").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"x"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + opts := SimplePlanOpts(plans.NormalMode, nil) + plan, diags := ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +// Outputs should not cause evaluation errors during destroy +// Check eval from both root level outputs and module outputs, which are +// handled differently during apply. +func TestContext2Apply_outputsNotToEvaluate(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" + cond = false +} + +output "from_resource" { + value = module.mod.from_resource +} + +output "from_data" { + value = module.mod.from_data +} +`, + + "./mod/main.tf": ` +variable "cond" { + type = bool +} + +module "mod" { + source = "../mod2/" + cond = var.cond +} + +output "from_resource" { + value = module.mod.resource +} + +output "from_data" { + value = module.mod.data +} +`, + + "./mod2/main.tf": ` +variable "cond" { + type = bool +} + +resource "test_object" "x" { + count = var.cond ? 0:1 +} + +data "test_object" "d" { + count = var.cond ? 
0:1 +} + +output "resource" { + value = var.cond ? null : test_object.x.*.test_string[0] +} + +output "data" { + value = one(data.test_object.d[*].test_string) +} +`}) + + p := simpleMockProvider() + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + // apply the state + opts := SimplePlanOpts(plans.NormalMode, nil) + plan, diags := ctx.Plan(m, states.NewState(), opts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // and destroy + opts = SimplePlanOpts(plans.DestroyMode, nil) + plan, diags = ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // and destroy again with no state + if !state.Empty() { + t.Fatal("expected empty state, got", state) + } + + opts = SimplePlanOpts(plans.DestroyMode, nil) + plan, diags = ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +// don't evaluate conditions on outputs when destroying +func TestContext2Apply_noOutputChecksOnDestroy(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" +} + +output "from_resource" { + value = module.mod.from_resource +} +`, + + "./mod/main.tf": ` +resource "test_object" "x" { + test_string = "wrong val" +} + +output "from_resource" { + value = test_object.x.test_string + precondition { + condition = test_object.x.test_string == "ok" + error_message = "resource error" + } +} +`}) + + p := simpleMockProvider() + + state := states.NewState() + mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey)) + mod.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("test_object.x").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"wrong_val"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + opts := SimplePlanOpts(plans.DestroyMode, nil) + plan, diags := ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +// -refresh-only should update checks +func TestContext2Apply_refreshApplyUpdatesChecks(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "x" { + test_string = "ok" + lifecycle { + postcondition { + condition = self.test_string == "ok" + error_message = "wrong val" + } + } +} + +output "from_resource" { + value = test_object.x.test_string + precondition { + condition = test_object.x.test_string == "ok" + error_message = "wrong val" + } +} +`}) + + p := simpleMockProvider() + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("ok"), + }), + } + + state := states.NewState() + mod := state.EnsureModule(addrs.RootModuleInstance) + mod.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.x").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"wrong val"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + mod.SetOutputValue("from_resource", cty.StringVal("wrong val"), false) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + opts := SimplePlanOpts(plans.RefreshOnlyMode, nil) + plan, diags := ctx.Plan(m, state, opts) 
+ assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + resCheck := state.CheckResults.GetObjectResult(mustResourceInstanceAddr("test_object.x")) + if resCheck.Status != checks.StatusPass { + t.Fatalf("unexpected check %s: %s\n", resCheck.Status, resCheck.FailureMessages) + } + + outAddr := addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance, + OutputValue: addrs.OutputValue{ + Name: "from_resource", + }, + } + outCheck := state.CheckResults.GetObjectResult(outAddr) + if outCheck.Status != checks.StatusPass { + t.Fatalf("unexpected check %s: %s\n", outCheck.Status, outCheck.FailureMessages) + } +} + +// NoOp changes may have conditions to evaluate, but should not re-plan and +// apply the entire resource. +func TestContext2Apply_noRePlanNoOp(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "x" { +} + +resource "test_object" "y" { + # test_object.w is being re-created, so this precondition must be evaluated + # during apply, however this resource should otherwise be a NoOp. 
+ lifecycle { + precondition { + condition = test_object.x.test_string == null + error_message = "test_object.x.test_string should be null" + } + } +} +`}) + + p := simpleMockProvider() + // make sure we can compute the attr + testString := p.GetProviderSchemaResponse.ResourceTypes["test_object"].Block.Attributes["test_string"] + testString.Computed = true + testString.Optional = false + + yAddr := mustResourceInstanceAddr("test_object.y") + + state := states.NewState() + mod := state.RootModule() + mod.SetResourceInstanceCurrent( + yAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"y"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + opts := SimplePlanOpts(plans.NormalMode, nil) + plan, diags := ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + for _, c := range plan.Changes.Resources { + if c.Addr.Equal(yAddr) && c.Action != plans.NoOp { + t.Fatalf("unexpected %s change for test_object.y", c.Action) + } + } + + // test_object.y is a NoOp change from the plan, but is included in the + // graph due to the conditions which must be evaluated. This however should + // not cause the resource to be re-planned. + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + testString := req.ProposedNewState.GetAttr("test_string") + if !testString.IsNull() && testString.AsString() == "y" { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Unexpected apply-time plan for test_object.y. 
Original plan was a NoOp")) + } + resp.PlannedState = req.ProposedNewState + return resp + } + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +// ensure all references from preconditions are tracked through plan and apply +func TestContext2Apply_preconditionErrorMessageRef(t *testing.T) { + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "nested" { + source = "./mod" +} + +output "nested_a" { + value = module.nested.a +} +`, + + "mod/main.tf": ` +variable "boop" { + default = "boop" +} + +variable "msg" { + default = "Incorrect boop." +} + +output "a" { + value = "x" + + precondition { + condition = var.boop == "boop" + error_message = var.msg + } +} +`, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_destroyNullModuleOutput(t *testing.T) { + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "null_module" { + source = "./mod" +} + +locals { + module_output = module.null_module.null_module_test +} + +output "test_root" { + value = module.null_module.test_output +} + +output "root_module" { + value = local.module_output #fails +} +`, + + "mod/main.tf": ` +output "test_output" { + value = "test" +} + +output "null_module_test" { + value = null +} +`, + }) + + // verify plan and apply + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // now 
destroy + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_moduleOutputWithSensitiveAttrs(t *testing.T) { + // Ensure that nested sensitive marks are stored when accessing non-root + // module outputs, and that they do not cause the entire output value to + // become sensitive. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" +} + +resource "test_resource" "b" { + // if the module output were wholly sensitive it would not be valid to use in + // for_each + for_each = module.mod.resources + value = each.value.output +} + +output "root_output" { + // The root output cannot contain any sensitive marks at all. + // Applying nonsensitive would fail here if the nested sensitive mark were + // not maintained through the output. + value = [ for k, v in module.mod.resources : nonsensitive(v.output) ] +} +`, + "./mod/main.tf": ` +resource "test_resource" "a" { + for_each = {"key": "value"} + value = each.key +} + +output "resources" { + value = test_resource.a +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Sensitive: true, + Computed: true, + }, + }, + }, + }, + }) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_timestamps(t *testing.T) { + m := testModuleInline(t, 
map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + id = "timestamp" + value = timestamp() +} + +resource "test_resource" "b" { + id = "plantimestamp" + value = plantimestamp() +} +`, + }) + + var plantime time.Time + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(request providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + values := request.ProposedNewState.AsValueMap() + if id := values["id"]; id.AsString() == "plantimestamp" { + var err error + plantime, err = time.Parse(time.RFC3339, values["value"].AsString()) + if err != nil { + t.Errorf("couldn't parse plan time: %s", err) + } + } + + return providers.PlanResourceChangeResponse{ + PlannedState: request.ProposedNewState, + } + } + p.ApplyResourceChangeFn = func(request providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + values := request.PlannedState.AsValueMap() + if id := values["id"]; id.AsString() == "timestamp" { + applytime, err := time.Parse(time.RFC3339, values["value"].AsString()) + if err != nil { + t.Errorf("couldn't parse apply time: %s", err) + } + + if applytime.Before(plantime) { + t.Errorf("applytime (%s) should be after plantime (%s)", applytime.Format(time.RFC3339), plantime.Format(time.RFC3339)) + } + } else if id.AsString() == "plantimestamp" { + otherplantime, err := time.Parse(time.RFC3339, values["value"].AsString()) + if err != nil { + t.Errorf("couldn't parse plan time: %s", err) + } + + if !plantime.Equal(otherplantime) { + t.Errorf("plantime changed from (%s) to (%s) during apply", plantime.Format(time.RFC3339), otherplantime.Format(time.RFC3339)) + } + } + + return 
providers.ApplyResourceChangeResponse{ + NewState: request.PlannedState, + } + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_destroyUnusedModuleProvider(t *testing.T) { + // an unused provider within a module should not be called during destroy + unusedProvider := testProvider("unused") + testProvider := testProvider("test") // NB: shadows the package-level testProvider helper for the rest of this test + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), + addrs.NewDefaultProvider("unused"): testProviderFuncFixed(unusedProvider), + }, + }) + + unusedProvider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("configuration failed")) + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" +} + +resource "test_resource" "test" { +} +`, + + "mod/main.tf": ` +provider "unused" { +} + +resource "unused_resource" "test" { +} +`, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_import(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + id = "importable" +} + +import { + to = test_resource.a + id = "importable" +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes:
map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ImportResourceStateFn = func(req providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + return providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_instance", // NOTE(review): does not match the "test_resource" type being imported — confirm the mismatch is intentional + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("importable"), + }), + }, + }, + } + } + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + if !hook.PreApplyImportCalled { + t.Fatalf("PreApplyImport hook not called") + } + if addr, wantAddr := hook.PreApplyImportAddr, mustResourceInstanceAddr("test_resource.a"); !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + + if !hook.PostApplyImportCalled { + t.Fatalf("PostApplyImport hook not called") + } + if addr, wantAddr := hook.PostApplyImportAddr, mustResourceInstanceAddr("test_resource.a"); !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } +} + +func TestContext2Apply_noExternalReferences(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "foo" +} + +locals { + local_value = test_object.a.test_string +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ +
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), nil) + if diags.HasErrors() { + t.Errorf("expected no errors, but got %s", diags) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Errorf("expected no errors, but got %s", diags) + } + + // We didn't specify any external references, so the unreferenced local + // value should have been tidied up and never made it into the state. + module := state.RootModule() + if len(module.LocalValues) > 0 { + t.Errorf("expected no local values in the state but found %d", len(module.LocalValues)) + } +} + +func TestContext2Apply_withExternalReferences(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "foo" +} + +locals { + local_value = test_object.a.test_string +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + ExternalReferences: []*addrs.Reference{ + mustReference("local.local_value"), + }, + }) + if diags.HasErrors() { + t.Errorf("expected no errors, but got %s", diags) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Errorf("expected no errors, but got %s", diags) + } + + // We did specify the local value in the external references, so it should + // have been preserved even though it is not referenced by anything directly + // in the config. 
+ module := state.RootModule() + if module.LocalValues["local_value"].AsString() != "foo" { + t.Errorf("expected local value to be \"foo\" but was \"%s\"", module.LocalValues["local_value"].AsString()) + } +} + +// TestContext2Apply_forgetOrphanAndDeposed verifies that a "removed" block +// drops both the current and deposed objects from state without calling the +// provider's ApplyResourceChange and without firing the PostApply hook. +func TestContext2Apply_forgetOrphanAndDeposed(t *testing.T) { + deposedKey := states.DeposedKey("deposed") + addr := "aws_instance.baz" + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = aws_instance.baz + } + `, + }) + hook := new(MockHook) + p := testProvider("aws") + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr(addr).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr(addr).Resource, + deposedKey, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + // The hook must be registered with the context; otherwise the + // hook.PostApplyCalled assertion at the end of this test can never fail. + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.PlanResourceChangeFn = testDiffFn + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !s.Empty() { + t.Fatalf("State should be empty") + } + + if p.ApplyResourceChangeCalled { + t.Fatalf("When we forget we don't call the provider's ApplyResourceChange unlike in destroy") + } + + if hook.PostApplyCalled { + t.Fatalf("PostApply hook should not be called as part of forget") + } +} diff --git a/pkg/tofu/context_apply_checks_test.go b/pkg/tofu/context_apply_checks_test.go new file mode 100644 index
00000000000..b1173f788b9 --- /dev/null +++ b/pkg/tofu/context_apply_checks_test.go @@ -0,0 +1,821 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// This file contains 'integration' tests for the OpenTofu check blocks. +// +// These tests could live in context_apply_test or context_apply2_test but given +// the size of those files, it makes sense to keep these check related tests +// grouped together. + +type checksTestingStatus struct { + status checks.Status + messages []string +} + +func TestContextChecks(t *testing.T) { + tests := map[string]struct { + configs map[string]string + plan map[string]checksTestingStatus + planError string + planWarning string + apply map[string]checksTestingStatus + applyError string + applyWarning string + state *states.State + provider *MockProvider + providerHook func(*MockProvider) + }{ + "passing": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +check "passing" { + data "checks_object" "positive" {} + + assert { + condition = data.checks_object.positive.number >= 0 + error_message = "negative number" + } +} +`, + }, + plan: map[string]checksTestingStatus{ + "passing": { + status: checks.StatusPass, + }, + }, + apply: map[string]checksTestingStatus{ + "passing": { + status: checks.StatusPass, + }, + }, + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(0), + }), + } + }, + }, + }, + "failing": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +check "failing" { + data "checks_object" "positive" {} + + assert { + condition = data.checks_object.positive.number >= 0 + error_message = "negative number" + } +} +`, + }, + plan: map[string]checksTestingStatus{ + "failing": { + status: checks.StatusFail, + messages: []string{"negative number"}, + }, + }, + planWarning: "Check block assertion failed: negative number", + apply: map[string]checksTestingStatus{ + "failing": { + status: checks.StatusFail, + messages: []string{"negative number"}, + }, + }, + applyWarning: "Check block assertion failed: negative number", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(-1), + }), + } + }, + }, + }, + "mixed": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +check "failing" { + data "checks_object" "neutral" {} + + assert { + condition = data.checks_object.neutral.number >= 0 + error_message = "negative number" + } + + assert { + condition = data.checks_object.neutral.number < 0 + error_message = "positive number" + } +} 
+`, + }, + plan: map[string]checksTestingStatus{ + "failing": { + status: checks.StatusFail, + messages: []string{"positive number"}, + }, + }, + planWarning: "Check block assertion failed: positive number", + apply: map[string]checksTestingStatus{ + "failing": { + status: checks.StatusFail, + messages: []string{"positive number"}, + }, + }, + applyWarning: "Check block assertion failed: positive number", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(0), + }), + } + }, + }, + }, + "nested data blocks reload during apply": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +data "checks_object" "data_block" {} + +check "data_block" { + assert { + condition = data.checks_object.data_block.number >= 0 + error_message = "negative number" + } +} + +check "nested_data_block" { + data "checks_object" "nested_data_block" {} + + assert { + condition = data.checks_object.nested_data_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + plan: map[string]checksTestingStatus{ + "nested_data_block": { + status: checks.StatusFail, + messages: []string{"negative number"}, + }, + "data_block": { + status: checks.StatusFail, + messages: []string{"negative number"}, + }, + }, + planWarning: "2 warnings:\n\n- Check block assertion failed: negative number\n- Check block assertion failed: negative number", + apply: map[string]checksTestingStatus{ + "nested_data_block": { + status: checks.StatusPass, + }, + "data_block": { + status: 
checks.StatusFail, + messages: []string{"negative number"}, + }, + }, + applyWarning: "Check block assertion failed: negative number", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(-1), + }), + } + }, + }, + providerHook: func(provider *MockProvider) { + provider.ReadDataSourceFn = func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // The data returned by the data sources are changing + // between the plan and apply stage. The nested data block + // will update to reflect this while the normal data block + // will not detect the change. 
+ return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(0), + }), + } + } + }, + }, + "returns unknown for unknown config": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +resource "checks_object" "resource_block" {} + +check "resource_block" { + data "checks_object" "data_block" { + id = checks_object.resource_block.id + } + + assert { + condition = data.checks_object.data_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + plan: map[string]checksTestingStatus{ + "resource_block": { + status: checks.StatusUnknown, + }, + }, + planWarning: "Check block assertion known after apply: The condition could not be evaluated at this time, a result will be known when this plan is applied.", + apply: map[string]checksTestingStatus{ + "resource_block": { + status: checks.StatusPass, + }, + }, + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + PlanResourceChangeFn: func(request providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + } + }, + ApplyResourceChangeFn: func(request providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("7A9F887D-44C7-4281-80E5-578E41F99DFC"), + }), + } + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + values := request.Config.AsValueMap() + if id, ok := values["id"]; ok { + if id.IsKnown() && id.AsString() == "7A9F887D-44C7-4281-80E5-578E41F99DFC" { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("7A9F887D-44C7-4281-80E5-578E41F99DFC"), + "number": cty.NumberIntVal(0), + }), + } + } + } + + return providers.ReadDataSourceResponse{ + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "shouldn't make it here", "really shouldn't make it here")}, + } + }, + }, + }, + "failing nested data source doesn't block the plan": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +check "error" { + data "checks_object" "data_block" {} + + assert { + condition = data.checks_object.data_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + plan: map[string]checksTestingStatus{ + "error": { + status: checks.StatusFail, + messages: []string{ + "data source read failed: something bad happened and the provider couldn't read the data source", + }, + }, + }, + planWarning: "data source read failed: something bad happened and the provider couldn't read the data source", + apply: map[string]checksTestingStatus{ + "error": { + status: checks.StatusFail, + messages: []string{ + "data source read failed: something bad happened and the provider couldn't read the data source", + }, + }, + }, + applyWarning: "data source read failed: something bad happened and the provider couldn't read the data source", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + 
}, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "data source read failed", "something bad happened and the provider couldn't read the data source")}, + } + }, + }, + }, "failing nested data source should prevent checks from executing": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +resource "checks_object" "resource_block" { + number = -1 +} + +check "error" { + data "checks_object" "data_block" {} + + assert { + condition = checks_object.resource_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "checks_object", + Name: "resource_block", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"number": -1}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + }), + plan: map[string]checksTestingStatus{ + "error": { + status: checks.StatusFail, + messages: []string{ + "data source read failed: something bad happened and the provider couldn't read the data source", + }, + }, + }, + planWarning: "data source read failed: something bad happened and the provider couldn't read the data source", + apply: map[string]checksTestingStatus{ + "error": { + status: checks.StatusFail, + messages: []string{ + "data source read failed: something bad happened and the provider couldn't read the data source", + }, + }, + }, + applyWarning: "data source read failed: something bad happened and the provider couldn't read the data source", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: 
&providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + PlanResourceChangeFn: func(request providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(-1), + }), + } + }, + ApplyResourceChangeFn: func(request providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(-1), + }), + } + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "data source read failed", "something bad happened and the provider couldn't read the data source")}, + } + }, + }, + }, + "check failing in state and passing after plan and apply": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +resource "checks_object" "resource" { + number = 0 +} + +check "passing" { + assert { + condition = checks_object.resource.number >= 0 + error_message = "negative number" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "checks_object", + Name: "resource", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: 
[]byte(`{"number": -1}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }) + }), + plan: map[string]checksTestingStatus{ + "passing": { + status: checks.StatusPass, + }, + }, + apply: map[string]checksTestingStatus{ + "passing": { + status: checks.StatusPass, + }, + }, + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Required: true, + }, + }, + }, + }, + }, + }, + PlanResourceChangeFn: func(request providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(0), + }), + } + }, + ApplyResourceChangeFn: func(request providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "number": cty.NumberIntVal(0), + }), + } + }, + }, + }, + "failing data source does block the plan": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +data "checks_object" "data_block" {} + +check "error" { + assert { + condition = data.checks_object.data_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + planError: "data source read failed: something bad happened and the provider couldn't read the data source", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "number": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request 
providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "data source read failed", "something bad happened and the provider couldn't read the data source")}, + } + }, + }, + }, + "invalid reference into check block": { + configs: map[string]string{ + "main.tf": ` +provider "checks" {} + +data "checks_object" "data_block" { + id = data.checks_object.nested_data_block.id +} + +check "error" { + data "checks_object" "nested_data_block" {} + + assert { + condition = data.checks_object.data_block.number >= 0 + error_message = "negative number" + } +} +`, + }, + planError: "Reference to scoped resource: The referenced data resource \"checks_object\" \"nested_data_block\" is not available from this context.", + provider: &MockProvider{ + Meta: "checks", + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + DataSources: map[string]providers.Schema{ + "checks_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + ReadDataSourceFn: func(request providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + input := request.Config.AsValueMap() + if _, ok := input["id"]; ok { + return providers.ReadDataSourceResponse{ + State: request.Config, + } + } + + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), + } + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + configs := testModuleInline(t, test.configs) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider(test.provider.Meta.(string)): testProviderFuncFixed(test.provider), + }, + }) + + initialState := states.NewState() + if test.state != nil { + initialState = test.state + } + 
+ plan, diags := ctx.Plan(configs, initialState, &PlanOpts{ + Mode: plans.NormalMode, + }) + if validateCheckDiagnostics(t, "planning", test.planWarning, test.planError, diags) { + return + } + validateCheckResults(t, "planning", test.plan, plan.Checks) + + if test.providerHook != nil { + // This gives an opportunity to change the behaviour of the + // provider between the plan and apply stages. + test.providerHook(test.provider) + } + + state, diags := ctx.Apply(plan, configs) + if validateCheckDiagnostics(t, "apply", test.applyWarning, test.applyError, diags) { + return + } + validateCheckResults(t, "apply", test.apply, state.CheckResults) + }) + } +} + +// validateCheckDiagnostics compares the diagnostics of a plan or apply stage +// against the expected warning and error strings, reporting mismatches on t. +// It returns true when an error was expected, signalling the caller to end +// the test early. +func validateCheckDiagnostics(t *testing.T, stage string, expectedWarning, expectedError string, actual tfdiags.Diagnostics) bool { + if expectedError != "" { + if !actual.HasErrors() { + t.Errorf("expected %s to error with \"%s\", but no errors were returned", stage, expectedError) + } else if expectedError != actual.Err().Error() { + t.Errorf("expected %s to error with \"%s\" but found \"%s\"", stage, expectedError, actual.Err()) + } + + // If we expected an error then we won't finish the rest of the test.
+ return true + } + + if expectedWarning != "" { + warnings := actual.ErrWithWarnings() + if warnings == nil { // reuse the value computed above rather than calling ErrWithWarnings twice + t.Errorf("expected %s to warn with \"%s\", but no errors were returned", stage, expectedWarning) + } else if expectedWarning != warnings.Error() { + t.Errorf("expected %s to warn with \"%s\" but found \"%s\"", stage, expectedWarning, warnings) + } + } else { + if actual.ErrWithWarnings() != nil { + t.Errorf("expected %s to produce no diagnostics but found \"%s\"", stage, actual.ErrWithWarnings()) + } + } + + assertNoErrors(t, actual) + return false +} + +// validateCheckResults asserts that every expected check status and failure +// message matches what the plan or apply stage actually recorded. +func validateCheckResults(t *testing.T, stage string, expected map[string]checksTestingStatus, actual *states.CheckResults) { + + // Just a quick sanity check that the plan or apply process didn't create + // some non-existent checks. + if len(expected) != len(actual.ConfigResults.Keys()) { + t.Errorf("expected %d check results but found %d after %s", len(expected), len(actual.ConfigResults.Keys()), stage) + } + + // Now, lets make sure the checks all match what we expect. + for check, want := range expected { + results := actual.GetObjectResult(addrs.Check{ + Name: check, + }.Absolute(addrs.RootModuleInstance)) + + if results.Status != want.status { + t.Errorf("%s: wanted %s but got %s after %s", check, want.status, results.Status, stage) + } + + if len(want.messages) != len(results.FailureMessages) { + t.Errorf("%s: expected %d failure messages but had %d after %s", check, len(want.messages), len(results.FailureMessages), stage) + } + + // limit is the longer of the two message lists; named "limit" to avoid + // shadowing the max builtin introduced in Go 1.21. + limit := len(want.messages) + if len(results.FailureMessages) > limit { + limit = len(results.FailureMessages) + } + + for ix := 0; ix < limit; ix++ { + var expected, actual string + if ix < len(want.messages) { + expected = want.messages[ix] + } + if ix < len(results.FailureMessages) { + actual = results.FailureMessages[ix] + } + + // Order matters!
+ if actual != expected { + t.Errorf("%s: expected failure message at %d to be \"%s\" but was \"%s\" after %s", check, ix, expected, actual, stage) + } + } + + } +} diff --git a/pkg/tofu/context_apply_test.go b/pkg/tofu/context_apply_test.go new file mode 100644 index 00000000000..92006d2b99c --- /dev/null +++ b/pkg/tofu/context_apply_test.go @@ -0,0 +1,12882 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "log" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestContext2Apply_basic(t *testing.T) { + m := testModule(t, "apply-good") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) < 2 { + t.Fatalf("bad: %#v", mod.Resources) + } + + 
actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_stop(t *testing.T) { + t.Parallel() + + m := testModule(t, "apply-stop") + stopCh := make(chan struct{}) + waitCh := make(chan struct{}) + stoppedCh := make(chan struct{}) + stopCalled := uint32(0) + applyStopped := uint32(0) + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "indefinite": { + Version: 1, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "result": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + PlanResourceChangeFn: func(prcr providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + log.Printf("[TRACE] TestContext2Apply_stop: no-op PlanResourceChange") + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "result": cty.UnknownVal(cty.String), + }), + } + }, + ApplyResourceChangeFn: func(arcr providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + // This will unblock the main test code once we reach this + // point, so that it'll then be guaranteed to call Stop + // while we're waiting in here. + close(waitCh) + + log.Printf("[TRACE] TestContext2Apply_stop: ApplyResourceChange waiting for Stop call") + // This will block until StopFn closes this channel below. + <-stopCh + atomic.AddUint32(&applyStopped, 1) + // This unblocks StopFn below, thereby acknowledging the request + // to stop. + close(stoppedCh) + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "result": cty.StringVal("complete"), + }), + } + }, + StopFn: func() error { + // Closing this channel will unblock the channel read in + // ApplyResourceChangeFn above. 
+ log.Printf("[TRACE] TestContext2Apply_stop: Stop called") + atomic.AddUint32(&stopCalled, 1) + close(stopCh) + // This will block until ApplyResourceChange has reacted to + // being stopped. + log.Printf("[TRACE] TestContext2Apply_stop: Waiting for ApplyResourceChange to react to being stopped") + <-stoppedCh + log.Printf("[TRACE] TestContext2Apply_stop: Stop is completing") + return nil + }, + } + + hook := &testHook{} + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.MustParseProviderSourceString("terraform.io/test/indefinite"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // We'll reset the hook events before we apply because we only care about + // the apply-time events. + hook.Calls = hook.Calls[:0] + + // We'll apply in the background so that we can call Stop in the foreground. + stateCh := make(chan *states.State) + go func(plan *plans.Plan) { + state, _ := ctx.Apply(plan, m) + stateCh <- state + }(plan) + + // We'll wait until the provider signals that we've reached the + // ApplyResourceChange function, so we can guarantee the expected + // order of operations so our hook events below will always match. + t.Log("waiting for the apply phase to get started") + <-waitCh + + // This will block until the apply operation has unwound, so we should + // be able to observe all of the apply side-effects afterwards. 
+ t.Log("waiting for ctx.Stop to return") + ctx.Stop() + + t.Log("waiting for apply goroutine to return state") + state := <-stateCh + + t.Log("apply is all complete") + if state == nil { + t.Fatalf("final state is nil") + } + + if got, want := atomic.LoadUint32(&stopCalled), uint32(1); got != want { + t.Errorf("provider's Stop method was not called") + } + if got, want := atomic.LoadUint32(&applyStopped), uint32(1); got != want { + // This should not happen if things are working correctly but this is + // to catch weird situations such as if a bug in this test causes us + // to inadvertently stop OpenTofu before it reaches the apply phase, + // or if the apply operation fails in a way that causes it not to reach + // the ApplyResourceChange function. + t.Errorf("somehow provider's ApplyResourceChange didn't react to being stopped") + } + + // Because we interrupted the apply phase while applying the resource, + // we should have halted immediately after we finished visiting that + // resource. We don't visit indefinite.bar at all. + gotEvents := hook.Calls + wantEvents := []*testHookCall{ + {"PreDiff", "indefinite.foo"}, + {"PostDiff", "indefinite.foo"}, + {"PreApply", "indefinite.foo"}, + {"PostApply", "indefinite.foo"}, + {"PostStateUpdate", ""}, // State gets updated one more time to include the apply result. + } + // The "Stopping" event gets sent to the hook asynchronously from the others + // because it is triggered in the ctx.Stop call above, rather than from + // the goroutine where ctx.Apply was running, and therefore it doesn't + // appear in a guaranteed position in gotEvents. We already checked above + // that the provider's Stop method was called, so we'll just strip that + // event out of our gotEvents. 
+ seenStopped := false + for i, call := range gotEvents { + if call.Action == "Stopping" { + seenStopped = true + // We'll shift up everything else in the slice to create the + // effect of the Stopping event not having been present at all, + // which should therefore make this slice match "wantEvents". + copy(gotEvents[i:], gotEvents[i+1:]) + gotEvents = gotEvents[:len(gotEvents)-1] + break + } + } + if diff := cmp.Diff(wantEvents, gotEvents); diff != "" { + t.Errorf("wrong hook events\n%s", diff) + } + if !seenStopped { + t.Errorf("'Stopping' event did not get sent to the hook") + } + + rov := state.OutputValue(addrs.OutputValue{Name: "result"}.Absolute(addrs.RootModuleInstance)) + if rov != nil && rov.Value != cty.NilVal && !rov.Value.IsNull() { + t.Errorf("'result' output value unexpectedly populated: %#v", rov.Value) + } + + resourceAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "indefinite", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + rv := state.ResourceInstance(resourceAddr) + if rv == nil || rv.Current == nil { + t.Fatalf("no state entry for %s", resourceAddr) + } + + resourceAddr.Resource.Resource.Name = "bar" + rv = state.ResourceInstance(resourceAddr) + if rv != nil && rv.Current != nil { + t.Fatalf("unexpected state entry for %s", resourceAddr) + } +} + +func TestContext2Apply_unstable(t *testing.T) { + // This tests behavior when the configuration contains an unstable value, + // such as the result of uuid() or timestamp(), where each call produces + // a different result. 
+ // + // This is an important case to test because we need to ensure that + // we don't re-call the function during the apply phase: the value should + // be fixed during plan + + m := testModule(t, "apply-unstable") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected error during Plan: %s", diags.Err()) + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block + rds := plan.Changes.ResourceInstance(addr) + rd, err := rds.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + if rd.After.GetAttr("random").IsKnown() { + t.Fatalf("Attribute 'random' has known value %#v; should be unknown in plan", rd.After.GetAttr("random")) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("unexpected error during Apply: %s", diags.Err()) + } + + mod := state.Module(addr.Module) + rss := state.ResourceInstance(addr) + + if len(mod.Resources) != 1 { + t.Fatalf("wrong number of resources %d; want 1", len(mod.Resources)) + } + + rs, err := rss.Current.Decode(schema.ImpliedType()) + if err != nil { + t.Fatalf("decode error: %v", err) + } + got := rs.Value.GetAttr("random") + if !got.IsKnown() { + t.Fatalf("random is still unknown after apply") + } + if got, want := len(got.AsString()), 36; got != want { + t.Fatalf("random string has wrong length %d; want %d", got, want) + } +} + +func TestContext2Apply_escape(t *testing.T) { + m := testModule(t, "apply-escape") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := 
testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = "bar" + type = aws_instance +`) +} + +func TestContext2Apply_resourceCountOneList(t *testing.T) { + m := testModule(t, "apply-resource-count-one-list") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoDiagnostics(t, diags) + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(`null_resource.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/null"] + +Outputs: + +test = [foo]`) + if got != want { + t.Fatalf("got:\n%s\n\nwant:\n%s\n", got, want) + } +} +func TestContext2Apply_resourceCountZeroList(t *testing.T) { + m := testModule(t, "apply-resource-count-zero-list") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(` +Outputs: + +test = []`) + if got != 
want { + t.Fatalf("wrong state\n\ngot:\n%s\n\nwant:\n%s\n", got, want) + } +} + +func TestContext2Apply_resourceDependsOnModule(t *testing.T) { + m := testModule(t, "apply-resource-depends-on-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + // verify the apply happens in the correct order + var mu sync.Mutex + var order []string + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + ami := req.PlannedState.GetAttr("ami").AsString() + switch ami { + case "child": + + // make the child slower than the parent + time.Sleep(50 * time.Millisecond) + + mu.Lock() + order = append(order, "child") + mu.Unlock() + case "parent": + mu.Lock() + order = append(order, "parent") + mu.Unlock() + } + + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !reflect.DeepEqual(order, []string{"child", "parent"}) { + t.Fatal("resources applied out of order") + } + + checkStateString(t, state, testTofuApplyResourceDependsOnModuleStr) +} + +// Test that without a config, the Dependencies in the state are enough +// to maintain proper ordering. 
+func TestContext2Apply_resourceDependsOnModuleStateOnly(t *testing.T) { + m := testModule(t, "apply-resource-depends-on-module-empty") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"parent"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.aws_instance.child")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.child").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"child"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + { + // verify the apply happens in the correct order + var mu sync.Mutex + var order []string + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + id := req.PriorState.GetAttr("id") + if id.IsKnown() && id.AsString() == "parent" { + // make the dep slower than the parent + time.Sleep(50 * time.Millisecond) + + mu.Lock() + order = append(order, "child") + mu.Unlock() + } else { + mu.Lock() + order = append(order, "parent") + mu.Unlock() + } + + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + if !reflect.DeepEqual(order, []string{"child", "parent"}) { + t.Fatal("resources applied out of 
order") + } + + checkStateString(t, state, "") + } +} + +func TestContext2Apply_resourceDependsOnModuleDestroy(t *testing.T) { + m := testModule(t, "apply-resource-depends-on-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + var globalState *states.State + { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + globalState = state + } + + { + // Wait for the dependency, sleep, and verify the graph never + // called a child. + var called int32 + var checked bool + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + ami := req.PriorState.GetAttr("ami").AsString() + if ami == "parent" { + checked = true + + // Sleep to allow parallel execution + time.Sleep(50 * time.Millisecond) + + // Verify that called is 0 (dep not called) + if atomic.LoadInt32(&called) != 0 { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("module child should not be called")) + return resp + } + } + + atomic.AddInt32(&called, 1) + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, globalState, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !checked { + t.Fatal("should check") + } + + checkStateString(t, state, ``) + } +} + +func TestContext2Apply_resourceDependsOnModuleGrandchild(t *testing.T) { + m := testModule(t, "apply-resource-depends-on-module-deep") + p := testProvider("aws") + 
p.PlanResourceChangeFn = testDiffFn + + { + // Wait for the dependency, sleep, and verify the graph never + // called a child. + var called int32 + var checked bool + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + planned := req.PlannedState.AsValueMap() + if ami, ok := planned["ami"]; ok && ami.AsString() == "grandchild" { + checked = true + + // Sleep to allow parallel execution + time.Sleep(50 * time.Millisecond) + + // Verify that called is 0 (dep not called) + if atomic.LoadInt32(&called) != 0 { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("aws_instance.a should not be called")) + return resp + } + } + + atomic.AddInt32(&called, 1) + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !checked { + t.Fatal("should check") + } + + checkStateString(t, state, testTofuApplyResourceDependsOnModuleDeepStr) + } +} + +func TestContext2Apply_resourceDependsOnModuleInModule(t *testing.T) { + m := testModule(t, "apply-resource-depends-on-module-in-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + { + // Wait for the dependency, sleep, and verify the graph never + // called a child. 
+ var called int32 + var checked bool + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + planned := req.PlannedState.AsValueMap() + if ami, ok := planned["ami"]; ok && ami.AsString() == "grandchild" { + checked = true + + // Sleep to allow parallel execution + time.Sleep(50 * time.Millisecond) + + // Verify that called is 0 (dep not called) + if atomic.LoadInt32(&called) != 0 { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("something else was applied before grandchild; grandchild should be first")) + return resp + } + } + + atomic.AddInt32(&called, 1) + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !checked { + t.Fatal("should check") + } + + checkStateString(t, state, testTofuApplyResourceDependsOnModuleInModuleStr) + } +} + +func TestContext2Apply_mapVarBetweenModules(t *testing.T) { + m := testModule(t, "apply-map-var-through-module") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(` +Outputs: + +amis_from_module = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 
us-west-2:ami-456789 } + +module.test: + null_resource.noop: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/null"] + + Outputs: + + amis_out = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 us-west-2:ami-456789 }`) + if actual != expected { + t.Fatalf("expected: \n%s\n\ngot: \n%s\n", expected, actual) + } +} + +func TestContext2Apply_refCount(t *testing.T) { + m := testModule(t, "apply-ref-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) < 2 { + t.Fatalf("bad: %#v", mod.Resources) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyRefCountStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_providerAlias(t *testing.T) { + m := testModule(t, "apply-provider-alias") + + // Each provider instance must be completely independent to ensure that we + // are verifying the correct state of each. 
+ p := func() (providers.Interface, error) { + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + return p, nil + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): p, + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) < 2 { + t.Fatalf("bad: %#v", mod.Resources) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProviderAliasStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// Two providers that are configured should both be configured prior to apply +func TestContext2Apply_providerAliasConfigure(t *testing.T) { + m := testModule(t, "apply-provider-alias-configure") + + // Each provider instance must be completely independent to ensure that we + // are verifying the correct state of each. 
+ p := func() (providers.Interface, error) { + p := testProvider("another") + p.ApplyResourceChangeFn = testApplyFn + p.PlanResourceChangeFn = testDiffFn + return p, nil + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("another"): p, + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + // Configure to record calls AFTER Plan above + var configCount int32 + p = func() (providers.Interface, error) { + p := testProvider("another") + p.ApplyResourceChangeFn = testApplyFn + p.PlanResourceChangeFn = testDiffFn + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + atomic.AddInt32(&configCount, 1) + + foo := req.Config.GetAttr("foo").AsString() + if foo != "bar" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("foo: %#v", foo)) + } + + return + } + return p, nil + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("another"): p, + }, + }) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if configCount != 2 { + t.Fatalf("provider config expected 2 calls, got: %d", configCount) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProviderAliasConfigStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// GH-2870 +func TestContext2Apply_providerWarning(t *testing.T) { + m := testModule(t, "apply-provider-warning") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + 
resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("just a warning")) + return + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + `) + if actual != expected { + t.Fatalf("got: \n%s\n\nexpected:\n%s", actual, expected) + } + + if !p.ConfigureProviderCalled { + t.Fatalf("provider Configure() was never called!") + } +} + +func TestContext2Apply_emptyModule(t *testing.T) { + // A module with only outputs (no resources) + m := testModule(t, "apply-empty-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + actual = strings.Replace(actual, " ", "", -1) + expected := strings.TrimSpace(testTofuApplyEmptyModuleStr) + if actual != expected { + t.Fatalf("bad: \n%s\nexpect:\n%s", actual, expected) + } +} + +func TestContext2Apply_createBeforeDestroy(t *testing.T) { + m := testModule(t, "apply-good-create-before") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if got, want := len(mod.Resources), 1; got != want { + t.Logf("state:\n%s", state) + t.Fatalf("wrong number of resources %d; want %d", got, want) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyCreateBeforeStr) + if actual != expected { + t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) + } +} + +func TestContext2Apply_createBeforeDestroyUpdate(t *testing.T) { + m := testModule(t, "apply-good-create-before-update") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + // signal that resource foo has started applying + fooChan := make(chan struct{}) + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + id := req.PriorState.GetAttr("id").AsString() + switch id { + case "bar": + select { + case <-fooChan: + resp.Diagnostics = resp.Diagnostics.Append(errors.New("bar must be updated before foo is destroyed")) + return resp + case <-time.After(100 * time.Millisecond): + // wait a moment to ensure that foo is not going to be destroyed first + } + case "foo": + close(fooChan) + } + + return testApplyFn(req) + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + fooAddr 
:= mustResourceInstanceAddr("aws_instance.foo") + root.SetResourceInstanceCurrent( + fooAddr.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`), + CreateBeforeDestroy: true, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), + CreateBeforeDestroy: true, + Dependencies: []addrs.ConfigResource{fooAddr.ContainingResource().Config()}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) != 1 { + t.Fatalf("bad: %s", state) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyCreateBeforeUpdateStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// This tests that when a CBD resource depends on a non-CBD resource, +// we can still properly apply changes that require new for both. 
+func TestContext2Apply_createBeforeDestroy_dependsNonCBD(t *testing.T) { + m := testModule(t, "apply-cbd-depends-non-cbd") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo", "require_new": "abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = yes + type = aws_instance + value = foo + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = yes + type = aws_instance + `) +} + +func TestContext2Apply_createBeforeDestroy_hook(t *testing.T) { + h := new(MockHook) + m := testModule(t, "apply-good-create-before") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + state := states.NewState() + 
root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + var actual []cty.Value + var actualLock sync.Mutex + h.PostApplyFn = func(addr addrs.AbsResourceInstance, gen states.Generation, sv cty.Value, e error) (HookAction, error) { + actualLock.Lock() + + defer actualLock.Unlock() + actual = append(actual, sv) + return HookActionContinue, nil + } + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + expected := []cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "require_new": cty.StringVal("xyz"), + "type": cty.StringVal("aws_instance"), + }), + cty.NullVal(cty.DynamicPseudoType), + } + + cmpOpt := cmp.Transformer("ctyshim", hcl2shim.ConfigValueFromHCL2) + if !cmp.Equal(actual, expected, cmpOpt) { + t.Fatalf("wrong state snapshot sequence\n%s", cmp.Diff(expected, actual, cmpOpt)) + } +} + +// Test that we can perform an apply with CBD in a count with deposed instances. 
+func TestContext2Apply_createBeforeDestroy_deposedCount(t *testing.T) { + m := testModule(t, "apply-cbd-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("aws_instance.bar[0]").Resource, + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("aws_instance.bar[1]").Resource, + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar.0: + ID = 
foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance + `) +} + +// Test that when we have a deposed instance but a good primary, we still +// destroy the deposed instance. +func TestContext2Apply_createBeforeDestroy_deposedOnly(t *testing.T) { + m := testModule(t, "apply-cbd-deposed-only") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("aws_instance.bar").Resource, + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + `) +} + +func TestContext2Apply_destroyComputed(t *testing.T) { + m := testModule(t, "apply-destroy-computed") + p := testProvider("aws") + p.PlanResourceChangeFn 
= testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo", "output": "value"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("plan failed") + } else { + t.Logf("plan:\n\n%s", legacyDiffComparisonString(plan.Changes)) + } + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("apply failed") + } +} + +// Test that the destroy operation uses depends_on as a source of ordering. +func TestContext2Apply_destroyDependsOn(t *testing.T) { + // It is possible for this to be racy, so we loop a number of times + // just to check. 
+ for i := 0; i < 10; i++ { + testContext2Apply_destroyDependsOn(t) + } +} + +func testContext2Apply_destroyDependsOn(t *testing.T) { + m := testModule(t, "apply-destroy-depends-on") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + // Record the order we see Apply + var actual []string + var actualLock sync.Mutex + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + actualLock.Lock() + defer actualLock.Unlock() + id := req.PriorState.GetAttr("id").AsString() + actual = append(actual, id) + + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Parallelism: 1, // To check ordering + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + expected := []string{"foo", "bar"} + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) + } +} + +// Test that destroy ordering is correct with dependencies only +// in the state. 
+func TestContext2Apply_destroyDependsOnStateOnly(t *testing.T) { + newState := states.NewState() + root := newState.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: root.Addr.Module(), + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + // It is possible for this to be racy, so we loop a number of times + // just to check. 
+ for i := 0; i < 10; i++ { + t.Run("new", func(t *testing.T) { + testContext2Apply_destroyDependsOnStateOnly(t, newState) + }) + } +} + +func testContext2Apply_destroyDependsOnStateOnly(t *testing.T, state *states.State) { + state = state.DeepCopy() + m := testModule(t, "empty") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + // Record the order we see Apply + var actual []string + var actualLock sync.Mutex + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + actualLock.Lock() + defer actualLock.Unlock() + id := req.PriorState.GetAttr("id").AsString() + actual = append(actual, id) + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Parallelism: 1, // To check ordering + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + expected := []string{"bar", "foo"} + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) + } +} + +// Test that destroy ordering is correct with dependencies only +// in the state within a module (GH-11749) +func TestContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T) { + newState := states.NewState() + child := newState.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + 
child.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: child.Addr.Module(), + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + // It is possible for this to be racy, so we loop a number of times + // just to check. + for i := 0; i < 10; i++ { + t.Run("new", func(t *testing.T) { + testContext2Apply_destroyDependsOnStateOnlyModule(t, newState) + }) + } +} + +func testContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T, state *states.State) { + state = state.DeepCopy() + m := testModule(t, "empty") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + // Record the order we see Apply + var actual []string + var actualLock sync.Mutex + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + actualLock.Lock() + defer actualLock.Unlock() + id := req.PriorState.GetAttr("id").AsString() + actual = append(actual, id) + return testApplyFn(req) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Parallelism: 1, // To check ordering + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + expected := []string{"bar", "foo"} + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong order\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestContext2Apply_dataBasic(t *testing.T) { + m 
:= testModule(t, "apply-data-basic") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yo"), + "foo": cty.NullVal(cty.String), + }), + } + + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyDataBasicStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + if !hook.PreApplyCalled { + t.Fatal("PreApply not called for data source read") + } + if !hook.PostApplyCalled { + t.Fatal("PostApply not called for data source read") + } +} + +func TestContext2Apply_destroyData(t *testing.T) { + m := testModule(t, "apply-destroy-data-resource") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: req.Config, + } + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.null_data_source.testing").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"-"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/null"]`), + ) + + hook := &testHook{} + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + Hooks: []Hook{hook}, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + newState, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if got := len(newState.Modules); got != 1 { + t.Fatalf("state has %d modules after destroy; want 1", got) + } + + if got := len(newState.RootModule().Resources); got != 0 { + t.Fatalf("state has %d resources after destroy; want 0", got) + } + + wantHookCalls := []*testHookCall{ + {"PreApply", "data.null_data_source.testing"}, + {"PostApply", "data.null_data_source.testing"}, + {"PostStateUpdate", ""}, + } + if !reflect.DeepEqual(hook.Calls, wantHookCalls) { + t.Errorf("wrong hook calls\ngot: %swant: %s", spew.Sdump(hook.Calls), spew.Sdump(wantHookCalls)) + } +} + +// https://github.com/hashicorp/terraform/pull/5096 +func TestContext2Apply_destroySkipsCBD(t *testing.T) { + // Config contains CBD resource depending on non-CBD resource, which triggers + // a cycle if they are both replaced, but should _not_ trigger a cycle when + // just doing a `tofu destroy`. 
+ m := testModule(t, "apply-destroy-cbd") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_destroyModuleVarProviderConfig(t *testing.T) { + m := testModule(t, "apply-destroy-mod-var-provider-config") + p := func() (providers.Interface, error) { + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + return p, nil + } + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("aws"): p, + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } +} + +func TestContext2Apply_destroyCrossProviders(t *testing.T) { + m := testModule(t, "apply-destroy-cross-providers") + + p_aws := testProvider("aws") + p_aws.ApplyResourceChangeFn = testApplyFn + p_aws.PlanResourceChangeFn = testDiffFn + p_aws.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p_aws), + } + + ctx, m, state := getContextForApply_destroyCrossProviders(t, m, providers) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("apply failed") + } +} + +func getContextForApply_destroyCrossProviders(t *testing.T, m *configs.Config, providerFactories map[addrs.Provider]providers.Factory) (*Context, *configs.Config, *states.State) { + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.shared").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"test"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child := 
state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_vpc.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id": "vpc-aaabbb12", "value":"test"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: providerFactories, + }) + + return ctx, m, state +} + +func TestContext2Apply_minimal(t *testing.T) { + m := testModule(t, "apply-minimal") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyMinimalStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_cancel(t *testing.T) { + stopped := false + + m := testModule(t, "apply-cancel") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + if !stopped { + stopped = true + go ctx.Stop() + + for { + if ctx.sh.Stopped() { + break + } + time.Sleep(10 * time.Millisecond) + } + } + return testApplyFn(req) + } + p.PlanResourceChangeFn = testDiffFn + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // Start the Apply in a goroutine + var 
applyDiags tfdiags.Diagnostics + stateCh := make(chan *states.State) + go func() { + state, diags := ctx.Apply(plan, m) + applyDiags = diags + + stateCh <- state + }() + + state := <-stateCh + // only expecting an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyCancelStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + if !p.StopCalled { + t.Fatal("stop should be called") + } +} + +func TestContext2Apply_cancelBlock(t *testing.T) { + m := testModule(t, "apply-cancel-block") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + applyCh := make(chan struct{}) + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + close(applyCh) + + for !ctx.sh.Stopped() { + // Wait for stop to be called. We call Gosched here so that + // the other goroutines can always be scheduled to set Stopped. 
+ runtime.Gosched() + } + + // Sleep + time.Sleep(100 * time.Millisecond) + return testApplyFn(req) + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // Start the Apply in a goroutine + var applyDiags tfdiags.Diagnostics + stateCh := make(chan *states.State) + go func() { + state, diags := ctx.Apply(plan, m) + applyDiags = diags + + stateCh <- state + }() + + stopDone := make(chan struct{}) + go func() { + defer close(stopDone) + <-applyCh + ctx.Stop() + }() + + // Make sure that stop blocks + select { + case <-stopDone: + t.Fatal("stop should block") + case <-time.After(10 * time.Millisecond): + } + + // Wait for stop + select { + case <-stopDone: + case <-time.After(500 * time.Millisecond): + t.Fatal("stop should be done") + } + + // Wait for apply to complete + state := <-stateCh + // only expecting an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) +} + +func TestContext2Apply_cancelProvisioner(t *testing.T) { + m := testModule(t, "apply-cancel-provisioner") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + pr := testProvisioner() + pr.GetSchemaResponse = provisioners.GetSchemaResponse{ + Provisioner: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) 
+ + prStopped := make(chan struct{}) + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + // Start the stop process + go ctx.Stop() + + <-prStopped + return + } + pr.StopFn = func() error { + close(prStopped) + return nil + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // Start the Apply in a goroutine + var applyDiags tfdiags.Diagnostics + stateCh := make(chan *states.State) + go func() { + state, diags := ctx.Apply(plan, m) + applyDiags = diags + + stateCh <- state + }() + + // Wait for completion + state := <-stateCh + + // we are expecting only an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } + } + + checkStateString(t, state, ` +aws_instance.foo: (tainted) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) + + if !pr.StopCalled { + t.Fatal("stop should be called") + } +} + +func TestContext2Apply_compute(t *testing.T) { + m := testModule(t, "apply-compute") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "num": { + Type: cty.Number, + Optional: true, + }, + "compute": { + Type: cty.String, + Optional: true, + }, + "compute_value": { + Type: cty.String, + Optional: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + "type": { + Type: cty.String, + Computed: true, + }, + "value": { // Populated from compute_value because compute = "value" in the 
config fixture + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + SetVariables: InputValues{ + "value": &InputValue{ + Value: cty.NumberIntVal(1), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyComputeStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_countDecrease(t *testing.T) { + m := testModule(t, "apply-count-dec") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo": "foo","type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo": "foo","type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`), + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyCountDecStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_countDecreaseToOneX(t *testing.T) { + m := testModule(t, "apply-count-dec-one") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, 
m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyCountDecToOneStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// https://github.com/PeoplePerHour/terraform/pull/11 +// +// This tests a rare but possible situation where we have both a no-key and +// a zero-key instance of the same resource in the configuration when we +// disable count. +// +// The main way to get here is for a provider to fail to destroy the zero-key +// instance but succeed in creating the no-key instance, since those two +// can typically happen concurrently. There are various other ways to get here +// that might be considered user error, such as using "tofu state mv" +// to create a strange combination of different key types on the same resource. +// +// This test indirectly exercises an intentional interaction between +// refactoring.ImpliedMoveStatements and refactoring.ApplyMoves: we'll first +// generate an implied move statement from aws_instance.foo[0] to +// aws_instance.foo, but then refactoring.ApplyMoves should notice that and +// ignore the statement, in the same way as it would if an explicit move +// statement specified the same situation. 
func TestContext2Apply_countDecreaseToOneCorrupted(t *testing.T) {
	m := testModule(t, "apply-count-dec-one")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	// Prior state deliberately contains BOTH a no-key and a zero-key instance
	// of the same resource; see the comment block above this function.
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar", "foo": "foo", "type": "aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"baz", "type": "aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		got := strings.TrimSpace(legacyPlanComparisonString(state, plan.Changes))
		want := strings.TrimSpace(testTofuApplyCountDecToOneCorruptedPlanStr)
		if got != want {
			t.Fatalf("wrong plan result\ngot:\n%s\nwant:\n%s", got, want)
		}
	}
	{
		// The zero-key instance should be planned for deletion because its
		// repetition mode no longer matches the configuration.
		change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo[0]"))
		if change == nil {
			t.Fatalf("no planned change for instance zero")
		}
		if got, want := change.Action, plans.Delete; got != want {
			t.Errorf("wrong action for instance zero %s; want %s", got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want {
			t.Errorf("wrong action reason for instance zero %s; want %s", got, want)
		}
	}
	{
		// The no-key instance is kept as-is: the implied move statement from
		// foo[0] to foo must be ignored rather than producing a change here.
		change := plan.Changes.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo"))
		if change == nil {
			t.Fatalf("no planned change for no-key instance")
		}
		if got, want := change.Action, plans.NoOp; got != want {
			t.Errorf("wrong action for no-key instance %s; want %s", got, want)
		}
		if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason for no-key instance %s; want %s", got, want)
		}
	}

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(s.String())
	expected := strings.TrimSpace(testTofuApplyCountDecToOneCorruptedStr)
	if actual != expected {
		t.Fatalf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_countTainted starts with a tainted foo[0] in state and
// verifies the plan replaces it (destroy/create) and creates foo[1].
func TestContext2Apply_countTainted(t *testing.T) {
	m := testModule(t, "apply-count-tainted")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectTainted,
			AttrsJSON: []byte(`{"id":"bar", "type": "aws_instance", "foo": "foo"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		got := strings.TrimSpace(legacyDiffComparisonString(plan.Changes))
		// NOTE(review): inner whitespace of this literal was reconstructed
		// from a mangled source — confirm alignment against the real output.
		want := strings.TrimSpace(`
DESTROY/CREATE: aws_instance.foo[0]
  foo:  "foo" => "foo"
  id:   "bar" => ""
  type: "aws_instance" => ""
CREATE: aws_instance.foo[1]
  foo:  "" => "foo"
  id:   "" => ""
  type: "" => ""
`)
		if got != want {
			t.Fatalf("wrong plan\n\ngot:\n%s\n\nwant:\n%s", got, want)
		}
	}

	s, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	got :=
	strings.TrimSpace(s.String())
	// NOTE(review): inner whitespace of this literal was reconstructed from a
	// mangled source — confirm indentation against states.State.String output.
	want := strings.TrimSpace(`
aws_instance.foo.0:
  ID = foo
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  foo = foo
  type = aws_instance
aws_instance.foo.1:
  ID = foo
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  foo = foo
  type = aws_instance
`)
	if got != want {
		t.Fatalf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", got, want)
	}
}

// TestContext2Apply_countVariable applies the "apply-count-variable" fixture
// with all variables unset and compares the state to the golden string.
func TestContext2Apply_countVariable(t *testing.T) {
	m := testModule(t, "apply-count-variable")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyCountVariableStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_countVariableRef applies the "apply-count-variable-ref"
// fixture with all variables unset and compares against the golden string.
func TestContext2Apply_countVariableRef(t *testing.T) {
	m := testModule(t, "apply-count-variable-ref")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyCountVariableRefStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

func TestContext2Apply_provisionerInterpCount(t *testing.T) {
	// This test ensures that a provisioner can interpolate a resource count
	// even though the provisioner expression is evaluated during the plan
	// walk. https://github.com/hashicorp/terraform/issues/16840

	m, snap := testModuleWithSnapshot(t, "apply-provisioner-interp-count")

	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	pr := testProvisioner()

	Providers := map[addrs.Provider]providers.Factory{
		addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
	}

	provisioners := map[string]provisioners.Factory{
		"local-exec": testProvisionerFuncFixed(pr),
	}
	ctx := testContext2(t, &ContextOpts{
		Providers:    Providers,
		Provisioners: provisioners,
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	// We'll marshal and unmarshal the plan here, to ensure that we have
	// a clean new context as would be created if we separately ran
	// tofu plan -out=tfplan && tofu apply tfplan
	ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
	if err != nil {
		t.Fatal(err)
	}
	ctxOpts.Providers = Providers
	ctxOpts.Provisioners = provisioners
	ctx, diags = NewContext(ctxOpts)
	if diags.HasErrors() {
		t.Fatalf("failed to create context for plan: %s", diags.Err())
	}

	// Applying the plan should now succeed
	_, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("apply failed unexpectedly: %s", diags.Err())
	}

	// Verify apply was invoked
	if !pr.ProvisionResourceCalled {
		t.Fatalf("provisioner was not called")
	}
}

// TestContext2Apply_foreachVariable applies "plan-for-each-unknown-value"
// with variable "foo" set and compares the state to the golden string.
func TestContext2Apply_foreachVariable(t *testing.T) {
	m := testModule(t, "plan-for-each-unknown-value")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn =
	testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"foo": &InputValue{
				Value: cty.StringVal("hello"),
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyForEachVariableStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleBasic applies the "apply-module" fixture from empty
// state and compares the final state against the golden string.
func TestContext2Apply_moduleBasic(t *testing.T) {
	m := testModule(t, "apply-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyModuleStr)
	if actual != expected {
		t.Fatalf("bad, expected:\n%s\n\nactual:\n%s", expected, actual)
	}
}

// TestContext2Apply_moduleDestroyOrder verifies that a root resource that
// depends on a child-module resource is destroyed before its dependency.
func TestContext2Apply_moduleDestroyOrder(t *testing.T) {
	m := testModule(t, "apply-module-destroy-order")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// Create a custom apply function to track the order they were destroyed
	var order []string
	var orderLock sync.Mutex
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		id := req.PriorState.GetAttr("id").AsString()

		if id == "b" {
			// Pause briefly to make any race conditions more visible, since
			// missing edges here can cause undeterministic ordering.
			time.Sleep(100 * time.Millisecond)
		}

		orderLock.Lock()
		defer orderLock.Unlock()

		order = append(order, id)
		resp.NewState = req.PlannedState
		return resp
	}

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":    {Type: cty.String, Required: true},
					"blah":  {Type: cty.String, Optional: true},
					"value": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	// b (root) declares a dependency on a (module.child), so destroy order
	// must be b first, then a.
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.a").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"a"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.b").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:       states.ObjectReady,
			AttrsJSON:    []byte(`{"id":"b"}`),
			Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.aws_instance.a")},
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	expected := []string{"b", "a"}
	if !reflect.DeepEqual(order, expected) {
		t.Errorf("wrong order\ngot: %#v\nwant: %#v", order, expected)
	}

	{
		actual :=
		strings.TrimSpace(state.String())
		expected := strings.TrimSpace(testTofuApplyModuleDestroyOrderStr)
		if actual != expected {
			t.Errorf("wrong final state\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
		}
	}
}

// TestContext2Apply_moduleInheritAlias verifies that a child module resource
// ends up associated with the inherited aliased provider configuration.
func TestContext2Apply_moduleInheritAlias(t *testing.T) {
	m := testModule(t, "apply-module-provider-inherit-alias")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			return
		}

		// The child's provider config must not see the root-only attribute.
		root := req.Config.GetAttr("root")
		if !root.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("child should not get root"))
		}

		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	checkStateString(t, state, `

module.child:
  aws_instance.foo:
    ID = foo
    provider = provider["registry.opentofu.org/hashicorp/aws"].eu
    type = aws_instance
	`)
}

func TestContext2Apply_orphanResource(t *testing.T) {
	// This is a two-step test:
	// 1. Apply a configuration with resources that have count set.
	//    This should place the empty resource object in the state to record
	//    that each exists, and record any instances.
	// 2. Apply an empty configuration against the same state, which should
	//    then clean up both the instances and the containing resource objects.
	p := testProvider("test")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	// Step 1: create the resources and instances
	m := testModule(t, "apply-orphan-resource")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)
	state, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	// At this point both resources should be recorded in the state, along
	// with the single instance associated with test_thing.one.
	want := states.BuildState(func(s *states.SyncState) {
		providerAddr := addrs.AbsProviderConfig{
			Provider: addrs.NewDefaultProvider("test"),
			Module:   addrs.RootModule,
		}
		oneAddr := addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_thing",
			Name: "one",
		}.Absolute(addrs.RootModuleInstance)
		s.SetResourceProvider(oneAddr, providerAddr)
		s.SetResourceInstanceCurrent(oneAddr.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo"}`),
		}, providerAddr)
	})

	if state.String() != want.String() {
		t.Fatalf("wrong state after step 1\n%s", cmp.Diff(want, state))
	}

	// Step 2: update with an empty config, to destroy everything
	m = testModule(t, "empty")
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	plan, diags = ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)
	{
		addr :=
mustResourceInstanceAddr("test_thing.one[0]") + change := plan.Changes.ResourceInstance(addr) + if change == nil { + t.Fatalf("no planned change for %s", addr) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + } + + state, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // The state should now be _totally_ empty, with just an empty root module + // (since that always exists) and no resources at all. + want = states.NewState() + want.CheckResults = &states.CheckResults{} + if !cmp.Equal(state, want) { + t.Fatalf("wrong state after step 2\ngot: %swant: %s", spew.Sdump(state), spew.Sdump(want)) + } + +} + +func TestContext2Apply_moduleOrphanInheritAlias(t *testing.T) { + m := testModule(t, "apply-module-provider-inherit-alias-orphan") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() { + return + } + + root := req.Config.GetAttr("root") + if !root.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("child should not get root")) + } + + return + } + + // Create a state with an orphan module + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + { + addr := mustResourceInstanceAddr("module.child.aws_instance.bar") + change := plan.Changes.ResourceInstance(addr) + if change == nil { + t.Fatalf("no planned change for %s", addr) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + // This should ideally be ResourceInstanceDeleteBecauseNoModule, but + // the codepath deciding this doesn't currently have enough information + // to differentiate, and so this is a compromise. + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("must call configure") + } + + checkStateString(t, state, "") +} + +func TestContext2Apply_moduleOrphanProvider(t *testing.T) { + m := testModule(t, "apply-module-orphan-provider-inherit") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found")) + } + + return + } + + // Create a state with an orphan module + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: 
		map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// TestContext2Apply_moduleOrphanGrandchildProvider is like the orphan-provider
// test above but the orphan module is nested two levels deep.
func TestContext2Apply_moduleOrphanGrandchildProvider(t *testing.T) {
	m := testModule(t, "apply-module-orphan-provider-inherit")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		return
	}

	// Create a state with an orphan module that is nested (grandchild)
	state := states.NewState()
	child := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child", addrs.NoKey))
	child.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// TestContext2Apply_moduleGrandchildProvider verifies that a provider
// configured in a grandchild module is actually configured during apply.
func TestContext2Apply_moduleGrandchildProvider(t *testing.T) {
	m := testModule(t, "apply-module-grandchild-provider-inherit")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// callLock guards "called" because ConfigureProviderFn may run from a
	// different goroutine than the final assertion.
	var callLock sync.Mutex
	called := false
	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("value")
		if val.IsNull() {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found"))
		}

		callLock.Lock()
		called = true
		callLock.Unlock()

		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}

	callLock.Lock()
	defer callLock.Unlock()
	if called != true {
		t.Fatalf("err: configure never called")
	}
}

// This tests an issue where all the providers in a module but not
// in the root weren't being added to the root properly. In this test
// case: aws is explicitly added to root, but "test" should be added too.
// With the bug, it wasn't.
func TestContext2Apply_moduleOnlyProvider(t *testing.T) {
	m := testModule(t, "apply-module-only-provider")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	pTest := testProvider("test")
	pTest.ApplyResourceChangeFn = testApplyFn
	pTest.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"):  testProviderFuncFixed(p),
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyModuleOnlyProviderStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderAlias applies a module that uses an aliased
// provider configuration and compares the state to the golden string.
func TestContext2Apply_moduleProviderAlias(t *testing.T) {
	m := testModule(t,
		"apply-module-provider-alias")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyModuleProviderAliasStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderAliasTargets targets a resource that does
// not exist in the configuration, so the resulting state must be empty.
func TestContext2Apply_moduleProviderAliasTargets(t *testing.T) {
	m := testModule(t, "apply-module-provider-alias")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		Targets: []addrs.Targetable{
			addrs.ConfigResource{
				Module: addrs.RootModule,
				Resource: addrs.Resource{
					Mode: addrs.ManagedResourceMode,
					Type: "nonexistent",
					Name: "thing",
				},
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Expected state is empty: TrimSpace of a whitespace-only literal is "".
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(`

	`)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleProviderCloseNested destroys a resource whose
// provider is used through nested modules, checking for clean shutdown.
func TestContext2Apply_moduleProviderCloseNested(t *testing.T) {
	m := testModule(t, "apply-module-provider-close-nested")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// Tests that variables used as module vars that reference data that
// already exists in the state and requires no diff works properly. This
// fixes an issue faced where module variables were pruned because they were
// accessing "non-existent" resources (they existed, just not in the graph
// cause they weren't in the diff).
func TestContext2Apply_moduleVarRefExisting(t *testing.T) {
	m := testModule(t, "apply-ref-existing")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected :=
	strings.TrimSpace(testTofuApplyModuleVarRefExistingStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_moduleVarResourceCount exercises changing the "num"
// variable (used for a resource count inside a module) between runs.
func TestContext2Apply_moduleVarResourceCount(t *testing.T) {
	m := testModule(t, "apply-module-var-resource-count")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.DestroyMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(2),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	assertNoErrors(t, diags)

	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(5),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatalf("apply errors: %s", diags.Err())
	}
}

// GH-819
func TestContext2Apply_moduleBool(t *testing.T) {
	m := testModule(t, "apply-module-bool")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyModuleBoolStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// Tests that a module can be targeted and everything is properly created.
// This adds to the plan test to also just verify that apply works.
func TestContext2Apply_moduleTarget(t *testing.T) {
	m := testModule(t, "plan-targeted-cross-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Child("B", addrs.NoKey),
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// NOTE(review): inner whitespace of this literal was reconstructed from a
	// mangled source — confirm indentation against checkStateString's format.
	checkStateString(t, state, `

module.A:
  aws_instance.foo:
    ID = foo
    provider = provider["registry.opentofu.org/hashicorp/aws"]
    foo = bar
    type = aws_instance

  Outputs:

  value = foo
module.B:
  aws_instance.bar:
    ID = foo
    provider = provider["registry.opentofu.org/hashicorp/aws"]
    foo = foo
    type = aws_instance

    Dependencies:
      module.A.aws_instance.foo
	`)
}

// TestContext2Apply_multiProvider applies a configuration that uses two
// different providers (aws and do) and compares against the golden string.
func TestContext2Apply_multiProvider(t *testing.T) {
	m := testModule(t, "apply-multi-provider")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn

	pDO := testProvider("do")
	pDO.ApplyResourceChangeFn = testApplyFn
	pDO.PlanResourceChangeFn = testDiffFn

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			addrs.NewDefaultProvider("do"):  testProviderFuncFixed(pDO),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if
	diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Resources from both providers must be present in the root module.
	mod := state.RootModule()
	if len(mod.Resources) < 2 {
		t.Fatalf("bad: %#v", mod.Resources)
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyMultiProviderStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

// TestContext2Apply_multiProviderDestroy creates resources with two providers
// and then destroys them, asserting that the dependent aws_instance is
// destroyed before anything else.
func TestContext2Apply_multiProviderDestroy(t *testing.T) {
	m := testModule(t, "apply-multi-provider-destroy")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"addr": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	p2 := testProvider("vault")
	p2.ApplyResourceChangeFn = testApplyFn
	p2.PlanResourceChangeFn = testDiffFn
	p2.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"vault_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	var state *states.State

	// First, create the instances
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		assertNoErrors(t, diags)

		state = s
	}

	// Destroy them
	{
		// Verify that aws_instance.bar is destroyed first
		var checked bool
		var called int32
		var lock sync.Mutex
		applyFn := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
			lock.Lock()
			defer lock.Unlock()

			if req.TypeName == "aws_instance" {
				checked = true

				// Sleep to allow parallel execution
				time.Sleep(50 * time.Millisecond)

				// Verify that called is 0 (dep not called)
				if atomic.LoadInt32(&called) != 0 {
					resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called"))
					return resp
				}
			}

			atomic.AddInt32(&called, 1)
			return testApplyFn(req)
		}

		// Set the apply functions
		p.ApplyResourceChangeFn = applyFn
		p2.ApplyResourceChangeFn = applyFn

		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
		})
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		assertNoErrors(t, diags)

		if !checked {
			t.Fatal("should be checked")
		}

		state = s
	}

	checkStateString(t, state, ``)
}

// This is like the multiProviderDestroy test except it tests that
// dependent resources within a child module that inherit provider
// configuration are still destroyed first.
func TestContext2Apply_multiProviderDestroyChild(t *testing.T) {
	m := testModule(t, "apply-multi-provider-destroy-child")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{
			Attributes: map[string]*configschema.Attribute{
				"value": {Type: cty.String, Optional: true},
			},
		},
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	p2 := testProvider("vault")
	p2.ApplyResourceChangeFn = testApplyFn
	p2.PlanResourceChangeFn = testDiffFn
	p2.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		Provider: &configschema.Block{},
		ResourceTypes: map[string]*configschema.Block{
			"vault_instance": {
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	var state *states.State

	// First, create the instances
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		state = s
	}

	// Destroy them
	{
		// Verify that aws_instance.bar is destroyed first
		var checked bool
		var called int32
		var lock sync.Mutex
		applyFn := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
			lock.Lock()
			defer lock.Unlock()

			if req.TypeName == "aws_instance" {
				checked = true

				// Sleep to allow parallel execution
				time.Sleep(50 * time.Millisecond)

				// Verify that called is 0 (dep not called)
				if atomic.LoadInt32(&called) != 0 {
					resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called"))
					return resp
				}
			}

			atomic.AddInt32(&called, 1)
			return testApplyFn(req)
		}

		// Set the apply functions
		p.ApplyResourceChangeFn = applyFn
		p2.ApplyResourceChangeFn = applyFn

		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"):   testProviderFuncFixed(p),
				addrs.NewDefaultProvider("vault"): testProviderFuncFixed(p2),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
		})
		assertNoErrors(t, diags)

		s, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		if !checked {
			t.Fatal("should be checked")
		}

		state = s
	}

	// Everything was destroyed, so the final state must be empty.
	checkStateString(t, state, `

`)
}

// Tests applying a counted resource, then re-applying with a smaller count
// and verifying that the splat-derived output shrinks to match.
func TestContext2Apply_multiVar(t *testing.T) {
	m := testModule(t, "apply-multi-var")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(3),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	actual := state.RootModule().OutputValues["output"]
	expected := cty.StringVal("bar0,bar1,bar2")
	if actual == nil || actual.Value != expected {
		t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected)
	}

	t.Logf("Initial state: %s", state.String())

	// Apply again, reduce the count to 1
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.NormalMode,
			SetVariables: InputValues{
				"num": &InputValue{
					Value:      cty.NumberIntVal(1),
					SourceType: ValueFromCaller,
				},
			},
		})
		assertNoErrors(t, diags)

		state, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		t.Logf("End state: %s", state.String())

		actual := state.RootModule().OutputValues["output"]
		if actual == nil {
			t.Fatal("missing output")
		}

		expected := cty.StringVal("bar0")
		if actual.Value != expected {
			t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected)
		}
	}
}

// This is a holistic test of multi-var (aka "splat variable") handling
// across several different OpenTofu subsystems. This is here because
// historically there were quirky differences in handling across different
// parts of OpenTofu and so here we want to assert the expected behavior and
// ensure that it remains consistent in future.
func TestContext2Apply_multiVarComprehensive(t *testing.T) {
	m := testModule(t, "apply-multi-var-comprehensive")
	p := testProvider("test")

	// configs records, per resource "key", the proposed state seen during
	// planning; checkConfig below asserts against these snapshots.
	configs := map[string]cty.Value{}
	var configsLock sync.Mutex

	p.ApplyResourceChangeFn = testApplyFn
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		proposed := req.ProposedNewState
		configsLock.Lock()
		defer configsLock.Unlock()
		key := proposed.GetAttr("key").AsString()
		// This test was originally written using the legacy p.PlanResourceChangeFn interface,
		// and so the assertions below expect an old-style ResourceConfig, which
		// we'll construct via our shim for now to avoid rewriting all of the
		// assertions.
		configs[key] = req.ProposedNewState

		// Echo the proposed attributes back as the planned state, except
		// that "id" becomes unknown (computed) and "name" echoes the key.
		retVals := make(map[string]cty.Value)
		for it := proposed.ElementIterator(); it.Next(); {
			idxVal, val := it.Element()
			idx := idxVal.AsString()

			switch idx {
			case "id":
				retVals[idx] = cty.UnknownVal(cty.String)
			case "name":
				retVals[idx] = cty.StringVal(key)
			default:
				retVals[idx] = val
			}
		}

		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(retVals),
		}
	}

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"key": {Type: cty.String, Required: true},

					"source_id":              {Type: cty.String, Optional: true},
					"source_name":            {Type: cty.String, Optional: true},
					"first_source_id":        {Type: cty.String, Optional: true},
					"first_source_name":      {Type: cty.String, Optional: true},
					"source_ids":             {Type: cty.List(cty.String), Optional: true},
					"source_names":           {Type: cty.List(cty.String), Optional: true},
					"source_ids_from_func":   {Type: cty.List(cty.String), Optional: true},
					"source_names_from_func": {Type: cty.List(cty.String), Optional: true},
					"source_ids_wrapped":     {Type: cty.List(cty.List(cty.String)), Optional: true},
					"source_names_wrapped":   {Type: cty.List(cty.List(cty.String)), Optional: true},

					"id":   {Type: cty.String, Computed: true},
					"name": {Type: cty.String, Computed: true},
				},
			},
		},
	})

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"num": &InputValue{
				Value:      cty.NumberIntVal(3),
				SourceType: ValueFromCaller,
			},
		},
	})
	assertNoErrors(t, diags)

	// checkConfig asserts the proposed state recorded during planning for
	// the given resource key matches want (deep comparison).
	checkConfig := func(key string, want cty.Value) {
		configsLock.Lock()
		defer configsLock.Unlock()

		got, ok := configs[key]
		if !ok {
			t.Errorf("no config recorded for %s; expected a configuration", key)
			return
		}

		t.Run("config for "+key, func(t *testing.T) {
			for _, problem := range deep.Equal(got, want) {
				t.Errorf(problem)
			}
		})
	}

	checkConfig("multi_count_var.0", cty.ObjectVal(map[string]cty.Value{
		"source_id":   cty.UnknownVal(cty.String),
		"source_name": cty.StringVal("source.0"),
	}))
	checkConfig("multi_count_var.2", cty.ObjectVal(map[string]cty.Value{
		"source_id":   cty.UnknownVal(cty.String),
		"source_name": cty.StringVal("source.2"),
	}))
	checkConfig("multi_count_derived.0", cty.ObjectVal(map[string]cty.Value{
		"source_id":   cty.UnknownVal(cty.String),
		"source_name": cty.StringVal("source.0"),
	}))
	checkConfig("multi_count_derived.2", cty.ObjectVal(map[string]cty.Value{
		"source_id":   cty.UnknownVal(cty.String),
		"source_name": cty.StringVal("source.2"),
	}))
	checkConfig("whole_splat", cty.ObjectVal(map[string]cty.Value{
		"source_ids": cty.ListVal([]cty.Value{
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
		}),
		"source_names": cty.ListVal([]cty.Value{
			cty.StringVal("source.0"),
			cty.StringVal("source.1"),
			cty.StringVal("source.2"),
		}),
		"source_ids_from_func": cty.UnknownVal(cty.String),
		"source_names_from_func": cty.ListVal([]cty.Value{
			cty.StringVal("source.0"),
			cty.StringVal("source.1"),
			cty.StringVal("source.2"),
		}),
		"source_ids_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
			}),
		}),
		"source_names_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.StringVal("source.0"),
				cty.StringVal("source.1"),
				cty.StringVal("source.2"),
			}),
		}),
		"first_source_id":   cty.UnknownVal(cty.String),
		"first_source_name": cty.StringVal("source.0"),
	}))
	checkConfig("child.whole_splat", cty.ObjectVal(map[string]cty.Value{
		"source_ids": cty.ListVal([]cty.Value{
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
			cty.UnknownVal(cty.String),
		}),
		"source_names": cty.ListVal([]cty.Value{
			cty.StringVal("source.0"),
			cty.StringVal("source.1"),
			cty.StringVal("source.2"),
		}),
		"source_ids_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
				cty.UnknownVal(cty.String),
			}),
		}),
		"source_names_wrapped": cty.ListVal([]cty.Value{
			cty.ListVal([]cty.Value{
				cty.StringVal("source.0"),
				cty.StringVal("source.1"),
				cty.StringVal("source.2"),
			}),
		}),
	}))

	t.Run("apply", func(t *testing.T) {
		state, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("error during apply: %s", diags.Err())
		}

		want := map[string]interface{}{
			"source_ids": []interface{}{"foo", "foo", "foo"},
			"source_names": []interface{}{
				"source.0",
				"source.1",
				"source.2",
			},
		}
		got := map[string]interface{}{}
		for k, s := range state.RootModule().OutputValues {
			got[k] = hcl2shim.ConfigValueFromHCL2(s.Value)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf(
				"wrong outputs\ngot: %s\nwant: %s",
				spew.Sdump(got), spew.Sdump(want),
			)
		}
	})
}

// Test that multi-var (splat) access is ordered by count, not by
// value.
func TestContext2Apply_multiVarOrder(t *testing.T) {
	m := testModule(t, "apply-multi-var-order")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	t.Logf("State: %s", state.String())

	// Index 11 would sort before index 2 lexically; asserting "index-11"
	// here proves ordering follows the numeric count index.
	actual := state.RootModule().OutputValues["should-be-11"]
	expected := cty.StringVal("index-11")
	if actual == nil || actual.Value != expected {
		t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected)
	}
}

// Test that multi-var (splat) access is ordered by count, not by
// value, through interpolations.
func TestContext2Apply_multiVarOrderInterp(t *testing.T) {
	m := testModule(t, "apply-multi-var-order-interp")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// First, apply with a count of 3
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	t.Logf("State: %s", state.String())

	actual := state.RootModule().OutputValues["should-be-11"]
	expected := cty.StringVal("baz-index-11")
	if actual == nil || actual.Value != expected {
		t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected)
	}
}

// Based on GH-10440 where a graph edge wasn't properly being created
// between a modified resource and a count instance being destroyed.
func TestContext2Apply_multiVarCountDec(t *testing.T) {
	var s *states.State

	// First create resources. Nothing sneaky here.
	{
		m := testModule(t, "apply-multi-var-count-dec")
		p := testProvider("aws")
		p.PlanResourceChangeFn = testDiffFn
		p.ApplyResourceChangeFn = testApplyFn
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		log.Print("\n========\nStep 1 Plan\n========")
		plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
			Mode: plans.NormalMode,
			SetVariables: InputValues{
				"num": &InputValue{
					Value:      cty.NumberIntVal(2),
					SourceType: ValueFromCaller,
				},
			},
		})
		assertNoErrors(t, diags)

		log.Print("\n========\nStep 1 Apply\n========")
		state, diags := ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("diags: %s", diags.Err())
		}

		t.Logf("Step 1 state:\n%s", state)

		s = state
	}

	// Decrease the count by 1 and verify that everything happens in the
	// right order.
	m := testModule(t, "apply-multi-var-count-dec")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// Verify that aws_instance.bar is modified first and nothing
	// else happens at the same time.
	{
		var checked bool
		var called int32
		var lock sync.Mutex
		p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
			lock.Lock()
			defer lock.Unlock()

			if !req.PlannedState.IsNull() {
				s := req.PlannedState.AsValueMap()
				if ami, ok := s["ami"]; ok && !ami.IsNull() && ami.AsString() == "special" {
					checked = true

					// Sleep to allow parallel execution
					time.Sleep(50 * time.Millisecond)

					// Verify that exactly one apply (the dependency) has
					// happened before this one; anything more means work
					// ran concurrently that should have waited.
					if atomic.LoadInt32(&called) != 1 {
						resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("nothing else should be called"))
						return
					}
				}
			}
			atomic.AddInt32(&called, 1)
			return testApplyFn(req)
		}

		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		log.Print("\n========\nStep 2 Plan\n========")
		plan, diags := ctx.Plan(m, s, &PlanOpts{
			Mode: plans.NormalMode,
			SetVariables: InputValues{
				"num": &InputValue{
					Value:      cty.NumberIntVal(1),
					SourceType: ValueFromCaller,
				},
			},
		})
		assertNoErrors(t, diags)

		t.Logf("Step 2 plan:\n%s", legacyDiffComparisonString(plan.Changes))

		log.Print("\n========\nStep 2 Apply\n========")
		_, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("apply errors: %s", diags.Err())
		}

		if !checked {
			t.Error("apply never called")
		}
	}
}

// Test that we can resolve a multi-var (splat) for the first resource
// created in a non-root module, which happens when the module state doesn't
// exist yet.
+// https://github.com/hashicorp/terraform/issues/14438 +func TestContext2Apply_multiVarMissingState(t *testing.T) { + m := testModule(t, "apply-multi-var-missing-state") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_thing": { + Attributes: map[string]*configschema.Attribute{ + "a_ids": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + // First, apply with a count of 3 + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // Before the relevant bug was fixed, OpenTofu would panic during apply. + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply failed: %s", diags.Err()) + } + + // If we get here with no errors or panics then our test was successful. 
+} + +func TestContext2Apply_outputOrphan(t *testing.T) { + m := testModule(t, "apply-output-orphan") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetOutputValue("foo", cty.StringVal("bar"), false) + root.SetOutputValue("bar", cty.StringVal("baz"), false) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyOutputOrphanStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_outputOrphanModule(t *testing.T) { + m := testModule(t, "apply-output-orphan-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyOutputOrphanModuleStr) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } + + // now apply with no module in the config, which should remove the + // remaining output + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + emptyConfig := configs.NewEmptyConfig() + + // NOTE: 
While updating this test to pass the state in as a Plan argument, + // rather than into the testContext2 call above, it previously said + // State: state.DeepCopy(), which is a little weird since we just + // created "s" above as the result of the previous apply, but I've preserved + // it to avoid changing the flow of this test in case that's important + // for some reason. + plan, diags = ctx.Plan(emptyConfig, state.DeepCopy(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, emptyConfig) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if !state.Empty() { + t.Fatalf("wrong final state %s\nwant empty state", spew.Sdump(state)) + } +} + +func TestContext2Apply_providerComputedVar(t *testing.T) { + m := testModule(t, "apply-provider-computed") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + pTest := testProvider("test") + pTest.ApplyResourceChangeFn = testApplyFn + pTest.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest), + }, + }) + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found")) + return + } + return + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_providerConfigureDisabled(t *testing.T) { + m := testModule(t, "apply-provider-configure-disabled") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp 
providers.ConfigureProviderResponse) { + val := req.Config.GetAttr("value") + if val.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found")) + } + + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("configure never called") + } +} + +func TestContext2Apply_provisionerModule(t *testing.T) { + m := testModule(t, "apply-provisioner-module") + + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + pr := testProvisioner() + pr.GetSchemaResponse = provisioners.GetSchemaResponse{ + Provisioner: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerModuleStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +func TestContext2Apply_Provisioner_compute(t *testing.T) { + m := testModule(t, "apply-provisioner-compute") + p := 
testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + + val := req.Config.GetAttr("command").AsString() + if val != "computed_value" { + t.Fatalf("bad value for foo: %q", val) + } + req.UIOutput.Output(fmt.Sprintf("Executing: %q", val)) + + return + } + h := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "value": &InputValue{ + Value: cty.NumberIntVal(1), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + // Verify output was rendered + if !h.ProvisionOutputCalled { + t.Fatalf("ProvisionOutput hook not called") + } + if got, want := h.ProvisionOutputMessage, `Executing: "computed_value"`; got != want { + t.Errorf("expected output to be %q, but was %q", want, got) + } +} + +func TestContext2Apply_provisionerCreateFail(t *testing.T) { + m := testModule(t, "apply-provisioner-fail-create") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) 
providers.ApplyResourceChangeResponse { + resp := testApplyFn(req) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) + + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags == nil { + t.Fatal("should error") + } + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(testTofuApplyProvisionerFailCreateStr) + if got != want { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want) + } +} + +func TestContext2Apply_provisionerCreateFailNoId(t *testing.T) { + m := testModule(t, "apply-provisioner-fail-create") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags == nil { + t.Fatal("should error") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerFailCreateNoIdStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_provisionerFail(t *testing.T) { + m := testModule(t, "apply-provisioner-fail") + p := 
testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + pr := testProvisioner() + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION")) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags == nil { + t.Fatal("should error") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerFailStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_provisionerFail_createBeforeDestroy(t *testing.T) { + m := testModule(t, "apply-provisioner-fail-create-before") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION")) + return + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","require_new":"abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), 
+ }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("should error") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerFailCreateBeforeDestroyStr) + if actual != expected { + t.Fatalf("expected:\n%s\n:got\n%s", expected, actual) + } +} + +func TestContext2Apply_error_createBeforeDestroy(t *testing.T) { + m := testModule(t, "apply-error-create-before") + p := testProvider("aws") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "require_new": "abc","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("placeholder error from ApplyFn")) + return + } + p.PlanResourceChangeFn = testDiffFn + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("should have error") + } + if got, want := diags.Err().Error(), "placeholder error from ApplyFn"; got != want { + // We're looking for our artificial error from ApplyFn above, whose + // message is literally "placeholder error from ApplyFn". 
+ t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyErrorCreateBeforeDestroyStr) + if actual != expected { + t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_errorDestroy_createBeforeDestroy(t *testing.T) { + m := testModule(t, "apply-error-create-before") + p := testProvider("aws") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar", "require_new": "abc"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // Fail the destroy! 
+ if req.PlannedState.IsNull() { + resp.NewState = req.PriorState + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) + return + } + + return testApplyFn(req) + } + p.PlanResourceChangeFn = testDiffFn + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("should have error") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyErrorDestroyCreateBeforeDestroyStr) + if actual != expected { + t.Fatalf("bad: actual:\n%s\n\nexpected:\n%s", actual, expected) + } +} + +func TestContext2Apply_multiDepose_createBeforeDestroy(t *testing.T) { + m := testModule(t, "apply-multi-depose-create-before-destroy") + p := testProvider("aws") + ps := map[addrs.Provider]providers.Factory{addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p)} + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "require_new": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: ps, + }) + createdInstanceId := "bar" + // Create works + createFunc := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + s := req.PlannedState.AsValueMap() + s["id"] = cty.StringVal(createdInstanceId) + resp.NewState = cty.ObjectVal(s) + return + } + + // Destroy starts 
broken + destroyFunc := func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.NewState = req.PriorState + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("destroy failed")) + return + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + return destroyFunc(req) + } else { + return createFunc(req) + } + } + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "require_new": &InputValue{ + Value: cty.StringVal("yes"), + }, + }, + }) + assertNoErrors(t, diags) + + // Destroy is broken, so even though CBD successfully replaces the instance, + // we'll have to save the Deposed instance to destroy later + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("should have error") + } + + checkStateString(t, state, ` +aws_instance.web: (1 deposed) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = yes + Deposed ID 1 = foo + `) + + createdInstanceId = "baz" + ctx = testContext2(t, &ContextOpts{ + Providers: ps, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "require_new": &InputValue{ + Value: cty.StringVal("baz"), + }, + }, + }) + assertNoErrors(t, diags) + + // We're replacing the primary instance once again. Destroy is _still_ + // broken, so the Deposed list gets longer + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("should have error") + } + + // For this one we can't rely on checkStateString because its result is + // not deterministic when multiple deposed objects are present. Instead, + // we will probe the state object directly. 
+ { + is := state.RootModule().Resources["aws_instance.web"].Instances[addrs.NoKey] + if is.Current == nil { + t.Fatalf("no current object for aws_instance web; should have one") + } + if !bytes.Contains(is.Current.AttrsJSON, []byte("baz")) { + t.Fatalf("incorrect current object attrs %s; want id=baz", is.Current.AttrsJSON) + } + if got, want := len(is.Deposed), 2; got != want { + t.Fatalf("wrong number of deposed instances %d; want %d", got, want) + } + var foos, bars int + for _, obj := range is.Deposed { + if bytes.Contains(obj.AttrsJSON, []byte("foo")) { + foos++ + } + if bytes.Contains(obj.AttrsJSON, []byte("bar")) { + bars++ + } + } + if got, want := foos, 1; got != want { + t.Fatalf("wrong number of deposed instances with id=foo %d; want %d", got, want) + } + if got, want := bars, 1; got != want { + t.Fatalf("wrong number of deposed instances with id=bar %d; want %d", got, want) + } + } + + // Destroy partially fixed! + destroyFunc = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + s := req.PriorState.AsValueMap() + id := s["id"].AsString() + if id == "foo" || id == "baz" { + resp.NewState = cty.NullVal(req.PriorState.Type()) + } else { + resp.NewState = req.PriorState + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("destroy partially failed")) + } + return + } + + createdInstanceId = "qux" + ctx = testContext2(t, &ContextOpts{ + Providers: ps, + }) + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "require_new": &InputValue{ + Value: cty.StringVal("qux"), + }, + }, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + // Expect error because 1/2 of Deposed destroys failed + if !diags.HasErrors() { + t.Fatal("should have error") + } + + // foo and baz are now gone, bar sticks around + checkStateString(t, state, ` +aws_instance.web: (1 deposed) + ID = qux + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = qux 
+ Deposed ID 1 = bar + `) + + // Destroy working fully! + destroyFunc = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.NewState = cty.NullVal(req.PriorState.Type()) + return + } + + createdInstanceId = "quux" + ctx = testContext2(t, &ContextOpts{ + Providers: ps, + }) + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "require_new": &InputValue{ + Value: cty.StringVal("quux"), + }, + }, + }) + assertNoErrors(t, diags) + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal("should not have error:", diags.Err()) + } + + // And finally the state is clean + checkStateString(t, state, ` +aws_instance.web: + ID = quux + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = quux + `) +} + +// Verify that a normal provisioner with on_failure "continue" set won't +// taint the resource and continues executing. +func TestContext2Apply_provisionerFailContinue(t *testing.T) { + m := testModule(t, "apply-provisioner-fail-continue") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = foo + provider = 
provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance + `) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +// Verify that a normal provisioner with on_failure "continue" records +// the error with the hook. +func TestContext2Apply_provisionerFailContinueHook(t *testing.T) { + h := new(MockHook) + m := testModule(t, "apply-provisioner-fail-continue") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) + return + } + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !h.PostProvisionInstanceStepCalled { + t.Fatal("PostProvisionInstanceStep not called") + } + if h.PostProvisionInstanceStepErrorArg == nil { + t.Fatal("should have error") + } +} + +func TestContext2Apply_provisionerDestroy(t *testing.T) { + m := testModule(t, "apply-provisioner-destroy") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command").AsString() + if val != "destroy a bar" { + t.Fatalf("bad value for foo: %q", val) + } + + return + } + + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ``) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +// Verify that on destroy provisioner failure, nothing happens to the instance +func TestContext2Apply_provisionerDestroyFail(t *testing.T) { + m := testModule(t, "apply-provisioner-destroy") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) + return + } + + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": 
testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags == nil { + t.Fatal("should error") + } + + checkStateString(t, state, ` +aws_instance.foo["a"]: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + `) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +// Verify that on destroy provisioner failure with "continue" that +// we continue to the next provisioner. +func TestContext2Apply_provisionerDestroyFailContinue(t *testing.T) { + m := testModule(t, "apply-provisioner-destroy-continue") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + + var l sync.Mutex + var calls []string + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.IsNull() { + t.Fatalf("bad value for foo: %#v", val) + } + + l.Lock() + defer l.Unlock() + calls = append(calls, val.AsString()) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) + return + } + + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, 
diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ``) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + expected := []string{"one", "two"} + if !reflect.DeepEqual(calls, expected) { + t.Fatalf("wrong commands\ngot: %#v\nwant: %#v", calls, expected) + } +} + +// Verify that on destroy provisioner failure with "continue" that +// we continue to the next provisioner. But if the next provisioner defines +// to fail, then we fail after running it. +func TestContext2Apply_provisionerDestroyFailContinueFail(t *testing.T) { + m := testModule(t, "apply-provisioner-destroy-fail") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + + var l sync.Mutex + var calls []string + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.IsNull() { + t.Fatalf("bad value for foo: %#v", val) + } + + l.Lock() + defer l.Unlock() + calls = append(calls, val.AsString()) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) + return + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + state, 
diags = ctx.Apply(plan, m) + if diags == nil { + t.Fatal("apply succeeded; wanted error from second provisioner") + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + `) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + expected := []string{"one", "two"} + if !reflect.DeepEqual(calls, expected) { + t.Fatalf("bad: %#v", calls) + } +} + +// Verify destroy provisioners are not run for tainted instances. +func TestContext2Apply_provisionerDestroyTainted(t *testing.T) { + m := testModule(t, "apply-provisioner-destroy") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + destroyCalled := false + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + expected := "create a b" + val := req.Config.GetAttr("command") + if val.AsString() != expected { + t.Fatalf("bad value for command: %#v", val) + } + + return + } + + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`aws_instance.foo["a"]`).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + }), + SourceType: ValueFromInput, + }, + }, + }) + assertNoErrors(t, diags) + + state, 
diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.foo["a"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance + `) + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + if destroyCalled { + t.Fatal("destroy should not be called") + } +} + +func TestContext2Apply_provisionerResourceRef(t *testing.T) { + m := testModule(t, "apply-provisioner-resource-ref") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + pr := testProvisioner() + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.AsString() != "2" { + t.Fatalf("bad value for command: %#v", val) + } + + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerResourceRefStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +func TestContext2Apply_provisionerSelfRef(t *testing.T) { + m := testModule(t, "apply-provisioner-self-ref") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + 
pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.AsString() != "bar" { + t.Fatalf("bad value for command: %#v", val) + } + + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerSelfRefStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } +} + +func TestContext2Apply_provisionerMultiSelfRef(t *testing.T) { + var lock sync.Mutex + commands := make([]string, 0, 5) + + m := testModule(t, "apply-provisioner-multi-self-ref") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + lock.Lock() + defer lock.Unlock() + + val := req.Config.GetAttr("command") + if val.IsNull() { + t.Fatalf("bad value for command: %#v", val) + } + + commands = append(commands, val.AsString()) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), 
DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerMultiSelfRefStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + // Verify our result + sort.Strings(commands) + expectedCommands := []string{"number 0", "number 1", "number 2"} + if !reflect.DeepEqual(commands, expectedCommands) { + t.Fatalf("bad: %#v", commands) + } +} + +func TestContext2Apply_provisionerMultiSelfRefSingle(t *testing.T) { + var lock sync.Mutex + order := make([]string, 0, 5) + + m := testModule(t, "apply-provisioner-multi-self-ref-single") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + lock.Lock() + defer lock.Unlock() + + val := req.Config.GetAttr("order") + if val.IsNull() { + t.Fatalf("no val for order") + } + + order = append(order, val.AsString()) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerMultiSelfRefSingleStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, 
expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + + // Verify our result + sort.Strings(order) + expectedOrder := []string{"0", "1", "2"} + if !reflect.DeepEqual(order, expectedOrder) { + t.Fatalf("bad: %#v", order) + } +} + +func TestContext2Apply_provisionerExplicitSelfRef(t *testing.T) { + m := testModule(t, "apply-provisioner-explicit-self-ref") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.IsNull() || val.AsString() != "bar" { + t.Fatalf("bad value for command: %#v", val) + } + + return + } + + var state *states.State + { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner not invoked") + } + } + + { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ``) + } +} + +func 
TestContext2Apply_provisionerForEachSelfRef(t *testing.T) { + m := testModule(t, "apply-provisioner-for-each-self") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + val := req.Config.GetAttr("command") + if val.IsNull() { + t.Fatalf("bad value for command: %#v", val) + } + + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } +} + +// Provisioner should NOT run on a diff, only create +func TestContext2Apply_Provisioner_Diff(t *testing.T) { + m := testModule(t, "apply-provisioner-diff") + p := testProvider("aws") + pr := testProvisioner() + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("apply failed") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerDiffStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner was not called on 
first apply")
	}
	pr.ProvisionResourceCalled = false

	// Change the state to force a diff
	mod := state.RootModule()
	obj := mod.Resources["aws_instance.bar"].Instances[addrs.NoKey].Current
	var attrs map[string]interface{}
	err := json.Unmarshal(obj.AttrsJSON, &attrs)
	if err != nil {
		t.Fatal(err)
	}
	attrs["foo"] = "baz"
	obj.AttrsJSON, err = json.Marshal(attrs)
	if err != nil {
		t.Fatal(err)
	}

	// Re-create context with state
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags = ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state2, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		logDiagnostics(t, diags)
		t.Fatal("apply failed")
	}

	actual = strings.TrimSpace(state2.String())
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}

	// Verify apply was NOT invoked
	if pr.ProvisionResourceCalled {
		t.Fatalf("provisioner was called on second apply; should not have been")
	}
}

// TestContext2Apply_outputDiffVars plans and applies the "apply-good" config
// against pre-existing state; the commented-out legacy diff function below is
// retained for historical context only.
func TestContext2Apply_outputDiffVars(t *testing.T) {
	m := testModule(t, "apply-good")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.baz").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	p.PlanResourceChangeFn = testDiffFn
	//func(info *InstanceInfo, s *InstanceState, rc *ResourceConfig) (*InstanceDiff, error) {
	//	d := &InstanceDiff{
	//		Attributes: map[string]*ResourceAttrDiff{},
	//	}
	//	if new, ok := rc.Get("value"); ok {
	//		d.Attributes["value"] = &ResourceAttrDiff{
	//			New: new.(string),
	//		}
	//	}
	//	if new, ok := rc.Get("foo"); ok {
	//		d.Attributes["foo"] = &ResourceAttrDiff{
	//			New: new.(string),
	//		}
	//	} else if rc.IsComputed("foo") {
	//		d.Attributes["foo"] = &ResourceAttrDiff{
	//			NewComputed: true,
	//			Type:        DiffAttrOutput, // This doesn't actually really do anything anymore, but this test originally set it.
	//		}
	//	}
	//	if new, ok := rc.Get("num"); ok {
	//		d.Attributes["num"] = &ResourceAttrDiff{
	//			New: fmt.Sprintf("%#v", new),
	//		}
	//	}
	//	return d, nil
	// }

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	_, diags = ctx.Apply(plan, m)
	assertNoErrors(t, diags)
}

// TestContext2Apply_destroyX runs a full create-then-destroy cycle and checks
// both the final state string and the destroy ordering recorded by the hook.
func TestContext2Apply_destroyX(t *testing.T) {
	m := testModule(t, "apply-destroy")
	h := new(HookRecordApplyOrder)
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Next, plan and apply a destroy operation
	h.Active = true
	ctx = testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyDestroyStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}

	// Test that things were destroyed _in the right order_
	expected2 := []string{"aws_instance.bar", "aws_instance.foo"}
	actual2 := h.IDs
	if !reflect.DeepEqual(actual2, expected2) {
		t.Fatalf("expected: %#v\n\ngot:%#v", expected2, actual2)
	}
}

// TestContext2Apply_destroyOrder is like destroyX but also logs the state
// between the create and destroy steps, guarding destroy ordering.
func TestContext2Apply_destroyOrder(t *testing.T) {
	m := testModule(t, "apply-destroy")
	h := new(HookRecordApplyOrder)
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	t.Logf("State 1: %s", state)

	// Next, plan and apply a destroy
	h.Active = true
	ctx = testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyDestroyStr)
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}

	// Test that things were destroyed _in the right order_
	expected2 := []string{"aws_instance.bar", "aws_instance.foo"}
	actual2 := h.IDs
	if !reflect.DeepEqual(actual2, expected2) {
		t.Fatalf("expected: 
%#v\n\ngot:%#v", expected2, actual2)
	}
}

// https://github.com/hashicorp/terraform/issues/2767
func TestContext2Apply_destroyModulePrefix(t *testing.T) {
	m := testModule(t, "apply-destroy-module-resource-prefix")
	h := new(MockHook)
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Verify that we got the apply info correct
	if v := h.PreApplyAddr.String(); v != "module.child.aws_instance.foo" {
		t.Fatalf("bad: %s", v)
	}

	// Next, plan and apply a destroy operation and reset the hook
	h = new(MockHook)
	ctx = testContext2(t, &ContextOpts{
		Hooks: []Hook{h},
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	_, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Test that things were destroyed
	if v := h.PreApplyAddr.String(); v != "module.child.aws_instance.foo" {
		t.Fatalf("bad: %s", v)
	}
}

// TestContext2Apply_destroyNestedModule applies a config against state that
// tracks a root resource; the resulting state string must be empty.
func TestContext2Apply_destroyNestedModule(t *testing.T) {
	m := testModule(t, "apply-destroy-nested-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(s.String())
	if actual != "" {
		t.Fatalf("expected no state, got: %s", actual)
	}
}

// TestContext2Apply_destroyDeeplyNestedModule is like destroyNestedModule but
// with a deeper module tree; the final state must be empty.
func TestContext2Apply_destroyDeeplyNestedModule(t *testing.T) {
	m := testModule(t, "apply-destroy-deeply-nested-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.bar").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Test that things were destroyed
	if !s.Empty() {
		t.Fatalf("wrong final state %s\nwant empty state", spew.Sdump(s))
	}
}

// https://github.com/hashicorp/terraform/issues/5440
func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "apply-destroy-module-with-attrs")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	var state *states.State
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		if diags.HasErrors() {
			t.Fatalf("plan diags: %s", diags.Err())
		} else {
			t.Logf("Step 1 plan: %s", legacyDiffComparisonString(plan.Changes))
		}

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("apply errs: %s", diags.Err())
		}

		t.Logf("Step 1 state: %s", state)
	}

	h := new(HookRecordApplyOrder)
	h.Active = true

	{
		ctx := testContext2(t, &ContextOpts{
			Hooks: []Hook{h},
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
		})
		if diags.HasErrors() {
			t.Fatalf("destroy plan err: %s", diags.Err())
		}

		t.Logf("Step 2 plan: %s", legacyDiffComparisonString(plan.Changes))

		ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
		if err != nil {
			t.Fatalf("failed to round-trip through planfile: %s", err)
		}

		ctxOpts.Providers = map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		}

		ctx, diags = NewContext(ctxOpts)
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("destroy apply err: %s", diags.Err())
		}

		t.Logf("Step 2 state: %s", state)
	}

	// Test that things were destroyed
	if state.HasManagedResourceInstanceObjects() {
		t.Fatal("expected empty state, got:", state)
	}
}

func TestContext2Apply_destroyWithModuleVariableAndCount(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count")
	p := testProvider("aws")
	p.PlanResourceChangeFn = 
testDiffFn

	var state *states.State
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("apply err: %s", diags.Err())
		}
	}

	h := new(HookRecordApplyOrder)
	h.Active = true

	{
		ctx := testContext2(t, &ContextOpts{
			Hooks: []Hook{h},
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
		})
		if diags.HasErrors() {
			t.Fatalf("destroy plan err: %s", diags.Err())
		}

		// Round-trip the destroy plan through a plan file, as a real run would.
		ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
		if err != nil {
			t.Fatalf("failed to round-trip through planfile: %s", err)
		}

		ctxOpts.Providers =
			map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			}

		ctx, diags = NewContext(ctxOpts)
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("destroy apply err: %s", diags.Err())
		}
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(`
`)
	if actual != expected {
		t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual)
	}
}

// TestContext2Apply_destroyTargetWithModuleVariableAndCount destroys only a
// targeted child module and checks the targeting warnings from plan and apply.
func TestContext2Apply_destroyTargetWithModuleVariableAndCount(t *testing.T) {
	m := testModule(t, "apply-destroy-mod-var-and-count")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	var state *states.State
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
		assertNoErrors(t, diags)

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("apply err: %s", diags.Err())
		}
	}

	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		plan, diags := ctx.Plan(m, state, &PlanOpts{
			Mode: plans.DestroyMode,
			Targets: []addrs.Targetable{
				addrs.RootModuleInstance.Child("child", addrs.NoKey),
			},
		})
		if diags.HasErrors() {
			t.Fatalf("plan err: %s", diags)
		}
		if len(diags) != 1 {
			// Should have one warning that -target is in effect.
			t.Fatalf("got %d diagnostics in plan; want 1", len(diags))
		}
		if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
			t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
		}
		if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want {
			t.Errorf("wrong diagnostic summary %#v; want %#v", got, want)
		}

		// Destroy, targeting the module explicitly
		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("destroy apply err: %s", diags)
		}
		if len(diags) != 1 {
			t.Fatalf("got %d diagnostics; want 1", len(diags))
		}
		if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
			t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
		}
		if got, want := diags[0].Description().Summary, "Applied changes may be incomplete"; got != want {
			t.Errorf("wrong diagnostic summary %#v; want %#v", got, want)
		}
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(``)
	if actual != expected {
		t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual)
	}
}

// TestContext2Apply_destroyWithModuleVariableAndCountNested is the nested-
// module variant, also round-tripping the destroy plan through a plan file.
func TestContext2Apply_destroyWithModuleVariableAndCountNested(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count-nested")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	var state *states.State
	{
		ctx := testContext2(t, &ContextOpts{
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
		assertNoErrors(t, diags)

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("apply err: %s", diags.Err())
		}
	}

	h := new(HookRecordApplyOrder)
	h.Active = true

	{
		ctx := testContext2(t, &ContextOpts{
			Hooks: []Hook{h},
			Providers: map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			},
		})

		// First plan and apply a create operation
		plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables)))
		if diags.HasErrors() {
			t.Fatalf("destroy plan err: %s", diags.Err())
		}

		ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
		if err != nil {
			t.Fatalf("failed to round-trip through planfile: %s", err)
		}

		ctxOpts.Providers =
			map[addrs.Provider]providers.Factory{
				addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
			}

		ctx, diags = NewContext(ctxOpts)
		if diags.HasErrors() {
			t.Fatalf("err: %s", diags.Err())
		}

		state, diags = ctx.Apply(plan, m)
		if diags.HasErrors() {
			t.Fatalf("destroy apply err: %s", diags.Err())
		}
	}

	// Test that things were destroyed
	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(`
`)
	if actual != expected {
		t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual)
	}
}

func TestContext2Apply_destroyOutputs(t *testing.T) {
	m := testModule(t, "apply-destroy-outputs")
	p := testProvider("test")
	p.PlanResourceChangeFn = 
testDiffFn

	p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
		// add the required id
		m := req.Config.AsValueMap()
		m["id"] = cty.StringVal("foo")

		return providers.ReadDataSourceResponse{
			State: cty.ObjectVal(m),
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	// First plan and apply a create operation
	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)

	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// Next, plan and apply a destroy operation
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	mod := state.RootModule()
	if len(mod.Resources) > 0 {
		t.Fatalf("expected no resources, got: %#v", mod)
	}

	// destroying again should produce no errors
	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})
	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	if _, diags := ctx.Apply(plan, m); diags.HasErrors() {
		t.Fatal(diags.Err())
	}
}

// TestContext2Apply_destroyOrphan verifies that a resource tracked in state
// but absent from configuration is removed from state by apply.
func TestContext2Apply_destroyOrphan(t *testing.T) {
	m := testModule(t, "apply-error")
	p := testProvider("aws")
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.baz").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	p.PlanResourceChangeFn = testDiffFn

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	mod := s.RootModule()
	if _, ok := mod.Resources["aws_instance.baz"]; ok {
		t.Fatalf("bad: %#v", mod.Resources)
	}
}

// TestContext2Apply_destroyTaintedProvisioner ensures the provisioner is not
// invoked when destroying the configured resource.
func TestContext2Apply_destroyTaintedProvisioner(t *testing.T) {
	m := testModule(t, "apply-destroy-provisioner")
	p := testProvider("aws")
	pr := testProvisioner()
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"shell": testProvisionerFuncFixed(pr),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	if pr.ProvisionResourceCalled {
		t.Fatal("provisioner should not be called")
	}

	actual := strings.TrimSpace(s.String())
	expected := strings.TrimSpace("")
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

func TestContext2Apply_error(t *testing.T) {
	errored := 
false

	m := testModule(t, "apply-error")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		// The first apply succeeds; every later apply fails so the test can
		// observe what ends up in state after a partial failure.
		if errored {
			resp.NewState = req.PlannedState
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error"))
			return
		}
		errored = true

		return testApplyFn(req)
	}
	p.PlanResourceChangeFn = testDiffFn

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	// Diagnostics is a slice type, so `diags == nil` was a near-vacuous check
	// (any warning would satisfy it). Require an actual error, consistent
	// with the sibling error tests in this file.
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(testTofuApplyErrorStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// TestContext2Apply_errorDestroy verifies that an object stays in state when
// the provider fails the destroy and returns no new state object.
func TestContext2Apply_errorDestroy(t *testing.T) {
	m := testModule(t, "empty")
	p := testProvider("test")

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test_thing": {
				Attributes: map[string]*configschema.Attribute{
					"id": {Type: cty.String, Optional: true},
				},
			},
		},
	})
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		// Should actually be called for this test, because OpenTofu Core
		// constructs the plan for a destroy operation itself.
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		// The apply (in this case, a destroy) always fails, so we can verify
		// that the object stays in the state after a destroy fails even though
		// we aren't returning a new state object here.
		return providers.ApplyResourceChangeResponse{
			Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("failed")),
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	state := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_thing",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"id":"baz"}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	})
	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}

	actual := strings.TrimSpace(state.String())
	expected := strings.TrimSpace(`
test_thing.foo:
  ID = baz
  provider = provider["registry.opentofu.org/hashicorp/test"]
`) // test_thing.foo is still here, even though provider returned no new state along with its error
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

func TestContext2Apply_errorCreateInvalidNew(t *testing.T) {
	m := testModule(t, "apply-error")

	p := testProvider("aws")
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
					"foo":   {Type: cty.String, Optional: true},
				},
			},
		},
	})
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	p.ApplyResourceChangeFn = func(req 
providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		// We're intentionally returning an inconsistent new state here
		// because we want to test that OpenTofu ignores the inconsistency
		// when accompanied by another error.
		return providers.ApplyResourceChangeResponse{
			NewState: cty.ObjectVal(map[string]cty.Value{
				"value": cty.StringVal("wrong wrong wrong wrong"),
				"foo":   cty.StringVal("absolutely brimming over with wrongability"),
			}),
			Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("forced error")),
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	// Diagnostics is a slice type, so `diags == nil` was a near-vacuous check
	// (any warning would satisfy it). Require an actual error, consistent
	// with the sibling error tests in this file.
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}
	if got, want := len(diags), 1; got != want {
		// There should be no additional diagnostics generated by OpenTofu's own eval logic,
		// because the provider's own error supersedes them.
		t.Errorf("wrong number of diagnostics %d; want %d\n%s", got, want, diags.Err())
	}
	if got, want := diags.Err().Error(), "forced error"; !strings.Contains(got, want) {
		t.Errorf("returned error does not contain %q, but it should\n%s", want, diags.Err())
	}
	if got, want := len(state.RootModule().Resources), 1; got != want {
		t.Errorf("%d resources in state before prune; should have %d\n%s", got, want, spew.Sdump(state))
	}
	state.PruneResourceHusks() // aws_instance.bar with no instances gets left behind when we bail out, but that's okay
	if got, want := len(state.RootModule().Resources), 1; got != want {
		t.Errorf("%d resources in state after prune; should have only one (aws_instance.foo, tainted)\n%s", got, spew.Sdump(state))
	}
}

// TestContext2Apply_errorUpdateNullNew verifies that the prior state object is
// retained when a provider fails an update and returns a null new state.
func TestContext2Apply_errorUpdateNullNew(t *testing.T) {
	m := testModule(t, "apply-error")

	p := testProvider("aws")
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"value": {Type: cty.String, Optional: true},
					"foo":   {Type: cty.String, Optional: true},
				},
			},
		},
	})
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
		// We're intentionally returning no NewState here because we want to
		// test that OpenTofu retains the prior state, rather than treating
		// the returned null as "no state" (object deleted).
		return providers.ApplyResourceChangeResponse{
			Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("forced error")),
		}
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	state := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "aws_instance",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"value":"old"}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("aws"),
				Module:   addrs.RootModule,
			},
		)
	})
	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if !diags.HasErrors() {
		t.Fatal("should have error")
	}
	if got, want := len(diags), 1; got != want {
		// There should be no additional diagnostics generated by OpenTofu's own eval logic,
		// because the provider's own error supersedes them.
+ t.Errorf("wrong number of diagnostics %d; want %d\n%s", got, want, diags.Err()) + } + if got, want := diags.Err().Error(), "forced error"; !strings.Contains(got, want) { + t.Errorf("returned error does not contain %q, but it should\n%s", want, diags.Err()) + } + state.PruneResourceHusks() + if got, want := len(state.RootModule().Resources), 1; got != want { + t.Fatalf("%d resources in state; should have only one (aws_instance.foo, unmodified)\n%s", got, spew.Sdump(state)) + } + + is := state.ResourceInstance(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) + if is == nil { + t.Fatalf("aws_instance.foo is not in the state after apply") + } + if got, want := is.Current.AttrsJSON, []byte(`"old"`); !bytes.Contains(got, want) { + t.Fatalf("incorrect attributes for aws_instance.foo\ngot: %s\nwant: JSON containing %s\n\n%s", got, want, spew.Sdump(is)) + } +} + +func TestContext2Apply_errorPartial(t *testing.T) { + errored := false + + m := testModule(t, "apply-error") + p := testProvider("aws") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if errored { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("error")) + return + } + errored = true + + return testApplyFn(req) + } + p.PlanResourceChangeFn = testDiffFn + + plan, diags := ctx.Plan(m, state, 
DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + if diags == nil { + t.Fatal("should have error") + } + + mod := s.RootModule() + if len(mod.Resources) != 2 { + t.Fatalf("bad: %#v", mod.Resources) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyErrorPartialStr) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + +func TestContext2Apply_hook(t *testing.T) { + m := testModule(t, "apply-good") + h := new(MockHook) + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !h.PreApplyCalled { + t.Fatal("should be called") + } + if !h.PostApplyCalled { + t.Fatal("should be called") + } + if !h.PostStateUpdateCalled { + t.Fatalf("should call post state update") + } +} + +func TestContext2Apply_hookOrphan(t *testing.T) { + m := testModule(t, "apply-blank") + h := new(MockHook) + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, 
diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !h.PreApplyCalled { + t.Fatal("should be called") + } + if !h.PostApplyCalled { + t.Fatal("should be called") + } + if !h.PostStateUpdateCalled { + t.Fatalf("should call post state update") + } +} + +func TestContext2Apply_idAttr(t *testing.T) { + m := testModule(t, "apply-idattr") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + mod := state.RootModule() + rs, ok := mod.Resources["aws_instance.foo"] + if !ok { + t.Fatal("not in state") + } + var attrs map[string]interface{} + err := json.Unmarshal(rs.Instances[addrs.NoKey].Current.AttrsJSON, &attrs) + if err != nil { + t.Fatal(err) + } + if got, want := attrs["id"], "foo"; got != want { + t.Fatalf("wrong id\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestContext2Apply_outputBasic(t *testing.T) { + m := testModule(t, "apply-output") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyOutputStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + 
} +} + +func TestContext2Apply_outputAdd(t *testing.T) { + m1 := testModule(t, "apply-output-add-before") + p1 := testProvider("aws") + p1.ApplyResourceChangeFn = testApplyFn + p1.PlanResourceChangeFn = testDiffFn + ctx1 := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p1), + }, + }) + + plan1, diags := ctx1.Plan(m1, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state1, diags := ctx1.Apply(plan1, m1) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + m2 := testModule(t, "apply-output-add-after") + p2 := testProvider("aws") + p2.ApplyResourceChangeFn = testApplyFn + p2.PlanResourceChangeFn = testDiffFn + ctx2 := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p2), + }, + }) + + plan2, diags := ctx1.Plan(m2, state1, DefaultPlanOpts) + assertNoErrors(t, diags) + + state2, diags := ctx2.Apply(plan2, m2) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state2.String()) + expected := strings.TrimSpace(testTofuApplyOutputAddStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_outputList(t *testing.T) { + m := testModule(t, "apply-output-list") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyOutputListStr) + if actual != 
expected { + t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) + } +} + +func TestContext2Apply_outputMulti(t *testing.T) { + m := testModule(t, "apply-output-multi") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyOutputMultiStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_outputMultiIndex(t *testing.T) { + m := testModule(t, "apply-output-multi-index") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyOutputMultiIndexStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_taintX(t *testing.T) { + m := testModule(t, "apply-taint") + p := testProvider("aws") + // destroyCount tests against regression of + // https://github.com/hashicorp/terraform/issues/1056 + var destroyCount = int32(0) + var once sync.Once + simulateProviderDelay := func() { + time.Sleep(10 * time.Millisecond) + } + + 
p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + once.Do(simulateProviderDelay) + if req.PlannedState.IsNull() { + atomic.AddInt32(&destroyCount, 1) + } + return testApplyFn(req) + } + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) + } + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyTaintStr) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } + + if destroyCount != 1 { + t.Fatalf("Expected 1 destroy, got %d", destroyCount) + } +} + +func TestContext2Apply_taintDep(t *testing.T) { + m := testModule(t, "apply-taint-dep") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + 
root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","num": "2", "type": "aws_instance", "foo": "baz"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) + } + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyTaintDepStr) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } +} + +func TestContext2Apply_taintDepRequiresNew(t *testing.T) { + m := testModule(t, "apply-taint-dep-requires-new") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"baz","num": "2", "type": "aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","num": "2", "type": "aws_instance", "foo": "baz"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf("plan: %s", legacyDiffComparisonString(plan.Changes)) + } + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuApplyTaintDepRequireNewStr) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } +} + +func TestContext2Apply_targeted(t *testing.T) { + m := testModule(t, "apply-targeted") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) != 1 { + t.Fatalf("expected 1 resource, got: %#v", mod.Resources) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) +} + +func TestContext2Apply_targetedCount(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + `) +} + +func TestContext2Apply_targetedCountIndex(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1), + ), + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + `) +} + +func TestContext2Apply_targetedDestroy(t *testing.T) { + m := testModule(t, "destroy-targeted") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := 
state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetOutputValue("out", cty.StringVal("bar"), false) + + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd345"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if diags := ctx.Validate(m); diags.HasErrors() { + t.Fatalf("validate errors: %s", diags.Err()) + } + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "a", + ), + }, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.RootModule() + if len(mod.Resources) != 0 { + t.Fatalf("expected 0 resources, got: %#v", mod.Resources) + } + + // the root output should not get removed; only the targeted resource. + // + // Note: earlier versions of this test expected 0 outputs, but it turns out + // that was because Validate - not apply or destroy - removed the output + // (which depends on the targeted resource) from state. That version of this + // test did not match actual tofu behavior: the output remains in + // state. 
+ // + // The reason it remains in the state is that we prune out the root module + // output values from the destroy graph as part of pruning out the "update" + // nodes for the resources, because otherwise the root module output values + // force the resources to stay in the graph and can therefore cause + // unwanted dependency cycles. + // + // TODO: Future refactoring may enable us to remove the output from state in + // this case, and that would be Just Fine - this test can be modified to + // expect 0 outputs. + if len(mod.OutputValues) != 1 { + t.Fatalf("expected 1 outputs, got: %#v", mod.OutputValues) + } + + // the module instance should remain + mod = state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + if len(mod.Resources) != 1 { + t.Fatalf("expected 1 resources, got: %#v", mod.Resources) + } +} + +func TestContext2Apply_targetedDestroyCountDeps(t *testing.T) { + m := testModule(t, "apply-destroy-targeted-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd345"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + 
Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ``) +} + +// https://github.com/hashicorp/terraform/issues/4462 +func TestContext2Apply_targetedDestroyModule(t *testing.T) { + m := testModule(t, "apply-targeted-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd345"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd345"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) 
+ + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar: + ID = i-abc123 + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo: + ID = i-bcd345 + provider = provider["registry.opentofu.org/hashicorp/aws"] + +module.child: + aws_instance.bar: + ID = i-abc123 + provider = provider["registry.opentofu.org/hashicorp/aws"] + `) +} + +func TestContext2Apply_targetedDestroyCountIndex(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + foo := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd345"}`), + } + bar := &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + foo, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + foo, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + foo, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[0]").Resource, + bar, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("aws_instance.bar[1]").Resource, + bar, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[2]").Resource, + bar, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(2), + ), + addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "bar", addrs.IntKey(1), + ), + }, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.bar.0: + ID = i-abc123 + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.bar.2: + ID = i-abc123 + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo.0: + ID = i-bcd345 + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo.1: + ID = i-bcd345 + provider = provider["registry.opentofu.org/hashicorp/aws"] + `) +} + +func TestContext2Apply_targetedModule(t *testing.T) { + m := testModule(t, "apply-targeted-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child", addrs.NoKey), + }, + }) + assertNoErrors(t, diags) + + state, diags 
:= ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + if mod == nil { + t.Fatalf("no child module found in the state!\n\n%#v", state) + } + if len(mod.Resources) != 2 { + t.Fatalf("expected 2 resources, got: %#v", mod.Resources) + } + + checkStateString(t, state, ` + +module.child: + aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) +} + +// GH-1858 +func TestContext2Apply_targetedModuleDep(t *testing.T) { + m := testModule(t, "apply-targeted-module-dep") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf("Diff: %s", legacyDiffComparisonString(plan.Changes)) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance + + Dependencies: + module.child.aws_instance.mod + +module.child: + aws_instance.mod: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Outputs: + + output = foo + `) +} + +// GH-10911 untargeted outputs should not be in the graph, and therefore +// not execute. 
+func TestContext2Apply_targetedModuleUnrelatedOutputs(t *testing.T) { + m := testModule(t, "apply-targeted-module-unrelated-outputs") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + state := states.NewState() + _ = state.EnsureModule(addrs.RootModuleInstance.Child("child2", addrs.NoKey)) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child2", addrs.NoKey), + }, + }) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + // - module.child1's instance_id output is dropped because we don't preserve + // non-root module outputs between runs (they can be recalculated from config) + // - module.child2's instance_id is updated because its dependency is updated + // - child2_id is updated because of its transitive dependency via module.child2 + checkStateString(t, s, ` + +Outputs: + +child2_id = foo + +module.child2: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Outputs: + + instance_id = foo +`) +} + +func TestContext2Apply_targetedModuleResource(t *testing.T) { + m := testModule(t, "apply-targeted-module-resource") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + 
}, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + mod := state.Module(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + if mod == nil || len(mod.Resources) != 1 { + t.Fatalf("expected 1 resource, got: %#v", mod) + } + + checkStateString(t, state, ` + +module.child: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) +} + +func TestContext2Apply_targetedResourceOrphanModule(t *testing.T) { + m := testModule(t, "apply-targeted-resource-orphan-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_unknownAttribute(t *testing.T) { + m := testModule(t, "apply-unknown") + p := testProvider("aws") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp = testDiffFn(req) + planned := resp.PlannedState.AsValueMap() + planned["unknown"] = cty.UnknownVal(cty.String) + resp.PlannedState = 
cty.ObjectVal(planned) + return resp + } + p.ApplyResourceChangeFn = testApplyFn + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "num": {Type: cty.Number, Optional: true}, + "unknown": {Type: cty.String, Computed: true}, + "type": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Error("should error, because attribute 'unknown' is still unknown after apply") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyUnknownAttrStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_unknownAttributeInterpolate(t *testing.T) { + m := testModule(t, "apply-unknown-interpolate") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts); diags == nil { + t.Fatal("should error") + } +} + +func TestContext2Apply_vars(t *testing.T) { + fixture := contextFixtureApplyVars(t) + opts := fixture.ContextOpts() + ctx := testContext2(t, opts) + m := fixture.Config + + diags := ctx.Validate(m) + if len(diags) != 0 { + t.Fatalf("bad: %s", diags.ErrWithWarnings()) + } + + variables := InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("us-east-1"), + SourceType: ValueFromCaller, + }, + "bar": 
&InputValue{ + // This one is not explicitly set but that's okay because it + // has a declared default, which OpenTofu Core will use instead. + Value: cty.NilVal, + SourceType: ValueFromCaller, + }, + "test_list": &InputValue{ + Value: cty.ListVal([]cty.Value{ + cty.StringVal("Hello"), + cty.StringVal("World"), + }), + SourceType: ValueFromCaller, + }, + "test_map": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "Hello": cty.StringVal("World"), + "Foo": cty.StringVal("Bar"), + "Baz": cty.StringVal("Foo"), + }), + SourceType: ValueFromCaller, + }, + "amis": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "us-east-1": cty.StringVal("override"), + }), + SourceType: ValueFromCaller, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: variables, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(testTofuApplyVarsStr) + if got != want { + t.Errorf("wrong result\n\ngot:\n%s\n\nwant:\n%s", got, want) + } +} + +func TestContext2Apply_varsEnv(t *testing.T) { + fixture := contextFixtureApplyVarsEnv(t) + opts := fixture.ContextOpts() + ctx := testContext2(t, opts) + m := fixture.Config + + diags := ctx.Validate(m) + if len(diags) != 0 { + t.Fatalf("bad: %s", diags.ErrWithWarnings()) + } + + variables := InputValues{ + "string": &InputValue{ + Value: cty.StringVal("baz"), + SourceType: ValueFromEnvVar, + }, + "list": &InputValue{ + Value: cty.ListVal([]cty.Value{ + cty.StringVal("Hello"), + cty.StringVal("World"), + }), + SourceType: ValueFromEnvVar, + }, + "map": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "Hello": cty.StringVal("World"), + "Foo": cty.StringVal("Bar"), + "Baz": cty.StringVal("Foo"), + }), + SourceType: ValueFromEnvVar, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + 
SetVariables: variables, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyVarsEnvStr) + if actual != expected { + t.Errorf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_createBefore_depends(t *testing.T) { + m := testModule(t, "apply-depends-create-before") + h := new(HookRecordApplyOrder) + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","require_new":"ami-old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "lb", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz","instance":"bar"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("plan failed") + } 
else { + t.Logf("plan:\n%s", legacyDiffComparisonString(plan.Changes)) + } + + h.Active = true + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("apply failed") + } + + mod := state.RootModule() + if len(mod.Resources) < 2 { + t.Logf("state after apply:\n%s", state.String()) + t.Fatalf("only %d resources in root module; want at least 2", len(mod.Resources)) + } + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(testTofuApplyDependsCreateBeforeStr) + if got != want { + t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", got, want) + } + + // Test that things were managed _in the right order_ + order := h.States + + diffs := h.Diffs + if !order[0].IsNull() || diffs[0].Action == plans.Delete { + t.Fatalf("should create new instance first: %#v", order) + } + + if order[1].GetAttr("id").AsString() != "baz" { + t.Fatalf("update must happen after create: %#v", order[1]) + } + + if order[2].GetAttr("id").AsString() != "bar" || diffs[2].Action != plans.Delete { + t.Fatalf("destroy must happen after update: %#v", order[2]) + } +} + +func TestContext2Apply_singleDestroy(t *testing.T) { + m := testModule(t, "apply-depends-create-before") + h := new(HookRecordApplyOrder) + p := testProvider("aws") + invokeCount := 0 + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + invokeCount++ + switch invokeCount { + case 1: + if req.PlannedState.IsNull() { + t.Fatalf("should not destroy") + } + if id := req.PlannedState.GetAttr("id"); id.IsKnown() { + t.Fatalf("should not have ID") + } + case 2: + if req.PlannedState.IsNull() { + t.Fatalf("should not destroy") + } + if id := req.PlannedState.GetAttr("id"); id.AsString() != "baz" { + t.Fatalf("should have id") + } + case 3: + if !req.PlannedState.IsNull() { + t.Fatalf("should destroy") + } + default: + t.Fatalf("bad invoke count %d", invokeCount) + } + return testApplyFn(req) + } + + p.PlanResourceChangeFn 
= testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","require_new":"ami-old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "lb", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz","instance":"bar"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + h.Active = true + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + if invokeCount != 3 { + t.Fatalf("bad: %d", invokeCount) + } +} + +// GH-7824 +func TestContext2Apply_issue7824(t *testing.T) { + p := testProvider("template") + p.PlanResourceChangeFn = testDiffFn + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "template_file": { + Attributes: map[string]*configschema.Attribute{ + "template": {Type: cty.String, Optional: true}, + "__template_requires_new": {Type: cty.Bool, Optional: true}, + }, + }, + }, + 
}) + + m, snap := testModuleWithSnapshot(t, "issue-7824") + + // Apply cleanly step 0 + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + // Write / Read plan to simulate running it through a Plan file + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatalf("failed to round-trip through planfile: %s", err) + } + + ctxOpts.Providers = + map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + } + + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } +} + +// This deals with the situation where a splat expression is used referring +// to another resource whose count is non-constant. +func TestContext2Apply_issue5254(t *testing.T) { + // Create a provider. We use "template" here just to match the repro + // we got from the issue itself. 
+ p := testProvider("template") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "template_file": { + Attributes: map[string]*configschema.Attribute{ + "template": {Type: cty.String, Optional: true}, + "__template_requires_new": {Type: cty.Bool, Optional: true}, + "id": {Type: cty.String, Computed: true}, + "type": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + // Apply cleanly step 0 + m := testModule(t, "issue-5254/step-0") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + m, snap := testModuleWithSnapshot(t, "issue-5254/step-1") + + // Application success. 
Now make the modification and store a plan + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + // Write / Read plan to simulate running it through a Plan file + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatalf("failed to round-trip through planfile: %s", err) + } + + ctxOpts.Providers = map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + } + + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(` +template_file.child: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/template"] + __template_requires_new = true + template = Hi + type = template_file + + Dependencies: + template_file.parent +template_file.parent.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/template"] + template = Hi + type = template_file +`) + if actual != expected { + t.Fatalf("wrong final state\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Apply_targetedWithTaintedInState(t *testing.T) { + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + m, snap := testModuleWithSnapshot(t, "apply-tainted-targets") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.ifailedprovisioners").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: 
[]byte(`{"id":"ifailedprovisioners"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// Target only the resource being added; the tainted instance must be
	// left untouched in state.
	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.NormalMode,
		Targets: []addrs.Targetable{
			addrs.RootModuleInstance.Resource(
				addrs.ManagedResourceMode, "aws_instance", "iambeingadded",
			),
		},
	})
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	// Write / Read plan to simulate running it through a Plan file
	ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
	if err != nil {
		t.Fatalf("failed to round-trip through planfile: %s", err)
	}

	ctxOpts.Providers = map[addrs.Provider]providers.Factory{
		addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
	}

	ctx, diags = NewContext(ctxOpts)
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	actual := strings.TrimSpace(s.String())
	expected := strings.TrimSpace(`
aws_instance.iambeingadded:
  ID = foo
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  type = aws_instance
aws_instance.ifailedprovisioners: (tainted)
  ID = ifailedprovisioners
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  `)
	if actual != expected {
		t.Fatalf("expected state: \n%s\ngot: \n%s", expected, actual)
	}
}

// Higher level test exposing the bug this covers in
// TestResource_ignoreChangesRequired
func TestContext2Apply_ignoreChangesCreate(t *testing.T) {
	m := testModule(t, "apply-ignore-changes-create")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.ApplyResourceChangeFn = testApplyFn

	// Extend the test schema with a required attribute so the fixture's
	// ignore_changes has a required field to interact with on create.
	instanceSchema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	instanceSchema.Attributes["required_field"] = &configschema.Attribute{
		Type:     cty.String,
		Required: true,
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	} else {
		// Use a constant format string: passing the diff text directly as
		// the format argument trips "go vet"'s printf check and would
		// misinterpret any literal '%' in the rendered diff.
		t.Logf("%s", legacyDiffComparisonString(plan.Changes))
	}

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	mod := state.RootModule()
	if len(mod.Resources) != 1 {
		t.Fatalf("bad: %s", state)
	}

	actual := strings.TrimSpace(state.String())
	// Expect no changes from original state
	expected := strings.TrimSpace(`
aws_instance.foo:
  ID = foo
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  required_field = set
  type = aws_instance
`)
	if actual != expected {
		t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual)
	}
}

func TestContext2Apply_ignoreChangesWithDep(t *testing.T) {
	m := testModule(t, "apply-ignore-changes-dep")
	p := testProvider("aws")

	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState

		switch req.TypeName {
		case "aws_instance":
			resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: "ami"}})
		case "aws_eip":
			return testDiffFn(req)
		default:
			t.Fatalf("Unexpected type: %s", req.TypeName)
		}
		return
	}

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123","ami":"ami-abcd1234"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	root.SetResourceInstanceCurrent(
mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd234","ami":"i-bcd234"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_eip.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"eip-abc123","instance":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: addrs.RootModule, + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_eip.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"eip-bcd234","instance":"i-bcd234"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }, + Module: addrs.RootModule, + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state.DeepCopy(), DefaultPlanOpts) + assertNoErrors(t, diags) + + s, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(state.String()) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + +func TestContext2Apply_ignoreChangesAll(t *testing.T) { + m := testModule(t, "apply-ignore-changes-all") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + instanceSchema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + instanceSchema.Attributes["required_field"] = &configschema.Attribute{ + Type: cty.String, + Required: true, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("plan failed") + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + mod := state.RootModule() + if len(mod.Resources) != 1 { + t.Fatalf("bad: %s", state) + } + + actual := strings.TrimSpace(state.String()) + // Expect no changes from original state + expected := strings.TrimSpace(` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + required_field = set + type = aws_instance +`) + if actual != expected { + t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) + } +} + +// https://github.com/hashicorp/terraform/issues/7378 +func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testing.T) { + m, snap := testModuleWithSnapshot(t, "apply-destroy-nested-module-with-attrs") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + + var state *states.State + { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + // First plan and apply a create operation + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply err: %s", diags.Err()) + } + } + + { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := 
ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("destroy plan err: %s", diags.Err()) + } + + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatalf("failed to round-trip through planfile: %s", err) + } + + ctxOpts.Providers = map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + } + + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("destroy apply err: %s", diags.Err()) + } + } + + if !state.Empty() { + t.Fatalf("state after apply: %s\nwant empty state", spew.Sdump(state)) + } +} + +// If a data source explicitly depends on another resource, it's because we need +// that resource to be applied first. +func TestContext2Apply_dataDependsOn(t *testing.T) { + p := testProvider("null") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "null_instance" "write" { + foo = "attribute" +} + +data "null_data_source" "read" { + count = 1 + depends_on = ["null_instance.write"] +} + +resource "null_instance" "depends" { + foo = data.null_data_source.read[0].foo +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + // the "provisioner" here writes to this variable, because the intent is to + // create a dependency which can't be viewed through the graph, and depends + // solely on the configuration providing "depends_on" + provisionerOutput := "" + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + // the side effect of the resource being applied + provisionerOutput = "APPLIED" + return testApplyFn(req) + } + + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("boop"), + "foo": cty.StringVal(provisionerOutput), + }), + } + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + root := state.Module(addrs.RootModuleInstance) + is := root.ResourceInstance(addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_data_source", + Name: "read", + }.Instance(addrs.IntKey(0))) + if is == nil { + t.Fatal("data resource instance is not present in state; should be") + } + var attrs map[string]interface{} + err := json.Unmarshal(is.Current.AttrsJSON, &attrs) + if err != nil { + t.Fatal(err) + } + actual := attrs["foo"] + expected := "APPLIED" + if actual != expected { + t.Fatalf("bad:\n%s", strings.TrimSpace(state.String())) + } + + // run another plan to make sure the data source doesn't show as a change + plan, diags = ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("unexpected change for %s", c.Addr) + } + } + + // now we cause a change in the first resource, which should trigger a plan + // in the data source, and the resource that depends on the data source + // must plan a change as well. 
+ m = testModuleInline(t, map[string]string{ + "main.tf": ` +resource "null_instance" "write" { + foo = "new" +} + +data "null_data_source" "read" { + depends_on = ["null_instance.write"] +} + +resource "null_instance" "depends" { + foo = data.null_data_source.read.foo +} +`}) + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + // the side effect of the resource being applied + provisionerOutput = "APPLIED_AGAIN" + return testApplyFn(req) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + expectedChanges := map[string]plans.Action{ + "null_instance.write": plans.Update, + "data.null_data_source.read": plans.Read, + "null_instance.depends": plans.Update, + } + + for _, c := range plan.Changes.Resources { + if c.Action != expectedChanges[c.Addr.String()] { + t.Errorf("unexpected %s for %s", c.Action, c.Addr) + } + } +} + +func TestContext2Apply_tfWorkspace(t *testing.T) { + m := testModule(t, "apply-tf-workspace") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Meta: &ContextMeta{Env: "foo"}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := state.RootModule().OutputValues["output"] + expected := cty.StringVal("foo") + if actual == nil || actual.Value != expected { + t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) + } +} + +func TestContext2Apply_tofuWorkspace(t *testing.T) { + m := testModule(t, "apply-tofu-workspace") + p := 
testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Meta: &ContextMeta{Env: "foo"}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + actual := state.RootModule().OutputValues["output"] + expected := cty.StringVal("foo") + if actual == nil || actual.Value != expected { + t.Fatalf("wrong value\ngot: %#v\nwant: %#v", actual.Value, expected) + } +} + +// verify that multiple config references only create a single depends_on entry +func TestContext2Apply_multiRef(t *testing.T) { + m := testModule(t, "apply-multi-ref") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + deps := state.Modules[""].Resources["aws_instance.other"].Instances[addrs.NoKey].Current.Dependencies + if len(deps) != 1 || deps[0].String() != "aws_instance.create" { + t.Fatalf("expected 1 depends_on entry for aws_instance.create, got %q", deps) + } +} + +func TestContext2Apply_targetedModuleRecursive(t *testing.T) { + m := testModule(t, "apply-targeted-module-recursive") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: 
plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child", addrs.NoKey), + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + mod := state.Module( + addrs.RootModuleInstance.Child("child", addrs.NoKey).Child("subchild", addrs.NoKey), + ) + if mod == nil { + t.Fatalf("no subchild module found in the state!\n\n%#v", state) + } + if len(mod.Resources) != 1 { + t.Fatalf("expected 1 resources, got: %#v", mod.Resources) + } + + checkStateString(t, state, ` + +module.child.subchild: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + `) +} + +func TestContext2Apply_localVal(t *testing.T) { + m := testModule(t, "apply-local-val") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{}, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("error during apply: %s", diags.Err()) + } + + got := strings.TrimSpace(state.String()) + want := strings.TrimSpace(` + +Outputs: + +result_1 = hello +result_3 = hello world +`) + if got != want { + t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want) + } +} + +func TestContext2Apply_destroyWithLocals(t *testing.T) { + m := testModule(t, "apply-destroy-with-locals") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetOutputValue("name", cty.StringVal("test-bar"), false) + + ctx := testContext2(t, 
&ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	s, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("error during apply: %s", diags.Err())
	}

	got := strings.TrimSpace(s.String())
	want := strings.TrimSpace(``)
	if got != want {
		t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want)
	}
}

func TestContext2Apply_providerWithLocals(t *testing.T) {
	m := testModule(t, "provider-with-locals")
	p := testProvider("aws")

	providerRegion := ""
	// this should not be overridden during destroy
	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
		val := req.Config.GetAttr("region")
		if !val.IsNull() {
			providerRegion = val.AsString()
		}

		return
	}

	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	assertNoErrors(t, diags)

	state, diags := ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags = ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	if state.HasManagedResourceInstanceObjects() {
		t.Fatal("expected no state, got:", state)
	}

	if providerRegion != "bar" {
		t.Fatalf("expected region %q, got: %q", "bar", providerRegion)
	}
}

func TestContext2Apply_destroyWithProviders(t *testing.T) {
	m := testModule(t, "destroy-module-with-provider")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	removed := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey).Child("removed", addrs.NoKey))
	removed.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.child").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"].baz`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// test that we can't destroy if the provider is missing.
	// Check HasErrors rather than comparing the diagnostics to nil: a
	// successful plan may still carry warning diagnostics, which would be
	// non-nil and let a wrongly-succeeding plan slip past this assertion.
	if _, diags := ctx.Plan(m, state, &PlanOpts{Mode: plans.DestroyMode}); !diags.HasErrors() {
		t.Fatal("expected plan error, provider.aws.baz doesn't exist")
	}

	// correct the state
	state.Modules["module.mod.module.removed"].Resources["aws_instance.child"].ProviderConfig = mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"].bar`)

	ctx = testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	assertNoErrors(t, diags)

	state, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("error during apply: %s", diags.Err())
	}

	got := strings.TrimSpace(state.String())

	want := strings.TrimSpace("")
	if got != want {
		t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", got, want)
	}
}

func TestContext2Apply_providersFromState(t *testing.T) {
	m := configs.NewEmptyConfig()
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	implicitProviderState := states.NewState()
	impRoot := implicitProviderState.EnsureModule(addrs.RootModuleInstance)
	impRoot.SetResourceInstanceCurrent(
mustResourceInstanceAddr("aws_instance.a").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	aliasedProviderState := states.NewState()
	aliasRoot := aliasedProviderState.EnsureModule(addrs.RootModuleInstance)
	aliasRoot.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.a").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"].bar`),
	)

	moduleProviderState := states.NewState()
	moduleProviderRoot := moduleProviderState.EnsureModule(addrs.RootModuleInstance)
	moduleProviderRoot.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.a").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`module.child.provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	for _, tc := range []struct {
		name   string
		state  *states.State
		output string
		err    bool
	}{
		{
			name:   "add implicit provider",
			state:  implicitProviderState,
			err:    false,
			output: "",
		},

		// an aliased provider must be in the config to remove a resource
		{
			name:  "add aliased provider",
			state: aliasedProviderState,
			err:   true,
		},

		// a provider in a module implies some sort of config, so this isn't
		// allowed even without an alias
		{
			name:  "add unaliased module provider",
			state: moduleProviderState,
			err:   true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx := testContext2(t, &ContextOpts{
				Providers: map[addrs.Provider]providers.Factory{
					addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
				},
			})

			plan, diags := ctx.Plan(m, tc.state, DefaultPlanOpts)
			if tc.err {
				// Check HasErrors rather than comparing diagnostics to nil:
				// a successful plan may still carry warning diagnostics,
				// which would be non-nil and mask a missing error here.
				if !diags.HasErrors() {
					t.Fatal("expected error")
				}
				return
			}
			if diags.HasErrors() {
				t.Fatal(diags.Err())
			}

			state, diags := ctx.Apply(plan, m)
			if diags.HasErrors() {
				t.Fatalf("diags: %s", diags.Err())
			}

			checkStateString(t, state, "")

		})
	}
}

func TestContext2Apply_plannedInterpolatedCount(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "apply-interpolated-count")

	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	Providers := map[addrs.Provider]providers.Factory{
		addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
	}

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.test").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: Providers,
	})

	plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	if diags.HasErrors() {
		t.Fatalf("plan failed: %s", diags.Err())
	}

	// We'll marshal and unmarshal the plan here, to ensure that we have
	// a clean new context as would be created if we separately ran
	// tofu plan -out=tfplan && tofu apply tfplan
	ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan)
	if err != nil {
		t.Fatalf("failed to round-trip through planfile: %s", err)
	}

	ctxOpts.Providers = Providers
	ctx, diags = NewContext(ctxOpts)
	if diags.HasErrors() {
		t.Fatalf("err: %s", diags.Err())
	}

	// Applying the plan should now succeed
	_, diags = ctx.Apply(plan, m)
	if diags.HasErrors() {
		t.Fatalf("apply failed: %s", diags.Err())
	}
}

func TestContext2Apply_plannedDestroyInterpolatedCount(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "plan-destroy-interpolated-count")

	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.a[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.a[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetOutputValue("out", cty.ListVal([]cty.Value{cty.StringVal("foo"), cty.StringVal("foo")}), false) + + ctx := testContext2(t, &ContextOpts{ + Providers: providers, + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.DestroyMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("plan failed: %s", diags.Err()) + } + + // We'll marshal and unmarshal the plan here, to ensure that we have + // a clean new context as would be created if we separately ran + // tofu plan -out=tfplan && tofu apply tfplan + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatalf("failed to round-trip through planfile: %s", err) + } + + ctxOpts.Providers = providers + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + // Applying the plan should now succeed + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply failed: %s", diags.Err()) + } + if !state.Empty() { + t.Fatalf("state not empty: %s\n", state) + } +} + +func TestContext2Apply_scaleInMultivarRef(t *testing.T) { + m := testModule(t, "apply-resource-scale-in") + + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + 
Providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.one").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.two").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: Providers, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "instance_count": { + Value: cty.NumberIntVal(0), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + { + addr := mustResourceInstanceAddr("aws_instance.one[0]") + change := plan.Changes.ResourceInstance(addr) + if change == nil { + t.Fatalf("no planned change for %s", addr) + } + // This test was originally written with Terraform v0.11 and earlier + // in mind, so it declares a no-key instance of aws_instance.one, + // but its configuration sets count (to zero) and so we end up first + // moving the no-key instance to the zero key and then planning to + // destroy the zero key. 
+ if got, want := change.PrevRunAddr, mustResourceInstanceAddr("aws_instance.one"); !want.Equal(got) { + t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want { + t.Errorf("wrong action reason for %s %s; want %s", addr, got, want) + } + } + { + addr := mustResourceInstanceAddr("aws_instance.two") + change := plan.Changes.ResourceInstance(addr) + if change == nil { + t.Fatalf("no planned change for %s", addr) + } + if got, want := change.PrevRunAddr, mustResourceInstanceAddr("aws_instance.two"); !want.Equal(got) { + t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) + } + if got, want := change.Action, plans.Update; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason for %s %s; want %s", addr, got, want) + } + } + + // Applying the plan should now succeed + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) +} + +func TestContext2Apply_inconsistentWithPlan(t *testing.T) { + m := testModule(t, "apply-inconsistent-with-plan") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("before"), + }), + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) 
providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + // This is intentionally incorrect: because id was fixed at "before" + // during plan, it must not change during apply. + "id": cty.StringVal("after"), + }), + } + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatalf("apply succeeded; want error") + } + if got, want := diags.Err().Error(), "Provider produced inconsistent result after apply"; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nshould contain: %s", got, want) + } +} + +// Issue 19908 was about retaining an existing object in the state when an +// update to it fails and the provider does not return a partially-updated +// value for it. Previously we were incorrectly removing it from the state +// in that case, but instead it should be retained so the update can be +// retried. 
+func TestContext2Apply_issue19908(t *testing.T) { + m := testModule(t, "apply-issue19908") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test": { + Attributes: map[string]*configschema.Attribute{ + "baz": {Type: cty.String, Required: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + var diags tfdiags.Diagnostics + diags = diags.Append(fmt.Errorf("update failed")) + return providers.ApplyResourceChangeResponse{ + Diagnostics: diags, + } + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"baz":"old"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatalf("apply succeeded; want error") + } + if got, want := diags.Err().Error(), "update failed"; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nshould contain: %s", got, want) + } + + mod := state.RootModule() + rs := mod.Resources["test.foo"] + if rs == nil { + t.Fatalf("test.foo not in state after apply, but should be") + } + is := 
rs.Instances[addrs.NoKey] + if is == nil { + t.Fatalf("test.foo not in state after apply, but should be") + } + obj := is.Current + if obj == nil { + t.Fatalf("test.foo has no current object in state after apply, but should do") + } + + if got, want := obj.Status, states.ObjectReady; got != want { + t.Errorf("test.foo has wrong status %s after apply; want %s", got, want) + } + if got, want := obj.AttrsJSON, []byte(`"old"`); !bytes.Contains(got, want) { + t.Errorf("test.foo attributes JSON doesn't contain %s after apply\ngot: %s", want, got) + } +} + +func TestContext2Apply_invalidIndexRef(t *testing.T) { + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = testDiffFn + + m := testModule(t, "apply-invalid-index") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected validation failure: %s", diags.Err()) + } + + wantErr := `The given key does not identify an element in this collection value` + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + + if !diags.HasErrors() { + t.Fatalf("plan succeeded; want error") + } + gotErr := diags.Err().Error() + + if !strings.Contains(gotErr, wantErr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErr, wantErr) + } +} + +func TestContext2Apply_moduleReplaceCycle(t *testing.T) { + for _, mode := range []string{"normal", "cbd"} { + var m *configs.Config + + switch mode { + case "normal": + m = testModule(t, "apply-module-replace-cycle") + case "cbd": + m = testModule(t, "apply-module-replace-cycle-cbd") + } + + p := 
testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + instanceSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "require_new": {Type: cty.String, Optional: true}, + }, + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": instanceSchema, + }, + }) + + state := states.NewState() + modA := state.EnsureModule(addrs.RootModuleInstance.Child("a", addrs.NoKey)) + modA.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "a", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","require_new":"old"}`), + CreateBeforeDestroy: mode == "cbd", + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + modB := state.EnsureModule(addrs.RootModuleInstance.Child("b", addrs.NoKey)) + modB.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "b", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b","require_new":"old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + aBefore, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("a"), + "require_new": cty.StringVal("old"), + }), instanceSchema.ImpliedType()) + aAfter, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "require_new": cty.StringVal("new"), + }), instanceSchema.ImpliedType()) + bBefore, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("b"), + "require_new": cty.StringVal("old"), + }), 
instanceSchema.ImpliedType()) + bAfter, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "require_new": cty.UnknownVal(cty.String), + }), instanceSchema.ImpliedType()) + + var aAction plans.Action + switch mode { + case "normal": + aAction = plans.DeleteThenCreate + case "cbd": + aAction = plans.CreateThenDelete + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("a", addrs.NoKey)), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: aAction, + Before: aBefore, + After: aAfter, + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "b", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance.Child("b", addrs.NoKey)), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.DeleteThenCreate, + Before: bBefore, + After: bAfter, + }, + }, + }, + } + + plan := &plans.Plan{ + UIMode: plans.NormalMode, + Changes: changes, + PriorState: state.DeepCopy(), + PrevRunState: state.DeepCopy(), + } + + t.Run(mode, func(t *testing.T) { + _, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + }) + } +} + +func TestContext2Apply_destroyDataCycle(t *testing.T) { + m, snap := testModuleWithSnapshot(t, "apply-destroy-data-cycle") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("new"), + "foo": cty.NullVal(cty.String), + }), + } + } + + tp := testProvider("test") + tp.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "a", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("null"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_data_source", + Name: "d", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_data_source", + Name: "d", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("null"), + Module: addrs.RootModule, + }, + ) + + Providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + addrs.NewDefaultProvider("test"): testProviderFuncFixed(tp), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: Providers, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) 
+ diags.HasErrors() + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + // We'll marshal and unmarshal the plan here, to ensure that we have + // a clean new context as would be created if we separately ran + // tofu plan -out=tfplan && tofu apply tfplan + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatal(err) + } + ctxOpts.Providers = Providers + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("failed to create context for plan: %s", diags.Err()) + } + + tp.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + foo := req.Config.GetAttr("foo") + if !foo.IsKnown() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown config value foo")) + return resp + } + + if foo.AsString() != "new" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("wrong config value: %q", foo.AsString())) + } + return resp + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } +} + +func TestContext2Apply_taintedDestroyFailure(t *testing.T) { + m := testModule(t, "apply-destroy-tainted") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // All destroys fail. + if req.PlannedState.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("failure")) + return + } + + // c will also fail to create, meaning the existing tainted instance + // becomes deposed, ans is then promoted back to current. 
+ // only C has a foo attribute + planned := req.PlannedState.AsValueMap() + foo, ok := planned["foo"] + if ok && !foo.IsNull() && foo.AsString() == "c" { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("failure")) + return + } + + return testApplyFn(req) + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"a","foo":"a"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "b", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"b","foo":"b"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "c", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"c","foo":"old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + Providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + } + + ctx := testContext2(t, 
&ContextOpts{ + Providers: Providers, + Hooks: []Hook{&testHook{}}, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + diags.HasErrors() + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("expected error") + } + + root = state.Module(addrs.RootModuleInstance) + + // the instance that failed to destroy should remain tainted + a := root.ResourceInstance(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + }.Instance(addrs.NoKey)) + + if a.Current.Status != states.ObjectTainted { + t.Fatal("test_instance.a should be tainted") + } + + // b is create_before_destroy, and the destroy failed, so there should be 1 + // deposed instance. + b := root.ResourceInstance(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "b", + }.Instance(addrs.NoKey)) + + if b.Current.Status != states.ObjectReady { + t.Fatal("test_instance.b should be Ready") + } + + if len(b.Deposed) != 1 { + t.Fatal("test_instance.b failed to keep deposed instance") + } + + // the desposed c instance should be promoted back to Current, and remain + // tainted + c := root.ResourceInstance(addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "c", + }.Instance(addrs.NoKey)) + + if c.Current == nil { + t.Fatal("test_instance.c has no current instance, but it should") + } + + if c.Current.Status != states.ObjectTainted { + t.Fatal("test_instance.c should be tainted") + } + + if len(c.Deposed) != 0 { + t.Fatal("test_instance.c should have no deposed instances") + } + + if string(c.Current.AttrsJSON) != `{"foo":"old","id":"c"}` { + t.Fatalf("unexpected attrs for c: %q\n", c.Current.AttrsJSON) + } +} + +func TestContext2Apply_plannedConnectionRefs(t *testing.T) { + m := testModule(t, "apply-plan-connection-refs") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = func(req 
providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + s := req.PlannedState.AsValueMap() + // delay "a" slightly, so if the reference edge is missing the "b" + // provisioner will see an unknown value. + if s["foo"].AsString() == "a" { + time.Sleep(500 * time.Millisecond) + } + + s["id"] = cty.StringVal("ID") + if ty, ok := s["type"]; ok && !ty.IsKnown() { + s["type"] = cty.StringVal(req.TypeName) + } + resp.NewState = cty.ObjectVal(s) + return resp + } + + provisionerFactory := func() (provisioners.Interface, error) { + pr := testProvisioner() + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + host := req.Connection.GetAttr("host") + if host.IsNull() || !host.IsKnown() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid host value: %#v", host)) + } + + return resp + } + return pr, nil + } + + Providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + } + + provisioners := map[string]provisioners.Factory{ + "shell": provisionerFactory, + } + + hook := &testHook{} + ctx := testContext2(t, &ContextOpts{ + Providers: Providers, + Provisioners: provisioners, + Hooks: []Hook{hook}, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + diags.HasErrors() + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } +} + +func TestContext2Apply_cbdCycle(t *testing.T) { + m, snap := testModuleWithSnapshot(t, "apply-cbd-cycle") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "a", + 
}.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","require_new":"old","foo":"b"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "b", + }, + Module: addrs.RootModule, + }, + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "c", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "b", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b","require_new":"old","foo":"c"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "c", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "c", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"c","require_new":"old"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + Providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + } + + hook := &testHook{} + ctx := testContext2(t, &ContextOpts{ + Providers: Providers, + Hooks: []Hook{hook}, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + diags.HasErrors() + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + // We'll marshal and unmarshal the 
plan here, to ensure that we have + // a clean new context as would be created if we separately ran + // tofu plan -out=tfplan && tofu apply tfplan + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatal(err) + } + ctxOpts.Providers = Providers + ctx, diags = NewContext(ctxOpts) + if diags.HasErrors() { + t.Fatalf("failed to create context for plan: %s", diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } +} + +func TestContext2Apply_ProviderMeta_apply_set(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + + var pmMu sync.Mutex + arcPMs := map[string]cty.Value{} + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + pmMu.Lock() + defer pmMu.Unlock() + arcPMs[req.TypeName] = req.ProviderMeta + + s := req.PlannedState.AsValueMap() + s["id"] = cty.StringVal("ID") + if ty, ok := s["type"]; ok && !ty.IsKnown() { + s["type"] = cty.StringVal(req.TypeName) + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(s), + } + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + if !p.ApplyResourceChangeCalled { + t.Fatalf("ApplyResourceChange not called") + } + + expectations := map[string]cty.Value{} + + if pm, ok := arcPMs["test_resource"]; !ok { + t.Fatalf("sub-module 
ApplyResourceChange not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in sub-module ApplyResourceChange") + } else { + expectations["quux-submodule"] = pm + } + + if pm, ok := arcPMs["test_instance"]; !ok { + t.Fatalf("root module ApplyResourceChange not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in root module ApplyResourceChange") + } else { + expectations["quux"] = pm + } + + type metaStruct struct { + Baz string `cty:"baz"` + } + + for expected, v := range expectations { + var meta metaStruct + err := gocty.FromCtyValue(v, &meta) + if err != nil { + t.Fatalf("Error parsing cty value: %s", err) + } + if meta.Baz != expected { + t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) + } + } +} + +func TestContext2Apply_ProviderMeta_apply_unset(t *testing.T) { + m := testModule(t, "provider-meta-unset") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + var pmMu sync.Mutex + arcPMs := map[string]cty.Value{} + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + pmMu.Lock() + defer pmMu.Unlock() + arcPMs[req.TypeName] = req.ProviderMeta + + s := req.PlannedState.AsValueMap() + s["id"] = cty.StringVal("ID") + if ty, ok := s["type"]; ok && !ty.IsKnown() { + s["type"] = cty.StringVal(req.TypeName) + } + return providers.ApplyResourceChangeResponse{ + NewState: cty.ObjectVal(s), + } + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) 
+ assertNoErrors(t, diags) + + if !p.ApplyResourceChangeCalled { + t.Fatalf("ApplyResourceChange not called") + } + + if pm, ok := arcPMs["test_resource"]; !ok { + t.Fatalf("sub-module ApplyResourceChange not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in sub-module ApplyResourceChange: %+v", pm) + } + + if pm, ok := arcPMs["test_instance"]; !ok { + t.Fatalf("root module ApplyResourceChange not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in root module ApplyResourceChange: %+v", pm) + } +} + +func TestContext2Apply_ProviderMeta_plan_set(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + prcPMs := map[string]cty.Value{} + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + prcPMs[req.TypeName] = req.ProviderMeta + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.PlanResourceChangeCalled { + t.Fatalf("PlanResourceChange not called") + } + + expectations := map[string]cty.Value{} + + if pm, ok := prcPMs["test_resource"]; !ok { + t.Fatalf("sub-module PlanResourceChange not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in sub-module PlanResourceChange") + } else { + expectations["quux-submodule"] = pm + } + + if pm, ok := prcPMs["test_instance"]; !ok { + t.Fatalf("root module PlanResourceChange not called") + } else if pm.IsNull() { + 
t.Fatalf("null ProviderMeta in root module PlanResourceChange") + } else { + expectations["quux"] = pm + } + + type metaStruct struct { + Baz string `cty:"baz"` + } + + for expected, v := range expectations { + var meta metaStruct + err := gocty.FromCtyValue(v, &meta) + if err != nil { + t.Fatalf("Error parsing cty value: %s", err) + } + if meta.Baz != expected { + t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) + } + } +} + +func TestContext2Apply_ProviderMeta_plan_unset(t *testing.T) { + m := testModule(t, "provider-meta-unset") + p := testProvider("test") + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + prcPMs := map[string]cty.Value{} + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + prcPMs[req.TypeName] = req.ProviderMeta + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.PlanResourceChangeCalled { + t.Fatalf("PlanResourceChange not called") + } + + if pm, ok := prcPMs["test_resource"]; !ok { + t.Fatalf("sub-module PlanResourceChange not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in sub-module PlanResourceChange: %+v", pm) + } + + if pm, ok := prcPMs["test_instance"]; !ok { + t.Fatalf("root module PlanResourceChange not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in root module PlanResourceChange: %+v", pm) + } +} + +func TestContext2Apply_ProviderMeta_plan_setNoSchema(t *testing.T) { + m := testModule(t, 
"provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("plan supposed to error, has no errors") + } + + var rootErr, subErr bool + errorSummary := "The resource test_%s.bar belongs to a provider that doesn't support provider_meta blocks" + for _, diag := range diags { + if diag.Description().Summary != "Provider registry.opentofu.org/hashicorp/test doesn't support provider_meta" { + t.Errorf("Unexpected error: %+v", diag.Description()) + } + switch diag.Description().Detail { + case fmt.Sprintf(errorSummary, "instance"): + rootErr = true + case fmt.Sprintf(errorSummary, "resource"): + subErr = true + default: + t.Errorf("Unexpected error: %s", diag.Description()) + } + } + if !rootErr { + t.Errorf("Expected unsupported provider_meta block error for root module, none received") + } + if !subErr { + t.Errorf("Expected unsupported provider_meta block error for sub-module, none received") + } +} + +func TestContext2Apply_ProviderMeta_plan_setInvalid(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "quux": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("plan supposed to error, has no errors") + } + + var reqErr, invalidErr bool + for 
_, diag := range diags { + switch diag.Description().Summary { + case "Missing required argument": + if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { + reqErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + case "Unsupported argument": + if diag.Description().Detail == `An argument named "baz" is not expected here.` { + invalidErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + default: + t.Errorf("Unexpected error %+v", diag.Description()) + } + } + if !reqErr { + t.Errorf("Expected missing required argument error, none received") + } + if !invalidErr { + t.Errorf("Expected unsupported argument error, none received") + } +} + +func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + rrcPMs := map[string]cty.Value{} + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + rrcPMs[req.TypeName] = req.ProviderMeta + newState, err := p.GetProviderSchemaResponse.ResourceTypes[req.TypeName].Block.CoerceValue(req.PriorState) + if err != nil { + panic(err) + } + resp.NewState = newState + return resp + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + _, diags = ctx.Refresh(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.ReadResourceCalled { + 
t.Fatalf("ReadResource not called") + } + + expectations := map[string]cty.Value{} + + if pm, ok := rrcPMs["test_resource"]; !ok { + t.Fatalf("sub-module ReadResource not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in sub-module ReadResource") + } else { + expectations["quux-submodule"] = pm + } + + if pm, ok := rrcPMs["test_instance"]; !ok { + t.Fatalf("root module ReadResource not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in root module ReadResource") + } else { + expectations["quux"] = pm + } + + type metaStruct struct { + Baz string `cty:"baz"` + } + + for expected, v := range expectations { + var meta metaStruct + err := gocty.FromCtyValue(v, &meta) + if err != nil { + t.Fatalf("Error parsing cty value: %s", err) + } + if meta.Baz != expected { + t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) + } + } +} + +func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + // we need a schema for plan/apply so they don't error + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // drop the schema before refresh, to test that it errors + schema.ProviderMeta = nil + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags = ctx.Refresh(m, state, DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("refresh supposed to error, has no errors") + } + + var rootErr, subErr bool + errorSummary := "The resource test_%s.bar belongs to a provider that doesn't support provider_meta blocks" + for _, diag := range diags { + if diag.Description().Summary != "Provider registry.opentofu.org/hashicorp/test doesn't support provider_meta" { + t.Errorf("Unexpected error: %+v", diag.Description()) + } + switch diag.Description().Detail { + case fmt.Sprintf(errorSummary, "instance"): + rootErr = true + case fmt.Sprintf(errorSummary, "resource"): + subErr = true + default: + t.Errorf("Unexpected error: %s", diag.Description()) + } + } + if !rootErr { + t.Errorf("Expected unsupported provider_meta block error for root module, none received") + } + if !subErr { + t.Errorf("Expected unsupported provider_meta block error for sub-module, none received") + } +} + +func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { + m := testModule(t, "provider-meta-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + // we need a matching schema for plan/apply so they don't error + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // change the schema before refresh, to test that it errors + schema.ProviderMeta = &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "quux": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags = ctx.Refresh(m, state, DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("refresh supposed to error, has no errors") + } + + var reqErr, invalidErr bool + for _, diag := range diags { + switch diag.Description().Summary { + case "Missing required argument": + if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { + reqErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + case "Unsupported argument": + if diag.Description().Detail == `An argument named "baz" is not expected here.` { + invalidErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + default: + t.Errorf("Unexpected error %+v", diag.Description()) + } + } + if !reqErr { + t.Errorf("Expected missing required argument error, none received") + } + if !invalidErr { + t.Errorf("Expected unsupported argument error, none received") + } +} + +func TestContext2Apply_ProviderMeta_refreshdata_set(t *testing.T) { + m := testModule(t, "provider-meta-data-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + rdsPMs := map[string]cty.Value{} + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
providers.ReadDataSourceResponse { + rdsPMs[req.TypeName] = req.ProviderMeta + switch req.TypeName { + case "test_data_source": + log.Printf("[TRACE] test_data_source RDSR returning") + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yo"), + "foo": cty.StringVal("bar"), + }), + } + case "test_file": + log.Printf("[TRACE] test_file RDSR returning") + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "rendered": cty.StringVal("baz"), + "template": cty.StringVal(""), + }), + } + default: + // config drift, oops + log.Printf("[TRACE] unknown request TypeName: %q", req.TypeName) + return providers.ReadDataSourceResponse{} + } + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + _, diags = ctx.Refresh(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.ReadDataSourceCalled { + t.Fatalf("ReadDataSource not called") + } + + expectations := map[string]cty.Value{} + + if pm, ok := rdsPMs["test_file"]; !ok { + t.Fatalf("sub-module ReadDataSource not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in sub-module ReadDataSource") + } else { + expectations["quux-submodule"] = pm + } + + if pm, ok := rdsPMs["test_data_source"]; !ok { + t.Fatalf("root module ReadDataSource not called") + } else if pm.IsNull() { + t.Fatalf("null ProviderMeta in root module ReadDataSource") + } else { + expectations["quux"] = pm + } + + type metaStruct struct { + Baz string `cty:"baz"` + } + + for expected, v := range expectations { + var meta metaStruct + err := gocty.FromCtyValue(v, &meta) + if err != nil { + t.Fatalf("Error parsing cty value: %s", err) + } + if meta.Baz != expected { + t.Fatalf("Expected meta.Baz to be %q, got %q", expected, meta.Baz) + } + } +} + +func TestContext2Apply_ProviderMeta_refreshdata_unset(t 
*testing.T) { + m := testModule(t, "provider-meta-data-unset") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "baz": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + rdsPMs := map[string]cty.Value{} + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + rdsPMs[req.TypeName] = req.ProviderMeta + switch req.TypeName { + case "test_data_source": + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yo"), + "foo": cty.StringVal("bar"), + }), + } + case "test_file": + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "rendered": cty.StringVal("baz"), + "template": cty.StringVal(""), + }), + } + default: + // config drift, oops + return providers.ReadDataSourceResponse{} + } + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + if !p.ReadDataSourceCalled { + t.Fatalf("ReadDataSource not called") + } + + if pm, ok := rdsPMs["test_file"]; !ok { + t.Fatalf("sub-module ReadDataSource not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in sub-module ReadDataSource") + } + + if pm, ok := rdsPMs["test_data_source"]; !ok { + t.Fatalf("root module ReadDataSource not called") + } else if !pm.IsNull() { + t.Fatalf("non-null ProviderMeta in root module ReadDataSource") + } +} + +func TestContext2Apply_ProviderMeta_refreshdata_setNoSchema(t *testing.T) { + m := testModule(t, 
"provider-meta-data-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yo"), + "foo": cty.StringVal("bar"), + }), + } + + _, diags := ctx.Refresh(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("refresh supposed to error, has no errors") + } + + var rootErr, subErr bool + errorSummary := "The resource data.test_%s.foo belongs to a provider that doesn't support provider_meta blocks" + for _, diag := range diags { + if diag.Description().Summary != "Provider registry.opentofu.org/hashicorp/test doesn't support provider_meta" { + t.Errorf("Unexpected error: %+v", diag.Description()) + } + switch diag.Description().Detail { + case fmt.Sprintf(errorSummary, "data_source"): + rootErr = true + case fmt.Sprintf(errorSummary, "file"): + subErr = true + default: + t.Errorf("Unexpected error: %s", diag.Description()) + } + } + if !rootErr { + t.Errorf("Expected unsupported provider_meta block error for root module, none received") + } + if !subErr { + t.Errorf("Expected unsupported provider_meta block error for sub-module, none received") + } +} + +func TestContext2Apply_ProviderMeta_refreshdata_setInvalid(t *testing.T) { + m := testModule(t, "provider-meta-data-set") + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + schema := p.ProviderSchema() + schema.ProviderMeta = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "quux": { + Type: cty.String, + Required: true, + }, + }, + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schema) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): 
testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yo"), + "foo": cty.StringVal("bar"), + }), + } + + _, diags := ctx.Refresh(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("refresh supposed to error, has no errors") + } + + var reqErr, invalidErr bool + for _, diag := range diags { + switch diag.Description().Summary { + case "Missing required argument": + if diag.Description().Detail == `The argument "quux" is required, but no definition was found.` { + reqErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + case "Unsupported argument": + if diag.Description().Detail == `An argument named "baz" is not expected here.` { + invalidErr = true + } else { + t.Errorf("Unexpected error %+v", diag.Description()) + } + default: + t.Errorf("Unexpected error %+v", diag.Description()) + } + } + if !reqErr { + t.Errorf("Expected missing required argument error, none received") + } + if !invalidErr { + t.Errorf("Expected unsupported argument error, none received") + } +} + +func TestContext2Apply_expandModuleVariables(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + for_each = toset(["a"]) + source = "./mod" +} + +module "mod2" { + source = "./mod" + in = module.mod1["a"].out +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { + foo = var.in +} + +variable "in" { + type = string + default = "default" +} + +output "out" { + value = aws_instance.foo.id +} +`, + }) + + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + 
} + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + expected := ` +module.mod1["a"]: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = default + type = aws_instance + + Outputs: + + out = foo +module.mod2: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance + + Dependencies: + module.mod1.aws_instance.foo` + + if state.String() != expected { + t.Fatalf("expected:\n%s\ngot:\n%s\n", expected, state) + } +} + +func TestContext2Apply_inheritAndStoreCBD(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "foo" { +} + +resource "aws_instance" "cbd" { + foo = aws_instance.foo.id + lifecycle { + create_before_destroy = true + } +} +`, + }) + + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + foo := state.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo")) + if !foo.Current.CreateBeforeDestroy { + t.Fatal("aws_instance.foo should also be create_before_destroy") + } +} + +func TestContext2Apply_moduleDependsOn(t *testing.T) { + m := testModule(t, "apply-module-depends-on") + + p := testProvider("test") + + // each instance being applied should happen in sequential order + applied := int64(0) + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + cfg := req.Config.AsValueMap() + foo := cfg["foo"].AsString() + ord := atomic.LoadInt64(&applied) + + resp := 
providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data"), + "foo": cfg["foo"], + }), + } + + if foo == "a" && ord < 4 { + // due to data source "a"'s module depending on instance 4, this + // should not be less than 4 + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source a read too early")) + } + if foo == "b" && ord < 1 { + // due to data source "b"'s module depending on instance 1, this + // should not be less than 1 + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source b read too early")) + } + return resp + } + p.PlanResourceChangeFn = testDiffFn + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + state := req.PlannedState.AsValueMap() + num, _ := state["num"].AsBigFloat().Float64() + ord := int64(num) + if !atomic.CompareAndSwapInt64(&applied, ord-1, ord) { + actual := atomic.LoadInt64(&applied) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("instance %d was applied after %d", ord, actual)) + } + + state["id"] = cty.StringVal(fmt.Sprintf("test_%d", ord)) + state["type"] = cty.StringVal("test_instance") + resp.NewState = cty.ObjectVal(state) + + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + plan, diags = ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("expected NoOp, got %s for %s", res.Action, res.Addr) + } + } +} + +func TestContext2Apply_moduleSelfReference(t *testing.T) { + m := 
testModuleInline(t, map[string]string{ + "main.tf": ` +module "test" { + source = "./test" + + a = module.test.b +} + +output "c" { + value = module.test.c +} +`, + "test/main.tf": ` +variable "a" {} + +resource "test_instance" "test" { +} + +output "b" { + value = test_instance.test.id +} + +output "c" { + value = var.a +}`}) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + if !state.Empty() { + t.Fatal("expected empty state, got:", state) + } +} + +func TestContext2Apply_moduleExpandDependsOn(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "child" { + count = 1 + source = "./child" + + depends_on = [test_instance.a, test_instance.b] +} + +resource "test_instance" "a" { +} + + +resource "test_instance" "b" { +} +`, + "child/main.tf": ` +resource "test_instance" "foo" { +} + +output "myoutput" { + value = "literal string" +} +`}) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), 
DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + if !state.Empty() { + t.Fatal("expected empty state, got:", state) + } +} + +func TestContext2Apply_scaleInCBD(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "ct" { + type = number +} + +resource "test_instance" "a" { + count = var.ct +} + +resource "test_instance" "b" { + require_new = local.removable + lifecycle { + create_before_destroy = true + } +} + +resource "test_instance" "c" { + require_new = test_instance.b.id + lifecycle { + create_before_destroy = true + } +} + +output "out" { + value = join(".", test_instance.a[*].id) +} + +locals { + removable = join(".", test_instance.a[*].id) +} +`}) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a0"}`), + Dependencies: []addrs.ConfigResource{}, + CreateBeforeDestroy: true, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a1"}`), + Dependencies: []addrs.ConfigResource{}, + CreateBeforeDestroy: true, + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b", "require_new":"old.old"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_instance.a")}, + CreateBeforeDestroy: true, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.c").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"c", "require_new":"b"}`), + Dependencies: []addrs.ConfigResource{ + mustConfigResourceAddr("test_instance.a"), + mustConfigResourceAddr("test_instance.b"), + }, + CreateBeforeDestroy: true, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + p := testProvider("test") + + p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan + if r.ProposedNewState.IsNull() { + resp.PlannedState = r.ProposedNewState + resp.PlannedPrivate = r.PriorPrivate + return resp + } + + n := r.ProposedNewState.AsValueMap() + + if r.PriorState.IsNull() { + n["id"] = cty.UnknownVal(cty.String) + resp.PlannedState = cty.ObjectVal(n) + return resp + } + + p := r.PriorState.AsValueMap() + + priorRN := p["require_new"] + newRN := n["require_new"] + + if eq := priorRN.Equals(newRN); !eq.IsKnown() || eq.False() { + resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "require_new"}}} + n["id"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(n) + return resp + } + + // reduce the count to 1 + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, 
&PlanOpts{
+		Mode: plans.NormalMode,
+		SetVariables: InputValues{
+			"ct": &InputValue{
+				Value:      cty.NumberIntVal(1),
+				SourceType: ValueFromCaller,
+			},
+		},
+	})
+	if diags.HasErrors() {
+		t.Fatal(diags.ErrWithWarnings())
+	}
+	{
+		addr := mustResourceInstanceAddr("test_instance.a[0]")
+		change := plan.Changes.ResourceInstance(addr)
+		if change == nil {
+			t.Fatalf("no planned change for %s", addr)
+		}
+		if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[0]"); !want.Equal(got) {
+			t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want)
+		}
+		if got, want := change.Action, plans.NoOp; got != want {
+			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
+		}
+		if got, want := change.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
+			t.Errorf("wrong action reason for %s %s; want %s", addr, got, want)
+		}
+	}
+	{
+		addr := mustResourceInstanceAddr("test_instance.a[1]")
+		change := plan.Changes.ResourceInstance(addr)
+		if change == nil {
+			t.Fatalf("no planned change for %s", addr)
+		}
+		if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[1]"); !want.Equal(got) {
+			t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want)
+		}
+		if got, want := change.Action, plans.Delete; got != want {
+			t.Errorf("wrong action for %s %s; want %s", addr, got, want)
+		}
+		if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want {
+			t.Errorf("wrong action reason for %s %s; want %s", addr, got, want)
+		}
+	}
+
+	state, diags = ctx.Apply(plan, m)
+	if diags.HasErrors() {
+		// Use t.Fatal, not log.Fatal: log.Fatal calls os.Exit(1), which would
+		// abort the whole test binary and skip the testing package's failure
+		// reporting and cleanup (matches every other error check in this test).
+		t.Fatal(diags.ErrWithWarnings())
+	}
+
+	// check the output, as those can't cause an error planning the value
+	out := state.RootModule().OutputValues["out"].Value.AsString()
+	if out != "a0" {
+		t.Fatalf(`expected output "a0", got: %q`, out)
+	}
+
+	// reduce the count to 0
+	ctx = testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "ct": &InputValue{ + Value: cty.NumberIntVal(0), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + { + addr := mustResourceInstanceAddr("test_instance.a[0]") + change := plan.Changes.ResourceInstance(addr) + if change == nil { + t.Fatalf("no planned change for %s", addr) + } + if got, want := change.PrevRunAddr, mustResourceInstanceAddr("test_instance.a[0]"); !want.Equal(got) { + t.Errorf("wrong previous run address for %s %s; want %s", addr, got, want) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", addr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseCountIndex; got != want { + t.Errorf("wrong action reason for %s %s; want %s", addr, got, want) + } + } + { + addr := mustResourceInstanceAddr("test_instance.a[1]") + change := plan.Changes.ResourceInstance(addr) + if change != nil { + // It was already removed in the previous plan/apply + t.Errorf("unexpected planned change for %s", addr) + } + } + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + // check the output, as those can't cause an error planning the value + out = state.RootModule().OutputValues["out"].Value.AsString() + if out != "" { + t.Fatalf(`expected output "", got: %q`, out) + } +} + +// Ensure that we can destroy when a provider references a resource that will +// also be destroyed +func TestContext2Apply_destroyProviderReference(t *testing.T) { + m, snap := testModuleWithSnapshot(t, "apply-destroy-provisider-refs") + + schemaFn := func(name string) *ProviderSchema { + return &ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + 
Required: true, + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + name + "_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + name + "_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + } + } + + testP := new(MockProvider) + testP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + testP.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schemaFn("test")) + + providerConfig := "" + testP.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + value := req.Config.GetAttr("value") + if value.IsKnown() && !value.IsNull() { + providerConfig = value.AsString() + } else { + providerConfig = "" + } + return resp + } + testP.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if providerConfig != "valid" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provider config is %q", providerConfig)) + return + } + return testApplyFn(req) + } + testP.PlanResourceChangeFn = testDiffFn + + nullP := new(MockProvider) + nullP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + nullP.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(schemaFn("null")) + + nullP.ApplyResourceChangeFn = testApplyFn + nullP.PlanResourceChangeFn = testDiffFn + + nullP.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": 
cty.StringVal("ID"), + "output": cty.StringVal("valid"), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), + addrs.NewDefaultProvider("null"): testProviderFuncFixed(nullP), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + providers := map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testP), + addrs.NewDefaultProvider("null"): testProviderFuncFixed(nullP), + } + ctx = testContext2(t, &ContextOpts{ + Providers: providers, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + // We'll marshal and unmarshal the plan here, to ensure that we have + // a clean new context as would be created if we separately ran + // tofu plan -out=tfplan && tofu apply tfplan + ctxOpts, m, plan, err := contextOptsForPlanViaFile(t, snap, plan) + if err != nil { + t.Fatal(err) + } + ctxOpts.Providers = providers + ctx, diags = NewContext(ctxOpts) + + if diags.HasErrors() { + t.Fatalf("failed to create context for plan: %s", diags.Err()) + } + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("destroy apply errors: %s", diags.Err()) + } +} + +// Destroying properly requires pruning out all unneeded config nodes to +// prevent incorrect expansion evaluation. 
+func TestContext2Apply_destroyInterModuleExpansion(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_data_source" "a" { + for_each = { + one = "thing" + } +} + +locals { + module_input = { + for k, v in data.test_data_source.a : k => v.id + } +} + +module "mod1" { + source = "./mod" + input = local.module_input +} + +module "mod2" { + source = "./mod" + input = module.mod1.outputs +} + +resource "test_instance" "bar" { + for_each = module.mod2.outputs +} + +output "module_output" { + value = module.mod2.outputs +} +output "test_instances" { + value = test_instance.bar +} +`, + "mod/main.tf": ` +variable "input" { +} + +data "test_data_source" "foo" { + for_each = var.input +} + +output "outputs" { + value = data.test_data_source.foo +} +`}) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_source"), + "foo": cty.StringVal("output"), + }), + } + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + destroy := func() { + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("destroy apply errors: %s", diags.Err()) + } + } + + destroy() + // Destroying again from the empty state should not cause any 
errors either + destroy() +} + +func TestContext2Apply_createBeforeDestroyWithModule(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "v" {} + +module "mod" { + source = "./mod" + in = var.v +} + +resource "test_resource" "a" { + value = var.v + depends_on = [module.mod] + lifecycle { + create_before_destroy = true + } +} +`, + "mod/main.tf": ` +variable "in" {} + +resource "test_resource" "a" { + value = var.in +} +`}) + + p := testProvider("test") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // this is a destroy plan + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + proposed := req.ProposedNewState.AsValueMap() + proposed["id"] = cty.UnknownVal(cty.String) + + resp.PlannedState = cty.ObjectVal(proposed) + resp.RequiresReplace = []cty.Path{{cty.GetAttrStep{Name: "value"}}} + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "v": &InputValue{ + Value: cty.StringVal("A"), + }, + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "v": &InputValue{ + Value: cty.StringVal("B"), + }, + }, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_forcedCBD(t 
*testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "v" {} + +resource "test_instance" "a" { + require_new = var.v +} + +resource "test_instance" "b" { + depends_on = [test_instance.a] + lifecycle { + create_before_destroy = true + } +} +`}) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "v": &InputValue{ + Value: cty.StringVal("A"), + }, + }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "v": &InputValue{ + Value: cty.StringVal("B"), + }, + }, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_removeReferencedResource(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "ct" { +} + +resource "test_resource" "to_remove" { + count = var.ct +} + +resource "test_resource" "c" { + value = join("", test_resource.to_remove[*].id) +} +`}) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "ct": &InputValue{ + Value: cty.NumberIntVal(1), + }, 
+ }, + }) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "ct": &InputValue{ + Value: cty.NumberIntVal(0), + }, + }, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_variableSensitivity(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_var" { + default = "foo" + sensitive = true +} + +variable "sensitive_id" { + default = "secret id" + sensitive = true +} + +resource "test_resource" "foo" { + value = var.sensitive_var + + network_interface { + network_interface_id = var.sensitive_id + } +}`, + }) + + p := new(MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "network_interface_id": {Type: cty.String, Optional: true}, + "device_index": {Type: cty.Number, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = testDiffFn + + ctx := 
testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + // Run a second apply with no changes + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + state, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + // Now change the variable value for sensitive_var + ctx = testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "sensitive_id": &InputValue{Value: cty.NilVal}, + "sensitive_var": &InputValue{ + Value: cty.StringVal("bar"), + }, + }, + }) + assertNoErrors(t, diags) + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } +} + +func TestContext2Apply_variableSensitivityPropagation(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_map" { + type = map(string) + default = { + "x" = "foo" + } + sensitive = true +} + +resource "test_resource" "foo" { + value = var.sensitive_map.x +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + 
}) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("plan errors: %s", diags.Err()) + } + + wantedAttrPaths := []cty.Path{cty.GetAttrPath("sensitive_value"), cty.GetAttrPath("value")} + + isAWantedAttrPath := func(p cty.Path) bool { + for _, wanted := range wantedAttrPaths { + if p.Equals(wanted) { + return true + } + } + return false + } + + verifySensitiveValues := func(pvms []cty.PathValueMarks) { + if len(pvms) != len(wantedAttrPaths) { + t.Fatalf("expected %d sensitive paths, got %d", len(wantedAttrPaths), len(pvms)) + } + + for _, pvm := range pvms { + if !isAWantedAttrPath(pvm.Path) { + t.Errorf("unexpected path\n got: %#v\n", pvm.Path) + } + if !pvm.Marks.Equal(cty.NewValueMarks(marks.Sensitive)) { + t.Errorf("wrong marks\n got: %#v\nwant: %#v", pvm.Marks, cty.NewValueMarks(marks.Sensitive)) + } + } + } + + addr := mustResourceInstanceAddr("test_resource.foo") + fooChangeSrc := plan.Changes.ResourceInstance(addr) + verifySensitiveValues(fooChangeSrc.AfterValMarks) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + fooState := state.ResourceInstance(addr) + verifySensitiveValues(fooState.Current.AttrSensitivePaths) +} + +func TestContext2Apply_variableSensitivityProviders(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "foo" { + sensitive_value = "should get marked" +} + +resource "test_resource" "bar" { + value = test_resource.foo.sensitive_value + random = test_resource.foo.id # not sensitive + + nesting_single { + value = "abc" + sensitive_value = "xyz" + } +} + +resource "test_resource" "baz" { + value = test_resource.bar.nesting_single.sensitive_value +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("plan errors: %s", diags.Err()) + } + + isAWantedAttrPath := func(p cty.Path, wantedAttrPaths []cty.Path) bool { + for _, wanted := range wantedAttrPaths { + if p.Equals(wanted) { + return true + } + } + return false + } + + verifySensitiveValues := func(pvms []cty.PathValueMarks, wantedAttrPaths []cty.Path) { + if len(pvms) != len(wantedAttrPaths) { + t.Fatalf("expected %d sensitive paths, got %d", len(wantedAttrPaths), len(pvms)) + } + + for _, pvm := range pvms { + if !isAWantedAttrPath(pvm.Path, wantedAttrPaths) { + t.Errorf("unexpected path\n got: %#v\n", pvm.Path) + } + if !pvm.Marks.Equal(cty.NewValueMarks(marks.Sensitive)) { + t.Errorf("wrong marks\n got: %#v\nwant: %#v", pvm.Marks, cty.NewValueMarks(marks.Sensitive)) + } + } + } + + wantedBarPaths := []cty.Path{ + { + cty.GetAttrStep{Name: "nesting_single"}, + cty.GetAttrStep{Name: "sensitive_value"}, + }, + cty.GetAttrPath("sensitive_value"), + cty.GetAttrPath("value"), + } + + wantedBazPaths := []cty.Path{ + cty.GetAttrPath("sensitive_value"), + cty.GetAttrPath("value"), + } + + // Sensitive attributes (defined by the provider) are marked + // as sensitive when referenced from another resource + // "bar" references sensitive resources in "foo" + barAddr := mustResourceInstanceAddr("test_resource.bar") + barChangeSrc := plan.Changes.ResourceInstance(barAddr) + verifySensitiveValues(barChangeSrc.AfterValMarks, wantedBarPaths) + + bazAddr := mustResourceInstanceAddr("test_resource.baz") + bazChangeSrc := plan.Changes.ResourceInstance(bazAddr) + verifySensitiveValues(bazChangeSrc.AfterValMarks, wantedBazPaths) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + barState := state.ResourceInstance(barAddr) + verifySensitiveValues(barState.Current.AttrSensitivePaths, 
wantedBarPaths) + + bazState := state.ResourceInstance(bazAddr) + verifySensitiveValues(bazState.Current.AttrSensitivePaths, wantedBazPaths) +} + +func TestContext2Apply_variableSensitivityChange(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_var" { + default = "hello" + sensitive = true +} + +resource "test_resource" "foo" { + value = var.sensitive_var +}`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo", "value":"hello"}`), + // No AttrSensitivePaths present + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + addr := mustResourceInstanceAddr("test_resource.foo") + + state, diags = ctx.Apply(plan, m) + assertNoErrors(t, diags) + + fooState := state.ResourceInstance(addr) + + wantedPathValueMarks := []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("sensitive_value"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + { + Path: cty.GetAttrPath("value"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + } + + if len(fooState.Current.AttrSensitivePaths) != len(wantedPathValueMarks) { + t.Fatalf("wrong number of sensitive paths, expected %d, got, %d", len(wantedPathValueMarks), len(fooState.Current.AttrSensitivePaths)) + } + + for _, path := range 
fooState.Current.AttrSensitivePaths { + found := false + for _, wanted := range wantedPathValueMarks { + if path.Path.Equals(wanted.Path) { + found = true + if !path.Marks.Equal(wanted.Marks) { + t.Errorf("wrong marks\n got: %#v\nwant: %#v", path.Marks, wanted.Marks) + } + } + } + if !found { + t.Errorf("unexpected path\n got: %#v\n", path) + } + } + + newModule := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_var" { + default = "hello" + sensitive = false +} + +resource "test_resource" "foo" { + value = var.sensitive_var +}`, + }) + + newCtx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags = newCtx.Plan(newModule, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + stateWithoutSensitive, diags := newCtx.Apply(plan, newModule) + assertNoErrors(t, diags) + + newFooState := stateWithoutSensitive.ResourceInstance(addr) + + // The sensitive value that was previously applied should still be sensitive, but nothing else + if len(newFooState.Current.AttrSensitivePaths) != 1 { + t.Fatalf( + "wrong number of sensitive paths, expected 1, got, %v\n%s", + len(newFooState.Current.AttrSensitivePaths), + spew.Sdump(newFooState.Current.AttrSensitivePaths), + ) + } + + if !newFooState.Current.AttrSensitivePaths[0].Path.Equals(cty.GetAttrPath("sensitive_value")) { + t.Fatalf("wrong sensitive path, got %v", newFooState.Current.AttrSensitivePaths[0].Path) + } +} + +func TestContext2Apply_moduleVariableOptionalAttributes(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "in" { + type = object({ + required = string + optional = optional(string) + default = optional(bool, true) + nested = optional( + map(object({ + a = optional(string, "foo") + b = optional(number, 5) + })), { + "boop": {} + } + ) + }) +} + +output "out" { + value = var.in +} +`}) 
+ + ctx := testContext2(t, &ContextOpts{}) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "in": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "required": cty.StringVal("boop"), + }), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + got := state.RootModule().OutputValues["out"].Value + want := cty.ObjectVal(map[string]cty.Value{ + "required": cty.StringVal("boop"), + + // Because "optional" was marked as optional, it got silently filled + // in as a null value of string type rather than returning an error. + "optional": cty.NullVal(cty.String), + + // Similarly, "default" was marked as optional with a default value, + // and since it was omitted should be filled in with that default. + "default": cty.True, + + // Nested is a complex structure which has fully described defaults, + // so again it should be filled with the default structure. + "nested": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("foo"), + "b": cty.NumberIntVal(5), + }), + }), + }) + if !want.RawEquals(got) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestContext2Apply_moduleVariableOptionalAttributesDefault(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "in" { + type = object({ + required = string + optional = optional(string) + default = optional(bool, true) + }) + default = { + required = "boop" + } +} + +output "out" { + value = var.in +} +`}) + + ctx := testContext2(t, &ContextOpts{}) + + // We don't specify a value for the variable here, relying on its defined + // default. 
+ plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + got := state.RootModule().OutputValues["out"].Value + want := cty.ObjectVal(map[string]cty.Value{ + "required": cty.StringVal("boop"), + + // "optional" is not present in the variable default, so it is filled + // with null. + "optional": cty.NullVal(cty.String), + + // Similarly, "default" is not present in the variable default, so its + // value is replaced with the type's specified default. + "default": cty.True, + }) + if !want.RawEquals(got) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestContext2Apply_moduleVariableOptionalAttributesDefaultNull(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "in" { + type = object({ + required = string + optional = optional(string) + default = optional(bool, true) + }) + default = null +} + +# Wrap the input variable in a tuple because a null output value is elided from +# the plan, which prevents us from testing its type. +output "out" { + value = [var.in] +} +`}) + + ctx := testContext2(t, &ContextOpts{}) + + // We don't specify a value for the variable here, relying on its defined + // default. 
+ plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + got := state.RootModule().OutputValues["out"].Value + // The null default value should be bound, after type converting to the + // full object type + want := cty.TupleVal([]cty.Value{cty.NullVal(cty.Object(map[string]cty.Type{ + "required": cty.String, + "optional": cty.String, + "default": cty.Bool, + }))}) + if !want.RawEquals(got) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestContext2Apply_moduleVariableOptionalAttributesDefaultChild(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "in" { + type = list(object({ + a = optional(set(string)) + })) + default = [ + { a = [ "foo" ] }, + { }, + ] +} + +module "child" { + source = "./child" + in = var.in +} + +output "out" { + value = module.child.out +} +`, + "child/main.tf": ` +variable "in" { + type = list(object({ + a = optional(set(string), []) + })) + default = [] +} + +output "out" { + value = var.in +} +`, + }) + + ctx := testContext2(t, &ContextOpts{}) + + // We don't specify a value for the variable here, relying on its defined + // default. 
+ plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + got := state.RootModule().OutputValues["out"].Value + want := cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{cty.StringVal("foo")}), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetValEmpty(cty.String), + }), + }) + if !want.RawEquals(got) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestContext2Apply_provisionerSensitive(t *testing.T) { + m := testModule(t, "apply-provisioner-sensitive") + p := testProvider("aws") + + pr := testProvisioner() + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + if req.Config.ContainsMarked() { + t.Fatalf("unexpectedly marked config value: %#v", req.Config) + } + command := req.Config.GetAttr("command") + if command.IsMarked() { + t.Fatalf("unexpectedly marked command argument: %#v", command.Marks()) + } + req.UIOutput.Output(fmt.Sprintf("Executing: %q", command.AsString())) + return + } + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn + + h := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "password": &InputValue{ + Value: cty.StringVal("secret"), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + + // "restart" provisioner + pr.CloseCalled = false + + state, diags := 
ctx.Apply(plan, m) + if diags.HasErrors() { + logDiagnostics(t, diags) + t.Fatal("apply failed") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTofuApplyProvisionerSensitiveStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + // Verify apply was invoked + if !pr.ProvisionResourceCalled { + t.Fatalf("provisioner was not called on apply") + } + + // Verify output was suppressed + if !h.ProvisionOutputCalled { + t.Fatalf("ProvisionOutput hook not called") + } + if got, doNotWant := h.ProvisionOutputMessage, "secret"; strings.Contains(got, doNotWant) { + t.Errorf("sensitive value %q included in output:\n%s", doNotWant, got) + } + if got, want := h.ProvisionOutputMessage, "output suppressed"; !strings.Contains(got, want) { + t.Errorf("expected hook to be called with %q, but was:\n%s", want, got) + } +} + +func TestContext2Apply_warnings(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "foo" { +}`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + resp := testApplyFn(req) + + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warning")) + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + state, diags := ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } + + inst := state.ResourceInstance(mustResourceInstanceAddr("test_resource.foo")) + if inst == nil { + t.Fatal("missing 'test_resource.foo' in state:", state) + } +} + +func TestContext2Apply_rpcDiagnostics(t *testing.T) { + m := testModuleInline(t, 
map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp = testApplyFn(req) + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) + return resp + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + t.Fatal("expected warnings") + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) + } + } +} + +func TestContext2Apply_dataSensitive(t *testing.T) { + m := testModule(t, "apply-data-sensitive") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // add the required id + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("foo") + + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(m), + } + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if 
diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(plan.Changes)) + } + + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + addr := mustResourceInstanceAddr("data.null_data_source.testing") + + dataSourceState := state.ResourceInstance(addr) + pvms := dataSourceState.Current.AttrSensitivePaths + if len(pvms) != 1 { + t.Fatalf("expected 1 sensitive path, got %d", len(pvms)) + } + pvm := pvms[0] + if gotPath, wantPath := pvm.Path, cty.GetAttrPath("foo"); !gotPath.Equals(wantPath) { + t.Errorf("wrong path\n got: %#v\nwant: %#v", gotPath, wantPath) + } + if gotMarks, wantMarks := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !gotMarks.Equal(wantMarks) { + t.Errorf("wrong marks\n got: %#v\nwant: %#v", gotMarks, wantMarks) + } +} + +func TestContext2Apply_errorRestorePrivateData(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": "", + }) + + p := simpleMockProvider() + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + // we error during apply, which will trigger core to preserve the last + // known state, including private data + Diagnostics: tfdiags.Diagnostics(nil).Append(errors.New("oops")), + } + + addr := mustResourceInstanceAddr("test_object.a") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Private: []byte("private"), + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + state, _ = ctx.Apply(plan, m) + if string(state.ResourceInstance(addr).Current.Private) != 
"private" { + t.Fatal("missing private data in state") + } +} + +func TestContext2Apply_errorRestoreStatus(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": "", + }) + + p := simpleMockProvider() + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // We error during apply, but return the current object state. + resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops")) + // return a warning too to make sure it isn't dropped + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warned")) + resp.NewState = req.PriorState + resp.Private = req.PlannedPrivate + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"foo"}`), + Private: []byte("private"), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.b")}, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + state, diags = ctx.Apply(plan, m) + + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { + t.Fatalf("error missing expected info: %q", errString) + } + + if len(diags) != 2 { + t.Fatalf("expected 1 error and 1 warning, got: %q", errString) + } + + res := state.ResourceInstance(addr) + if res == nil { + t.Fatal("resource was removed from state") + } + + if res.Current.Status != states.ObjectTainted { + t.Fatal("resource should still be tainted in the 
state") + } + + if len(res.Current.Dependencies) != 1 || !res.Current.Dependencies[0].Equal(mustConfigResourceAddr("test_object.b")) { + t.Fatalf("incorrect dependencies, got %q", res.Current.Dependencies) + } + + if string(res.Current.Private) != "private" { + t.Fatalf("incorrect private data, got %q", res.Current.Private) + } +} + +func TestContext2Apply_nonConformingResponse(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "x" +} +`, + }) + + p := simpleMockProvider() + respDiags := tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("warned")) + respDiags = respDiags.Append(errors.New("oops")) + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + // Don't lose these diagnostics + Diagnostics: respDiags, + // This state is missing required attributes, and should produce an error + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("x"), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { + t.Fatalf("error missing expected info: %q", errString) + } + + // we should have more than the ones returned from the provider, and they + // should not be coalesced into a single value + if len(diags) < 3 { + t.Fatalf("incorrect diagnostics, got %d values with %s", len(diags), diags.ErrWithWarnings()) + } +} + +func TestContext2Apply_nilResponse(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := 
simpleMockProvider() + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("expected and error") + } + + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "invalid nil value") { + t.Fatalf("error missing expected info: %q", errString) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// NOTE: Due to the size of this file, new tests should be added to +// context_apply2_test.go. +//////////////////////////////////////////////////////////////////////////////// diff --git a/pkg/tofu/context_eval.go b/pkg/tofu/context_eval.go new file mode 100644 index 00000000000..9c9c5043f70 --- /dev/null +++ b/pkg/tofu/context_eval.go @@ -0,0 +1,101 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type EvalOpts struct { + SetVariables InputValues +} + +// Eval produces a scope in which expressions can be evaluated for +// the given module path. +// +// This method must first evaluate any ephemeral values (input variables, local +// values, and output values) in the configuration. These ephemeral values are +// not included in the persisted state, so they must be re-computed using other +// values in the state before they can be properly evaluated. 
The updated +// values are retained in the main state associated with the receiving context. +// +// This function takes no action against remote APIs but it does need access +// to all provider and provisioner instances in order to obtain their schemas +// for type checking. +// +// The result is an evaluation scope that can be used to resolve references +// against the root module. If the returned diagnostics contains errors then +// the returned scope may be nil. If it is not nil then it may still be used +// to attempt expression evaluation or other analysis, but some expressions +// may not behave as expected. +func (c *Context) Eval(config *configs.Config, state *states.State, moduleAddr addrs.ModuleInstance, opts *EvalOpts) (*lang.Scope, tfdiags.Diagnostics) { + // This is intended for external callers such as the "tofu console" + // command. Internally, we create an evaluator in c.walk before walking + // the graph, and create scopes in ContextGraphWalker. + + var diags tfdiags.Diagnostics + defer c.acquireRun("eval")() + + // Start with a copy of state so that we don't affect the instance that + // the caller is holding. + state = state.DeepCopy() + var walker *ContextGraphWalker + + variables := opts.SetVariables + + // By the time we get here, we should have values defined for all of + // the root module variables, even if some of them are "unknown". It's the + // caller's responsibility to have already handled the decoding of these + // from the various ways the CLI allows them to be set and to produce + // user-friendly error messages if they are not all present, and so + // the error message from checkInputVariables should never be seen and + // includes language asking the user to report a bug. 
+ varDiags := checkInputVariables(config.Module.Variables, variables) + diags = diags.Append(varDiags) + + log.Printf("[DEBUG] Building and walking 'eval' graph") + + graph, moreDiags := (&EvalGraphBuilder{ + Config: config, + State: state, + RootVariableValues: variables, + Plugins: c.plugins, + }).Build(addrs.RootModuleInstance) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags + } + + walkOpts := &graphWalkOpts{ + InputState: state, + Config: config, + } + + walker, moreDiags = c.walk(graph, walkEval, walkOpts) + diags = diags.Append(moreDiags) + if walker != nil { + diags = diags.Append(walker.NonFatalDiagnostics) + } else { + // If we skipped walking the graph (due to errors) then we'll just + // use a placeholder graph walker here, which'll refer to the + // unmodified state. + walker = c.graphWalker(walkEval, walkOpts) + } + + // This is a bit weird since we don't normally evaluate outside of + // the context of a walk, but we'll "re-enter" our desired path here + // just to get hold of an EvalContext for it. ContextGraphWalker + // caches its contexts, so we should get hold of the context that was + // previously used for evaluation here, unless we skipped walking. + evalCtx := walker.EnterPath(moduleAddr) + return evalCtx.EvaluationScope(nil, nil, EvalDataForNoInstanceKey), diags +} diff --git a/pkg/tofu/context_eval_test.go b/pkg/tofu/context_eval_test.go new file mode 100644 index 00000000000..80cd9922b67 --- /dev/null +++ b/pkg/tofu/context_eval_test.go @@ -0,0 +1,135 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +func TestContextEval(t *testing.T) { + // This test doesn't check the "Want" value for impure funcs, so the value + // on those doesn't matter. + tests := []struct { + Input string + Want cty.Value + ImpureFunc bool + }{ + { // An impure function: allowed in the console, but the result is nondeterministic + `bcrypt("example")`, + cty.NilVal, + true, + }, + { + `keys(var.map)`, + cty.ListVal([]cty.Value{ + cty.StringVal("foo"), + cty.StringVal("baz"), + }), + true, + }, + { + `local.result`, + cty.NumberIntVal(6), + false, + }, + { + `module.child.result`, + cty.NumberIntVal(6), + false, + }, + } + + // This module has a little bit of everything (and if it is missing something, add to it): + // resources, variables, locals, modules, output + m := testModule(t, "eval-context-basic") + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + scope, diags := ctx.Eval(m, states.NewState(), addrs.RootModuleInstance, &EvalOpts{ + SetVariables: testInputValuesUnset(m.Module.Variables), + }) + if diags.HasErrors() { + t.Fatalf("Eval errors: %s", diags.Err()) + } + + // Since we're testing 'eval' (used by tofu console), impure functions + // should be allowed by the scope. 
+ if scope.PureOnly == true { + t.Fatal("wrong result: eval should allow impure funcs") + } + + for _, test := range tests { + t.Run(test.Input, func(t *testing.T) { + // Parse the test input as an expression + expr, _ := hclsyntax.ParseExpression([]byte(test.Input), "", hcl.Pos{Line: 1, Column: 1}) + got, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + if !test.ImpureFunc { + if !got.RawEquals(test.Want) { + t.Fatalf("wrong result: want %#v, got %#v", test.Want, got) + } + } + }) + } +} + +// ensure that we can execute a console when outputs have preconditions +func TestContextEval_outputsWithPreconditions(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + source = "./mod" + input = "ok" +} + +output "out" { + value = module.mod.out +} +`, + + "./mod/main.tf": ` +variable "input" { + type = string +} + +output "out" { + value = var.input + + precondition { + condition = var.input != "" + error_message = "error" + } +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Eval(m, states.NewState(), addrs.RootModuleInstance, &EvalOpts{ + SetVariables: testInputValuesUnset(m.Module.Variables), + }) + assertNoErrors(t, diags) +} diff --git a/pkg/tofu/context_fixtures_test.go b/pkg/tofu/context_fixtures_test.go new file mode 100644 index 00000000000..ee5b64ec4c3 --- /dev/null +++ b/pkg/tofu/context_fixtures_test.go @@ -0,0 +1,90 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/zclconf/go-cty/cty" +) + +// contextTestFixture is a container for a set of objects that work together +// to create a base testing scenario. This is used to represent some common +// situations used as the basis for multiple tests. +type contextTestFixture struct { + Config *configs.Config + Providers map[addrs.Provider]providers.Factory + Provisioners map[string]provisioners.Factory +} + +// ContextOpts returns a ContextOpts pre-populated with the elements of this +// fixture. Each call returns a distinct object, so callers can apply further +// _shallow_ modifications to the options as needed. +func (f *contextTestFixture) ContextOpts() *ContextOpts { + return &ContextOpts{ + Providers: f.Providers, + Provisioners: f.Provisioners, + } +} + +// contextFixtureApplyVars builds and returns a test fixture for testing +// input variables, primarily during the apply phase. The configuration is +// loaded from testdata/apply-vars, and the provider resolver is +// configured with a resource type schema for aws_instance that matches +// what's used in that configuration. 
+func contextFixtureApplyVars(t *testing.T) *contextTestFixture { + c := testModule(t, "apply-vars") + p := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + "baz": {Type: cty.String, Optional: true}, + "num": {Type: cty.Number, Optional: true}, + "list": {Type: cty.List(cty.String), Optional: true}, + "map": {Type: cty.Map(cty.String), Optional: true}, + }, + }) + p.ApplyResourceChangeFn = testApplyFn + p.PlanResourceChangeFn = testDiffFn + return &contextTestFixture{ + Config: c, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + } +} + +// contextFixtureApplyVarsEnv builds and returns a test fixture for testing +// input variables set from the environment. The configuration is +// loaded from testdata/apply-vars-env, and the provider resolver is +// configured with a resource type schema for aws_instance that matches +// what's used in that configuration. 
+func contextFixtureApplyVarsEnv(t *testing.T) *contextTestFixture { + c := testModule(t, "apply-vars-env") + p := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": {Type: cty.String, Optional: true}, + "list": {Type: cty.List(cty.String), Optional: true}, + "map": {Type: cty.Map(cty.String), Optional: true}, + "id": {Type: cty.String, Computed: true}, + "type": {Type: cty.String, Computed: true}, + }, + }) + p.ApplyResourceChangeFn = testApplyFn + p.PlanResourceChangeFn = testDiffFn + return &contextTestFixture{ + Config: c, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + } +} diff --git a/pkg/tofu/context_functions.go b/pkg/tofu/context_functions.go new file mode 100644 index 00000000000..4b27a984086 --- /dev/null +++ b/pkg/tofu/context_functions.go @@ -0,0 +1,185 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// This builds a provider function using an EvalContext and some additional information +// This is split out of BuiltinEvalContext for testing +func evalContextProviderFunction(providers func(addrs.AbsProviderConfig) providers.Interface, mc *configs.Config, op walkOperation, pf addrs.ProviderFunction, rng tfdiags.SourceRange) (*function.Function, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + pr, ok := mc.Module.ProviderRequirements.RequiredProviders[pf.ProviderName] + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unknown function provider", + Detail: fmt.Sprintf("Provider %q does not exist within the required_providers of this module", pf.ProviderName), + Subject: rng.ToHCL().Ptr(), + }) + } + + // Very similar to transform_provider.go + absPc := addrs.AbsProviderConfig{ + Provider: pr.Type, + Module: mc.Path, + Alias: pf.ProviderAlias, + } + + provider := providers(absPc) + + if provider == nil { + // Configured provider (NodeApplyableProvider) not required via transform_provider.go. Instead we should use the unconfigured instance (NodeEvalableProvider) in the root. 
+ + // Make sure the alias is valid + validAlias := pf.ProviderAlias == "" + if !validAlias { + for _, alias := range pr.Aliases { + if alias.Alias == pf.ProviderAlias { + validAlias = true + break + } + } + if !validAlias { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unknown function provider", + Detail: fmt.Sprintf("No provider instance %q with alias %q", pf.ProviderName, pf.ProviderAlias), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + provider = providers(addrs.AbsProviderConfig{Provider: pr.Type}) + if provider == nil { + // This should not be possible + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "BUG: Uninitialized function provider", + Detail: fmt.Sprintf("Provider %q has not yet been initialized", absPc.String()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + // First try to look up the function from provider schema + schema := provider.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + return nil, schema.Diagnostics + } + spec, ok := schema.Functions[pf.Function] + if !ok { + // During the validate operation, providers are not configured and therefore won't provide + // a comprehensive GetFunctions list + // Validate is built around unknown values already, we can stub in a placeholder + if op == walkValidate { + // Configured provider functions are not available during validate + fn := function.New(&function.Spec{ + Description: "Validate Placeholder", + VarParam: &function.Parameter{ + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, + AllowDynamicType: true, + AllowMarked: false, + }, + Type: function.StaticReturnType(cty.DynamicPseudoType), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.UnknownVal(cty.DynamicPseudoType), nil + }, + }) + return &fn, nil + } + + // The provider may be configured and present additional functions via GetFunctions + specs := provider.GetFunctions() + if specs.Diagnostics.HasErrors() { 
+ return nil, specs.Diagnostics + } + + // If the function isn't in the custom GetFunctions list, it must be undefined + spec, ok = specs.Functions[pf.Function] + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Function not found in provider", + Detail: fmt.Sprintf("Function %q was not registered by provider %q", pf.Function, absPc.String()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + fn := providerFunction(pf.Function, spec, provider) + + return &fn, nil + +} + +// Turn a provider function spec into a cty callable function +// This will use the instance factory to get a provider to support the +// function call. +func providerFunction(name string, spec providers.FunctionSpec, provider providers.Interface) function.Function { + params := make([]function.Parameter, len(spec.Parameters)) + for i, param := range spec.Parameters { + params[i] = providerFunctionParameter(param) + } + + var varParam *function.Parameter + if spec.VariadicParameter != nil { + value := providerFunctionParameter(*spec.VariadicParameter) + varParam = &value + } + + impl := func(args []cty.Value, retType cty.Type) (cty.Value, error) { + resp := provider.CallFunction(providers.CallFunctionRequest{ + Name: name, + Arguments: args, + }) + + if argError, ok := resp.Error.(*providers.CallFunctionArgumentError); ok { + // Convert ArgumentError to cty error + return resp.Result, function.NewArgError(argError.FunctionArgument, errors.New(argError.Text)) + } + + return resp.Result, resp.Error + } + + return function.New(&function.Spec{ + Description: spec.Summary, + Params: params, + VarParam: varParam, + Type: function.StaticReturnType(spec.Return), + Impl: impl, + }) + +} + +// Simple mapping of function parameter spec to function parameter +func providerFunctionParameter(spec providers.FunctionParameterSpec) function.Parameter { + return function.Parameter{ + Name: spec.Name, + Description: spec.Description, + Type: spec.Type, + AllowNull: 
spec.AllowNullValue, + AllowUnknown: spec.AllowUnknownValues, + // I don't believe this is allowable for provider functions + AllowDynamicType: false, + // force cty to strip marks ahead of time and re-add them to the resulting object + // GRPC: failed: value has marks, so it cannot be serialized. + AllowMarked: false, + } +} diff --git a/pkg/tofu/context_functions_test.go b/pkg/tofu/context_functions_test.go new file mode 100644 index 00000000000..67e69586686 --- /dev/null +++ b/pkg/tofu/context_functions_test.go @@ -0,0 +1,318 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +func TestFunctions(t *testing.T) { + mockProvider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{}, + Functions: map[string]providers.FunctionSpec{ + "echo": providers.FunctionSpec{ + Parameters: []providers.FunctionParameterSpec{providers.FunctionParameterSpec{ + Name: "input", + Type: cty.String, + AllowNullValue: false, + AllowUnknownValues: false, + }}, + Return: cty.String, + }, + "concat": providers.FunctionSpec{ + Parameters: []providers.FunctionParameterSpec{providers.FunctionParameterSpec{ + Name: "input", + Type: cty.String, + AllowNullValue: false, + AllowUnknownValues: false, + }}, + VariadicParameter: &providers.FunctionParameterSpec{ + Name: "vary", + Type: cty.String, + AllowNullValue: false, + }, + Return: cty.String, + }, + "coalesce": providers.FunctionSpec{ + Parameters: 
[]providers.FunctionParameterSpec{providers.FunctionParameterSpec{ + Name: "input1", + Type: cty.String, + AllowNullValue: true, + AllowUnknownValues: false, + }, providers.FunctionParameterSpec{ + Name: "input2", + Type: cty.String, + AllowNullValue: false, + AllowUnknownValues: false, + }}, + Return: cty.String, + }, + "unknown_param": providers.FunctionSpec{ + Parameters: []providers.FunctionParameterSpec{providers.FunctionParameterSpec{ + Name: "input", + Type: cty.String, + AllowNullValue: false, + AllowUnknownValues: true, + }}, + Return: cty.String, + }, + "error_param": providers.FunctionSpec{ + Parameters: []providers.FunctionParameterSpec{providers.FunctionParameterSpec{ + Name: "input", + Type: cty.String, + AllowNullValue: false, + AllowUnknownValues: false, + }}, + Return: cty.String, + }, + }, + }, + } + + mockProvider.CallFunctionFn = func(req providers.CallFunctionRequest) (resp providers.CallFunctionResponse) { + switch req.Name { + case "echo": + resp.Result = req.Arguments[0] + case "concat": + str := "" + for _, arg := range req.Arguments { + str += arg.AsString() + } + resp.Result = cty.StringVal(str) + case "coalesce": + resp.Result = req.Arguments[0] + if resp.Result.IsNull() { + resp.Result = req.Arguments[1] + } + case "unknown_param": + resp.Result = cty.StringVal("knownvalue") + case "error_param": + resp.Error = &providers.CallFunctionArgumentError{ + Text: "my error text", + FunctionArgument: 0, + } + default: + panic("Invalid function") + } + return resp + } + + mockProvider.GetFunctionsFn = func() (resp providers.GetFunctionsResponse) { + resp.Functions = mockProvider.GetProviderSchemaResponse.Functions + return resp + } + + addr := addrs.NewDefaultProvider("mock") + rng := tfdiags.SourceRange{} + providerFunc := func(fn string) addrs.ProviderFunction { + pf, _ := addrs.ParseFunction(fn).AsProviderFunction() + return pf + } + + mockCtx := new(MockEvalContext) + cfg := &configs.Config{ + Module: &configs.Module{ + ProviderRequirements: 
&configs.RequiredProviders{ + RequiredProviders: map[string]*configs.RequiredProvider{ + "mockname": &configs.RequiredProvider{ + Name: "mock", + Type: addr, + }, + }, + }, + }, + } + + // Provider missing + _, diags := evalContextProviderFunction(mockCtx.Provider, cfg, walkValidate, providerFunc("provider::invalid::unknown"), rng) + if !diags.HasErrors() { + t.Fatal("expected unknown function provider") + } + if diags.Err().Error() != `Unknown function provider: Provider "invalid" does not exist within the required_providers of this module` { + t.Fatal(diags.Err()) + } + + // Provider not initialized + _, diags = evalContextProviderFunction(mockCtx.Provider, cfg, walkValidate, providerFunc("provider::mockname::missing"), rng) + if !diags.HasErrors() { + t.Fatal("expected unknown function provider") + } + if diags.Err().Error() != `BUG: Uninitialized function provider: Provider "provider[\"registry.opentofu.org/hashicorp/mock\"]" has not yet been initialized` { + t.Fatal(diags.Err()) + } + + // "initialize" provider + mockCtx.ProviderProvider = mockProvider + + // Function missing (validate) + mockProvider.GetFunctionsCalled = false + _, diags = evalContextProviderFunction(mockCtx.Provider, cfg, walkValidate, providerFunc("provider::mockname::missing"), rng) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + if mockProvider.GetFunctionsCalled { + t.Fatal("expected GetFunctions NOT to be called since it's not initialized") + } + + // Function missing (Non-validate) + mockProvider.GetFunctionsCalled = false + _, diags = evalContextProviderFunction(mockCtx.Provider, cfg, walkPlan, providerFunc("provider::mockname::missing"), rng) + if !diags.HasErrors() { + t.Fatal("expected unknown function") + } + if diags.Err().Error() != `Function not found in provider: Function "missing" was not registered by provider "provider[\"registry.opentofu.org/hashicorp/mock\"]"` { + t.Fatal(diags.Err()) + } + if !mockProvider.GetFunctionsCalled { + t.Fatal("expected GetFunctions to be 
called") + } + + ctx := &hcl.EvalContext{ + Functions: map[string]function.Function{}, + Variables: map[string]cty.Value{ + "unknown_value": cty.UnknownVal(cty.String), + "sensitive_value": cty.StringVal("sensitive!").Mark(marks.Sensitive), + }, + } + + // Load functions into ctx + for _, fn := range []string{"echo", "concat", "coalesce", "unknown_param", "error_param"} { + pf := providerFunc("provider::mockname::" + fn) + impl, diags := evalContextProviderFunction(mockCtx.Provider, cfg, walkPlan, pf, rng) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + ctx.Functions[pf.String()] = *impl + } + evaluate := func(exprStr string) (cty.Value, hcl.Diagnostics) { + expr, diags := hclsyntax.ParseExpression([]byte(exprStr), "exprtest", hcl.InitialPos) + if diags.HasErrors() { + t.Fatal(diags) + } + return expr.Value(ctx) + } + + t.Run("echo function", func(t *testing.T) { + // These are all assumptions that the provider implementation should not have to worry about: + + t.Log("Checking not enough arguments") + _, diags := evaluate("provider::mockname::echo()") + if !strings.Contains(diags.Error(), `Not enough function arguments; Function "provider::mockname::echo" expects 1 argument(s). 
Missing value for "input"`) { + t.Error(diags.Error()) + } + + t.Log("Checking too many arguments") + _, diags = evaluate(`provider::mockname::echo("1", "2", "3")`) + if !strings.Contains(diags.Error(), `Too many function arguments; Function "provider::mockname::echo" expects only 1 argument(s)`) { + t.Error(diags.Error()) + } + + t.Log("Checking null argument") + _, diags = evaluate(`provider::mockname::echo(null)`) + if !strings.Contains(diags.Error(), `Invalid function argument; Invalid value for "input" parameter: argument must not be null`) { + t.Error(diags.Error()) + } + + t.Log("Checking unknown argument") + val, diags := evaluate(`provider::mockname::echo(unknown_value)`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.UnknownVal(cty.String)) { + t.Error(val.AsString()) + } + + // Actually test the function implementation + + t.Log("Checking valid argument") + + val, diags = evaluate(`provider::mockname::echo("hello functions!")`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("hello functions!")) { + t.Error(val.AsString()) + } + + t.Log("Checking sensitive argument") + + val, diags = evaluate(`provider::mockname::echo(sensitive_value)`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("sensitive!").Mark(marks.Sensitive)) { + t.Error(val.AsString()) + } + }) + + t.Run("concat function", func(t *testing.T) { + // Make sure varargs are handled properly + + // Single + val, diags := evaluate(`provider::mockname::concat("foo")`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("foo")) { + t.Error(val.AsString()) + } + + // Multi + val, diags = evaluate(`provider::mockname::concat("foo", "bar", "baz")`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("foobarbaz")) { + t.Error(val.AsString()) + } + }) + + t.Run("coalesce function", func(t *testing.T) { + val, diags := 
evaluate(`provider::mockname::coalesce("first", "second")`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("first")) { + t.Error(val.AsString()) + } + + val, diags = evaluate(`provider::mockname::coalesce(null, "second")`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("second")) { + t.Error(val.AsString()) + } + }) + + t.Run("unknown_param function", func(t *testing.T) { + val, diags := evaluate(`provider::mockname::unknown_param(unknown_value)`) + if diags.HasErrors() { + t.Error(diags.Error()) + } + if !val.RawEquals(cty.StringVal("knownvalue")) { + t.Error(val.AsString()) + } + }) + t.Run("error_param function", func(t *testing.T) { + _, diags := evaluate(`provider::mockname::error_param("foo")`) + if !strings.Contains(diags.Error(), `Invalid function argument; Invalid value for "input" parameter: my error text.`) { + t.Error(diags.Error()) + } + }) +} diff --git a/pkg/tofu/context_import.go b/pkg/tofu/context_import.go new file mode 100644 index 00000000000..db31ce5650a --- /dev/null +++ b/pkg/tofu/context_import.go @@ -0,0 +1,288 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "sync" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ImportOpts are used as the configuration for Import. +type ImportOpts struct { + // Targets are the targets to import + Targets []*ImportTarget + + // SetVariables are the variables set outside of the configuration, + // such as on the command line, in variables files, etc. 
+ SetVariables InputValues +} + +// CommandLineImportTarget is a target that we need to import, that originated from the CLI command +// It represents a single resource that we need to import. +// The resource's ID and Address are fully known when executing the command (unlike when using the `import` block) +type CommandLineImportTarget struct { + // Addr is the address for the resource instance that the new object should + // be imported into. + Addr addrs.AbsResourceInstance + + // ID is the string ID of the resource to import. This is resource-specific. + ID string +} + +// ImportTarget is a target that we need to import. +// It could either represent a single resource or multiple instances of the same resource, if for_each is used +// ImportTarget can be either a result of the import CLI command, or the import block +type ImportTarget struct { + // Config is the original import block for this import. This might be null + // if the import did not originate in config. + // Config is mutually-exclusive with CommandLineImportTarget + Config *configs.Import + + // CommandLineImportTarget is the ImportTarget information in the case of an import target origination for the + // command line. 
CommandLineImportTarget is mutually-exclusive with Config + *CommandLineImportTarget +} + +// IsFromImportBlock checks whether the import target originates from an `import` block +// Currently, it should yield the opposite result of IsFromImportCommandLine, as those two are mutually-exclusive +func (i *ImportTarget) IsFromImportBlock() bool { + return i.Config != nil +} + +// IsFromImportCommandLine checks whether the import target originates from a `tofu import` command +// Currently, it should yield the opposite result of IsFromImportBlock, as those two are mutually-exclusive +func (i *ImportTarget) IsFromImportCommandLine() bool { + return i.CommandLineImportTarget != nil +} + +// StaticAddr returns the static address part of an import target +// For an ImportTarget originating from the command line, the address is already known +// However for an ImportTarget originating from an import block, the full address might not be known initially, +// and could only be evaluated down the line. Here, we create a static representation for the address. +// This is useful so that we could have information on the ImportTarget early on, such as the Module and Resource of it +func (i *ImportTarget) StaticAddr() addrs.ConfigResource { + if i.IsFromImportCommandLine() { + return i.CommandLineImportTarget.Addr.ConfigResource() + } + + return i.Config.StaticTo +} + +// ResolvedAddr returns a reference to the resolved address of an import target, if possible. If not possible, it +// returns nil. +// For an ImportTarget originating from the command line, the address is already known +// However for an ImportTarget originating from an import block, the full address might not be known initially, +// and could only be evaluated down the line. 
+func (i *ImportTarget) ResolvedAddr() *addrs.AbsResourceInstance {
+	if i.IsFromImportCommandLine() {
+		return &i.CommandLineImportTarget.Addr
+	} else {
+		return i.Config.ResolvedTo
+	}
+}
+
+// ImportResolver is a struct that maintains a map of all imports as they are being resolved.
+// This is specifically for imports originating from configuration.
+// Import targets' addresses are not fully known from the get-go, and could only be resolved later when walking
+// the graph. This struct helps keep track of the resolved imports, mostly for validation that all imports
+// have been addressed and point to an actual configuration.
+// The key of the map is a string representation of the address, and the value is an EvaluatedConfigImportTarget.
+type ImportResolver struct {
+	// mu guards imports: resolveImport takes the write lock while
+	// GetAllImports/GetImport take the read lock.
+	mu      sync.RWMutex
+	imports map[string]EvaluatedConfigImportTarget
+}
+
+// NewImportResolver returns an ImportResolver with an empty (non-nil) map,
+// ready for concurrent use.
+func NewImportResolver() *ImportResolver {
+	return &ImportResolver{imports: make(map[string]EvaluatedConfigImportTarget)}
+}
+
+// ExpandAndResolveImport is responsible for two operations:
+// 1. Expands the ImportTarget (originating from an import block) if it contains a 'for_each' attribute.
+// 2. Goes over the expanded imports and resolves the ID and address, when we have the context necessary to resolve
+// them. The resolved import target would be an EvaluatedConfigImportTarget.
+// This function mutates the EvalContext's ImportResolver, adding the resolved import target.
+// The function errors if we failed to evaluate the ID or the address.
+func (ri *ImportResolver) ExpandAndResolveImport(importTarget *ImportTarget, ctx EvalContext) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	// The import block expressions are declared within the root module.
+	// We need to explicitly use the context with the path of the root module, so that all references will be
+	// relative to the root module
+	rootCtx := ctx.WithPath(addrs.RootModuleInstance)
+
+	if importTarget.Config.ForEach != nil {
+		// unknowns are rejected (an import needs a concrete key set now);
+		// tuples are accepted in addition to maps/sets.
+		const unknownsNotAllowed = false
+		const tupleAllowed = true
+
+		// The import target has a for_each attribute, so we need to expand it
+		forEachVal, evalDiags := evaluateForEachExpressionValue(importTarget.Config.ForEach, rootCtx, unknownsNotAllowed, tupleAllowed)
+		diags = diags.Append(evalDiags)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// We are building an instances.RepetitionData based on each for_each key and val combination
+		var repetitions []instances.RepetitionData
+
+		it := forEachVal.ElementIterator()
+		for it.Next() {
+			k, v := it.Element()
+			repetitions = append(repetitions, instances.RepetitionData{
+				EachKey:   k,
+				EachValue: v,
+			})
+		}
+
+		// Resolve each expanded instance; duplicate resolved addresses are
+		// detected and reported by resolveImport.
+		for _, keyData := range repetitions {
+			diags = diags.Append(ri.resolveImport(importTarget, rootCtx, keyData))
+		}
+	} else {
+		// The import target is singular, no need to expand
+		diags = diags.Append(ri.resolveImport(importTarget, rootCtx, EvalDataForNoInstanceKey))
+	}
+
+	return diags
+}
+
+// resolveImport resolves the ID and address of an ImportTarget originating from an import block,
+// when we have the context necessary to resolve them. The resolved import target would be an
+// EvaluatedConfigImportTarget.
+// This function mutates the EvalContext's ImportResolver, adding the resolved import target.
+// The function errors if we failed to evaluate the ID or the address.
+func (ri *ImportResolver) resolveImport(importTarget *ImportTarget, ctx EvalContext, keyData instances.RepetitionData) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + importId, evalDiags := evaluateImportIdExpression(importTarget.Config.ID, ctx, keyData) + diags = diags.Append(evalDiags) + if diags.HasErrors() { + return diags + } + + importAddress, addressDiags := evaluateImportAddress(ctx, importTarget.Config.To, keyData) + diags = diags.Append(addressDiags) + if diags.HasErrors() { + return diags + } + + ri.mu.Lock() + defer ri.mu.Unlock() + + resolvedImportKey := importAddress.String() + + if importTarget, exists := ri.imports[resolvedImportKey]; exists { + return diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate import configuration for %q", importAddress), + Detail: fmt.Sprintf("An import block for the resource %q was already declared at %s. A resource can have only one import block.", importAddress, importTarget.Config.DeclRange), + Subject: importTarget.Config.DeclRange.Ptr(), + }) + } + + ri.imports[resolvedImportKey] = EvaluatedConfigImportTarget{ + Config: importTarget.Config, + Addr: importAddress, + ID: importId, + } + + if keyData == EvalDataForNoInstanceKey { + log.Printf("[TRACE] importResolver: resolved a singular import target %s", importAddress) + } else { + log.Printf("[TRACE] importResolver: resolved an expanded import target %s", importAddress) + } + + return diags +} + +// GetAllImports returns all resolved imports +func (ri *ImportResolver) GetAllImports() []EvaluatedConfigImportTarget { + ri.mu.RLock() + defer ri.mu.RUnlock() + + var allImports []EvaluatedConfigImportTarget + for _, importTarget := range ri.imports { + allImports = append(allImports, importTarget) + } + return allImports +} + +func (ri *ImportResolver) GetImport(address addrs.AbsResourceInstance) *EvaluatedConfigImportTarget { + ri.mu.RLock() + defer ri.mu.RUnlock() + + for _, importTarget := range ri.imports { + if 
importTarget.Addr.Equal(address) { + return &importTarget + } + } + return nil +} + +// Import takes already-created external resources and brings them +// under OpenTofu management. Import requires the exact type, name, and ID +// of the resources to import. +// +// This operation is idempotent. If the requested resource is already +// imported, no changes are made to the state. +// +// Further, this operation also gracefully handles partial state. If during +// an import there is a failure, all previously imported resources remain +// imported. +func (c *Context) Import(config *configs.Config, prevRunState *states.State, opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Hold a lock since we can modify our own state here + defer c.acquireRun("import")() + + // Don't modify our caller's state + state := prevRunState.DeepCopy() + + log.Printf("[DEBUG] Building and walking import graph") + + variables := opts.SetVariables + + // Initialize our graph builder + builder := &PlanGraphBuilder{ + ImportTargets: opts.Targets, + Config: config, + State: state, + RootVariableValues: variables, + Plugins: c.plugins, + Operation: walkImport, + } + + // Build the graph + graph, graphDiags := builder.Build(addrs.RootModuleInstance) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return state, diags + } + + // Walk it + walker, walkDiags := c.walk(graph, walkImport, &graphWalkOpts{ + Config: config, + InputState: state, + }) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return state, diags + } + + // Data sources which could not be read during the import plan will be + // unknown. We need to strip those objects out so that the state can be + // serialized. 
+ walker.State.RemovePlannedResourceInstanceObjects() + + newState := walker.State.Close() + return newState, diags +} diff --git a/pkg/tofu/context_import_test.go b/pkg/tofu/context_import_test.go new file mode 100644 index 00000000000..375d74ad114 --- /dev/null +++ b/pkg/tofu/context_import_test.go @@ -0,0 +1,1223 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "errors" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestContextImport_basic(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-provider") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportStr) + if actual != expected { + t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s", actual, expected) + } +} + +// import 1 of count instances in the configuration +func 
TestContextImport_countIndex(t *testing.T) { + p := testProvider("aws") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "aws" { + foo = "bar" +} + +resource "aws_instance" "foo" { + count = 2 +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(0), + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportCountIndexStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_importResourceWithSensitiveDataSource(t *testing.T) { + p := testProvider("aws") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "aws" { + foo = "bar" +} + +data "aws_sensitive_data_source" "source" { + id = "source_id" +} + +resource "aws_instance" "foo" { + id = "bar" + var = data.aws_sensitive_data_source.source.value +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, + }, + } + + 
p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("source_id"), + "value": cty.StringVal("pass"), + }), + } + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + "var": cty.StringVal("pass"), + }), + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportResourceWithSensitiveDataSource) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } + + obj := state.ResourceInstance(mustResourceInstanceAddr("aws_instance.foo")) + if len(obj.Current.AttrSensitivePaths) != 1 { + t.Fatalf("Expected 1 sensitive mark for aws_instance.foo, got %#v\n", obj.Current.AttrSensitivePaths) + } +} + +func TestContextImport_collision(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-provider") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "bar", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + }) + + p.ImportResourceStateResponse = 
&providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, state, &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if !diags.HasErrors() { + t.Fatalf("succeeded; want an error indicating that the resource already exists in state") + } + + actual := strings.TrimSpace(state.String()) + expected := `aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"]` + + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_missingType(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-provider") + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("should error") + } + + actual := strings.TrimSpace(state.String()) + expected := "" + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_moduleProvider(t *testing.T) { + p := testProvider("aws") + + p.ImportResourceStateResponse = 
&providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + foo := req.Config.GetAttr("foo").AsString() + if foo != "bar" { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("not bar")) + } + + return + } + + m := testModule(t, "import-provider") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if !p.ConfigureProviderCalled { + t.Fatal("didn't configure provider") + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportStr) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + +// Importing into a module requires a provider config in that module. 
+func TestContextImport_providerModule(t *testing.T) {
+	p := testProvider("aws")
+	m := testModule(t, "import-module")
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
+		},
+	})
+
+	p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{
+		ImportedResources: []providers.ImportedResource{
+			{
+				TypeName: "aws_instance",
+				State: cty.ObjectVal(map[string]cty.Value{
+					"id": cty.StringVal("foo"),
+				}),
+			},
+		},
+	}
+
+	// Reject configuration unless the provider config inside the module was
+	// evaluated and passed through (foo must be "bar").
+	p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
+		foo := req.Config.GetAttr("foo").AsString()
+		if foo != "bar" {
+			resp.Diagnostics = resp.Diagnostics.Append(errors.New("not bar"))
+		}
+
+		return
+	}
+
+	// Import into a resource instance inside the child module.
+	_, diags := ctx.Import(m, states.NewState(), &ImportOpts{
+		Targets: []*ImportTarget{
+			{
+				CommandLineImportTarget: &CommandLineImportTarget{
+					Addr: addrs.RootModuleInstance.Child("child", addrs.NoKey).ResourceInstance(
+						addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey,
+					),
+					ID: "bar",
+				},
+			},
+		},
+	})
+	if diags.HasErrors() {
+		t.Fatalf("unexpected errors: %s", diags.Err())
+	}
+
+	if !p.ConfigureProviderCalled {
+		t.Fatal("didn't configure provider")
+	}
+}
+
+// Test that import will interpolate provider configuration and use
+// that configuration for import.
+func TestContextImport_providerConfig(t *testing.T) {
+	// Each case supplies the provider's "foo" argument through a different
+	// configuration construct (input variable vs. local value) and states
+	// the value Configure is expected to receive after interpolation.
+	testCases := map[string]struct {
+		module string
+		value  string
+	}{
+		"variables": {
+			module: "import-provider-vars",
+			value:  "bar",
+		},
+		"locals": {
+			module: "import-provider-locals",
+			value:  "baz-bar",
+		},
+	}
+	for name, test := range testCases {
+		t.Run(name, func(t *testing.T) {
+			p := testProvider("aws")
+			m := testModule(t, test.module)
+			ctx := testContext2(t, &ContextOpts{
+				Providers: map[addrs.Provider]providers.Factory{
+					addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
+				},
+			})
+
+			p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{
+				ImportedResources: []providers.ImportedResource{
+					{
+						TypeName: "aws_instance",
+						State: cty.ObjectVal(map[string]cty.Value{
+							"id": cty.StringVal("foo"),
+						}),
+					},
+				},
+			}
+
+			// SetVariables feeds var.foo = "bar" into the import operation,
+			// as a caller on the command line would.
+			state, diags := ctx.Import(m, states.NewState(), &ImportOpts{
+				Targets: []*ImportTarget{
+					{
+						CommandLineImportTarget: &CommandLineImportTarget{
+							Addr: addrs.RootModuleInstance.ResourceInstance(
+								addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey,
+							),
+							ID: "bar",
+						},
+					},
+				},
+				SetVariables: InputValues{
+					"foo": &InputValue{
+						Value:      cty.StringVal("bar"),
+						SourceType: ValueFromCaller,
+					},
+				},
+			})
+			if diags.HasErrors() {
+				t.Fatalf("unexpected errors: %s", diags.Err())
+			}
+
+			if !p.ConfigureProviderCalled {
+				t.Fatal("didn't configure provider")
+			}
+
+			// The provider must have been configured with the interpolated value.
+			if foo := p.ConfigureProviderRequest.Config.GetAttr("foo").AsString(); foo != test.value {
+				t.Fatalf("bad value %#v; want %#v", foo, test.value)
+			}
+
+			actual := strings.TrimSpace(state.String())
+			expected := strings.TrimSpace(testImportStr)
+			if actual != expected {
+				t.Fatalf("bad: \n%s", actual)
+			}
+		})
+	}
+}
+
+// Test that provider configs can't reference resources.
+func TestContextImport_providerConfigResources(t *testing.T) { + p := testProvider("aws") + pTest := testProvider("test") + m := testModule(t, "import-provider-resources") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + addrs.NewDefaultProvider("test"): testProviderFuncFixed(pTest), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("should error") + } + if got, want := diags.Err().Error(), `The configuration for provider["registry.opentofu.org/hashicorp/aws"] depends on values that cannot be determined until apply.`; !strings.Contains(got, want) { + t.Errorf("wrong error\n got: %s\nwant: %s", got, want) + } +} + +func TestContextImport_refresh(t *testing.T) { + p := testProvider("aws") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "aws" { + foo = "bar" +} + +resource "aws_instance" "foo" { +} + + +// we are only importing aws_instance.foo, so these resources will be unknown +resource "aws_instance" "bar" { +} +data "aws_data_source" "bar" { + foo = aws_instance.bar.id +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + 
State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("id"), + "foo": cty.UnknownVal(cty.String), + }), + } + + p.ReadResourceFn = nil + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "foo": cty.StringVal("bar"), + }), + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if d := state.ResourceInstance(mustResourceInstanceAddr("data.aws_data_source.bar")); d != nil { + t.Errorf("data.aws_data_source.bar has a status of ObjectPlanned and should not be in the state\ngot:%#v\n", d.Current) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportRefreshStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_refreshNil(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-provider") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{ + NewState: cty.NullVal(cty.DynamicPseudoType), + } + } + + 
state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("should error") + } + + actual := strings.TrimSpace(state.String()) + expected := "" + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_module(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-module") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportModuleStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_moduleDepth2(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-module") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: 
[]providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).Child("nested", addrs.NoKey).ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "baz", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportModuleDepth2Str) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_moduleDiff(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-module") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.Child("child", addrs.IntKey(0)).ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "baz", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportModuleStr) + if actual != expected { + t.Fatalf("\nexpected: %q\ngot: %q\n", expected, actual) + } +} + +func TestContextImport_multiState(t *testing.T) { + p := testProvider("aws") + m := 
testModule(t, "import-provider") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + "aws_instance_thing": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportMultiStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_multiStateSame(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-provider") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, 
Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + "aws_instance_thing": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("qux"), + }), + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportMultiSameStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + +func TestContextImport_nestedModuleImport(t *testing.T) { + p := testProvider("aws") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + xs = toset(["foo"]) +} + +module "a" { + for_each = local.xs + source = "./a" +} + +module "b" { + for_each = local.xs + source = "./b" + y = module.a[each.key].y +} + +resource "test_resource" "test" { +} +`, + "a/main.tf": ` +output "y" { + value = "bar" +} +`, + "b/main.tf": ` 
+variable "y" { + type = string +} + +resource "test_resource" "unused" { + value = var.y + // missing required, but should not error +} +`, + }) + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "required": {Type: cty.String, Required: true}, + }, + }, + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_resource", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + "required": cty.StringVal("value"), + }), + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "test_resource", "test", addrs.NoKey, + ), + ID: "test", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + ri := state.ResourceInstance(mustResourceInstanceAddr("test_resource.test")) + expected := `{"id":"test","required":"value"}` + if ri == nil || ri.Current == nil { + t.Fatal("no state is recorded for resource instance test_resource.test") + } + if string(ri.Current.AttrsJSON) != expected { + t.Fatalf("expected %q, got %q\n", expected, ri.Current.AttrsJSON) + } +} + +// New resources in the config during import won't exist for evaluation +// purposes (until import is upgraded to using a complete plan). 
This means +// that references to them are unknown, but in the case of single instances, we +// can at least know the type of unknown value. +func TestContextImport_newResourceUnknown(t *testing.T) { + p := testProvider("aws") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "one" { +} + +resource "test_resource" "two" { + count = length(flatten([test_resource.one.id])) +} + +resource "test_resource" "test" { +} +`}) + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_resource", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + }), + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "test_resource", "test", addrs.NoKey, + ), + ID: "test", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + ri := state.ResourceInstance(mustResourceInstanceAddr("test_resource.test")) + expected := `{"id":"test"}` + if ri == nil || ri.Current == nil { + t.Fatal("no state is recorded for resource instance test_resource.test") + } + if string(ri.Current.AttrsJSON) != expected { + t.Fatalf("expected %q, got %q\n", expected, ri.Current.AttrsJSON) + } +} + +func TestContextImport_33572(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "issue-33572") + + ctx 
:= testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + state, diags := ctx.Import(m, states.NewState(), &ImportOpts{ + Targets: []*ImportTarget{ + { + CommandLineImportTarget: &CommandLineImportTarget{ + Addr: addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.NoKey, + ), + ID: "bar", + }, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testImportStrWithDataSource) + if diff := cmp.Diff(actual, expected); len(diff) > 0 { + t.Fatalf("wrong final state\ngot:\n%s\nwant:\n%s\ndiff:\n%s", actual, expected, diff) + } +} + +const testImportStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportStrWithDataSource = ` +data.aws_data_source.bar: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportCountIndexStr = ` +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportResourceWithSensitiveDataSource = ` +data.aws_sensitive_data_source.source: + ID = source_id + provider = provider["registry.opentofu.org/hashicorp/aws"] + value = pass +aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + var = pass +` + +const testImportModuleStr = ` + +module.child[0]: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const 
testImportModuleDepth2Str = ` + +module.child[0].nested: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportMultiStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance_thing.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportMultiSameStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance_thing.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance_thing.foo-1: + ID = qux + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testImportRefreshStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar +` diff --git a/pkg/tofu/context_input.go b/pkg/tofu/context_input.go new file mode 100644 index 00000000000..4dbb0131854 --- /dev/null +++ b/pkg/tofu/context_input.go @@ -0,0 +1,211 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "log" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Input asks for input to fill unset required arguments in provider +// configurations. +// +// Unlike the other better-behaved operation methods, this one actually +// modifies some internal state inside the receving context so that the +// captured values will be implicitly available to a subsequent call to Plan, +// or to some other operation entry point. Hopefully a future iteration of +// this will change design to make that data flow more explicit. 
+// +// Because Input saves the results inside the Context object, asking for +// input twice on the same Context is invalid and will lead to undefined +// behavior. +// +// Once you've called Input with a particular config, it's invalid to call +// any other Context method with a different config, because the aforementioned +// modified internal state won't match. Again, this is an architectural wart +// that we'll hopefully resolve in future. +func (c *Context) Input(config *configs.Config, mode InputMode) tfdiags.Diagnostics { + // This function used to be responsible for more than it is now, so its + // interface is more general than its current functionality requires. + // It now exists only to handle interactive prompts for provider + // configurations, with other prompts the responsibility of the CLI + // layer prior to calling in to this package. + // + // (Hopefully in future the remaining functionality here can move to the + // CLI layer too in order to avoid this odd situation where core code + // produces UI input prompts.) + + var diags tfdiags.Diagnostics + defer c.acquireRun("input")() + + schemas, moreDiags := c.Schemas(config, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return diags + } + + if c.uiInput == nil { + log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") + return diags + } + + ctx := context.Background() + + if mode&InputModeProvider != 0 { + log.Printf("[TRACE] Context.Input: Prompting for provider arguments") + + // We prompt for input only for provider configurations defined in + // the root module. Provider configurations in other modules are a + // legacy thing we no longer recommend, and even if they weren't we + // can't practically prompt for their inputs here because we've not + // yet done "expansion" and so we don't know whether the modules are + // using count or for_each. 
+ + pcs := make(map[string]*configs.Provider) + pas := make(map[string]addrs.LocalProviderConfig) + for _, pc := range config.Module.ProviderConfigs { + addr := pc.Addr() + pcs[addr.String()] = pc + pas[addr.String()] = addr + log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) + } + // We also need to detect _implied_ provider configs from resources. + // These won't have *configs.Provider objects, but they will still + // exist in the map and we'll just treat them as empty below. + for _, rc := range config.Module.ManagedResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) + } + } + for _, rc := range config.Module.DataResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) + } + } + + for pk, pa := range pas { + pc := pcs[pk] // will be nil if this is an implied config + + // Wrap the input into a namespace + input := &PrefixUIInput{ + IdPrefix: pk, + QueryPrefix: pk + ".", + UIInput: c.uiInput, + } + + providerFqn := config.Module.ProviderForLocalConfig(pa) + schema := schemas.ProviderConfig(providerFqn) + if schema == nil { + // Could either be an incorrect config or just an incomplete + // mock in tests. We'll let a later pass decide, and just + // ignore this for the purposes of gathering input. 
+ log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.LocalName) + continue + } + + // For our purposes here we just want to detect if attrbutes are + // set in config at all, so rather than doing a full decode + // (which would require us to prepare an evalcontext, etc) we'll + // use the low-level HCL API to process only the top-level + // structure. + var attrExprs hcl.Attributes // nil if there is no config + if pc != nil && pc.Config != nil { + lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) + content, _, diags := pc.Config.PartialContent(lowLevelSchema) + if diags.HasErrors() { + log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) + continue + } + attrExprs = content.Attributes + } + + keys := make([]string, 0, len(schema.Attributes)) + for key := range schema.Attributes { + keys = append(keys, key) + } + sort.Strings(keys) + + vals := map[string]cty.Value{} + for _, key := range keys { + attrS := schema.Attributes[key] + if attrS.Optional { + continue + } + if attrExprs != nil { + if _, exists := attrExprs[key]; exists { + continue + } + } + if !attrS.Type.Equals(cty.String) { + continue + } + + log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) + rawVal, err := input.Input(ctx, &InputOpts{ + Id: key, + Query: key, + Description: attrS.Description, + }) + if err != nil { + log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) + continue + } + + vals[key] = cty.StringVal(rawVal) + } + + absConfigAddr := addrs.AbsProviderConfig{ + Provider: providerFqn, + Alias: pa.Alias, + Module: config.Path, + } + c.providerInputConfig[absConfigAddr.String()] = vals + + log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) + } + } + + return diags +} + +// schemaForInputSniffing returns a transformed version of a given schema +// that marks all attributes as optional, which the Context.Input method 
can +// use to detect whether a required argument is set without missing arguments +// themselves generating errors. +func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} diff --git a/pkg/tofu/context_input_test.go b/pkg/tofu/context_input_test.go new file mode 100644 index 00000000000..78102c52a8c --- /dev/null +++ b/pkg/tofu/context_input_test.go @@ -0,0 +1,474 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "strings" + "sync" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestContext2Input_provider(t *testing.T) { + m := testModule(t, "input-provider") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + Description: "something something", + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + inp := &MockUIInput{ + InputReturnMap: map[string]string{ + "provider.aws.foo": "bar", + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: 
inp, + }) + + var actual interface{} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + actual = req.Config.GetAttr("foo").AsString() + return + } + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } + + if !inp.InputCalled { + t.Fatal("no input prompt; want prompt for argument \"foo\"") + } + if got, want := inp.InputOpts.Description, "something something"; got != want { + t.Errorf("wrong description\ngot: %q\nwant: %q", got, want) + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !reflect.DeepEqual(actual, "bar") { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") + } +} + +func TestContext2Input_providerMulti(t *testing.T) { + m := testModule(t, "input-provider-multi") + + getProviderSchemaResponse := getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + Description: "something something", + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + // In order to update the provider to check only the configure calls during + // apply, we will need to inject a new factory function after plan. We must + // use a closure around the factory, because in order for the inputs to + // work during apply we need to maintain the same context value, preventing + // us from assigning a new Providers map. 
+ providerFactory := func() (providers.Interface, error) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponse + return p, nil + } + + inp := &MockUIInput{ + InputReturnMap: map[string]string{ + "provider.aws.foo": "bar", + "provider.aws.east.foo": "bar", + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { + return providerFactory() + }, + }, + UIInput: inp, + }) + + var actual []interface{} + var lock sync.Mutex + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + providerFactory = func() (providers.Interface, error) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponse + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + lock.Lock() + defer lock.Unlock() + actual = append(actual, req.Config.GetAttr("foo").AsString()) + return + } + return p, nil + } + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + expected := []interface{}{"bar", "bar"} + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, expected) + } +} + +func TestContext2Input_providerOnce(t *testing.T) { + m := testModule(t, "input-provider-once") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } +} + +func TestContext2Input_providerId(t *testing.T) { + input := new(MockUIInput) + + m := testModule(t, "input-provider") + + p := testProvider("aws") + 
p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + Description: "something something", + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + var actual interface{} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + actual = req.Config.GetAttr("foo").AsString() + return + } + + input.InputReturnMap = map[string]string{ + "provider.aws.foo": "bar", + } + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !reflect.DeepEqual(actual, "bar") { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") + } +} + +func TestContext2Input_providerOnly(t *testing.T) { + input := new(MockUIInput) + + m := testModule(t, "input-provider-vars") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Required: true}, + "id": {Type: cty.String, Computed: true}, + "type": {Type: cty.String, 
Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + input.InputReturnMap = map[string]string{ + "provider.aws.foo": "bar", + } + + var actual interface{} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + actual = req.Config.GetAttr("foo").AsString() + return + } + + if err := ctx.Input(m, InputModeProvider); err != nil { + t.Fatalf("err: %s", err) + } + + // NOTE: This is a stale test case from an older version of Terraform + // where Input was responsible for prompting for both input variables _and_ + // provider configuration arguments, where it was trying to test the case + // where we were turning off the mode of prompting for input variables. + // That's now always disabled, and so this is essentially the same as the + // normal Input test, but we're preserving it until we have time to review + // and make sure this isn't inadvertently providing unique test coverage + // other than what it set out to test. 
+ plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("us-west-2"), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + + state, err := ctx.Apply(plan, m) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, "bar") { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", actual, "bar") + } + + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(testTofuInputProviderOnlyStr) + if actualStr != expectedStr { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actualStr, expectedStr) + } +} + +func TestContext2Input_providerVars(t *testing.T) { + input := new(MockUIInput) + m := testModule(t, "input-provider-with-vars") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + input.InputReturnMap = map[string]string{ + "var.foo": "bar", + } + + var actual interface{} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + actual = req.Config.GetAttr("foo").AsString() + return + } + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("bar"), + SourceType: ValueFromCaller, + }, + }, + }) + assertNoErrors(t, diags) + + if _, diags := ctx.Apply(plan, m); diags.HasErrors() { + t.Fatalf("apply errors: %s", diags.Err()) + } + + if !reflect.DeepEqual(actual, "bar") { + t.Fatalf("bad: %#v", actual) + } +} + +func TestContext2Input_providerVarsModuleInherit(t *testing.T) { + input := new(MockUIInput) + m := testModule(t, "input-provider-with-vars-and-module") + p := 
testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } +} + +// adding a list interpolation in fails to interpolate the count variable +func TestContext2Input_submoduleTriggersInvalidCount(t *testing.T) { + input := new(MockUIInput) + m := testModule(t, "input-submodule-count") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } +} + +// In this case, a module variable can't be resolved from a data source until +// it's refreshed, but it can't be refreshed during Input. +func TestContext2Input_dataSourceRequiresRefresh(t *testing.T) { + input := new(MockUIInput) + p := testProvider("null") + m := testModule(t, "input-module-data-vars") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + DataSources: map[string]*configschema.Block{ + "null_data_source": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }) + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: req.Config, + } + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_data_source", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "-", + "foo.#": "1", + "foo.0": "a", + // foo.1 
exists in the data source, but needs to be refreshed. + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("null"), + Module: addrs.RootModule, + }, + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + if diags := ctx.Input(m, InputModeStd); diags.HasErrors() { + t.Fatalf("input errors: %s", diags.Err()) + } + + // ensure that plan works after Refresh. This is a legacy test that + // doesn't really make sense anymore, because Refresh is really just + // a wrapper around plan anyway, but we're keeping it until we get a + // chance to review and check whether it's giving us any additional + // test coverage aside from what it's specifically intending to test. + if _, diags := ctx.Refresh(m, state, DefaultPlanOpts); diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + if _, diags := ctx.Plan(m, state, DefaultPlanOpts); diags.HasErrors() { + t.Fatalf("plan errors: %s", diags.Err()) + } +} diff --git a/pkg/tofu/context_plan.go b/pkg/tofu/context_plan.go new file mode 100644 index 00000000000..a8eb77b3401 --- /dev/null +++ b/pkg/tofu/context_plan.go @@ -0,0 +1,1038 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"bytes"
	"fmt"
	"log"
	"sort"
	"strings"
	"time"

	"github.com/hashicorp/hcl/v2"

	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/instances"
	"github.com/kubegems/opentofu/pkg/lang/globalref"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/refactoring"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// PlanOpts are the various options that affect the details of how OpenTofu
// will build a plan.
type PlanOpts struct {
	// Mode defines what variety of plan the caller wishes to create.
	// Refer to the documentation of the plans.Mode type and its values
	// for more information.
	Mode plans.Mode

	// SkipRefresh specifies to trust that the current values for managed
	// resource instances in the prior state are accurate and to therefore
	// disable the usual step of fetching updated values for each resource
	// instance using its corresponding provider.
	SkipRefresh bool

	// PreDestroyRefresh indicated that this is being passed to a plan used to
	// refresh the state immediately before a destroy plan.
	// FIXME: This is a temporary fix to allow the pre-destroy refresh to
	// succeed. The refreshing operation during destroy must be a special case,
	// which can allow for missing instances in the state, and avoid blocking
	// on failing condition tests. The destroy plan itself should be
	// responsible for this special case of refreshing, and the separate
	// pre-destroy plan removed entirely.
	PreDestroyRefresh bool

	// SetVariables are the raw values for root module variables as provided
	// by the user who is requesting the run, prior to any normalization or
	// substitution of defaults. See the documentation for the InputValue
	// type for more information on how to correctly populate this.
	SetVariables InputValues

	// If Targets has a non-zero length then it activates targeted planning
	// mode, where OpenTofu will take actions only for resource instances
	// mentioned in this set and any other objects those resource instances
	// depend on.
	//
	// Targeted planning mode is intended for exceptional use only,
	// and so populating this field will cause OpenTofu to generate extra
	// warnings as part of the planning result.
	Targets []addrs.Targetable

	// ForceReplace is a set of resource instance addresses whose corresponding
	// objects should be forced planned for replacement if the provider's
	// plan would otherwise have been to either update the object in-place or
	// to take no action on it at all.
	//
	// A typical use of this argument is to ask OpenTofu to replace an object
	// which the user has determined is somehow degraded (via information from
	// outside of OpenTofu), thereby hopefully replacing it with a
	// fully-functional new object.
	ForceReplace []addrs.AbsResourceInstance

	// ExternalReferences allows the external caller to pass in references to
	// nodes that should not be pruned even if they are not referenced within
	// the actual graph.
	ExternalReferences []*addrs.Reference

	// ImportTargets is a list of target resources to import. These resources
	// will be added to the plan graph.
	ImportTargets []*ImportTarget

	// EndpointsToRemove are the list of resources and modules to forget from
	// the state.
	EndpointsToRemove []addrs.ConfigRemovable

	// GenerateConfig tells OpenTofu where to write any generated configuration
	// for any ImportTargets that do not have configuration already.
	//
	// If empty, then no config will be generated.
	GenerateConfigPath string
}

// Plan generates an execution plan by comparing the given configuration
// with the given previous run state.
//
// The given planning options allow control of various other details of the
// planning process that are not represented directly in the configuration.
// You can use tofu.DefaultPlanOpts to generate a normal plan with no
// special options.
//
// If the returned diagnostics contains no errors then the returned plan is
// applyable, although OpenTofu cannot guarantee that applying it will fully
// succeed. If the returned diagnostics contains errors but this method
// still returns a non-nil Plan then the plan describes the subset of actions
// planned so far, which is not safe to apply but could potentially be used
// by the UI layer to give extra context to support understanding of the
// returned error messages.
func (c *Context) Plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	defer c.acquireRun("plan")()
	var diags tfdiags.Diagnostics

	// Save the downstream functions from needing to deal with these broken situations.
	// No real callers should rely on these, but we have a bunch of old and
	// sloppy tests that don't always populate arguments properly.
	if config == nil {
		config = configs.NewEmptyConfig()
	}
	if prevRunState == nil {
		prevRunState = states.NewState()
	}
	if opts == nil {
		opts = &PlanOpts{
			Mode: plans.NormalMode,
		}
	}

	moreDiags := c.checkConfigDependencies(config)
	diags = diags.Append(moreDiags)
	// If required dependencies are not available then we'll bail early since
	// otherwise we're likely to just see a bunch of other errors related to
	// incompatibilities, which could be overwhelming for the user.
	if diags.HasErrors() {
		return nil, diags
	}

	// Validate that the requested mode and option combination is one that
	// OpenTofu Core actually supports before doing any real work.
	switch opts.Mode {
	case plans.NormalMode, plans.DestroyMode:
		// OK
	case plans.RefreshOnlyMode:
		if opts.SkipRefresh {
			// The CLI layer (and other similar callers) should prevent this
			// combination of options.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Incompatible plan options",
				"Cannot skip refreshing in refresh-only mode. This is a bug in OpenTofu.",
			))
			return nil, diags
		}
	default:
		// The CLI layer (and other similar callers) should not try to
		// create a context for a mode that OpenTofu Core doesn't support.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			fmt.Sprintf("OpenTofu Core doesn't know how to handle plan mode %s. This is a bug in OpenTofu.", opts.Mode),
		))
		return nil, diags
	}
	if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode {
		// The other modes don't generate no-op or update actions that we might
		// upgrade to be "replace", so doesn't make sense to combine those.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			"Forcing resource instance replacement (with -replace=...) is allowed only in normal planning mode.",
		))
		return nil, diags
	}

	// By the time we get here, we should have values defined for all of
	// the root module variables, even if some of them are "unknown". It's the
	// caller's responsibility to have already handled the decoding of these
	// from the various ways the CLI allows them to be set and to produce
	// user-friendly error messages if they are not all present, and so
	// the error message from checkInputVariables should never be seen and
	// includes language asking the user to report a bug.
	varDiags := checkInputVariables(config.Module.Variables, opts.SetVariables)
	diags = diags.Append(varDiags)

	if len(opts.Targets) > 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Warning,
			"Resource targeting is in effect",
			`You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.

The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`,
		))
	}

	// Dispatch to the mode-specific planning entry point.
	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics
	switch opts.Mode {
	case plans.NormalMode:
		plan, planDiags = c.plan(config, prevRunState, opts)
	case plans.DestroyMode:
		plan, planDiags = c.destroyPlan(config, prevRunState, opts)
	case plans.RefreshOnlyMode:
		plan, planDiags = c.refreshOnlyPlan(config, prevRunState, opts)
	default:
		panic(fmt.Sprintf("unsupported plan mode %s", opts.Mode))
	}
	diags = diags.Append(planDiags)
	// NOTE: We're intentionally not returning early when diags.HasErrors
	// here because we'll still populate other metadata below on a best-effort
	// basis to try to give the UI some extra context to return alongside the
	// error messages.

	// convert the variables into the format expected for the plan
	varVals := make(map[string]plans.DynamicValue, len(opts.SetVariables))
	for k, iv := range opts.SetVariables {
		if iv.Value == cty.NilVal {
			continue // We only record values that the caller actually set
		}

		// We use cty.DynamicPseudoType here so that we'll save both the
		// value _and_ its dynamic type in the plan, so we can recover
		// exactly the same value later.
		dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to prepare variable value for plan",
				fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
			))
			continue
		}
		varVals[k] = dv
	}

	// insert the run-specific data from the context into the plan; variables,
	// targets and provider SHAs.
	if plan != nil {
		plan.VariableValues = varVals
		plan.TargetAddrs = opts.Targets
	} else if !diags.HasErrors() {
		panic("nil plan but no errors")
	}

	if plan != nil {
		relevantAttrs, rDiags := c.relevantResourceAttrsForPlan(config, plan)
		diags = diags.Append(rDiags)
		plan.RelevantAttributes = relevantAttrs
	}

	if diags.HasErrors() {
		// We can't proceed further with an invalid plan, because an invalid
		// plan isn't applyable by definition.
		if plan != nil {
			// We'll explicitly mark our plan as errored so that it can't
			// be accidentally applied even though it's incomplete.
			plan.Errored = true
		}
		return plan, diags
	}

	diags = diags.Append(c.checkApplyGraph(plan, config))

	return plan, diags
}

// checkApplyGraph builds the apply graph out of the current plan to
// check for any errors that may arise once the planned changes are added to
// the graph. This allows tofu to report errors (mostly cycles) during
// plan that would otherwise only crop up during apply.
func (c *Context) checkApplyGraph(plan *plans.Plan, config *configs.Config) tfdiags.Diagnostics {
	if plan.Changes.Empty() {
		// Nothing to apply means nothing to graph-check.
		log.Println("[DEBUG] no planned changes, skipping apply graph check")
		return nil
	}
	log.Println("[DEBUG] building apply graph to check for errors")
	_, _, diags := c.applyGraph(plan, config, true)
	return diags
}

// DefaultPlanOpts is the set of planning options to use for a normal plan
// with no special options, suitable for passing to Context.Plan.
var DefaultPlanOpts = &PlanOpts{
	Mode: plans.NormalMode,
}

// SimplePlanOpts is a constructor to help with creating "simple" values of
// PlanOpts which only specify a mode and input variables.
//
// This helper function is primarily intended for use in straightforward
// tests that don't need any of the more "esoteric" planning options. For
// handling real user requests to run OpenTofu, it'd probably be better
// to construct a *PlanOpts value directly and provide a way for the user
// to set values for all of its fields.
//
// The "mode" and "setVariables" arguments become the values of the "Mode"
// and "SetVariables" fields in the result. Refer to the PlanOpts type
// documentation to learn about the meanings of those fields.
func SimplePlanOpts(mode plans.Mode, setVariables InputValues) *PlanOpts {
	return &PlanOpts{
		Mode:         mode,
		SetVariables: setVariables,
	}
}

// plan is the NormalMode planning entry point. It resolves import targets
// and removal ("removed" block) endpoints from the configuration, records
// them in opts, and then performs the main plan graph walk.
//
// It panics if called with any mode other than plans.NormalMode; Context.Plan
// is responsible for dispatching only normal-mode requests here.
func (c *Context) plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if opts.Mode != plans.NormalMode {
		panic(fmt.Sprintf("called Context.plan with %s", opts.Mode))
	}

	// Discover import blocks and check they're valid before walking; an
	// invalid import target aborts planning immediately.
	opts.ImportTargets = c.findImportTargets(config)
	importTargetDiags := c.validateImportTargets(config, opts.ImportTargets, opts.GenerateConfigPath)
	diags = diags.Append(importTargetDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	var endpointsToRemoveDiags tfdiags.Diagnostics
	opts.EndpointsToRemove, endpointsToRemoveDiags = refactoring.GetEndpointsToRemove(config)
	diags = diags.Append(endpointsToRemoveDiags)

	if diags.HasErrors() {
		return nil, diags
	}

	plan, walkDiags := c.planWalk(config, prevRunState, opts)
	diags = diags.Append(walkDiags)

	return plan, diags
}

// refreshOnlyPlan is the RefreshOnlyMode planning entry point. It performs a
// plan walk with change-planning disabled and then verifies that the walk
// really did produce no resource changes, reporting a bug otherwise.
//
// It panics if called with any mode other than plans.RefreshOnlyMode.
func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if opts.Mode != plans.RefreshOnlyMode {
		panic(fmt.Sprintf("called Context.refreshOnlyPlan with %s", opts.Mode))
	}

	plan, walkDiags := c.planWalk(config, prevRunState, opts)
	diags = diags.Append(walkDiags)
	if diags.HasErrors() {
		// Non-nil plan along with errors indicates a non-applyable partial
		// plan that's only suitable to be shown to the user as extra context
		// to help understand the errors.
		return plan, diags
	}

	// If the graph builder and graph nodes correctly obeyed our directive
	// to refresh only, the set of resource changes should always be empty.
	// We'll safety-check that here so we can return a clear message about it,
	// rather than probably just generating confusing output at the UI layer.
	if len(plan.Changes.Resources) != 0 {
		// Some extra context in the logs in case the user reports this message
		// as a bug, as a starting point for debugging.
		for _, rc := range plan.Changes.Resources {
			if depKey := rc.DeposedKey; depKey == states.NotDeposed {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
			} else {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
			}
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid refresh-only plan",
			"OpenTofu generated planned resource changes in a refresh-only plan. This is a bug in OpenTofu.",
		))
	}

	// We don't populate RelevantResources for a refresh-only plan, because
	// they never have any planned actions and so no resource can ever be
	// "relevant" per the intended meaning of that field.

	return plan, diags
}

// destroyPlan is the DestroyMode planning entry point. Unless SkipRefresh is
// set, it first runs a normal-mode pre-destroy refresh plan to bring the
// prior state up to date, and then performs the destroy plan walk against
// that refreshed state.
//
// It panics if called with any mode other than plans.DestroyMode.
func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if opts.Mode != plans.DestroyMode {
		panic(fmt.Sprintf("called Context.destroyPlan with %s", opts.Mode))
	}

	priorState := prevRunState

	// A destroy plan starts by running Refresh to read any pending data
	// sources, and remove missing managed resources. This is required because
	// a "destroy plan" is only creating delete changes, and is essentially a
	// local operation.
	//
	// NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk
	// below to upgrade the prevRunState and priorState both to the latest
	// resource type schemas, so NodePlanDestroyableResourceInstance.Execute
	// must coordinate with this by taking that action only when c.skipRefresh
	// _is_ set. This coupling between the two is unfortunate but necessary
	// to work within our current structure.
	if !opts.SkipRefresh && !prevRunState.Empty() {
		log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
		refreshOpts := *opts
		refreshOpts.Mode = plans.NormalMode
		refreshOpts.PreDestroyRefresh = true

		// FIXME: A normal plan is required here to refresh the state, because
		// the state and configuration may not match during a destroy, and a
		// normal refresh plan can fail with evaluation errors. In the future
		// the destroy plan should take care of refreshing instances itself,
		// where the special cases of evaluation and skipping condition checks
		// can be done.
		refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
		if refreshDiags.HasErrors() {
			// NOTE: Normally we'd append diagnostics regardless of whether
			// there are errors, just in case there are warnings we'd want to
			// preserve, but we're intentionally _not_ doing that here because
			// if the first plan succeeded then we'll be running another plan
			// in DestroyMode below, and we don't want to double-up any
			// warnings that both plan walks would generate.
			// (This does mean we won't show any warnings that would've been
			// unique to only this walk, but we're assuming here that if the
			// warnings aren't also applicable to a destroy plan then we'd
			// rather not show them here, because this non-destroy plan for
			// refreshing is largely an implementation detail.)
			diags = diags.Append(refreshDiags)
			return nil, diags
		}

		// We'll use the refreshed state -- which is the "prior state" from
		// the perspective of this "destroy plan" -- as the starting state
		// for our destroy-plan walk, so it can take into account if we
		// detected during refreshing that anything was already deleted outside OpenTofu.
		priorState = refreshPlan.PriorState.DeepCopy()

		// The refresh plan may have upgraded state for some resources, make
		// sure we store the new version.
		prevRunState = refreshPlan.PrevRunState.DeepCopy()
		log.Printf("[TRACE] Context.destroyPlan: now _really_ creating a destroy plan")
	}

	destroyPlan, walkDiags := c.planWalk(config, priorState, opts)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		// Non-nil plan along with errors indicates a non-applyable partial
		// plan that's only suitable to be shown to the user as extra context
		// to help understand the errors.
		return destroyPlan, diags
	}

	if !opts.SkipRefresh {
		// If we didn't skip refreshing then we want the previous run state to
		// be the one we originally fed into the c.plan call above,
		// not the refreshed version we used for the destroy planWalk.
		destroyPlan.PrevRunState = prevRunState
	}

	relevantAttrs, rDiags := c.relevantResourceAttrsForPlan(config, destroyPlan)
	diags = diags.Append(rDiags)

	destroyPlan.RelevantAttributes = relevantAttrs
	return destroyPlan, diags
}

// prePlanFindAndApplyMoves collects both the explicit "moved" statements from
// the configuration and any implied move statements derived from the previous
// run state, applies them to prevRunState (mutating it in place), and returns
// the combined statements along with the results of applying them.
//
// NOTE(review): the targets parameter is currently unused by this function's
// body; targeting interaction is handled separately by
// prePlanVerifyTargetedMoves.
func (c *Context) prePlanFindAndApplyMoves(config *configs.Config, prevRunState *states.State, targets []addrs.Targetable) ([]refactoring.MoveStatement, refactoring.MoveResults) {
	explicitMoveStmts := refactoring.FindMoveStatements(config)
	implicitMoveStmts := refactoring.ImpliedMoveStatements(config, prevRunState, explicitMoveStmts)
	var moveStmts []refactoring.MoveStatement
	if stmtsLen := len(explicitMoveStmts) + len(implicitMoveStmts); stmtsLen > 0 {
		moveStmts = make([]refactoring.MoveStatement, 0, stmtsLen)
		moveStmts = append(moveStmts, explicitMoveStmts...)
		moveStmts = append(moveStmts, implicitMoveStmts...)
	}
	moveResults := refactoring.ApplyMoves(moveStmts, prevRunState)
	return moveStmts, moveResults
}

// prePlanVerifyTargetedMoves checks that, when resource targeting is active,
// every move result's source and destination address is covered by at least
// one target. Any uncovered addresses produce an error diagnostic listing
// the extra -target options the user would need to add.
func (c *Context) prePlanVerifyTargetedMoves(moveResults refactoring.MoveResults, targets []addrs.Targetable) tfdiags.Diagnostics {
	if len(targets) < 1 {
		return nil // the following only matters when targeting
	}

	var diags tfdiags.Diagnostics

	var excluded []addrs.AbsResourceInstance
	for _, result := range moveResults.Changes.Values() {
		fromMatchesTarget := false
		toMatchesTarget := false
		for _, targetAddr := range targets {
			if targetAddr.TargetContains(result.From) {
				fromMatchesTarget = true
			}
			if targetAddr.TargetContains(result.To) {
				toMatchesTarget = true
			}
		}
		if !fromMatchesTarget {
			excluded = append(excluded, result.From)
		}
		if !toMatchesTarget {
			excluded = append(excluded, result.To)
		}
	}
	if len(excluded) > 0 {
		sort.Slice(excluded, func(i, j int) bool {
			return excluded[i].Less(excluded[j])
		})

		var listBuf strings.Builder
		var prevResourceAddr addrs.AbsResource
		for _, instAddr := range excluded {
			// Targeting generally ends up selecting whole resources rather
			// than individual instances, because we don't factor in
			// individual instances until DynamicExpand, so we're going to
			// always show whole resource addresses here, excluding any
			// instance keys. (This also neatly avoids dealing with the
			// different quoting styles required for string instance keys
			// on different shells, which is handy.)
			//
			// To avoid showing duplicates when we have multiple instances
			// of the same resource, we'll remember the most recent
			// resource we rendered in prevResource, which is sufficient
			// because we sorted the list of instance addresses above, and
			// our sort order always groups together instances of the same
			// resource.
			resourceAddr := instAddr.ContainingResource()
			if resourceAddr.Equal(prevResourceAddr) {
				continue
			}
			fmt.Fprintf(&listBuf, "\n -target=%q", resourceAddr.String())
			prevResourceAddr = resourceAddr
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Moved resource instances excluded by targeting",
			fmt.Sprintf(
				"Resource instances in your current state have moved to new addresses in the latest configuration. OpenTofu must include those resource instances while planning in order to ensure a correct result, but your -target=... options do not fully cover all of those resource instances.\n\nTo create a valid plan, either remove your -target=... options altogether or add the following additional target options:%s\n\nNote that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.",
				listBuf.String(),
			),
		))
	}

	return diags
}

// postPlanValidateMoves validates the collected move statements against the
// configuration and the full set of expanded instances, once planning has
// determined what those instances are.
func (c *Context) postPlanValidateMoves(config *configs.Config, stmts []refactoring.MoveStatement, allInsts instances.Set) tfdiags.Diagnostics {
	return refactoring.ValidateMoves(stmts, config, allInsts)
}

// All import target addresses with a key must already exist in config.
// When we are able to generate config for expanded resources, this rule can be
// relaxed.
func (c *Context) postPlanValidateImports(importResolver *ImportResolver, allInst instances.Set) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	for _, importTarget := range importResolver.GetAllImports() {
		if !allInst.HasResourceInstance(importTarget.Addr) {
			diags = diags.Append(importResourceWithoutConfigDiags(importTarget.Addr.String(), nil))
		}
	}
	return diags
}

// findImportTargets builds a list of import targets by going over the import
// blocks in the config.
func (c *Context) findImportTargets(config *configs.Config) []*ImportTarget {
	var importTargets []*ImportTarget
	for _, ic := range config.Module.Import {
		importTargets = append(importTargets, &ImportTarget{
			Config: ic,
		})
	}
	return importTargets
}

// validateImportTargets makes sure all import targets are not breaking the following rules:
//  1. Imports are attempted into resources that do not exist (if config generation is not enabled).
//  2. Config generation is not attempted for resources inside sub-modules
//  3. Config generation is not attempted for resources with indexes (for_each/count) - This will always include
//     resources for which we could not yet resolve the address
func (c *Context) validateImportTargets(config *configs.Config, importTargets []*ImportTarget, generateConfigPath string) (diags tfdiags.Diagnostics) {
	// Config generation is requested whenever a non-empty output path is set.
	configGeneration := len(generateConfigPath) > 0
	for _, imp := range importTargets {
		staticAddress := imp.StaticAddr()
		descendantConfig := config.Descendent(staticAddress.Module)

		// If import target's module does not exist
		if descendantConfig == nil {
			if configGeneration {
				// Attempted config generation for resource in non-existing module. So error because resource generation
				// is not allowed in a sub-module
				diags = diags.Append(importConfigGenerationInModuleDiags(staticAddress.String(), imp.Config))
			} else {
				diags = diags.Append(importResourceWithoutConfigDiags(staticAddress.String(), imp.Config))
			}
			continue
		}

		if _, exists := descendantConfig.Module.ManagedResources[staticAddress.Resource.String()]; !exists {
			if configGeneration {
				if imp.ResolvedAddr() == nil {
					// If we could not resolve the address of the import target, the address must have contained indexes
					diags = diags.Append(importConfigGenerationWithIndexDiags(staticAddress.String(), imp.Config))
					continue
				} else if !imp.ResolvedAddr().Module.IsRoot() {
					diags = diags.Append(importConfigGenerationInModuleDiags(imp.ResolvedAddr().String(), imp.Config))
					continue
				} else if imp.ResolvedAddr().Resource.Key != addrs.NoKey {
					diags = diags.Append(importConfigGenerationWithIndexDiags(imp.ResolvedAddr().String(), imp.Config))
					continue
				}
			} else {
				diags = diags.Append(importResourceWithoutConfigDiags(staticAddress.String(), imp.Config))
				continue
			}
		}
	}
	return
}

// importConfigGenerationInModuleDiags returns an error diagnostic for an
// import that requests config generation for a resource inside a sub-module,
// which is not supported. The diagnostic is attached to the import block's
// declaration range when config is non-nil.
func importConfigGenerationInModuleDiags(addressStr string, config *configs.Import) *hcl.Diagnostic {
	diag := hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  "Cannot generate configuration for resource inside sub-module",
		Detail:   fmt.Sprintf("The configuration for the given import %s does not exist. Configuration generation is only possible for resources in the root module, and not possible for resources in sub-modules.", addressStr),
	}

	if config != nil {
		diag.Subject = config.DeclRange.Ptr()
	}

	return &diag
}

// importConfigGenerationWithIndexDiags returns an error diagnostic for an
// import that requests config generation for a resource address carrying an
// instance key (count/for_each), which is not supported.
func importConfigGenerationWithIndexDiags(addressStr string, config *configs.Import) *hcl.Diagnostic {
	diag := hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  "Configuration generation for count and for_each resources not supported",
		Detail:   fmt.Sprintf("The configuration for the given import %s does not exist. Configuration generation is only possible for resources that do not use count or for_each", addressStr),
	}

	if config != nil {
		diag.Subject = config.DeclRange.Ptr()
	}

	return &diag
}

// importResourceWithoutConfigDiags returns an error diagnostic for an import
// whose target resource has no corresponding configuration block (and config
// generation was not requested).
func importResourceWithoutConfigDiags(addressStr string, config *configs.Import) *hcl.Diagnostic {
	diag := hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  "Configuration for import target does not exist",
		Detail:   fmt.Sprintf("The configuration for the given import %s does not exist. All target instances must have an associated configuration to be imported.", addressStr),
	}

	if config != nil {
		diag.Subject = config.DeclRange.Ptr()
	}

	return &diag
}

// planWalk is the common implementation shared by all three planning modes:
// it applies move statements, builds the mode-appropriate plan graph, walks
// it, validates imports and moves against the expanded instance set, and
// assembles the resulting plans.Plan value.
func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	log.Printf("[DEBUG] Building and walking plan graph for %s", opts.Mode)

	prevRunState = prevRunState.DeepCopy() // don't modify the caller's object when we process the moves
	moveStmts, moveResults := c.prePlanFindAndApplyMoves(config, prevRunState, opts.Targets)

	// If resource targeting is in effect then it might conflict with the
	// move result.
	diags = diags.Append(c.prePlanVerifyTargetedMoves(moveResults, opts.Targets))
	if diags.HasErrors() {
		// We'll return early here, because if we have any moved resource
		// instances excluded by targeting then planning is likely to encounter
		// strange problems that may lead to confusing error messages.
		return nil, diags
	}

	graph, walkOp, moreDiags := c.planGraph(config, prevRunState, opts)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	timestamp := time.Now().UTC()

	// If we get here then we should definitely have a non-nil "graph", which
	// we can now walk.
	changes := plans.NewChanges()
	walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{
		Config:            config,
		InputState:        prevRunState,
		Changes:           changes,
		MoveResults:       moveResults,
		PlanTimeTimestamp: timestamp,
	})
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)

	allInsts := walker.InstanceExpander.AllInstances()

	importValidateDiags := c.postPlanValidateImports(walker.ImportResolver, allInsts)
	if importValidateDiags.HasErrors() {
		return nil, importValidateDiags
	}

	moveValidateDiags := c.postPlanValidateMoves(config, moveStmts, allInsts)
	if moveValidateDiags.HasErrors() {
		// If any of the move statements are invalid then those errors take
		// precedence over any other errors because an incomplete move graph
		// is quite likely to be the _cause_ of various errors. This oddity
		// comes from the fact that we need to apply the moves before we
		// actually validate them, because validation depends on the result
		// of first trying to plan.
		return nil, moveValidateDiags
	}
	diags = diags.Append(moveValidateDiags) // might just contain warnings

	if moveResults.Blocked.Len() > 0 && !diags.HasErrors() {
		// If we had blocked moves and we're not going to be returning errors
		// then we'll report the blockers as a warning. We do this only in the
		// absence of errors because invalid move statements might well be
		// the root cause of the blockers, and so better to give an actionable
		// error message than a less-actionable warning.
		diags = diags.Append(blockedMovesWarningDiag(moveResults))
	}

	// If we reach this point with error diagnostics then "changes" is a
	// representation of the subset of changes we were able to plan before
	// we encountered errors, which we'll return as part of a non-nil plan
	// so that e.g. the UI can show what was planned so far in case that extra
	// context helps the user to understand the error messages we're returning.
	prevRunState = walker.PrevRunState.Close()

	// The refreshed state may have data resource objects which were deferred
	// to apply and cannot be serialized.
	walker.RefreshState.RemovePlannedResourceInstanceObjects()
	priorState := walker.RefreshState.Close()

	driftedResources, driftDiags := c.driftedResources(config, prevRunState, priorState, moveResults)
	diags = diags.Append(driftDiags)

	plan := &plans.Plan{
		UIMode:             opts.Mode,
		Changes:            changes,
		DriftedResources:   driftedResources,
		PrevRunState:       prevRunState,
		PriorState:         priorState,
		PlannedState:       walker.State.Close(),
		ExternalReferences: opts.ExternalReferences,
		Checks:             states.NewCheckResults(walker.Checks),
		Timestamp:          timestamp,

		// Other fields get populated by Context.Plan after we return
	}
	return plan, diags
}

// planGraph builds the plan graph appropriate to opts.Mode and returns it
// along with the walk operation that should be used to traverse it. It
// panics for any mode not covered by the plans.Mode enumeration.
func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*Graph, walkOperation, tfdiags.Diagnostics) {
	switch mode := opts.Mode; mode {
	case plans.NormalMode:
		graph, diags := (&PlanGraphBuilder{
			Config:             config,
			State:              prevRunState,
			RootVariableValues: opts.SetVariables,
			Plugins:            c.plugins,
			Targets:            opts.Targets,
			ForceReplace:       opts.ForceReplace,
			skipRefresh:        opts.SkipRefresh,
			preDestroyRefresh:  opts.PreDestroyRefresh,
			Operation:          walkPlan,
			ExternalReferences: opts.ExternalReferences,
			ImportTargets:      opts.ImportTargets,
			GenerateConfigPath: opts.GenerateConfigPath,
			EndpointsToRemove:  opts.EndpointsToRemove,
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlan, diags
	case plans.RefreshOnlyMode:
		graph, diags := (&PlanGraphBuilder{
			Config:             config,
			State:              prevRunState,
			RootVariableValues: opts.SetVariables,
			Plugins:            c.plugins,
			Targets:            opts.Targets,
			skipRefresh:        opts.SkipRefresh,
			skipPlanChanges:    true, // this activates "refresh only" mode.
			Operation:          walkPlan,
			ExternalReferences: opts.ExternalReferences,
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlan, diags
	case plans.DestroyMode:
		graph, diags := (&PlanGraphBuilder{
			Config:             config,
			State:              prevRunState,
			RootVariableValues: opts.SetVariables,
			Plugins:            c.plugins,
			Targets:            opts.Targets,
			skipRefresh:        opts.SkipRefresh,
			Operation:          walkPlanDestroy,
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlanDestroy, diags
	default:
		// The above should cover all plans.Mode values
		panic(fmt.Sprintf("unsupported plan mode %s", mode))
	}
}

// driftedResources is a best-effort attempt to compare the current and prior
// state. If we cannot decode the prior state for some reason, this should only
// return warnings to help the user correlate any missing resources in the
// report. This is known to happen when targeting a subset of resources,
// because the excluded instances will have been removed from the plan and
// not upgraded.
func (c *Context) driftedResources(config *configs.Config, oldState, newState *states.State, moves refactoring.MoveResults) ([]*plans.ResourceInstanceChangeSrc, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if newState.ManagedResourcesEqual(oldState) && moves.Changes.Len() == 0 {
		// Nothing to do, because we only detect and report drift for managed
		// resource instances.
		return nil, diags
	}

	schemas, schemaDiags := c.Schemas(config, newState)
	diags = diags.Append(schemaDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	var drs []*plans.ResourceInstanceChangeSrc

	for _, ms := range oldState.Modules {
		for _, rs := range ms.Resources {
			if rs.Addr.Resource.Mode != addrs.ManagedResourceMode {
				// Drift reporting is only for managed resources
				continue
			}

			provider := rs.ProviderConfig.Provider
			for key, oldIS := range rs.Instances {
				if oldIS.Current == nil {
					// Not interested in instances that only have deposed objects
					continue
				}
				addr := rs.Addr.Instance(key)

				// Previous run address defaults to the current address, but
				// can differ if the resource moved before refreshing
				prevRunAddr := addr
				if move, ok := moves.Changes.GetOk(addr); ok {
					prevRunAddr = move.From
				}

				newIS := newState.ResourceInstance(addr)

				schema, _ := schemas.ResourceTypeConfig(
					provider,
					addr.Resource.Resource.Mode,
					addr.Resource.Resource.Type,
				)
				if schema == nil {
					// Missing schema is reported as a warning rather than an
					// error so the rest of the drift report can still be built.
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Warning,
						"Missing resource schema from provider",
						fmt.Sprintf("No resource schema found for %s when decoding prior state", addr.Resource.Resource.Type),
					))
					continue
				}
				ty := schema.ImpliedType()

				oldObj, err := oldIS.Current.Decode(ty)
				if err != nil {
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Warning,
						"Failed to decode resource from state",
						fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err),
					))
					continue
				}

				var newObj *states.ResourceInstanceObject
				if newIS != nil && newIS.Current != nil {
					newObj, err = newIS.Current.Decode(ty)
					if err != nil {
						diags = diags.Append(tfdiags.Sourceless(
							tfdiags.Warning,
							"Failed to decode resource from state",
							fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err),
						))
						continue
					}
				}

				var oldVal, newVal cty.Value
				oldVal = oldObj.Value
				if newObj != nil {
					newVal = newObj.Value
				} else {
					// Instance no longer present in the new state: treat it
					// as a null value of the schema's implied type.
					newVal = cty.NullVal(ty)
				}

				if oldVal.RawEquals(newVal) && addr.Equal(prevRunAddr) {
					// No drift if the two values are semantically equivalent
					// and no move has happened
					continue
				}

				// We can detect three types of changes after refreshing state,
				// only two of which are easily understood as "drift":
				//
				// - Resources which were deleted outside OpenTofu;
				// - Resources where the object value has changed outside OpenTofu;
				// - Resources which have been moved without other changes.
				//
				// All of these are returned as drift, to allow refresh-only plans
				// to present a full set of changes which will be applied.
				var action plans.Action
				switch {
				case newVal.IsNull():
					action = plans.Delete
				case !oldVal.RawEquals(newVal):
					action = plans.Update
				default:
					action = plans.NoOp
				}

				change := &plans.ResourceInstanceChange{
					Addr:         addr,
					PrevRunAddr:  prevRunAddr,
					ProviderAddr: rs.ProviderConfig,
					Change: plans.Change{
						Action: action,
						Before: oldVal,
						After:  newVal,
					},
				}

				changeSrc, err := change.Encode(ty)
				if err != nil {
					diags = diags.Append(err)
					return nil, diags
				}

				drs = append(drs, changeSrc)
			}
		}
	}

	return drs, diags
}

// PlanGraphForUI is a last vestige of graphs in the public interface of Context
// (as opposed to graphs as an implementation detail) intended only for use
// by the "tofu graph" command when asked to render a plan-time graph.
//
// The result of this is intended only for rendering to the user as a dot
// graph, and so may change in future in order to make the result more useful
// in that context, even if drifts away from the physical graph that OpenTofu
// Core currently uses as an implementation detail of planning.
+func (c *Context) PlanGraphForUI(config *configs.Config, prevRunState *states.State, mode plans.Mode) (*Graph, tfdiags.Diagnostics) { + // For now though, this really is just the internal graph, confusing + // implementation details and all. + + var diags tfdiags.Diagnostics + + opts := &PlanOpts{Mode: mode} + + graph, _, moreDiags := c.planGraph(config, prevRunState, opts) + diags = diags.Append(moreDiags) + return graph, diags +} + +func blockedMovesWarningDiag(results refactoring.MoveResults) tfdiags.Diagnostic { + if results.Blocked.Len() < 1 { + // Caller should check first + panic("request to render blocked moves warning without any blocked moves") + } + + var itemsBuf bytes.Buffer + for _, blocked := range results.Blocked.Values() { + fmt.Fprintf(&itemsBuf, "\n - %s could not move to %s", blocked.Actual, blocked.Wanted) + } + + return tfdiags.Sourceless( + tfdiags.Warning, + "Unresolved resource instance address changes", + fmt.Sprintf( + "OpenTofu tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses:%s\n\nOpenTofu has planned to destroy these objects. If OpenTofu's proposed changes aren't appropriate, you must first resolve the conflicts using the \"tofu state\" subcommands and then create a new plan.", + itemsBuf.String(), + ), + ) +} + +// referenceAnalyzer returns a globalref.Analyzer object to help with +// global analysis of references within the configuration that's attached +// to the receiving context. 
+func (c *Context) referenceAnalyzer(config *configs.Config, state *states.State) (*globalref.Analyzer, tfdiags.Diagnostics) { + schemas, diags := c.Schemas(config, state) + if diags.HasErrors() { + return nil, diags + } + return globalref.NewAnalyzer(config, schemas.Providers), diags +} + +// relevantResourcesForPlan implements the heuristic we use to populate the +// RelevantResources field of returned plans. +func (c *Context) relevantResourceAttrsForPlan(config *configs.Config, plan *plans.Plan) ([]globalref.ResourceAttr, tfdiags.Diagnostics) { + azr, diags := c.referenceAnalyzer(config, plan.PriorState) + if diags.HasErrors() { + return nil, diags + } + + var refs []globalref.Reference + for _, change := range plan.Changes.Resources { + if change.Action == plans.NoOp { + continue + } + + moreRefs := azr.ReferencesFromResourceInstance(change.Addr) + refs = append(refs, moreRefs...) + } + + for _, change := range plan.Changes.Outputs { + if change.Action == plans.NoOp { + continue + } + + moreRefs := azr.ReferencesFromOutputValue(change.Addr) + refs = append(refs, moreRefs...) + } + + var contributors []globalref.ResourceAttr + + for _, ref := range azr.ContributingResourceReferences(refs...) { + if res, ok := ref.ResourceAttr(); ok { + contributors = append(contributors, res) + } + } + + return contributors, diags +} diff --git a/pkg/tofu/context_plan2_test.go b/pkg/tofu/context_plan2_test.go new file mode 100644 index 00000000000..a76fb409f82 --- /dev/null +++ b/pkg/tofu/context_plan2_test.go @@ -0,0 +1,7717 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + + // "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestContext2Plan_removedDuringRefresh(t *testing.T) { + // This tests the situation where an object tracked in the previous run + // state has been deleted outside OpenTofu, which we should detect + // during the refresh step and thus ultimately produce a plan to recreate + // the object, since it's still present in the configuration. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := simpleMockProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "arg": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = cty.NullVal(req.PriorState.Type()) + return resp + } + p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + // We should've been given the prior state JSON as our input to upgrade. 
+ if !bytes.Contains(req.RawStateJSON, []byte("previous_run")) { + t.Fatalf("UpgradeResourceState request doesn't contain the previous run object\n%s", req.RawStateJSON) + } + + // We'll put something different in "arg" as part of upgrading, just + // so that we can verify below that PrevRunState contains the upgraded + // (but NOT refreshed) version of the object. + resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ + "arg": cty.StringVal("upgraded"), + }) + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"arg":"previous_run"}`), + Status: states.ObjectTainted, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.UpgradeResourceStateCalled { + t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + + // The object should be absent from the plan's prior state, because that + // records the result of refreshing. + if got := plan.PriorState.ResourceInstance(addr); got != nil { + t.Errorf( + "instance %s is in the prior state after planning; should've been removed\n%s", + addr, spew.Sdump(got), + ) + } + + // However, the object should still be in the PrevRunState, because + // that reflects what we believed to exist before refreshing. 
+ if got := plan.PrevRunState.ResourceInstance(addr); got == nil { + t.Errorf( + "instance %s is missing from the previous run state after planning; should've been preserved", + addr, + ) + } else { + if !bytes.Contains(got.Current.AttrsJSON, []byte("upgraded")) { + t.Fatalf("previous run state has non-upgraded object\n%s", got.Current.AttrsJSON) + } + } + + // This situation should result in a drifted resource change. + var drifted *plans.ResourceInstanceChangeSrc + for _, dr := range plan.DriftedResources { + if dr.Addr.Equal(addr) { + drifted = dr + break + } + } + + if drifted == nil { + t.Errorf("instance %s is missing from the drifted resource changes", addr) + } else { + if got, want := drifted.Action, plans.Delete; got != want { + t.Errorf("unexpected instance %s drifted resource change action. got: %s, want: %s", addr, got, want) + } + } + + // Because the configuration still mentions test_object.a, we should've + // planned to recreate it in order to fix the drift. + for _, c := range plan.Changes.Resources { + if c.Action != plans.Create { + t.Fatalf("expected Create action for missing %s, got %s", c.Addr, c.Action) + } + } +} + +func TestContext2Plan_noChangeDataSourceSensitiveNestedSet(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "bar" { + sensitive = true + default = "baz" +} + +data "test_data_source" "foo" { + foo { + bar = var.bar + } +} +`, + }) + + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + + 
p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_id"), + "foo": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")})}), + }), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"data_id", "foo":[{"bar":"baz"}]}`), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("foo"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + assertNoErrors(t, diags) + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("expected NoOp, got: %q %s", res.Addr, res.Action) + } + } +} + +func TestContext2Plan_orphanDataInstance(t *testing.T) { + // ensure the planned replacement of the data source is evaluated properly + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_object" "a" { + for_each = { new = "ok" } +} + +output "out" { + value = [ for k, _ in data.test_object.a: k ] +} +`, + }) + + p := simpleMockProvider() + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`data.test_object.a["old"]`), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"test_string":"foo"}`), + Status: 
states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + change, err := plan.Changes.Outputs[0].Decode() + if err != nil { + t.Fatal(err) + } + + expected := cty.TupleVal([]cty.Value{cty.StringVal("new")}) + + if change.After.Equals(expected).False() { + t.Fatalf("expected %#v, got %#v\n", expected, change.After) + } +} + +func TestContext2Plan_basicConfigurationAliases(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "test" { + alias = "z" + test_string = "config" +} + +module "mod" { + source = "./mod" + providers = { + test.x = test.z + } +} +`, + + "mod/main.tf": ` +terraform { + required_providers { + test = { + source = "registry.opentofu.org/hashicorp/test" + configuration_aliases = [ test.x ] + } + } +} + +resource "test_object" "a" { + provider = test.x +} + +`, + }) + + p := simpleMockProvider() + + // The resource within the module should be using the provider configured + // from the root module. We should never see an empty configuration. 
+ p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + if req.Config.GetAttr("test_string").IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing test_string value")) + } + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) +} + +func TestContext2Plan_dataReferencesResourceInModules(t *testing.T) { + p := testProvider("test") + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + cfg := req.Config.AsValueMap() + cfg["id"] = cty.StringVal("d") + resp.State = cty.ObjectVal(cfg) + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + things = { + old = "first" + new = "second" + } +} + +module "mod" { + source = "./mod" + for_each = local.things +} +`, + + "./mod/main.tf": ` +resource "test_resource" "a" { +} + +data "test_data_source" "d" { + depends_on = [test_resource.a] +} + +resource "test_resource" "b" { + value = data.test_data_source.d.id +} +`}) + + oldDataAddr := mustResourceInstanceAddr(`module.mod["old"].data.test_data_source.d`) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`module.mod["old"].test_resource.a`), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"a"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + s.SetResourceInstanceCurrent( + mustResourceInstanceAddr(`module.mod["old"].test_resource.b`), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"b","value":"d"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + 
s.SetResourceInstanceCurrent( + oldDataAddr, + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"d"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + oldMod := oldDataAddr.Module + + for _, c := range plan.Changes.Resources { + // there should be no changes from the old module instance + if c.Addr.Module.Equal(oldMod) && c.Action != plans.NoOp { + t.Errorf("unexpected change %s for %s\n", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) { + // When a resource is in a nested module we have two levels of expansion + // to do: first expand the module the resource is declared in, and then + // expand the resource itself. + // + // In earlier versions of Terraform we did that expansion as two levels + // of DynamicExpand, which led to a bug where we didn't have any central + // location from which to register all of the instances of a checkable + // resource. + // + // We now handle the full expansion all in one graph node and one dynamic + // subgraph, which avoids the problem. This is a regression test for the + // earlier bug. If this test is panicking with "duplicate checkable objects + // report" then that suggests the bug is reintroduced and we're now back + // to reporting each module instance separately again, which is incorrect. 
+ + p := testProvider("test") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{}, + }, + ResourceTypes: map[string]providers.Schema{ + "test": { + Block: &configschema.Block{}, + }, + }, + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = req.PriorState + return resp + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = cty.EmptyObjectVal + return resp + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.NewState = req.PlannedState + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "child" { + source = "./child" + count = 2 # must be at least 2 for this test to be valid + } + `, + "child/child.tf": ` + locals { + a = "a" + } + + resource "test" "test1" { + lifecycle { + postcondition { + # It doesn't matter what this checks as long as it + # passes, because if we don't handle expansion properly + # then we'll crash before we even get to evaluating this. + condition = local.a == local.a + error_message = "Postcondition failed." + } + } + } + + resource "test" "test2" { + count = 2 + + lifecycle { + postcondition { + # It doesn't matter what this checks as long as it + # passes, because if we don't handle expansion properly + # then we'll crash before we even get to evaluating this. + condition = local.a == local.a + error_message = "Postcondition failed." 
+ } + } + } + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + priorState := states.NewState() + plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts) + assertNoErrors(t, diags) + + resourceInsts := []addrs.AbsResourceInstance{ + mustResourceInstanceAddr("module.child[0].test.test1"), + mustResourceInstanceAddr("module.child[0].test.test2[0]"), + mustResourceInstanceAddr("module.child[0].test.test2[1]"), + mustResourceInstanceAddr("module.child[1].test.test1"), + mustResourceInstanceAddr("module.child[1].test.test2[0]"), + mustResourceInstanceAddr("module.child[1].test.test2[1]"), + } + + for _, instAddr := range resourceInsts { + t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) { + if rc := plan.Changes.ResourceInstance(instAddr); rc != nil { + if got, want := rc.Action, plans.Create; got != want { + t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want) + } + if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want) + } + } else { + t.Errorf("no planned change for %s", instAddr) + } + + if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil { + if got, want := checkResult.Status, checks.StatusPass; got != want { + t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want) + } + } else { + t.Errorf("no check result for %s", instAddr) + } + }) + } +} + +func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) { + // This tests the situation where the remote system contains data that + // isn't valid per a data resource postcondition, but that the + // configuration is destined to make the remote system valid during apply + // and so we must defer reading the data resource and checking its + // conditions until the apply step. 
+ // + // This is an exception to the rule tested in + // TestContext2Plan_dataReferencesResourceIndirectly which is relevant + // whenever there's at least one precondition or postcondition attached + // to a data resource. + // + // See TestContext2Plan_managedResourceChecksOtherManagedResourceChange for + // an incorrect situation where a data resource is used only indirectly + // to drive a precondition elsewhere, which therefore doesn't achieve this + // special exception. + + p := testProvider("test") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{}, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "valid": { + Type: cty.Bool, + Required: true, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } + var mu sync.Mutex + validVal := cty.False + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // NOTE: This assumes that the prior state declared below will have + // "valid" set to false already, and thus will match validVal above. 
+ resp.NewState = req.PriorState + return resp + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + cfg := req.Config.AsValueMap() + mu.Lock() + cfg["valid"] = validVal + mu.Unlock() + resp.State = cty.ObjectVal(cfg) + return resp + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + cfg := req.Config.AsValueMap() + prior := req.PriorState.AsValueMap() + resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ + "id": prior["id"], + "valid": cfg["valid"], + }) + return resp + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + planned := req.PlannedState.AsValueMap() + + mu.Lock() + validVal = planned["valid"] + mu.Unlock() + + resp.NewState = req.PlannedState + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + +resource "test_resource" "a" { + valid = true +} + +locals { + # NOTE: We intentionally read through a local value here to make sure + # that this behavior still works even if there isn't a direct dependency + # between the data resource and the managed resource. + object_id = test_resource.a.id +} + +data "test_data_source" "a" { + id = local.object_id + + lifecycle { + postcondition { + condition = self.valid + error_message = "Not valid!" + } + } +} +`}) + + managedAddr := mustResourceInstanceAddr(`test_resource.a`) + dataAddr := mustResourceInstanceAddr(`data.test_data_source.a`) + + // This state is intended to represent the outcome of a previous apply that + // failed due to postcondition failure but had already updated the + // relevant object to be invalid. 
+ // + // It could also potentially represent a similar situation where the + // previous apply succeeded but there has been a change outside of + // OpenTofu that made it invalid, although technically in that scenario + // the state data would become invalid only during the planning step. For + // our purposes here that's close enough because we don't have a real + // remote system in place anyway. + priorState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + managedAddr, + &states.ResourceInstanceObjectSrc{ + // NOTE: "valid" is false here but is true in the configuration + // above, which is intended to represent that applying the + // configuration change would make this object become valid. + AttrsJSON: []byte(`{"id":"boop","valid":false}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts) + assertNoErrors(t, diags) + + if rc := plan.Changes.ResourceInstance(dataAddr); rc != nil { + if got, want := rc.Action, plans.Read; got != want { + t.Errorf("wrong action for %s\ngot: %s\nwant: %s", dataAddr, got, want) + } + if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseDependencyPending; got != want { + t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", dataAddr, got, want) + } + } else { + t.Fatalf("no planned change for %s", dataAddr) + } + + if rc := plan.Changes.ResourceInstance(managedAddr); rc != nil { + if got, want := rc.Action, plans.Update; got != want { + t.Errorf("wrong action for %s\ngot: %s\nwant: %s", managedAddr, got, want) + } + if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", managedAddr, got, want) + } + } else { 
+ t.Fatalf("no planned change for %s", managedAddr) + } + + // This is primarily a plan-time test, since the special handling of + // data resources is a plan-time concern, but we'll still try applying the + // plan here just to make sure it's valid. + newState, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + if rs := newState.ResourceInstance(dataAddr); rs != nil { + if !rs.HasCurrent() { + t.Errorf("no final state for %s", dataAddr) + } + } else { + t.Errorf("no final state for %s", dataAddr) + } + + if rs := newState.ResourceInstance(managedAddr); rs != nil { + if !rs.HasCurrent() { + t.Errorf("no final state for %s", managedAddr) + } + } else { + t.Errorf("no final state for %s", managedAddr) + } + + if got, want := validVal, cty.True; got != want { + t.Errorf("wrong final valid value\ngot: %#v\nwant: %#v", got, want) + } + +} + +func TestContext2Plan_managedResourceChecksOtherManagedResourceChange(t *testing.T) { + // This tests the incorrect situation where a managed resource checks + // another managed resource indirectly via a data resource. + // This doesn't work because OpenTofu can't tell that the data resource + // outcome will be updated by a separate managed resource change and so + // we expect it to fail. + // This would ideally have worked except that we previously included a + // special case in the rules for data resources where they only consider + // direct dependencies when deciding whether to defer (except when the + // data resource itself has conditions) and so they can potentially + // read "too early" if the user creates the explicitly-not-recommended + // situation of a data resource and a managed resource in the same + // configuration both representing the same remote object. 
+ + p := testProvider("test") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{}, + }, + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "valid": { + Type: cty.Bool, + Required: true, + }, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Required: true, + }, + "valid": { + Type: cty.Bool, + Computed: true, + }, + }, + }, + }, + }, + } + var mu sync.Mutex + validVal := cty.False + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // NOTE: This assumes that the prior state declared below will have + // "valid" set to false already, and thus will match validVal above. 
+ resp.NewState = req.PriorState + return resp + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + cfg := req.Config.AsValueMap() + if cfg["id"].AsString() == "main" { + mu.Lock() + cfg["valid"] = validVal + mu.Unlock() + } + resp.State = cty.ObjectVal(cfg) + return resp + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + cfg := req.Config.AsValueMap() + prior := req.PriorState.AsValueMap() + resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ + "id": prior["id"], + "valid": cfg["valid"], + }) + return resp + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + planned := req.PlannedState.AsValueMap() + + if planned["id"].AsString() == "main" { + mu.Lock() + validVal = planned["valid"] + mu.Unlock() + } + + resp.NewState = req.PlannedState + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + +resource "test_resource" "a" { + valid = true +} + +locals { + # NOTE: We intentionally read through a local value here because a + # direct reference from data.test_data_source.a to test_resource.a would + # cause OpenTofu to defer the data resource to the apply phase due to + # there being a pending change for the managed resource. We're explicitly + # testing the failure case where the data resource read happens too + # eagerly, which is what results from the reference being only indirect + # so OpenTofu can't "see" that the data resource result might be affected + # by changes to the managed resource. + object_id = test_resource.a.id +} + +data "test_data_source" "a" { + id = local.object_id +} + +resource "test_resource" "b" { + valid = true + + lifecycle { + precondition { + condition = data.test_data_source.a.valid + error_message = "Not valid!" 
+ } + } +} +`}) + + managedAddrA := mustResourceInstanceAddr(`test_resource.a`) + managedAddrB := mustResourceInstanceAddr(`test_resource.b`) + + // This state is intended to represent the outcome of a previous apply that + // failed due to postcondition failure but had already updated the + // relevant object to be invalid. + // + // It could also potentially represent a similar situation where the + // previous apply succeeded but there has been a change outside of + // OpenTofu that made it invalid, although technically in that scenario + // the state data would become invalid only during the planning step. For + // our purposes here that's close enough because we don't have a real + // remote system in place anyway. + priorState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + managedAddrA, + &states.ResourceInstanceObjectSrc{ + // NOTE: "valid" is false here but is true in the configuration + // above, which is intended to represent that applying the + // configuration change would make this object become valid. 
+ AttrsJSON: []byte(`{"id":"main","valid":false}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + s.SetResourceInstanceCurrent( + managedAddrB, + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"checker","valid":true}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, priorState, DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("unexpected successful plan; should've failed with non-passing precondition") + } + + if got, want := diags.Err().Error(), "Resource precondition failed: Not valid!"; !strings.Contains(got, want) { + t.Errorf("Missing expected error message\ngot: %s\nwant substring: %s", got, want) + } +} + +func TestContext2Plan_destroyWithRefresh(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := simpleMockProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "arg": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + // This is called from the first instance of this provider, so we can't + // check p.ReadResourceCalled after plan. 
+	readResourceCalled := false
+	p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
+		readResourceCalled = true
+		newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
+			if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
+				return cty.StringVal("current"), nil
+			}
+			return v, nil
+		})
+		if err != nil {
+			// shouldn't get here
+			t.Fatalf("ReadResourceFn transform failed")
+			return providers.ReadResourceResponse{}
+		}
+		return providers.ReadResourceResponse{
+			NewState: newVal,
+		}
+	}
+
+	upgradeResourceStateCalled := false
+	p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
+		upgradeResourceStateCalled = true
+		t.Logf("UpgradeResourceState %s", req.RawStateJSON)
+
+		// In the destroy-with-refresh codepath we end up calling
+		// UpgradeResourceState twice, because we do so once during refreshing
+		// (as part of making a normal plan) and then again during the plan-destroy
+		// walk. The second call receives the result of the earlier refresh,
+		// so we need to tolerate both "before" and "current" as possible
+		// inputs here.
+		if !bytes.Contains(req.RawStateJSON, []byte("before")) {
+			if !bytes.Contains(req.RawStateJSON, []byte("current")) {
+				t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object or the 'current' object\n%s", req.RawStateJSON)
+			}
+		}
+
+		// We'll put something different in "arg" as part of upgrading, just
+		// so that we can verify below that PrevRunState contains the upgraded
+		// (but NOT refreshed) version of the object.
+ resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ + "arg": cty.StringVal("upgraded"), + }) + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"arg":"before"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + SkipRefresh: false, // the default + }) + assertNoErrors(t, diags) + + if !upgradeResourceStateCalled { + t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") + } + if !readResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + + if plan.PriorState == nil { + t.Fatal("missing plan state") + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.Delete { + t.Errorf("unexpected %s change for %s", c.Action, c.Addr) + } + } + + if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no previous run state at all after plan", addr) + } else { + if instState.Current == nil { + t.Errorf("%s has no current object in the previous run state", addr) + } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { + t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } + if instState := plan.PriorState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no prior state at all after plan", addr) + } else { + if instState.Current == nil { + t.Errorf("%s has no current object in the prior state", addr) + } else if got, want := instState.Current.AttrsJSON, `"current"`; 
!bytes.Contains(got, []byte(want)) { + t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } +} + +func TestContext2Plan_destroySkipRefresh(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := simpleMockProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "arg": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + t.Helper() + t.Errorf("unexpected call to ReadResource") + resp.NewState = req.PriorState + return resp + } + p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + t.Logf("UpgradeResourceState %s", req.RawStateJSON) + // We should've been given the prior state JSON as our input to upgrade. + if !bytes.Contains(req.RawStateJSON, []byte("before")) { + t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) + } + + // We'll put something different in "arg" as part of upgrading, just + // so that we can verify below that PrevRunState contains the upgraded + // (but NOT refreshed) version of the object. 
+ resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ + "arg": cty.StringVal("upgraded"), + }) + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"arg":"before"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + SkipRefresh: true, + }) + assertNoErrors(t, diags) + + if !p.UpgradeResourceStateCalled { + t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") + } + if p.ReadResourceCalled { + t.Errorf("Provider's ReadResource was called; shouldn't have been") + } + + if plan.PriorState == nil { + t.Fatal("missing plan state") + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.Delete { + t.Errorf("unexpected %s change for %s", c.Action, c.Addr) + } + } + + if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no previous run state at all after plan", addr) + } else { + if instState.Current == nil { + t.Errorf("%s has no current object in the previous run state", addr) + } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { + t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } + if instState := plan.PriorState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no prior state at all after plan", addr) + } else { + if instState.Current == nil { + t.Errorf("%s has no current object in the prior state", addr) + } else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, 
[]byte(want)) { + // NOTE: The prior state should still have been _upgraded_, even + // though we skipped running refresh after upgrading it. + t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } +} + +func TestContext2Plan_unmarkingSensitiveAttributeForOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "foo" { +} + +output "result" { + value = nonsensitive(test_resource.foo.sensitive_attr) +} +`, + }) + + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "sensitive_attr": { + Type: cty.String, + Computed: true, + Sensitive: true, + }, + }, + }, + }, + }) + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.UnknownVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + "sensitive_attr": cty.String, + })), + } + } + + state := states.NewState() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected create, got: %q %s", res.Addr, res.Action) + } + } +} + +func TestContext2Plan_destroyNoProviderConfig(t *testing.T) { + // providers do not need to be configured during a destroy plan + p := simpleMockProvider() + p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + v := req.Config.GetAttr("test_string") + if v.IsNull() || 
!v.IsKnown() || v.AsString() != "ok" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid provider configuration: %#v", req.Config)) + } + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + value = "ok" +} + +provider "test" { + test_string = local.value +} +`, + }) + + addr := mustResourceInstanceAddr("test_object.a") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"test_string":"foo"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) +} + +func TestContext2Plan_movedResourceBasic(t *testing.T) { + addrA := mustResourceInstanceAddr("test_object.a") + addrB := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "b" { + } + + moved { + from = test_object.a + to = test_object.b + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks test_object.a, which we should treat as + // test_object.b because of the "moved" block in the config. 
+ s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addrA, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addrA.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrA) + if instPlan != nil { + t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB) + } + }) + t.Run(addrB.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrB) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrB) + } + + if got, want := instPlan.Addr, addrB; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrA; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_movedResourceCollision(t *testing.T) { + addrNoKey := mustResourceInstanceAddr("test_object.a") + addrZeroKey := mustResourceInstanceAddr("test_object.a[0]") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "a" { + # No "count" set, so test_object.a[0] will want + # to implicitly move to test_object.a, but will get + # blocked by the existing object at that 
address. + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addrZeroKey, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + // We should have a warning, though! We'll lightly abuse the "for RPC" + // feature of diagnostics to get some more-readily-comparable diagnostic + // values. + gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Unresolved resource instance address changes", + `OpenTofu tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses: + - test_object.a[0] could not move to test_object.a + +OpenTofu has planned to destroy these objects. 
If OpenTofu's proposed changes aren't appropriate, you must first resolve the conflicts using the "tofu state" subcommands and then create a new plan.`, + ), + }.ForRPC() + if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { + t.Errorf("wrong diagnostics\n%s", diff) + } + + t.Run(addrNoKey.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrNoKey) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrNoKey) + } + + if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + t.Run(addrZeroKey.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrZeroKey) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrZeroKey) + } + + if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Delete; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_movedResourceCollisionDestroy(t *testing.T) { + // This is like TestContext2Plan_movedResourceCollision but intended to + // ensure we still produce the expected 
warning (and produce it only once) + // when we're creating a destroy plan, rather than a normal plan. + // (This case is interesting at the time of writing because we happen to + // use a normal plan as a trick to refresh before creating a destroy plan. + // This test will probably become uninteresting if a future change to + // the destroy-time planning behavior handles refreshing in a different + // way, which avoids this pre-processing step of running a normal plan + // first.) + + addrNoKey := mustResourceInstanceAddr("test_object.a") + addrZeroKey := mustResourceInstanceAddr("test_object.a[0]") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "a" { + # No "count" set, so test_object.a[0] will want + # to implicitly move to test_object.a, but will get + # blocked by the existing object at that address. + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addrZeroKey, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + // We should have a warning, though! We'll lightly abuse the "for RPC" + // feature of diagnostics to get some more-readily-comparable diagnostic + // values. 
+ gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Unresolved resource instance address changes", + // NOTE: This message is _lightly_ confusing in the destroy case, + // because it says "OpenTofu has planned to destroy these objects" + // but this is a plan to destroy all objects, anyway. We expect the + // conflict situation to be pretty rare though, and even rarer in + // a "tofu destroy", so we'll just live with that for now + // unless we see evidence that lots of folks are being confused by + // it in practice. + `OpenTofu tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses: + - test_object.a[0] could not move to test_object.a + +OpenTofu has planned to destroy these objects. If OpenTofu's proposed changes aren't appropriate, you must first resolve the conflicts using the "tofu state" subcommands and then create a new plan.`, + ), + }.ForRPC() + if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { + // If we get here with a diff that makes it seem like the above warning + // is being reported twice, the likely cause is not correctly handling + // the warnings from the hidden normal plan we run as part of preparing + // for a destroy plan, unless that strategy has changed in the meantime + // since we originally wrote this test. 
+ t.Errorf("wrong diagnostics\n%s", diff) + } + + t.Run(addrNoKey.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrNoKey) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrNoKey) + } + + if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Delete; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + t.Run(addrZeroKey.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrZeroKey) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrZeroKey) + } + + if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Delete; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_movedResourceUntargeted(t *testing.T) { + addrA := mustResourceInstanceAddr("test_object.a") + addrB := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "b" { + } + + moved { + from = test_object.a + to = test_object.b + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks 
test_object.a, which we should treat as + // test_object.b because of the "moved" block in the config. + s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("without targeting instance A", func(t *testing.T) { + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + // NOTE: addrA isn't included here, but it's pending move to addrB + // and so this plan request is invalid. + addrB, + }, + }) + diags.Sort() + + // We're semi-abusing "ForRPC" here just to get diagnostics that are + // more easily comparable than the various different diagnostics types + // tfdiags uses internally. The RPC-friendly diagnostics are also + // comparison-friendly, by discarding all of the dynamic type information. + gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. + +The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`, + ), + tfdiags.Sourceless( + tfdiags.Error, + "Moved resource instances excluded by targeting", + `Resource instances in your current state have moved to new addresses in the latest configuration. OpenTofu must include those resource instances while planning in order to ensure a correct result, but your -target=... 
options do not fully cover all of those resource instances. + +To create a valid plan, either remove your -target=... options altogether or add the following additional target options: + -target="test_object.a" + +Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, + ), + }.ForRPC() + + if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { + t.Errorf("wrong diagnostics\n%s", diff) + } + }) + t.Run("without targeting instance B", func(t *testing.T) { + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrA, + // NOTE: addrB isn't included here, but it's pending move from + // addrA and so this plan request is invalid. + }, + }) + diags.Sort() + + // We're semi-abusing "ForRPC" here just to get diagnostics that are + // more easily comparable than the various different diagnostics types + // tfdiags uses internally. The RPC-friendly diagnostics are also + // comparison-friendly, by discarding all of the dynamic type information. + gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. + +The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`, + ), + tfdiags.Sourceless( + tfdiags.Error, + "Moved resource instances excluded by targeting", + `Resource instances in your current state have moved to new addresses in the latest configuration. OpenTofu must include those resource instances while planning in order to ensure a correct result, but your -target=... options do not fully cover all of those resource instances. 
+ +To create a valid plan, either remove your -target=... options altogether or add the following additional target options: + -target="test_object.b" + +Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, + ), + }.ForRPC() + + if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { + t.Errorf("wrong diagnostics\n%s", diff) + } + }) + t.Run("without targeting either instance", func(t *testing.T) { + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + mustResourceInstanceAddr("test_object.unrelated"), + // NOTE: neither addrA nor addrB are included here, but there's + // a pending move between them and so this is invalid. + }, + }) + diags.Sort() + + // We're semi-abusing "ForRPC" here just to get diagnostics that are + // more easily comparable than the various different diagnostics types + // tfdiags uses internally. The RPC-friendly diagnostics are also + // comparison-friendly, by discarding all of the dynamic type information. + gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. + +The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`, + ), + tfdiags.Sourceless( + tfdiags.Error, + "Moved resource instances excluded by targeting", + `Resource instances in your current state have moved to new addresses in the latest configuration. OpenTofu must include those resource instances while planning in order to ensure a correct result, but your -target=... options do not fully cover all of those resource instances. 
+ +To create a valid plan, either remove your -target=... options altogether or add the following additional target options: + -target="test_object.a" + -target="test_object.b" + +Note that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.`, + ), + }.ForRPC() + + if diff := cmp.Diff(wantDiags, gotDiags); diff != "" { + t.Errorf("wrong diagnostics\n%s", diff) + } + }) + t.Run("with both addresses in the target set", func(t *testing.T) { + // The error messages in the other subtests above suggest adding + // addresses to the set of targets. This additional test makes sure that + // following that advice actually leads to a valid result. + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + // This time we're including both addresses in the target, + // to get the same effect an end-user would get if following + // the advice in our error message in the other subtests. + addrA, + addrB, + }, + }) + diags.Sort() + + // We're semi-abusing "ForRPC" here just to get diagnostics that are + // more easily comparable than the various different diagnostics types + // tfdiags uses internally. The RPC-friendly diagnostics are also + // comparison-friendly, by discarding all of the dynamic type information. + gotDiags := diags.ForRPC() + wantDiags := tfdiags.Diagnostics{ + // Still get the warning about the -target option... + tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. 
+
+The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when OpenTofu specifically suggests to use it as part of an error message.`,
+			),
+			// ...but now we have no error about test_object.a
+		}.ForRPC()
+
+		if diff := cmp.Diff(wantDiags, gotDiags); diff != "" {
+			t.Errorf("wrong diagnostics\n%s", diff)
+		}
+	})
+}
+
+func TestContext2Plan_untargetedResourceSchemaChange(t *testing.T) {
+	// an untargeted resource which requires a schema migration should not
+	// block planning due to external changes in the plan.
+	addrA := mustResourceInstanceAddr("test_object.a")
+	addrB := mustResourceInstanceAddr("test_object.b")
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "a" {
+}
+resource "test_object" "b" {
+}`,
+	})
+
+	state := states.BuildState(func(s *states.SyncState) {
+		s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{
+			AttrsJSON: []byte(`{}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
+		s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{
+			// old_list is no longer in the schema
+			AttrsJSON: []byte(`{"old_list":["used to be","a list here"]}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
+	})
+
+	p := simpleMockProvider()
+
+	// external changes trigger a "drift report", but because test_object.b was
+	// not targeted, the state was not fixed to match the schema and cannot be
+	// decoded for the report.
+ p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + obj := req.PriorState.AsValueMap() + // test_number changed externally + obj["test_number"] = cty.NumberIntVal(1) + resp.NewState = cty.ObjectVal(obj) + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrA, + }, + }) + // + assertNoErrors(t, diags) +} + +func TestContext2Plan_movedResourceRefreshOnly(t *testing.T) { + addrA := mustResourceInstanceAddr("test_object.a") + addrB := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "b" { + } + + moved { + from = test_object.a + to = test_object.b + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks test_object.a, which we should treat as + // test_object.b because of the "moved" block in the config. 
+ s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.RefreshOnlyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addrA.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrA) + if instPlan != nil { + t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB) + } + }) + t.Run(addrB.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrB) + if instPlan != nil { + t.Fatalf("unexpected plan for %s", addrB) + } + }) + t.Run("drift", func(t *testing.T) { + var drifted *plans.ResourceInstanceChangeSrc + for _, dr := range plan.DriftedResources { + if dr.Addr.Equal(addrB) { + drifted = dr + break + } + } + + if drifted == nil { + t.Fatalf("instance %s is missing from the drifted resource changes", addrB) + } + + if got, want := drifted.PrevRunAddr, addrA; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := drifted.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_refreshOnlyMode(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + + // The configuration, the prior state, and the refresh result intentionally + // have different values for "test_string" so we can observe that the + // refresh took effect but the configuration change wasn't considered. 
	m := testModuleInline(t, map[string]string{
		"main.tf": `
			resource "test_object" "a" {
				arg = "after"
			}

			output "out" {
				value = test_object.a.arg
			}
		`,
	})
	state := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{"arg":"before"}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
	})

	p := simpleMockProvider()
	// Override the default mock schema so test_object has a single optional
	// string attribute "arg".
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		Provider: providers.Schema{Block: simpleTestSchema()},
		ResourceTypes: map[string]providers.Schema{
			"test_object": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"arg": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	// Refreshing rewrites "arg" to "current", distinct from both the stored
	// value and the configured value, so we can tell below which result got
	// saved where.
	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
		newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
			if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
				return cty.StringVal("current"), nil
			}
			return v, nil
		})
		if err != nil {
			// shouldn't get here
			t.Fatalf("ReadResourceFn transform failed")
			return providers.ReadResourceResponse{}
		}
		return providers.ReadResourceResponse{
			NewState: newVal,
		}
	}
	p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
		// We should've been given the prior state JSON as our input to upgrade.
		if !bytes.Contains(req.RawStateJSON, []byte("before")) {
			t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
		}

		// We'll put something different in "arg" as part of upgrading, just
		// so that we can verify below that PrevRunState contains the upgraded
		// (but NOT refreshed) version of the object.
		resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
			"arg": cty.StringVal("upgraded"),
		})
		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.RefreshOnlyMode,
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
	}

	if !p.UpgradeResourceStateCalled {
		t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
	}
	if !p.ReadResourceCalled {
		t.Errorf("Provider's ReadResource wasn't called; should've been")
	}

	// A refresh-only plan must not propose any resource changes.
	if got, want := len(plan.Changes.Resources), 0; got != want {
		t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources))
	}

	// PriorState should hold the refreshed ("current") object.
	if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
		t.Errorf("%s has no prior state at all after plan", addr)
	} else {
		if instState.Current == nil {
			t.Errorf("%s has no current object after plan", addr)
		} else if got, want := instState.Current.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
			// Should've saved the result of refreshing
			t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
		}
	}
	// PrevRunState should hold the upgraded (but NOT refreshed) object.
	if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
		t.Errorf("%s has no previous run state at all after plan", addr)
	} else {
		if instState.Current == nil {
			t.Errorf("%s has no current object in the previous run state", addr)
		} else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
			// Should've saved the result of upgrading
			t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
		}
	}

	// The output value should also have updated. If not, it's likely that we
	// skipped updating the working state to match the refreshed state when we
	// were evaluating the resource.
	if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil {
		t.Errorf("no change planned for output value 'out'")
	} else {
		outChange, err := outChangeSrc.Decode()
		if err != nil {
			t.Fatalf("failed to decode output value 'out': %s", err)
		}
		got := outChange.After
		want := cty.StringVal("current")
		if !want.RawEquals(got) {
			t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want)
		}
	}
}

// TestContext2Plan_refreshOnlyMode_deposed is the deposed-object variant of
// TestContext2Plan_refreshOnlyMode: the refreshed/upgraded results must land
// on the deposed object, and refresh-only mode must not plan the
// create+destroy that a normal plan would propose here.
func TestContext2Plan_refreshOnlyMode_deposed(t *testing.T) {
	addr := mustResourceInstanceAddr("test_object.a")
	deposedKey := states.DeposedKey("byebye")

	// The configuration, the prior state, and the refresh result intentionally
	// have different values for "arg" so we can observe that the
	// refresh took effect but the configuration change wasn't considered.
	m := testModuleInline(t, map[string]string{
		"main.tf": `
			resource "test_object" "a" {
				arg = "after"
			}

			output "out" {
				value = test_object.a.arg
			}
		`,
	})
	state := states.BuildState(func(s *states.SyncState) {
		// Note that we're intentionally recording a _deposed_ object here,
		// and not including a current object, so a normal (non-refresh)
		// plan would normally plan to create a new object _and_ destroy
		// the deposed one, but refresh-only mode should prevent that.
		s.SetResourceInstanceDeposed(addr, deposedKey, &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{"arg":"before"}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
	})

	p := simpleMockProvider()
	// Override the default mock schema so test_object has a single optional
	// string attribute "arg".
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		Provider: providers.Schema{Block: simpleTestSchema()},
		ResourceTypes: map[string]providers.Schema{
			"test_object": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"arg": {Type: cty.String, Optional: true},
					},
				},
			},
		},
	}
	// Refreshing rewrites "arg" to "current" so we can tell below which
	// result got saved where.
	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
		newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
			if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
				return cty.StringVal("current"), nil
			}
			return v, nil
		})
		if err != nil {
			// shouldn't get here
			t.Fatalf("ReadResourceFn transform failed")
			return providers.ReadResourceResponse{}
		}
		return providers.ReadResourceResponse{
			NewState: newVal,
		}
	}
	p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
		// We should've been given the prior state JSON as our input to upgrade.
		if !bytes.Contains(req.RawStateJSON, []byte("before")) {
			t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
		}

		// We'll put something different in "arg" as part of upgrading, just
		// so that we can verify below that PrevRunState contains the upgraded
		// (but NOT refreshed) version of the object.
		resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
			"arg": cty.StringVal("upgraded"),
		})
		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.RefreshOnlyMode,
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
	}

	if !p.UpgradeResourceStateCalled {
		t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
	}
	if !p.ReadResourceCalled {
		t.Errorf("Provider's ReadResource wasn't called; should've been")
	}

	// A refresh-only plan must not propose any resource changes, even for
	// the missing current object.
	if got, want := len(plan.Changes.Resources), 0; got != want {
		t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources))
	}

	// The deposed object in PriorState should hold the refreshed result.
	if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
		t.Errorf("%s has no prior state at all after plan", addr)
	} else {
		if obj := instState.Deposed[deposedKey]; obj == nil {
			t.Errorf("%s has no deposed object after plan", addr)
		} else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
			// Should've saved the result of refreshing
			t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
		}
	}
	// The deposed object in PrevRunState should hold the upgraded (but NOT
	// refreshed) result.
	if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
		t.Errorf("%s has no previous run state at all after plan", addr)
	} else {
		if obj := instState.Deposed[deposedKey]; obj == nil {
			t.Errorf("%s has no deposed object in the previous run state", addr)
		} else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
			// Should've saved the result of upgrading
			t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
		}
	}

	// The output value should also have updated. If not, it's likely that we
	// skipped updating the working state to match the refreshed state when we
	// were evaluating the resource.
	if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil {
		t.Errorf("no change planned for output value 'out'")
	} else {
		outChange, err := outChangeSrc.Decode()
		if err != nil {
			t.Fatalf("failed to decode output value 'out': %s", err)
		}
		got := outChange.After
		// There is no current object for test_object.a, so the output can
		// only be unknown in this plan.
		want := cty.UnknownVal(cty.String)
		if !want.RawEquals(got) {
			t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want)
		}
	}

	// Deposed objects should not be represented in drift.
	if len(plan.DriftedResources) > 0 {
		t.Errorf("unexpected drifted resources (%d)", len(plan.DriftedResources))
	}
}

// TestContext2Plan_refreshOnlyMode_orphan covers refresh-only planning when
// the state has more instances (count 2) than the configuration declares
// (count 1): the orphaned instance must still be refreshed, not destroyed.
func TestContext2Plan_refreshOnlyMode_orphan(t *testing.T) {
	addr := mustAbsResourceAddr("test_object.a")

	// The configuration, the prior state, and the refresh result intentionally
	// have different values for "arg" so we can observe that the
	// refresh took effect but the configuration change wasn't considered.
+ m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "a" { + arg = "after" + count = 1 + } + + output "out" { + value = test_object.a.*.arg + } + `, + }) + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr.Instance(addrs.IntKey(0)), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"arg":"before"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addr.Instance(addrs.IntKey(1)), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"arg":"before"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "arg": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { + if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) { + return cty.StringVal("current"), nil + } + return v, nil + }) + if err != nil { + // shouldn't get here + t.Fatalf("ReadResourceFn transform failed") + return providers.ReadResourceResponse{} + } + return providers.ReadResourceResponse{ + NewState: newVal, + } + } + p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + // We should've been given the prior state JSON as our input to upgrade. 
+ if !bytes.Contains(req.RawStateJSON, []byte("before")) { + t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON) + } + + // We'll put something different in "arg" as part of upgrading, just + // so that we can verify below that PrevRunState contains the upgraded + // (but NOT refreshed) version of the object. + resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ + "arg": cty.StringVal("upgraded"), + }) + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.RefreshOnlyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + if !p.UpgradeResourceStateCalled { + t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + + if got, want := len(plan.Changes.Resources), 0; got != want { + t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources)) + } + + if rState := plan.PriorState.Resource(addr); rState == nil { + t.Errorf("%s has no prior state at all after plan", addr) + } else { + for i := 0; i < 2; i++ { + instKey := addrs.IntKey(i) + if obj := rState.Instance(instKey).Current; obj == nil { + t.Errorf("%s%s has no object after plan", addr, instKey) + } else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) { + // Should've saved the result of refreshing + t.Errorf("%s%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, instKey, got, want) + } + } + } + if rState := plan.PrevRunState.Resource(addr); rState == nil { + t.Errorf("%s has no prior state at all after plan", addr) + } else { + for i := 0; i < 2; i++ { + instKey := addrs.IntKey(i) + if obj := 
rState.Instance(instKey).Current; obj == nil { + t.Errorf("%s%s has no object after plan", addr, instKey) + } else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { + // Should've saved the result of upgrading + t.Errorf("%s%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, instKey, got, want) + } + } + } + + // The output value should also have updated. If not, it's likely that we + // skipped updating the working state to match the refreshed state when we + // were evaluating the resource. + if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil { + t.Errorf("no change planned for output value 'out'") + } else { + outChange, err := outChangeSrc.Decode() + if err != nil { + t.Fatalf("failed to decode output value 'out': %s", err) + } + got := outChange.After + want := cty.TupleVal([]cty.Value{cty.StringVal("current"), cty.StringVal("current")}) + if !want.RawEquals(got) { + t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want) + } + } +} + +func TestContext2Plan_invalidSensitiveModuleOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "child/main.tf": ` +output "out" { + value = sensitive("xyz") +}`, + "main.tf": ` +module "child" { + source = "./child" +} + +output "root" { + value = module.child.out +}`, + }) + + ctx := testContext2(t, &ContextOpts{}) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_planDataSourceSensitiveNested(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "bar" { +} + +data "test_data_source" "foo" { + foo { + bar = test_instance.bar.sensitive 
+ } +} +`, + }) + + p := new(MockProvider) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = cty.ObjectVal(map[string]cty.Value{ + "sensitive": cty.UnknownVal(cty.String), + }) + return resp + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "sensitive": { + Type: cty.String, + Computed: true, + Sensitive: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"string":"data_id", "foo":[{"bar":"old"}]}`), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("foo"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"sensitive":"old"}`), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("sensitive"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + 
+ ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_instance.bar": + if res.Action != plans.Update { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + case "data.test_data_source.foo": + if res.Action != plans.Read { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } +} + +func TestContext2Plan_forceReplace(t *testing.T) { + addrA := mustResourceInstanceAddr("test_object.a") + addrB := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "a" { + } + resource "test_object" "b" { + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addrB, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addrA, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addrA.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrA) + if instPlan == nil { + 
			t.Fatalf("no plan for %s at all", addrA)
		}

		if got, want := instPlan.Action, plans.DeleteThenCreate; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceReplaceByRequest; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})
	t.Run(addrB.String(), func(t *testing.T) {
		// The instance that wasn't named by ForceReplace must be unaffected.
		instPlan := plan.Changes.ResourceInstance(addrB)
		if instPlan == nil {
			t.Fatalf("no plan for %s at all", addrB)
		}

		if got, want := instPlan.Action, plans.NoOp; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})
}

// TestContext2Plan_forceReplaceIncompleteAddr verifies that a -replace
// address without an instance key, applied to a multi-instance resource,
// produces a warning and forces no replacement.
func TestContext2Plan_forceReplaceIncompleteAddr(t *testing.T) {
	addr0 := mustResourceInstanceAddr("test_object.a[0]")
	addr1 := mustResourceInstanceAddr("test_object.a[1]")
	addrBare := mustResourceInstanceAddr("test_object.a")
	m := testModuleInline(t, map[string]string{
		"main.tf": `
			resource "test_object" "a" {
				count = 2
			}
		`,
	})

	state := states.BuildState(func(s *states.SyncState) {
		s.SetResourceInstanceCurrent(addr0, &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
		s.SetResourceInstanceCurrent(addr1, &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
	})

	p := simpleMockProvider()
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.NormalMode,
		ForceReplace: []addrs.AbsResourceInstance{
			addrBare,
		},
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
	}
	// The incomplete address is reported as a warning, not an error.
	diagsErr := diags.ErrWithWarnings()
	if diagsErr == nil {
		t.Fatalf("no warnings were returned")
	}
	if got, want := diagsErr.Error(), "Incompletely-matched force-replace resource instance"; !strings.Contains(got, want) {
		t.Errorf("missing expected warning\ngot:\n%s\n\nwant substring: %s", got, want)
	}

	// Neither instance should be replaced, since addrBare matched neither
	// exactly.
	t.Run(addr0.String(), func(t *testing.T) {
		instPlan := plan.Changes.ResourceInstance(addr0)
		if instPlan == nil {
			t.Fatalf("no plan for %s at all", addr0)
		}

		if got, want := instPlan.Action, plans.NoOp; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})
	t.Run(addr1.String(), func(t *testing.T) {
		instPlan := plan.Changes.ResourceInstance(addr1)
		if instPlan == nil {
			t.Fatalf("no plan for %s at all", addr1)
		}

		if got, want := instPlan.Action, plans.NoOp; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})
}

// Verify that adding a module instance does not force existing module data
// sources to be deferred: the data sources already in state must plan as
// no-ops. (The assertions below check exactly that for the pre-existing
// "a" instances.)
func TestContext2Plan_noChangeDataSourceAddingModuleInstance(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
locals {
  data = {
    a = "a"
    b = "b"
  }
}

module "one" {
  source   = "./mod"
  for_each = local.data
  input    = each.value
}

module "two" {
  source   = "./mod"
  for_each = module.one
  input    = each.value.output
}
`,
		"mod/main.tf": `
variable "input" {
}

resource "test_resource" "x" {
  value = var.input
}

data "test_data_source" "d" {
  foo = test_resource.x.id
}

output "output" {
  value = test_resource.x.id
}
`,
	})

	p := testProvider("test")
	p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
		State: cty.ObjectVal(map[string]cty.Value{
			"id":  cty.StringVal("data"),
			"foo": cty.StringVal("foo"),
		}),
	}
	// Only the "a" instances of both modules exist in state; the "b"
	// instances are new in this plan.
	state := states.NewState()
	modOne := addrs.RootModuleInstance.Child("one", addrs.StringKey("a"))
	modTwo := addrs.RootModuleInstance.Child("two", addrs.StringKey("a"))
	one := state.EnsureModule(modOne)
	two := state.EnsureModule(modTwo)
	one.SetResourceInstanceCurrent(
		mustResourceInstanceAddr(`test_resource.x`).Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo","value":"a"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)
	one.SetResourceInstanceCurrent(
		mustResourceInstanceAddr(`data.test_data_source.d`).Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"data"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)
	two.SetResourceInstanceCurrent(
		mustResourceInstanceAddr(`test_resource.x`).Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo","value":"foo"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)
	two.SetResourceInstanceCurrent(
		mustResourceInstanceAddr(`data.test_data_source.d`).Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"data"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	assertNoErrors(t, diags)

	for _, res := range plan.Changes.Resources {
		// both existing data sources should be read during plan
		if res.Addr.Module[0].InstanceKey == addrs.StringKey("b") {
			continue
		}

		if res.Addr.Resource.Resource.Mode == addrs.DataResourceMode && res.Action != plans.NoOp {
			t.Errorf("unexpected %s plan for %s", res.Action, res.Addr)
		}
	}
}

func TestContext2Plan_moduleExpandOrphansResourceInstance(t *testing.T) {
	// This test deals with the situation where a user has changed the
	// repetition/expansion mode for a module call while there are already
	// resource instances from the previous declaration in the state.
	//
	// This is conceptually just the same as removing the resources
	// from the module configuration only for that instance, but the
	// implementation of it ends up a little different because it's
	// an entry in the resource address's _module path_ that we'll find
	// missing, rather than the resource's own instance key, and so
	// our analyses need to handle that situation by indicating that all
	// of the resources under the missing module instance have zero
	// instances, regardless of which resource in that module we might
	// be asking about, and do so without tripping over any missing
	// registrations in the instance expander that might lead to panics
	// if we aren't careful.
	//
	// (For some history here, see https://github.com/hashicorp/terraform/issues/30110 )

	addrNoKey := mustResourceInstanceAddr("module.child.test_object.a[0]")
	addrZeroKey := mustResourceInstanceAddr("module.child[0].test_object.a[0]")
	m := testModuleInline(t, map[string]string{
		"main.tf": `
			module "child" {
				source = "./child"
				count = 1
			}
		`,
		"child/main.tf": `
			resource "test_object" "a" {
				count = 1
			}
		`,
	})

	state := states.BuildState(func(s *states.SyncState) {
		// Notice that addrNoKey is the address which lacks any instance key
		// for module.child, and so that module instance doesn't match the
		// call declared above with count = 1, and therefore the resource
		// inside is "orphaned" even though the resource block actually
		// still exists there.
		s.SetResourceInstanceCurrent(addrNoKey, &states.ResourceInstanceObjectSrc{
			AttrsJSON: []byte(`{}`),
			Status:    states.ObjectReady,
		}, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`))
	})

	p := simpleMockProvider()
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.NormalMode,
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
	}

	// The orphaned instance (no module instance key) must be planned for
	// deletion because its containing module instance no longer exists.
	t.Run(addrNoKey.String(), func(t *testing.T) {
		instPlan := plan.Changes.ResourceInstance(addrNoKey)
		if instPlan == nil {
			t.Fatalf("no plan for %s at all", addrNoKey)
		}

		if got, want := instPlan.Addr, addrNoKey; !got.Equal(want) {
			t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.PrevRunAddr, addrNoKey; !got.Equal(want) {
			t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.Action, plans.Delete; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoModule; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})

	// The instance under the declared module.child[0] must be planned for
	// creation, since nothing in state matches that address.
	t.Run(addrZeroKey.String(), func(t *testing.T) {
		instPlan := plan.Changes.ResourceInstance(addrZeroKey)
		if instPlan == nil {
			t.Fatalf("no plan for %s at all", addrZeroKey)
		}

		if got, want := instPlan.Addr, addrZeroKey; !got.Equal(want) {
			t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.PrevRunAddr, addrZeroKey; !got.Equal(want) {
			t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.Action, plans.Create; got != want {
			t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
		if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
			t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
		}
	})
}

func TestContext2Plan_resourcePreconditionPostcondition(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
variable "boop" {
  type = string
}

resource "test_resource" "a" {
  value = var.boop
  lifecycle {
    precondition {
      condition     = var.boop == "boop"
      error_message = "Wrong boop."
    }
    postcondition {
      condition     = self.output != ""
      error_message = "Output must not be blank."
+ } + } +} + +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + t.Run("conditions pass", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.StringVal("bar") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_resource.a": + if res.Action != plans.Create { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } + }) + + t.Run("precondition fail", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource precondition failed: 
Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange was called; should'nt've been") + } + }) + + t.Run("precondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(diags) == 0 { + t.Fatalf("no diags, but should have warnings") + } + if got, want := diags.ErrWithWarnings().Error(), "Resource precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong warning:\ngot: %s\nwant: %q", got, want) + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + }) + + t.Run("postcondition fail", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.StringVal("") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + 
Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if !p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange wasn't called; should've been") + } + }) + + t.Run("postcondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { + if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "output"}) { + return cty.StringVal(""), nil + } + return v, nil + }) + if err != nil { + // shouldn't get here + t.Fatalf("ReadResourceFn transform failed") + return providers.ReadResourceResponse{} + } + return providers.ReadResourceResponse{ + NewState: newVal, + } + } + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(diags) == 0 { + t.Fatalf("no diags, but should have warnings") + } + if got, want := diags.ErrWithWarnings().Error(), "Resource postcondition failed: Output must not be blank."; got != want { + t.Fatalf("wrong warning:\ngot: 
%s\nwant: %q", got, want) + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + if p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange was called; should'nt've been") + } + }) + + t.Run("precondition and postcondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr("test_resource.a"), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"value":"boop","output":"blorp"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { + if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "output"}) { + return cty.StringVal(""), nil + } + return v, nil + }) + if err != nil { + // shouldn't get here + t.Fatalf("ReadResourceFn transform failed") + return providers.ReadResourceResponse{} + } + return providers.ReadResourceResponse{ + NewState: newVal, + } + } + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if got, want := len(diags), 2; got != want { + t.Errorf("wrong number of warnings, got %d, want %d", got, want) + } + warnings := diags.ErrWithWarnings().Error() + wantWarnings := []string{ + "Resource precondition failed: Wrong boop.", + "Resource postcondition failed: Output must not be blank.", + } + for _, want := range wantWarnings { + if !strings.Contains(warnings, want) { + 
t.Errorf("missing warning:\ngot: %s\nwant to contain: %q", warnings, want) + } + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + if p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange was called; should'nt've been") + } + }) +} + +func TestContext2Plan_dataSourcePreconditionPostcondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "boop" { + type = string +} + +data "test_data_source" "a" { + foo = var.boop + lifecycle { + precondition { + condition = var.boop == "boop" + error_message = "Wrong boop." + } + postcondition { + condition = length(self.results) > 0 + error_message = "Results cannot be empty." + } + } +} + +resource "test_resource" "a" { + value = data.test_data_source.a.results[0] +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + "results": { + Type: cty.List(cty.String), + Computed: true, + }, + }, + }, + }, + }) + + t.Run("conditions pass", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("boop"), + "results": cty.ListVal([]cty.Value{cty.StringVal("boop")}), + }), + } + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: 
cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_resource.a": + if res.Action != plans.Create { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + case "data.test_data_source.a": + if res.Action != plans.Read { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } + + addr := mustResourceInstanceAddr("data.test_data_source.a") + if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { + t.Errorf("no check result for %s", addr) + } else { + wantResult := &states.CheckResultObject{ + Status: checks.StatusPass, + } + if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { + t.Errorf("wrong check result for %s\n%s", addr, diff) + } + } + }) + + t.Run("precondition fail", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if p.ReadDataSourceCalled { + t.Errorf("Provider's ReadResource was called; should'nt've been") + } + }) + + t.Run("precondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: 
plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(diags) == 0 { + t.Fatalf("no diags, but should have warnings") + } + if got, want := diags.ErrWithWarnings().Error(), "Resource precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong warning:\ngot: %s\nwant: %q", got, want) + } + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_resource.a": + if res.Action != plans.Create { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + case "data.test_data_source.a": + if res.Action != plans.Read { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } + }) + + t.Run("postcondition fail", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("boop"), + "results": cty.ListValEmpty(cty.String), + }), + } + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Results cannot be empty."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if !p.ReadDataSourceCalled { + t.Errorf("Provider's ReadDataSource wasn't called; should've been") + } + }) + + t.Run("postcondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("boop"), + "results": cty.ListValEmpty(cty.String), + }), + } + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if got, want := diags.ErrWithWarnings().Error(), "Resource postcondition failed: Results cannot be empty."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + addr := mustResourceInstanceAddr("data.test_data_source.a") + if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { + t.Errorf("no check result for %s", addr) + } else { + wantResult := &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{ + "Results cannot be empty.", + }, + } + if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { + t.Errorf("wrong check result\n%s", diff) + } + } + }) + + t.Run("precondition and postcondition fail refresh-only", func(t *testing.T) { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("nope"), + "results": cty.ListValEmpty(cty.String), + }), + } + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if got, want := len(diags), 2; got != want { + t.Errorf("wrong number of warnings, got %d, want %d", got, want) + } + warnings := 
diags.ErrWithWarnings().Error() + wantWarnings := []string{ + "Resource precondition failed: Wrong boop.", + "Resource postcondition failed: Results cannot be empty.", + } + for _, want := range wantWarnings { + if !strings.Contains(warnings, want) { + t.Errorf("missing warning:\ngot: %s\nwant to contain: %q", warnings, want) + } + } + }) +} + +func TestContext2Plan_outputPrecondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "boop" { + type = string +} + +output "a" { + value = var.boop + precondition { + condition = var.boop == "boop" + error_message = "Wrong boop." + } +} +`, + }) + + p := testProvider("test") + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("condition pass", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + addr := addrs.RootModuleInstance.OutputValue("a") + outputPlan := plan.Changes.OutputValue(addr) + if outputPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + if got, want := outputPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := outputPlan.Action, plans.Create; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { + t.Errorf("no check result for %s", addr) + } else { + wantResult := &states.CheckResultObject{ + Status: checks.StatusPass, + } + if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { + t.Errorf("wrong check result\n%s", diff) + } + } + }) + + t.Run("condition fail", func(t *testing.T) { + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: 
plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Module output value precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + }) + + t.Run("condition fail refresh-only", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.RefreshOnlyMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(diags) == 0 { + t.Fatalf("no diags, but should have warnings") + } + if got, want := diags.ErrWithWarnings().Error(), "Module output value precondition failed: Wrong boop."; got != want { + t.Errorf("wrong warning:\ngot: %s\nwant: %q", got, want) + } + addr := addrs.RootModuleInstance.OutputValue("a") + outputPlan := plan.Changes.OutputValue(addr) + if outputPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + if got, want := outputPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := outputPlan.Action, plans.Create; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if gotResult := plan.Checks.GetObjectResult(addr); gotResult == nil { + t.Errorf("no condition result for %s", addr) + } else { + wantResult := &states.CheckResultObject{ + Status: checks.StatusFail, + FailureMessages: []string{"Wrong boop."}, + } + if diff := cmp.Diff(wantResult, gotResult, valueComparer); diff != "" { + t.Errorf("wrong condition result\n%s", diff) + } + } + }) +} + +func TestContext2Plan_preconditionErrors(t *testing.T) { + testCases := []struct { + condition string + wantSummary string + wantDetail string + }{ + { + "data.test_data_source", + "Invalid reference", + `The "data" 
object must be followed by two attribute names`, + }, + { + "self.value", + `Invalid "self" reference`, + "only in resource provisioner, connection, and postcondition blocks", + }, + { + "data.foo.bar", + "Reference to undeclared resource", + `A data resource "foo" "bar" has not been declared in the root module`, + }, + { + "test_resource.b.value", + "Invalid condition result", + "Condition expression must return either true or false", + }, + { + "test_resource.c.value", + "Invalid condition result", + "Invalid condition result value: a bool is required", + }, + } + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + for _, tc := range testCases { + t.Run(tc.condition, func(t *testing.T) { + main := fmt.Sprintf(` + resource "test_resource" "a" { + value = var.boop + lifecycle { + precondition { + condition = %s + error_message = "Not relevant." + } + } + } + + resource "test_resource" "b" { + value = null + } + + resource "test_resource" "c" { + value = "bar" + } + `, tc.condition) + m := testModuleInline(t, map[string]string{"main.tf": main}) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if !plan.Errored { + t.Fatal("plan failed to record error") + } + + diag := diags[0] + if got, want := diag.Description().Summary, tc.wantSummary; got != want { + t.Errorf("unexpected summary\n got: %s\nwant: %s", got, want) + } + if got, want := diag.Description().Detail, tc.wantDetail; !strings.Contains(got, want) { + t.Errorf("unexpected summary\ngot: %s\nwant to contain %q", got, want) + } + + for _, kv := range plan.Checks.ConfigResults.Elements() { + // All these are configuration or evaluation errors + if kv.Value.Status != checks.StatusError { + t.Errorf("incorrect status, got %s", kv.Value.Status) + } + } + }) + } +} + +func 
TestContext2Plan_preconditionSensitiveValues(t *testing.T) { + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "boop" { + sensitive = true + type = string +} + +output "a" { + sensitive = true + value = var.boop + + precondition { + condition = length(var.boop) <= 4 + error_message = "Boop is too long, ${length(var.boop)} > 4" + } +} +`, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("bleep"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := len(diags), 2; got != want { + t.Errorf("wrong number of diags, got %d, want %d", got, want) + } + for _, diag := range diags { + desc := diag.Description() + if desc.Summary == "Module output value precondition failed" { + if got, want := desc.Detail, "This check failed, but has an invalid error message as described in the other accompanying messages."; !strings.Contains(got, want) { + t.Errorf("unexpected detail\ngot: %s\nwant to contain %q", got, want) + } + } else if desc.Summary == "Error message refers to sensitive values" { + if got, want := desc.Detail, "The error expression used to explain this condition refers to sensitive values, so OpenTofu will not display the resulting message."; !strings.Contains(got, want) { + t.Errorf("unexpected detail\ngot: %s\nwant to contain %q", got, want) + } + } else { + t.Errorf("unexpected summary\ngot: %s", desc.Summary) + } + } +} + +func TestContext2Plan_triggeredBy(t *testing.T) { + type TestConfiguration struct { + Description string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "TF configuration", + inlineConfiguration: 
map[string]string{ + "main.tf": ` +resource "test_object" "a" { + count = 1 + test_string = "new" +} +resource "test_object" "b" { + count = 1 + test_string = test_object.a[count.index].test_string + lifecycle { + # the change to test_string in the other resource should trigger replacement + replace_triggered_by = [ test_object.a[count.index].test_string ] + } +} +`, + }, + }, + { + Description: "Json configuration", + inlineConfiguration: map[string]string{ + "main.tf.json": ` +{ + "resource": { + "test_object": { + "a": [ + { + "count": 1, + "test_string": "new" + } + ], + "b": [ + { + "count": 1, + "lifecycle": [ + { + "replace_triggered_by": [ + "test_object.a[count.index].test_string" + ] + } + ], + "test_string": "test_object.a[count.index].test_string" + } + ] + } + } +} +`, + }, + }, + } + + p := simpleMockProvider() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a[0]"), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"test_string":"old"}`), + Status: states.ObjectReady, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + s.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.b[0]"), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + for _, configuration := range configurations { + m := testModuleInline(t, configuration.inlineConfiguration) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + for _, c := range plan.Changes.Resources { + switch c.Addr.String() { + case "test_object.a[0]": + if c.Action != 
plans.Update { + t.Fatalf("unexpected %s change for %s\n", c.Action, c.Addr) + } + case "test_object.b[0]": + if c.Action != plans.DeleteThenCreate { + t.Fatalf("unexpected %s change for %s\n", c.Action, c.Addr) + } + if c.ActionReason != plans.ResourceInstanceReplaceByTriggers { + t.Fatalf("incorrect reason for change: %s\n", c.ActionReason) + } + default: + t.Fatal("unexpected change", c.Addr, c.Action) + } + } + } +} + +func TestContext2Plan_dataSchemaChange(t *testing.T) { + // We can't decode the prior state when a data source upgrades the schema + // in an incompatible way. Since prior state for data sources is purely + // informational, decoding should be skipped altogether. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_object" "a" { + obj { + # args changes from a list to a map + args = { + val = "string" + } + } +} +`, + }) + + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + DataSources: map[string]*configschema.Block{ + "test_object": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "obj": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "args": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`data.test_object.a`), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"old","obj":[{"args":["string"]}]}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) +} + +func TestContext2Plan_applyGraphError(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +resource "test_object" "b" { + depends_on = [test_object.a] +} +`, + }) + + p := simpleMockProvider() + + // Here we introduce a cycle via state which only shows up in the apply + // graph where the actual destroy instances are connected in the graph. + // This could happen for example when a user has an existing state with + // stored dependencies, and changes the config in such a way that + // contradicts the stored dependencies. + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"a"}`), + Dependencies: []addrs.ConfigResource{mustResourceInstanceAddr("test_object.b").ContainingResource().Config()}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"b"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + }) + if !diags.HasErrors() { + t.Fatal("cycle error not detected") + } + + msg := diags.ErrWithWarnings().Error() + if !strings.Contains(msg, "Cycle") { + t.Fatalf("no cycle error found:\n got: %s\n", msg) + } +} + 
+// plan a destroy with no state where configuration could fail to evaluate +// expansion indexes. +func TestContext2Plan_emptyDestroy(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + enable = true + value = local.enable ? module.example[0].out : null +} + +module "example" { + count = local.enable ? 1 : 0 + source = "./example" +} +`, + "example/main.tf": ` +resource "test_resource" "x" { +} + +output "out" { + value = test_resource.x +} +`, + }) + + p := testProvider("test") + state := states.NewState() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + + assertNoErrors(t, diags) + + // ensure that the given states are valid and can be serialized + if plan.PrevRunState == nil { + t.Fatal("nil plan.PrevRunState") + } + if plan.PriorState == nil { + t.Fatal("nil plan.PriorState") + } +} + +// A deposed instances which no longer exists during ReadResource creates NoOp +// change, which should not effect the plan. +func TestContext2Plan_deposedNoLongerExists(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "b" { + count = 1 + test_string = "updated" + lifecycle { + create_before_destroy = true + } +} +`, + }) + + p := simpleMockProvider() + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + s := req.PriorState.GetAttr("test_string").AsString() + if s == "current" { + resp.NewState = req.PriorState + return resp + } + // pretend the non-current instance has been deleted already + resp.NewState = cty.NullVal(req.PriorState.Type()) + return resp + } + + // Here we introduce a cycle via state which only shows up in the apply + // graph where the actual destroy instances are connected in the graph. 
+ // This could happen for example when a user has an existing state with + // stored dependencies, and changes the config in such a way that + // contradicts the stored dependencies. + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("test_object.a[0]").Resource, + states.DeposedKey("deposed"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"old"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"current"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + }) + assertNoErrors(t, diags) +} + +// make sure there are no cycles with changes around a provider configured via +// managed resources. 
+func TestContext2Plan_destroyWithResourceConfiguredProvider(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + in = "a" +} + +provider "test" { + alias = "other" + in = test_object.a.out +} + +resource "test_object" "b" { + provider = test.other + in = "a" +} +`}) + + testProvider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "in": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "in": { + Type: cty.String, + Optional: true, + }, + "out": { + Type: cty.Number, + Computed: true, + }, + }, + }, + }, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), + }, + }) + + // plan+apply to create the initial state + opts := SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)) + plan, diags := ctx.Plan(m, states.NewState(), opts) + assertNoErrors(t, diags) + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + // Resource changes which have dependencies across providers which + // themselves depend on resources can result in cycles. + // Because other_object transitively depends on the module resources + // through its provider, we trigger changes on both sides of this boundary + // to ensure we can create a valid plan. 
+	//
+	// Try to replace both instances
+	addrA := mustResourceInstanceAddr("test_object.a")
+	addrB := mustResourceInstanceAddr(`test_object.b`)
+	opts.ForceReplace = []addrs.AbsResourceInstance{addrA, addrB}
+
+	_, diags = ctx.Plan(m, state, opts)
+	assertNoErrors(t, diags)
+}
+
+func TestContext2Plan_destroyPartialState(t *testing.T) {
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "a" {
+}
+
+output "out" {
+  value = module.mod.out
+}
+
+module "mod" {
+  source = "./mod"
+}
+`,
+
+		"./mod/main.tf": `
+resource "test_object" "a" {
+  count = 2
+
+  lifecycle {
+    precondition {
+      # test_object_b has already been destroyed, so referencing the first
+      # instance must not fail during a destroy plan.
+      condition = test_object.b[0].test_string == "invalid"
+      error_message = "should not block destroy"
+    }
+    precondition {
+      # this failing condition should not block a destroy plan
+      condition = !local.continue
+      error_message = "should not block destroy"
+    }
+  }
+}
+
+resource "test_object" "b" {
+  count = 2
+}
+
+locals {
+  continue = true
+}
+
+output "out" {
+  # the reference to test_object.b[0] may not be valid during a destroy plan,
+  # but should not fail.
+  value = local.continue ? test_object.a[1].test_string != "invalid" && test_object.b[0].test_string != "invalid" : false
+
+  precondition {
+    # test_object_b has already been destroyed, so referencing the first
+    # instance must not fail during a destroy plan.
+    condition = test_object.b[0].test_string == "invalid"
+    error_message = "should not block destroy"
+  }
+  precondition {
+    # this failing condition should not block a destroy plan
+    condition = test_object.a[0].test_string == "invalid"
+    error_message = "should not block destroy"
+  }
+}
+`})
+
+	p := simpleMockProvider()
+
+	// This state could be the result of a failed destroy, leaving only 2
+	// remaining instances.
We want to be able to continue the destroy to + // remove everything without blocking on invalid references or failing + // conditions. + state := states.NewState() + mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey)) + mod.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"current"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + mod.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"current"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) +} + +func TestContext2Plan_destroyPartialStateLocalRef(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "already_destroyed" { + count = 1 + source = "./mod" +} + +locals { + eval_error = module.already_destroyed[0].out +} + +output "already_destroyed" { + value = local.eval_error +} + +`, + + "./mod/main.tf": ` +resource "test_object" "a" { +} + +output "out" { + value = test_object.a.test_string +} +`}) + + p := simpleMockProvider() + + state := states.NewState() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + assertNoErrors(t, diags) +} + +// Make sure the data sources in the 
prior state are serializable even if
+// there were an error in the plan.
+func TestContext2Plan_dataSourceReadPlanError(t *testing.T) {
+	m, snap := testModuleWithSnapshot(t, "data-source-read-with-plan-error")
+	awsProvider := testProvider("aws")
+	testProvider := testProvider("test")
+
+	testProvider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+		resp.PlannedState = req.ProposedNewState
+		resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops"))
+		return resp
+	}
+
+	state := states.NewState()
+
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("aws"):  testProviderFuncFixed(awsProvider),
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider),
+		},
+	})
+
+	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
+	if !diags.HasErrors() {
+		t.Fatalf("expected plan error")
+	}
+
+	// make sure we can serialize the plan even if there were an error
+	_, _, _, err := contextOptsForPlanViaFile(t, snap, plan)
+	if err != nil {
+		t.Fatalf("failed to round-trip through planfile: %s", err)
+	}
+}
+
+func TestContext2Plan_ignoredMarkedValue(t *testing.T) {
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "a" {
+  map = {
+    prior = "value"
+    new = sensitive("ignored")
+  }
+}
+`})
+
+	testProvider := &MockProvider{
+		GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{
+			ResourceTypes: map[string]providers.Schema{
+				"test_object": providers.Schema{
+					Block: &configschema.Block{
+						Attributes: map[string]*configschema.Attribute{
+							"map": {
+								Type:     cty.Map(cty.String),
+								Optional: true,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testProvider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+		// We're going to ignore any changes here and return the prior state.
+ resp.PlannedState = req.PriorState + return resp + } + + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"map":{"prior":"value"}}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(testProvider), + }, + }) + + // plan+apply to create the initial state + opts := SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)) + plan, diags := ctx.Plan(m, state, opts) + assertNoErrors(t, diags) + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Errorf("unexpected %s change for %s", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_importResourceBasic(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + + type TestConfiguration struct { + Description string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "TF configuration", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "foo" +} + +import { + to = test_object.a + id = "123" +} +`, + }, + }, + { + Description: "Json configuration", + inlineConfiguration: map[string]string{ + "main.tf.json": ` +{ + "import": [ + { + "id": "123", + "to": "test_object.a" + } + ], + "resource": { + "test_object": { + "a": [ + { + "test_string": "foo" + } + ] + } + } +} +`, + }, + }, + } + + p := simpleMockProvider() + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = 
&providers.ReadResourceResponse{
+		NewState: cty.ObjectVal(map[string]cty.Value{
+			"test_string": cty.StringVal("foo"),
+		}),
+	}
+	p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{
+		ImportedResources: []providers.ImportedResource{
+			{
+				TypeName: "test_object",
+				State: cty.ObjectVal(map[string]cty.Value{
+					"test_string": cty.StringVal("foo"),
+				}),
+			},
+		},
+	}
+
+	for _, configuration := range configurations {
+		m := testModuleInline(t, configuration.inlineConfiguration)
+
+		plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
+		if diags.HasErrors() {
+			t.Fatalf("unexpected errors\n%s", diags.Err().Error())
+		}
+
+		t.Run(configuration.Description, func(t *testing.T) {
+			instPlan := plan.Changes.ResourceInstance(addr)
+			if instPlan == nil {
+				t.Fatalf("no plan for %s at all", addr)
+			}
+
+			if got, want := instPlan.Addr, addr; !got.Equal(want) {
+				t.Errorf("wrong current address\ngot:  %s\nwant: %s", got, want)
+			}
+			if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) {
+				t.Errorf("wrong previous run address\ngot:  %s\nwant: %s", got, want)
+			}
+			if got, want := instPlan.Action, plans.NoOp; got != want {
+				t.Errorf("wrong planned action\ngot:  %s\nwant: %s", got, want)
+			}
+			if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
+				t.Errorf("wrong action reason\ngot:  %s\nwant: %s", got, want)
+			}
+			if instPlan.Importing.ID != "123" {
+				t.Errorf("expected import change from \"123\", got non-import change")
+			}
+
+			if !hook.PrePlanImportCalled {
+				t.Fatalf("PrePlanImport hook not called")
+			}
+			if addr, wantAddr := hook.PrePlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) {
+				t.Errorf("expected addr to be %s, but was %s", wantAddr, addr)
+			}
+
+			if !hook.PostPlanImportCalled {
+				t.Fatalf("PostPlanImport hook not called")
+			}
+			if addr, wantAddr := hook.PostPlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) {
+				t.Errorf("expected addr to be %s, but was %s", wantAddr, addr)
+			}
+		})
+	}
+} + +func TestContext2Plan_importToDynamicAddress(t *testing.T) { + type TestConfiguration struct { + Description string + ResolvedAddress string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "To address includes a variable as index in TF configuration", + ResolvedAddress: "test_object.a[0]", + inlineConfiguration: map[string]string{ + "main.tf": ` +variable "index" { + default = 0 +} + +resource "test_object" "a" { + count = 1 + test_string = "foo" +} + +import { + to = test_object.a[var.index] + id = "%d" +} +`, + }, + }, + { + Description: "To address includes a variable as index in JSON configuration", + ResolvedAddress: "test_object.a[0]", + inlineConfiguration: map[string]string{ + "main.tf.json": ` +{ + "locals": [ + { + "index": 0 + } + ], + "import": [ + { + "id": "%d", + "to": "test_object.a[local.index]" + } + ], + "resource": { + "test_object": { + "a": [ + { + "count": 1, + "test_string": "foo" + } + ] + } + } +} +`, + }, + }, + { + Description: "To address includes a local as index", + ResolvedAddress: "test_object.a[0]", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + index = 0 +} + +resource "test_object" "a" { + count = 1 + test_string = "foo" +} + +import { + to = test_object.a[local.index] + id = "%d" +} +`, + }, + }, + { + Description: "To address includes a conditional expression as index", + ResolvedAddress: "test_object.a[\"zero\"]", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "a" { + for_each = toset(["zero"]) + test_string = "foo" +} + +import { + to = test_object.a[ true ? 
"zero" : "one"] + id = "%d" +} +`, + }, + }, + { + Description: "To address includes a conditional expression with vars and locals as index", + ResolvedAddress: "test_object.a[\"one\"]", + inlineConfiguration: map[string]string{ + "main.tf": ` +variable "one" { + default = 1 +} + +locals { + zero = "zero" + one = "one" +} + +resource "test_object" "a" { + for_each = toset(["one"]) + test_string = "foo" +} + +import { + to = test_object.a[var.one == 1 ? local.one : local.zero] + id = "%d" +} +`, + }, + }, + { + Description: "To address includes a resource reference as index", + ResolvedAddress: "test_object.a[\"boop\"]", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { + test_string = "boop" +} + +resource "test_object" "a" { + for_each = toset(["boop"]) + test_string = "foo" +} + +import { + to = test_object.a[test_object.reference.test_string] + id = "%d" +} +`, + }, + }, + { + Description: "To address includes a data reference as index", + ResolvedAddress: "test_object.a[\"bip\"]", + inlineConfiguration: map[string]string{ + "main.tf": ` +data "test_object" "reference" { +} + +resource "test_object" "a" { + for_each = toset(["bip"]) + test_string = "foo" +} + +import { + to = test_object.a[data.test_object.reference.test_string] + id = "%d" +} +`, + }, + }, + } + + const importId = 123 + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + + // Format the configuration with the import ID + formattedConfiguration := make(map[string]string) + for configFileName, configFileContent := range configuration.inlineConfiguration { + formattedConfigFileContent := fmt.Sprintf(configFileContent, importId) + formattedConfiguration[configFileName] = formattedConfigFileContent + } + + addr := mustResourceInstanceAddr(configuration.ResolvedAddress) + m := testModuleInline(t, formattedConfiguration) + + p := &MockProvider{ + GetProviderSchemaResponse: 
&providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, + }, + DataSources: map[string]providers.Schema{ + "test_object": providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + } + + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("bip"), + }), + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: 
%s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != strconv.Itoa(importId) { + t.Errorf("expected import change from \"%d\", got non-import change", importId) + } + + if !hook.PrePlanImportCalled { + t.Fatalf("PostPlanImport hook not called") + } + if addr, wantAddr := hook.PrePlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + + if !hook.PostPlanImportCalled { + t.Fatalf("PostPlanImport hook not called") + } + if addr, wantAddr := hook.PostPlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + }) + }) + } +} + +func TestContext2Plan_importToInvalidDynamicAddress(t *testing.T) { + type TestConfiguration struct { + Description string + expectedError string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "To address index value is null", + expectedError: "Import block 'to' address contains an invalid key: Import block contained a resource address using an index which is null. Please ensure the expression for the index is not null", + inlineConfiguration: map[string]string{ + "main.tf": ` +variable "index" { + default = null +} + +resource "test_object" "a" { + count = 1 + test_string = "foo" +} + +import { + to = test_object.a[var.index] + id = "123" +} +`, + }, + }, + { + Description: "To address index is not a number or a string", + expectedError: "Import block 'to' address contains an invalid key: Import block contained a resource address using an index which is not valid for a resource instance (not a string or a number). 
Please ensure the expression for the index is correct, and returns either a string or a number", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + index = toset(["foo"]) +} + +resource "test_object" "a" { + for_each = toset(["foo"]) + test_string = "foo" +} + +import { + to = test_object.a[local.index] + id = "123" +} +`, + }, + }, + { + Description: "To address index value is sensitive", + expectedError: "Import block 'to' address contains an invalid key: Import block contained a resource address using an index which is sensitive. Please ensure indexes used in the resource address of an import target are not sensitive", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + index = sensitive("foo") +} + +resource "test_object" "a" { + for_each = toset(["foo"]) + test_string = "foo" +} + +import { + to = test_object.a[local.index] + id = "123" +} +`, + }, + }, + { + Description: "To address index value will only be known after apply", + expectedError: "Import block contained a resource address using an index that will only be known after apply. 
Please ensure to use expressions that are known at plan time for the index of an import target address", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { +} + +resource "test_object" "a" { + count = 1 + test_string = "foo" +} + +import { + to = test_object.a[test_object.reference.id] + id = "123" +} +`, + }, + }, + } + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + m := testModuleInline(t, configuration.inlineConfiguration) + + providerSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + }, + } + + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: providerSchema}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: providerSchema}, + }, + }, + } + + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + testStringVal := req.ProposedNewState.GetAttr("test_string") + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "test_string": testStringVal, + "id": cty.UnknownVal(cty.String), + }), + } + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, 
+ } + + _, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), configuration.expectedError; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } + }) + } +} + +func TestContext2Plan_importForEach(t *testing.T) { + type ImportResult struct { + ResolvedAddress string + ResolvedId string + } + type TestConfiguration struct { + Description string + ImportResults []ImportResult + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "valid map", + ImportResults: []ImportResult{{ResolvedAddress: `test_object.a["key1"]`, ResolvedId: "val1"}, {ResolvedAddress: `test_object.a["key2"]`, ResolvedId: "val2"}, {ResolvedAddress: `test_object.a["key3"]`, ResolvedId: "val3"}}, + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + map = { + "key1" = "val1" + "key2" = "val2" + "key3" = "val3" + } +} + +resource "test_object" "a" { + for_each = local.map +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "valid set", + ImportResults: []ImportResult{{ResolvedAddress: `test_object.a["val0"]`, ResolvedId: "val0"}, {ResolvedAddress: `test_object.a["val1"]`, ResolvedId: "val1"}, {ResolvedAddress: `test_object.a["val2"]`, ResolvedId: "val2"}}, + inlineConfiguration: map[string]string{ + "main.tf": ` +variable "set" { + type = set(string) + default = ["val0", "val1", "val2"] +} + +resource "test_object" "a" { + for_each = var.set +} + +import { + for_each = var.set + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "valid tuple", + ImportResults: []ImportResult{{ResolvedAddress: `module.mod[0].test_object.a["resKey1"]`, ResolvedId: "val1"}, {ResolvedAddress: `module.mod[0].test_object.a["resKey2"]`, 
ResolvedId: "val2"}, {ResolvedAddress: `module.mod[1].test_object.a["resKey1"]`, ResolvedId: "val3"}, {ResolvedAddress: `module.mod[1].test_object.a["resKey2"]`, ResolvedId: "val4"}},
+			inlineConfiguration: map[string]string{
+				"mod/main.tf": `
+variable "set" {
+  type = set(string)
+  default = ["resKey1", "resKey2"]
+}
+
+resource "test_object" "a" {
+  for_each = var.set
+}
+`,
+				"main.tf": `
+locals {
+  tuple = [
+    {
+      moduleKey = 0
+      resourceKey = "resKey1"
+      id = "val1"
+    },
+    {
+      moduleKey = 0
+      resourceKey = "resKey2"
+      id = "val2"
+    },
+    {
+      moduleKey = 1
+      resourceKey = "resKey1"
+      id = "val3"
+    },
+    {
+      moduleKey = 1
+      resourceKey = "resKey2"
+      id = "val4"
+    },
+  ]
+}
+
+module "mod" {
+  count = 2
+  source = "./mod"
+}
+
+import {
+  for_each = local.tuple
+  id = each.value.id
+  to = module.mod[each.value.moduleKey].test_object.a[each.value.resourceKey]
+}
+`,
+			},
+		},
+	}
+
+	for _, configuration := range configurations {
+		t.Run(configuration.Description, func(t *testing.T) {
+			m := testModuleInline(t, configuration.inlineConfiguration)
+			p := simpleMockProvider()
+
+			hook := new(MockHook)
+			ctx := testContext2(t, &ContextOpts{
+				Hooks: []Hook{hook},
+				Providers: map[addrs.Provider]providers.Factory{
+					addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+				},
+			})
+			p.ReadResourceResponse = &providers.ReadResourceResponse{
+				NewState: cty.ObjectVal(map[string]cty.Value{}),
+			}
+
+			p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{
+				ImportedResources: []providers.ImportedResource{
+					{
+						TypeName: "test_object",
+						State:    cty.ObjectVal(map[string]cty.Value{}),
+					},
+				},
+			}
+
+			plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
+			if diags.HasErrors() {
+				t.Fatalf("unexpected errors\n%s", diags.Err().Error())
+			}
+
+			if len(plan.Changes.Resources) != len(configuration.ImportResults) {
+				t.Fatalf("expected %d resource changes in the plan, got %d instead",
len(configuration.ImportResults), len(plan.Changes.Resources)) + } + + for _, importResult := range configuration.ImportResults { + addr := mustResourceInstanceAddr(importResult.ResolvedAddress) + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != importResult.ResolvedId { + t.Errorf("expected import change from \"%s\", got non-import change", importResult.ResolvedId) + } + + if !hook.PrePlanImportCalled { + t.Fatalf("PostPlanImport hook not called") + } + + if !hook.PostPlanImportCalled { + t.Fatalf("PostPlanImport hook not called") + } + }) + } + }) + } +} + +func TestContext2Plan_importWithInvalidForEach(t *testing.T) { + type TestConfiguration struct { + Description string + expectedError string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "for_each value is null", + expectedError: "Invalid import id argument: The import ID cannot be null", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + map = { + "key1" = null + } +} + +resource "test_object" "a" { + for_each = local.map +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each key is null", + expectedError: "Null value as key: Can't use a null value as a key.", + 
inlineConfiguration: map[string]string{ + "main.tf": ` +variable "nil" { + default = null +} + +locals { + map = { + (var.nil) = "val1" + } +} + +resource "test_object" "a" { + for_each = local.map +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each expression is null", + expectedError: `Invalid for_each argument: The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, set of strings, or a tuple, and you have provided a value of type dynamic.`, + inlineConfiguration: map[string]string{ + "main.tf": ` +variable "map" { + default = null +} + +resource "test_object" "a" { +} + +import { + for_each = var.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each key is unknown", + expectedError: `Invalid for_each argument: The "for_each" map includes keys derived from resource attributes that cannot be determined until apply, and so OpenTofu cannot determine the full set of keys that will identify the instances of this resource.`, + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { +} + +locals { + map = { + (test_object.reference.id) = "val1" + } +} + +resource "test_object" "a" { + count = 1 +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each value is unknown", + expectedError: `Invalid import id argument: The import block "id" argument depends on resource attributes that cannot be determined until apply, so OpenTofu cannot plan to import this resource.`, + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { +} + +locals { + map = { + "key1" = (test_object.reference.id) + } +} + +resource "test_object" "a" { + count = 1 +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each 
expression is unknown", + expectedError: `Invalid for_each argument: The "for_each" map includes keys derived from resource attributes that cannot be determined until apply, and so OpenTofu cannot determine the full set of keys that will identify the instances of this resource.`, + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { +} + +locals { + map = (test_object.reference.id) +} + +resource "test_object" "a" { + count = 1 +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each value is sensitive", + expectedError: "Invalid import id argument: The import ID cannot be sensitive.", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + index = sensitive("foo") + map = { + "key1" = local.index + } +} + +resource "test_object" "a" { + for_each = local.map +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each key is sensitive", + expectedError: "Invalid for_each argument: Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + index = sensitive("foo") + map = { + (local.index) = "val1" + } +} + +resource "test_object" "a" { + count = 1 +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + { + Description: "for_each expression is sensitive", + expectedError: "Invalid for_each argument: Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. 
If used, the sensitive value could be exposed as a resource instance key.", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "reference" { +} + +locals { + map = sensitive({ + "key1" = "val1" + }) +} + +resource "test_object" "a" { + count = 0 +} + +import { + for_each = local.map + to = test_object.a[each.key] + id = each.value +} +`, + }, + }, + } + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + m := testModuleInline(t, configuration.inlineConfiguration) + + providerSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + }, + } + + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: providerSchema}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: providerSchema}, + }, + }, + } + + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + testStringVal := req.ProposedNewState.GetAttr("test_string") + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "test_string": testStringVal, + "id": cty.UnknownVal(cty.String), + }), + } + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": 
cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), configuration.expectedError; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } + }) + } +} + +func TestContext2Plan_importResourceAlreadyInState(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "foo" +} + +import { + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"test_string":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", 
addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing != nil { + t.Errorf("expected non-import change, got import change %+v", instPlan.Importing) + } + }) +} + +func TestContext2Plan_importResourceUpdate(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} + +import { + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong 
current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Update; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != "123" { + t.Errorf("expected import change from \"123\", got non-import change") + } + }) +} + +func TestContext2Plan_importResourceReplace(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} + +import { + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current 
address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.DeleteThenCreate; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != "123" { + t.Errorf("expected import change from \"123\", got non-import change") + } + }) +} + +func TestContext2Plan_importRefreshOnce(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} + +import { + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + readCalled := 0 + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + readCalled++ + state, _ := simpleTestSchema().CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + })) + + return providers.ReadResourceResponse{ + NewState: state, + } + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + if readCalled > 1 { + t.Error("ReadResource called multiple times for import") + } +} + +func TestContext2Plan_importIdVariable(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-id-variable") + ctx := testContext2(t, 
&ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + SetVariables: InputValues{ + "the_id": &InputValue{ + // let var take its default value + Value: cty.NilVal, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_importIdReference(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-id-reference") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + SetVariables: InputValues{ + "the_id": &InputValue{ + // let var take its default value + Value: cty.NilVal, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_importIdFunc(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-id-func") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } 
+ + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_importIdDataSource(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-id-data-source") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_subnet": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "aws_subnet": { + Attributes: map[string]*configschema.Attribute{ + "vpc_id": { + Type: cty.String, + Required: true, + }, + "cidr_block": { + Type: cty.String, + Computed: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "vpc_id": cty.StringVal("abc"), + "cidr_block": cty.StringVal("10.0.1.0/24"), + "id": cty.StringVal("123"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_subnet", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_importIdModule(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "import-id-module") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_lb": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: 
cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_lb", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_importIdInvalidNull(t *testing.T) { + p := testProvider("test") + m := testModule(t, "import-id-invalid-null") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + SetVariables: InputValues{ + "the_id": &InputValue{ + Value: cty.NullVal(cty.String), + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "The import ID cannot be null"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_importIdInvalidUnknown(t *testing.T) { + p := testProvider("test") + m := testModule(t, "import-id-invalid-unknown") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) 
providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: cty.UnknownVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + })), + } + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_resource", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `The import block "id" argument depends on resource attributes that cannot be determined until apply`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_importIntoModuleWithGeneratedConfig(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +import { + to = test_object.a + id = "123" +} + +import { + to = module.mod.test_object.a + id = "456" +} + +module "mod" { + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: 
cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", // Actual value here doesn't matter, as long as it is not empty. + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + one := mustResourceInstanceAddr("test_object.a") + two := mustResourceInstanceAddr("module.mod.test_object.a") + + onePlan := plan.Changes.ResourceInstance(one) + twoPlan := plan.Changes.ResourceInstance(two) + + // This test is just to make sure things work e2e with modules and generated + // config, so we're not too careful about the actual responses - we're just + // happy nothing panicked. See the other import tests for actual validation + // of responses and the like. + if twoPlan.Action != plans.Update { + t.Errorf("expected nested item to be updated but was %s", twoPlan.Action) + } + + if len(onePlan.GeneratedConfig) == 0 { + t.Errorf("expected root item to generate config but it didn't") + } +} + +func TestContext2Plan_importIntoNonExistentConfiguration(t *testing.T) { + type TestConfiguration struct { + Description string + inlineConfiguration map[string]string + } + configurations := []TestConfiguration{ + { + Description: "Basic missing configuration", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = test_object.a + id = "123" +} +`, + }, + }, + { + Description: "Non-existent module", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod.test_object.a + id = "456" +} +`, + }, + }, + { + Description: "Wrong module key", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["non-existent"].test_object.a + id = "123" +} + +module "mod" { + for_each = { + existent = "1" + } + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + }, + { + 
Description: "Module key without for_each", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["non-existent"].test_object.a + id = "123" +} + +module "mod" { + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + }, + { + Description: "Non-existent resource key - in module", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod.test_object.a["non-existent"] + id = "123" +} + +module "mod" { + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + for_each = { + existent = "1" + } + test_string = "bar" +} +`, + }, + }, + { + Description: "Non-existent resource key - in root", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = test_object.a[42] + id = "123" +} + +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + }, + { + Description: "Existent module key, non-existent resource key", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["existent"].test_object.b + id = "123" +} + +module "mod" { + for_each = { + existent = "1" + existent_two = "2" + } + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + }, + } + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + m := testModuleInline(t, configuration.inlineConfiguration) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + }) + + if !diags.HasErrors() { + t.Fatalf("expected error") + } + + var errNum int + for _, diag := range diags { + if diag.Severity() == tfdiags.Error { + errNum++ + } + } + if errNum > 1 { + t.Fatalf("expected a single error, but got %d", errNum) + } + 
+ if !strings.Contains(diags.Err().Error(), "Configuration for import target does not exist") { + t.Fatalf("expected error to be \"Configuration for import target does not exist\", but it was %s", diags.Err().Error()) + } + }) + } +} + +func TestContext2Plan_importDuplication(t *testing.T) { + type TestConfiguration struct { + Description string + inlineConfiguration map[string]string + expectedError string + } + configurations := []TestConfiguration{ + { + Description: "Duplication with dynamic address with a variable", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "a" { + count = 2 +} + +variable "address1" { + default = 1 +} + +variable "address2" { + default = 1 +} + +import { + to = test_object.a[var.address1] + id = "123" +} + +import { + to = test_object.a[var.address2] + id = "123" +} +`, + }, + expectedError: "Duplicate import configuration for \"test_object.a[1]\"", + }, + { + Description: "Duplication with dynamic address with a resource reference", + inlineConfiguration: map[string]string{ + "main.tf": ` +resource "test_object" "example" { + test_string = "boop" +} + +resource "test_object" "a" { + for_each = toset(["boop"]) +} + +import { + to = test_object.a[test_object.example.test_string] + id = "123" +} + +import { + to = test_object.a[test_object.example.test_string] + id = "123" +} +`, + }, + expectedError: "Duplicate import configuration for \"test_object.a[\\\"boop\\\"]\"", + }, + } + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + m := testModuleInline(t, configuration.inlineConfiguration) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + + 
p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + + if !diags.HasErrors() { + t.Fatalf("expected error") + } + + var errNum int + for _, diag := range diags { + if diag.Severity() == tfdiags.Error { + errNum++ + } + } + if errNum > 1 { + t.Fatalf("expected a single error, but got %d", errNum) + } + + if !strings.Contains(diags.Err().Error(), configuration.expectedError) { + t.Fatalf("expected error to be %s, but it was %s", configuration.expectedError, diags.Err().Error()) + } + }) + } +} + +func TestContext2Plan_importResourceConfigGen(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +import { + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", // Actual value here doesn't matter, as long as it is not empty. 
+ }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != "123" { + t.Errorf("expected import change from \"123\", got non-import change") + } + + want := `resource "test_object" "a" { + test_bool = null + test_list = null + test_map = null + test_number = null + test_string = "foo" +}` + got := instPlan.GeneratedConfig + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("got:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } + }) +} + +func TestContext2Plan_importResourceConfigGenWithAlias(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "test" { + alias = "backup" +} + +import { + provider = test.backup + to = test_object.a + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: 
[]providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", // Actual value here doesn't matter, as long as it is not empty. + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != "123" { + t.Errorf("expected import change from \"123\", got non-import change") + } + + want := `resource "test_object" "a" { + provider = test.backup + test_bool = null + test_list = null + test_map = null + test_number = null + test_string = "foo" +}` + got := instPlan.GeneratedConfig + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("got:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } + }) +} + +func TestContext2Plan_importResourceConfigGenValidation(t *testing.T) { + type TestConfiguration struct { + Description string + inlineConfiguration map[string]string + expectedError string + } + configurations := []TestConfiguration{ + { + Description: "Resource with index", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = test_object.a[0] + id = "123" 
+} +`, + }, + expectedError: "Configuration generation for count and for_each resources not supported", + }, + { + Description: "Resource with dynamic index", + inlineConfiguration: map[string]string{ + "main.tf": ` +locals { + loc = "something" +} + +import { + to = test_object.a[local.loc] + id = "123" +} +`, + }, + expectedError: "Configuration generation for count and for_each resources not supported", + }, + { + Description: "Resource in module", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod.test_object.b + id = "456" +} + +module "mod" { + source = "./mod" +} + + +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Cannot generate configuration for resource inside sub-module", + }, + { + Description: "Resource in non-existent module", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod.test_object.a + id = "456" +} +`, + }, + expectedError: "Cannot generate configuration for resource inside sub-module", + }, + { + Description: "Wrong module key", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["non-existent"].test_object.a + id = "123" +} + +module "mod" { + for_each = { + existent = "1" + } + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Configuration for import target does not exist", + }, + { + Description: "In module with module key", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["existent"].test_object.b + id = "123" +} + +module "mod" { + for_each = { + existent = "1" + } + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Cannot generate configuration for resource inside sub-module", + }, + { + Description: "Module key without for_each", + inlineConfiguration: map[string]string{ + "main.tf": ` 
+import { + to = module.mod["non-existent"].test_object.a + id = "123" +} + +module "mod" { + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Configuration for import target does not exist", + }, + { + Description: "Non-existent resource key - in module", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod.test_object.a["non-existent"] + id = "123" +} + +module "mod" { + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + for_each = { + existent = "1" + } + test_string = "bar" +} +`, + }, + expectedError: "Configuration for import target does not exist", + }, + { + Description: "Non-existent resource key - in root", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = test_object.a[42] + id = "123" +} + +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Configuration for import target does not exist", + }, + { + Description: "Existent module key, non-existent resource key", + inlineConfiguration: map[string]string{ + "main.tf": ` +import { + to = module.mod["existent"].test_object.b + id = "123" +} + +module "mod" { + for_each = { + existent = "1" + existent_two = "2" + } + source = "./mod" +} +`, + "./mod/main.tf": ` +resource "test_object" "a" { + test_string = "bar" +} +`, + }, + expectedError: "Cannot generate configuration for resource inside sub-module", + }, + } + + for _, configuration := range configurations { + t.Run(configuration.Description, func(t *testing.T) { + m := testModuleInline(t, configuration.inlineConfiguration) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", + }) + + if !diags.HasErrors() { + 
t.Fatalf("expected error") + } + + var errNum int + for _, diag := range diags { + if diag.Severity() == tfdiags.Error { + errNum++ + } + } + if errNum > 1 { + t.Fatalf("expected a single error, but got %d", errNum) + } + + if !strings.Contains(diags.Err().Error(), configuration.expectedError) { + t.Fatalf("expected error to be %s, but it was %s", configuration.expectedError, diags.Err().Error()) + } + }) + } +} + +func TestContext2Plan_importResourceConfigGenExpandedResource(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +import { + to = test_object.a[0] + id = "123" +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", + }) + if !diags.HasErrors() { + t.Fatalf("expected plan to error, but it did not") + } + if !strings.Contains(diags.Err().Error(), "Configuration generation for count and for_each resources not supported") { + t.Fatalf("expected error to be \"Config generation for count and for_each resources not supported\", but it is %s", diags.Err().Error()) + } +} + +// config generation still succeeds even when planning fails +func TestContext2Plan_importResourceConfigGenWithError(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +import { + to = test_object.a + id = "123" +} 
+`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ + PlannedState: cty.NullVal(cty.DynamicPseudoType), + Diagnostics: tfdiags.Diagnostics(nil).Append(errors.New("plan failed")), + } + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + GenerateConfigPath: "generated.tf", // Actual value here doesn't matter, as long as it is not empty. 
+ }) + if !diags.HasErrors() { + t.Fatal("expected error") + } + + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + want := `resource "test_object" "a" { + test_bool = null + test_list = null + test_map = null + test_number = null + test_string = "foo" +}` + got := instPlan.GeneratedConfig + if diff := cmp.Diff(want, got); len(diff) > 0 { + t.Errorf("got:\n%s\nwant:\n%s\ndiff:\n%s", got, want, diff) + } +} + +func TestContext2Plan_plannedState(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "foo" +} + +locals { + local_value = test_object.a.test_string +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state := states.NewState() + plan, diags := ctx.Plan(m, state, nil) + if diags.HasErrors() { + t.Errorf("expected no errors, but got %s", diags) + } + + module := state.RootModule() + + // So, the original state shouldn't have been updated at all. + if len(module.LocalValues) > 0 { + t.Errorf("expected no local values in the state but found %d", len(module.LocalValues)) + } + + if len(module.Resources) > 0 { + t.Errorf("expected no resources in the state but found %d", len(module.Resources)) + } + + // But, this makes it hard for the testing framework to validate things about + // the returned plan. 
So, the plan contains the planned state: + module = plan.PlannedState.RootModule() + + if module.LocalValues["local_value"].AsString() != "foo" { + t.Errorf("expected local value to be \"foo\" but was \"%s\"", module.LocalValues["local_value"].AsString()) + } + + if module.ResourceInstance(addr.Resource).Current.Status != states.ObjectPlanned { + t.Errorf("expected resource to be in planned state") + } +} + +func TestContext2Plan_removedResourceBasic(t *testing.T) { + desposedKey := states.DeposedKey("deposed") + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = test_object.a + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks test_object.a, which we should be + // removed from the state by the "removed" block in the config. + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceDeposed( + mustResourceInstanceAddr(addr.String()), + desposedKey, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"old"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + for _, test := range []struct { + deposedKey states.DeposedKey + wantReason plans.ResourceInstanceChangeActionReason + }{{desposedKey, 
plans.ResourceInstanceChangeNoReason}, {states.NotDeposed, plans.ResourceInstanceDeleteBecauseNoResourceConfig}} { + t.Run(addr.String(), func(t *testing.T) { + var instPlan *plans.ResourceInstanceChangeSrc + + if test.deposedKey == states.NotDeposed { + instPlan = plan.Changes.ResourceInstance(addr) + } else { + instPlan = plan.Changes.ResourceInstanceDeposed(addr, test.deposedKey) + } + + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, test.wantReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestContext2Plan_removedModuleBasic(t *testing.T) { + desposedKey := states.DeposedKey("deposed") + addr := mustResourceInstanceAddr("module.mod.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = module.mod + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.mod.test_object.a, which should be + // removed from the state by the module's "removed" block in the root module config. 
+ s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceDeposed( + mustResourceInstanceAddr(addr.String()), + desposedKey, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"old"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + for _, test := range []struct { + deposedKey states.DeposedKey + wantReason plans.ResourceInstanceChangeActionReason + }{{desposedKey, plans.ResourceInstanceChangeNoReason}, {states.NotDeposed, plans.ResourceInstanceDeleteBecauseNoResourceConfig}} { + t.Run(addr.String(), func(t *testing.T) { + var instPlan *plans.ResourceInstanceChangeSrc + + if test.deposedKey == states.NotDeposed { + instPlan = plan.Changes.ResourceInstance(addr) + } else { + instPlan = plan.Changes.ResourceInstanceDeposed(addr, test.deposedKey) + } + + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, 
test.wantReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestContext2Plan_removedModuleForgetsAllInstances(t *testing.T) { + addrFirst := mustResourceInstanceAddr("module.mod[0].test_object.a") + addrSecond := mustResourceInstanceAddr("module.mod[1].test_object.a") + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = module.mod + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.mod[0].test_object.a and + // module.mod[1].test_object.a, which we should be removed + // from the state by the "removed" block in the config. + s.SetResourceInstanceCurrent(addrFirst, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addrSecond, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addrFirst, addrSecond, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + for _, resourceInstance := range []addrs.AbsResourceInstance{addrFirst, addrSecond} { + t.Run(resourceInstance.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(resourceInstance) + if instPlan == nil { + t.Fatalf("no plan for %s at all", resourceInstance) + } + + if got, want := instPlan.Addr, resourceInstance; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := 
instPlan.PrevRunAddr, resourceInstance; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestContext2Plan_removedResourceForgetsAllInstances(t *testing.T) { + addrFirst := mustResourceInstanceAddr("test_object.a[0]") + addrSecond := mustResourceInstanceAddr("test_object.a[1]") + + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = test_object.a + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks test_object.a[0] and + // test_object.a[1], which we should be removed from + // the state by the "removed" block in the config. + s.SetResourceInstanceCurrent(addrFirst, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + s.SetResourceInstanceCurrent(addrSecond, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addrFirst, addrSecond, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + for _, resourceInstance := range []addrs.AbsResourceInstance{addrFirst, addrSecond} { + t.Run(resourceInstance.String(), func(t *testing.T) { + instPlan := 
plan.Changes.ResourceInstance(resourceInstance) + if instPlan == nil { + t.Fatalf("no plan for %s at all", resourceInstance) + } + + if got, want := instPlan.Addr, resourceInstance; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, resourceInstance; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) + } +} + +func TestContext2Plan_removedResourceInChildModuleFromParentModule(t *testing.T) { + addr := mustResourceInstanceAddr("module.mod.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "mod" { + source = "./mod" + } + + removed { + from = module.mod.test_object.a + } + `, + "mod/main.tf": ``, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.mod.test_object.a, which should be + // removed from the state by the "removed" block in the root config. 
+ s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_removedResourceInChildModuleFromChildModule(t *testing.T) { + addr := mustResourceInstanceAddr("module.mod.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "mod" { + source = "./mod" + } + `, + "mod/main.tf": ` + removed { + from = test_object.a + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.mod.test_object.a, which should be + // removed from the state by the "removed" block in the child module config. 
+ s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_removedResourceInGrandchildModuleFromRootModule(t *testing.T) { + addr := mustResourceInstanceAddr("module.child.module.grandchild.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "child" { + source = "./child" + } + + removed { + from = module.child.module.grandchild.test_object.a + } + `, + "child/main.tf": ``, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.child.module.grandchild.test_object.a, + // which we should be removed from the state by the "removed" block in + // the root config. 
+ s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_removedChildModuleForgetsResourceInGrandchildModule(t *testing.T) { + addr := mustResourceInstanceAddr("module.child.module.grandchild.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "child" { + source = "./child" + } + + removed { + from = module.child.module.grandchild + } + `, + "child/main.tf": ``, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks module.child.module.grandchild.test_object.a, + // which we should be removed from the state by the "removed" block + // in the root config. 
+ s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoResourceConfig; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_movedAndRemovedResourceAtTheSameTime(t *testing.T) { + // This is the only scenario where the "moved" and "removed" blocks can + // coexist while referencing the same resource. In this case, the "moved" logic + // will run first, trying to move the resource to a non-existing target. + // Usually ,it will cause the resource to be destroyed, but because the + // "removed" block is also present, it will be removed from the state instead. 
+ addrA := mustResourceInstanceAddr("test_object.a") + addrB := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + removed { + from = test_object.b + } + + moved { + from = test_object.a + to = test_object.b + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + // The prior state tracks test_object.a, which we should treat as + // test_object.b because of the "moved" block in the config. + s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addrA, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addrA.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrA) + if instPlan != nil { + t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB) + } + }) + t.Run(addrB.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addrB) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addrB) + } + + if got, want := instPlan.Addr, addrB; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addrA; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.Action, plans.Forget; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceDeleteBecauseNoMoveTarget; got != want { + t.Errorf("wrong action 
reason\ngot: %s\nwant: %s", got, want) + } + }) +} + +func TestContext2Plan_removedResourceButResourceBlockStillExists(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test_object" "a" { + test_string = "foo" + } + + removed { + from = test_object.a + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if got, want := diags.Err().Error(), "Removed resource block still exists"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_removedResourceButResourceBlockStillExistsInChildModule(t *testing.T) { + addr := mustResourceInstanceAddr("module.mod.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "mod" { + source = "./mod" + } + + removed { + from = module.mod.test_object.a + } + `, + "mod/main.tf": ` + resource "test_object" "a" { + test_string = "foo" + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if got, want := diags.Err().Error(), "Removed resource block still exists"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_removedModuleButModuleBlockStillExists(t *testing.T) { + addr := mustResourceInstanceAddr("module.mod.test_object.a") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "mod" { + source = "./mod" + } + + removed { + from = module.mod + } + `, + "mod/main.tf": ` + resource "test_object" "a" { + test_string = "foo" + } + `, + }) + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`)) + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + ForceReplace: []addrs.AbsResourceInstance{ + addr, + }, + }) + + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if got, want := diags.Err().Error(), "Removed module block still exists"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_importResourceWithSensitiveDataSource(t *testing.T) { + addr := mustResourceInstanceAddr("test_object.b") + m := testModuleInline(t, map[string]string{ + "main.tf": ` + data "test_data_source" "a" { + } + resource "test_object" "b" { + 
test_string = data.test_data_source.a.test_string + } + import { + to = test_object.b + id = "123" + } + `, + }) + + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": {Block: simpleTestSchema()}, + }, + DataSources: map[string]providers.Schema{ + "test_data_source": {Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Computed: true, + Sensitive: true, + }, + }, + }}, + }, + }, + } + hook := new(MockHook) + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{hook}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_object", + State: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("foo"), + }), + }, + }, + } + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + t.Run(addr.String(), func(t *testing.T) { + instPlan := plan.Changes.ResourceInstance(addr) + if instPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + + if got, want := instPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.PrevRunAddr, addr; !got.Equal(want) { + t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want) + } + if got, want 
:= instPlan.Action, plans.NoOp; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + if instPlan.Importing.ID != "123" { + t.Errorf("expected import change from \"123\", got non-import change") + } + + if !hook.PrePlanImportCalled { + t.Fatalf("PrePlanImport hook not called") + } + if addr, wantAddr := hook.PrePlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + + if !hook.PostPlanImportCalled { + t.Fatalf("PostPlanImport hook not called") + } + if addr, wantAddr := hook.PostPlanImportAddr, instPlan.Addr; !addr.Equal(wantAddr) { + t.Errorf("expected addr to be %s, but was %s", wantAddr, addr) + } + }) +} + +func TestContext2Plan_insufficient_block(t *testing.T) { + type testcase struct { + filename string + start hcl.Pos + end hcl.Pos + } + + tests := map[string]testcase{ + "insufficient-features-blocks-aliased-provider": { + filename: "provider[\"registry.opentofu.org/hashicorp/test\"] with no configuration", + start: hcl.InitialPos, + end: hcl.InitialPos, + }, + "insufficient-features-blocks-nested_module": { + filename: "provider[\"registry.opentofu.org/hashicorp/test\"] with no configuration", + start: hcl.InitialPos, + end: hcl.InitialPos, + }, + "insufficient-features-blocks-no-feats": { + filename: "testdata/insufficient-features-blocks-no-feats/main.tf", + start: hcl.Pos{Line: 9, Column: 17, Byte: 146}, + end: hcl.Pos{Line: 9, Column: 17, Byte: 146}, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + m := testModule(t, testName) + p := mockProviderWithFeaturesBlock() + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, 
states.NewState(), DefaultPlanOpts) + var expectedDiags tfdiags.Diagnostics + + expectedDiags = expectedDiags.Append( + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient features blocks", + Detail: "At least 1 \"features\" blocks are required.", + Subject: &hcl.Range{ + Filename: tc.filename, + Start: tc.start, + End: tc.end, + }, + }, + ) + + assertDiagnosticsMatch(t, diags, expectedDiags) + }) + } +} + +func mockProviderWithFeaturesBlock() *MockProvider { + return &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: featuresBlockTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": {Block: simpleTestSchema()}, + }, + }, + } +} + +func featuresBlockTestSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "features": { + MinItems: 1, + MaxItems: 1, + Nesting: configschema.NestingList, + }, + }, + } +} diff --git a/pkg/tofu/context_plan_test.go b/pkg/tofu/context_plan_test.go new file mode 100644 index 00000000000..3cdfea98fcc --- /dev/null +++ b/pkg/tofu/context_plan_test.go @@ -0,0 +1,6949 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestContext2Plan_basic(t *testing.T) { + m := testModule(t, "plan-good") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if l := len(plan.Changes.Resources); l < 2 { + t.Fatalf("wrong number of resources %d; want fewer than two\n%s", l, spew.Sdump(plan.Changes.Resources)) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + for _, r := range plan.Changes.Resources { + ric, err := r.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + foo := ric.After.GetAttr("foo").AsString() + if foo != "2" { + t.Fatalf("incorrect plan for 'bar': %#v", ric.After) + } + case "aws_instance.foo": + num, _ := ric.After.GetAttr("num").AsBigFloat().Int64() + if num != 2 { + t.Fatalf("incorrect plan for 'foo': %#v", ric.After) + } + default: + t.Fatal("unknown instance:", i) + } + } + + if !p.ValidateProviderConfigCalled { + t.Fatal("provider config was not 
checked before Configure") + } + +} + +func TestContext2Plan_createBefore_deposed(t *testing.T) { + m := testModule(t, "plan-cbd") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + mustResourceInstanceAddr("aws_instance.foo").Resource, + states.DeposedKey("00000001"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + // the state should still show one deposed + expectedState := strings.TrimSpace(` + aws_instance.foo: (1 deposed) + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + Deposed ID 1 = foo`) + + if plan.PriorState.String() != expectedState { + t.Fatalf("\nexpected: %q\ngot: %q\n", expectedState, plan.PriorState.String()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + type InstanceGen struct { + Addr string + DeposedKey states.DeposedKey + } + want := map[InstanceGen]bool{ + { + Addr: "aws_instance.foo", + }: true, + { + Addr: "aws_instance.foo", + DeposedKey: states.DeposedKey("00000001"), + }: true, + } + got := make(map[InstanceGen]bool) + changes := 
make(map[InstanceGen]*plans.ResourceInstanceChangeSrc) + + for _, change := range plan.Changes.Resources { + k := InstanceGen{ + Addr: change.Addr.String(), + DeposedKey: change.DeposedKey, + } + got[k] = true + changes[k] = change + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("wrong resource instance object changes in plan\ngot: %s\nwant: %s", spew.Sdump(got), spew.Sdump(want)) + } + + { + ric, err := changes[InstanceGen{Addr: "aws_instance.foo"}].Decode(ty) + if err != nil { + t.Fatal(err) + } + + if got, want := ric.Action, plans.NoOp; got != want { + t.Errorf("current object change action is %s; want %s", got, want) + } + + // the existing instance should only have an unchanged id + expected, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("baz"), + "type": cty.StringVal("aws_instance"), + })) + if err != nil { + t.Fatal(err) + } + + checkVals(t, expected, ric.After) + } + + { + ric, err := changes[InstanceGen{Addr: "aws_instance.foo", DeposedKey: states.DeposedKey("00000001")}].Decode(ty) + if err != nil { + t.Fatal(err) + } + + if got, want := ric.Action, plans.Delete; got != want { + t.Errorf("deposed object change action is %s; want %s", got, want) + } + } +} + +func TestContext2Plan_createBefore_maintainRoot(t *testing.T) { + m := testModule(t, "plan-cbd-maintain-root") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if !plan.PriorState.Empty() { + t.Fatal("expected empty prior state, got:", plan.PriorState) + } + + if len(plan.Changes.Resources) != 4 { + t.Error("expected 4 resource in plan, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + // these should all be creates + if res.Action != 
plans.Create { + t.Fatalf("unexpected action %s for %s", res.Action, res.Addr.String()) + } + } +} + +func TestContext2Plan_emptyDiff(t *testing.T) { + m := testModule(t, "plan-empty") + p := testProvider("aws") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if !plan.PriorState.Empty() { + t.Fatal("expected empty state, got:", plan.PriorState) + } + + if len(plan.Changes.Resources) != 2 { + t.Error("expected 2 resource in plan, got", len(plan.Changes.Resources)) + } + + actions := map[string]plans.Action{} + + for _, res := range plan.Changes.Resources { + actions[res.Addr.String()] = res.Action + } + + expected := map[string]plans.Action{ + "aws_instance.foo": plans.Create, + "aws_instance.bar": plans.Create, + } + if !cmp.Equal(expected, actions) { + t.Fatal(cmp.Diff(expected, actions)) + } +} + +func TestContext2Plan_escapedVar(t *testing.T) { + m := testModule(t, "plan-escaped-var") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if len(plan.Changes.Resources) != 1 { + t.Error("expected 1 resource in plan, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + + schema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + expected := objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("bar-${baz}"), + "type": cty.UnknownVal(cty.String), + }) + + checkVals(t, expected, ric.After) +} + +func TestContext2Plan_minimal(t *testing.T) { + m := testModule(t, "plan-empty") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if !plan.PriorState.Empty() { + t.Fatal("expected empty state, got:", plan.PriorState) + } + + if len(plan.Changes.Resources) != 2 { + t.Error("expected 2 resource in plan, got", len(plan.Changes.Resources)) + } + + actions := map[string]plans.Action{} + + for _, res := range plan.Changes.Resources { + actions[res.Addr.String()] = res.Action + } + + expected := map[string]plans.Action{ + "aws_instance.foo": plans.Create, + "aws_instance.bar": plans.Create, + } + if !cmp.Equal(expected, actions) { + t.Fatal(cmp.Diff(expected, actions)) + } +} + +func TestContext2Plan_modules(t *testing.T) { + m := testModule(t, "plan-modules") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if len(plan.Changes.Resources) != 3 { + t.Error("expected 3 resource in plan, got", len(plan.Changes.Resources)) + } + + schema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + expectFoo := objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("2"), + "type": cty.UnknownVal(cty.String), + }) + + expectNum := objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "num": cty.NumberIntVal(2), + "type": cty.UnknownVal(cty.String), + }) + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + expected = expectFoo + case "aws_instance.foo": + expected = expectNum + case "module.child.aws_instance.foo": + expected = expectNum + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + } +} +func TestContext2Plan_moduleExpand(t *testing.T) { + // Test a smattering of plan expansion behavior + m := testModule(t, "plan-modules-expand") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + expected := map[string]struct{}{ + `aws_instance.foo["a"]`: {}, + `module.count_child[1].aws_instance.foo[0]`: {}, + `module.count_child[1].aws_instance.foo[1]`: {}, + `module.count_child[0].aws_instance.foo[0]`: {}, + `module.count_child[0].aws_instance.foo[1]`: {}, + `module.for_each_child["a"].aws_instance.foo[1]`: {}, + `module.for_each_child["a"].aws_instance.foo[0]`: {}, + } + 
+ for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + _, ok := expected[ric.Addr.String()] + if !ok { + t.Fatal("unexpected resource:", ric.Addr.String()) + } + delete(expected, ric.Addr.String()) + } + for addr := range expected { + t.Error("missing resource", addr) + } +} + +// GH-1475 +func TestContext2Plan_moduleCycle(t *testing.T) { + m := testModule(t, "plan-module-cycle") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "some_input": {Type: cty.String, Optional: true}, + "type": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "aws_instance.b": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }) + case "aws_instance.c": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": 
cty.UnknownVal(cty.String), + "some_input": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }) + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + } +} + +func TestContext2Plan_moduleDeadlock(t *testing.T) { + testCheckDeadlock(t, func() { + m := testModule(t, "plan-module-deadlock") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if err != nil { + t.Fatalf("err: %s", err) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + expected := objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }) + switch i := ric.Addr.String(); i { + case "module.child.aws_instance.foo[0]": + case "module.child.aws_instance.foo[1]": + case "module.child.aws_instance.foo[2]": + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + } + }) +} + +func TestContext2Plan_moduleInput(t *testing.T) { + m := testModule(t, "plan-module-input") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if 
len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("2"), + "type": cty.UnknownVal(cty.String), + }) + case "module.child.aws_instance.foo": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("42"), + "type": cty.UnknownVal(cty.String), + }) + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + } +} + +func TestContext2Plan_moduleInputComputed(t *testing.T) { + m := testModule(t, "plan-module-input-computed") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.UnknownVal(cty.String), + "type": 
cty.UnknownVal(cty.String), + "compute": cty.StringVal("foo"), + }), ric.After) + case "module.child.aws_instance.foo": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_moduleInputFromVar(t *testing.T) { + m := testModule(t, "plan-module-input-var") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("52"), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("2"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.child.aws_instance.foo": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("52"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_moduleMultiVar(t *testing.T) { + m 
:= testModule(t, "plan-module-multi-var") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "baz": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 5 { + t.Fatal("expected 5 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.parent[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.parent[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + }), ric.After) + case "module.child.aws_instance.bar[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "baz": cty.StringVal("baz"), + }), ric.After) + case "module.child.aws_instance.bar[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "baz": cty.StringVal("baz"), + }), ric.After) + case "module.child.aws_instance.foo": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": 
cty.UnknownVal(cty.String), + "foo": cty.StringVal("baz,baz"), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_moduleOrphans(t *testing.T) { + m := testModule(t, "plan-modules-remove") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo": + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "num": cty.NumberIntVal(2), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.child.aws_instance.foo": + if res.Action != plans.Delete { + t.Fatalf("expected resource delete, got %s", res.Action) + } + default: + t.Fatal("unknown instance:", i) + } + } + + expectedState := ` +module.child: + aws_instance.foo: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"]` + + if 
plan.PriorState.String() != expectedState { + t.Fatalf("\nexpected state: %q\n\ngot: %q", expectedState, plan.PriorState.String()) + } +} + +// https://github.com/hashicorp/terraform/issues/3114 +func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { + m := testModule(t, "plan-modules-remove-provisioners") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + pr := testProvisioner() + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.top").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"top","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child1 := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child1", addrs.NoKey)) + child1.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child2 := state.EnsureModule(addrs.RootModuleInstance.Child("parent", addrs.NoKey).Child("child2", addrs.NoKey)) + child2.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: 
%s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 3 { + t.Error("expected 3 planned resources, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "module.parent.module.child1.aws_instance.foo": + if res.Action != plans.Delete { + t.Fatalf("expected resource Delete, got %s", res.Action) + } + case "module.parent.module.child2.aws_instance.foo": + if res.Action != plans.Delete { + t.Fatalf("expected resource Delete, got %s", res.Action) + } + case "aws_instance.top": + if res.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s", res.Action) + } + default: + t.Fatalf("unknown instance: %s\nafter: %#v", i, hcl2shim.ConfigValueFromHCL2(ric.After)) + } + } + + expectedState := `aws_instance.top: + ID = top + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + +module.parent.child1: + aws_instance.foo: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +module.parent.child2: + aws_instance.foo: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance` + + if expectedState != plan.PriorState.String() { + t.Fatalf("\nexpect state:\n%s\n\ngot state:\n%s\n", expectedState, plan.PriorState.String()) + } +} + +func TestContext2Plan_moduleProviderInherit(t *testing.T) { + var l sync.Mutex + var calls []string + + m := testModule(t, "plan-module-provider-inherit") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { + l.Lock() + defer l.Unlock() + + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: 
&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "from": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "from": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + from := req.Config.GetAttr("from") + if from.IsNull() || from.AsString() != "root" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("not root")) + } + + return + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + from := req.Config.GetAttr("from").AsString() + + l.Lock() + defer l.Unlock() + calls = append(calls, from) + return testDiffFn(req) + } + return p, nil + }, + }, + }) + + _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := calls + sort.Strings(actual) + expected := []string{"child", "root"} + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +// This tests (for GH-11282) that deeply nested modules properly inherit +// configuration. 
+func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) { + var l sync.Mutex + + m := testModule(t, "plan-module-provider-inherit-deep") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { + l.Lock() + defer l.Unlock() + + var from string + p := testProvider("aws") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "from": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }) + + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + v := req.Config.GetAttr("from") + if v.IsNull() || v.AsString() != "root" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("not root")) + } + from = v.AsString() + + return + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + if from != "root" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("bad resource")) + return + } + + return testDiffFn(req) + } + return p, nil + }, + }, + }) + + _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { + var l sync.Mutex + var calls []string + + m := testModule(t, "plan-module-provider-defaults-var") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): func() (providers.Interface, error) { + l.Lock() + defer l.Unlock() + + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "to": {Type: cty.String, Optional: true}, + "from": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "from": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + var buf bytes.Buffer + from := req.Config.GetAttr("from") + if !from.IsNull() { + buf.WriteString(from.AsString() + "\n") + } + to := req.Config.GetAttr("to") + if !to.IsNull() { + buf.WriteString(to.AsString() + "\n") + } + + l.Lock() + defer l.Unlock() + calls = append(calls, buf.String()) + return + } + + return p, nil + }, + }, + }) + + _, err := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("root"), + SourceType: ValueFromCaller, + }, + }, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{ + "child\nchild\n", + "root\n", + } + sort.Strings(calls) + if !reflect.DeepEqual(calls, expected) { + t.Fatalf("expected:\n%#v\ngot:\n%#v\n", expected, calls) + } +} + +func TestContext2Plan_moduleProviderVar(t *testing.T) { + m := testModule(t, "plan-module-provider-var") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), 
		SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.test":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"value": cty.StringVal("hello"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_moduleVar verifies that a value passed into a child module
// via a module variable shows up in the planned child resource, alongside a
// root resource that consumes the module's output.
func TestContext2Plan_moduleVar(t *testing.T) {
	m := testModule(t, "plan-module-var")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		var expected cty.Value

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("2"),
				"type": cty.UnknownVal(cty.String),
			})
		case "module.child.aws_instance.foo":
			expected = objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"num":  cty.NumberIntVal(2),
				"type": cty.UnknownVal(cty.String),
			})
		default:
			t.Fatal("unknown instance:", i)
		}

		checkVals(t, expected, ric.After)
	}
}

// TestContext2Plan_moduleVarWrongTypeBasic verifies that passing a value of the
// wrong type to a module variable is a plan-time error.
func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) {
	m := testModule(t, "plan-module-wrong-var-type")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}

// TestContext2Plan_moduleVarWrongTypeNested is the same check for a variable
// type mismatch inside a nested module.
func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) {
	m := testModule(t, "plan-module-wrong-var-type-nested")
	p := testProvider("null")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("null"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}

// TestContext2Plan_moduleVarWithDefaultValue verifies that a module variable
// with a default value plans cleanly when the caller provides nothing.
func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) {
	m := testModule(t, "plan-module-var-with-default-value")
	p := testProvider("null")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("null"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}

// TestContext2Plan_moduleVarComputed verifies that an unknown (computed) value
// flowing through a module variable stays unknown in the child's plan.
func TestContext2Plan_moduleVarComputed(t *testing.T) {
	m := testModule(t, "plan-module-var-computed")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags :=
		ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.child.aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":      cty.UnknownVal(cty.String),
				"foo":     cty.UnknownVal(cty.String),
				"type":    cty.UnknownVal(cty.String),
				"compute": cty.StringVal("foo"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_preventDestroy_bad verifies that planning the destruction of
// a resource marked lifecycle.prevent_destroy fails with a descriptive error,
// while still returning the plan so the attempted change is visible.
func TestContext2Plan_preventDestroy_bad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-bad")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, err := ctx.Plan(m, state, DefaultPlanOpts)

	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", err),
		expectedErr) {
		if plan != nil {
			t.Logf(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, err)
	}

	// Plan should show the expected changes, even though prevent_destroy validation fails
	// So we could see why the resource was attempted to be destroyed
	if got, want := 1, len(plan.Changes.Resources); got != want {
		t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources))
	}
}

// TestContext2Plan_preventDestroy_good verifies that prevent_destroy does not
// interfere when the plan has no changes at all.
func TestContext2Plan_preventDestroy_good(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	// State already matches the configuration, so the plan should be empty.
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123","type":"aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if !plan.Changes.Empty() {
		t.Fatalf("expected no changes, got %#v\n", plan.Changes)
	}
}

// TestContext2Plan_preventDestroy_countBad verifies that reducing count on a
// prevent_destroy resource fails, naming the instance that would be destroyed.
func TestContext2Plan_preventDestroy_countBad(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-bad")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[1]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc345"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, err := ctx.Plan(m, state, DefaultPlanOpts)

	// foo[1] is the instance beyond the configured count, so it is the one
	// the plan would destroy.
	expectedErr := "aws_instance.foo[1] has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) {
		if plan != nil {
			t.Logf(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected err would contain %q\nerr: %s", expectedErr, err)
	}

	// Plan should show the expected changes, even though prevent_destroy validation fails
	// So we could see why the resource was attempted to be destroyed
	if got, want := 1, len(plan.Changes.Resources); got != want {
		t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources))
	}
}

// TestContext2Plan_preventDestroy_countGood verifies that a counted
// prevent_destroy resource still produces a non-empty plan when the planned
// changes do not destroy any instance.
func TestContext2Plan_preventDestroy_countGood(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
					"id":      {Type: cty.String, Computed: true},
				},
			},
		},
	})

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[1]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc345"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if plan.Changes.Empty() {
		t.Fatalf("Expected non-empty plan, got %s", legacyDiffComparisonString(plan.Changes))
	}
}

// TestContext2Plan_preventDestroy_countGoodNoChange verifies that when state
// already matches the counted configuration, the plan is empty and
// prevent_destroy raises no error.
func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-count-good")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"current": {Type: cty.String, Optional: true},
					"type":    {Type: cty.String, Optional: true, Computed: true},
					"id":      {Type: cty.String, Computed: true},
				},
			},
		},
	})

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo[0]").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123","current":"0","type":"aws_instance"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if
	!plan.Changes.Empty() {
		t.Fatalf("Expected empty plan, got %s", legacyDiffComparisonString(plan.Changes))
	}
}

// TestContext2Plan_preventDestroy_destroyPlan verifies that an explicit
// destroy-mode plan against a prevent_destroy resource fails with a
// descriptive diagnostic.
func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) {
	m := testModule(t, "plan-prevent-destroy-good")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})

	expectedErr := "aws_instance.foo has lifecycle.prevent_destroy"
	if !strings.Contains(fmt.Sprintf("%s", diags.Err()), expectedErr) {
		if plan != nil {
			t.Logf(legacyDiffComparisonString(plan.Changes))
		}
		t.Fatalf("expected diagnostics would contain %q\nactual diags: %s", expectedErr, diags.Err())
	}
}

// TestContext2Plan_provisionerCycle verifies that a provisioner configuration
// creating a dependency cycle is rejected at plan time.
func TestContext2Plan_provisionerCycle(t *testing.T) {
	m := testModule(t, "plan-provisioner-cycle")
	p := testProvider("aws")
	pr := testProvisioner()
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
		Provisioners: map[string]provisioners.Factory{
			"local-exec": testProvisionerFuncFixed(pr),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if !diags.HasErrors() {
		t.Fatalf("succeeded; want errors")
	}
}

// TestContext2Plan_computed verifies that attributes derived from another
// resource's computed attributes are planned as unknown values.
func TestContext2Plan_computed(t *testing.T) {
	m := testModule(t, "plan-computed")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.UnknownVal(cty.String),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":      cty.UnknownVal(cty.String),
				"foo":     cty.UnknownVal(cty.String),
				"num":     cty.NumberIntVal(2),
				"type":    cty.UnknownVal(cty.String),
				"compute": cty.StringVal("foo"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_blockNestingGroup verifies that a NestingGroup block absent
// from configuration is still presented to the provider as a non-null object
// with null attributes.
func TestContext2Plan_blockNestingGroup(t *testing.T) {
	m := testModule(t, "plan-block-nesting-group")
	p := testProvider("test")
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"test": {
				BlockTypes: map[string]*configschema.NestedBlock{
					"blah": {
						Nesting: configschema.NestingGroup,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"baz": {Type: cty.String, Required: true},
							},
						},
					},
				},
			},
		},
	})
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
		}
	}
	ctx :=
		testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	if got, want := 1, len(plan.Changes.Resources); got != want {
		t.Fatalf("wrong number of planned resource changes %d; want %d\n%s", got, want, spew.Sdump(plan.Changes.Resources))
	}

	if !p.PlanResourceChangeCalled {
		t.Fatalf("PlanResourceChange was not called at all")
	}

	got := p.PlanResourceChangeRequest
	want := providers.PlanResourceChangeRequest{
		TypeName: "test",

		// Because block type "blah" is defined as NestingGroup, we get a non-null
		// value for it with null nested attributes, rather than the "blah" object
		// itself being null, when there's no "blah" block in the config at all.
		//
		// This represents the situation where the remote service _always_ creates
		// a single "blah", regardless of whether the block is present, but when
		// the block _is_ present the user can override some aspects of it. The
		// absense of the block means "use the defaults", in that case.
		Config: cty.ObjectVal(map[string]cty.Value{
			"blah": cty.ObjectVal(map[string]cty.Value{
				"baz": cty.NullVal(cty.String),
			}),
		}),
		ProposedNewState: cty.ObjectVal(map[string]cty.Value{
			"blah": cty.ObjectVal(map[string]cty.Value{
				"baz": cty.NullVal(cty.String),
			}),
		}),
	}
	if !cmp.Equal(got, want, valueTrans) {
		t.Errorf("wrong PlanResourceChange request\n%s", cmp.Diff(got, want, valueTrans))
	}
}

// TestContext2Plan_computedDataResource verifies that a data resource whose
// configuration depends on a computed (unknown) value is deferred: its change
// is recorded with an unknown result and the "config unknown" action reason.
func TestContext2Plan_computedDataResource(t *testing.T) {
	m := testModule(t, "plan-computed-data-resource")
	p := testProvider("aws")
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"num":     {Type: cty.String, Optional: true},
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_vpc": {
				Attributes: map[string]*configschema.Attribute{
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetProviderSchemaResponse.DataSources["aws_vpc"].Block
	ty := schema.ImpliedType()

	if rc := plan.Changes.ResourceInstance(addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)); rc == nil {
		t.Fatalf("missing diff for aws_instance.foo")
	}
	rcs := plan.Changes.ResourceInstance(addrs.Resource{
		Mode: addrs.DataResourceMode,
		Type: "aws_vpc",
		Name: "bar",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	if rcs == nil {
		t.Fatalf("missing diff for
data.aws_vpc.bar") + } + + rc, err := rcs.Decode(ty) + if err != nil { + t.Fatal(err) + } + + checkVals(t, + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + rc.After, + ) + if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseConfigUnknown; got != want { + t.Errorf("wrong ActionReason\ngot: %s\nwant: %s", got, want) + } +} + +func TestContext2Plan_computedInFunction(t *testing.T) { + m := testModule(t, "plan-computed-in-function") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.Number, Optional: true}, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "aws_data_source": { + Attributes: map[string]*configschema.Attribute{ + "computed": {Type: cty.List(cty.String), Computed: true}, + }, + }, + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "computed": cty.ListVal([]cty.Value{ + cty.StringVal("foo"), + }), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + assertNoErrors(t, diags) + + _, diags = ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + if !p.ReadDataSourceCalled { + t.Fatalf("ReadDataSource was not called on provider during plan; should've been called") + } +} + +func TestContext2Plan_computedDataCountResource(t *testing.T) { + m := testModule(t, "plan-computed-data-count") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "num": {Type: cty.String, 
Optional: true}, + "compute": {Type: cty.String, Optional: true}, + "foo": {Type: cty.String, Computed: true}, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + // make sure we created 3 "bar"s + for i := 0; i < 3; i++ { + addr := addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "aws_vpc", + Name: "bar", + }.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance) + + if rcs := plan.Changes.ResourceInstance(addr); rcs == nil { + t.Fatalf("missing changes for %s", addr) + } + } +} + +func TestContext2Plan_localValueCount(t *testing.T) { + m := testModule(t, "plan-local-value-count") + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + // make sure we created 3 "foo"s + for i := 0; i < 3; i++ { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.IntKey(i)).Absolute(addrs.RootModuleInstance) + + if rcs := plan.Changes.ResourceInstance(addr); rcs == nil { + t.Fatalf("missing changes for %s", addr) + } + } +} + +func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { + m := testModule(t, "plan-data-resource-becomes-computed") + p := testProvider("aws") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + 
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"foo":      {Type: cty.String, Optional: true},
					"computed": {Type: cty.String, Computed: true},
				},
			},
		},
		DataSources: map[string]*configschema.Block{
			"aws_data_source": {
				Attributes: map[string]*configschema.Attribute{
					"id":  {Type: cty.String, Computed: true},
					"foo": {Type: cty.String, Optional: true},
				},
			},
		},
	})

	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		fooVal := req.ProposedNewState.GetAttr("foo")
		return providers.PlanResourceChangeResponse{
			PlannedState: cty.ObjectVal(map[string]cty.Value{
				"foo":      fooVal,
				"computed": cty.UnknownVal(cty.String),
			}),
			PlannedPrivate: req.PriorPrivate,
		}
	}

	schema := p.GetProviderSchemaResponse.DataSources["aws_data_source"].Block
	ty := schema.ImpliedType()

	p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
		// This should not be called, because the configuration for the
		// data resource contains an unknown value for "foo".
		Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")),
	}

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("data.aws_data_source.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"i-abc123","foo":"baz"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors during plan: %s", diags.Err())
	}

	rcs := plan.Changes.ResourceInstance(addrs.Resource{
		Mode: addrs.DataResourceMode,
		Type: "aws_data_source",
		Name: "foo",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
	if rcs == nil {
		t.Logf("full changeset: %s", spew.Sdump(plan.Changes))
		// NOTE(review): message says "aws_data_resource" but the address
		// looked up above is data.aws_data_source.foo — likely a typo in
		// the failure message only.
		t.Fatalf("missing diff for data.aws_data_resource.foo")
	}

	rc, err := rcs.Decode(ty)
	if err != nil {
		t.Fatal(err)
	}

	if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseConfigUnknown; got != want {
		t.Errorf("wrong ActionReason\ngot:  %s\nwant: %s", got, want)
	}

	// foo should now be unknown
	foo := rc.After.GetAttr("foo")
	if foo.IsKnown() {
		t.Fatalf("foo should be unknown, got %#v", foo)
	}
}

// TestContext2Plan_computedList verifies that a computed list attribute is
// planned as an unknown list, and that a reference into it is unknown too.
func TestContext2Plan_computedList(t *testing.T) {
	m := testModule(t, "plan-computed-list")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.String, Optional: true},
					"num":     {Type: cty.String, Optional: true},
					"list":    {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"list":    cty.UnknownVal(cty.List(cty.String)),
				"num":     cty.NumberIntVal(2),
				"compute": cty.StringVal("list.#"),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// GH-8695. This tests that you can index into a computed list on a
// splatted resource.
// TestContext2Plan_computedMultiIndex verifies indexing into a computed list
// across a splatted, counted resource: every foo[*] plans an unknown "ip" list
// and bar[0]'s "foo" (derived from those) is unknown as well.
func TestContext2Plan_computedMultiIndex(t *testing.T) {
	m := testModule(t, "plan-computed-multi-index")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"aws_instance": {
				Attributes: map[string]*configschema.Attribute{
					"compute": {Type: cty.String, Optional: true},
					"foo":     {Type: cty.List(cty.String), Optional: true},
					"ip":      {Type: cty.List(cty.String), Computed: true},
				},
			},
		},
	})

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 3 {
		t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.foo[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":      cty.UnknownVal(cty.List(cty.String)),
				"foo":     cty.NullVal(cty.List(cty.String)),
				"compute": cty.StringVal("ip.#"),
			}), ric.After)
		case "aws_instance.foo[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"ip":      cty.UnknownVal(cty.List(cty.String)),
				"foo":     cty.NullVal(cty.List(cty.String)),
				"compute": cty.StringVal("ip.#"),
			}), ric.After)
		case "aws_instance.bar[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo": cty.UnknownVal(cty.List(cty.String)),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_count verifies that a count of 5 plans five indexed foo
// instances plus one bar that joins their values.
func TestContext2Plan_count(t *testing.T) {
	m := testModule(t, "plan-count")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 6 {
		t.Fatal("expected 6 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo,foo,foo,foo,foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[2]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[3]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[4]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("foo"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_countComputed verifies that a count depending on a computed
// resource attribute is a plan-time error.
func TestContext2Plan_countComputed(t *testing.T) {
	m := testModule(t, "plan-count-computed")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if err == nil {
		t.Fatal("should error")
	}
}

// TestContext2Plan_countComputedModule is the same check when the computed
// count lives in a child module, asserting the specific error message.
func TestContext2Plan_countComputedModule(t *testing.T) {
	m := testModule(t, "plan-count-computed-module")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts)

	expectedErr := `The "count" value depends on resource attributes`
	if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) {
		t.Fatalf("expected err would contain %q\nerr: %s\n",
			expectedErr, err)
	}
}

// TestContext2Plan_countModuleStatic verifies that a statically-known count
// passed into a module plans the expected indexed child instances.
func TestContext2Plan_countModuleStatic(t *testing.T) {
	m := testModule(t, "plan-count-module-static")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 3 {
		t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.foo[0]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.child.aws_instance.foo[1]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "module.child.aws_instance.foo[2]":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_countModuleStaticGrandchild is the same check through two
// levels of module nesting.
func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) {
	m := testModule(t, "plan-count-module-static-grandchild")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 3 {
		t.Fatal("expected 3 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case
"module.child.module.child.aws_instance.foo[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.child.module.child.aws_instance.foo[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.child.module.child.aws_instance.foo[2]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_countIndex(t *testing.T) { + m := testModule(t, "plan-count-index") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("0"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("1"), + "type": cty.UnknownVal(cty.String), 
+ }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_countVar(t *testing.T) { + m := testModule(t, "plan-count-var") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "instance_count": &InputValue{ + Value: cty.StringVal("3"), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 4 { + t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo,foo,foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[2]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), 
ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_countZero(t *testing.T) { + m := testModule(t, "plan-count-zero") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + }, + }) + + // This schema contains a DynamicPseudoType, and therefore can't go through any shim functions + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + expected := cty.TupleVal(nil) + + foo := ric.After.GetAttr("foo") + + if !cmp.Equal(expected, foo, valueComparer) { + t.Fatal(cmp.Diff(expected, foo, valueComparer)) + } +} + +func TestContext2Plan_countOneIndex(t *testing.T) { + m := testModule(t, "plan-count-one-index") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[0]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_countDecreaseToOne(t *testing.T) { + m := testModule(t, "plan-count-dec") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + 
root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 4 { + t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("bar"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo": + if res.Action != plans.NoOp { + t.Fatalf("resource %s should be unchanged", i) + } + case "aws_instance.foo[1]": + if res.Action != plans.Delete { + t.Fatalf("expected resource delete, got %s", res.Action) + } + case "aws_instance.foo[2]": + if res.Action != plans.Delete { + t.Fatalf("expected resource delete, got %s", res.Action) + } + default: + t.Fatal("unknown instance:", i) + } + } + + expectedState := `aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo.1: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo.2: + ID = bar + provider = 
provider["registry.opentofu.org/hashicorp/aws"]`

	if plan.PriorState.String() != expectedState {
		// Typo fix in the failure message: "epected" -> "expected".
		t.Fatalf("expected state:\n%q\n\ngot state:\n%q\n", expectedState, plan.PriorState.String())
	}
}

// TestContext2Plan_countIncreaseFromNotSet starts from a state containing a
// single no-index instance ("aws_instance.foo") and plans a config with
// count set, expecting the existing instance to be kept as foo[0] and the
// additional instances to be created.
func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) {
	m := testModule(t, "plan-count-inc")
	p := testProvider("aws")
	p.PlanResourceChangeFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar","type":"aws_instance","foo":"foo"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 4 {
		t.Fatal("expected 4 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {

		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.bar":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"id":   cty.UnknownVal(cty.String),
				"foo":  cty.StringVal("bar"),
				"type": cty.UnknownVal(cty.String),
			}), ric.After)
		case "aws_instance.foo[0]":
			// Existing instance adopted as index 0: no change expected.
			if res.Action != plans.NoOp {
				t.Fatalf("resource %s should be unchanged", i)
			}
		case "aws_instance.foo[1]":
			if res.Action != plans.Create {
				t.Fatalf("expected resource create, got %s", res.Action)
			}
			checkVals(t,
objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[2]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_countIncreaseFromOne(t *testing.T) { + m := testModule(t, "plan-count-inc") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 4 { + t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": 
cty.StringVal("bar"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[0]": + if res.Action != plans.NoOp { + t.Fatalf("resource %s should be unchanged", i) + } + case "aws_instance.foo[1]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[2]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +// https://github.com/PeoplePerHour/terraform/pull/11 +// +// This tests a case where both a "resource" and "resource.0" are in +// the state file, which apparently is a reasonable backwards compatibility +// concern found in the above 3rd party repo. 
+func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { + m := testModule(t, "plan-count-inc") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo":"foo","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 5 { + t.Fatal("expected 5 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("bar"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo": + if res.Action != plans.Delete { + t.Fatalf("resource %s should be removed", i) + } + case 
"aws_instance.foo[0]": + if res.Action != plans.NoOp { + t.Fatalf("resource %s should be unchanged", i) + } + case "aws_instance.foo[1]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[2]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +// A common pattern in TF configs is to have a set of resources with the same +// count and to use count.index to create correspondences between them: +// +// foo_id = "${foo.bar.*.id[count.index]}" +// +// This test is for the situation where some instances already exist and the +// count is increased. In that case, we should see only the create diffs +// for the new instances and not any update diffs for the existing ones. 
+func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { + m := testModule(t, "plan-count-splat-reference") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "name": {Type: cty.String, Optional: true}, + "foo_name": {Type: cty.String, Optional: true}, + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","name":"foo 0"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","name":"foo 1"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo_name":"foo 0"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","foo_name":"foo 1"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, 
DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 6 { + t.Fatal("expected 6 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar[0]", "aws_instance.bar[1]", "aws_instance.foo[0]", "aws_instance.foo[1]": + if res.Action != plans.NoOp { + t.Fatalf("resource %s should be unchanged", i) + } + case "aws_instance.bar[2]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + // The instance ID changed, so just check that the name updated + if ric.After.GetAttr("foo_name") != cty.StringVal("foo 2") { + t.Fatalf("resource %s attr \"foo_name\" should be changed", i) + } + case "aws_instance.foo[2]": + if res.Action != plans.Create { + t.Fatalf("expected resource create, got %s", res.Action) + } + // The instance ID changed, so just check that the name updated + if ric.After.GetAttr("name") != cty.StringVal("foo 2") { + t.Fatalf("resource %s attr \"name\" should be changed", i) + } + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_forEach(t *testing.T) { + m := testModule(t, "plan-for-each") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 8 { + t.Fatal("expected 8 changes, got", len(plan.Changes.Resources)) + } + + for _, 
res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + _, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + } +} + +func TestContext2Plan_forEachUnknownValue(t *testing.T) { + // This module has a variable defined, but it's value is unknown. We + // expect this to produce an error, but not to panic. + m := testModule(t, "plan-for-each-unknown-value") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": { + Value: cty.UnknownVal(cty.String), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + // Should get this error: + // Invalid for_each argument: The "for_each" value depends on resource attributes that cannot be determined until apply... + t.Fatal("succeeded; want errors") + } + + gotErrStr := diags.Err().Error() + wantErrStr := "Invalid for_each argument" + if !strings.Contains(gotErrStr, wantErrStr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } + + // We should have a diagnostic that is marked as being caused by unknown + // values. 
	// At least one diagnostic must be flagged as caused by unknown values;
	// otherwise the error classification has regressed.
	for _, diag := range diags {
		if tfdiags.DiagnosticCausedByUnknown(diag) {
			return // don't fall through to the error below
		}
	}
	t.Fatalf("no diagnostic is marked as being caused by unknown\n%s", diags.Err().Error())
}

// TestContext2Plan_destroy seeds state with two root-module instances and
// runs a destroy-mode plan, expecting exactly one Delete change per
// instance and nothing else.
func TestContext2Plan_destroy(t *testing.T) {
	m := testModule(t, "plan-destroy")
	p := testProvider("aws")

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.one").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("aws_instance.two").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"baz"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// DestroyMode: every instance in state should be planned for deletion.
	plan, diags := ctx.Plan(m, state, &PlanOpts{
		Mode: plans.DestroyMode,
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 2 {
		t.Fatal("expected 2 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.one", "aws_instance.two":
			if res.Action != plans.Delete {
				t.Fatalf("resource %s should be removed", i)
			}

		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_moduleDestroy (body continues past this chunk): destroy
// plan spanning a root-module instance and a child-module instance.
func TestContext2Plan_moduleDestroy(t *testing.T) {
	m := testModule(t, "plan-module-destroy")
	p := testProvider("aws")

state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo", "module.child.aws_instance.foo": + if res.Action != plans.Delete { + t.Fatalf("resource %s should be removed", i) + } + + default: + t.Fatal("unknown instance:", i) + } + } +} + +// GH-1835 +func TestContext2Plan_moduleDestroyCycle(t *testing.T) { + m := testModule(t, "plan-module-destroy-gh-1835") + p := testProvider("aws") + + state := states.NewState() + aModule := state.EnsureModule(addrs.RootModuleInstance.Child("a_module", addrs.NoKey)) + aModule.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.a").Resource, + 
&states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + bModule := state.EnsureModule(addrs.RootModuleInstance.Child("b_module", addrs.NoKey)) + bModule.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "module.a_module.aws_instance.a", "module.b_module.aws_instance.b": + if res.Action != plans.Delete { + t.Fatalf("resource %s should be removed", i) + } + + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { + m := testModule(t, "plan-module-destroy-multivar") + p := testProvider("aws") + + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar0"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + 
) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar1"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "module.child.aws_instance.foo[0]", "module.child.aws_instance.foo[1]": + if res.Action != plans.Delete { + t.Fatalf("resource %s should be removed", i) + } + + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_pathVar(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + + m := testModule(t, "plan-path-var") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "cwd": {Type: cty.String, Optional: true}, + "module": {Type: cty.String, Optional: true}, + "root": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if 
diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo": + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", i) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "cwd": cty.StringVal(filepath.ToSlash(cwd + "/barpath")), + "module": cty.StringVal(filepath.ToSlash(m.Module.SourceDir + "/foopath")), + "root": cty.StringVal(filepath.ToSlash(m.Module.SourceDir + "/barpath")), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_diffVar(t *testing.T) { + m := testModule(t, "plan-diffvar") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","num":"2","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + 
// TestContext2Plan_closeProvider verifies that providers instantiated during a
// plan walk are closed once the plan completes, even when the provider is
// configured (with an alias) inside a child module.
func TestContext2Plan_closeProvider(t *testing.T) {
	// this fixture only has an aliased provider located in the module, to make
	// sure that the provider name contains a path more complex than
	// "provider.aws".
	m := testModule(t, "plan-close-module-provider")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	// The mock records Close calls; the plan graph must include a
	// close-provider node for the module-scoped aliased provider.
	if !p.CloseCalled {
		t.Fatal("provider not closed")
	}
}
// This tests that configurations with UUIDs don't produce errors.
// For shadows, this would produce errors since a UUID changes every time.
func TestContext2Plan_shadowUuid(t *testing.T) {
	m := testModule(t, "plan-shadow-uuid")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	// Only asserts that planning succeeds; the fixture exercises the
	// uuid() interpolation, which must not be treated as a diff error.
	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
}
// TestContext2Plan_requiresReplace verifies that when a provider reports an
// attribute path in RequiresReplace during PlanResourceChange, the plan
// records a DeleteThenCreate action with reason "cannot update".
func TestContext2Plan_requiresReplace(t *testing.T) {
	m := testModule(t, "plan-requires-replace")
	p := testProvider("test")
	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
		Provider: providers.Schema{
			Block: &configschema.Block{},
		},
		ResourceTypes: map[string]providers.Schema{
			"test_thing": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"v": {
							Type:     cty.String,
							Required: true,
						},
					},
				},
			},
		},
	}
	// The mock provider flags attribute "v" as forcing replacement on change.
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
		return providers.PlanResourceChangeResponse{
			PlannedState: req.ProposedNewState,
			RequiresReplace: []cty.Path{
				cty.GetAttrPath("v"),
			},
		}
	}

	// Prior state has v="hello"; the fixture config changes it, so the
	// provider's RequiresReplace must kick in.
	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("test_thing.foo").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"v":"hello"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, state, DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}

	schema := p.GetProviderSchemaResponse.ResourceTypes["test_thing"].Block
	ty := schema.ImpliedType()

	if got, want := len(plan.Changes.Resources), 1; got != want {
		t.Fatalf("got %d changes; want %d", got, want)
	}

	for _, res := range plan.Changes.Resources {
		t.Run(res.Addr.String(), func(t *testing.T) {
			ric, err := res.Decode(ty)
			if err != nil {
				t.Fatal(err)
			}

			switch i := ric.Addr.String(); i {
			case "test_thing.foo":
				// A forced replacement plans as delete-then-create with the
				// "cannot update" action reason.
				if got, want := ric.Action, plans.DeleteThenCreate; got != want {
					t.Errorf("wrong action\ngot: %s\nwant: %s", got, want)
				}
				if got, want := ric.ActionReason, plans.ResourceInstanceReplaceBecauseCannotUpdate; got != want {
					t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
				}
				checkVals(t, objectVal(t, schema, map[string]cty.Value{
					"v": cty.StringVal("goodbye"),
				}), ric.After)
			default:
				t.Fatalf("unexpected resource instance %s", i)
			}
		})
	}
}
root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","num":"2","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"baz"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + t.Run(res.Addr.String(), func(t *testing.T) { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.bar": + if got, want := res.Action, plans.DeleteThenCreate; got != want { + t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) + } + if got, want := res.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.StringVal("2"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo": + if got, want := res.Action, plans.NoOp; got != want { + t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) + } + if got, want := res.ActionReason, 
plans.ResourceInstanceChangeNoReason; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + default: + t.Fatal("unknown instance:", i) + } + }) + } +} + +func TestContext2Plan_taintIgnoreChanges(t *testing.T) { + m := testModule(t, "plan-taint-ignore-changes") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "vars": {Type: cty.String, Optional: true}, + "type": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"foo","vars":"foo","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo": + if got, want := res.Action, plans.DeleteThenCreate; got != want { + t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) + } + if got, want := res.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != 
want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.StringVal("foo"), + "vars": cty.StringVal("foo"), + "type": cty.StringVal("aws_instance"), + }), ric.Before) + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "vars": cty.StringVal("foo"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +// Fails about 50% of the time before the fix for GH-4982, covers the fix. +func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) { + m := testModule(t, "plan-taint-interpolated-count") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + for i := 0; i < 100; i++ { + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state.DeepCopy(), 
SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 3 { + t.Fatal("expected 3 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo[0]": + if got, want := ric.Action, plans.DeleteThenCreate; got != want { + t.Errorf("wrong action\ngot: %s\nwant: %s", got, want) + } + if got, want := ric.ActionReason, plans.ResourceInstanceReplaceBecauseTainted; got != want { + t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.StringVal("bar"), + "type": cty.StringVal("aws_instance"), + }), ric.Before) + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "aws_instance.foo[1]", "aws_instance.foo[2]": + if res.Action != plans.NoOp { + t.Fatalf("resource %s should not be changed", i) + } + default: + t.Fatal("unknown instance:", i) + } + } + } +} + +func TestContext2Plan_targeted(t *testing.T) { + m := testModule(t, "plan-targeted") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := 
p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.foo": + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", i) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "num": cty.NumberIntVal(2), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +// Test that targeting a module properly plans any inputs that depend +// on another module. +func TestContext2Plan_targetedCrossModule(t *testing.T) { + m := testModule(t, "plan-targeted-cross-module") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("B", addrs.NoKey), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", ric.Addr) + } + switch i := ric.Addr.String(); i { + case "module.A.aws_instance.foo": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": 
cty.StringVal("bar"), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.B.aws_instance.bar": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "foo": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_targetedModuleWithProvider(t *testing.T) { + m := testModule(t, "plan-targeted-module-with-provider") + p := testProvider("null") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "key": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "null_resource": { + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child2", addrs.NoKey), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["null_resource"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if ric.Addr.String() != "module.child2.null_resource.foo" { + t.Fatalf("unexpcetd resource: %s", ric.Addr) + } +} + +func TestContext2Plan_targetedOrphan(t *testing.T) { + m := testModule(t, "plan-targeted-orphan") + p := testProvider("aws") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("aws_instance.orphan").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-789xyz"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.nottargeted").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "orphan", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "aws_instance.orphan": + if res.Action != plans.Delete { + t.Fatalf("resource %s should be destroyed", ric.Addr) + } + default: + t.Fatal("unknown instance:", i) + } + } +} + +// https://github.com/hashicorp/terraform/issues/2538 +func TestContext2Plan_targetedModuleOrphan(t *testing.T) { + m := testModule(t, "plan-targeted-module-orphan") + p := testProvider("aws") + + state := states.NewState() + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.orphan").Resource, + &states.ResourceInstanceObjectSrc{ + 
Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-789xyz"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.nottargeted").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child", addrs.NoKey).Resource( + addrs.ManagedResourceMode, "aws_instance", "orphan", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if ric.Addr.String() != "module.child.aws_instance.orphan" { + t.Fatalf("unexpected resource :%s", ric.Addr) + } + if res.Action != plans.Delete { + t.Fatalf("resource %s should be deleted", ric.Addr) + } +} + +func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { + m := testModule(t, "plan-targeted-module-untargeted-variable") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", 
"blue", + ), + addrs.RootModuleInstance.Child("blue_mod", addrs.NoKey), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", ric.Addr) + } + switch i := ric.Addr.String(); i { + case "aws_instance.blue": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + case "module.blue_mod.aws_instance.mod": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "value": cty.UnknownVal(cty.String), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +// ensure that outputs missing references due to targetting are removed from +// the graph. 
+func TestContext2Plan_outputContainsTargetedResource(t *testing.T) { + m := testModule(t, "plan-untargeted-resource-output") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("mod", addrs.NoKey).Resource( + addrs.ManagedResourceMode, "aws_instance", "a", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("err: %s", diags) + } + if len(diags) != 1 { + t.Fatalf("got %d diagnostics; want 1", diags) + } + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong diagnostic severity %#v; want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "Resource targeting is in effect"; got != want { + t.Errorf("wrong diagnostic summary %#v; want %#v", got, want) + } +} + +// https://github.com/hashicorp/terraform/issues/4515 +func TestContext2Plan_targetedOverTen(t *testing.T) { + m := testModule(t, "plan-targeted-over-ten") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + for i := 0; i < 13; i++ { + key := fmt.Sprintf("aws_instance.foo[%d]", i) + id := fmt.Sprintf("i-abc%d", i) + attrs := fmt.Sprintf(`{"id":"%s","type":"aws_instance"}`, id) + + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr(key).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(attrs), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Targets: []addrs.Targetable{ + 
addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "foo", addrs.IntKey(1), + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + if res.Action != plans.NoOp { + t.Fatalf("unexpected action %s for %s", res.Action, ric.Addr) + } + } +} + +func TestContext2Plan_provider(t *testing.T) { + m := testModule(t, "plan-provider") + p := testProvider("aws") + + var value interface{} + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + value = req.Config.GetAttr("foo").AsString() + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + opts := &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("bar"), + SourceType: ValueFromCaller, + }, + }, + } + + if _, err := ctx.Plan(m, states.NewState(), opts); err != nil { + t.Fatalf("err: %s", err) + } + + if value != "bar" { + t.Fatalf("bad: %#v", value) + } +} + +func TestContext2Plan_varListErr(t *testing.T) { + m := testModule(t, "plan-var-list-err") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, err := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + + if err == nil { + t.Fatal("should error") + } +} + +func TestContext2Plan_ignoreChanges(t *testing.T) { + m := testModule(t, "plan-ignore-changes") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + 
root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("ami-1234abcd"), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if ric.Addr.String() != "aws_instance.foo" { + t.Fatalf("unexpected resource: %s", ric.Addr) + } + + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.StringVal("ami-abcd1234"), + "type": cty.StringVal("aws_instance"), + }), ric.After) +} + +func TestContext2Plan_ignoreChangesWildcard(t *testing.T) { + m := testModule(t, "plan-ignore-changes-wildcard") + p := testProvider("aws") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + // computed attributes should not be set in config + id := req.Config.GetAttr("id") + if !id.IsNull() { + t.Error("computed id set in plan config") + } + + foo := req.Config.GetAttr("foo") + if foo.IsNull() { + t.Error(`missing "foo" during plan, was set to "bar" in state and config`) + } + + return testDiffFn(req) + } + + state := 
states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","instance":"t2.micro","type":"aws_instance","foo":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("ami-1234abcd"), + SourceType: ValueFromCaller, + }, + "bar": &InputValue{ + Value: cty.StringVal("t2.small"), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("unexpected resource diffs in root module: %s", spew.Sdump(plan.Changes.Resources)) + } + } +} + +func TestContext2Plan_ignoreChangesInMap(t *testing.T) { + p := testProvider("test") + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_ignore_changes_map": { + Attributes: map[string]*configschema.Attribute{ + "tags": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + s := states.BuildState(func(ss *states.SyncState) { + ss.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_ignore_changes_map", + Name: "foo", + 
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo","tags":{"ignored":"from state","other":"from state"},"type":"aws_instance"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + m := testModule(t, "plan-ignore-changes-in-map") + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, s, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["test_ignore_changes_map"].Block + ty := schema.ImpliedType() + + if got, want := len(plan.Changes.Resources), 1; got != want { + t.Fatalf("wrong number of changes %d; want %d", got, want) + } + + res := plan.Changes.Resources[0] + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + if res.Action != plans.Update { + t.Fatalf("resource %s should be updated, got %s", ric.Addr, res.Action) + } + + if got, want := ric.Addr.String(), "test_ignore_changes_map.foo"; got != want { + t.Fatalf("unexpected resource address %s; want %s", got, want) + } + + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "tags": cty.MapVal(map[string]cty.Value{ + "ignored": cty.StringVal("from state"), + "other": cty.StringVal("from config"), + }), + }), ric.After) +} + +func TestContext2Plan_ignoreChangesSensitive(t *testing.T) { + m := testModule(t, "plan-ignore-changes-sensitive") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: 
[]byte(`{"id":"bar","ami":"ami-abcd1234","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("ami-1234abcd"), + SourceType: ValueFromCaller, + }, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if ric.Addr.String() != "aws_instance.foo" { + t.Fatalf("unexpected resource: %s", ric.Addr) + } + + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.StringVal("bar"), + "ami": cty.StringVal("ami-abcd1234"), + "type": cty.StringVal("aws_instance"), + }), ric.After) +} + +func TestContext2Plan_moduleMapLiteral(t *testing.T) { + m := testModule(t, "plan-module-map-literal") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "meta": {Type: cty.Map(cty.String), Optional: true}, + "tags": {Type: cty.Map(cty.String), Optional: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + s := req.ProposedNewState.AsValueMap() + m := s["tags"].AsValueMap() + + if m["foo"].AsString() != "bar" { + t.Fatalf("Bad value in tags attr: %#v", m) + } + + meta := 
s["meta"].AsValueMap() + if len(meta) != 0 { + t.Fatalf("Meta attr not empty: %#v", meta) + } + return testDiffFn(req) + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } +} + +func TestContext2Plan_computedValueInMap(t *testing.T) { + m := testModule(t, "plan-computed-value-in-map") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "looked_up": {Type: cty.String, Optional: true}, + }, + }, + "aws_computed_source": { + Attributes: map[string]*configschema.Attribute{ + "computed_read_only": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp = testDiffFn(req) + + if req.TypeName != "aws_computed_source" { + return + } + + planned := resp.PlannedState.AsValueMap() + planned["computed_read_only"] = cty.UnknownVal(cty.String) + resp.PlannedState = cty.ObjectVal(planned) + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block + + ric, err := res.Decode(schema.ImpliedType()) + if err != nil { 
+ t.Fatal(err) + } + + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", ric.Addr) + } + + switch i := ric.Addr.String(); i { + case "aws_computed_source.intermediates": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "computed_read_only": cty.UnknownVal(cty.String), + }), ric.After) + case "module.test_mod.aws_instance.inner2": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "looked_up": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { + m := testModule(t, "plan-module-variable-from-splat") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "thing": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if len(plan.Changes.Resources) != 4 { + t.Fatal("expected 4 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block + + ric, err := res.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", ric.Addr) + } + + switch i := ric.Addr.String(); i { + case "module.mod1.aws_instance.test[0]", + "module.mod1.aws_instance.test[1]", + "module.mod2.aws_instance.test[0]", + "module.mod2.aws_instance.test[1]": + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "thing": 
cty.StringVal("doesnt"), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { + m := testModule(t, "plan-cbd-depends-datasource") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "num": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.Number, Optional: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + computedVal := req.ProposedNewState.GetAttr("computed") + if computedVal.IsNull() { + computedVal = cty.UnknownVal(cty.String) + } + return providers.PlanResourceChangeResponse{ + PlannedState: cty.ObjectVal(map[string]cty.Value{ + "num": req.ProposedNewState.GetAttr("num"), + "computed": computedVal, + }), + } + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + cfg := req.Config.AsValueMap() + cfg["id"] = cty.StringVal("data_id") + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(cfg), + } + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + seenAddrs := make(map[string]struct{}) + for _, res := range plan.Changes.Resources { + var schema *configschema.Block + switch res.Addr.Resource.Resource.Mode { + case 
addrs.DataResourceMode: + schema = p.GetProviderSchemaResponse.DataSources[res.Addr.Resource.Resource.Type].Block + case addrs.ManagedResourceMode: + schema = p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block + } + + ric, err := res.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + seenAddrs[ric.Addr.String()] = struct{}{} + + t.Run(ric.Addr.String(), func(t *testing.T) { + switch i := ric.Addr.String(); i { + case "aws_instance.foo[0]": + if res.Action != plans.Create { + t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "num": cty.StringVal("2"), + "computed": cty.StringVal("data_id"), + }), ric.After) + case "aws_instance.foo[1]": + if res.Action != plans.Create { + t.Fatalf("resource %s should be created, got %s", ric.Addr, ric.Action) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "num": cty.StringVal("2"), + "computed": cty.StringVal("data_id"), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + }) + } + + wantAddrs := map[string]struct{}{ + "aws_instance.foo[0]": {}, + "aws_instance.foo[1]": {}, + } + if !cmp.Equal(seenAddrs, wantAddrs) { + t.Errorf("incorrect addresses in changeset:\n%s", cmp.Diff(wantAddrs, seenAddrs)) + } +} + +// interpolated lists need to be stored in the original order. 
+// TestContext2Plan_listOrder verifies that interpolated lists keep their
+// original element order: after planning the "plan-list-order" fixture, the
+// planned "after" values for aws_instance.a and aws_instance.b must be equal.
+func TestContext2Plan_listOrder(t *testing.T) {
+	m := testModule(t, "plan-list-order")
+	p := testProvider("aws")
+	// Minimal schema: one optional list-of-string attribute "foo".
+	p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{
+		ResourceTypes: map[string]*configschema.Block{
+			"aws_instance": {
+				Attributes: map[string]*configschema.Attribute{
+					"foo": {Type: cty.List(cty.String), Optional: true},
+				},
+			},
+		},
+	})
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
+		},
+	})
+
+	plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
+	if diags.HasErrors() {
+		t.Fatalf("unexpected errors: %s", diags.Err())
+	}
+
+	// Look up the planned change for each instance and require their After
+	// values to be exactly equal — element order included.
+	changes := plan.Changes
+	rDiffA := changes.ResourceInstance(addrs.Resource{
+		Mode: addrs.ManagedResourceMode,
+		Type: "aws_instance",
+		Name: "a",
+	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
+	rDiffB := changes.ResourceInstance(addrs.Resource{
+		Mode: addrs.ManagedResourceMode,
+		Type: "aws_instance",
+		Name: "b",
+	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance))
+
+	if !cmp.Equal(rDiffA.After, rDiffB.After, valueComparer) {
+		t.Fatal(cmp.Diff(rDiffA.After, rDiffB.After, valueComparer))
+	}
+}
+
+// Make sure ignore-changes doesn't interfere with set/list/map diffs.
+// If a resource was being replaced by a RequiresNew attribute that gets
+// ignored, we need to filter the diff properly to properly update rather than
+// replace.
+func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) { + m := testModule(t, "plan-ignore-changes-with-flatmaps") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "user_data": {Type: cty.String, Optional: true}, + "require_new": {Type: cty.String, Optional: true}, + + // This test predates the 0.12 work to integrate cty and + // HCL, and so it was ported as-is where its expected + // test output was clearly expecting a list of maps here + // even though it is named "set". + "set": {Type: cty.List(cty.Map(cty.String)), Optional: true}, + "lst": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{ + "user_data":"x","require_new":"", + "set":[{"a":"1"}], + "lst":["j"] + }`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + res := plan.Changes.Resources[0] + schema := p.GetProviderSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block + + ric, err := res.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + if res.Action != plans.Update { + t.Fatalf("resource %s should be updated, got %s", ric.Addr, ric.Action) + } + + if 
ric.Addr.String() != "aws_instance.foo" { + t.Fatalf("unknown resource: %s", ric.Addr) + } + + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "lst": cty.ListVal([]cty.Value{ + cty.StringVal("j"), + cty.StringVal("k"), + }), + "require_new": cty.StringVal(""), + "user_data": cty.StringVal("x"), + "set": cty.ListVal([]cty.Value{cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("1"), + "b": cty.StringVal("2"), + })}), + }), ric.After) +} + +// TestContext2Plan_resourceNestedCount ensures resource sets that depend on +// the count of another resource set (ie: count of a data source that depends +// on another data source's instance count - data.x.foo.*.id) get properly +// normalized to the indexes they should be. This case comes up when there is +// an existing state (after an initial apply). +func TestContext2Plan_resourceNestedCount(t *testing.T) { + m := testModule(t, "nested-resource-count-plan") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo0","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo1","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: 
states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar0","type":"aws_instance"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar1","type":"aws_instance"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.foo")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.baz[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz0","type":"aws_instance"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.baz[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"baz1","type":"aws_instance"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("aws_instance.bar")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("validate errors: %s", diags.Err()) + } + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("plan errors: %s", diags.Err()) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("resource %s should not change, plan returned %s", res.Addr, res.Action) + } + } +} + +// Higher level 
test at TestResource_dataSourceListApplyPanic +func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) { + m := testModule(t, "plan-computed-attr-ref-type-mismatch") + p := testProvider("aws") + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + var diags tfdiags.Diagnostics + if req.TypeName == "aws_instance" { + amiVal := req.Config.GetAttr("ami") + if amiVal.Type() != cty.String { + diags = diags.Append(fmt.Errorf("Expected ami to be cty.String, got %#v", amiVal)) + } + } + return providers.ValidateResourceConfigResponse{ + Diagnostics: diags, + } + } + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.TypeName != "aws_ami_list" { + t.Fatalf("Reached apply for unexpected resource type! %s", req.TypeName) + } + // Pretend like we make a thing and the computed list "ids" is populated + s := req.PlannedState.AsValueMap() + s["id"] = cty.StringVal("someid") + s["ids"] = cty.ListVal([]cty.Value{ + cty.StringVal("ami-abc123"), + cty.StringVal("ami-bcd345"), + }) + + resp.NewState = cty.ObjectVal(s) + return + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("Succeeded; want type mismatch error for 'ami' argument") + } + + expected := `Inappropriate value for attribute "ami"` + if errStr := diags.Err().Error(); !strings.Contains(errStr, expected) { + t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", errStr, expected) + } +} + +func TestContext2Plan_selfRef(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: 
map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + m := testModule(t, "plan-self-ref") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected validation failure: %s", diags.Err()) + } + + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("plan succeeded; want error") + } + + gotErrStr := diags.Err().Error() + wantErrStr := "Self-referential block" + if !strings.Contains(gotErrStr, wantErrStr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } +} + +func TestContext2Plan_selfRefMulti(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + m := testModule(t, "plan-self-ref-multi") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected validation failure: %s", diags.Err()) + } + + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("plan succeeded; want error") + } + + gotErrStr := diags.Err().Error() + wantErrStr := "Self-referential block" + if !strings.Contains(gotErrStr, wantErrStr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } +} + +func TestContext2Plan_selfRefMultiAll(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ 
+ ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }) + + m := testModule(t, "plan-self-ref-multi-all") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected validation failure: %s", diags.Err()) + } + + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("plan succeeded; want error") + } + + gotErrStr := diags.Err().Error() + + // The graph is checked for cycles before we can walk it, so we don't + // encounter the self-reference check. + // wantErrStr := "Self-referential block" + wantErrStr := "Cycle" + if !strings.Contains(gotErrStr, wantErrStr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } +} + +func TestContext2Plan_invalidOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "aws_data_source" "name" {} + +output "out" { + value = data.aws_data_source.name.missing +}`, + }) + + p := testProvider("aws") + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_id"), + "foo": cty.StringVal("foo"), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + // Should get this error: + // Unsupported attribute: This object does not have an attribute named "missing" + t.Fatal("succeeded; want errors") + } + + gotErrStr := diags.Err().Error() + wantErrStr := "Unsupported attribute" + if !strings.Contains(gotErrStr, wantErrStr) { + 
t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } +} + +func TestContext2Plan_invalidModuleOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "child/main.tf": ` +data "aws_data_source" "name" {} + +output "out" { + value = "${data.aws_data_source.name.missing}" +}`, + "main.tf": ` +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + foo = "${module.child.out}" +}`, + }) + + p := testProvider("aws") + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_id"), + "foo": cty.StringVal("foo"), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + // Should get this error: + // Unsupported attribute: This object does not have an attribute named "missing" + t.Fatal("succeeded; want errors") + } + + gotErrStr := diags.Err().Error() + wantErrStr := "Unsupported attribute" + if !strings.Contains(gotErrStr, wantErrStr) { + t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", gotErrStr, wantErrStr) + } +} + +func TestContext2Plan_variableValidation(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "x" { + default = "bar" +} + +resource "aws_instance" "foo" { + foo = var.x +}`, + }) + + p := testProvider("aws") + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) { + foo := req.Config.GetAttr("foo").AsString() + if foo == "bar" { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo cannot be bar")) + } + return + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = 
req.ProposedNewState
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if !diags.HasErrors() {
		// Should get this error:
		// foo cannot be bar
		t.Fatal("succeeded; want errors")
	}

	// The provider's ValidateResourceConfigFn rejects foo == "bar", so the
	// plan diagnostics must carry that message through.
	if got, want := diags.Err().Error(), "foo cannot be bar"; !strings.Contains(got, want) {
		t.Fatalf("missing expected error\ngot: %s\n\nwant: error containing %q", got, want)
	}
}

// TestContext2Plan_variableSensitivity verifies that a value derived from a
// sensitive variable is planned with the sensitive mark recorded in
// AfterValMarks for the affected attribute path.
func TestContext2Plan_variableSensitivity(t *testing.T) {
	m := testModule(t, "plan-variable-sensitivity")

	p := testProvider("aws")
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo": cty.StringVal("foo").Mark(marks.Sensitive),
			}), ric.After)
			// A create has no prior state, so no marks should be recorded
			// on the "before" side.
			if len(res.ChangeSrc.BeforeValMarks) != 0 {
				t.Errorf("unexpected BeforeValMarks: %#v", res.ChangeSrc.BeforeValMarks)
			}
			if len(res.ChangeSrc.AfterValMarks) != 1 {
				t.Errorf("unexpected AfterValMarks: %#v", res.ChangeSrc.AfterValMarks)
				continue
			}
			pvm := res.ChangeSrc.AfterValMarks[0]
			if got, want := pvm.Path, cty.GetAttrPath("foo"); !got.Equals(want) {
				t.Errorf("unexpected path for mark\n got: %#v\nwant: %#v", got, want)
			}
			if got, want := pvm.Marks, cty.NewValueMarks(marks.Sensitive); !got.Equal(want) {
				t.Errorf("unexpected value for mark\n got: %#v\nwant: %#v", got, want)
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

// TestContext2Plan_variableSensitivityModule verifies that sensitivity is
// preserved when a sensitive root variable is passed into a child module,
// including when the child module re-declares the variable as sensitive.
func TestContext2Plan_variableSensitivityModule(t *testing.T) {
	m := testModule(t, "plan-variable-sensitivity-module")

	p := testProvider("aws")
	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
		resp.PlannedState = req.ProposedNewState
		return
	}

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode: plans.NormalMode,
		SetVariables: InputValues{
			"sensitive_var": {Value: cty.NilVal},
			"another_var": &InputValue{
				Value:      cty.StringVal("boop"),
				SourceType: ValueFromCaller,
			},
		},
	})
	if diags.HasErrors() {
		t.Fatalf("unexpected errors: %s", diags.Err())
	}
	schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block
	ty := schema.ImpliedType()

	if len(plan.Changes.Resources) != 1 {
		t.Fatal("expected 1 changes, got", len(plan.Changes.Resources))
	}

	for _, res := range plan.Changes.Resources {
		if res.Action != plans.Create {
			t.Fatalf("expected resource creation, got %s", res.Action)
		}
		ric, err := res.Decode(ty)
		if err != nil {
			t.Fatal(err)
		}

		switch i := ric.Addr.String(); i {
		case "module.child.aws_instance.foo":
			checkVals(t, objectVal(t, schema, map[string]cty.Value{
				"foo":   cty.StringVal("foo").Mark(marks.Sensitive),
				"value": cty.StringVal("boop").Mark(marks.Sensitive),
			}), ric.After)
			if len(res.ChangeSrc.BeforeValMarks) != 0 {
				t.Errorf("unexpected BeforeValMarks: %#v", res.ChangeSrc.BeforeValMarks)
			}
			if len(res.ChangeSrc.AfterValMarks) != 2 {
				t.Errorf("expected AfterValMarks to contain two elements: %#v", res.ChangeSrc.AfterValMarks)
				continue
			}
			// validate that the after marks have "foo" and "value"
			contains := func(pvmSlice []cty.PathValueMarks, stepName string) bool {
				for _, pvm := range pvmSlice {
					if pvm.Path.Equals(cty.GetAttrPath(stepName)) {
						if pvm.Marks.Equal(cty.NewValueMarks(marks.Sensitive)) {
							return true
						}
					}
				}
				return false
			}
			if !contains(res.ChangeSrc.AfterValMarks, "foo") {
				t.Error("expected AfterValMarks to contain \"foo\" with sensitive mark")
			}
			if !contains(res.ChangeSrc.AfterValMarks, "value") {
				t.Error("expected AfterValMarks to contain \"value\" with sensitive mark")
			}
		default:
			t.Fatal("unknown instance:", i)
		}
	}
}

func checkVals(t *testing.T, expected, got cty.Value) {
	t.Helper()
	// The GoStringer format seems to result in the closest thing to a useful
	// diff for values with marks.
	// TODO: if we want to continue using cmp.Diff on cty.Values, we should
	// make a transformer that creates a more comparable structure.
+ valueTrans := cmp.Transformer("gostring", func(v cty.Value) string { + return fmt.Sprintf("%#v\n", v) + }) + if !cmp.Equal(expected, got, valueComparer, typeComparer, equateEmpty) { + t.Fatal(cmp.Diff(expected, got, valueTrans, equateEmpty)) + } +} + +func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) cty.Value { + t.Helper() + v, err := schema.CoerceValue( + cty.ObjectVal(m), + ) + if err != nil { + t.Fatal(err) + } + return v +} + +func TestContext2Plan_requiredModuleOutput(t *testing.T) { + m := testModule(t, "plan-required-output") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "required": {Type: cty.String, Required: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "test_resource.root": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.UnknownVal(cty.String), + }) + case 
"module.mod.test_resource.for_output": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.StringVal("val"), + }) + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + }) + } +} + +func TestContext2Plan_requiredModuleObject(t *testing.T) { + m := testModule(t, "plan-required-whole-mod") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "required": {Type: cty.String, Required: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetProviderSchemaResponse.ResourceTypes["test_resource"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "test_resource.root": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.UnknownVal(cty.String), + }) + case "module.mod.test_resource.for_output": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.StringVal("val"), + }) + default: + t.Fatal("unknown 
instance:", i) + } + + checkVals(t, expected, ric.After) + }) + } +} + +func TestContext2Plan_expandOrphan(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + count = 1 + source = "./mod" +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { +} +`, + }) + + state := states.NewState() + state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(0))).SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"child","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.IntKey(1))).SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"child","type":"aws_instance"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + expected := map[string]plans.Action{ + `module.mod[1].aws_instance.foo`: plans.Delete, + `module.mod[0].aws_instance.foo`: plans.NoOp, + } + + for _, res := range plan.Changes.Resources { + want := expected[res.Addr.String()] + if res.Action != want { + t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) + } + delete(expected, res.Addr.String()) + } + + for res, action := range expected { + t.Errorf("missing %s change for %s", action, res) + } +} + +func TestContext2Plan_indexInVar(t *testing.T) { + m := testModuleInline(t, map[string]string{ + 
"main.tf": `
module "a" {
  count = 1
  source = "./mod"
  in = "test"
}

module "b" {
  count = 1
  source = "./mod"
  in = length(module.a)
}
`,
		"mod/main.tf": `
resource "aws_instance" "foo" {
  foo = var.in
}

variable "in" {
}

output"out" {
  value = aws_instance.foo.id
}
`,
	})

	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts)
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}
}

// TestContext2Plan_targetExpandedAddress verifies that -target can address
// both a single resource instance inside one module instance and an entire
// expanded module instance.
func TestContext2Plan_targetExpandedAddress(t *testing.T) {
	m := testModuleInline(t, map[string]string{
		"main.tf": `
module "mod" {
  count = 3
  source = "./mod"
}
`,
		"mod/main.tf": `
resource "aws_instance" "foo" {
  count = 2
}
`,
	})

	p := testProvider("aws")

	targets := []addrs.Targetable{}
	target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo[0]")
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}
	targets = append(targets, target.Subject)

	target, diags = addrs.ParseTargetStr("module.mod[2]")
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}
	targets = append(targets, target.Subject)

	ctx := testContext2(t, &ContextOpts{
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
		},
	})

	plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{
		Mode:    plans.NormalMode,
		Targets: targets,
	})
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	expected := map[string]plans.Action{
		// the single targeted mod[1] instance
		`module.mod[1].aws_instance.foo[0]`: plans.Create,
		// the whole of mod[2]
		`module.mod[2].aws_instance.foo[0]`: plans.Create,
		`module.mod[2].aws_instance.foo[1]`: plans.Create,
	}

	for _, res := range plan.Changes.Resources {
		want := expected[res.Addr.String()]
		if
res.Action != want { + t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) + } + delete(expected, res.Addr.String()) + } + + for res, action := range expected { + t.Errorf("missing %s change for %s", action, res) + } +} + +func TestContext2Plan_targetResourceInModuleInstance(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + count = 3 + source = "./mod" +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { +} +`, + }) + + p := testProvider("aws") + + target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo") + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + targets := []addrs.Targetable{target.Subject} + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: targets, + }) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + expected := map[string]plans.Action{ + // the single targeted mod[1] instance + `module.mod[1].aws_instance.foo`: plans.Create, + } + + for _, res := range plan.Changes.Resources { + want := expected[res.Addr.String()] + if res.Action != want { + t.Fatalf("expected %s action, got: %q %s", want, res.Addr, res.Action) + } + delete(expected, res.Addr.String()) + } + + for res, action := range expected { + t.Errorf("missing %s change for %s", action, res) + } +} + +func TestContext2Plan_moduleRefIndex(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod" { + for_each = { + a = "thing" + } + in = null + source = "./mod" +} + +module "single" { + source = "./mod" + in = module.mod["a"] +} +`, + "mod/main.tf": ` +variable "in" { +} + +output "out" { + value = "foo" +} + +resource "aws_instance" "foo" { +} +`, + }) + + p := testProvider("aws") + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Plan_noChangeDataPlan(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_data_source" "foo" {} +`, + }) + + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_id"), + "foo": cty.StringVal("foo"), + }), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"data_id", "foo":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("expected NoOp, got: %q %s", res.Addr, res.Action) + } + } +} + +// for_each can reference a resource with 0 instances +func TestContext2Plan_scaleInForEach(t *testing.T) { + p := testProvider("test") + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + m 
= {} +} + +resource "test_instance" "a" { + for_each = local.m +} + +resource "test_instance" "b" { + for_each = test_instance.a +} +`}) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a0"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_instance.a")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + assertNoErrors(t, diags) + + t.Run("test_instance.a[0]", func(t *testing.T) { + instAddr := mustResourceInstanceAddr("test_instance.a[0]") + change := plan.Changes.ResourceInstance(instAddr) + if change == nil { + t.Fatalf("no planned change for %s", instAddr) + } + if got, want := change.PrevRunAddr, instAddr; !want.Equal(got) { + t.Errorf("wrong previous run address for %s %s; want %s", instAddr, got, want) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", instAddr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { + t.Errorf("wrong action reason for %s %s; want %s", instAddr, got, want) + } + }) + t.Run("test_instance.b", func(t *testing.T) { + instAddr := mustResourceInstanceAddr("test_instance.b") + change := 
plan.Changes.ResourceInstance(instAddr) + if change == nil { + t.Fatalf("no planned change for %s", instAddr) + } + if got, want := change.PrevRunAddr, instAddr; !want.Equal(got) { + t.Errorf("wrong previous run address for %s %s; want %s", instAddr, got, want) + } + if got, want := change.Action, plans.Delete; got != want { + t.Errorf("wrong action for %s %s; want %s", instAddr, got, want) + } + if got, want := change.ActionReason, plans.ResourceInstanceDeleteBecauseWrongRepetition; got != want { + t.Errorf("wrong action reason for %s %s; want %s", instAddr, got, want) + } + }) +} + +func TestContext2Plan_targetedModuleInstance(t *testing.T) { + m := testModule(t, "plan-targeted") + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("mod", addrs.IntKey(0)), + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 1 { + t.Fatal("expected 1 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + switch i := ric.Addr.String(); i { + case "module.mod[0].aws_instance.foo": + if res.Action != plans.Create { + t.Fatalf("resource %s should be created", i) + } + checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "num": cty.NumberIntVal(2), + "type": cty.UnknownVal(cty.String), + }), ric.After) + default: + t.Fatal("unknown instance:", i) + } + } +} + +func TestContext2Plan_dataRefreshedInPlan(t *testing.T) { + m := 
testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_data_source" "d" { +} +`}) + + p := testProvider("test") + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("this"), + "foo": cty.NullVal(cty.String), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + d := plan.PriorState.ResourceInstance(mustResourceInstanceAddr("data.test_data_source.d")) + if d == nil || d.Current == nil { + t.Fatal("data.test_data_source.d not found in state:", plan.PriorState) + } + + if d.Current.Status != states.ObjectReady { + t.Fatal("expected data.test_data_source.d to be fully read in refreshed state, got status", d.Current.Status) + } +} + +func TestContext2Plan_dataReferencesResourceDirectly(t *testing.T) { + // When a data resource refers to a managed resource _directly_, any + // pending change for the managed resource will cause the data resource + // to be deferred to the apply step. + // See also TestContext2Plan_dataReferencesResourceIndirectly for the + // other case, where the reference is indirect. + + p := testProvider("test") + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source should not be read")) + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + x = "value" +} + +resource "test_resource" "a" { + value = local.x +} + +// test_resource.a.value can be resolved during plan, but the reference implies +// that the data source should wait until the resource is created. 
+data "test_data_source" "d" { + foo = test_resource.a.value +} + +// ensure referencing an indexed instance that has not yet created will also +// delay reading the data source +resource "test_resource" "b" { + count = 2 + value = local.x +} + +data "test_data_source" "e" { + foo = test_resource.b[0].value +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + rc := plan.Changes.ResourceInstance(addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "d", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) + if rc != nil { + if got, want := rc.ActionReason, plans.ResourceInstanceReadBecauseDependencyPending; got != want { + t.Errorf("wrong ActionReason\ngot: %s\nwant: %s", got, want) + } + } else { + t.Error("no change for test_data_source.e") + } +} + +func TestContext2Plan_dataReferencesResourceIndirectly(t *testing.T) { + // When a data resource refers to a managed resource indirectly, pending + // changes for the managed resource _do not_ cause the data resource to + // be deferred to apply. This is a pragmatic special case added for + // backward compatibility with the old situation where we would _always_ + // eagerly read data resources with known configurations, regardless of + // the plans for their dependencies. + // This test creates an indirection through a local value, but the same + // principle would apply for both input variable and output value + // indirection. + // + // See also TestContext2Plan_dataReferencesResourceDirectly for the + // other case, where the reference is direct. + // This special exception doesn't apply for a data resource that has + // custom conditions; see + // TestContext2Plan_dataResourceChecksManagedResourceChange for that + // situation. 
+ + p := testProvider("test") + var applyCount int64 + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + atomic.AddInt64(&applyCount, 1) + resp.NewState = req.PlannedState + return resp + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + if atomic.LoadInt64(&applyCount) == 0 { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source read before managed resource apply")) + } else { + resp.State = req.Config + } + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + x = "value" +} + +resource "test_resource" "a" { + value = local.x +} + +locals { + y = test_resource.a.value +} + +// test_resource.a.value would ideally cause a pending change for +// test_resource.a to defer this to the apply step, but we intentionally don't +// do that when it's indirect (through a local value, here) as a concession +// to backward compatibility. 
+data "test_data_source" "d" { + foo = local.y +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatalf("successful plan; want an error") + } + + if got, want := diags.Err().Error(), "data source read before managed resource apply"; !strings.Contains(got, want) { + t.Errorf("Missing expected error message\ngot: %s\nwant substring: %s", got, want) + } +} + +func TestContext2Plan_skipRefresh(t *testing.T) { + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`}) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","type":"test_instance"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{ + Mode: plans.NormalMode, + SkipRefresh: true, + }) + assertNoErrors(t, diags) + + if p.ReadResourceCalled { + t.Fatal("Resource should not have been refreshed") + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_dataInModuleDependsOn(t *testing.T) { + p := testProvider("test") + + readDataSourceB := false + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + cfg := 
req.Config.AsValueMap() + foo := cfg["foo"].AsString() + + cfg["id"] = cty.StringVal("ID") + cfg["foo"] = cty.StringVal("new") + + if foo == "b" { + readDataSourceB = true + } + + resp.State = cty.ObjectVal(cfg) + return resp + } + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "a" { + source = "./mod_a" +} + +module "b" { + source = "./mod_b" + depends_on = [module.a] +}`, + "mod_a/main.tf": ` +data "test_data_source" "a" { + foo = "a" +}`, + "mod_b/main.tf": ` +data "test_data_source" "b" { + foo = "b" +}`, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + assertNoErrors(t, diags) + + // The change to data source a should not prevent data source b from being + // read. + if !readDataSourceB { + t.Fatal("data source b was not read during plan") + } +} + +func TestContext2Plan_rpcDiagnostics(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + resp := testDiffFn(req) + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) + return resp + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + 
t.Fatal("expected warnings") + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) + } + } +} + +// ignore_changes needs to be re-applied to the planned value for provider +// using the LegacyTypeSystem +func TestContext2Plan_legacyProviderIgnoreChanges(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + lifecycle { + ignore_changes = [data] + } +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + // this provider "hashes" the data attribute as bar + m["data"] = cty.StringVal("bar") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "data": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected no 
changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_validateIgnoreAll(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + lifecycle { + ignore_changes = all + } +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "data": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + var diags tfdiags.Diagnostics + if req.TypeName == "test_instance" { + if !req.Config.GetAttr("id").IsNull() { + diags = diags.Append(errors.New("id cannot be set in config")) + } + } + return providers.ValidateResourceConfigResponse{ + Diagnostics: diags, + } + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Plan_legacyProviderIgnoreAll(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + lifecycle { + ignore_changes = all + } + data = "foo" +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = 
getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "data": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + plan := req.ProposedNewState.AsValueMap() + // Update both the computed id and the configured data. + // Legacy providers expect tofu to be able to ignore these. + + plan["id"] = cty.StringVal("updated") + plan["data"] = cty.StringVal("updated") + resp.PlannedState = cty.ObjectVal(plan) + resp.LegacyTypeSystem = true + return resp + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"orig","data":"orig"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected NoOp plan, got %s\n", c.Action) + } + } +} + +func TestContext2Plan_dataRemovalNoProvider(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + 
Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + // the provider for this data source is no longer in the config, but that + // should not matter for state removal. + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.d").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"d"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.opentofu.org/local/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + // We still need to be able to locate the provider to decode the + // state, since we do not know during init that this provider is + // only used for an orphaned data source. + addrs.NewProvider("registry.opentofu.org", "local", "test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Plan_noSensitivityChange(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_var" { + default = "hello" + sensitive = true +} + +resource "test_resource" "foo" { + value = var.sensitive_var + sensitive_value = var.sensitive_var +}`, + }) + + p := testProvider("test") + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + 
AttrsJSON: []byte(`{"id":"foo", "value":"hello", "sensitive_value":"hello"}`), + AttrSensitivePaths: []cty.PathValueMarks{ + {Path: cty.Path{cty.GetAttrStep{Name: "value"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + {Path: cty.Path{cty.GetAttrStep{Name: "sensitive_value"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + plan, diags := ctx.Plan(m, state, SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables))) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_variableCustomValidationsSensitive(t *testing.T) { + m := testModule(t, "validate-variable-custom-validations-child-sensitive") + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_nullOutputNoOp(t *testing.T) { + // this should always plan a NoOp change for the output + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "planned" { + value = false ? 
1 : null +} +`, + }) + + ctx := testContext2(t, &ContextOpts{}) + state := states.BuildState(func(s *states.SyncState) { + r := s.Module(addrs.RootModuleInstance) + r.SetOutputValue("planned", cty.NullVal(cty.DynamicPseudoType), false) + }) + plan, diags := ctx.Plan(m, state, DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Outputs { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_createOutput(t *testing.T) { + // this should always plan a NoOp change for the output + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "planned" { + value = 1 +} +`, + }) + + ctx := testContext2(t, &ContextOpts{}) + plan, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Outputs { + if c.Action != plans.Create { + t.Fatalf("expected Create change, got %s for %q", c.Action, c.Addr) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// NOTE: Due to the size of this file, new tests should be added to +// context_plan2_test.go. +//////////////////////////////////////////////////////////////////////////////// diff --git a/pkg/tofu/context_plugins.go b/pkg/tofu/context_plugins.go new file mode 100644 index 00000000000..4c3dd694a87 --- /dev/null +++ b/pkg/tofu/context_plugins.go @@ -0,0 +1,181 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" +) + +// contextPlugins represents a library of available plugins (providers and +// provisioners) which we assume will all be used with the same +// tofu.Context, and thus it'll be safe to cache certain information +// about the providers for performance reasons. +type contextPlugins struct { + providerFactories map[addrs.Provider]providers.Factory + provisionerFactories map[string]provisioners.Factory +} + +func newContextPlugins(providerFactories map[addrs.Provider]providers.Factory, provisionerFactories map[string]provisioners.Factory) *contextPlugins { + return &contextPlugins{ + providerFactories: providerFactories, + provisionerFactories: provisionerFactories, + } +} + +func (cp *contextPlugins) HasProvider(addr addrs.Provider) bool { + _, ok := cp.providerFactories[addr] + return ok +} + +func (cp *contextPlugins) NewProviderInstance(addr addrs.Provider) (providers.Interface, error) { + f, ok := cp.providerFactories[addr] + if !ok { + return nil, fmt.Errorf("unavailable provider %q", addr.String()) + } + + return f() + +} + +func (cp *contextPlugins) HasProvisioner(typ string) bool { + _, ok := cp.provisionerFactories[typ] + return ok +} + +func (cp *contextPlugins) NewProvisionerInstance(typ string) (provisioners.Interface, error) { + f, ok := cp.provisionerFactories[typ] + if !ok { + return nil, fmt.Errorf("unavailable provisioner %q", typ) + } + + return f() +} + +// ProviderSchema uses a temporary instance of the provider with the given +// address to obtain the full schema for all aspects of that provider. 
+// +// ProviderSchema memoizes results by unique provider address, so it's fine +// to repeatedly call this method with the same address if various different +// parts of OpenTofu all need the same schema information. +func (cp *contextPlugins) ProviderSchema(addr addrs.Provider) (providers.ProviderSchema, error) { + // Check the global schema cache first. + // This cache is only written by the provider client, and transparently + // used by GetProviderSchema, but we check it here because at this point we + // may be able to avoid spinning up the provider instance at all. + // + // It's worth noting that ServerCapabilities.GetProviderSchemaOptional is ignored here. + // That is because we're checking *prior* to the provider's instantiation. + // GetProviderSchemaOptional only says that *if we instantiate a provider*, + // then we need to run the get schema call at least once. + // BUG This SHORT CIRCUITS the logic below and is not the only code which inserts provider schemas into the cache!! + schemas, ok := providers.SchemaCache.Get(addr) + if ok { + log.Printf("[TRACE] tofu.contextPlugins: Serving provider %q schema from global schema cache", addr) + return schemas, nil + } + + log.Printf("[TRACE] tofu.contextPlugins: Initializing provider %q to read its schema", addr) + provider, err := cp.NewProviderInstance(addr) + if err != nil { + return schemas, fmt.Errorf("failed to instantiate provider %q to obtain schema: %w", addr, err) + } + defer provider.Close() + + resp := provider.GetProviderSchema() + if resp.Diagnostics.HasErrors() { + return resp, fmt.Errorf("failed to retrieve schema from provider %q: %w", addr, resp.Diagnostics.Err()) + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ return resp, fmt.Errorf("provider %s has invalid negative schema version for its configuration blocks, which is a bug in the provider", addr) + } + + for t, r := range resp.ResourceTypes { + if err := r.Block.InternalValidate(); err != nil { + return resp, fmt.Errorf("provider %s has invalid schema for managed resource type %q, which is a bug in the provider: %w", addr, t, err) + } + if r.Version < 0 { + return resp, fmt.Errorf("provider %s has invalid negative schema version for managed resource type %q, which is a bug in the provider", addr, t) + } + } + + for t, d := range resp.DataSources { + if err := d.Block.InternalValidate(); err != nil { + return resp, fmt.Errorf("provider %s has invalid schema for data resource type %q, which is a bug in the provider: %w", addr, t, err) + } + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + return resp, fmt.Errorf("provider %s has invalid negative schema version for data resource type %q, which is a bug in the provider", addr, t) + } + } + + return resp, nil + } + + // ProviderConfigSchema is a helper wrapper around ProviderSchema which first + // reads the full schema of the given provider and then extracts just the + // provider's configuration schema, which defines what's expected in a + // "provider" block in the configuration when configuring this provider. + func (cp *contextPlugins) ProviderConfigSchema(providerAddr addrs.Provider) (*configschema.Block, error) { + providerSchema, err := cp.ProviderSchema(providerAddr) + if err != nil { + return nil, err + } + + return providerSchema.Provider.Block, nil + } + + // ResourceTypeSchema is a helper wrapper around ProviderSchema which first + // reads the schema of the given provider and then tries to find the schema + // for the resource type of the given resource mode in that provider. 
+// +// ResourceTypeSchema will return an error if the provider schema lookup +// fails, but will return nil if the provider schema lookup succeeds but then +// the provider doesn't have a resource of the requested type. +// +// Managed resource types have versioned schemas, so the second return value +// is the current schema version number for the requested resource. The version +// is irrelevant for other resource modes. +func (cp *contextPlugins) ResourceTypeSchema(providerAddr addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (*configschema.Block, uint64, error) { + providerSchema, err := cp.ProviderSchema(providerAddr) + if err != nil { + return nil, 0, err + } + + schema, version := providerSchema.SchemaForResourceType(resourceMode, resourceType) + return schema, version, nil +} + +// ProvisionerSchema uses a temporary instance of the provisioner with the +// given type name to obtain the schema for that provisioner's configuration. +// +// ProvisionerSchema memoizes results by provisioner type name, so it's fine +// to repeatedly call this method with the same name if various different +// parts of OpenTofu all need the same schema information. 
+func (cp *contextPlugins) ProvisionerSchema(typ string) (*configschema.Block, error) { + log.Printf("[TRACE] tofu.contextPlugins: Initializing provisioner %q to read its schema", typ) + provisioner, err := cp.NewProvisionerInstance(typ) + if err != nil { + return nil, fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %w", typ, err) + } + defer provisioner.Close() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + return nil, fmt.Errorf("failed to retrieve schema from provisioner %q: %w", typ, resp.Diagnostics.Err()) + } + + return resp.Provisioner, nil +} diff --git a/pkg/tofu/context_plugins_test.go b/pkg/tofu/context_plugins_test.go new file mode 100644 index 00000000000..a8e07168ba7 --- /dev/null +++ b/pkg/tofu/context_plugins_test.go @@ -0,0 +1,87 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" +) + +// simpleMockPluginLibrary returns a plugin library pre-configured with +// one provider and one provisioner, both called "test". +// +// The provider is built with simpleMockProvider and the provisioner with +// simpleMockProvisioner, and all schemas used in both are as built by +// function simpleTestSchema. +// +// Each call to this function produces an entirely-separate set of objects, +// so the caller can feel free to modify the returned value to further +// customize the mocks contained within. 
+func simpleMockPluginLibrary() *contextPlugins { + // We create these out here, rather than in the factory functions below, + // because we want each call to the factory to return the _same_ instance, + // so that test code can customize it before passing this component + // factory into real code under test. + provider := simpleMockProvider() + provisioner := simpleMockProvisioner() + ret := &contextPlugins{ + providerFactories: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): func() (providers.Interface, error) { + return provider, nil + }, + }, + provisionerFactories: map[string]provisioners.Factory{ + "test": func() (provisioners.Interface, error) { + return provisioner, nil + }, + }, + } + return ret +} + +// simpleTestSchema returns a block schema that contains a few optional +// attributes for use in tests. +// +// The returned schema contains the following optional attributes: +// +// - test_string, of type string +// - test_number, of type number +// - test_bool, of type bool +// - test_list, of type list(string) +// - test_map, of type map(string) +// +// Each call to this function produces an entirely new schema instance, so +// callers can feel free to modify it once returned. +func simpleTestSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + "test_number": { + Type: cty.Number, + Optional: true, + }, + "test_bool": { + Type: cty.Bool, + Optional: true, + }, + "test_list": { + Type: cty.List(cty.String), + Optional: true, + }, + "test_map": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + } +} diff --git a/pkg/tofu/context_refresh.go b/pkg/tofu/context_refresh.go new file mode 100644 index 00000000000..ecf2e106288 --- /dev/null +++ b/pkg/tofu/context_refresh.go @@ -0,0 +1,42 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Refresh is a vestigial operation that is equivalent to call to Plan and +// then taking the prior state of the resulting plan. +// +// We retain this only as a measure of semi-backward-compatibility for +// automation relying on the "tofu refresh" subcommand. The modern way +// to get this effect is to create and then apply a plan in the refresh-only +// mode. +func (c *Context) Refresh(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*states.State, tfdiags.Diagnostics) { + if opts == nil { + // This fallback is only here for tests, not for real code. + opts = &PlanOpts{ + Mode: plans.NormalMode, + } + } + if opts.Mode != plans.NormalMode { + panic("can only Refresh in the normal planning mode") + } + + log.Printf("[DEBUG] Refresh is really just plan now, so creating a %s plan", opts.Mode) + p, diags := c.Plan(config, prevRunState, opts) + if diags.HasErrors() { + return nil, diags + } + + return p.PriorState, diags +} diff --git a/pkg/tofu/context_refresh_test.go b/pkg/tofu/context_refresh_test.go new file mode 100644 index 00000000000..1a83cc779f2 --- /dev/null +++ b/pkg/tofu/context_refresh_test.go @@ -0,0 +1,1690 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "sort" + "strings" + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestContext2Refresh(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-basic") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo","foo":"bar"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + readState, err := hcl2shim.HCL2ValueFromFlatmap(map[string]string{"id": "foo", "foo": "baz"}, ty) + if err != nil { + t.Fatal(err) + } + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: readState, + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + mod := s.RootModule() + fromState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(ty) + if err != nil { + t.Fatal(err) + } + + newState, err := schema.CoerceValue(fromState.Value) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(readState, newState, valueComparer) { + 
t.Fatal(cmp.Diff(readState, newState, valueComparer, equateEmpty)) + } +} + +func TestContext2Refresh_dynamicAttr(t *testing.T) { + m := testModule(t, "refresh-dynamic") + + startingState := states.BuildState(func(ss *states.SyncState) { + ss.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"dynamic":{"type":"string","value":"hello"}}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + readStateVal := cty.ObjectVal(map[string]cty.Value{ + "dynamic": cty.EmptyTupleVal, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "dynamic": {Type: cty.DynamicPseudoType, Optional: true}, + }, + }, + }, + }) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{ + NewState: readStateVal, + } + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + schema := p.GetProviderSchemaResponse.ResourceTypes["test_instance"].Block + ty := schema.ImpliedType() + + s, diags := ctx.Refresh(m, startingState, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if !p.ReadResourceCalled { + t.Fatal("ReadResource should be called") + } + + mod := s.RootModule() + newState, err := 
mod.Resources["test_instance.foo"].Instances[addrs.NoKey].Current.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(readStateVal, newState.Value, valueComparer) { + t.Error(cmp.Diff(newState.Value, readStateVal, valueComparer, equateEmpty)) + } +} + +func TestContext2Refresh_dataComputedModuleVar(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-data-module-var") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + obj := req.ProposedNewState.AsValueMap() + obj["id"] = cty.UnknownVal(cty.String) + resp.PlannedState = cty.ObjectVal(obj) + return resp + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return resp + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "aws_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{Mode: plans.RefreshOnlyMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + checkStateString(t, plan.PriorState, ` + +`) +} + +func TestContext2Refresh_targeted(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + 
Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_elb": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "instances": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "vpc_id": { + Type: cty.String, + Optional: true, + }, + }, + }, + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_vpc.metoo", `{"id":"vpc-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me", `{"id":"i-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + m := testModule(t, "refresh-targeted") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + refreshedResources := make([]string, 0, 2) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + _, diags := ctx.Refresh(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "me", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", 
diags.Err()) + } + + expected := []string{"vpc-abc123", "i-abc123"} + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources) + } +} + +func TestContext2Refresh_targetedCount(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_elb": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "instances": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "vpc_id": { + Type: cty.String, + Optional: true, + }, + }, + }, + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_vpc.metoo", `{"id":"vpc-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[0]", `{"id":"i-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[1]", `{"id":"i-cde567"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[2]", `{"id":"i-cde789"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + m := testModule(t, "refresh-targeted-count") + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + refreshedResources := make([]string, 0, 2) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + _, diags := ctx.Refresh(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "me", + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + // Target didn't specify index, so we should get all our instances + expected := []string{ + "vpc-abc123", + "i-abc123", + "i-cde567", + "i-cde789", + } + sort.Strings(expected) + sort.Strings(refreshedResources) + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", refreshedResources, expected) + } +} + +func TestContext2Refresh_targetedCountIndex(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_elb": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "instances": { + Type: cty.Set(cty.String), + Optional: true, + }, + }, + }, + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "vpc_id": { + Type: cty.String, + Optional: true, + }, + }, + }, + "aws_vpc": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_vpc.metoo", 
`{"id":"vpc-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.notme", `{"id":"i-bcd345"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[0]", `{"id":"i-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[1]", `{"id":"i-cde567"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.me[2]", `{"id":"i-cde789"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_elb.meneither", `{"id":"lb-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + m := testModule(t, "refresh-targeted-count") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + refreshedResources := make([]string, 0, 2) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + refreshedResources = append(refreshedResources, req.PriorState.GetAttr("id").AsString()) + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + _, diags := ctx.Refresh(m, state, &PlanOpts{ + Mode: plans.NormalMode, + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.ResourceInstance( + addrs.ManagedResourceMode, "aws_instance", "me", addrs.IntKey(0), + ), + }, + }) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + expected := []string{"vpc-abc123", "i-abc123"} + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", refreshedResources, expected) + } +} + +func TestContext2Refresh_moduleComputedVar(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: 
map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + m := testModule(t, "refresh-module-computed-var") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + // This was failing (see GH-2188) at some point, so this test just + // verifies that the failure goes away. + if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { + t.Fatalf("refresh errs: %s", diags.Err()) + } +} + +func TestContext2Refresh_delete(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-basic") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + mod := s.RootModule() + if len(mod.Resources) > 0 { + t.Fatal("resources should be empty") + } +} + +func TestContext2Refresh_ignoreUncreated(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-basic") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + } + + _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + if p.ReadResourceCalled { + t.Fatal("refresh should not be called") + } +} + +func TestContext2Refresh_hook(t *testing.T) { + h := new(MockHook) + p := testProvider("aws") + m := testModule(t, "refresh-basic") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Hooks: []Hook{h}, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if _, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { + t.Fatalf("refresh errs: %s", diags.Err()) + } + if !h.PreRefreshCalled { + t.Fatal("should be called") + } + if !h.PostRefreshCalled { + t.Fatal("should be called") + } +} + +func TestContext2Refresh_modules(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-modules") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceTainted(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + testSetResourceInstanceCurrent(child, "aws_instance.web", `{"id":"baz"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + if !req.PriorState.GetAttr("id").RawEquals(cty.StringVal("baz")) { + return 
providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + new, _ := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) { + if len(path) == 1 && path[0].(cty.GetAttrStep).Name == "id" { + return cty.StringVal("new"), nil + } + return v, nil + }) + return providers.ReadResourceResponse{ + NewState: new, + } + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testContextRefreshModuleStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) { + m := testModule(t, "refresh-module-input-computed-output") + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "compute": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { + t.Fatalf("refresh errs: %s", diags.Err()) + } +} + +func TestContext2Refresh_moduleVarModule(t *testing.T) { + m := testModule(t, "refresh-module-var-module") + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { + 
t.Fatalf("refresh errs: %s", diags.Err()) + } +} + +// GH-70 +func TestContext2Refresh_noState(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-no-state") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + } + + if _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}); diags.HasErrors() { + t.Fatalf("refresh errs: %s", diags.Err()) + } +} + +func TestContext2Refresh_output(t *testing.T) { + p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + }, + }) + + m := testModule(t, "refresh-output") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo","foo":"bar"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + root.SetOutputValue("foo", cty.StringVal("foo"), false) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testContextRefreshOutputStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%q\n\nwant:\n%q", actual, expected) + } +} 
+ +func TestContext2Refresh_outputPartial(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-output-partial") + + // Refresh creates a partial plan for any instances that don't have + // remote objects yet, to get stub values for interpolation. Therefore + // we need to make DiffFn available to let that complete. + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.foo", `{}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testContextRefreshOutputPartialStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Refresh_stateBasic(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-basic") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + 
addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + schema := p.GetProviderSchemaResponse.ResourceTypes["aws_instance"].Block + ty := schema.ImpliedType() + + readStateVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + })) + if err != nil { + t.Fatal(err) + } + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: readStateVal, + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + if !p.ReadResourceCalled { + t.Fatal("read resource should be called") + } + + mod := s.RootModule() + newState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(ty) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) + } +} + +func TestContext2Refresh_dataCount(t *testing.T) { + p := testProvider("test") + m := testModule(t, "refresh-data-count") + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["things"] = cty.ListVal([]cty.Value{cty.StringVal("foo")}) + resp.PlannedState = cty.ObjectVal(m) + return resp + } + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "things": {Type: cty.List(cty.String), Computed: true}, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test": {}, + }, + }) + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: req.Config, + } + } + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + s, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) + + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + checkStateString(t, s, ``) +} + +func TestContext2Refresh_dataState(t *testing.T) { + m := testModule(t, "refresh-data-resource-basic") + state := states.NewState() + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "inputs": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + } + + p := testProvider("null") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + DataSources: map[string]*configschema.Block{ + "null_data_source": schema, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + var readStateVal cty.Value + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + m := req.Config.AsValueMap() + readStateVal = cty.ObjectVal(m) + + return providers.ReadDataSourceResponse{ + State: readStateVal, + } + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + if !p.ReadDataSourceCalled { + t.Fatal("ReadDataSource should have been called") + } + + mod := s.RootModule() + + newState, err := mod.Resources["data.null_data_source.testing"].Instances[addrs.NoKey].Current.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) + } +} + +func TestContext2Refresh_dataStateRefData(t *testing.T) { + p := testProvider("null") + p.GetProviderSchemaResponse = 
getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + DataSources: map[string]*configschema.Block{ + "null_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + "bar": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + m := testModule(t, "refresh-data-ref-data") + state := states.NewState() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // add the required id + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("foo") + + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(m), + } + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testTofuRefreshDataRefDataStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestContext2Refresh_tainted(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-basic") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceTainted(root, "aws_instance.web", `{"id":"bar"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + // add the required id + m := req.PriorState.AsValueMap() + m["id"] = cty.StringVal("foo") + + return providers.ReadResourceResponse{ + NewState: 
cty.ObjectVal(m), + } + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + if !p.ReadResourceCalled { + t.Fatal("ReadResource was not called; should have been") + } + + actual := strings.TrimSpace(s.String()) + expected := strings.TrimSpace(testContextRefreshTaintedStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// Doing a Refresh (or any operation really, but Refresh usually +// happens first) with a config with an unknown provider should result in +// an error. The key bug this found was that this wasn't happening if +// Providers was _empty_. +func TestContext2Refresh_unknownProvider(t *testing.T) { + m := testModule(t, "refresh-unknown-provider") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + c, diags := NewContext(&ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{}, + }) + assertNoDiagnostics(t, diags) + + _, diags = c.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) + if !diags.HasErrors() { + t.Fatal("successfully refreshed; want error") + } + + if got, want := diags.Err().Error(), "Missing required provider"; !strings.Contains(got, want) { + t.Errorf("missing expected error\nwant substring: %s\ngot:\n%s", want, got) + } +} + +func TestContext2Refresh_vars(t *testing.T) { + p := testProvider("aws") + + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": { + Type: cty.String, + Optional: true, + }, + "id": { + Type: cty.String, + Computed: true, + }, + }, + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{"aws_instance": schema}, + }) + 
+ m := testModule(t, "refresh-vars") + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.web", `{"id":"foo"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + readStateVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + })) + if err != nil { + t.Fatal(err) + } + + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: readStateVal, + } + + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + if !p.ReadResourceCalled { + t.Fatal("read resource should be called") + } + + mod := s.RootModule() + + newState, err := mod.Resources["aws_instance.web"].Instances[addrs.NoKey].Current.Decode(schema.ImpliedType()) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(readStateVal, newState.Value, valueComparer, equateEmpty) { + t.Fatal(cmp.Diff(readStateVal, newState.Value, valueComparer, equateEmpty)) + } + + for _, r := range mod.Resources { + if r.Addr.Resource.Type == "" { + t.Fatalf("no type: %#v", r) + } + } +} + +func TestContext2Refresh_orphanModule(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-module-orphan") + + // Create a custom refresh function to track the order they were visited + var order []string + var orderLock sync.Mutex + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + orderLock.Lock() + defer orderLock.Unlock() + + order = append(order, 
req.PriorState.GetAttr("id").AsString()) + return providers.ReadResourceResponse{ + NewState: req.PriorState, + } + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{ + {Module: addrs.Module{"module.child"}}, + {Module: addrs.Module{"module.child"}}, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.bar").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-bcd23"}`), + Dependencies: []addrs.ConfigResource{{Module: addrs.Module{"module.grandchild"}}}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + grandchild := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey).Child("grandchild", addrs.NoKey)) + testSetResourceInstanceCurrent(grandchild, "aws_instance.baz", `{"id":"i-cde345"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + testCheckDeadlock(t, func() { + _, err := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if err != nil { + t.Fatalf("err: %s", err.Err()) + } + + // TODO: handle order properly for orphaned modules / resources + // expected := []string{"i-abc123", "i-bcd234", "i-cde345"} + // if !reflect.DeepEqual(order, expected) { + // t.Fatalf("expected: %#v, got: %#v", expected, order) + // } + }) +} + +func TestContext2Validate(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse 
= getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + "num": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }) + + m := testModule(t, "validate-good") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if len(diags) != 0 { + t.Fatalf("unexpected error: %#v", diags.ErrWithWarnings()) + } +} + +func TestContext2Refresh_updateProviderInState(t *testing.T) { + m := testModule(t, "update-resource-provider") + p := testProvider("aws") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.bar", `{"id":"foo"}`, `provider["registry.opentofu.org/hashicorp/aws"].baz`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + expected := strings.TrimSpace(` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"].foo`) + + s, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + actual := s.String() + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + +func TestContext2Refresh_schemaUpgradeFlatmap(t *testing.T) { + m := testModule(t, "refresh-schema-upgrade") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_thing": { + Attributes: map[string]*configschema.Attribute{ + "name": { // imagining we renamed this from "id" + Type: cty.String, + Optional: true, + 
}, + }, + }, + }, + ResourceTypeSchemaVersions: map[string]uint64{ + "test_thing": 5, + }, + }) + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ + UpgradedState: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("foo"), + }), + } + + s := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 3, + AttrsFlat: map[string]string{ + "id": "foo", + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Refresh(m, s, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + { + got := p.UpgradeResourceStateRequest + want := providers.UpgradeResourceStateRequest{ + TypeName: "test_thing", + Version: 3, + RawStateFlatmap: map[string]string{ + "id": "foo", + }, + } + if !cmp.Equal(got, want) { + t.Errorf("wrong upgrade request\n%s", cmp.Diff(want, got)) + } + } + + { + got := state.String() + want := strings.TrimSpace(` +test_thing.bar: + ID = + provider = provider["registry.opentofu.org/hashicorp/test"] + name = foo +`) + if got != want { + t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", got, want) + } + } +} + +func TestContext2Refresh_schemaUpgradeJSON(t *testing.T) { + m := testModule(t, "refresh-schema-upgrade") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_thing": { + Attributes: map[string]*configschema.Attribute{ + "name": { // imagining we renamed this 
from "id" + Type: cty.String, + Optional: true, + }, + }, + }, + }, + ResourceTypeSchemaVersions: map[string]uint64{ + "test_thing": 5, + }, + }) + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ + UpgradedState: cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("foo"), + }), + } + + s := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + SchemaVersion: 3, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Refresh(m, s, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + { + got := p.UpgradeResourceStateRequest + want := providers.UpgradeResourceStateRequest{ + TypeName: "test_thing", + Version: 3, + RawStateJSON: []byte(`{"id":"foo"}`), + } + if !cmp.Equal(got, want) { + t.Errorf("wrong upgrade request\n%s", cmp.Diff(want, got)) + } + } + + { + got := state.String() + want := strings.TrimSpace(` +test_thing.bar: + ID = + provider = provider["registry.opentofu.org/hashicorp/test"] + name = foo +`) + if got != want { + t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", got, want) + } + } +} + +func TestContext2Refresh_dataValidation(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "aws_data_source" "foo" { + foo = "bar" +} +`, + }) + + p := testProvider("aws") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + resp.PlannedState = req.ProposedNewState + 
return + } + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Refresh(m, states.NewState(), &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + // Should get this error: + // Unsupported attribute: This object does not have an attribute named "missing" + t.Fatal(diags.Err()) + } + + if !p.ValidateDataResourceConfigCalled { + t.Fatal("ValidateDataSourceConfig not called during plan") + } +} + +func TestContext2Refresh_dataResourceDependsOn(t *testing.T) { + m := testModule(t, "plan-data-depends-on") + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_data": { + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "compute": cty.StringVal("value"), + }), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "test_resource.a", `{"id":"a"}`, `provider["registry.opentofu.org/hashicorp/test"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + 
} +} + +// verify that create_before_destroy is updated in the state during refresh +func TestRefresh_updateLifecycle(t *testing.T) { + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "bar" { + lifecycle { + create_before_destroy = true + } +} +`, + }) + + p := testProvider("aws") + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + state, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatalf("plan errors: %s", diags.Err()) + } + + r := state.ResourceInstance(mustResourceInstanceAddr("aws_instance.bar")) + if !r.Current.CreateBeforeDestroy { + t.Fatal("create_before_destroy not updated in instance state") + } +} + +func TestContext2Refresh_dataSourceOrphan(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ``, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_data_source", + Name: "foo", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + p := testProvider("test") + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) 
(resp providers.ReadDataSourceResponse) { + resp.State = cty.NullVal(req.Config.Type()) + return + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Refresh(m, state, &PlanOpts{Mode: plans.NormalMode}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if p.ReadResourceCalled { + t.Fatal("there are no managed resources to read") + } + + if p.ReadDataSourceCalled { + t.Fatal("orphaned data source instance should not be read") + } +} + +// Legacy providers may return invalid null values for blocks, causing noise in +// the diff output and unexpected behavior with ignore_changes. Make sure +// refresh fixes these up before storing the state. +func TestContext2Refresh_reifyNullBlock(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_resource" "foo" { +} +`, + }) + + p := new(MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + // incorrectly return a null _set_block value + v := req.PriorState.AsValueMap() + v["set_block"] = cty.NullVal(v["set_block"].Type()) + return providers.ReadResourceResponse{NewState: cty.ObjectVal(v)} + } + + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "set_block": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "a": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = testDiffFn + + fooAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + 
}.Instance(addrs.NoKey) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + fooAddr, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo", "network_interface":[]}`), + Dependencies: []addrs.ConfigResource{}, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan(m, state, &PlanOpts{Mode: plans.RefreshOnlyMode}) + if diags.HasErrors() { + t.Fatalf("refresh errors: %s", diags.Err()) + } + + jsonState := plan.PriorState.ResourceInstance(fooAddr.Absolute(addrs.RootModuleInstance)).Current.AttrsJSON + + // the set_block should still be an empty container, and not null + expected := `{"id":"foo","set_block":[]}` + if string(jsonState) != expected { + t.Fatalf("invalid state\nexpected: %s\ngot: %s\n", expected, jsonState) + } +} diff --git a/pkg/tofu/context_test.go b/pkg/tofu/context_test.go new file mode 100644 index 00000000000..604f1efd0f4 --- /dev/null +++ b/pkg/tofu/context_test.go @@ -0,0 +1,1059 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "bufio" + "bytes" + "fmt" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/go-version" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/planfile" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/states/statefile" + "github.com/kubegems/opentofu/pkg/tfdiags" + tfversion "github.com/kubegems/opentofu/version" + "github.com/zclconf/go-cty/cty" +) + +var ( + equateEmpty = cmpopts.EquateEmpty() + typeComparer = cmp.Comparer(cty.Type.Equals) + valueComparer = cmp.Comparer(cty.Value.RawEquals) + valueTrans = cmp.Transformer("hcl2shim", hcl2shim.ConfigValueFromHCL2) +) + +func TestNewContextRequiredVersion(t *testing.T) { + cases := []struct { + Name string + Version string + Value string + Err bool + }{ + { + "no requirement", + "0.1.0", + "", + false, + }, + + { + "doesn't match", + "0.1.0", + "> 0.6.0", + true, + }, + + { + "matches", + "0.7.0", + "> 0.6.0", + false, + }, + + { + "prerelease doesn't match with inequality", + "0.8.0", + "> 0.7.0-beta", + true, + }, + + { + "prerelease doesn't match with equality", + "0.7.0", + "0.7.0-beta", + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + // Reset the version for the tests + old := tfversion.SemVer + tfversion.SemVer = version.Must(version.NewVersion(tc.Version)) + defer func() { tfversion.SemVer = old }() + + mod := testModule(t, "context-required-version") + if 
tc.Value != "" { + constraint, err := version.NewConstraint(tc.Value) + if err != nil { + t.Fatalf("can't parse %q as version constraint", tc.Value) + } + mod.Module.CoreVersionConstraints = append(mod.Module.CoreVersionConstraints, configs.VersionConstraint{ + Required: constraint, + }) + } + c, diags := NewContext(&ContextOpts{}) + if diags.HasErrors() { + t.Fatalf("unexpected NewContext errors: %s", diags.Err()) + } + + diags = c.Validate(mod) + if diags.HasErrors() != tc.Err { + t.Fatalf("err: %s", diags.Err()) + } + }) + } +} + +func TestNewContextRequiredVersion_child(t *testing.T) { + mod := testModuleInline(t, map[string]string{ + "main.tf": ` +module "child" { + source = "./child" +} +`, + "child/main.tf": ` +terraform {} +`, + }) + + cases := map[string]struct { + Version string + Constraint string + Err bool + }{ + "matches": { + "0.5.0", + ">= 0.5.0", + false, + }, + "doesn't match": { + "0.4.0", + ">= 0.5.0", + true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + // Reset the version for the tests + old := tfversion.SemVer + tfversion.SemVer = version.Must(version.NewVersion(tc.Version)) + defer func() { tfversion.SemVer = old }() + + if tc.Constraint != "" { + constraint, err := version.NewConstraint(tc.Constraint) + if err != nil { + t.Fatalf("can't parse %q as version constraint", tc.Constraint) + } + child := mod.Children["child"] + child.Module.CoreVersionConstraints = append(child.Module.CoreVersionConstraints, configs.VersionConstraint{ + Required: constraint, + }) + } + c, diags := NewContext(&ContextOpts{}) + if diags.HasErrors() { + t.Fatalf("unexpected NewContext errors: %s", diags.Err()) + } + + diags = c.Validate(mod) + if diags.HasErrors() != tc.Err { + t.Fatalf("err: %s", diags.Err()) + } + }) + } +} + +func TestContext_missingPlugins(t *testing.T) { + ctx, diags := NewContext(&ContextOpts{}) + assertNoDiagnostics(t, diags) + + configSrc := ` +terraform { + required_providers { + explicit = { + source = 
"example.com/foo/beep" + } + builtin = { + source = "terraform.io/builtin/nonexist" + } + } +} + +resource "implicit_thing" "a" { + provisioner "nonexist" { + } +} + +resource "implicit_thing" "b" { + provider = implicit2 +} +` + + cfg := testModuleInline(t, map[string]string{ + "main.tf": configSrc, + }) + + // Validate and Plan are the two entry points where we explicitly verify + // the available plugins match what the configuration needs. For other + // operations we typically fail more deeply in OpenTofu Core, with + // potentially-less-helpful error messages, because getting there would + // require doing some pretty weird things that aren't common enough to + // be worth the complexity to check for them. + + validateDiags := ctx.Validate(cfg) + _, planDiags := ctx.Plan(cfg, nil, DefaultPlanOpts) + + tests := map[string]tfdiags.Diagnostics{ + "validate": validateDiags, + "plan": planDiags, + } + + for testName, gotDiags := range tests { + t.Run(testName, func(t *testing.T) { + var wantDiags tfdiags.Diagnostics + wantDiags = wantDiags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + "This configuration requires built-in provider terraform.io/builtin/nonexist, but that provider isn't available in this OpenTofu version.", + ), + tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + "This configuration requires provider example.com/foo/beep, but that provider isn't available. You may be able to install it automatically by running:\n tofu init", + ), + tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + "This configuration requires provider registry.opentofu.org/hashicorp/implicit, but that provider isn't available. You may be able to install it automatically by running:\n tofu init", + ), + tfdiags.Sourceless( + tfdiags.Error, + "Missing required provider", + "This configuration requires provider registry.opentofu.org/hashicorp/implicit2, but that provider isn't available. 
You may be able to install it automatically by running:\n tofu init", + ), + tfdiags.Sourceless( + tfdiags.Error, + "Missing required provisioner plugin", + `This configuration requires provisioner plugin "nonexist", which isn't available. If you're intending to use an external provisioner plugin, you must install it manually into one of the plugin search directories before running OpenTofu.`, + ), + ) + assertDiagnosticsMatch(t, gotDiags, wantDiags) + }) + } +} + +func testContext2(t *testing.T, opts *ContextOpts) *Context { + t.Helper() + + ctx, diags := NewContext(opts) + if diags.HasErrors() { + t.Fatalf("failed to create test context\n\n%s\n", diags.Err()) + } + + ctx.encryption = encryption.Disabled() + + return ctx +} + +func testApplyFn(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp.NewState = req.PlannedState + if req.PlannedState.IsNull() { + resp.NewState = cty.NullVal(req.PriorState.Type()) + return + } + + planned := req.PlannedState.AsValueMap() + if planned == nil { + planned = map[string]cty.Value{} + } + + id, ok := planned["id"] + if !ok || id.IsNull() || !id.IsKnown() { + planned["id"] = cty.StringVal("foo") + } + + // our default schema has a computed "type" attr + if ty, ok := planned["type"]; ok && !ty.IsNull() { + planned["type"] = cty.StringVal(req.TypeName) + } + + if cmp, ok := planned["compute"]; ok && !cmp.IsNull() { + computed := cmp.AsString() + if val, ok := planned[computed]; ok && !val.IsKnown() { + planned[computed] = cty.StringVal("computed_value") + } + } + + for k, v := range planned { + if k == "unknown" { + // "unknown" should cause an error + continue + } + + if !v.IsKnown() { + switch k { + case "type": + planned[k] = cty.StringVal(req.TypeName) + default: + planned[k] = cty.NullVal(v.Type()) + } + } + } + + resp.NewState = cty.ObjectVal(planned) + return +} + +func testDiffFn(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + var planned 
map[string]cty.Value + + // this is a destroy plan + if req.ProposedNewState.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp + } + + if !req.ProposedNewState.IsNull() { + planned = req.ProposedNewState.AsValueMap() + } + if planned == nil { + planned = map[string]cty.Value{} + } + + // id is always computed for the tests + if id, ok := planned["id"]; ok && id.IsNull() { + planned["id"] = cty.UnknownVal(cty.String) + } + + // the old tests have require_new replace on every plan + if _, ok := planned["require_new"]; ok { + resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: "require_new"}}) + } + + for k := range planned { + requiresNewKey := "__" + k + "_requires_new" + _, ok := planned[requiresNewKey] + if ok { + resp.RequiresReplace = append(resp.RequiresReplace, cty.Path{cty.GetAttrStep{Name: requiresNewKey}}) + } + } + + if v, ok := planned["compute"]; ok && !v.IsNull() { + k := v.AsString() + unknown := cty.UnknownVal(cty.String) + if strings.HasSuffix(k, ".#") { + k = k[:len(k)-2] + unknown = cty.UnknownVal(cty.List(cty.String)) + } + planned[k] = unknown + } + + if t, ok := planned["type"]; ok && t.IsNull() { + planned["type"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(planned) + return +} + +func testProvider(prefix string) *MockProvider { + p := new(MockProvider) + p.GetProviderSchemaResponse = testProviderSchema(prefix) + + return p +} + +func testProvisioner() *MockProvisioner { + p := new(MockProvisioner) + p.GetSchemaResponse = provisioners.GetSchemaResponse{ + Provisioner: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "command": { + Type: cty.String, + Optional: true, + }, + "order": { + Type: cty.String, + Optional: true, + }, + "when": { + Type: cty.String, + Optional: true, + }, + }, + }, + } + return p +} + +func checkStateString(t *testing.T, state *states.State, expected string) { + t.Helper() + actual 
:= strings.TrimSpace(state.String()) + expected = strings.TrimSpace(expected) + + if actual != expected { + t.Fatalf("incorrect state\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +// Test helper that gives a function 3 seconds to finish, assumes deadlock and +// fails test if it does not. +func testCheckDeadlock(t *testing.T, f func()) { + t.Helper() + timeout := make(chan bool, 1) + done := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + timeout <- true + }() + go func(f func(), done chan bool) { + defer func() { done <- true }() + f() + }(f, done) + select { + case <-timeout: + t.Fatalf("timed out! probably deadlock") + case <-done: + // ok + } +} + +func testProviderSchema(name string) *providers.GetProviderSchemaResponse { + return getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Optional: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + "value": { + Type: cty.String, + Optional: true, + }, + "root": { + Type: cty.Number, + Optional: true, + }, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + name + "_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "ami": { + Type: cty.String, + Optional: true, + }, + "dep": { + Type: cty.String, + Optional: true, + }, + "num": { + Type: cty.Number, + Optional: true, + }, + "require_new": { + Type: cty.String, + Optional: true, + }, + "var": { + Type: cty.String, + Optional: true, + }, + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "bar": { + Type: cty.String, + Optional: true, + }, + "compute": { + Type: cty.String, + Optional: true, + Computed: false, + }, + "compute_value": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "output": { + Type: cty.String, + 
Optional: true, + }, + "write": { + Type: cty.String, + Optional: true, + }, + "instance": { + Type: cty.String, + Optional: true, + }, + "vpc_id": { + Type: cty.String, + Optional: true, + }, + "type": { + Type: cty.String, + Computed: true, + }, + + // Generated by testDiffFn if compute = "unknown" is set in the test config + "unknown": { + Type: cty.String, + Computed: true, + }, + }, + }, + name + "_eip": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "instance": { + Type: cty.String, + Optional: true, + }, + }, + }, + name + "_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + }, + "sensitive_value": { + Type: cty.String, + Sensitive: true, + Optional: true, + }, + "random": { + Type: cty.String, + Optional: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nesting_single": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + "sensitive_value": {Type: cty.String, Optional: true, Sensitive: true}, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + }, + name + "_ami_list": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + Computed: true, + }, + "ids": { + Type: cty.List(cty.String), + Optional: true, + Computed: true, + }, + }, + }, + name + "_remote_state": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + "output": { + Type: cty.Map(cty.String), + Computed: true, + }, + }, + }, + name + "_file": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + "template": { + Type: cty.String, + Optional: true, + }, + "rendered": { + Type: cty.String, + Computed: true, + }, + 
"__template_requires_new": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + name + "_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "foo": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + }, + name + "_remote_state": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + "foo": { + Type: cty.String, + Optional: true, + }, + "output": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + name + "_file": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + "template": { + Type: cty.String, + Optional: true, + }, + "rendered": { + Type: cty.String, + Computed: true, + }, + }, + }, + name + "_sensitive_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + Sensitive: true, + }, + }, + }, + }, + }) +} + +// contextOptsForPlanViaFile is a helper that creates a temporary plan file, +// then reads it back in again and produces a ContextOpts object containing the +// planned changes, prior state and config from the plan file. +// +// This is intended for testing the separated plan/apply workflow in a more +// convenient way than spelling out all of these steps every time. Normally +// only the command and backend packages need to deal with such things, but +// our context tests try to exercise lots of stuff at once and so having them +// round-trip things through on-disk files is often an important part of +// fully representing an old bug in a regression test. 
+func contextOptsForPlanViaFile(t *testing.T, configSnap *configload.Snapshot, plan *plans.Plan) (*ContextOpts, *configs.Config, *plans.Plan, error) { + dir := t.TempDir() + + // We'll just create a dummy statefile.File here because we're not going + // to run through any of the codepaths that care about Lineage/Serial/etc + // here anyway. + stateFile := &statefile.File{ + State: plan.PriorState, + } + prevStateFile := &statefile.File{ + State: plan.PrevRunState, + } + + // To make life a little easier for test authors, we'll populate a simple + // backend configuration if they didn't set one, since the backend is + // usually dealt with in a calling package and so tests in this package + // don't really care about it. + if plan.Backend.Config == nil { + cfg, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) + if err != nil { + panic(fmt.Sprintf("NewDynamicValue failed: %s", err)) // shouldn't happen because we control the inputs + } + plan.Backend.Type = "local" + plan.Backend.Config = cfg + plan.Backend.Workspace = "default" + } + + filename := filepath.Join(dir, "tfplan") + err := planfile.Create(filename, planfile.CreateArgs{ + ConfigSnapshot: configSnap, + PreviousRunStateFile: prevStateFile, + StateFile: stateFile, + Plan: plan, + }, encryption.PlanEncryptionDisabled()) + if err != nil { + return nil, nil, nil, err + } + + pr, err := planfile.Open(filename, encryption.PlanEncryptionDisabled()) + if err != nil { + return nil, nil, nil, err + } + + config, diags := pr.ReadConfig(configs.RootModuleCallForTesting()) + if diags.HasErrors() { + return nil, nil, nil, diags.Err() + } + + plan, err = pr.ReadPlan() + if err != nil { + return nil, nil, nil, err + } + + // Note: This has grown rather silly over the course of ongoing refactoring, + // because ContextOpts is no longer actually responsible for carrying + // any information from a plan file and instead all of the information + // lives inside the config and plan objects. 
We continue to return a + // silly empty ContextOpts here just to keep all of the calling tests + // working. + return &ContextOpts{}, config, plan, nil +} + +// legacyPlanComparisonString produces a string representation of the changes +// from a plan and a given state together, as was formerly produced by the +// String method of tofu.Plan. +// +// This is here only for compatibility with existing tests that predate our +// new plan and state types, and should not be used in new tests. Instead, use +// a library like "cmp" to do a deep equality check and diff on the two +// data structures. +func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of tofu.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. 
+ continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. 
+ crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == hcl2shim.UnknownVariableValue { + v = "" + } + // NOTE: we don't support here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + // TODO: Mark " (forces new resource)" in updateMsg when appropriate. 
+ + fmt.Fprintf( + &mBuf, " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, v, + updateMsg, + ) + } + } + + if moduleKey == "" { // root module + buf.Write(mBuf.Bytes()) + buf.WriteByte('\n') + continue + } + + fmt.Fprintf(&buf, "%s:\n", moduleKey) + s := bufio.NewScanner(&mBuf) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return buf.String() +} + +// assertNoDiagnostics fails the test in progress (using t.Fatal) if the given +// diagnostics is non-empty. +func assertNoDiagnostics(t *testing.T, diags tfdiags.Diagnostics) { + t.Helper() + if len(diags) == 0 { + return + } + logDiagnostics(t, diags) + t.FailNow() +} + +// assertNoErrors fails the test in progress (using t.Fatal) if the given +// diagnostics has any errors. +func assertNoErrors(t *testing.T, diags tfdiags.Diagnostics) { + t.Helper() + if !diags.HasErrors() { + return + } + logDiagnostics(t, diags) + t.FailNow() +} + +// assertDiagnosticsMatch fails the test in progress (using t.Fatal) if the +// two sets of diagnostics don't match after being normalized using the +// "ForRPC" processing step, which eliminates the specific type information +// and HCL expression information of each diagnostic. +// +// assertDiagnosticsMatch sorts the two sets of diagnostics in the usual way +// before comparing them, though diagnostics only have a partial order so that +// will not totally normalize the ordering of all diagnostics sets. +func assertDiagnosticsMatch(t *testing.T, got, want tfdiags.Diagnostics) { + got = got.ForRPC() + want = want.ForRPC() + got.Sort() + want.Sort() + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong diagnostics\n%s", diff) + } +} + +// logDiagnostics is a test helper that logs the given diagnostics to the +// given testing.T using t.Log, in a way that is hopefully useful in debugging +// a test. It does not generate any errors or fail the test. 
See +// assertNoDiagnostics and assertNoErrors for more specific helpers that can +// also fail the test. +func logDiagnostics(t *testing.T, diags tfdiags.Diagnostics) { + t.Helper() + for _, diag := range diags { + desc := diag.Description() + rng := diag.Source() + + var severity string + switch diag.Severity() { + case tfdiags.Error: + severity = "ERROR" + case tfdiags.Warning: + severity = "WARN" + default: + severity = "???" // should never happen + } + + if subj := rng.Subject; subj != nil { + if desc.Detail == "" { + t.Logf("[%s@%s] %s", severity, subj.StartString(), desc.Summary) + } else { + t.Logf("[%s@%s] %s: %s", severity, subj.StartString(), desc.Summary, desc.Detail) + } + } else { + if desc.Detail == "" { + t.Logf("[%s] %s", severity, desc.Summary) + } else { + t.Logf("[%s] %s: %s", severity, desc.Summary, desc.Detail) + } + } + } +} + +const testContextRefreshModuleStr = ` +aws_instance.web: (tainted) + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + +module.child: + aws_instance.web: + ID = new + provider = provider["registry.opentofu.org/hashicorp/aws"] +` + +const testContextRefreshOutputStr = ` +aws_instance.web: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + +Outputs: + +foo = bar +` + +const testContextRefreshOutputPartialStr = ` + +` + +const testContextRefreshTaintedStr = ` +aws_instance.web: (tainted) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] +` diff --git a/pkg/tofu/context_validate.go b/pkg/tofu/context_validate.go new file mode 100644 index 00000000000..3dede5860ad --- /dev/null +++ b/pkg/tofu/context_validate.go @@ -0,0 +1,85 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Validate performs semantic validation of a configuration, and returns +// any warnings or errors. +// +// Syntax and structural checks are performed by the configuration loader, +// and so are not repeated here. +// +// Validate considers only the configuration and so it won't catch any +// errors caused by current values in the state, or other external information +// such as root module input variables. However, the Plan function includes +// all of the same checks as Validate, in addition to the other work it does +// to consider the previous run state and the planning options. +func (c *Context) Validate(config *configs.Config) tfdiags.Diagnostics { + defer c.acquireRun("validate")() + + var diags tfdiags.Diagnostics + + moreDiags := c.checkConfigDependencies(config) + diags = diags.Append(moreDiags) + // If required dependencies are not available then we'll bail early since + // otherwise we're likely to just see a bunch of other errors related to + // incompatibilities, which could be overwhelming for the user. + if diags.HasErrors() { + return diags + } + + log.Printf("[DEBUG] Building and walking validate graph") + + // Validate is to check if the given module is valid regardless of + // input values, current state, etc. Therefore we populate all of the + // input values with unknown values of the expected type, allowing us + // to perform a type check without assuming any particular values. + varValues := make(InputValues) + for name, variable := range config.Module.Variables { + ty := variable.Type + if ty == cty.NilType { + // Can't predict the type at all, so we'll just mark it as + // cty.DynamicVal (unknown value of cty.DynamicPseudoType). 
+ ty = cty.DynamicPseudoType + } + varValues[name] = &InputValue{ + Value: cty.UnknownVal(ty), + SourceType: ValueFromUnknown, + } + } + + graph, moreDiags := (&PlanGraphBuilder{ + Config: config, + Plugins: c.plugins, + State: states.NewState(), + RootVariableValues: varValues, + Operation: walkValidate, + }).Build(addrs.RootModuleInstance) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return diags + } + + walker, walkDiags := c.walk(graph, walkValidate, &graphWalkOpts{ + Config: config, + }) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return diags + } + + return diags +} diff --git a/pkg/tofu/context_validate_test.go b/pkg/tofu/context_validate_test.go new file mode 100644 index 00000000000..f2e186e3b10 --- /dev/null +++ b/pkg/tofu/context_validate_test.go @@ -0,0 +1,2489 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestContext2Validate_badCount(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }) + + m := testModule(t, "validate-bad-count") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if !diags.HasErrors() { + 
t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_badResource_reference(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }) + + m := testModule(t, "validate-bad-resource-count") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_badVar(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + m := testModule(t, "validate-bad-var") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_varNoDefaultExplicitType(t *testing.T) { + m := testModule(t, "validate-var-no-default-explicit-type") + c, diags := NewContext(&ContextOpts{}) + if diags.HasErrors() { + t.Fatalf("unexpected NewContext errors: %s", diags.Err()) + } + + // NOTE: This test has grown idiosyncratic because originally Terraform + // would (optionally) check variables during validation, and then in + // Terraform v0.12 we switched to checking variables during NewContext, + // and now most recently we've switched to checking variables only during + // planning because root variables are a plan option. 
Therefore this has + // grown into a plan test rather than a validate test, but it lives on + // here in order to make it easier to navigate through that history in + // version control. + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + // Error should be: The input variable "maybe_a_map" has not been assigned a value. + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_computedVar(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + pt := testProvider("test") + pt.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "value": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + m := testModule(t, "validate-computed-var") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + addrs.NewDefaultProvider("test"): testProviderFuncFixed(pt), + }, + }) + + p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + val := req.Config.GetAttr("value") + if val.IsKnown() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value isn't computed")) + } + + return + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + if p.ConfigureProviderCalled { + t.Fatal("Configure should not be called for 
provider") + } +} + +func TestContext2Validate_computedInFunction(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.Number, Optional: true}, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "aws_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "optional_attr": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + + m := testModule(t, "validate-computed-in-function") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +// Test that validate allows through computed counts. We do this and allow +// them to fail during "plan" since we can't know if the computed values +// can be realized during a plan. 
+func TestContext2Validate_countComputed(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + DataSources: map[string]providers.Schema{ + "aws_data_source": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + + m := testModule(t, "validate-count-computed") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_countNegative(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + m := testModule(t, "validate-count-negative") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_countVariable(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + m := testModule(t, "apply-count-variable") + c := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_countVariableNoDefault(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "validate-count-variable") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + c, diags := NewContext(&ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + assertNoDiagnostics(t, diags) + + _, diags = c.Plan(m, nil, &PlanOpts{}) + if !diags.HasErrors() { + // Error should be: The input variable "foo" has not been assigned a value. + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_moduleBadOutput(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + m := testModule(t, "validate-bad-module-output") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_moduleGood(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + m := testModule(t, "validate-good-module") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_moduleBadResource(t *testing.T) { + m := testModule(t, "validate-module-bad-rc") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), + } + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_moduleDepsShouldNotCycle(t *testing.T) { + m := testModule(t, "validate-module-deps-cycle") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_moduleProviderVar(t *testing.T) { + m := testModule(t, 
"validate-module-pc-vars") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + if req.Config.GetAttr("foo").IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo is null")) + } + return + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_moduleProviderInheritUnused(t *testing.T) { + m := testModule(t, "validate-module-pc-inherit-unused") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + if req.Config.GetAttr("foo").IsNull() { + 
resp.Diagnostics = resp.Diagnostics.Append(errors.New("foo is null")) + } + return + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_orphans(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + m := testModule(t, "validate-good") + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + var diags tfdiags.Diagnostics + if req.Config.GetAttr("foo").IsNull() { + diags = diags.Append(errors.New("foo is not set")) + } + return providers.ValidateResourceConfigResponse{ + Diagnostics: diags, + } + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_providerConfig_bad(t *testing.T) { + m := testModule(t, "validate-bad-pc") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateProviderConfigResponse = 
&providers.ValidateProviderConfigResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), + } + + diags := c.Validate(m) + if len(diags) != 1 { + t.Fatalf("wrong number of diagnostics %d; want %d", len(diags), 1) + } + if !strings.Contains(diags.Err().Error(), "bad") { + t.Fatalf("bad: %s", diags.Err().Error()) + } +} + +func TestContext2Validate_providerConfig_skippedEmpty(t *testing.T) { + m := testModule(t, "validate-skipped-pc-empty") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateProviderConfigResponse = &providers.ValidateProviderConfigResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("should not be called")), + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_providerConfig_good(t *testing.T) { + m := testModule(t, "validate-bad-pc") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): 
testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +// In this test there is a mismatch between the provider's fqn (hashicorp/test) +// and it's local name set in required_providers (arbitrary). +func TestContext2Validate_requiredProviderConfig(t *testing.T) { + m := testModule(t, "validate-required-provider-config") + p := testProvider("aws") + + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "required_attribute": {Type: cty.String, Required: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, + }, + }, + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_provisionerConfig_bad(t *testing.T) { + m := testModule(t, "validate-bad-prov-conf") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + pr := simpleMockProvisioner() + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + p.ValidateProviderConfigResponse = &providers.ValidateProviderConfigResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), + } + 
+ diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_badResourceConnection(t *testing.T) { + m := testModule(t, "validate-bad-resource-connection") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + pr := simpleMockProvisioner() + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + diags := c.Validate(m) + t.Log(diags.Err()) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_badProvisionerConnection(t *testing.T) { + m := testModule(t, "validate-bad-prov-connection") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + pr := simpleMockProvisioner() + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + diags := c.Validate(m) + t.Log(diags.Err()) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_provisionerConfig_good(t *testing.T) { + m := testModule(t, "validate-bad-prov-conf") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + 
Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + pr := simpleMockProvisioner() + pr.ValidateProvisionerConfigFn = func(req provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + var diags tfdiags.Diagnostics + if req.Config.GetAttr("test_string").IsNull() { + diags = diags.Append(errors.New("test_string is not set")) + } + return provisioners.ValidateProvisionerConfigResponse{ + Diagnostics: diags, + } + } + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_requiredVar(t *testing.T) { + m := testModule(t, "validate-required-var") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + c, diags := NewContext(&ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + assertNoDiagnostics(t, diags) + + // NOTE: This test has grown idiosyncratic because originally Terraform + // would (optionally) check variables during validation, and then in + // Terraform v0.12 we switched to checking variables during NewContext, + // and now 
most recently we've switched to checking variables only during + // planning because root variables are a plan option. Therefore this has + // grown into a plan test rather than a validate test, but it lives on + // here in order to make it easier to navigate through that history in + // version control. + _, diags = c.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + // Error should be: The input variable "foo" has not been assigned a value. + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_resourceConfig_bad(t *testing.T) { + m := testModule(t, "validate-bad-rc") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ + Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), + } + + diags := c.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } +} + +func TestContext2Validate_resourceConfig_good(t *testing.T) { + m := testModule(t, "validate-bad-rc") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", 
diags.Err()) + } +} + +func TestContext2Validate_tainted(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + m := testModule(t, "validate-good") + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + var diags tfdiags.Diagnostics + if req.Config.GetAttr("foo").IsNull() { + diags = diags.Append(errors.New("foo is not set")) + } + return providers.ValidateResourceConfigResponse{ + Diagnostics: diags, + } + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_targetedDestroy(t *testing.T) { + m := testModule(t, "validate-targeted") + p := testProvider("aws") + pr := simpleMockProvisioner() + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + testSetResourceInstanceCurrent(root, "aws_instance.foo", `{"id":"i-bcd345"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + testSetResourceInstanceCurrent(root, "aws_instance.bar", `{"id":"i-abc123"}`, `provider["registry.opentofu.org/hashicorp/aws"]`) + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "shell": testProvisionerFuncFixed(pr), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_varRefUnknown(t *testing.T) { + m := testModule(t, "validate-variable-ref") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + var value cty.Value + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + value = req.Config.GetAttr("foo") + return providers.ValidateResourceConfigResponse{} + } + + c.Validate(m) + + // Input variables are always unknown during the validate walk, because + // we're checking for validity of all possible input values. Validity + // against specific input values is checked during the plan walk. + if !value.RawEquals(cty.UnknownVal(cty.String)) { + t.Fatalf("bad: %#v", value) + } +} + +// Module variables weren't being interpolated during Validate phase. 
+// related to https://github.com/hashicorp/terraform/issues/5322 +func TestContext2Validate_interpolateVar(t *testing.T) { + input := new(MockUIInput) + + m := testModule(t, "input-interpolate-var") + p := testProvider("null") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "template_file": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "template": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +// When module vars reference something that is actually computed, this +// shouldn't cause validation to fail. +func TestContext2Validate_interpolateComputedModuleVarDef(t *testing.T) { + input := new(MockUIInput) + + m := testModule(t, "validate-computed-module-var-ref") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +// Computed values are lost when a map is output from a module +func TestContext2Validate_interpolateMap(t *testing.T) { + input := new(MockUIInput) + + m := testModule(t, "issue-9549") + p := testProvider("template") + + ctx := testContext2(t, &ContextOpts{ + Providers: 
map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("template"): testProviderFuncFixed(p), + }, + UIInput: input, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} + +func TestContext2Validate_varSensitive(t *testing.T) { + // Smoke test through validate where a variable has sensitive applied + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "foo" { + default = "xyz" + sensitive = true +} + +variable "bar" { + sensitive = true +} + +data "aws_data_source" "bar" { + foo = var.bar +} + +resource "aws_instance" "foo" { + foo = var.foo +} +`, + }) + + p := testProvider("aws") + p.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + // Providers receive unmarked values + if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { + t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateResourceConfigResponse{} + } + p.ValidateDataResourceConfigFn = func(req providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) { + if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { + t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateDataResourceConfigResponse{} + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if !p.ValidateResourceConfigCalled { + t.Fatal("expected ValidateResourceConfigFn to be called") + } + + if !p.ValidateDataResourceConfigCalled { + t.Fatal("expected ValidateDataSourceConfigFn to be called") + } +} + +func TestContext2Validate_invalidOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ 
+ "main.tf": ` +data "aws_data_source" "name" {} + +output "out" { + value = "${data.aws_data_source.name.missing}" +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Unsupported attribute: This object does not have an attribute named "missing" + if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidModuleOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "child/main.tf": ` +data "aws_data_source" "name" {} + +output "out" { + value = "${data.aws_data_source.name.missing}" +}`, + "main.tf": ` +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + foo = "${module.child.out}" +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Unsupported attribute: This object does not have an attribute named "missing" + if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_sensitiveRootModuleOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "child/main.tf": ` +variable "foo" { + default = "xyz" + sensitive = true +} + +output "out" { + value = var.foo +}`, + "main.tf": ` +module "child" { + source = "./child" +} + +output "root" { + value = module.child.out + sensitive = 
true +}`, + }) + + ctx := testContext2(t, &ContextOpts{}) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Validate_legacyResourceCount(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "test" {} + +output "out" { + value = aws_instance.test.count +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Invalid resource count attribute: The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(aws_instance.test) to count resource instances. + if got, want := diags.Err().Error(), "Invalid resource count attribute:"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidModuleRef(t *testing.T) { + // This test is verifying that we properly validate and report on references + // to modules that are not declared, since we were missing some validation + // here in early 0.12.0 alphas that led to a panic. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "out" { + # Intentionally referencing undeclared module to ensure error + value = module.foo +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Reference to undeclared module: No module call named "foo" is declared in the root module. 
+ if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidModuleOutputRef(t *testing.T) { + // This test is verifying that we properly validate and report on references + // to modules that are not declared, since we were missing some validation + // here in early 0.12.0 alphas that led to a panic. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "out" { + # Intentionally referencing undeclared module to ensure error + value = module.foo.bar +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Reference to undeclared module: No module call named "foo" is declared in the root module. + if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidDependsOnResourceRef(t *testing.T) { + // This test is verifying that we raise an error if depends_on + // refers to something that doesn't exist in configuration. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "bar" { + depends_on = [test_resource.nonexistant] +} +`, + }) + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Reference to undeclared module: No module call named "foo" is declared in the root module. 
+ if got, want := diags.Err().Error(), "Reference to undeclared resource:"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidResourceIgnoreChanges(t *testing.T) { + // This test is verifying that we raise an error if ignore_changes + // refers to something that can be statically detected as not conforming + // to the resource type schema. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "bar" { + lifecycle { + ignore_changes = [does_not_exist_in_schema] + } +} +`, + }) + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Reference to undeclared module: No module call named "foo" is declared in the root module. + if got, want := diags.Err().Error(), `no argument, nested block, or exported attribute named "does_not_exist_in_schema"`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_variableCustomValidationsFail(t *testing.T) { + // This test is for custom validation rules associated with root module + // variables, and specifically that we handle the situation where the + // given value is invalid in a child module. 
+ m := testModule(t, "validate-variable-custom-validations-child") + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_variableCustomValidationsRoot(t *testing.T) { + // This test is for custom validation rules associated with root module + // variables, and specifically that we handle the situation where their + // values are unknown during validation, skipping the validation check + // altogether. (Root module variables are never known during validation.) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "test" { + type = string + + validation { + condition = var.test != "nope" + error_message = "Value must not be \"nope\"." 
+ } +} +`, + }) + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error\ngot: %s", diags.Err().Error()) + } +} + +func TestContext2Validate_expandModules(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + for_each = toset(["a", "b"]) + source = "./mod" +} + +module "mod2" { + for_each = module.mod1 + source = "./mod" + input = module.mod1["a"].out +} + +module "mod3" { + count = length(module.mod2) + source = "./mod" +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { +} + +output "out" { + value = 1 +} + +variable "input" { + type = number + default = 0 +} + +module "nested" { + count = 2 + source = "./nested" + input = count.index +} +`, + "mod/nested/main.tf": ` +variable "input" { +} + +resource "aws_instance" "foo" { + count = var.input +} +`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_expandModulesInvalidCount(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + count = -1 + source = "./mod" +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { +} +`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `Invalid count argument`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: 
message containing %q", got, want) + } +} + +func TestContext2Validate_expandModulesInvalidForEach(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + for_each = ["a", "b"] + source = "./mod" +} +`, + "mod/main.tf": ` +resource "aws_instance" "foo" { +} +`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `Invalid for_each argument`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_expandMultipleNestedModules(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "modA" { + for_each = { + first = "m" + second = "n" + } + source = "./modA" +} +`, + "modA/main.tf": ` +locals { + m = { + first = "m" + second = "n" + } +} + +module "modB" { + for_each = local.m + source = "./modB" + y = each.value +} + +module "modC" { + for_each = local.m + source = "./modC" + x = module.modB[each.key].out + y = module.modB[each.key].out +} + +`, + "modA/modB/main.tf": ` +variable "y" { + type = string +} + +resource "aws_instance" "foo" { + foo = var.y +} + +output "out" { + value = aws_instance.foo.id +} +`, + "modA/modC/main.tf": ` +variable "x" { + type = string +} + +variable "y" { + type = string +} + +resource "aws_instance" "foo" { + foo = var.x +} + +output "out" { + value = var.y +} +`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_invalidModuleDependsOn(t 
*testing.T) { + // validate module and output depends_on + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + source = "./mod" + depends_on = [resource_foo.bar.baz] +} + +module "mod2" { + source = "./mod" + depends_on = [resource_foo.bar.baz] +} +`, + "mod/main.tf": ` +output "out" { + value = "foo" +} +`, + }) + + diags := testContext2(t, &ContextOpts{}).Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if len(diags) != 2 { + t.Fatalf("wanted 2 diagnostic errors, got %q", diags) + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "Invalid depends_on reference") { + t.Fatalf(`expected "Invalid depends_on reference", got %q`, des) + } + } +} + +func TestContext2Validate_invalidOutputDependsOn(t *testing.T) { + // validate module and output depends_on + m := testModuleInline(t, map[string]string{ + "main.tf": ` +module "mod1" { + source = "./mod" +} + +output "out" { + value = "bar" + depends_on = [resource_foo.bar.baz] +} +`, + "mod/main.tf": ` +output "out" { + value = "bar" + depends_on = [resource_foo.bar.baz] +} +`, + }) + + diags := testContext2(t, &ContextOpts{}).Validate(m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + + if len(diags) != 2 { + t.Fatalf("wanted 2 diagnostic errors, got %q", diags) + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "Invalid depends_on reference") { + t.Fatalf(`expected "Invalid depends_on reference", got %q`, des) + } + } +} + +func TestContext2Validate_rpcDiagnostics(t *testing.T) { + // validate module and output depends_on + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + + p.ValidateResourceConfigResponse = &providers.ValidateResourceConfigResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("don't frobble")), + } + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + t.Fatal("expected warnings") + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) + } + } +} + +func TestContext2Validate_sensitiveProvisionerConfig(t *testing.T) { + m := testModule(t, "validate-sensitive-provisioner-config") + p := testProvider("aws") + p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + } + + pr := simpleMockProvisioner() + + c := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Provisioners: map[string]provisioners.Factory{ + "test": testProvisionerFuncFixed(pr), + }, + }) + + pr.ValidateProvisionerConfigFn = func(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + if r.Config.ContainsMarked() { + t.Errorf("provisioner config contains marked values") + } + return pr.ValidateProvisionerConfigResponse + } + + diags := c.Validate(m) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + if !pr.ValidateProvisionerConfigCalled { + t.Fatal("ValidateProvisionerConfig not called") + } +} + +func 
TestContext2Plan_validateMinMaxDynamicBlock(t *testing.T) { + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "things": { + Type: cty.List(cty.String), + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingList, + MinItems: 2, + MaxItems: 3, + }, + }, + }, + }, + }) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + // MinItems 2 + foo { + bar = "a" + } + foo { + bar = "b" + } +} + +resource "test_instance" "b" { + // one dymamic block can satisfy MinItems of 2 + dynamic "foo" { + for_each = test_instance.a.things + content { + bar = foo.value + } + } +} + +resource "test_instance" "c" { + // we may have more than MaxItems dynamic blocks when they are unknown + foo { + bar = "b" + } + dynamic "foo" { + for_each = test_instance.a.things + content { + bar = foo.value + } + } + dynamic "foo" { + for_each = test_instance.a.things + content { + bar = "${foo.value}-2" + } + } + dynamic "foo" { + for_each = test_instance.b.things + content { + bar = foo.value + } + } +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_passInheritedProvider(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} + +module "first" { + source = "./first" + 
providers = { + test = test + } +} +`, + + // This module does not define a config for the test provider, but we + // should be able to pass whatever the implied config is to a child + // module. + "first/main.tf": ` +terraform { + required_providers { + test = { + source = "hashicorp/test" + } + } +} + +module "second" { + source = "./second" + providers = { + test.alias = test + } +}`, + + "first/second/main.tf": ` +terraform { + required_providers { + test = { + source = "hashicorp/test" + configuration_aliases = [test.alias] + } + } +} + +resource "test_object" "t" { + provider = test.alias +} +`, + }) + + p := simpleMockProvider() + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Plan_lookupMismatchedObjectTypes(t *testing.T) { + p := new(MockProvider) + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "things": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + }, + }) + + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "items" { + type = list(string) + default = [] +} + +resource "test_instance" "a" { + for_each = length(var.items) > 0 ? { default = {} } : {} +} + +output "out" { + // Strictly speaking, this expression is incorrect because the map element + // type is a different type from the default value, and the lookup + // implementation expects to be able to convert the default to match the + // element type. + // There are two reasons this works which we need to maintain for + // compatibility. 
First during validation the 'test_instance.a' expression + // only returns a dynamic value, preventing any type comparison. Later during + // plan and apply 'test_instance.a' is an object and not a map, and the + // lookup implementation skips the type comparison when the keys are known + // statically. + value = lookup(test_instance.a, "default", { id = null })["id"] +} +`}) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_nonNullableVariableDefaultValidation(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` + module "first" { + source = "./mod" + input = null + } + `, + + "mod/main.tf": ` + variable "input" { + type = string + default = "default" + nullable = false + + // Validation expressions should receive the default with nullable=false and + // a null input. + validation { + condition = var.input != null + error_message = "Input cannot be null!" + } + } + `, + }) + + ctx := testContext2(t, &ContextOpts{}) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_precondition_good(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "input" { + type = string + default = "foo" +} + +resource "aws_instance" "test" { + foo = var.input + + lifecycle { + precondition { + condition = length(var.input) > 0 + error_message = "Input cannot be empty." 
+ } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_precondition_badCondition(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "input" { + type = string + default = "foo" +} + +resource "aws_instance" "test" { + foo = var.input + + lifecycle { + precondition { + condition = length(one(var.input)) == 1 + error_message = "You can't do that." + } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { + t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) + } +} + +func TestContext2Validate_precondition_badErrorMessage(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "input" { + type = string + default = "foo" +} + +resource "aws_instance" "test" { + foo = var.input + + lifecycle { + precondition { + condition = 
var.input != "foo" + error_message = "This is a bad use of a function: ${one(var.input)}." + } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { + t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) + } +} + +func TestContext2Validate_postcondition_good(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "test" { + foo = "foo" + + lifecycle { + postcondition { + condition = length(self.foo) > 0 + error_message = "Input cannot be empty." + } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_postcondition_badCondition(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + // This postcondition's condition expression does not refer to self, which + // is unrealistic. 
This is because at the time of writing the test, self is + // always an unknown value of dynamic type during validation. As a result, + // validation of conditions which refer to resource arguments is not + // possible until plan time. For now we exercise the code by referring to + // an input variable. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "input" { + type = string + default = "foo" +} + +resource "aws_instance" "test" { + foo = var.input + + lifecycle { + postcondition { + condition = length(one(var.input)) == 1 + error_message = "You can't do that." + } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { + t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) + } +} + +func TestContext2Validate_postcondition_badErrorMessage(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "test" { + foo = "foo" + + lifecycle { + postcondition { + condition = self.foo != "foo" + error_message = "This is a bad use of a function: ${one("foo")}." 
+ } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + if got, want := diags.Err().Error(), "Invalid function argument"; !strings.Contains(got, want) { + t.Errorf("unexpected error.\ngot: %s\nshould contain: %q", got, want) + } +} + +func TestContext2Validate_precondition_count(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + foos = ["bar", "baz"] +} + +resource "aws_instance" "test" { + count = 3 + foo = local.foos[count.index] + + lifecycle { + precondition { + condition = count.index < length(local.foos) + error_message = "Insufficient foos." 
+ } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_postcondition_forEach(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +locals { + foos = toset(["bar", "baz", "boop"]) +} + +resource "aws_instance" "test" { + for_each = local.foos + foo = "foo" + + lifecycle { + postcondition { + condition = length(each.value) == 3 + error_message = "Short foo required, not \"${each.key}\"." + } + } +} + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } +} + +func TestContext2Validate_deprecatedAttr(t *testing.T) { + p := testProvider("aws") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "aws_instance": { + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true, Deprecated: true}, + }, + }, + }, + }) + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "test" { +} +locals { + deprecated = aws_instance.test.foo +} + + `, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate(m) + warn := 
diags.ErrWithWarnings().Error() + if !strings.Contains(warn, `The attribute "foo" is deprecated`) { + t.Fatalf("expected deprecated warning, got: %q\n", warn) + } +} diff --git a/pkg/tofu/context_walk.go b/pkg/tofu/context_walk.go new file mode 100644 index 00000000000..c480dc091e7 --- /dev/null +++ b/pkg/tofu/context_walk.go @@ -0,0 +1,158 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + "time" + + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/refactoring" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// graphWalkOpts captures some transient values we use (and possibly mutate) +// during a graph walk. +// +// The way these options get used unfortunately varies between the different +// walkOperation types. This is a historical design wart that dates back to +// us using the same graph structure for all operations; hopefully we'll +// make the necessary differences between the walk types more explicit someday. +type graphWalkOpts struct { + InputState *states.State + Changes *plans.Changes + Config *configs.Config + + // PlanTimeCheckResults should be populated during the apply phase with + // the snapshot of check results that was generated during the plan step. + // + // This then propagates the decisions about which checkable objects exist + // from the plan phase into the apply phase without having to re-compute + // the module and resource expansion. + PlanTimeCheckResults *states.CheckResults + + // PlanTimeTimestamp should be populated during the plan phase by retrieving + // the current UTC timestamp, and should be read from the plan file during + // the apply phase. 
+ PlanTimeTimestamp time.Time + + MoveResults refactoring.MoveResults +} + +func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) { + log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) + + walker := c.graphWalker(operation, opts) + + // Watch for a stop so we can call the provider Stop() API. + watchStop, watchWait := c.watchStop(walker) + + // Walk the real graph, this will block until it completes + diags := graph.Walk(walker) + + // Close the channel so the watcher stops, and wait for it to return. + close(watchStop) + <-watchWait + + return walker, diags +} + +func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *ContextGraphWalker { + var state *states.SyncState + var refreshState *states.SyncState + var prevRunState *states.SyncState + + // NOTE: None of the SyncState objects must directly wrap opts.InputState, + // because we use those to mutate the state object and opts.InputState + // belongs to our caller and thus we must treat it as immutable. + // + // To account for that, most of our SyncState values created below end up + // wrapping a _deep copy_ of opts.InputState instead. + inputState := opts.InputState + if inputState == nil { + // Lots of callers use nil to represent the "empty" case where we've + // not run Apply yet, so we tolerate that. + inputState = states.NewState() + } + + switch operation { + case walkValidate: + // validate should not use any state + state = states.NewState().SyncWrapper() + + // validate currently uses the plan graph, so we have to populate the + // refreshState and the prevRunState. 
+ refreshState = states.NewState().SyncWrapper() + prevRunState = states.NewState().SyncWrapper() + + case walkPlan, walkPlanDestroy, walkImport: + state = inputState.DeepCopy().SyncWrapper() + refreshState = inputState.DeepCopy().SyncWrapper() + prevRunState = inputState.DeepCopy().SyncWrapper() + + // For both of our new states we'll discard the previous run's + // check results, since we can still refer to them from the + // prevRunState object if we need to. + state.DiscardCheckResults() + refreshState.DiscardCheckResults() + + default: + state = inputState.DeepCopy().SyncWrapper() + // Only plan-like walks use refreshState and prevRunState + + // Discard the input state's check results, because we should create + // a new set as a result of the graph walk. + state.DiscardCheckResults() + } + + changes := opts.Changes + if changes == nil { + // Several of our non-plan walks end up sharing codepaths with the + // plan walk and thus expect to generate planned changes even though + // we don't care about them. To avoid those crashing, we'll just + // insert a placeholder changes object which'll get discarded + // afterwards. + changes = plans.NewChanges() + } + + if opts.Config == nil { + panic("Context.graphWalker call without Config") + } + + checkState := checks.NewState(opts.Config) + if opts.PlanTimeCheckResults != nil { + // We'll re-report all of the same objects we determined during the + // plan phase so that we can repeat the checks during the apply + // phase to finalize them. 
+ for _, configElem := range opts.PlanTimeCheckResults.ConfigResults.Elems { + if configElem.Value.ObjectAddrsKnown() { + configAddr := configElem.Key + checkState.ReportCheckableObjects(configAddr, configElem.Value.ObjectResults.Keys()) + } + } + } + + return &ContextGraphWalker{ + Context: c, + State: state, + Config: opts.Config, + RefreshState: refreshState, + PrevRunState: prevRunState, + Changes: changes.SyncWrapper(), + Checks: checkState, + InstanceExpander: instances.NewExpander(), + MoveResults: opts.MoveResults, + ImportResolver: NewImportResolver(), + Operation: operation, + StopContext: c.runContext, + PlanTimestamp: opts.PlanTimeTimestamp, + Encryption: c.encryption, + } +} diff --git a/pkg/tofu/diagnostics.go b/pkg/tofu/diagnostics.go new file mode 100644 index 00000000000..6a6e922d9f4 --- /dev/null +++ b/pkg/tofu/diagnostics.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// This file contains some package-local helpers for working with diagnostics. +// For the main diagnostics API, see the separate "tfdiags" package. + +// diagnosticCausedByUnknown is an implementation of +// tfdiags.DiagnosticExtraBecauseUnknown which we can use in the "Extra" field +// of a diagnostic to indicate that the problem was caused by unknown values +// being involved in an expression evaluation. +// +// When using this, set the Extra to diagnosticCausedByUnknown(true) and also +// populate the EvalContext and Expression fields of the diagnostic so that +// the diagnostic renderer can use all of that information together to assist +// the user in understanding what was unknown. 
+type diagnosticCausedByUnknown bool
+
+var _ tfdiags.DiagnosticExtraBecauseUnknown = diagnosticCausedByUnknown(true)
+
+func (e diagnosticCausedByUnknown) DiagnosticCausedByUnknown() bool {
+	return bool(e)
+}
+
+// diagnosticCausedBySensitive is an implementation of
+// tfdiags.DiagnosticExtraBecauseSensitive which we can use in the "Extra" field
+// of a diagnostic to indicate that the problem was caused by sensitive values
+// being involved in an expression evaluation.
+//
+// When using this, set the Extra to diagnosticCausedBySensitive(true) and also
+// populate the EvalContext and Expression fields of the diagnostic so that
+// the diagnostic renderer can use all of that information together to assist
+// the user in understanding what was sensitive.
+type diagnosticCausedBySensitive bool
+
+var _ tfdiags.DiagnosticExtraBecauseSensitive = diagnosticCausedBySensitive(true)
+
+func (e diagnosticCausedBySensitive) DiagnosticCausedBySensitive() bool {
+	return bool(e)
+}
diff --git a/pkg/tofu/eval_conditions.go b/pkg/tofu/eval_conditions.go
new file mode 100644
index 00000000000..dbaf1d4ae20
--- /dev/null
+++ b/pkg/tofu/eval_conditions.go
@@ -0,0 +1,279 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tofu
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/checks"
+	"github.com/kubegems/opentofu/pkg/configs"
+	"github.com/kubegems/opentofu/pkg/instances"
+	"github.com/kubegems/opentofu/pkg/lang"
+	"github.com/kubegems/opentofu/pkg/lang/marks"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+)
+
+// evalCheckRules ensures that all of the given check rules pass against
+// the given HCL evaluation context.
+//
+// If any check rules produce an unknown result then they will be silently
+// ignored on the assumption that the same checks will be run again later
+// with fewer unknown values in the EvalContext.
+//
+// If any of the rules do not pass, the returned diagnostics will contain
+// errors. Otherwise, it will either be empty or contain only warnings.
+func evalCheckRules(typ addrs.CheckRuleType, rules []*configs.CheckRule, ctx EvalContext, self addrs.Checkable, keyData instances.RepetitionData, diagSeverity tfdiags.Severity) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	checkState := ctx.Checks()
+	if !checkState.ConfigHasChecks(self.ConfigCheckable()) {
+		// We have nothing to do if this object doesn't have any checks,
+		// but the "rules" slice should agree that we don't.
+		if ct := len(rules); ct != 0 {
+			panic(fmt.Sprintf("check state says that %s should have no rules, but it has %d", self, ct))
+		}
+		return diags
+	}
+
+	if len(rules) == 0 {
+		// Nothing to do
+		return nil
+	}
+
+	severity := diagSeverity.ToHCL()
+
+	for i, rule := range rules {
+		result, ruleDiags := evalCheckRule(addrs.NewCheckRule(self, typ, i), rule, ctx, keyData, severity)
+		diags = diags.Append(ruleDiags)
+
+		log.Printf("[TRACE] evalCheckRules: %s status is now %s", self, result.Status)
+		if result.Status == checks.StatusFail {
+			checkState.ReportCheckFailure(self, typ, i, result.FailureMessage)
+		} else {
+			checkState.ReportCheckResult(self, typ, i, result.Status)
+		}
+	}
+
+	return diags
+}
+
+type checkResult struct {
+	Status         checks.Status
+	FailureMessage string
+}
+
+func validateCheckRule(addr addrs.CheckRule, rule *configs.CheckRule, ctx EvalContext, keyData instances.RepetitionData) (string, *hcl.EvalContext, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	refs, moreDiags := lang.ReferencesInExpr(addrs.ParseRef, rule.Condition)
+	diags = diags.Append(moreDiags)
+	moreRefs, moreDiags := lang.ReferencesInExpr(addrs.ParseRef, rule.ErrorMessage)
+	diags = diags.Append(moreDiags)
+	refs = append(refs, moreRefs...)
+
+	var selfReference, sourceReference addrs.Referenceable
+	switch addr.Type {
+	case addrs.ResourcePostcondition:
+		switch s := addr.Container.(type) {
+		case addrs.AbsResourceInstance:
+			// Only resource postconditions can refer to self
+			selfReference = s.Resource
+		default:
+			panic(fmt.Sprintf("Invalid self reference type %T", addr.Container))
+		}
+	case addrs.CheckAssertion:
+		switch s := addr.Container.(type) {
+		case addrs.AbsCheck:
+			// Only check blocks have scoped resources so need to specify their
+			// source.
+			sourceReference = s.Check
+		default:
+			panic(fmt.Sprintf("Invalid source reference type %T", addr.Container))
+		}
+	}
+	scope := ctx.EvaluationScope(selfReference, sourceReference, keyData)
+
+	hclCtx, moreDiags := scope.EvalContext(refs)
+	diags = diags.Append(moreDiags)
+
+	errorMessage, moreDiags := evalCheckErrorMessage(rule.ErrorMessage, hclCtx)
+	diags = diags.Append(moreDiags)
+
+	return errorMessage, hclCtx, diags
+}
+
+func evalCheckRule(addr addrs.CheckRule, rule *configs.CheckRule, ctx EvalContext, keyData instances.RepetitionData, severity hcl.DiagnosticSeverity) (checkResult, tfdiags.Diagnostics) {
+	// NOTE: Intentionally not passing the caller's selected severity in here,
+	// because this reports errors in the configuration itself, not the failure
+	// of an otherwise-valid condition.
+	errorMessage, hclCtx, diags := validateCheckRule(addr, rule, ctx, keyData)
+
+	const errInvalidCondition = "Invalid condition result"
+
+	resultVal, hclDiags := rule.Condition.Value(hclCtx)
+	diags = diags.Append(hclDiags)
+
+	if diags.HasErrors() {
+		log.Printf("[TRACE] evalCheckRule: %s: %s", addr.Type, diags.Err().Error())
+		return checkResult{Status: checks.StatusError}, diags
+	}
+
+	if !resultVal.IsKnown() {
+
+		// Check assertions warn if a status is unknown.
+		if addr.Type == addrs.CheckAssertion {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity:    hcl.DiagWarning,
+				Summary:     fmt.Sprintf("%s known after apply", addr.Type.Description()),
+				Detail:      "The condition could not be evaluated at this time, a result will be known when this plan is applied.",
+				Subject:     rule.Condition.Range().Ptr(),
+				Expression:  rule.Condition,
+				EvalContext: hclCtx,
+				Extra: &addrs.CheckRuleDiagnosticExtra{
+					CheckRule: addr,
+				},
+			})
+		}
+
+		// We'll wait until we've learned more, then.
+		return checkResult{Status: checks.StatusUnknown}, diags
+	}
+	if resultVal.IsNull() {
+		// NOTE: Intentionally not passing the caller's selected severity in here,
+		// because this reports errors in the configuration itself, not the failure
+		// of an otherwise-valid condition.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity:    hcl.DiagError,
+			Summary:     errInvalidCondition,
+			Detail:      "Condition expression must return either true or false, not null.",
+			Subject:     rule.Condition.Range().Ptr(),
+			Expression:  rule.Condition,
+			EvalContext: hclCtx,
+		})
+		return checkResult{Status: checks.StatusError}, diags
+	}
+	var err error
+	resultVal, err = convert.Convert(resultVal, cty.Bool)
+	if err != nil {
+		// NOTE: Intentionally not passing the caller's selected severity in here,
+		// because this reports errors in the configuration itself, not the failure
+		// of an otherwise-valid condition.
+		detail := fmt.Sprintf("Invalid condition result value: %s.", tfdiags.FormatError(err))
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity:    hcl.DiagError,
+			Summary:     errInvalidCondition,
+			Detail:      detail,
+			Subject:     rule.Condition.Range().Ptr(),
+			Expression:  rule.Condition,
+			EvalContext: hclCtx,
+		})
+		return checkResult{Status: checks.StatusError}, diags
+	}
+
+	// The condition result may be marked if the expression refers to a
+	// sensitive value.
+	resultVal, _ = resultVal.Unmark()
+
+	status := checks.StatusForCtyValue(resultVal)
+
+	if status != checks.StatusFail {
+		return checkResult{Status: status}, diags
+	}
+
+	errorMessageForDiags := errorMessage
+	if errorMessageForDiags == "" {
+		errorMessageForDiags = "This check failed, but has an invalid error message as described in the other accompanying messages."
+	}
+	diags = diags.Append(&hcl.Diagnostic{
+		// The caller gets to choose the severity of this one, because we
+		// treat condition failures as warnings in the presence of
+		// certain special planning options.
+		Severity:    severity,
+		Summary:     fmt.Sprintf("%s failed", addr.Type.Description()),
+		Detail:      errorMessageForDiags,
+		Subject:     rule.Condition.Range().Ptr(),
+		Expression:  rule.Condition,
+		EvalContext: hclCtx,
+		Extra: &addrs.CheckRuleDiagnosticExtra{
+			CheckRule: addr,
+		},
+	})
+
+	return checkResult{
+		Status:         status,
+		FailureMessage: errorMessage,
+	}, diags
+}
+
+// evalCheckErrorMessage makes a best effort to evaluate the given expression,
+// as an error message string.
+//
+// It will either return a non-empty message string or it'll return diagnostics
+// with either errors or warnings that explain why the given expression isn't
+// acceptable.
+func evalCheckErrorMessage(expr hcl.Expression, hclCtx *hcl.EvalContext) (string, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + val, hclDiags := expr.Value(hclCtx) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return "", diags + } + + val, err := convert.Convert(val, cty.String) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid error message", + Detail: fmt.Sprintf("Unsuitable value for error message: %s.", tfdiags.FormatError(err)), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return "", diags + } + if !val.IsKnown() { + return "", diags + } + if val.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid error message", + Detail: "Unsuitable value for error message: must not be null.", + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return "", diags + } + + val, valMarks := val.Unmark() + if _, sensitive := valMarks[marks.Sensitive]; sensitive { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Error message refers to sensitive values", + Detail: `The error expression used to explain this condition refers to sensitive values, so OpenTofu will not display the resulting message. + +You can correct this by removing references to sensitive values, or by carefully using the nonsensitive() function if the expression will not reveal the sensitive data.`, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return "", diags + } + + // NOTE: We've discarded any other marks the string might have been carrying, + // aside from the sensitive mark. 
+ + return strings.TrimSpace(val.AsString()), diags +} diff --git a/pkg/tofu/eval_context.go b/pkg/tofu/eval_context.go new file mode 100644 index 00000000000..e1fefbe86cf --- /dev/null +++ b/pkg/tofu/eval_context.go @@ -0,0 +1,221 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/refactoring" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// EvalContext is the interface that is given to eval nodes to execute. +type EvalContext interface { + // Stopped returns a channel that is closed when evaluation is stopped + // via tofu.Context.Stop() + Stopped() <-chan struct{} + + // Path is the current module path. + Path() addrs.ModuleInstance + + // Hook is used to call hook methods. The callback is called for each + // hook and should return the hook action to take and the error. + Hook(func(Hook) (HookAction, error)) error + + // Input is the UIInput object for interacting with the UI. + Input() UIInput + + // InitProvider initializes the provider with the given address, and returns + // the implementation of the resource provider or an error. + // + // It is an error to initialize the same provider more than once. This + // method will panic if the module instance address of the given provider + // configuration does not match the Path() of the EvalContext. 
+ InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) + + // Provider gets the provider instance with the given address (already + // initialized) or returns nil if the provider isn't initialized. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + // InitProvider must've been called on the EvalContext of the module + // that owns the given provider before calling this method. + Provider(addrs.AbsProviderConfig) providers.Interface + + // ProviderSchema retrieves the schema for a particular provider, which + // must have already been initialized with InitProvider. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + ProviderSchema(addrs.AbsProviderConfig) (providers.ProviderSchema, error) + + // CloseProvider closes provider connections that aren't needed anymore. + // + // This method will panic if the module instance address of the given + // provider configuration does not match the Path() of the EvalContext. + CloseProvider(addrs.AbsProviderConfig) error + + // ConfigureProvider configures the provider with the given + // configuration. This is a separate context call because this call + // is used to store the provider configuration for inheritance lookups + // with ParentProviderConfig(). + // + // This method will panic if the module instance address of the given + // provider configuration does not match the Path() of the EvalContext. + ConfigureProvider(addrs.AbsProviderConfig, cty.Value) tfdiags.Diagnostics + + // ProviderInput and SetProviderInput are used to configure providers + // from user input. + // + // These methods will panic if the module instance address of the given + // provider configuration does not match the Path() of the EvalContext. 
+ ProviderInput(addrs.AbsProviderConfig) map[string]cty.Value + SetProviderInput(addrs.AbsProviderConfig, map[string]cty.Value) + + // Provisioner gets the provisioner instance with the given name. + Provisioner(string) (provisioners.Interface, error) + + // ProvisionerSchema retrieves the main configuration schema for a + // particular provisioner, which must have already been initialized with + // InitProvisioner. + ProvisionerSchema(string) (*configschema.Block, error) + + // CloseProvisioner closes all provisioner plugins. + CloseProvisioners() error + + // EvaluateBlock takes the given raw configuration block and associated + // schema and evaluates it to produce a value of an object type that + // conforms to the implied type of the schema. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + // + // The "key" argument is also optional. If given, it is the instance key + // of the current object within the multi-instance container it belongs + // to. For example, on a resource block with "count" set this should be + // set to a different addrs.IntKey for each instance created from that + // block. Set this to addrs.NoKey if not appropriate. + // + // The returned body is an expanded version of the given body, with any + // "dynamic" blocks replaced with zero or more static blocks. This can be + // used to extract correct source location information about attributes of + // the returned object value. + EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) + + // EvaluateExpr takes the given HCL expression and evaluates it to produce + // a value. + // + // The "self" argument is optional. 
If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) + + // EvaluateReplaceTriggeredBy takes the raw reference expression from the + // config, and returns the evaluated *addrs.Reference along with a boolean + // indicating if that reference forces replacement. + EvaluateReplaceTriggeredBy(expr hcl.Expression, repData instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) + + // EvaluationScope returns a scope that can be used to evaluate reference + // addresses in this context. + EvaluationScope(self addrs.Referenceable, source addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope + + // SetRootModuleArgument defines the value for one variable of the root + // module. The caller must ensure that given value is a suitable + // "final value" for the variable, which means that it's already converted + // and validated to match any configured constraints and validation rules. + // + // Calling this function multiple times with the same variable address + // will silently overwrite the value provided by a previous call. + SetRootModuleArgument(addrs.InputVariable, cty.Value) + + // SetModuleCallArgument defines the value for one input variable of a + // particular child module call. The caller must ensure that the given + // value is a suitable "final value" for the variable, which means that + // it's already converted and validated to match any configured + // constraints and validation rules. + // + // Calling this function multiple times with the same variable address + // will silently overwrite the value provided by a previous call. 
+ SetModuleCallArgument(addrs.ModuleCallInstance, addrs.InputVariable, cty.Value) + + // GetVariableValue returns the value provided for the input variable with + // the given address, or cty.DynamicVal if the variable hasn't been assigned + // a value yet. + // + // Most callers should deal with variable values only indirectly via + // EvaluationScope and the other expression evaluation functions, but + // this is provided because variables tend to be evaluated outside of + // the context of the module they belong to and so we sometimes need to + // override the normal expression evaluation behavior. + GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value + + // Changes returns the writer object that can be used to write new proposed + // changes into the global changes set. + Changes() *plans.ChangesSync + + // State returns a wrapper object that provides safe concurrent access to + // the global state. + State() *states.SyncState + + // Checks returns the object that tracks the state of any custom checks + // declared in the configuration. + Checks() *checks.State + + // RefreshState returns a wrapper object that provides safe concurrent + // access to the state used to store the most recently refreshed resource + // values. + RefreshState() *states.SyncState + + // PrevRunState returns a wrapper object that provides safe concurrent + // access to the state which represents the result of the previous run, + // updated only so that object data conforms to current schemas for + // meaningful comparison with RefreshState. + PrevRunState() *states.SyncState + + // InstanceExpander returns a helper object for tracking the expansion of + // graph nodes during the plan phase in response to "count" and "for_each" + // arguments. + // + // The InstanceExpander is a global object that is shared across all of the + // EvalContext objects for a given configuration. 
+ InstanceExpander() *instances.Expander + + // MoveResults returns a map describing the results of handling any + // resource instance move statements prior to the graph walk, so that + // the graph walk can then record that information appropriately in other + // artifacts produced by the graph walk. + // + // This data structure is created prior to the graph walk and read-only + // thereafter, so callers must not modify the returned map or any other + // objects accessible through it. + MoveResults() refactoring.MoveResults + + // ImportResolver returns a helper object for tracking the resolution of + // imports, after evaluating the dynamic address and ID of the import targets + // + // This data is created during the graph walk, as import target addresses are being resolved + // Its primary use is for validation at the end of a plan - To make sure all imports have been satisfied + // and have a configuration + ImportResolver() *ImportResolver + + // WithPath returns a copy of the context with the internal path set to the + // path argument. + WithPath(path addrs.ModuleInstance) EvalContext + + // Returns the currently configured encryption setup + GetEncryption() encryption.Encryption +} diff --git a/pkg/tofu/eval_context_builtin.go b/pkg/tofu/eval_context_builtin.go new file mode 100644 index 00000000000..ccd79d31df1 --- /dev/null +++ b/pkg/tofu/eval_context_builtin.go @@ -0,0 +1,529 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/refactoring" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/kubegems/opentofu/version" +) + +// BuiltinEvalContext is an EvalContext implementation that is used by +// OpenTofu by default. +type BuiltinEvalContext struct { + // StopContext is the context used to track whether we're complete + StopContext context.Context + + // PathValue is the Path that this context is operating within. + PathValue addrs.ModuleInstance + + // pathSet indicates that this context was explicitly created for a + // specific path, and can be safely used for evaluation. This lets us + // differentiate between PathValue being unset, and the zero value which is + // equivalent to RootModuleInstance. Path and Evaluation methods will + // panic if this is not set. + pathSet bool + + // Evaluator is used for evaluating expressions within the scope of this + // eval context. + Evaluator *Evaluator + + VariableValuesLock *sync.Mutex + // VariableValues contains the variable values across all modules. This + // structure is shared across the entire containing context, and so it + // may be accessed only when holding VariableValuesLock. + // The keys of the first level of VariableValues are the string + // representations of addrs.ModuleInstance values. 
The second-level keys + // are variable names within each module instance. + VariableValues map[string]map[string]cty.Value + + // Plugins is a library of plugin components (providers and provisioners) + // available for use during a graph walk. + Plugins *contextPlugins + + Hooks []Hook + InputValue UIInput + + ProviderLock *sync.Mutex + ProviderCache map[string]providers.Interface + ProviderInputConfig map[string]map[string]cty.Value + + ProvisionerLock *sync.Mutex + ProvisionerCache map[string]provisioners.Interface + + ChangesValue *plans.ChangesSync + StateValue *states.SyncState + ChecksValue *checks.State + RefreshStateValue *states.SyncState + PrevRunStateValue *states.SyncState + InstanceExpanderValue *instances.Expander + MoveResultsValue refactoring.MoveResults + ImportResolverValue *ImportResolver + Encryption encryption.Encryption +} + +// BuiltinEvalContext implements EvalContext +var _ EvalContext = (*BuiltinEvalContext)(nil) + +func (ctx *BuiltinEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { + newCtx := *ctx + newCtx.pathSet = true + newCtx.PathValue = path + return &newCtx +} + +func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { + // This can happen during tests. During tests, we just block forever. 
+ if ctx.StopContext == nil { + return nil + } + + return ctx.StopContext.Done() +} + +func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + for _, h := range ctx.Hooks { + action, err := fn(h) + if err != nil { + return err + } + + switch action { + case HookActionContinue: + continue + case HookActionHalt: + // Return an early exit error to trigger an early exit + log.Printf("[WARN] Early exit triggered by hook: %T", h) + return nil + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Input() UIInput { + return ctx.InputValue +} + +func (ctx *BuiltinEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := addr.String() + + // If we have already initialized, it is an error + if _, ok := ctx.ProviderCache[key]; ok { + return nil, fmt.Errorf("%s is already initialized", addr) + } + + p, err := ctx.Plugins.NewProviderInstance(addr.Provider) + if err != nil { + return nil, err + } + + if ctx.Evaluator != nil && ctx.Evaluator.Config != nil && ctx.Evaluator.Config.Module != nil { + // If an aliased provider is mocked, we use providerForTest wrapper. + // We cannot wrap providers.Factory itself, because factories don't support aliases. 
+ pc, ok := ctx.Evaluator.Config.Module.GetProviderConfig(addr.Provider.Type, addr.Alias) + if ok && pc.IsMocked { + p, err = newProviderForTest(p, pc.MockResources) + if err != nil { + return nil, err + } + } + } + + log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", addr.String(), addr) + ctx.ProviderCache[key] = p + + return p, nil +} + +func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + return ctx.ProviderCache[addr.String()] +} + +func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) (providers.ProviderSchema, error) { + return ctx.Plugins.ProviderSchema(addr.Provider) +} + +func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := addr.String() + provider := ctx.ProviderCache[key] + if provider != nil { + delete(ctx.ProviderCache, key) + return provider.Close() + } + + return nil +} + +func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if !addr.Module.Equal(ctx.Path().Module()) { + // This indicates incorrect use of ConfigureProvider: it should be used + // only from the module that the provider configuration belongs to. 
+ panic(fmt.Sprintf("%s configured by wrong module %s", addr, ctx.Path())) + } + + p := ctx.Provider(addr) + if p == nil { + diags = diags.Append(fmt.Errorf("%s not initialized", addr)) + return diags + } + + req := providers.ConfigureProviderRequest{ + TerraformVersion: version.String(), + Config: cfg, + } + + resp := p.ConfigureProvider(req) + return resp.Diagnostics +} + +func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.AbsProviderConfig) map[string]cty.Value { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + if !pc.Module.Equal(ctx.Path().Module()) { + // This indicates incorrect use of InitProvider: it should be used + // only from the module that the provider configuration belongs to. + panic(fmt.Sprintf("%s initialized by wrong module %s", pc, ctx.Path())) + } + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + return nil + } + + return ctx.ProviderInputConfig[pc.String()] +} + +func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.AbsProviderConfig, c map[string]cty.Value) { + absProvider := pc + if !pc.Module.IsRoot() { + // Only root module provider configurations can have input. 
+ log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") + return + } + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderInputConfig[absProvider.String()] = c + ctx.ProviderLock.Unlock() +} + +func (ctx *BuiltinEvalContext) Provisioner(n string) (provisioners.Interface, error) { + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + p, ok := ctx.ProvisionerCache[n] + if !ok { + var err error + p, err = ctx.Plugins.NewProvisionerInstance(n) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[n] = p + } + + return p, nil +} + +func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) (*configschema.Block, error) { + return ctx.Plugins.ProvisionerSchema(n) +} + +func (ctx *BuiltinEvalContext) CloseProvisioners() error { + var diags tfdiags.Diagnostics + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + for name, prov := range ctx.ProvisionerCache { + err := prov.Close() + if err != nil { + diags = diags.Append(fmt.Errorf("provisioner.Close %s: %w", name, err)) + } + } + + return diags.Err() +} + +func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + scope := ctx.EvaluationScope(self, nil, keyData) + body, evalDiags := scope.ExpandBlock(body, schema) + diags = diags.Append(evalDiags) + val, evalDiags := scope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + return val, body, diags +} + +func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(self, nil, EvalDataForNoInstanceKey) + return scope.EvalExpr(expr, wantType) +} + +func (ctx *BuiltinEvalContext) EvaluateReplaceTriggeredBy(expr hcl.Expression, repData instances.RepetitionData) (*addrs.Reference, bool, 
tfdiags.Diagnostics) { + + // get the reference to lookup changes in the plan + ref, diags := evalReplaceTriggeredByExpr(expr, repData) + if diags.HasErrors() { + return nil, false, diags + } + + var changes []*plans.ResourceInstanceChangeSrc + // store the address once we get it for validation + var resourceAddr addrs.Resource + + // The reference is either a resource or resource instance + switch sub := ref.Subject.(type) { + case addrs.Resource: + resourceAddr = sub + rc := sub.Absolute(ctx.Path()) + changes = ctx.Changes().GetChangesForAbsResource(rc) + case addrs.ResourceInstance: + resourceAddr = sub.ContainingResource() + rc := sub.Absolute(ctx.Path()) + change := ctx.Changes().GetResourceInstanceChange(rc, states.CurrentGen) + if change != nil { + // we'll generate an error below if there was no change + changes = append(changes, change) + } + } + + // Do some validation to make sure we are expecting a change at all + cfg := ctx.Evaluator.Config.Descendent(ctx.Path().Module()) + resCfg := cfg.Module.ResourceByAddr(resourceAddr) + if resCfg == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A resource %s has not been declared in %s`, ref.Subject, moduleDisplayAddr(ctx.Path())), + Subject: expr.Range().Ptr(), + }) + return nil, false, diags + } + + if len(changes) == 0 { + // If the resource is valid there should always be at least one change. + diags = diags.Append(fmt.Errorf("no change found for %s in %s", ref.Subject, moduleDisplayAddr(ctx.Path()))) + return nil, false, diags + } + + // If we don't have a traversal beyond the resource, then we can just look + // for any change. + if len(ref.Remaining) == 0 { + for _, c := range changes { + switch c.ChangeSrc.Action { + // Only immediate changes to the resource will trigger replacement. 
+ case plans.Update, plans.DeleteThenCreate, plans.CreateThenDelete: + return ref, true, diags + } + } + + // no change triggered + return nil, false, diags + } + + // This must be an instances to have a remaining traversal, which means a + // single change. + change := changes[0] + + // Make sure the change is actionable. A create or delete action will have + // a change in value, but are not valid for our purposes here. + switch change.ChangeSrc.Action { + case plans.Update, plans.DeleteThenCreate, plans.CreateThenDelete: + // OK + default: + return nil, false, diags + } + + // Since we have a traversal after the resource reference, we will need to + // decode the changes, which means we need a schema. + providerAddr := change.ProviderAddr + schema, err := ctx.ProviderSchema(providerAddr) + if err != nil { + diags = diags.Append(err) + return nil, false, diags + } + + resAddr := change.Addr.ContainingResource().Resource + resSchema, _ := schema.SchemaForResourceType(resAddr.Mode, resAddr.Type) + ty := resSchema.ImpliedType() + + before, err := change.ChangeSrc.Before.Decode(ty) + if err != nil { + diags = diags.Append(err) + return nil, false, diags + } + + after, err := change.ChangeSrc.After.Decode(ty) + if err != nil { + diags = diags.Append(err) + return nil, false, diags + } + + path := traversalToPath(ref.Remaining) + attrBefore, _ := path.Apply(before) + attrAfter, _ := path.Apply(after) + + if attrBefore == cty.NilVal || attrAfter == cty.NilVal { + replace := attrBefore != attrAfter + return ref, replace, diags + } + + replace := !attrBefore.RawEquals(attrAfter) + + return ref, replace, diags +} + +func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, source addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + if !ctx.pathSet { + panic("context path not set") + } + data := &evaluationStateData{ + Evaluator: ctx.Evaluator, + ModulePath: ctx.PathValue, + InstanceKeyData: keyData, + Operation: ctx.Evaluator.Operation, + } + + 
// ctx.PathValue is the path of the module that contains whatever + // expression the caller will be trying to evaluate, so this will + // activate only the experiments from that particular module, to + // be consistent with how experiment checking in the "configs" + // package itself works. The nil check here is for robustness in + // incompletely-mocked testing situations; mc should never be nil in + // real situations. + mc := ctx.Evaluator.Config.DescendentForInstance(ctx.PathValue) + + if mc == nil || mc.Module.ProviderRequirements == nil { + return ctx.Evaluator.Scope(data, self, source, nil) + } + + scope := ctx.Evaluator.Scope(data, self, source, func(pf addrs.ProviderFunction, rng tfdiags.SourceRange) (*function.Function, tfdiags.Diagnostics) { + return evalContextProviderFunction(ctx.Provider, mc, ctx.Evaluator.Operation, pf, rng) + }) + scope.SetActiveExperiments(mc.Module.ActiveExperiments) + + return scope +} + +func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { + if !ctx.pathSet { + panic("context path not set") + } + return ctx.PathValue +} + +func (ctx *BuiltinEvalContext) SetRootModuleArgument(addr addrs.InputVariable, v cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() + + log.Printf("[TRACE] BuiltinEvalContext: Storing final value for variable %s", addr.Absolute(addrs.RootModuleInstance)) + key := addrs.RootModuleInstance.String() + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = args + } + args[addr.Name] = v +} + +func (ctx *BuiltinEvalContext) SetModuleCallArgument(callAddr addrs.ModuleCallInstance, varAddr addrs.InputVariable, v cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() + + if !ctx.pathSet { + panic("context path not set") + } + + childPath := callAddr.ModuleInstance(ctx.PathValue) + log.Printf("[TRACE] BuiltinEvalContext: Storing final value for variable %s", varAddr.Absolute(childPath)) + 
key := childPath.String() + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = args + } + args[varAddr.Name] = v +} + +func (ctx *BuiltinEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() + + modKey := addr.Module.String() + modVars := ctx.VariableValues[modKey] + val, ok := modVars[addr.Variable.Name] + if !ok { + return cty.DynamicVal + } + return val +} + +func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { + return ctx.ChangesValue +} + +func (ctx *BuiltinEvalContext) State() *states.SyncState { + return ctx.StateValue +} + +func (ctx *BuiltinEvalContext) Checks() *checks.State { + return ctx.ChecksValue +} + +func (ctx *BuiltinEvalContext) RefreshState() *states.SyncState { + return ctx.RefreshStateValue +} + +func (ctx *BuiltinEvalContext) PrevRunState() *states.SyncState { + return ctx.PrevRunStateValue +} + +func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander { + return ctx.InstanceExpanderValue +} + +func (ctx *BuiltinEvalContext) MoveResults() refactoring.MoveResults { + return ctx.MoveResultsValue +} + +func (ctx *BuiltinEvalContext) ImportResolver() *ImportResolver { + return ctx.ImportResolverValue +} + +func (ctx *BuiltinEvalContext) GetEncryption() encryption.Encryption { + return ctx.Encryption +} diff --git a/pkg/tofu/eval_context_builtin_test.go b/pkg/tofu/eval_context_builtin_test.go new file mode 100644 index 00000000000..ad398fd5945 --- /dev/null +++ b/pkg/tofu/eval_context_builtin_test.go @@ -0,0 +1,93 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "sync" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +func TestBuiltinEvalContextProviderInput(t *testing.T) { + var lock sync.Mutex + cache := make(map[string]map[string]cty.Value) + + ctx1 := testBuiltinEvalContext(t) + ctx1 = ctx1.WithPath(addrs.RootModuleInstance).(*BuiltinEvalContext) + ctx1.ProviderInputConfig = cache + ctx1.ProviderLock = &lock + + ctx2 := testBuiltinEvalContext(t) + ctx2 = ctx2.WithPath(addrs.RootModuleInstance.Child("child", addrs.NoKey)).(*BuiltinEvalContext) + ctx2.ProviderInputConfig = cache + ctx2.ProviderLock = &lock + + providerAddr1 := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + providerAddr2 := addrs.AbsProviderConfig{ + Module: addrs.RootModule.Child("child"), + Provider: addrs.NewDefaultProvider("foo"), + } + + expected1 := map[string]cty.Value{"value": cty.StringVal("foo")} + ctx1.SetProviderInput(providerAddr1, expected1) + + try2 := map[string]cty.Value{"value": cty.StringVal("bar")} + ctx2.SetProviderInput(providerAddr2, try2) // ignored because not a root module + + actual1 := ctx1.ProviderInput(providerAddr1) + actual2 := ctx2.ProviderInput(providerAddr2) + + if !reflect.DeepEqual(actual1, expected1) { + t.Errorf("wrong result 1\ngot: %#v\nwant: %#v", actual1, expected1) + } + if actual2 != nil { + t.Errorf("wrong result 2\ngot: %#v\nwant: %#v", actual2, nil) + } +} + +func TestBuildingEvalContextInitProvider(t *testing.T) { + var lock sync.Mutex + + testP := &MockProvider{} + + ctx := testBuiltinEvalContext(t) + ctx = ctx.WithPath(addrs.RootModuleInstance).(*BuiltinEvalContext) + ctx.ProviderLock = &lock + ctx.ProviderCache = make(map[string]providers.Interface) + ctx.Plugins = newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): 
providers.FactoryFixed(testP), + }, nil) + + providerAddrDefault := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + } + providerAddrAlias := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + Alias: "foo", + } + + _, err := ctx.InitProvider(providerAddrDefault) + if err != nil { + t.Fatalf("error initializing provider test: %s", err) + } + _, err = ctx.InitProvider(providerAddrAlias) + if err != nil { + t.Fatalf("error initializing provider test.foo: %s", err) + } +} + +func testBuiltinEvalContext(t *testing.T) *BuiltinEvalContext { + return &BuiltinEvalContext{} +} diff --git a/pkg/tofu/eval_context_mock.go b/pkg/tofu/eval_context_mock.go new file mode 100644 index 00000000000..9f7673a5fc5 --- /dev/null +++ b/pkg/tofu/eval_context_mock.go @@ -0,0 +1,419 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/refactoring" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// MockEvalContext is a mock version of EvalContext that can be used +// for tests. 
+type MockEvalContext struct { + StoppedCalled bool + StoppedValue <-chan struct{} + + HookCalled bool + HookHook Hook + HookError error + + InputCalled bool + InputInput UIInput + + InitProviderCalled bool + InitProviderType string + InitProviderAddr addrs.AbsProviderConfig + InitProviderProvider providers.Interface + InitProviderError error + + ProviderCalled bool + ProviderAddr addrs.AbsProviderConfig + ProviderProvider providers.Interface + + ProviderSchemaCalled bool + ProviderSchemaAddr addrs.AbsProviderConfig + ProviderSchemaSchema providers.ProviderSchema + ProviderSchemaError error + + CloseProviderCalled bool + CloseProviderAddr addrs.AbsProviderConfig + CloseProviderProvider providers.Interface + + ProviderInputCalled bool + ProviderInputAddr addrs.AbsProviderConfig + ProviderInputValues map[string]cty.Value + + SetProviderInputCalled bool + SetProviderInputAddr addrs.AbsProviderConfig + SetProviderInputValues map[string]cty.Value + + ConfigureProviderFn func( + addr addrs.AbsProviderConfig, + cfg cty.Value) tfdiags.Diagnostics // overrides the other values below, if set + ConfigureProviderCalled bool + ConfigureProviderAddr addrs.AbsProviderConfig + ConfigureProviderConfig cty.Value + ConfigureProviderDiags tfdiags.Diagnostics + + ProvisionerCalled bool + ProvisionerName string + ProvisionerProvisioner provisioners.Interface + + ProvisionerSchemaCalled bool + ProvisionerSchemaName string + ProvisionerSchemaSchema *configschema.Block + ProvisionerSchemaError error + + CloseProvisionersCalled bool + + EvaluateBlockCalled bool + EvaluateBlockBody hcl.Body + EvaluateBlockSchema *configschema.Block + EvaluateBlockSelf addrs.Referenceable + EvaluateBlockKeyData InstanceKeyEvalData + EvaluateBlockResultFunc func( + body hcl.Body, + schema *configschema.Block, + self addrs.Referenceable, + keyData InstanceKeyEvalData, + ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateBlockResult cty.Value + 
EvaluateBlockExpandedBody hcl.Body + EvaluateBlockDiags tfdiags.Diagnostics + + EvaluateExprCalled bool + EvaluateExprExpr hcl.Expression + EvaluateExprWantType cty.Type + EvaluateExprSelf addrs.Referenceable + EvaluateExprResultFunc func( + expr hcl.Expression, + wantType cty.Type, + self addrs.Referenceable, + ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateExprResult cty.Value + EvaluateExprDiags tfdiags.Diagnostics + + EvaluationScopeCalled bool + EvaluationScopeSelf addrs.Referenceable + EvaluationScopeKeyData InstanceKeyEvalData + EvaluationScopeScope *lang.Scope + + PathCalled bool + PathPath addrs.ModuleInstance + + SetRootModuleArgumentCalled bool + SetRootModuleArgumentAddr addrs.InputVariable + SetRootModuleArgumentValue cty.Value + SetRootModuleArgumentFunc func(addr addrs.InputVariable, v cty.Value) + + SetModuleCallArgumentCalled bool + SetModuleCallArgumentModuleCall addrs.ModuleCallInstance + SetModuleCallArgumentVariable addrs.InputVariable + SetModuleCallArgumentValue cty.Value + SetModuleCallArgumentFunc func(callAddr addrs.ModuleCallInstance, varAddr addrs.InputVariable, v cty.Value) + + GetVariableValueCalled bool + GetVariableValueAddr addrs.AbsInputVariableInstance + GetVariableValueValue cty.Value + GetVariableValueFunc func(addr addrs.AbsInputVariableInstance) cty.Value // supersedes GetVariableValueValue + + ChangesCalled bool + ChangesChanges *plans.ChangesSync + + StateCalled bool + StateState *states.SyncState + + ChecksCalled bool + ChecksState *checks.State + + RefreshStateCalled bool + RefreshStateState *states.SyncState + + PrevRunStateCalled bool + PrevRunStateState *states.SyncState + + MoveResultsCalled bool + MoveResultsResults refactoring.MoveResults + + ImportResolverCalled bool + ImportResolverResults *ImportResolver + + InstanceExpanderCalled bool + InstanceExpanderExpander *instances.Expander +} + +// MockEvalContext implements EvalContext +var _ EvalContext = (*MockEvalContext)(nil) 
+ +func (c *MockEvalContext) Stopped() <-chan struct{} { + c.StoppedCalled = true + return c.StoppedValue +} + +func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + c.HookCalled = true + if c.HookHook != nil { + if _, err := fn(c.HookHook); err != nil { + return err + } + } + + return c.HookError +} + +func (c *MockEvalContext) Input() UIInput { + c.InputCalled = true + return c.InputInput +} + +func (c *MockEvalContext) InitProvider(addr addrs.AbsProviderConfig) (providers.Interface, error) { + c.InitProviderCalled = true + c.InitProviderType = addr.String() + c.InitProviderAddr = addr + return c.InitProviderProvider, c.InitProviderError +} + +func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + c.ProviderCalled = true + c.ProviderAddr = addr + return c.ProviderProvider +} + +func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) (providers.ProviderSchema, error) { + c.ProviderSchemaCalled = true + c.ProviderSchemaAddr = addr + return c.ProviderSchemaSchema, c.ProviderSchemaError +} + +func (c *MockEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { + c.CloseProviderCalled = true + c.CloseProviderAddr = addr + return nil +} + +func (c *MockEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + + c.ConfigureProviderCalled = true + c.ConfigureProviderAddr = addr + c.ConfigureProviderConfig = cfg + if c.ConfigureProviderFn != nil { + return c.ConfigureProviderFn(addr, cfg) + } + return c.ConfigureProviderDiags +} + +func (c *MockEvalContext) ProviderInput(addr addrs.AbsProviderConfig) map[string]cty.Value { + c.ProviderInputCalled = true + c.ProviderInputAddr = addr + return c.ProviderInputValues +} + +func (c *MockEvalContext) SetProviderInput(addr addrs.AbsProviderConfig, vals map[string]cty.Value) { + c.SetProviderInputCalled = true + c.SetProviderInputAddr = addr + c.SetProviderInputValues = vals +} + +func (c *MockEvalContext) 
Provisioner(n string) (provisioners.Interface, error) { + c.ProvisionerCalled = true + c.ProvisionerName = n + return c.ProvisionerProvisioner, nil +} + +func (c *MockEvalContext) ProvisionerSchema(n string) (*configschema.Block, error) { + c.ProvisionerSchemaCalled = true + c.ProvisionerSchemaName = n + return c.ProvisionerSchemaSchema, c.ProvisionerSchemaError +} + +func (c *MockEvalContext) CloseProvisioners() error { + c.CloseProvisionersCalled = true + return nil +} + +func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + c.EvaluateBlockCalled = true + c.EvaluateBlockBody = body + c.EvaluateBlockSchema = schema + c.EvaluateBlockSelf = self + c.EvaluateBlockKeyData = keyData + if c.EvaluateBlockResultFunc != nil { + return c.EvaluateBlockResultFunc(body, schema, self, keyData) + } + return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags +} + +func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + c.EvaluateExprCalled = true + c.EvaluateExprExpr = expr + c.EvaluateExprWantType = wantType + c.EvaluateExprSelf = self + if c.EvaluateExprResultFunc != nil { + return c.EvaluateExprResultFunc(expr, wantType, self) + } + return c.EvaluateExprResult, c.EvaluateExprDiags +} + +func (c *MockEvalContext) EvaluateReplaceTriggeredBy(hcl.Expression, instances.RepetitionData) (*addrs.Reference, bool, tfdiags.Diagnostics) { + return nil, false, nil +} + +// installSimpleEval is a helper to install a simple mock implementation of +// both EvaluateBlock and EvaluateExpr into the receiver. +// +// These default implementations will either evaluate the given input against +// the scope in field EvaluationScopeScope or, if it is nil, with no eval +// context at all so that only constant values may be used. 
+// +// This function overwrites any existing functions installed in fields +// EvaluateBlockResultFunc and EvaluateExprResultFunc. +func (c *MockEvalContext) installSimpleEval() { + c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + var diags tfdiags.Diagnostics + body, diags = scope.ExpandBlock(body, schema) + if diags.HasErrors() { + return cty.DynamicVal, body, diags + } + val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return cty.DynamicVal, body, diags + } + return val, body, diags + } + + // Fallback codepath supporting constant values only. + val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) + return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) + } + c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + return scope.EvalExpr(expr, wantType) + } + + // Fallback codepath supporting constant values only. 
+ var diags tfdiags.Diagnostics + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(err) + return cty.DynamicVal, diags + } + return val, diags + } +} + +func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, source addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + c.EvaluationScopeCalled = true + c.EvaluationScopeSelf = self + c.EvaluationScopeKeyData = keyData + return c.EvaluationScopeScope +} + +func (c *MockEvalContext) WithPath(path addrs.ModuleInstance) EvalContext { + newC := *c + newC.PathPath = path + return &newC +} + +func (c *MockEvalContext) Path() addrs.ModuleInstance { + c.PathCalled = true + return c.PathPath +} + +func (c *MockEvalContext) SetRootModuleArgument(addr addrs.InputVariable, v cty.Value) { + c.SetRootModuleArgumentCalled = true + c.SetRootModuleArgumentAddr = addr + c.SetRootModuleArgumentValue = v + if c.SetRootModuleArgumentFunc != nil { + c.SetRootModuleArgumentFunc(addr, v) + } +} + +func (c *MockEvalContext) SetModuleCallArgument(callAddr addrs.ModuleCallInstance, varAddr addrs.InputVariable, v cty.Value) { + c.SetModuleCallArgumentCalled = true + c.SetModuleCallArgumentModuleCall = callAddr + c.SetModuleCallArgumentVariable = varAddr + c.SetModuleCallArgumentValue = v + if c.SetModuleCallArgumentFunc != nil { + c.SetModuleCallArgumentFunc(callAddr, varAddr, v) + } +} + +func (c *MockEvalContext) GetVariableValue(addr addrs.AbsInputVariableInstance) cty.Value { + c.GetVariableValueCalled = true + c.GetVariableValueAddr = addr + if c.GetVariableValueFunc != nil { + return c.GetVariableValueFunc(addr) + } + return c.GetVariableValueValue +} + +func (c *MockEvalContext) Changes() *plans.ChangesSync { + c.ChangesCalled = true + return c.ChangesChanges +} + +func (c *MockEvalContext) State() *states.SyncState { + c.StateCalled = 
true + return c.StateState +} + +func (c *MockEvalContext) Checks() *checks.State { + c.ChecksCalled = true + return c.ChecksState +} + +func (c *MockEvalContext) RefreshState() *states.SyncState { + c.RefreshStateCalled = true + return c.RefreshStateState +} + +func (c *MockEvalContext) PrevRunState() *states.SyncState { + c.PrevRunStateCalled = true + return c.PrevRunStateState +} + +func (c *MockEvalContext) MoveResults() refactoring.MoveResults { + c.MoveResultsCalled = true + return c.MoveResultsResults +} + +func (c *MockEvalContext) ImportResolver() *ImportResolver { + c.ImportResolverCalled = true + return c.ImportResolverResults +} + +func (c *MockEvalContext) InstanceExpander() *instances.Expander { + c.InstanceExpanderCalled = true + return c.InstanceExpanderExpander +} + +func (c *MockEvalContext) GetEncryption() encryption.Encryption { + return encryption.Disabled() +} diff --git a/pkg/tofu/eval_count.go b/pkg/tofu/eval_count.go new file mode 100644 index 00000000000..47ec7c978ff --- /dev/null +++ b/pkg/tofu/eval_count.go @@ -0,0 +1,112 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// evaluateCountExpression is our standard mechanism for interpreting an +// expression given for a "count" argument on a resource or a module. This +// should be called during expansion in order to determine the final count +// value. +// +// evaluateCountExpression differs from evaluateCountExpressionValue by +// returning an error if the count value is not known, and converting the +// cty.Value to an integer. 
+func evaluateCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { + countVal, diags := evaluateCountExpressionValue(expr, ctx) + if !countVal.IsKnown() { + // Currently this is a rather bad outcome from a UX standpoint, since we have + // no real mechanism to deal with this situation and all we can do is produce + // an error message. + // FIXME: In future, implement a built-in mechanism for deferring changes that + // can't yet be predicted, and use it to guide the user through several + // plan/apply steps until the desired configuration is eventually reached. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so OpenTofu cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`, + Subject: expr.Range().Ptr(), + + // TODO: Also populate Expression and EvalContext in here, but + // we can't easily do that right now because the hcl.EvalContext + // (which is not the same as the ctx we have in scope here) is + // hidden away inside evaluateCountExpressionValue. + Extra: diagnosticCausedByUnknown(true), + }) + } + + if countVal.IsNull() || !countVal.IsKnown() { + return -1, diags + } + + count, _ := countVal.AsBigFloat().Int64() + return int(count), diags +} + +// evaluateCountExpressionValue is like evaluateCountExpression +// except that it returns a cty.Value which must be a cty.Number and can be +// unknown. 
+func evaluateCountExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + nullCount := cty.NullVal(cty.Number) + if expr == nil { + return nullCount, nil + } + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return nullCount, diags + } + + // Unmark the count value, sensitive values are allowed in count but not for_each, + // as using it here will not disclose the sensitive value + countVal, _ = countVal.Unmark() + + switch { + case countVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return nullCount, diags + + case !countVal.IsKnown(): + return cty.UnknownVal(cty.Number), diags + } + + var count int + err := gocty.FromCtyValue(countVal, &count) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return nullCount, diags + } + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: must be greater than or equal to zero.`, + Subject: expr.Range().Ptr(), + }) + return nullCount, diags + } + + return countVal, diags +} diff --git a/pkg/tofu/eval_count_test.go b/pkg/tofu/eval_count_test.go new file mode 100644 index 00000000000..a0043859a89 --- /dev/null +++ b/pkg/tofu/eval_count_test.go @@ -0,0 +1,51 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestEvaluateCountExpression(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + Count int + }{ + "zero": { + hcltest.MockExprLiteral(cty.NumberIntVal(0)), + 0, + }, + "expression with marked value": { + hcltest.MockExprLiteral(cty.NumberIntVal(8).Mark(marks.Sensitive)), + 8, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + countVal, diags := evaluateCountExpression(test.Expr, ctx) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if !reflect.DeepEqual(countVal, test.Count) { + t.Errorf( + "wrong map value\ngot: %swant: %s", + spew.Sdump(countVal), spew.Sdump(test.Count), + ) + } + }) + } +} diff --git a/pkg/tofu/eval_for_each.go b/pkg/tofu/eval_for_each.go new file mode 100644 index 00000000000..ebe4598e9f4 --- /dev/null +++ b/pkg/tofu/eval_for_each.go @@ -0,0 +1,216 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// evaluateForEachExpression is our standard mechanism for interpreting an +// expression given for a "for_each" argument on a resource or a module. This +// should be called during expansion in order to determine the final keys and +// values. 
+// +// evaluateForEachExpression differs from evaluateForEachExpressionValue by +// returning an error if the for_each value is not known, and converting the +// cty.Value to a map[string]cty.Value for compatibility with other calls. +func evaluateForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { + const unknownsNotAllowed = false + const tupleNotAllowed = false + forEachVal, diags := evaluateForEachExpressionValue(expr, ctx, unknownsNotAllowed, tupleNotAllowed) + // forEachVal might be unknown, but if it is then there should already + // be an error about it in diags, which we'll return below. + + if forEachVal.IsNull() || !forEachVal.IsKnown() || markSafeLengthInt(forEachVal) == 0 { + // we check length, because an empty set returns a nil map + return map[string]cty.Value{}, diags + } + + return forEachVal.AsValueMap(), diags +} + +// evaluateForEachExpressionValue is like evaluateForEachExpression +// except that it returns a cty.Value map or set which can be unknown. +// The 'allowTuple' argument is used to support evaluating for_each from tuple +// values, and is currently supported when using for_each in import blocks. +func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext, allowUnknown bool, allowTuple bool) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + nullMap := cty.NullVal(cty.Map(cty.DynamicPseudoType)) + + if expr == nil { + return nullMap, diags + } + + refs, moreDiags := lang.ReferencesInExpr(addrs.ParseRef, expr) + diags = diags.Append(moreDiags) + scope := ctx.EvaluationScope(nil, nil, EvalDataForNoInstanceKey) + var hclCtx *hcl.EvalContext + if scope != nil { + hclCtx, moreDiags = scope.EvalContext(refs) + } else { + // This shouldn't happen in real code, but it can unfortunately arise + // in unit tests due to incompletely-implemented mocks.
:( + hclCtx = &hcl.EvalContext{} + } + diags = diags.Append(moreDiags) + if diags.HasErrors() { // Can't continue if we don't even have a valid scope + return nullMap, diags + } + + forEachVal, forEachDiags := expr.Value(hclCtx) + diags = diags.Append(forEachDiags) + + // If a whole map is marked, or a set contains marked values (which means the set is then marked) + // give an error diagnostic as this value cannot be used in for_each + if forEachVal.HasMark(marks.Sensitive) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + Extra: diagnosticCausedBySensitive(true), + }) + } + + ty := forEachVal.Type() + + var isAllowedType bool + var allowedTypesMessage string + if allowTuple { + isAllowedType = ty.IsMapType() || ty.IsSetType() || ty.IsObjectType() || ty.IsTupleType() + allowedTypesMessage = "map, set of strings, or a tuple" + } else { + isAllowedType = ty.IsMapType() || ty.IsSetType() || ty.IsObjectType() + allowedTypesMessage = "map, or set of strings" + } + + // Check if the type is allowed whether the value is marked or not + if forEachVal.IsKnown() && !isAllowedType { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a %s, and you have provided a value of type %s.`, allowedTypesMessage, ty.FriendlyName()), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + } + + // Return it to avoid expose sensitive value by further check + if diags.HasErrors() { + return nullMap, diags + } + + const errInvalidUnknownDetailMap = "The \"for_each\" map includes keys derived 
from resource attributes that cannot be determined until apply, and so OpenTofu cannot determine the full set of keys that will identify the instances of this resource.\n\nWhen working with unknown values in for_each, it's better to define the map keys statically in your configuration and place apply-time results only in the map values.\n\nAlternatively, you could use the -target planning option to first apply only the resources that the for_each value depends on, and then apply a second time to fully converge." + const errInvalidUnknownDetailSet = "The \"for_each\" set includes values derived from resource attributes that cannot be determined until apply, and so OpenTofu cannot determine the full set of keys that will identify the instances of this resource.\n\nWhen working with unknown values in for_each, it's better to use a map value where the keys are defined statically in your configuration and where only the values contain apply-time results.\n\nAlternatively, you could use the -target planning option to first apply only the resources that the for_each value depends on, and then apply a second time to fully converge." + + switch { + case forEachVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A %s is allowed.`, allowedTypesMessage), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return nullMap, diags + case !forEachVal.IsKnown(): + if !allowUnknown { + var detailMsg string + switch { + case ty.IsSetType(): + detailMsg = errInvalidUnknownDetailSet + default: + detailMsg = errInvalidUnknownDetailMap + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: detailMsg, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + Extra: diagnosticCausedByUnknown(true), + }) + } + // ensure that we have a map, and not a DynamicValue + return cty.UnknownVal(cty.Map(cty.DynamicPseudoType)), diags + case markSafeLengthInt(forEachVal) == 0: + // If the map is empty ({}), return an empty map, because cty will + // return nil when representing {} AsValueMap. This also covers an empty + // set (toset([])) + return forEachVal, diags + } + + if ty.IsSetType() { + // since we can't use a set values that are unknown, we treat the + // entire set as unknown + if !forEachVal.IsWhollyKnown() { + if !allowUnknown { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: errInvalidUnknownDetailSet, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + Extra: diagnosticCausedByUnknown(true), + }) + } + return cty.UnknownVal(ty), diags + } + + if ty.ElementType() != cty.String { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return cty.NullVal(ty), diags + } + + // A set of strings may contain null, which makes it impossible to + // 
convert to a map, so we must return an error + it := forEachVal.ElementIterator() + for it.Next() { + item, _ := it.Element() + if item.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: `The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + return cty.NullVal(ty), diags + } + } + } + + return forEachVal, nil +} + +// markSafeLengthInt allows calling LengthInt on marked values safely +func markSafeLengthInt(val cty.Value) int { + v, _ := val.UnmarkDeep() + return v.LengthInt() +} diff --git a/pkg/tofu/eval_for_each_test.go b/pkg/tofu/eval_for_each_test.go new file mode 100644 index 00000000000..f6934edc5d2 --- /dev/null +++ b/pkg/tofu/eval_for_each_test.go @@ -0,0 +1,401 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestEvaluateForEachExpression_valid(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + ForEachMap map[string]cty.Value + }{ + "empty set": { + hcltest.MockExprLiteral(cty.SetValEmpty(cty.String)), + map[string]cty.Value{}, + }, + "multi-value string set": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), + map[string]cty.Value{ + "a": cty.StringVal("a"), + "b": cty.StringVal("b"), + }, + }, + "empty map": { + hcltest.MockExprLiteral(cty.MapValEmpty(cty.Bool)), + map[string]cty.Value{}, + }, + "map": { + hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.BoolVal(true), + "b": cty.BoolVal(false), + })), + map[string]cty.Value{ + "a": cty.BoolVal(true), + "b": cty.BoolVal(false), + }, + }, + "map containing unknown values": { + hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.UnknownVal(cty.Bool), + "b": cty.UnknownVal(cty.Bool), + })), + map[string]cty.Value{ + "a": cty.UnknownVal(cty.Bool), + "b": cty.UnknownVal(cty.Bool), + }, + }, + "map containing sensitive values, but strings are literal": { + hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.BoolVal(true).Mark(marks.Sensitive), + "b": cty.BoolVal(false), + })), + map[string]cty.Value{ + "a": cty.BoolVal(true).Mark(marks.Sensitive), + "b": cty.BoolVal(false), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + forEachMap, diags := evaluateForEachExpression(test.Expr, ctx) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if 
!reflect.DeepEqual(forEachMap, test.ForEachMap) { + t.Errorf( + "wrong map value\ngot: %swant: %s", + spew.Sdump(forEachMap), spew.Sdump(test.ForEachMap), + ) + } + + }) + } +} + +func TestEvaluateForEachExpression_errors(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + Summary, DetailSubstring string + CausedByUnknown, CausedBySensitive bool + }{ + "null set": { + hcltest.MockExprLiteral(cty.NullVal(cty.Set(cty.String))), + "Invalid for_each argument", + `the given "for_each" argument value is null`, + false, false, + }, + "string": { + hcltest.MockExprLiteral(cty.StringVal("i am definitely a set")), + "Invalid for_each argument", + "must be a map, or set of strings, and you have provided a value of type string", + false, false, + }, + "list": { + hcltest.MockExprLiteral(cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})), + "Invalid for_each argument", + "must be a map, or set of strings, and you have provided a value of type list", + false, false, + }, + "tuple": { + hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), + "Invalid for_each argument", + "must be a map, or set of strings, and you have provided a value of type tuple", + false, false, + }, + "unknown string set": { + hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.String))), + "Invalid for_each argument", + "set includes values derived from resource attributes that cannot be determined until apply", + true, false, + }, + "unknown map": { + hcltest.MockExprLiteral(cty.UnknownVal(cty.Map(cty.Bool))), + "Invalid for_each argument", + "map includes keys derived from resource attributes that cannot be determined until apply", + true, false, + }, + "unknown pseudo-type": { + hcltest.MockExprLiteral(cty.UnknownVal(cty.DynamicPseudoType)), + "Invalid for_each argument", + "map includes keys derived from resource attributes that cannot be determined until apply", + true, false, + }, + "marked map": { + 
hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.BoolVal(true), + "b": cty.BoolVal(false), + }).Mark(marks.Sensitive)), + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + false, true, + }, + "set containing booleans": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.BoolVal(true)})), + "Invalid for_each set argument", + "supports sets of strings, but you have provided a set containing type bool", + false, false, + }, + "set containing null": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.NullVal(cty.String)})), + "Invalid for_each set argument", + "must not contain null values", + false, false, + }, + "set containing unknown value": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.UnknownVal(cty.String)})), + "Invalid for_each argument", + "set includes values derived from resource attributes that cannot be determined until apply", + true, false, + }, + "set containing dynamic unknown value": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.UnknownVal(cty.DynamicPseudoType)})), + "Invalid for_each argument", + "set includes values derived from resource attributes that cannot be determined until apply", + true, false, + }, + "set containing marked values": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.StringVal("beep").Mark(marks.Sensitive), cty.StringVal("boop")})), + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. 
If used, the sensitive value could be exposed as a resource instance key.",
			false, true,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			ctx := &MockEvalContext{}
			ctx.installSimpleEval()
			_, diags := evaluateForEachExpression(test.Expr, ctx)

			// Every case in this table expects exactly one error diagnostic.
			// Pass len(diags) to %d — passing the slice itself is a go vet
			// printf violation and prints garbage instead of the count.
			if len(diags) != 1 {
				t.Fatalf("got %d diagnostics; want 1", len(diags))
			}
			if got, want := diags[0].Severity(), tfdiags.Error; got != want {
				t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
			}
			if got, want := diags[0].Description().Summary, test.Summary; got != want {
				t.Errorf("wrong diagnostic summary\ngot: %s\nwant: %s", got, want)
			}
			if got, want := diags[0].Description().Detail, test.DetailSubstring; !strings.Contains(got, want) {
				t.Errorf("wrong diagnostic detail\ngot: %s\nwant substring: %s", got, want)
			}
			if fromExpr := diags[0].FromExpr(); fromExpr != nil {
				if fromExpr.Expression == nil {
					t.Errorf("diagnostic does not refer to an expression")
				}
				if fromExpr.EvalContext == nil {
					t.Errorf("diagnostic does not refer to an EvalContext")
				}
			} else {
				t.Errorf("diagnostic does not support FromExpr\ngot: %s", spew.Sdump(diags[0]))
			}

			if got, want := tfdiags.DiagnosticCausedByUnknown(diags[0]), test.CausedByUnknown; got != want {
				t.Errorf("wrong result from tfdiags.DiagnosticCausedByUnknown\ngot: %#v\nwant: %#v", got, want)
			}
			if got, want := tfdiags.DiagnosticCausedBySensitive(diags[0]), test.CausedBySensitive; got != want {
				t.Errorf("wrong result from tfdiags.DiagnosticCausedBySensitive\ngot: %#v\nwant: %#v", got, want)
			}
		})
	}
}

func TestEvaluateForEachExpression_multi_errors(t *testing.T) {
	tests := map[string]struct {
		Expr   hcl.Expression
		Wanted []struct {
			Summary           string
			DetailSubstring   string
			CausedBySensitive bool
		}
	}{
		"marked list": {
			hcltest.MockExprLiteral(cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")}).Mark(marks.Sensitive)),
			[]struct {
				Summary           string
				DetailSubstring
string + CausedBySensitive bool + }{ + { + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + true, + }, + { + "Invalid for_each argument", + "must be a map, or set of strings, and you have provided a value of type list", + false, + }, + }, + }, + "marked tuple": { + hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")}).Mark(marks.Sensitive)), + []struct { + Summary string + DetailSubstring string + CausedBySensitive bool + }{ + { + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + true, + }, + { + "Invalid for_each argument", + "must be a map, or set of strings, and you have provided a value of type tuple", + false, + }, + }, + }, + "marked string": { + hcltest.MockExprLiteral(cty.StringVal("a").Mark(marks.Sensitive)), + []struct { + Summary string + DetailSubstring string + CausedBySensitive bool + }{ + { + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. 
If used, the sensitive value could be exposed as a resource instance key.",
					true,
				},
				{
					"Invalid for_each argument",
					"must be a map, or set of strings, and you have provided a value of type string",
					false,
				},
			},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			ctx := &MockEvalContext{}
			ctx.installSimpleEval()
			_, diags := evaluateForEachExpression(test.Expr, ctx)
			// Stop immediately on a count mismatch: the loop below indexes
			// diags[idx], which would panic with index-out-of-range if fewer
			// diagnostics were returned than expected.
			if len(diags) != len(test.Wanted) {
				t.Fatalf("unexpected diagnostics %s", spew.Sdump(diags))
			}
			for idx := range test.Wanted {
				if got, want := diags[idx].Severity(), tfdiags.Error; got != want {
					t.Errorf("wrong diagnostic severity %#v; want %#v", got, want)
				}
				if got, want := diags[idx].Description().Summary, test.Wanted[idx].Summary; got != want {
					t.Errorf("wrong diagnostic summary\ngot: %s\nwant: %s", got, want)
				}
				if got, want := diags[idx].Description().Detail, test.Wanted[idx].DetailSubstring; !strings.Contains(got, want) {
					t.Errorf("wrong diagnostic detail\ngot: %s\nwant substring: %s", got, want)
				}
				if fromExpr := diags[idx].FromExpr(); fromExpr != nil {
					if fromExpr.Expression == nil {
						t.Errorf("diagnostic does not refer to an expression")
					}
					if fromExpr.EvalContext == nil {
						t.Errorf("diagnostic does not refer to an EvalContext")
					}
				} else {
					t.Errorf("diagnostic does not support FromExpr\ngot: %s", spew.Sdump(diags[idx]))
				}

				if got, want := tfdiags.DiagnosticCausedBySensitive(diags[idx]), test.Wanted[idx].CausedBySensitive; got != want {
					t.Errorf("wrong result from tfdiags.DiagnosticCausedBySensitive\ngot: %#v\nwant: %#v", got, want)
				}
			}
		})
	}
}

func TestEvaluateForEachExpressionKnown(t *testing.T) {
	tests := map[string]hcl.Expression{
		"unknown string set": hcltest.MockExprLiteral(cty.UnknownVal(cty.Set(cty.String))),
		"unknown map":        hcltest.MockExprLiteral(cty.UnknownVal(cty.Map(cty.Bool))),
		"unknown tuple":      hcltest.MockExprLiteral(cty.UnknownVal(cty.Tuple([]cty.Type{cty.String,
cty.Number, cty.Bool}))), + "unknown pseudo-type": hcltest.MockExprLiteral(cty.UnknownVal(cty.DynamicPseudoType)), + } + + for name, expr := range tests { + t.Run(name, func(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + forEachVal, diags := evaluateForEachExpressionValue(expr, ctx, true, true) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if forEachVal.IsKnown() { + t.Error("got known, want unknown") + } + }) + } +} + +func TestEvaluateForEachExpressionValueTuple(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + AllowTuple bool + ExpectedError string + }{ + "valid tuple": { + Expr: hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), + AllowTuple: true, + }, + "empty tuple": { + Expr: hcltest.MockExprLiteral(cty.EmptyTupleVal), + AllowTuple: true, + }, + "null tuple": { + Expr: hcltest.MockExprLiteral(cty.NullVal(cty.Tuple([]cty.Type{}))), + AllowTuple: true, + ExpectedError: "the given \"for_each\" argument value is null", + }, + "sensitive tuple": { + Expr: hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}).Mark(marks.Sensitive)), + AllowTuple: true, + ExpectedError: "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments", + }, + "allow tuple is off": { + Expr: hcltest.MockExprLiteral(cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})), + AllowTuple: false, + ExpectedError: "the \"for_each\" argument must be a map, or set of strings, and you have provided a value of type tuple.", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + _, diags := evaluateForEachExpressionValue(test.Expr, ctx, true, test.AllowTuple) + + if test.ExpectedError == "" { + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + } else { + if got, want := 
diags[0].Description().Detail, test.ExpectedError; test.ExpectedError != "" && !strings.Contains(got, want) { + t.Errorf("wrong diagnostic detail\ngot: %s\nwant substring: %s", got, want) + } + } + + }) + } +} diff --git a/pkg/tofu/eval_import.go b/pkg/tofu/eval_import.go new file mode 100644 index 00000000000..0574c62b275 --- /dev/null +++ b/pkg/tofu/eval_import.go @@ -0,0 +1,195 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +func evaluateImportIdExpression(expr hcl.Expression, ctx EvalContext, keyData instances.RepetitionData) (string, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if expr == nil { + return "", diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import id argument", + Detail: "The import ID cannot be null.", + Subject: nil, + }) + } + + // evaluate the import ID and take into consideration the for_each key (if exists) + importIdVal, evalDiags := evaluateExprWithRepetitionData(ctx, expr, cty.String, keyData) + diags = diags.Append(evalDiags) + + if importIdVal.IsNull() { + return "", diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import id argument", + Detail: "The import ID cannot be null.", + Subject: expr.Range().Ptr(), + }) + } + + if !importIdVal.IsKnown() { + return "", diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import id argument", + Detail: `The import block "id" argument depends on resource attributes that cannot be determined until apply, so OpenTofu cannot plan to 
import this resource.`, // FIXME and what should I do about that? + Subject: expr.Range().Ptr(), + // Expression: + // EvalContext: + Extra: diagnosticCausedByUnknown(true), + }) + } + + if importIdVal.HasMark(marks.Sensitive) { + return "", diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import id argument", + Detail: "The import ID cannot be sensitive.", + Subject: expr.Range().Ptr(), + }) + } + + var importId string + err := gocty.FromCtyValue(importIdVal, &importId) + if err != nil { + return "", diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import id argument", + Detail: fmt.Sprintf("The import ID value is unsuitable: %s.", err), + Subject: expr.Range().Ptr(), + }) + } + + return importId, diags +} + +// evaluateExprWithRepetitionData takes the given HCL expression and evaluates +// it to produce a value, while taking into consideration any repetition key +// (a single combination of each.key and each.value of a for_each argument) +// that should be a part of the scope. +func evaluateExprWithRepetitionData(ctx EvalContext, expr hcl.Expression, wantType cty.Type, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(nil, nil, keyData) + return scope.EvalExpr(expr, wantType) +} + +// EvaluateImportAddress takes the raw reference expression of the import address +// from the config, and returns the evaluated address addrs.AbsResourceInstance +// +// The implementation is inspired by config.AbsTraversalForImportToExpr, but this time we can evaluate the expression +// in the indexes of expressions. 
If we encounter a hclsyntax.IndexExpr, we can evaluate the Key expression and create +// an Index Traversal, adding it to the Traverser +func evaluateImportAddress(ctx EvalContext, expr hcl.Expression, keyData instances.RepetitionData) (addrs.AbsResourceInstance, tfdiags.Diagnostics) { + traversal, diags := traversalForImportExpr(ctx, expr, keyData) + if diags.HasErrors() { + return addrs.AbsResourceInstance{}, diags + } + + return addrs.ParseAbsResourceInstance(traversal) +} + +func traversalForImportExpr(ctx EvalContext, expr hcl.Expression, keyData instances.RepetitionData) (hcl.Traversal, tfdiags.Diagnostics) { + var traversal hcl.Traversal + var diags tfdiags.Diagnostics + + switch e := expr.(type) { + case *hclsyntax.IndexExpr: + t, d := traversalForImportExpr(ctx, e.Collection, keyData) + diags = diags.Append(d) + traversal = append(traversal, t...) + + tIndex, dIndex := parseImportIndexKeyExpr(ctx, e.Key, keyData) + diags = diags.Append(dIndex) + traversal = append(traversal, tIndex) + case *hclsyntax.RelativeTraversalExpr: + t, d := traversalForImportExpr(ctx, e.Source, keyData) + diags = diags.Append(d) + traversal = append(traversal, t...) + traversal = append(traversal, e.Traversal...) + case *hclsyntax.ScopeTraversalExpr: + traversal = append(traversal, e.Traversal...) + default: + // This should not happen, as it should have failed validation earlier, in config.AbsTraversalForImportToExpr + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid import address expression", + Detail: "Import address must be a reference to a resource's address, and only allows for indexing with dynamic keys. 
For example: module.my_module[expression1].aws_s3_bucket.my_buckets[expression2] for resources inside of modules, or simply aws_s3_bucket.my_bucket for a resource in the root module", + Subject: expr.Range().Ptr(), + }) + } + + return traversal, diags +} + +// parseImportIndexKeyExpr parses an expression that is used as a key in an index, of an HCL expression representing an +// import target address, into a traversal of type hcl.TraverseIndex. +// After evaluation, the expression must be known, not null, not sensitive, and must be a string (for_each) or a number +// (count) +func parseImportIndexKeyExpr(ctx EvalContext, expr hcl.Expression, keyData instances.RepetitionData) (hcl.TraverseIndex, tfdiags.Diagnostics) { + idx := hcl.TraverseIndex{ + SrcRange: expr.Range(), + } + + // evaluate and take into consideration the for_each key (if exists) + val, diags := evaluateExprWithRepetitionData(ctx, expr, cty.DynamicPseudoType, keyData) + if diags.HasErrors() { + return idx, diags + } + + if !val.IsKnown() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import block 'to' address contains an invalid key", + Detail: "Import block contained a resource address using an index that will only be known after apply. Please ensure to use expressions that are known at plan time for the index of an import target address", + Subject: expr.Range().Ptr(), + }) + return idx, diags + } + + if val.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import block 'to' address contains an invalid key", + Detail: "Import block contained a resource address using an index which is null. 
Please ensure the expression for the index is not null", + Subject: expr.Range().Ptr(), + }) + return idx, diags + } + + if val.Type() != cty.String && val.Type() != cty.Number { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import block 'to' address contains an invalid key", + Detail: "Import block contained a resource address using an index which is not valid for a resource instance (not a string or a number). Please ensure the expression for the index is correct, and returns either a string or a number", + Subject: expr.Range().Ptr(), + }) + return idx, diags + } + + unmarkedVal, valMarks := val.Unmark() + if _, sensitive := valMarks[marks.Sensitive]; sensitive { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import block 'to' address contains an invalid key", + Detail: "Import block contained a resource address using an index which is sensitive. Please ensure indexes used in the resource address of an import target are not sensitive", + Subject: expr.Range().Ptr(), + }) + } + + idx.Key = unmarkedVal + return idx, diags +} diff --git a/pkg/tofu/eval_import_test.go b/pkg/tofu/eval_import_test.go new file mode 100644 index 00000000000..39142c9e651 --- /dev/null +++ b/pkg/tofu/eval_import_test.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/zclconf/go-cty/cty" +) + +func TestEvaluateImportIdExpression_SensitiveValue(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.EvaluationScopeScope = &lang.Scope{} + + testCases := []struct { + name string + expr hcl.Expression + wantErr string + }{ + { + name: "sensitive_value", + expr: hcltest.MockExprLiteral(cty.StringVal("value").Mark(marks.Sensitive)), + wantErr: "Invalid import id argument: The import ID cannot be sensitive.", + }, + { + name: "expr_is_nil", + expr: nil, + wantErr: "Invalid import id argument: The import ID cannot be null.", + }, + { + name: "evaluates_to_null", + expr: hcltest.MockExprLiteral(cty.NullVal(cty.String)), + wantErr: "Invalid import id argument: The import ID cannot be null.", + }, + { + name: "evaluates_to_unknown", + expr: hcltest.MockExprLiteral(cty.UnknownVal(cty.String)), + wantErr: "Invalid import id argument: The import block \"id\" argument depends on resource attributes that cannot be determined until apply, so OpenTofu cannot plan to import this resource.", // Adapted the message from your original code + }, + { + name: "valid_value", + expr: hcltest.MockExprLiteral(cty.StringVal("value")), + wantErr: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, diags := evaluateImportIdExpression(tc.expr, ctx, EvalDataForNoInstanceKey) + + if tc.wantErr != "" { + if len(diags) != 1 { + t.Errorf("expected diagnostics, got %s", spew.Sdump(diags)) + } else if diags.Err().Error() != tc.wantErr { + t.Errorf("unexpected error diagnostic %s", diags.Err().Error()) + } + } else { + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + } + }) + } +} diff 
--git a/pkg/tofu/eval_provider.go b/pkg/tofu/eval_provider.go new file mode 100644 index 00000000000..5b8f3544861 --- /dev/null +++ b/pkg/tofu/eval_provider.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/providers" +) + +func buildProviderConfig(ctx EvalContext, addr addrs.AbsProviderConfig, config *configs.Provider) hcl.Body { + var configBody hcl.Body + if config != nil { + configBody = config.Config + } + + var inputBody hcl.Body + inputConfig := ctx.ProviderInput(addr) + if len(inputConfig) > 0 { + inputBody = configs.SynthBody("", inputConfig) + } + + switch { + case configBody != nil && inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) + return hcl.MergeBodies([]hcl.Body{inputBody, configBody}) + case configBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) + return configBody + case inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) + return inputBody + default: + log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) + addr := fmt.Sprintf("%s with no configuration", addr) + return hcl2shim.SynthBody(addr, make(map[string]cty.Value)) + } +} + +// getProvider returns the providers.Interface and schema for a given provider. 
+func getProvider(ctx EvalContext, addr addrs.AbsProviderConfig) (providers.Interface, providers.ProviderSchema, error) { + if addr.Provider.Type == "" { + // Should never happen + panic("GetProvider used with uninitialized provider configuration address") + } + provider := ctx.Provider(addr) + if provider == nil { + return nil, providers.ProviderSchema{}, fmt.Errorf("provider %s not initialized", addr) + } + // Not all callers require a schema, so we will leave checking for a nil + // schema to the callers. + schema, err := ctx.ProviderSchema(addr) + if err != nil { + return nil, providers.ProviderSchema{}, fmt.Errorf("failed to read schema for provider %s: %w", addr, err) + } + return provider, schema, nil +} diff --git a/pkg/tofu/eval_provider_test.go b/pkg/tofu/eval_provider_test.go new file mode 100644 index 00000000000..d70597765e2 --- /dev/null +++ b/pkg/tofu/eval_provider_test.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" +) + +func TestBuildProviderConfig(t *testing.T) { + configBody := configs.SynthBody("", map[string]cty.Value{ + "set_in_config": cty.StringVal("config"), + }) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + ctx := &MockEvalContext{ + // The input values map is expected to contain only keys that aren't + // already present in the config, since we skip prompting for + // attributes that are already set. 
+ ProviderInputValues: map[string]cty.Value{ + "set_by_input": cty.StringVal("input"), + }, + } + gotBody := buildProviderConfig(ctx, providerAddr, &configs.Provider{ + Name: "foo", + Config: configBody, + }) + + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "set_in_config": {Type: cty.String, Optional: true}, + "set_by_input": {Type: cty.String, Optional: true}, + }, + } + got, diags := hcldec.Decode(gotBody, schema.DecoderSpec(), nil) + if diags.HasErrors() { + t.Fatalf("body decode failed: %s", diags.Error()) + } + + // We expect the provider config with the added input value + want := cty.ObjectVal(map[string]cty.Value{ + "set_in_config": cty.StringVal("config"), + "set_by_input": cty.StringVal("input"), + }) + if !got.RawEquals(want) { + t.Fatalf("incorrect merged config\ngot: %#v\nwant: %#v", got, want) + } +} diff --git a/pkg/tofu/eval_variable.go b/pkg/tofu/eval_variable.go new file mode 100644 index 00000000000..76e122163d2 --- /dev/null +++ b/pkg/tofu/eval_variable.go @@ -0,0 +1,444 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/checks"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/lang"
	"github.com/kubegems/opentofu/pkg/lang/marks"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// prepareFinalInputVariableValue takes the raw value assigned to an input
// variable (raw.Value may be cty.NilVal if nothing was assigned at all) and
// produces the final value to expose to expressions: it applies the declared
// default, any type-constraint (optional-attribute) defaults, and conversion
// to the declared type constraint. On any unsuitable value it returns error
// diagnostics along with a placeholder unknown value so that downstream
// evaluation doesn't produce redundant errors.
func prepareFinalInputVariableValue(addr addrs.AbsInputVariableInstance, raw *InputValue, cfg *configs.Variable) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	convertTy := cfg.ConstraintType
	log.Printf("[TRACE] prepareFinalInputVariableValue: preparing %s", addr)

	var defaultVal cty.Value
	if cfg.Default != cty.NilVal {
		log.Printf("[TRACE] prepareFinalInputVariableValue: %s has a default value", addr)
		var err error
		defaultVal, err = convert.Convert(cfg.Default, convertTy)
		if err != nil {
			// Validation of the declaration should typically catch this,
			// but we'll check it here too to be robust.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid default value for module argument",
				Detail: fmt.Sprintf(
					"The default value for variable %q is incompatible with its type constraint: %s.",
					cfg.Name, err,
				),
				Subject: &cfg.DeclRange,
			})
			// We'll return a placeholder unknown value to avoid producing
			// redundant downstream errors.
			return cty.UnknownVal(cfg.Type), diags
		}
	}

	var sourceRange tfdiags.SourceRange
	var nonFileSource string
	if raw.HasSourceRange() {
		sourceRange = raw.SourceRange
	} else {
		// If the value came from a place that isn't a file and thus doesn't
		// have its own source range, we'll use the declaration range as
		// our source range and generate some slightly different error
		// messages.
		sourceRange = tfdiags.SourceRangeFromHCL(cfg.DeclRange)
		switch raw.SourceType {
		case ValueFromCLIArg:
			nonFileSource = fmt.Sprintf("set using -var=\"%s=...\"", addr.Variable.Name)
		case ValueFromEnvVar:
			nonFileSource = fmt.Sprintf("set using the TF_VAR_%s environment variable", addr.Variable.Name)
		case ValueFromInput:
			nonFileSource = "set using an interactive prompt"
		default:
			nonFileSource = "set from outside of the configuration"
		}
	}

	given := raw.Value
	if given == cty.NilVal { // The variable wasn't set at all (even to null)
		log.Printf("[TRACE] prepareFinalInputVariableValue: %s has no defined value", addr)
		if cfg.Required() {
			// NOTE: The CLI layer typically checks for itself whether all of
			// the required _root_ module variables are set, which would
			// mask this error with a more specific one that refers to the
			// CLI features for setting such variables. We can get here for
			// child module variables, though.
			log.Printf("[ERROR] prepareFinalInputVariableValue: %s is required but is not set", addr)
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  `Required variable not set`,
				Detail:   fmt.Sprintf(`The variable %q is required, but is not set.`, addr.Variable.Name),
				Subject:  cfg.DeclRange.Ptr(),
			})
			// We'll return a placeholder unknown value to avoid producing
			// redundant downstream errors.
			return cty.UnknownVal(cfg.Type), diags
		}

		given = defaultVal // must be set, because we checked above that the variable isn't required
	}

	// Apply defaults from the variable's type constraint to the converted value,
	// unless the converted value is null. We do not apply defaults to top-level
	// null values, as doing so could prevent assigning null to a nullable
	// variable.
	if cfg.TypeDefaults != nil && !given.IsNull() {
		given = cfg.TypeDefaults.Apply(given)
	}

	val, err := convert.Convert(given, convertTy)
	if err != nil {
		log.Printf("[ERROR] prepareFinalInputVariableValue: %s has unsuitable type\n got: %s\n want: %s", addr, given.Type(), convertTy)
		var detail string
		var subject *hcl.Range
		if nonFileSource != "" {
			detail = fmt.Sprintf(
				"Unsuitable value for %s %s: %s.",
				addr, nonFileSource, err,
			)
			subject = cfg.DeclRange.Ptr()
		} else {
			detail = fmt.Sprintf(
				"The given value is not suitable for %s declared at %s: %s.",
				addr, cfg.DeclRange.String(), err,
			)
			subject = sourceRange.ToHCL().Ptr()

			// In some workflows, the operator running tofu does not have access to the variables
			// themselves. They are for example stored in encrypted files that will be used by the CI toolset
			// and not by the operator directly. In such a case, the failing secret value should not be
			// displayed to the operator
			if cfg.Sensitive {
				detail = fmt.Sprintf(
					"The given value is not suitable for %s, which is sensitive: %s. Invalid value defined at %s.",
					addr, err, sourceRange.ToHCL(),
				)
				subject = cfg.DeclRange.Ptr()
			}
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid value for input variable",
			Detail:   detail,
			Subject:  subject,
		})
		// We'll return a placeholder unknown value to avoid producing
		// redundant downstream errors.
		return cty.UnknownVal(cfg.Type), diags
	}

	// By the time we get here, we know:
	// - val matches the variable's type constraint
	// - val is definitely not cty.NilVal, but might be a null value if the given was already null.
	//
	// That means we just need to handle the case where the value is null,
	// which might mean we need to use the default value, or produce an error.
	//
	// For historical reasons we do this only for a "non-nullable" variable.
	// Nullable variables just appear as null if they were set to null,
	// regardless of any default value.
	if val.IsNull() && !cfg.Nullable {
		log.Printf("[TRACE] prepareFinalInputVariableValue: %s is defined as null", addr)
		if defaultVal != cty.NilVal {
			val = defaultVal
		} else {
			log.Printf("[ERROR] prepareFinalInputVariableValue: %s is non-nullable but set to null, and is required", addr)
			if nonFileSource != "" {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Required variable not set`,
					Detail: fmt.Sprintf(
						"Unsuitable value for %s %s: required variable may not be set to null.",
						addr, nonFileSource,
					),
					Subject: cfg.DeclRange.Ptr(),
				})
			} else {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Required variable not set`,
					Detail: fmt.Sprintf(
						"The given value is not suitable for %s defined at %s: required variable may not be set to null.",
						addr, cfg.DeclRange.String(),
					),
					Subject: sourceRange.ToHCL().Ptr(),
				})
			}
			// Stub out our return value so that the semantic checker doesn't
			// produce redundant downstream errors.
			val = cty.UnknownVal(cfg.Type)
		}
	}

	return val, diags
}

// evalVariableValidations ensures that all of the configured custom validations
// for a variable are passing.
//
// This must be used only after any side-effects that make the value of the
// variable available for use in expression evaluation, such as
// EvalModuleCallArgument for variables in descendant modules.
func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *configs.Variable, expr hcl.Expression, ctx EvalContext) (diags tfdiags.Diagnostics) {
	if config == nil || len(config.Validations) == 0 {
		log.Printf("[TRACE] evalVariableValidations: no validation rules declared for %s, so skipping", addr)
		return nil
	}
	log.Printf("[TRACE] evalVariableValidations: validating %s", addr)

	checkState := ctx.Checks()
	if !checkState.ConfigHasChecks(addr.ConfigCheckable()) {
		// We have nothing to do if this object doesn't have any checks,
		// but the "rules" slice should agree that we don't.
		// (We already returned above when there were zero validations, so
		// reaching this branch with a non-zero count means the check state
		// and the configuration disagree, which is a bug.)
		if ct := len(config.Validations); ct != 0 {
			panic(fmt.Sprintf("check state says that %s should have no rules, but it has %d", addr, ct))
		}
		return diags
	}

	// Variable nodes evaluate in the parent module to where they were declared
	// because the value expression (n.Expr, if set) comes from the calling
	// "module" block in the parent module.
	//
	// Validation expressions are statically validated (during configuration
	// loading) to refer only to the variable being validated, so we can
	// bypass our usual evaluation machinery here and just produce a minimal
	// evaluation context containing just the required value, and thus avoid
	// the problem that ctx's evaluation functions refer to the wrong module.
	val := ctx.GetVariableValue(addr)
	if val == cty.NilVal {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "No final value for variable",
			Detail:   fmt.Sprintf("OpenTofu doesn't have a final value for %s during validation. This is a bug in OpenTofu; please report it!", addr),
		})
		return diags
	}
	for ix, validation := range config.Validations {
		// Provider-defined functions referenced by the condition and error
		// message expressions must be resolved before building the scope.
		condFuncs, condDiags := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.Condition)
		diags = diags.Append(condDiags)
		errFuncs, errDiags := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.ErrorMessage)
		diags = diags.Append(errDiags)

		if diags.HasErrors() {
			continue
		}

		hclCtx, ctxDiags := ctx.EvaluationScope(nil, nil, EvalDataForNoInstanceKey).EvalContext(append(condFuncs, errFuncs...))
		diags = diags.Append(ctxDiags)
		if diags.HasErrors() {
			continue
		}
		// Expose only the variable under validation, as var.<name>.
		hclCtx.Variables["var"] = cty.ObjectVal(map[string]cty.Value{
			config.Name: val,
		})

		result, ruleDiags := evalVariableValidation(validation, hclCtx, addr, config, expr, ix)
		diags = diags.Append(ruleDiags)

		log.Printf("[TRACE] evalVariableValidations: %s status is now %s", addr, result.Status)
		if result.Status == checks.StatusFail {
			checkState.ReportCheckFailure(addr, addrs.InputValidation, ix, result.FailureMessage)
		} else {
			checkState.ReportCheckResult(addr, addrs.InputValidation, ix, result.Status)
		}
	}

	return diags
}

// evalVariableValidation evaluates a single custom validation rule for the
// given input variable instance, returning the rule's check result and any
// diagnostics produced while evaluating the condition and error message
// expressions in hclCtx. expr, if non-nil, is the caller's value expression
// used for error reporting; ix is the rule's index within the variable's
// validation list.
func evalVariableValidation(validation *configs.CheckRule, hclCtx *hcl.EvalContext, addr addrs.AbsInputVariableInstance, config *configs.Variable, expr hcl.Expression, ix int) (checkResult, tfdiags.Diagnostics) {
	const errInvalidCondition = "Invalid variable validation result"
	const errInvalidValue = "Invalid value for variable"
	var diags tfdiags.Diagnostics

	result, moreDiags := validation.Condition.Value(hclCtx)
	diags = diags.Append(moreDiags)
	errorValue, errorDiags := validation.ErrorMessage.Value(hclCtx)

	// The following error handling is a workaround to preserve backwards
	// compatibility. Due to an implementation quirk, all prior versions of
	// Terraform would treat error messages specified using JSON
	// configuration syntax (.tf.json) as string literals, even if they
	// contained the "${" template expression operator. This behaviour did
	// not match that of HCL configuration syntax, where a template
	// expression would result in a validation error.
	//
	// As a result, users writing or generating JSON configuration syntax
	// may have specified error messages which are invalid template
	// expressions. As we add support for error message expressions, we are
	// unable to perfectly distinguish between these two cases.
	//
	// To ensure that we don't break backwards compatibility, we have the
	// below fallback logic if the error message fails to evaluate. This
	// should only have any effect for JSON configurations. The gohcl
	// DecodeExpression function behaves differently when the source of the
	// expression is a JSON configuration file and a nil context is passed.
	if errorDiags.HasErrors() {
		// Attempt to decode the expression as a string literal. Passing
		// nil as the context forces a JSON syntax string value to be
		// interpreted as a string literal.
		var errorString string
		moreErrorDiags := gohcl.DecodeExpression(validation.ErrorMessage, nil, &errorString)
		if !moreErrorDiags.HasErrors() {
			// Decoding succeeded, meaning that this is a JSON syntax
			// string value. We rewrap that as a cty value to allow later
			// decoding to succeed.
			errorValue = cty.StringVal(errorString)

			// This warning diagnostic explains this odd behaviour, while
			// giving us an escape hatch to change this to a hard failure
			// in some future OpenTofu 1.x version.
			errorDiags = hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity:    hcl.DiagWarning,
					Summary:     "Validation error message expression is invalid",
					Detail:      fmt.Sprintf("The error message provided could not be evaluated as an expression, so OpenTofu is interpreting it as a string literal.\n\nIn future versions of OpenTofu, this will be considered an error. Please file a GitHub issue if this would break your workflow.\n\n%s", errorDiags.Error()),
					Subject:     validation.ErrorMessage.Range().Ptr(),
					Context:     validation.DeclRange.Ptr(),
					Expression:  validation.ErrorMessage,
					EvalContext: hclCtx,
				},
			}
		}

		// We want to either report the original diagnostics if the
		// fallback failed, or the warning generated above if it succeeded.
		diags = diags.Append(errorDiags)
	}

	if diags.HasErrors() {
		log.Printf("[TRACE] evalVariableValidations: %s rule %s check rule evaluation failed: %s", addr, validation.DeclRange, diags.Err().Error())
	}
	if !result.IsKnown() {
		log.Printf("[TRACE] evalVariableValidations: %s rule %s condition value is unknown, so skipping validation for now", addr, validation.DeclRange)

		return checkResult{Status: checks.StatusUnknown}, diags // We'll wait until we've learned more, then.
	}
	if result.IsNull() {
		diags = diags.Append(&hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     errInvalidCondition,
			Detail:      "Validation condition expression must return either true or false, not null.",
			Subject:     validation.Condition.Range().Ptr(),
			Expression:  validation.Condition,
			EvalContext: hclCtx,
		})
		return checkResult{Status: checks.StatusError}, diags
	}
	var err error
	result, err = convert.Convert(result, cty.Bool)
	if err != nil {
		diags = diags.Append(&hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     errInvalidCondition,
			Detail:      fmt.Sprintf("Invalid validation condition result value: %s.", tfdiags.FormatError(err)),
			Subject:     validation.Condition.Range().Ptr(),
			Expression:  validation.Condition,
			EvalContext: hclCtx,
		})
		return checkResult{Status: checks.StatusError}, diags
	}

	// Validation condition may be marked if the input variable is bound to
	// a sensitive value. This is irrelevant to the validation process, so
	// we discard the marks now.
	result, _ = result.Unmark()
	status := checks.StatusForCtyValue(result)

	if status != checks.StatusFail {
		return checkResult{Status: status}, diags
	}

	var errorMessage string
	if !errorDiags.HasErrors() && errorValue.IsKnown() && !errorValue.IsNull() {
		var err error
		errorValue, err = convert.Convert(errorValue, cty.String)
		if err != nil {
			diags = diags.Append(&hcl.Diagnostic{
				Severity:    hcl.DiagError,
				Summary:     "Invalid error message",
				Detail:      fmt.Sprintf("Unsuitable value for error message: %s.", tfdiags.FormatError(err)),
				Subject:     validation.ErrorMessage.Range().Ptr(),
				Expression:  validation.ErrorMessage,
				EvalContext: hclCtx,
			})
		} else {
			if marks.Has(errorValue, marks.Sensitive) {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,

					Summary: "Error message refers to sensitive values",
					Detail: `The error expression used to explain this condition refers to sensitive values. OpenTofu will not display the resulting message.

You can correct this by removing references to sensitive values, or by carefully using the nonsensitive() function if the expression will not reveal the sensitive data.`,

					Subject:     validation.ErrorMessage.Range().Ptr(),
					Expression:  validation.ErrorMessage,
					EvalContext: hclCtx,
				})
				errorMessage = "The error message included a sensitive value, so it will not be displayed."
			} else {
				errorMessage = strings.TrimSpace(errorValue.AsString())
			}
		}
	}
	if errorMessage == "" {
		errorMessage = "Failed to evaluate condition error message."
	}

	if expr != nil {
		diags = diags.Append(&hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     errInvalidValue,
			Detail:      fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", errorMessage, validation.DeclRange.String()),
			Subject:     expr.Range().Ptr(),
			Expression:  validation.Condition,
			EvalContext: hclCtx,
			Extra: &addrs.CheckRuleDiagnosticExtra{
				CheckRule: addr.CheckRule(addrs.InputValidation, ix),
			},
		})
	} else {
		// Since we don't have a source expression for a root module
		// variable, we'll just report the error from the perspective
		// of the variable declaration itself.
		diags = diags.Append(&hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     errInvalidValue,
			Detail:      fmt.Sprintf("%s\n\nThis was checked by the validation rule at %s.", errorMessage, validation.DeclRange.String()),
			Subject:     config.DeclRange.Ptr(),
			Expression:  validation.Condition,
			EvalContext: hclCtx,
			Extra: &addrs.CheckRuleDiagnosticExtra{
				CheckRule: addr.CheckRule(addrs.InputValidation, ix),
			},
		})
	}

	return checkResult{
		Status:         status,
		FailureMessage: errorMessage,
	}, diags
}

// ---- patch metadata: new file pkg/tofu/eval_variable_test.go (mode 100644, index c83eeedd492) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"strings"
	"testing"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/checks"
	"github.com/kubegems/opentofu/pkg/lang"
	"github.com/kubegems/opentofu/pkg/lang/marks"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

func TestPrepareFinalInputVariableValue(t *testing.T) {
	// This is just a concise way to define a bunch of *configs.Variable
	// objects to use in our tests below. We're only going to decode this
	// config, not fully evaluate it.
+ cfgSrc := ` + variable "nullable_required" { + } + variable "nullable_optional_default_string" { + default = "hello" + } + variable "nullable_optional_default_null" { + default = null + } + variable "constrained_string_nullable_required" { + type = string + } + variable "constrained_string_nullable_optional_default_string" { + type = string + default = "hello" + } + variable "constrained_string_nullable_optional_default_bool" { + type = string + default = true + } + variable "constrained_string_nullable_optional_default_null" { + type = string + default = null + } + variable "required" { + nullable = false + } + variable "optional_default_string" { + nullable = false + default = "hello" + } + variable "constrained_string_required" { + nullable = false + type = string + } + variable "constrained_string_optional_default_string" { + nullable = false + type = string + default = "hello" + } + variable "constrained_string_optional_default_bool" { + nullable = false + type = string + default = true + } + variable "constrained_string_sensitive_required" { + sensitive = true + nullable = false + type = string + } + variable "complex_type_with_nested_default_optional" { + type = set(object({ + name = string + schedules = set(object({ + name = string + cold_storage_after = optional(number, 10) + })) + })) + } + variable "complex_type_with_nested_complex_types" { + type = object({ + name = string + nested_object = object({ + name = string + value = optional(string, "foo") + }) + nested_object_with_default = optional(object({ + name = string + value = optional(string, "bar") + }), { + name = "nested_object_with_default" + }) + }) + } + // https://github.com/hashicorp/terraform/issues/32152 + // This variable was originally added to test that optional attribute + // metadata is stripped from empty default collections. Essentially, you + // should be able to mix and match custom and default values for the + // optional_list attribute. 
+ variable "complex_type_with_empty_default_and_nested_optional" { + type = list(object({ + name = string + optional_list = optional(list(object({ + string = string + optional_string = optional(string) + })), []) + })) + } + // https://github.com/hashicorp/terraform/issues/32160#issuecomment-1302783910 + // These variables were added to test the specific use case from this + // GitHub comment. + variable "empty_object_with_optional_nested_object_with_optional_bool" { + type = object({ + thing = optional(object({ + flag = optional(bool, false) + })) + }) + default = {} + } + variable "populated_object_with_optional_nested_object_with_optional_bool" { + type = object({ + thing = optional(object({ + flag = optional(bool, false) + })) + }) + default = { + thing = {} + } + } + variable "empty_object_with_default_nested_object_with_optional_bool" { + type = object({ + thing = optional(object({ + flag = optional(bool, false) + }), {}) + }) + default = {} + } + // https://github.com/hashicorp/terraform/issues/32160 + // This variable was originally added to test that optional objects do + // get created containing only their defaults. Instead they should be + // left empty. We do not expect nested_object to be created just because + // optional_string has a default value. + variable "object_with_nested_object_with_required_and_optional_attributes" { + type = object({ + nested_object = optional(object({ + string = string + optional_string = optional(string, "optional") + })) + }) + } + // https://github.com/hashicorp/terraform/issues/32157 + // Similar to above, we want to see that merging combinations of the + // nested_object into a single collection doesn't crash because of + // inconsistent elements. 
+ variable "list_with_nested_object_with_required_and_optional_attributes" { + type = list(object({ + nested_object = optional(object({ + string = string + optional_string = optional(string, "optional") + })) + })) + } + // https://github.com/hashicorp/terraform/issues/32109 + // This variable was originally introduced to test the behaviour of + // the dynamic type constraint. You should be able to use the 'any' + // constraint and introduce empty, null, and populated values into the + // list. + variable "list_with_nested_list_of_any" { + type = list(object({ + a = string + b = optional(list(any)) + })) + default = [ + { + a = "a" + }, + { + a = "b" + b = [1] + } + ] + } + // https://github.com/hashicorp/terraform/issues/32396 + // This variable was originally introduced to test the behaviour of the + // dynamic type constraint. You should be able to set primitive types in + // the list consistently. + variable "list_with_nested_collections_dynamic_with_default" { + type = list( + object({ + name = optional(string, "default") + taints = optional(list(map(any)), []) + }) + ) + } + // https://github.com/hashicorp/terraform/issues/32752 + // This variable was introduced to make sure the evaluation doesn't + // crash even when the types are wrong. + variable "invalid_nested_type" { + type = map( + object({ + rules = map( + object({ + destination_addresses = optional(list(string), []) + }) + ) + }) + ) + default = {} + } + ` + cfg := testModuleInline(t, map[string]string{ + "main.tf": cfgSrc, + }) + variableConfigs := cfg.Module.Variables + + // Because we loaded our pseudo-module from a temporary file, the + // declaration source ranges will have unpredictable filenames. We'll + // fix that here just to make things easier below. 
+ for _, vc := range variableConfigs { + vc.DeclRange.Filename = "main.tf" + } + + tests := []struct { + varName string + given cty.Value + want cty.Value + wantErr string + }{ + // nullable_required + { + "nullable_required", + cty.NilVal, + cty.UnknownVal(cty.DynamicPseudoType), + `Required variable not set: The variable "nullable_required" is required, but is not set.`, + }, + { + "nullable_required", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.DynamicPseudoType), + ``, // "required" for a nullable variable means only that it must be set, even if it's set to null + }, + { + "nullable_required", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "nullable_required", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // nullable_optional_default_string + { + "nullable_optional_default_string", + cty.NilVal, + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "nullable_optional_default_string", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.DynamicPseudoType), // nullable variables can be really set to null, masking the default + ``, + }, + { + "nullable_optional_default_string", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "nullable_optional_default_string", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // nullable_optional_default_null + { + "nullable_optional_default_null", + cty.NilVal, + cty.NullVal(cty.DynamicPseudoType), // the declared default value + ``, + }, + { + "nullable_optional_default_null", + cty.NullVal(cty.String), + cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default + ``, + }, + { + "nullable_optional_default_null", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "nullable_optional_default_null", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_nullable_required + { + "constrained_string_nullable_required", 
+ cty.NilVal, + cty.UnknownVal(cty.String), + `Required variable not set: The variable "constrained_string_nullable_required" is required, but is not set.`, + }, + { + "constrained_string_nullable_required", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.String), // the null value still gets converted to match the type constraint + ``, // "required" for a nullable variable means only that it must be set, even if it's set to null + }, + { + "constrained_string_nullable_required", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_nullable_required", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_nullable_optional_default_string + { + "constrained_string_nullable_optional_default_string", + cty.NilVal, + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "constrained_string_nullable_optional_default_string", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default + ``, + }, + { + "constrained_string_nullable_optional_default_string", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_nullable_optional_default_string", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_nullable_optional_default_bool + { + "constrained_string_nullable_optional_default_bool", + cty.NilVal, + cty.StringVal("true"), // the declared default value, automatically converted to match type constraint + ``, + }, + { + "constrained_string_nullable_optional_default_bool", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.String), // nullable variables can be really set to null, masking the default + ``, + }, + { + "constrained_string_nullable_optional_default_bool", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_nullable_optional_default_bool", + cty.UnknownVal(cty.String), + 
cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_nullable_optional_default_null + { + "constrained_string_nullable_optional_default_null", + cty.NilVal, + cty.NullVal(cty.String), + ``, + }, + { + "constrained_string_nullable_optional_default_null", + cty.NullVal(cty.DynamicPseudoType), + cty.NullVal(cty.String), + ``, + }, + { + "constrained_string_nullable_optional_default_null", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_nullable_optional_default_null", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // required + { + "required", + cty.NilVal, + cty.UnknownVal(cty.DynamicPseudoType), + `Required variable not set: The variable "required" is required, but is not set.`, + }, + { + "required", + cty.NullVal(cty.DynamicPseudoType), + cty.UnknownVal(cty.DynamicPseudoType), + `Required variable not set: Unsuitable value for var.required set from outside of the configuration: required variable may not be set to null.`, + }, + { + "required", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "required", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // optional_default_string + { + "optional_default_string", + cty.NilVal, + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "optional_default_string", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "optional_default_string", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "optional_default_string", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_required + { + "constrained_string_required", + cty.NilVal, + cty.UnknownVal(cty.String), + `Required variable not set: The variable "constrained_string_required" is required, but is not set.`, + }, + { + "constrained_string_required", + cty.NullVal(cty.DynamicPseudoType), + cty.UnknownVal(cty.String), 
+ `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, + }, + { + "constrained_string_required", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_required", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_optional_default_string + { + "constrained_string_optional_default_string", + cty.NilVal, + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "constrained_string_optional_default_string", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("hello"), // the declared default value + ``, + }, + { + "constrained_string_optional_default_string", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_optional_default_string", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + + // constrained_string_optional_default_bool + { + "constrained_string_optional_default_bool", + cty.NilVal, + cty.StringVal("true"), // the declared default value, automatically converted to match type constraint + ``, + }, + { + "constrained_string_optional_default_bool", + cty.NullVal(cty.DynamicPseudoType), + cty.StringVal("true"), // the declared default value, automatically converted to match type constraint + ``, + }, + { + "constrained_string_optional_default_bool", + cty.StringVal("ahoy"), + cty.StringVal("ahoy"), + ``, + }, + { + "constrained_string_optional_default_bool", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + { + "list_with_nested_collections_dynamic_with_default", + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("default"), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("complex"), + "taints": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("my_key"), + "value": 
cty.StringVal("my_value"), + }), + }), + }), + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("default"), + "taints": cty.ListValEmpty(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("complex"), + "taints": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("my_key"), + "value": cty.StringVal("my_value"), + }), + }), + }), + }), + ``, + }, + + // complex types + + { + "complex_type_with_nested_default_optional", + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("test1"), + "schedules": cty.SetVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("daily"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("test2"), + "schedules": cty.SetVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("daily"), + }), + cty.MapVal(map[string]cty.Value{ + "name": cty.StringVal("weekly"), + "cold_storage_after": cty.StringVal("0"), + }), + }), + }), + }), + cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("test1"), + "schedules": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("daily"), + "cold_storage_after": cty.NumberIntVal(10), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("test2"), + "schedules": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("daily"), + "cold_storage_after": cty.NumberIntVal(10), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("weekly"), + "cold_storage_after": cty.NumberIntVal(0), + }), + }), + }), + }), + ``, + }, + { + "complex_type_with_nested_complex_types", + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("object"), + "nested_object": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("nested_object"), + }), + }), + 
cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("object"), + "nested_object": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("nested_object"), + "value": cty.StringVal("foo"), + }), + "nested_object_with_default": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("nested_object_with_default"), + "value": cty.StringVal("bar"), + }), + }), + ``, + }, + { + "complex_type_with_empty_default_and_nested_optional", + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("abc"), + "optional_list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("child"), + "optional_string": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("def"), + "optional_list": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{ + "string": cty.String, + "optional_string": cty.String, + }))), + }), + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("abc"), + "optional_list": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("child"), + "optional_string": cty.NullVal(cty.String), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("def"), + "optional_list": cty.ListValEmpty(cty.Object(map[string]cty.Type{ + "string": cty.String, + "optional_string": cty.String, + })), + }), + }), + ``, + }, + { + "object_with_nested_object_with_required_and_optional_attributes", + cty.EmptyObjectVal, + cty.ObjectVal(map[string]cty.Value{ + "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ + "string": cty.String, + "optional_string": cty.String, + })), + }), + ``, + }, + { + "empty_object_with_optional_nested_object_with_optional_bool", + cty.NilVal, + cty.ObjectVal(map[string]cty.Value{ + "thing": cty.NullVal(cty.Object(map[string]cty.Type{ + "flag": cty.Bool, + })), + }), + ``, + }, + { + 
"populated_object_with_optional_nested_object_with_optional_bool", + cty.NilVal, + cty.ObjectVal(map[string]cty.Value{ + "thing": cty.ObjectVal(map[string]cty.Value{ + "flag": cty.False, + }), + }), + ``, + }, + { + "empty_object_with_default_nested_object_with_optional_bool", + cty.NilVal, + cty.ObjectVal(map[string]cty.Value{ + "thing": cty.ObjectVal(map[string]cty.Value{ + "flag": cty.False, + }), + }), + ``, + }, + { + "list_with_nested_object_with_required_and_optional_attributes", + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "nested_object": cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("string"), + "optional_string": cty.NullVal(cty.String), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ + "string": cty.String, + "optional_string": cty.String, + })), + }), + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "nested_object": cty.ObjectVal(map[string]cty.Value{ + "string": cty.StringVal("string"), + "optional_string": cty.StringVal("optional"), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "nested_object": cty.NullVal(cty.Object(map[string]cty.Type{ + "string": cty.String, + "optional_string": cty.String, + })), + }), + }), + ``, + }, + { + "list_with_nested_list_of_any", + cty.NilVal, + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("a"), + "b": cty.NullVal(cty.List(cty.Number)), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + "b": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + }), + }), + }), + ``, + }, + { + "list_with_nested_collections_dynamic_with_default", + cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("default"), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("complex"), + "taints": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("my_key"), + "value": 
cty.StringVal("my_value"), + }), + }), + }), + }), + cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("default"), + "taints": cty.ListValEmpty(cty.Map(cty.String)), + }), + cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("complex"), + "taints": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.StringVal("my_key"), + "value": cty.StringVal("my_value"), + }), + }), + }), + }), + ``, + }, + { + "invalid_nested_type", + cty.MapVal(map[string]cty.Value{ + "mysql": cty.ObjectVal(map[string]cty.Value{ + "rules": cty.ObjectVal(map[string]cty.Value{ + "destination_addresses": cty.ListVal([]cty.Value{cty.StringVal("192.168.0.1")}), + }), + }), + }), + cty.UnknownVal(cty.Map(cty.Object(map[string]cty.Type{ + "rules": cty.Map(cty.Object(map[string]cty.Type{ + "destination_addresses": cty.List(cty.String), + })), + }))), + `Invalid value for input variable: Unsuitable value for var.invalid_nested_type set from outside of the configuration: incorrect map element type: attribute "rules": element "destination_addresses": object required.`, + }, + + // sensitive + { + "constrained_string_sensitive_required", + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + ``, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { + varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) + varCfg := variableConfigs[test.varName] + if varCfg == nil { + t.Fatalf("invalid variable name %q", test.varName) + } + + t.Logf( + "test case\nvariable: %s\nconstraint: %#v\ndefault: %#v\nnullable: %#v\ngiven value: %#v", + varAddr, + varCfg.Type, + varCfg.Default, + varCfg.Nullable, + test.given, + ) + + rawVal := &InputValue{ + Value: test.given, + SourceType: ValueFromCaller, + } + + got, diags := prepareFinalInputVariableValue( + varAddr, rawVal, varCfg, + ) + + if test.wantErr != "" { + if !diags.HasErrors() { + 
t.Errorf("unexpected success\nwant error: %s", test.wantErr) + } else if got, want := diags.Err().Error(), test.wantErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } else { + if diags.HasErrors() { + t.Errorf("unexpected error\ngot: %s", diags.Err().Error()) + } + } + + // NOTE: should still have returned some reasonable value even if there was an error + if !test.want.RawEquals(got) { + t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, test.want) + } + }) + } + + t.Run("SourceType error message variants", func(t *testing.T) { + tests := []struct { + SourceType ValueSourceType + SourceRange tfdiags.SourceRange + WantTypeErr string + WantNullErr string + }{ + { + ValueFromUnknown, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, + }, + { + ValueFromConfig, + tfdiags.SourceRange{ + Filename: "example.tf", + Start: tfdiags.SourcePos(hcl.InitialPos), + End: tfdiags.SourcePos(hcl.InitialPos), + }, + `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, + `Required variable not set: The given value is not suitable for var.constrained_string_required defined at main.tf:32,3-41: required variable may not be set to null.`, + }, + { + ValueFromAutoFile, + tfdiags.SourceRange{ + Filename: "example.auto.tfvars", + Start: tfdiags.SourcePos(hcl.InitialPos), + End: tfdiags.SourcePos(hcl.InitialPos), + }, + `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, + `Required variable not set: The given value is not suitable for var.constrained_string_required defined at 
main.tf:32,3-41: required variable may not be set to null.`, + }, + { + ValueFromNamedFile, + tfdiags.SourceRange{ + Filename: "example.tfvars", + Start: tfdiags.SourcePos(hcl.InitialPos), + End: tfdiags.SourcePos(hcl.InitialPos), + }, + `Invalid value for input variable: The given value is not suitable for var.constrained_string_required declared at main.tf:32,3-41: string required.`, + `Required variable not set: The given value is not suitable for var.constrained_string_required defined at main.tf:32,3-41: required variable may not be set to null.`, + }, + { + ValueFromCLIArg, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using -var="constrained_string_required=...": string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set using -var="constrained_string_required=...": required variable may not be set to null.`, + }, + { + ValueFromEnvVar, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using the TF_VAR_constrained_string_required environment variable: string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set using the TF_VAR_constrained_string_required environment variable: required variable may not be set to null.`, + }, + { + ValueFromInput, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set using an interactive prompt: string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set using an interactive prompt: required variable may not be set to null.`, + }, + { + // NOTE: This isn't actually a realistic case for this particular + // function, because if we have a value coming from a plan then + // we must be in the apply step, and we shouldn't be able to + // get past the plan step if we have invalid variable 
values, + // and during planning we'll always have other source types. + ValueFromPlan, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, + }, + { + ValueFromCaller, + tfdiags.SourceRange{}, + `Invalid value for input variable: Unsuitable value for var.constrained_string_required set from outside of the configuration: string required.`, + `Required variable not set: Unsuitable value for var.constrained_string_required set from outside of the configuration: required variable may not be set to null.`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s %s", test.SourceType, test.SourceRange.StartString()), func(t *testing.T) { + varAddr := addrs.InputVariable{Name: "constrained_string_required"}.Absolute(addrs.RootModuleInstance) + varCfg := variableConfigs[varAddr.Variable.Name] + t.Run("type error", func(t *testing.T) { + rawVal := &InputValue{ + Value: cty.EmptyObjectVal, + SourceType: test.SourceType, + SourceRange: test.SourceRange, + } + + _, diags := prepareFinalInputVariableValue( + varAddr, rawVal, varCfg, + ) + if !diags.HasErrors() { + t.Fatalf("unexpected success; want error") + } + + if got, want := diags.Err().Error(), test.WantTypeErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("null error", func(t *testing.T) { + rawVal := &InputValue{ + Value: cty.NullVal(cty.DynamicPseudoType), + SourceType: test.SourceType, + SourceRange: test.SourceRange, + } + + _, diags := prepareFinalInputVariableValue( + varAddr, rawVal, varCfg, + ) + if !diags.HasErrors() { + t.Fatalf("unexpected success; want error") + } + + if got, want := diags.Err().Error(), test.WantNullErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: 
%s", got, want) + } + }) + }) + } + }) + + t.Run("SensitiveVariable error message variants, with source variants", func(t *testing.T) { + tests := []struct { + SourceType ValueSourceType + SourceRange tfdiags.SourceRange + WantTypeErr string + HideSubject bool + }{ + { + ValueFromUnknown, + tfdiags.SourceRange{}, + "Invalid value for input variable: Unsuitable value for var.constrained_string_sensitive_required set from outside of the configuration: string required.", + false, + }, + { + ValueFromConfig, + tfdiags.SourceRange{ + Filename: "example.tfvars", + Start: tfdiags.SourcePos(hcl.InitialPos), + End: tfdiags.SourcePos(hcl.InitialPos), + }, + `Invalid value for input variable: The given value is not suitable for var.constrained_string_sensitive_required, which is sensitive: string required. Invalid value defined at example.tfvars:1,1-1.`, + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s %s", test.SourceType, test.SourceRange.StartString()), func(t *testing.T) { + varAddr := addrs.InputVariable{Name: "constrained_string_sensitive_required"}.Absolute(addrs.RootModuleInstance) + varCfg := variableConfigs[varAddr.Variable.Name] + t.Run("type error", func(t *testing.T) { + rawVal := &InputValue{ + Value: cty.EmptyObjectVal, + SourceType: test.SourceType, + SourceRange: test.SourceRange, + } + + _, diags := prepareFinalInputVariableValue( + varAddr, rawVal, varCfg, + ) + if !diags.HasErrors() { + t.Fatalf("unexpected success; want error") + } + + if got, want := diags.Err().Error(), test.WantTypeErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + + if test.HideSubject { + if got, want := diags[0].Source().Subject.StartString(), test.SourceRange.StartString(); got == want { + t.Errorf("Subject start should have been hidden, but was %s", got) + } + } + }) + }) + } + }) +} + +// These tests cover the JSON syntax configuration edge case handling, +// the background of which is described in detail in comments in the 
+// evalVariableValidations function. Future versions of OpenTofu may +// be able to remove this behaviour altogether. +func TestEvalVariableValidations_jsonErrorMessageEdgeCase(t *testing.T) { + cfgSrc := `{ + "variable": { + "valid": { + "type": "string", + "validation": { + "condition": "${var.valid != \"bar\"}", + "error_message": "Valid template string ${var.valid}" + } + }, + "invalid": { + "type": "string", + "validation": { + "condition": "${var.invalid != \"bar\"}", + "error_message": "Invalid template string ${" + } + } + } +} +` + cfg := testModuleInline(t, map[string]string{ + "main.tf.json": cfgSrc, + }) + variableConfigs := cfg.Module.Variables + + // Because we loaded our pseudo-module from a temporary file, the + // declaration source ranges will have unpredictable filenames. We'll + // fix that here just to make things easier below. + for _, vc := range variableConfigs { + vc.DeclRange.Filename = "main.tf.json" + for _, v := range vc.Validations { + v.DeclRange.Filename = "main.tf.json" + } + } + + tests := []struct { + varName string + given cty.Value + wantErr []string + wantWarn []string + status checks.Status + }{ + // Valid variable validation declaration, assigned value which passes + // the condition generates no diagnostics. + { + varName: "valid", + given: cty.StringVal("foo"), + status: checks.StatusPass, + }, + // Assigning a value which fails the condition generates an error + // message with the expression successfully evaluated. + { + varName: "valid", + given: cty.StringVal("bar"), + wantErr: []string{ + "Invalid value for variable", + "Valid template string bar", + }, + status: checks.StatusFail, + }, + // Invalid variable validation declaration due to an unparseable + // template string. Assigning a value which passes the condition + // results in a warning about the error message. 
+ { + varName: "invalid", + given: cty.StringVal("foo"), + wantWarn: []string{ + "Validation error message expression is invalid", + "Missing expression; Expected the start of an expression, but found the end of the file.", + }, + status: checks.StatusPass, + }, + // Assigning a value which fails the condition generates an error + // message including the configured string interpreted as a literal + // value, and the same warning diagnostic as above. + { + varName: "invalid", + given: cty.StringVal("bar"), + wantErr: []string{ + "Invalid value for variable", + "Invalid template string ${", + }, + wantWarn: []string{ + "Validation error message expression is invalid", + "Missing expression; Expected the start of an expression, but found the end of the file.", + }, + status: checks.StatusFail, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { + varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) + varCfg := variableConfigs[test.varName] + if varCfg == nil { + t.Fatalf("invalid variable name %q", test.varName) + } + + // Build a mock context to allow the function under test to + // retrieve the variable value and evaluate the expressions + ctx := &MockEvalContext{} + + // We need a minimal scope to allow basic functions to be passed to + // the HCL scope + ctx.EvaluationScopeScope = &lang.Scope{} + ctx.GetVariableValueFunc = func(addr addrs.AbsInputVariableInstance) cty.Value { + if got, want := addr.String(), varAddr.String(); got != want { + t.Errorf("incorrect argument to GetVariableValue: got %s, want %s", got, want) + } + return test.given + } + ctx.ChecksState = checks.NewState(cfg) + ctx.ChecksState.ReportCheckableObjects(varAddr.ConfigCheckable(), addrs.MakeSet[addrs.Checkable](varAddr)) + + gotDiags := evalVariableValidations( + varAddr, varCfg, nil, ctx, + ) + + if ctx.ChecksState.ObjectCheckStatus(varAddr) != test.status { + t.Errorf("expected check 
result %s but instead %s", test.status, ctx.ChecksState.ObjectCheckStatus(varAddr)) + } + + if len(test.wantErr) == 0 && len(test.wantWarn) == 0 { + if len(gotDiags) > 0 { + t.Errorf("no diags expected, got %s", gotDiags.Err().Error()) + } + } else { + wantErrs: + for _, want := range test.wantErr { + for _, diag := range gotDiags { + if diag.Severity() != tfdiags.Error { + continue + } + desc := diag.Description() + if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { + continue wantErrs + } + } + t.Errorf("no error diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) + } + + wantWarns: + for _, want := range test.wantWarn { + for _, diag := range gotDiags { + if diag.Severity() != tfdiags.Warning { + continue + } + desc := diag.Description() + if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { + continue wantWarns + } + } + t.Errorf("no warning diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) + } + } + }) + } +} + +func TestEvalVariableValidations_sensitiveValues(t *testing.T) { + cfgSrc := ` +variable "foo" { + type = string + sensitive = true + default = "boop" + + validation { + condition = length(var.foo) == 4 + error_message = "Foo must be 4 characters, not ${length(var.foo)}" + } +} + +variable "bar" { + type = string + sensitive = true + default = "boop" + + validation { + condition = length(var.bar) == 4 + error_message = "Bar must be 4 characters, not ${nonsensitive(length(var.bar))}." + } +} +` + cfg := testModuleInline(t, map[string]string{ + "main.tf": cfgSrc, + }) + variableConfigs := cfg.Module.Variables + + // Because we loaded our pseudo-module from a temporary file, the + // declaration source ranges will have unpredictable filenames. We'll + // fix that here just to make things easier below. 
+ for _, vc := range variableConfigs { + vc.DeclRange.Filename = "main.tf" + for _, v := range vc.Validations { + v.DeclRange.Filename = "main.tf" + } + } + + tests := []struct { + varName string + given cty.Value + wantErr []string + status checks.Status + }{ + // Validations pass on a sensitive variable with an error message which + // would generate a sensitive value + { + varName: "foo", + given: cty.StringVal("boop"), + status: checks.StatusPass, + }, + // Assigning a value which fails the condition generates a sensitive + // error message, which is elided and generates another error + { + varName: "foo", + given: cty.StringVal("bap"), + wantErr: []string{ + "Invalid value for variable", + "The error message included a sensitive value, so it will not be displayed.", + "Error message refers to sensitive values", + }, + status: checks.StatusFail, + }, + // Validations pass on a sensitive variable with a correctly defined + // error message + { + varName: "bar", + given: cty.StringVal("boop"), + status: checks.StatusPass, + }, + // Assigning a value which fails the condition generates a nonsensitive + // error message, which is displayed + { + varName: "bar", + given: cty.StringVal("bap"), + wantErr: []string{ + "Invalid value for variable", + "Bar must be 4 characters, not 3.", + }, + status: checks.StatusFail, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s %#v", test.varName, test.given), func(t *testing.T) { + varAddr := addrs.InputVariable{Name: test.varName}.Absolute(addrs.RootModuleInstance) + varCfg := variableConfigs[test.varName] + if varCfg == nil { + t.Fatalf("invalid variable name %q", test.varName) + } + + // Build a mock context to allow the function under test to + // retrieve the variable value and evaluate the expressions + ctx := &MockEvalContext{} + + // We need a minimal scope to allow basic functions to be passed to + // the HCL scope + ctx.EvaluationScopeScope = &lang.Scope{} + ctx.GetVariableValueFunc = func(addr 
addrs.AbsInputVariableInstance) cty.Value { + if got, want := addr.String(), varAddr.String(); got != want { + t.Errorf("incorrect argument to GetVariableValue: got %s, want %s", got, want) + } + if varCfg.Sensitive { + return test.given.Mark(marks.Sensitive) + } else { + return test.given + } + } + ctx.ChecksState = checks.NewState(cfg) + ctx.ChecksState.ReportCheckableObjects(varAddr.ConfigCheckable(), addrs.MakeSet[addrs.Checkable](varAddr)) + + gotDiags := evalVariableValidations( + varAddr, varCfg, nil, ctx, + ) + + if ctx.ChecksState.ObjectCheckStatus(varAddr) != test.status { + t.Errorf("expected check result %s but instead %s", test.status, ctx.ChecksState.ObjectCheckStatus(varAddr)) + } + + if len(test.wantErr) == 0 { + if len(gotDiags) > 0 { + t.Errorf("no diags expected, got %s", gotDiags.Err().Error()) + } + } else { + wantErrs: + for _, want := range test.wantErr { + for _, diag := range gotDiags { + if diag.Severity() != tfdiags.Error { + continue + } + desc := diag.Description() + if strings.Contains(desc.Summary, want) || strings.Contains(desc.Detail, want) { + continue wantErrs + } + } + t.Errorf("no error diagnostics found containing %q\ngot: %s", want, gotDiags.Err().Error()) + } + } + }) + } +} diff --git a/pkg/tofu/evaluate.go b/pkg/tofu/evaluate.go new file mode 100644 index 00000000000..40ca6999723 --- /dev/null +++ b/pkg/tofu/evaluate.go @@ -0,0 +1,1032 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sync" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/didyoumean" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Evaluator provides the necessary contextual data for evaluating expressions +// for a particular walk operation. +type Evaluator struct { + // Operation defines what type of operation this evaluator is being used + // for. + Operation walkOperation + + // Meta is contextual metadata about the current operation. + Meta *ContextMeta + + // Config is the root node in the configuration tree. + Config *configs.Config + + VariableValuesLock *sync.Mutex + // VariableValues is a map from variable names to their associated values, + // within the module indicated by ModulePath. VariableValues is modified + // concurrently, and so it must be accessed only while holding + // VariableValuesLock. + // + // The first map level is string representations of addr.ModuleInstance + // values, while the second level is variable names. + VariableValues map[string]map[string]cty.Value + + // Plugins is the library of available plugin components (providers and + // provisioners) that we have available to help us evaluate expressions + // that interact with plugin-provided objects. + // + // From this we only access the schemas of the plugins, and don't otherwise + // interact with plugin instances. 
+ Plugins *contextPlugins + + // State is the current state, embedded in a wrapper that ensures that + // it can be safely accessed and modified concurrently. + State *states.SyncState + + // Changes is the set of proposed changes, embedded in a wrapper that + // ensures they can be safely accessed and modified concurrently. + Changes *plans.ChangesSync + + PlanTimestamp time.Time +} + +// Scope creates an evaluation scope for the given module path and optional +// resource. +// +// If the "self" argument is nil then the "self" object is not available +// in evaluated expressions. Otherwise, it behaves as an alias for the given +// address. +func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable, source addrs.Referenceable, functions lang.ProviderFunction) *lang.Scope { + return &lang.Scope{ + Data: data, + ParseRef: addrs.ParseRef, + SelfAddr: self, + SourceAddr: source, + PureOnly: e.Operation != walkApply && e.Operation != walkDestroy && e.Operation != walkEval, + BaseDir: ".", // Always current working directory for now. + PlanTimestamp: e.PlanTimestamp, + ProviderFunctions: functions, + } +} + +// evaluationStateData is an implementation of lang.Data that resolves +// references primarily (but not exclusively) using information from a State. +type evaluationStateData struct { + Evaluator *Evaluator + + // ModulePath is the path through the dynamic module tree to the module + // that references will be resolved relative to. + ModulePath addrs.ModuleInstance + + // InstanceKeyData describes the values, if any, that are accessible due + // to repetition of a containing object using "count" or "for_each" + // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, + // since the user specifies in that case which variable name to locally + // shadow.) + InstanceKeyData InstanceKeyEvalData + + // Operation records the type of walk the evaluationStateData is being used + // for. 
+ Operation walkOperation +} + +// InstanceKeyEvalData is the old name for instances.RepetitionData, aliased +// here for compatibility. In new code, use instances.RepetitionData instead. +type InstanceKeyEvalData = instances.RepetitionData + +// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for +// evaluating in a context that has the given instance key. +// +// The forEachMap argument can be nil when preparing for evaluation +// in a context where each.value is prohibited, such as a destroy-time +// provisioner. In that case, the returned EachValue will always be +// cty.NilVal. +func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { + var evalData InstanceKeyEvalData + if key == nil { + return evalData + } + + keyValue := key.Value() + switch keyValue.Type() { + case cty.String: + evalData.EachKey = keyValue + evalData.EachValue = forEachMap[keyValue.AsString()] + case cty.Number: + evalData.CountIndex = keyValue + } + return evalData +} + +// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance +// key values at all, suitable for use in contexts where no keyed instance +// is relevant. 
+var EvalDataForNoInstanceKey = InstanceKeyEvalData{} + +// evaluationStateData must implement lang.Data +var _ lang.Data = (*evaluationStateData)(nil) + +func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "index": + idxVal := d.InstanceKeyData.CountIndex + if idxVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "count" in non-counted context`, + Detail: `The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.UnknownVal(cty.Number), diags + } + return idxVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "count" attribute`, + Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var returnVal cty.Value + switch addr.Name { + + case "key": + returnVal = d.InstanceKeyData.EachKey + case "value": + returnVal = d.InstanceKeyData.EachValue + + if returnVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `each.value cannot be used in this context`, + Detail: `A reference to "each.value" has been used in a context in which it is unavailable, such as when the configuration no longer contains the value in its "for_each" expression. 
Remove this reference to each.value in your configuration to work around this error.`,
				Subject: rng.ToHCL().Ptr(),
			})
			return cty.UnknownVal(cty.DynamicPseudoType), diags
		}
	default:
		// Any attribute other than each.key / each.value is invalid.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid "each" attribute`,
			Detail:   fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	if returnVal == cty.NilVal {
		// No each value was populated at all, so we're being referenced
		// from a context that has no for_each argument.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to "each" in context without for_each`,
			Detail:   `The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`,
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.UnknownVal(cty.DynamicPseudoType), diags
	}
	return returnVal, diags
}

// GetInputVariable returns the value of the named input variable in the
// module instance we're evaluating, applying the sensitive mark when the
// variable's declaration requires it. Undeclared names produce an error
// diagnostic with a "did you mean" suggestion.
func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Variables[addr.Name]
	if config == nil {
		// Undeclared variable: build a "did you mean" hint from the
		// variables that are actually declared in this module.
		var suggestions []string
		for k := range moduleConfig.Module.Variables {
			suggestions = append(suggestions, k)
		}
		suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		} else {
			suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared input variable`,
			Detail:   fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}
	// Hold the lock for the rest of the function; VariableValues is shared
	// mutable state on the Evaluator.
	d.Evaluator.VariableValuesLock.Lock()
	defer d.Evaluator.VariableValuesLock.Unlock()

	// During the validate walk, input variables are always unknown so
	// that we are validating the configuration for all possible input values
	// rather than for a specific set. Checking against a specific set of
	// input values then happens during the plan walk.
	//
	// This is important because otherwise the validation walk will tend to be
	// overly strict, requiring expressions throughout the configuration to
	// be complicated to accommodate all possible inputs, whereas returning
	// unknown here allows for simpler patterns like using input values as
	// guards to broadly enable/disable resources, avoid processing things
	// that are disabled, etc. OpenTofu's static validation leans towards
	// being liberal in what it accepts because the subsequent plan walk has
	// more information available and so can be more conservative.
	if d.Operation == walkValidate {
		// Ensure variable sensitivity is captured in the validate walk
		if config.Sensitive {
			return cty.UnknownVal(config.Type).Mark(marks.Sensitive), diags
		}
		return cty.UnknownVal(config.Type), diags
	}

	moduleAddrStr := d.ModulePath.String()
	vals := d.Evaluator.VariableValues[moduleAddrStr]
	if vals == nil {
		// No values recorded for this module instance at all; treat every
		// variable as not-yet-known.
		return cty.UnknownVal(config.Type), diags
	}

	// d.Evaluator.VariableValues should always contain valid "final values"
	// for variables, which is to say that they have already had type
	// conversions, validations, and default value handling applied to them.
	// Those are the responsibility of the graph nodes representing the
	// variable declarations. Therefore here we just trust that we already
	// have a correct value.

	val, isSet := vals[addr.Name]
	if !isSet {
		// We should not be able to get here without having a valid value
		// for every variable, so this always indicates a bug in either
		// the graph builder (not including all the needed nodes) or in
		// the graph nodes representing variables.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to unresolved input variable`,
			Detail: fmt.Sprintf(
				`The final value for %s is missing in OpenTofu's evaluation context. This is a bug in OpenTofu; please report it!`,
				addr.Absolute(d.ModulePath),
			),
			Subject: rng.ToHCL().Ptr(),
		})
		val = cty.UnknownVal(config.Type)
	}

	// Mark if sensitive
	if config.Sensitive {
		val = val.Mark(marks.Sensitive)
	}

	return val, diags
}

// GetLocalValue returns the value of the named local value in the current
// module instance, reading it from the working state.
func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
+ moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Locals[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Locals { + suggestions = append(suggestions, k) + } + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared local value`, + Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) + if val == cty.NilVal { + // Not evaluated yet? + val = cty.DynamicVal + } + + return val, diags +} + +func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. 
	moduleAddr := d.ModulePath.Module().Child(addr.Name)

	parentCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
	callConfig, ok := parentCfg.Module.ModuleCalls[addr.Name]
	if !ok {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared module`,
			Detail:   fmt.Sprintf(`The configuration contains no %s.`, moduleAddr),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// We'll consult the configuration to see what output names we are
	// expecting, so we can ensure the resulting object is of the expected
	// type even if our data is incomplete for some reason.
	moduleConfig := d.Evaluator.Config.Descendent(moduleAddr)
	if moduleConfig == nil {
		// should never happen, since we have a valid module call above, this
		// should be caught during static validation.
		panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr))
	}
	outputConfigs := moduleConfig.Module.Outputs

	// Collect all the relevant outputs that currently exist in the state.
	// We know the instance path up to this point, and the child module name,
	// so we only need to store these by instance key.
	stateMap := map[addrs.InstanceKey]map[string]cty.Value{}
	for _, output := range d.Evaluator.State.ModuleOutputs(d.ModulePath, addr) {
		val := output.Value
		if output.Sensitive {
			val = val.Mark(marks.Sensitive)
		}

		_, callInstance := output.Addr.Module.CallInstance()
		instance, ok := stateMap[callInstance.Key]
		if !ok {
			instance = map[string]cty.Value{}
			stateMap[callInstance.Key] = instance
		}

		instance[output.Addr.OutputValue.Name] = val
	}

	// Get all changes that reside for this module call within our path.
	// The change contains the full addr, so we can key these with strings.
	changesMap := map[addrs.InstanceKey]map[string]*plans.OutputChangeSrc{}
	for _, change := range d.Evaluator.Changes.GetOutputChanges(d.ModulePath, addr) {
		_, callInstance := change.Addr.Module.CallInstance()
		instance, ok := changesMap[callInstance.Key]
		if !ok {
			instance = map[string]*plans.OutputChangeSrc{}
			changesMap[callInstance.Key] = instance
		}

		instance[change.Addr.OutputValue.Name] = change
	}

	// Build up all the module objects, creating a map of values for each
	// module instance.
	moduleInstances := map[addrs.InstanceKey]map[string]cty.Value{}

	// create a dummy object type for validation below
	unknownMap := map[string]cty.Type{}

	// the structure is based on the configuration, so iterate through all the
	// defined outputs, and add any instance state or changes we find.
	for _, cfg := range outputConfigs {
		// record the output names for validation
		unknownMap[cfg.Name] = cty.DynamicPseudoType

		// get all instance output for this path from the state
		for key, states := range stateMap {
			outputState, ok := states[cfg.Name]
			if !ok {
				continue
			}

			instance, ok := moduleInstances[key]
			if !ok {
				instance = map[string]cty.Value{}
				moduleInstances[key] = instance
			}

			instance[cfg.Name] = outputState
		}

		// any pending changes override the state values
		for key, changes := range changesMap {
			changeSrc, ok := changes[cfg.Name]
			if !ok {
				continue
			}

			instance, ok := moduleInstances[key]
			if !ok {
				instance = map[string]cty.Value{}
				moduleInstances[key] = instance
			}

			change, err := changeSrc.Decode()
			if err != nil {
				// This should happen only if someone has tampered with a plan
				// file, so we won't bother with a pretty error for it.
				diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %w", addr, err))
				instance[cfg.Name] = cty.DynamicVal
				continue
			}

			instance[cfg.Name] = change.After

			if change.Sensitive {
				instance[cfg.Name] = change.After.Mark(marks.Sensitive)
			}
		}
	}

	var ret cty.Value

	// compile the outputs into the correct value type for the each mode
	switch {
	case callConfig.Count != nil:
		// figure out what the last index we have is
		length := -1
		for key := range moduleInstances {
			intKey, ok := key.(addrs.IntKey)
			if !ok {
				// old key from state which is being dropped
				continue
			}
			if int(intKey) >= length {
				length = int(intKey) + 1
			}
		}

		if length > 0 {
			vals := make([]cty.Value, length)
			for key, instance := range moduleInstances {
				intKey, ok := key.(addrs.IntKey)
				if !ok {
					// old key from state which is being dropped
					continue
				}

				vals[int(intKey)] = cty.ObjectVal(instance)
			}

			// Insert unknown values where there are any missing instances.
			// NOTE(review): unfilled slots are the zero Value (cty.NilVal),
			// for which IsNull reports true; GetResource below tests
			// `v == cty.NilVal` for the same purpose — confirm the two are
			// intentionally different.
			for i, v := range vals {
				if v.IsNull() {
					vals[i] = cty.DynamicVal
					continue
				}
			}
			ret = cty.TupleVal(vals)
		} else {
			ret = cty.EmptyTupleVal
		}

	case callConfig.ForEach != nil:
		vals := make(map[string]cty.Value)
		for key, instance := range moduleInstances {
			strKey, ok := key.(addrs.StringKey)
			if !ok {
				continue
			}

			vals[string(strKey)] = cty.ObjectVal(instance)
		}

		if len(vals) > 0 {
			ret = cty.ObjectVal(vals)
		} else {
			ret = cty.EmptyObjectVal
		}

	default:
		val, ok := moduleInstances[addrs.NoKey]
		if !ok {
			// create the object if there wasn't one known
			val = map[string]cty.Value{}
			for k := range outputConfigs {
				val[k] = cty.DynamicVal
			}
		}

		ret = cty.ObjectVal(val)
	}

	// The module won't be expanded during validation, so we need to return an
	// unknown value. This will ensure the types looks correct, since we built
	// the objects based on the configuration.
	if d.Operation == walkValidate {
		// While we know the type here and it would be nice to validate whether
		// indexes are valid or not, because tuples and objects have fixed
		// numbers of elements we can't simply return an unknown value of the
		// same type since we have not expanded any instances during
		// validation.
		//
		// In order to validate the expression a little precisely, we'll create
		// an unknown map or list here to get more type information.
		ty := cty.Object(unknownMap)
		switch {
		case callConfig.Count != nil:
			ret = cty.UnknownVal(cty.List(ty))
		case callConfig.ForEach != nil:
			ret = cty.UnknownVal(cty.Map(ty))
		default:
			ret = cty.UnknownVal(ty)
		}
	}

	return ret, diags
}

// GetPathAttr returns the value of one of the "path" object's attributes:
// path.cwd, path.module, or path.root.
func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	switch addr.Name {

	case "cwd":
		var err error
		var wd string
		if d.Evaluator.Meta != nil {
			// Meta is always non-nil in the normal case, but some test cases
			// are not so realistic.
			wd = d.Evaluator.Meta.OriginalWorkingDir
		}
		if wd == "" {
			wd, err = os.Getwd()
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Failed to get working directory`,
					Detail:   fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err),
					Subject:  rng.ToHCL().Ptr(),
				})
				return cty.DynamicVal, diags
			}
		}
		// The current working directory should always be absolute, whether we
		// just looked it up or whether we were relying on ContextMeta's
		// (possibly non-normalized) path.
+ wd, err = filepath.Abs(wd) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) + } + sourceDir := moduleConfig.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + case "root": + sourceDir := d.Evaluator.Config.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + default: + suggestion := didyoumean.NameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // First we'll consult the configuration to see if an resource of this + // name is declared at all. + moduleAddr := d.ModulePath + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. 
		panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr))
	}

	config := moduleConfig.Module.ResourceByAddr(addr)
	if config == nil {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared resource`,
			Detail:   fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// Build the provider address from configuration, since we may not have
	// state available in all cases.
	// We need to build an abs provider address, but we can use a default
	// instance since we're only interested in the schema.
	schema := d.getResourceSchema(addr, config.Provider)
	if schema == nil {
		// This shouldn't happen, since validation before we get here should've
		// taken care of it, but we'll show a reasonable error message anyway.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Missing resource type schema`,
			Detail:   fmt.Sprintf("No schema is available for %s in %s. This is a bug in OpenTofu and should be reported.", addr, config.Provider),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}
	ty := schema.ImpliedType()

	rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath))

	if rs == nil {
		// Nothing in state for this resource at all; what we return depends
		// on the walk we're performing.
		switch d.Operation {
		case walkPlan, walkApply:
			// During plan and apply as we evaluate each removed instance they
			// are removed from the working state. Since we know there are no
			// instances, return an empty container of the expected type.
			switch {
			case config.Count != nil:
				return cty.EmptyTupleVal, diags
			case config.ForEach != nil:
				return cty.EmptyObjectVal, diags
			default:
				// While we can reference an expanded resource with 0
				// instances, we cannot reference instances that do not exist.
				// Due to the fact that we may have direct references to
				// instances that may end up in a root output during destroy
				// (since a planned destroy cannot yet remove root outputs), we
				// need to return a dynamic value here to allow evaluation to
				// continue.
				log.Printf("[ERROR] unknown instance %q referenced during %s", addr.Absolute(d.ModulePath), d.Operation)
				return cty.DynamicVal, diags
			}

		case walkImport:
			// Import does not yet plan resource changes, so new resources from
			// config are not going to be found here. Once walkImport fully
			// plans resources, this case should no longer be needed.
			// In the single instance case, we can return a typed unknown value
			// for the instance to better satisfy other expressions using the
			// value. This of course will not help if statically known
			// attributes are expected to be known elsewhere, but reduces the
			// number of problematic configs for now.
			// Unlike in plan and apply above we can't be sure the count or
			// for_each instances are empty, so we return a DynamicVal. We
			// don't really have a good value to return otherwise -- empty
			// values will fail for direct index expressions, and unknown
			// Lists and Maps could fail in some type unifications.
			switch {
			case config.Count != nil:
				return cty.DynamicVal, diags
			case config.ForEach != nil:
				return cty.DynamicVal, diags
			default:
				return cty.UnknownVal(ty), diags
			}

		default:
			// We should only end up here during the validate walk,
			// since later walks should have at least partial states populated
			// for all resources in the configuration.
			return cty.DynamicVal, diags
		}
	}

	// Decode all instances in the current state
	instances := map[addrs.InstanceKey]cty.Value{}
	pendingDestroy := d.Operation == walkDestroy
	for key, instance := range rs.Instances {
		if instance == nil || instance.Current == nil {
			// Assume we're dealing with an instance that hasn't been created yet.
			instances[key] = cty.UnknownVal(ty)
			continue
		}

		instAddr := addr.Instance(key).Absolute(d.ModulePath)

		change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen)
		if change != nil {
			// Don't take any resources that are yet to be deleted into account.
			// If the referenced resource is CreateBeforeDestroy, then orphaned
			// instances will be in the state, as they are not destroyed until
			// after their dependants are updated.
			if change.Action == plans.Delete {
				if !pendingDestroy {
					continue
				}
			}
		}

		// Planned resources are temporarily stored in state with empty values,
		// and need to be replaced by the planned value here.
		if instance.Current.Status == states.ObjectPlanned {
			if change == nil {
				// If the object is in planned status then we should not get
				// here, since we should have found a pending value in the plan
				// above instead.
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing pending object in plan",
					Detail:   fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in OpenTofu; please report it.", instAddr),
					Subject:  &config.DeclRange,
				})
				continue
			}
			val, err := change.After.Decode(ty)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid resource instance data in plan",
					Detail:   fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err),
					Subject:  &config.DeclRange,
				})
				continue
			}

			afterMarks := change.AfterValMarks
			if schema.ContainsSensitive() {
				// Now that we know that the schema contains sensitive marks,
				// Combine those marks together to ensure that the value is marked correctly but not double marked
				schemaMarks := schema.ValueMarks(val, nil)
				afterMarks = combinePathValueMarks(afterMarks, schemaMarks)
			}

			instances[key] = val.MarkWithPaths(afterMarks)

			continue
		}

		instanceObjectSrc, err := instance.Current.Decode(ty)
		if err != nil {
			// This shouldn't happen, since by the time we get here we
			// should have upgraded the state data already.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid resource instance data in state",
				Detail:   fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err),
				Subject:  &config.DeclRange,
			})
			continue
		}

		val := instanceObjectSrc.Value

		if schema.ContainsSensitive() {
			var marks []cty.PathValueMarks
			// Now that we know that the schema contains sensitive marks,
			// Combine those marks together to ensure that the value is marked correctly but not double marked
			val, marks = val.UnmarkDeepWithPaths()
			schemaMarks := schema.ValueMarks(val, nil)

			combined := combinePathValueMarks(marks, schemaMarks)
			val = val.MarkWithPaths(combined)
		}
		instances[key] = val
	}

	// ret should be populated with a valid value in all cases below
	var ret cty.Value

	switch {
	case config.Count != nil:
		// figure out what the last index we have is
		length := -1
		for key := range instances {
			intKey, ok := key.(addrs.IntKey)
			if !ok {
				continue
			}
			if int(intKey) >= length {
				length = int(intKey) + 1
			}
		}

		if length > 0 {
			vals := make([]cty.Value, length)
			for key, instance := range instances {
				intKey, ok := key.(addrs.IntKey)
				if !ok {
					// old key from state, which isn't valid for evaluation
					continue
				}

				vals[int(intKey)] = instance
			}

			// Insert unknown values where there are any missing instances
			for i, v := range vals {
				if v == cty.NilVal {
					vals[i] = cty.UnknownVal(ty)
				}
			}
			ret = cty.TupleVal(vals)
		} else {
			ret = cty.EmptyTupleVal
		}

	case config.ForEach != nil:
		vals := make(map[string]cty.Value)
		for key, instance := range instances {
			strKey, ok := key.(addrs.StringKey)
			if !ok {
				// old key that is being dropped and not used for evaluation
				continue
			}
			vals[string(strKey)] = instance
		}

		if len(vals) > 0 {
			// We use an object rather than a map here because resource schemas
			// may include dynamically-typed attributes, which will then cause
			// each instance to potentially have a different runtime type even
			// though they all conform to the static schema.
			ret = cty.ObjectVal(vals)
		} else {
			ret = cty.EmptyObjectVal
		}

	default:
		val, ok := instances[addrs.NoKey]
		if !ok {
			// if the instance is missing, insert an unknown value
			val = cty.UnknownVal(ty)
		}

		ret = val
	}

	return ret, diags
}

// getResourceSchema looks up the schema for the given resource type from the
// given provider, returning nil when the schema cannot be retrieved.
func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.Provider) *configschema.Block {
	schema, _, err := d.Evaluator.Plugins.ResourceTypeSchema(providerAddr, addr.Mode, addr.Type)
	if err != nil {
		// We have plenty of other codepaths that will detect and report
		// schema lookup errors before we'd reach this point, so we'll just
		// treat a failure here the same as having no schema.
		return nil
	}
	return schema
}

// GetTerraformAttr returns the value of an attribute of the "terraform"/"tofu"
// object; only "workspace" is supported, with a tailored error for the
// long-removed "env" alias.
func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	switch addr.Name {
	case "workspace":
		workspaceName := d.Evaluator.Meta.Env
		return cty.StringVal(workspaceName), diags

	case "env":
		// Prior to Terraform 0.12 there was an attribute "env", which was
		// an alias name for "workspace". This was deprecated and is now
		// removed.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  fmt.Sprintf("Invalid %q attribute", addr.Alias),
			Detail:   fmt.Sprintf(`The %s.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the %s.workspace attribute.`, addr.Alias, addr.Alias),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags

	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  fmt.Sprintf("Invalid %q attribute", addr.Alias),
			Detail:   fmt.Sprintf(`The %q object does not have an attribute named %q. The only supported attribute is %s.workspace, the name of the currently-selected workspace.`, addr.Alias, addr.Name, addr.Alias),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}
}

// GetOutput returns the value of the named output value in the current module
// instance, read from the state, marking it sensitive when declared so.
func (d *evaluationStateData) GetOutput(addr addrs.OutputValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("output value read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Outputs[addr.Name]
	if config == nil {
		// Undeclared output: offer a "did you mean" hint where possible.
		var suggestions []string
		for k := range moduleConfig.Module.Outputs {
			suggestions = append(suggestions, k)
		}
		suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared output value`,
			Detail:   fmt.Sprintf(`An output value with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	output := d.Evaluator.State.OutputValue(addr.Absolute(d.ModulePath))

	// https://github.com/kubegems/opentofu/issues/257
	// If the output is null - it does not serialize as part of the node_output state https://github.com/kubegems/opentofu/blob/4b623c56ffe9e6c1dc345e54470b71b0f261297a/internal/tofu/node_output.go#L592-L596
	// In such a case, we should simply return a nil value because OpenTofu test crash to evaluate for invalid memory address or nil pointer dereference
	if output == nil {
		return cty.NilVal, diags
	} else {
		val := output.Value
		if val == cty.NilVal {
			// Not evaluated yet?
			val = cty.DynamicVal
		}

		if output.Sensitive {
			val = val.Mark(marks.Sensitive)
		}

		return val, diags
	}
}

// GetCheckBlock handles references to "check" blocks, which are currently
// only legal inside a test file's expect_failures attribute; any reference
// reaching this method is therefore reported as an error.
func (d *evaluationStateData) GetCheckBlock(addr addrs.Check, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	// For now, check blocks don't contain any meaningful data and can only
	// be referenced from the testing scope within an expect_failures attribute.
	//
	// We've added them into the scope explicitly since they are referencable,
	// but we'll actually just return an error message saying they can't be
	// referenced in this context.
	var diags tfdiags.Diagnostics
	diags = diags.Append(&hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  "Reference to \"check\" in invalid context",
		Detail:   "The \"check\" object can only be referenced from an \"expect_failures\" attribute within a OpenTofu testing \"run\" block.",
		Subject:  rng.ToHCL().Ptr(),
	})
	return cty.NilVal, diags
}

// moduleDisplayAddr returns a string describing the given module instance
// address that is appropriate for returning to users in situations where the
// root module is possible. Specifically, it returns "the root module" if the
// root module instance is given, or a string representation of the module
// address otherwise.
func moduleDisplayAddr(addr addrs.ModuleInstance) string {
	switch {
	case addr.IsRoot():
		return "the root module"
	default:
		return addr.String()
	}
}
diff --git a/pkg/tofu/evaluate_test.go b/pkg/tofu/evaluate_test.go
new file mode 100644
index 00000000000..cea3843e2a9
--- /dev/null
+++ b/pkg/tofu/evaluate_test.go
@@ -0,0 +1,647 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"sync"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/lang/marks"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// TestEvaluatorGetTerraformAttr verifies that both the "terraform" and "tofu"
// aliases expose the current workspace name.
func TestEvaluatorGetTerraformAttr(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
	}
	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil, nil, nil)

	t.Run("terraform.workspace", func(t *testing.T) {
		want := cty.StringVal("foo")
		got, diags := scope.Data.GetTerraformAttr(addrs.NewTerraformAttr("terraform", "workspace"), tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %q; want %q", got, want)
		}
	})

	t.Run("tofu.workspace", func(t *testing.T) {
		want := cty.StringVal("foo")
		got, diags := scope.Data.GetTerraformAttr(addrs.NewTerraformAttr("tofu", "workspace"), tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %q; want %q", got, want)
		}
	})
}

// TestEvaluatorGetPathAttr verifies path.module and path.root resolve to the
// configured source directory.
func TestEvaluatorGetPathAttr(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				SourceDir: "bar/baz",
			},
		},
	}
	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil, nil, nil)

	t.Run("module", func(t *testing.T) {
		want := cty.StringVal("bar/baz")
		got, diags := scope.Data.GetPathAttr(addrs.PathAttr{
			Name: "module",
		}, tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %#v; want %#v", got, want)
		}
	})

	t.Run("root", func(t *testing.T) {
		want := cty.StringVal("bar/baz")
		got, diags := scope.Data.GetPathAttr(addrs.PathAttr{
			Name: "root",
		}, tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %#v; want %#v", got, want)
		}
	})
}

// TestEvaluatorGetOutputValue verifies that output values read from state are
// returned as-is, and that sensitive outputs carry the sensitive mark.
func TestEvaluatorGetOutputValue(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				Outputs: map[string]*configs.Output{
					"some_output": {
						Name:      "some_output",
						Sensitive: true,
					},
					"some_other_output": {
						Name: "some_other_output",
					},
				},
			},
		},
		State: states.BuildState(func(state *states.SyncState) {
			state.SetOutputValue(addrs.AbsOutputValue{
				Module: addrs.RootModuleInstance,
				OutputValue: addrs.OutputValue{
					Name: "some_output",
				},
			}, cty.StringVal("first"), true)
			state.SetOutputValue(addrs.AbsOutputValue{
				Module: addrs.RootModuleInstance,
				OutputValue: addrs.OutputValue{
					Name: "some_other_output",
				},
			}, cty.StringVal("second"), false)
		}).SyncWrapper(),
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil, nil, nil)

	want := cty.StringVal("first").Mark(marks.Sensitive)
	got, diags := scope.Data.GetOutput(addrs.OutputValue{
		Name: "some_output",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}

	want = cty.StringVal("second")
	got, diags = scope.Data.GetOutput(addrs.OutputValue{
		Name: "some_other_output",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}
}

// This particularly tests that a sensitive attribute in config
// results in a value that has a "sensitive" cty Mark
func TestEvaluatorGetInputVariable(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				Variables: map[string]*configs.Variable{
					"some_var": {
						Name:           "some_var",
						Sensitive:      true,
						Default:        cty.StringVal("foo"),
						Type:           cty.String,
						ConstraintType: cty.String,
					},
					// Avoid double marking a value
					"some_other_var": {
						Name:           "some_other_var",
						Sensitive:      true,
						Default:        cty.StringVal("bar"),
						Type:           cty.String,
						ConstraintType: cty.String,
					},
				},
			},
		},
		VariableValues: map[string]map[string]cty.Value{
			"": {
				"some_var":       cty.StringVal("bar"),
				"some_other_var": cty.StringVal("boop").Mark(marks.Sensitive),
			},
		},
		VariableValuesLock: &sync.Mutex{},
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil, nil, nil)

	want := cty.StringVal("bar").Mark(marks.Sensitive)
	got, diags := scope.Data.GetInputVariable(addrs.InputVariable{
		Name: "some_var",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}

	// An already-marked value must not be double-marked.
	want = cty.StringVal("boop").Mark(marks.Sensitive)
	got, diags = scope.Data.GetInputVariable(addrs.InputVariable{
		Name: "some_other_var",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}
}

// TestEvaluatorGetResource exercises GetResource against a synthetic state
// and schema that include sensitive attributes nested at several levels.
func TestEvaluatorGetResource(t *testing.T) {
	stateSync := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_resource",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"id":"foo", "nesting_list": [{"sensitive_value":"abc"}], "nesting_map": {"foo":{"foo":"x"}}, "nesting_set": [{"baz":"abc"}], "nesting_single": {"boop":"abc"}, "nesting_nesting": {"nesting_list":[{"sensitive_value":"abc"}]}, "value":"hello"}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	}).SyncWrapper()

	rc := &configs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "test_resource",
		Name: "foo",
		Config: configs.SynthBody("", map[string]cty.Value{
			"id": cty.StringVal("foo"),
		}),
		Provider: addrs.Provider{
			Hostname:  addrs.DefaultProviderRegistryHost,
			Namespace: "hashicorp",
			Type:      "test",
		},
	}

	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Changes: plans.NewChanges().SyncWrapper(),
		Config: &configs.Config{
			Module: &configs.Module{
				ManagedResources: map[string]*configs.Resource{
					"test_resource.foo": rc,
				},
			},
		},
		State: stateSync,
		Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]providers.ProviderSchema{
			addrs.NewDefaultProvider("test"): {
				ResourceTypes: map[string]providers.Schema{
					"test_resource": {
						Block: &configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"id": {
									Type:     cty.String,
									Computed: true,
								},
								"value": {
									Type:      cty.String,
									Computed:  true,
									Sensitive: true,
								},
							},
							BlockTypes: map[string]*configschema.NestedBlock{
								"nesting_list": {
									Block: configschema.Block{
										Attributes: map[string]*configschema.Attribute{
											"value":           {Type: cty.String, Optional: true},
											"sensitive_value": {Type: cty.String, Optional: true, Sensitive: true},
										},
									},
									Nesting: configschema.NestingList,
								},
								"nesting_map": {
									Block: configschema.Block{
										Attributes: map[string]*configschema.Attribute{
											"foo": {Type: cty.String, Optional: true, Sensitive: true},
										},
									},
									Nesting: configschema.NestingMap,
								},
								"nesting_set": {
									Block: configschema.Block{
										Attributes: map[string]*configschema.Attribute{
											"baz": {Type: cty.String, Optional: true, Sensitive: true},
										},
									},
									Nesting: configschema.NestingSet,
								},
								"nesting_single": {
									Block: configschema.Block{
										Attributes: map[string]*configschema.Attribute{
											"boop": {Type: cty.String, Optional: true, Sensitive: true},
										},
									},
									Nesting: configschema.NestingSingle,
								},
								"nesting_nesting": {
									Block: configschema.Block{
										BlockTypes: map[string]*configschema.NestedBlock{
											"nesting_list": {
												Block: configschema.Block{
													Attributes: map[string]*configschema.Attribute{
														"value":           {Type: cty.String, Optional: true},
														"sensitive_value": {Type: cty.String, Optional: true, Sensitive: true},
													},
												},
												Nesting: configschema.NestingList,
											},
										},
									},
									Nesting: configschema.NestingSingle,
								},
							},
						},
					},
				},
			},
		}, t),
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil, nil, nil)

	// Every sensitive attribute in the schema must come back carrying the
	// sensitive mark.
	want := cty.ObjectVal(map[string]cty.Value{
		"id": cty.StringVal("foo"),
		"nesting_list": cty.ListVal([]cty.Value{
			cty.ObjectVal(map[string]cty.Value{
				"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
				"value":           cty.NullVal(cty.String),
			}),
		}),
		"nesting_map": cty.MapVal(map[string]cty.Value{
			"foo": cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("x").Mark(marks.Sensitive)}),
		}),
		"nesting_nesting": cty.ObjectVal(map[string]cty.Value{
			"nesting_list": cty.ListVal([]cty.Value{
				cty.ObjectVal(map[string]cty.Value{
					"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
					"value":           cty.NullVal(cty.String),
				}),
			}),
		}),
		"nesting_set": cty.SetVal([]cty.Value{
			cty.ObjectVal(map[string]cty.Value{
				"baz": cty.StringVal("abc").Mark(marks.Sensitive),
			}),
		}),
		"nesting_single": 
cty.ObjectVal(map[string]cty.Value{ + "boop": cty.StringVal("abc").Mark(marks.Sensitive), + }), + "value": cty.StringVal("hello").Mark(marks.Sensitive), + }) + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + } + got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if !got.RawEquals(want) { + t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want) + } +} + +// GetResource will return a planned object's After value +// if there is a change for that resource instance. +func TestEvaluatorGetResource_changes(t *testing.T) { + // Set up existing state + stateSync := states.BuildState(func(ss *states.SyncState) { + ss.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectPlanned, + AttrsJSON: []byte(`{"id":"foo", "to_mark_val":"tacos", "sensitive_value":"abc"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }).SyncWrapper() + + // Create a change for the existing state resource, + // to exercise retrieving the After value of the change + changesSync := plans.NewChanges().SyncWrapper() + change := &plans.ResourceInstanceChange{ + Addr: mustResourceInstanceAddr("test_resource.foo"), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + Change: plans.Change{ + Action: plans.Update, + // Provide an After value that contains a marked value + After: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "to_mark_val": cty.StringVal("pizza").Mark(marks.Sensitive), + "sensitive_value": cty.StringVal("abc"), + "sensitive_collection": cty.MapVal(map[string]cty.Value{ + "boop": 
cty.StringVal("beep"), + }), + }), + }, + } + + // Set up our schemas + schemas := &Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): { + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "to_mark_val": { + Type: cty.String, + Computed: true, + }, + "sensitive_value": { + Type: cty.String, + Computed: true, + Sensitive: true, + }, + "sensitive_collection": { + Type: cty.Map(cty.String), + Computed: true, + Sensitive: true, + }, + }, + }, + }, + }, + }, + }, + } + + // The resource we'll inspect + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + } + schema, _ := schemas.ResourceTypeConfig(addrs.NewDefaultProvider("test"), addr.Mode, addr.Type) + // This encoding separates out the After's marks into its AfterValMarks + csrc, _ := change.Encode(schema.ImpliedType()) + changesSync.AppendResourceInstanceChange(csrc) + + evaluator := &Evaluator{ + Meta: &ContextMeta{ + Env: "foo", + }, + Changes: changesSync, + Config: &configs.Config{ + Module: &configs.Module{ + ManagedResources: map[string]*configs.Resource{ + "test_resource.foo": { + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "test", + }, + }, + }, + }, + }, + State: stateSync, + Plugins: schemaOnlyProvidersForTesting(schemas.Providers, t), + } + + data := &evaluationStateData{ + Evaluator: evaluator, + } + scope := evaluator.Scope(data, nil, nil, nil) + + want := cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + "to_mark_val": cty.StringVal("pizza").Mark(marks.Sensitive), + "sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive), + "sensitive_collection": cty.MapVal(map[string]cty.Value{ + "boop": 
cty.StringVal("beep"), + }).Mark(marks.Sensitive), + }) + + got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if !got.RawEquals(want) { + t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestEvaluatorGetModule(t *testing.T) { + // Create a new evaluator with an existing state + stateSync := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}), + cty.StringVal("bar"), + true, + ) + }).SyncWrapper() + evaluator := evaluatorForModule(stateSync, plans.NewChanges().SyncWrapper()) + data := &evaluationStateData{ + Evaluator: evaluator, + } + scope := evaluator.Scope(data, nil, nil, nil) + want := cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("bar").Mark(marks.Sensitive)}) + got, diags := scope.Data.GetModule(addrs.ModuleCall{ + Name: "mod", + }, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + if !got.RawEquals(want) { + t.Errorf("wrong result %#v; want %#v", got, want) + } + + // Changes should override the state value + changesSync := plans.NewChanges().SyncWrapper() + change := &plans.OutputChange{ + Addr: addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}), + Sensitive: true, + Change: plans.Change{ + After: cty.StringVal("baz"), + }, + } + cs, _ := change.Encode() + changesSync.AppendOutputChange(cs) + evaluator = evaluatorForModule(stateSync, changesSync) + data = &evaluationStateData{ + Evaluator: evaluator, + } + scope = evaluator.Scope(data, nil, nil, nil) + want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)}) + got, diags = scope.Data.GetModule(addrs.ModuleCall{ + Name: "mod", + }, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected 
diagnostics %s", spew.Sdump(diags)) + } + if !got.RawEquals(want) { + t.Errorf("wrong result %#v; want %#v", got, want) + } + + // Test changes with empty state + evaluator = evaluatorForModule(states.NewState().SyncWrapper(), changesSync) + data = &evaluationStateData{ + Evaluator: evaluator, + } + scope = evaluator.Scope(data, nil, nil, nil) + want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)}) + got, diags = scope.Data.GetModule(addrs.ModuleCall{ + Name: "mod", + }, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + if !got.RawEquals(want) { + t.Errorf("wrong result %#v; want %#v", got, want) + } +} + +func evaluatorForModule(stateSync *states.SyncState, changesSync *plans.ChangesSync) *Evaluator { + return &Evaluator{ + Meta: &ContextMeta{ + Env: "foo", + }, + Config: &configs.Config{ + Module: &configs.Module{ + ModuleCalls: map[string]*configs.ModuleCall{ + "mod": { + Name: "mod", + }, + }, + }, + Children: map[string]*configs.Config{ + "mod": { + Path: addrs.Module{"module.mod"}, + Module: &configs.Module{ + Outputs: map[string]*configs.Output{ + "out": { + Name: "out", + Sensitive: true, + }, + }, + }, + }, + }, + }, + State: stateSync, + Changes: changesSync, + } +} diff --git a/pkg/tofu/evaluate_triggers.go b/pkg/tofu/evaluate_triggers.go new file mode 100644 index 00000000000..de4cc8e9f76 --- /dev/null +++ b/pkg/tofu/evaluate_triggers.go @@ -0,0 +1,148 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func evalReplaceTriggeredByExpr(expr hcl.Expression, keyData instances.RepetitionData) (*addrs.Reference, tfdiags.Diagnostics) { + var ref *addrs.Reference + var diags tfdiags.Diagnostics + + traversal, diags := triggersExprToTraversal(expr, keyData) + if diags.HasErrors() { + return nil, diags + } + + // We now have a static traversal, so we can just turn it into an addrs.Reference. + ref, ds := addrs.ParseRef(traversal) + diags = diags.Append(ds) + + return ref, diags +} + +// triggersExprToTraversal takes an hcl expression limited to the syntax allowed +// in replace_triggered_by, and converts it to a static traversal. The +// RepetitionData contains the data necessary to evaluate the only allowed +// variables in the expression, count.index and each.key. +func triggersExprToTraversal(expr hcl.Expression, keyData instances.RepetitionData) (hcl.Traversal, tfdiags.Diagnostics) { + var trav hcl.Traversal + var diags tfdiags.Diagnostics + + switch e := expr.(type) { + case *hclsyntax.RelativeTraversalExpr: + t, d := triggersExprToTraversal(e.Source, keyData) + diags = diags.Append(d) + trav = append(trav, t...) + trav = append(trav, e.Traversal...) + + case *hclsyntax.ScopeTraversalExpr: + // a static reference, we can just append the traversal + trav = append(trav, e.Traversal...) + + case *hclsyntax.IndexExpr: + // Get the collection from the index expression + t, d := triggersExprToTraversal(e.Collection, keyData) + diags = diags.Append(d) + if diags.HasErrors() { + return nil, diags + } + trav = append(trav, t...) 
+ + // The index key is the only place where we could have variables that + // reference count and each, so we need to parse those independently. + idx, hclDiags := parseIndexKeyExpr(e.Key, keyData) + diags = diags.Append(hclDiags) + + trav = append(trav, idx) + + default: + // Something unexpected got through config validation. We're not sure + // what it is, but we'll point it out in the diagnostics for the user + // to fix. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid replace_triggered_by expression", + Detail: "Unexpected expression found in replace_triggered_by.", + Subject: e.Range().Ptr(), + }) + } + + return trav, diags +} + +// parseIndexKeyExpr takes an hcl.Expression and parses it as an index key, while +// evaluating any references to count.index or each.key. +func parseIndexKeyExpr(expr hcl.Expression, keyData instances.RepetitionData) (hcl.TraverseIndex, hcl.Diagnostics) { + idx := hcl.TraverseIndex{ + SrcRange: expr.Range(), + } + + trav, diags := hcl.RelTraversalForExpr(expr) + if diags.HasErrors() { + return idx, diags + } + + keyParts := []string{} + + for _, t := range trav { + attr, ok := t.(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index expression", + Detail: "Only constant values, count.index or each.key are allowed in index expressions.", + Subject: expr.Range().Ptr(), + }) + return idx, diags + } + keyParts = append(keyParts, attr.Name) + } + + switch strings.Join(keyParts, ".") { + case "count.index": + if keyData.CountIndex == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "count" in non-counted context`, + Detail: `The "count" object can only be used in "resource" blocks when the "count" argument is set.`, + Subject: expr.Range().Ptr(), + }) + } + idx.Key = keyData.CountIndex + + case "each.key": + if keyData.EachKey == cty.NilVal { + diags = 
diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "each" in context without for_each`, + Detail: `The "each" object can be used only in "resource" blocks when the "for_each" argument is set.`, + Subject: expr.Range().Ptr(), + }) + } + idx.Key = keyData.EachKey + default: + // Something may have slipped through validation, probably from a json + // configuration. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index expression", + Detail: "Only constant values, count.index or each.key are allowed in index expressions.", + Subject: expr.Range().Ptr(), + }) + } + + return idx, diags + +} diff --git a/pkg/tofu/evaluate_triggers_test.go b/pkg/tofu/evaluate_triggers_test.go new file mode 100644 index 00000000000..51fe85c6d04 --- /dev/null +++ b/pkg/tofu/evaluate_triggers_test.go @@ -0,0 +1,99 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/zclconf/go-cty/cty" +) + +func TestEvalReplaceTriggeredBy(t *testing.T) { + tests := map[string]struct { + // Raw config expression from within replace_triggered_by list. + // If this does not contains any count or each references, it should + // directly parse into the same *addrs.Reference. 
+ expr string + + // If the expression contains count or each, then we need to add + // repetition data, and the static string to parse into the desired + // *addrs.Reference + repData instances.RepetitionData + reference string + }{ + "single resource": { + expr: "test_resource.a", + }, + + "resource instance attr": { + expr: "test_resource.a.attr", + }, + + "resource instance index attr": { + expr: "test_resource.a[0].attr", + }, + + "resource instance count": { + expr: "test_resource.a[count.index]", + repData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + reference: "test_resource.a[0]", + }, + "resource instance for_each": { + expr: "test_resource.a[each.key].attr", + repData: instances.RepetitionData{ + EachKey: cty.StringVal("k"), + }, + reference: `test_resource.a["k"].attr`, + }, + "resource instance for_each map attr": { + expr: "test_resource.a[each.key].attr[each.key]", + repData: instances.RepetitionData{ + EachKey: cty.StringVal("k"), + }, + reference: `test_resource.a["k"].attr["k"]`, + }, + } + + for name, tc := range tests { + pos := hcl.Pos{Line: 1, Column: 1} + t.Run(name, func(t *testing.T) { + expr, hclDiags := hclsyntax.ParseExpression([]byte(tc.expr), "", pos) + if hclDiags.HasErrors() { + t.Fatal(hclDiags) + } + + got, diags := evalReplaceTriggeredByExpr(expr, tc.repData) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + want := tc.reference + if want == "" { + want = tc.expr + } + + // create the desired reference + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(want), "", pos) + if travDiags.HasErrors() { + t.Fatal(travDiags) + } + ref, diags := addrs.ParseRef(traversal) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if got.DisplayString() != ref.DisplayString() { + t.Fatalf("expected %q: got %q", ref.DisplayString(), got.DisplayString()) + } + }) + } +} diff --git a/pkg/tofu/evaluate_valid.go b/pkg/tofu/evaluate_valid.go new file mode 100644 index 00000000000..ef4c692149e --- /dev/null +++ 
b/pkg/tofu/evaluate_valid.go @@ -0,0 +1,332 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/didyoumean" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// StaticValidateReferences checks the given references against schemas and +// other statically-checkable rules, producing error diagnostics if any +// problems are found. +// +// If this method returns errors for a particular reference then evaluating +// that reference is likely to generate a very similar error, so callers should +// not run this method and then also evaluate the source expression(s) and +// merge the two sets of diagnostics together, since this will result in +// confusing redundant errors. +// +// This method can find more errors than can be found by evaluating an +// expression with a partially-populated scope, since it checks the referenced +// names directly against the schema rather than relying on evaluation errors. +// +// The result may include warning diagnostics if, for example, deprecated +// features are referenced. 
+func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable, source addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, ref := range refs { + moreDiags := d.staticValidateReference(ref, self, source) + diags = diags.Append(moreDiags) + } + return diags +} + +func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable, source addrs.Referenceable) tfdiags.Diagnostics { + modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if modCfg == nil { + // This is a bug in the caller rather than a problem with the + // reference, but rather than crashing out here in an unhelpful way + // we'll just ignore it and trust a different layer to catch it. + return nil + } + + if ref.Subject == addrs.Self { + // The "self" address is a special alias for the address given as + // our self parameter here, if present. + if self == nil { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner, connection, and postcondition blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + return diags + } + + synthRef := *ref // shallow copy + synthRef.Subject = self + ref = &synthRef + } + + switch addr := ref.Subject.(type) { + + // For static validation we validate both resource and resource instance references the same way. + // We mostly disregard the index, though we do some simple validation of + // its _presence_ in staticValidateSingleResourceReference and + // staticValidateMultiResourceReference respectively. 
+ case addrs.Resource: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, source, ref.Remaining, ref.SourceRange)) + return diags + case addrs.ResourceInstance: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), source, ref.Remaining, ref.SourceRange)) + return diags + + // We also handle all module call references the same way, disregarding index. + case addrs.ModuleCall: + return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstance: + return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstanceOutput: + // This one is a funny one because we will take the output name referenced + // and use it to fake up a "remaining" that would make sense for the + // module call itself, rather than for the specific output, and then + // we can just re-use our static module call validation logic. + remain := make(hcl.Traversal, len(ref.Remaining)+1) + copy(remain[1:], ref.Remaining) + remain[0] = hcl.TraverseAttr{ + Name: addr.Name, + + // Using the whole reference as the source range here doesn't exactly + // match how HCL would normally generate an attribute traversal, + // but is close enough for our purposes. + SrcRange: ref.SourceRange.ToHCL(), + } + return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) + + default: + // Anything else we'll just permit through without any static validation + // and let it be caught during dynamic evaluation, in evaluate.go . 
+ return nil + } +} + +func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + // If we have at least one step in "remain" and this resource has + // "count" set then we know for sure this is invalid because we have + // something like: + // aws_instance.foo.bar + // ...when we really need + // aws_instance.foo[count.index].bar + + // It is _not_ safe to do this check when remain is empty, because that + // would also match aws_instance.foo[count.index].bar due to `count.index` + // not being statically-resolvable as part of a reference, and match + // direct references to the whole aws_instance.foo tuple. + if len(remain) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if cfg.Count != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + if cfg.ForEach != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + + return diags +} + +func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) 
tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if addr.Key == addrs.NoKey { + // This is a different path into staticValidateSingleResourceReference + return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) + } else { + if cfg.Count == nil && cfg.ForEach == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unexpected resource instance key`, + Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + return diags +} + +func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, source addrs.Referenceable, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + var modeAdjective string + switch addr.Mode { + case addrs.ManagedResourceMode: + modeAdjective = "managed" + case addrs.DataResourceMode: + modeAdjective = "data" + default: + // should never happen + modeAdjective = "" + } + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + var suggestion string + // A common mistake is omitting the data. prefix when trying to refer + // to a data resource, so we'll add a special hint for that. 
+ if addr.Mode == addrs.ManagedResourceMode { + candidateAddr := addr // not a pointer, so this is a copy + candidateAddr.Mode = addrs.DataResourceMode + if candidateCfg := modCfg.Module.ResourceByAddr(candidateAddr); candidateCfg != nil { + suggestion = fmt.Sprintf("\n\nDid you mean the data resource %s?", candidateAddr) + } + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.%s`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + if cfg.Container != nil && (source == nil || !cfg.Container.Accessible(source)) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to scoped resource`, + Detail: fmt.Sprintf(`The referenced %s resource %q %q is not available from this context.`, modeAdjective, addr.Type, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + } + + providerFqn := modCfg.Module.ProviderForLocalConfig(cfg.ProviderConfigAddr()) + schema, _, err := d.Evaluator.Plugins.ResourceTypeSchema(providerFqn, addr.Mode, addr.Type) + if err != nil { + // Prior validation should've taken care of a schema lookup error, + // so we should never get here but we'll handle it here anyway for + // robustness. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed provider schema lookup`, + Detail: fmt.Sprintf(`Couldn't load schema for %s resource type %q in %s: %s.`, modeAdjective, addr.Type, providerFqn.String(), err), + Subject: rng.ToHCL().Ptr(), + }) + } + + if schema == nil { + // Prior validation should've taken care of a resource block with an + // unsupported type, so we should never get here but we'll handle it + // here anyway for robustness. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource type`, + Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerFqn.String()), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // As a special case we'll detect attempts to access an attribute called + // "count" and produce a special error for it, since versions of Terraform + // prior to v0.12 offered this as a weird special case that we can no + // longer support. + if len(remain) > 0 { + if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource count attribute`, + Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after OpenTofu v0.12. Instead, use length(%s) to count resource instances.`, addr), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + } + + // If we got this far then we'll try to validate the remaining traversal + // steps against our schema. + moreDiags := schema.StaticValidateTraversal(remain) + diags = diags.Append(moreDiags) + + return diags +} + +func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // For now, our focus here is just in testing that the referenced module + // call exists. All other validation is deferred until evaluation time. 
+ _, exists := modCfg.Module.ModuleCalls[addr.Name] + if !exists { + var suggestions []string + for name := range modCfg.Module.ModuleCalls { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + return diags +} + +// moduleConfigDisplayAddr returns a string describing the given module +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleConfigDisplayAddr(addr addrs.Module) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/pkg/tofu/evaluate_valid_test.go b/pkg/tofu/evaluate_valid_test.go new file mode 100644 index 00000000000..314a96fc260 --- /dev/null +++ b/pkg/tofu/evaluate_valid_test.go @@ -0,0 +1,154 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/providers" +) + +func TestStaticValidateReferences(t *testing.T) { + tests := []struct { + Ref string + Src addrs.Referenceable + WantErr string + }{ + { + Ref: "aws_instance.no_count", + WantErr: ``, + }, + { + Ref: "aws_instance.count", + WantErr: ``, + }, + { + Ref: "aws_instance.count[0]", + WantErr: ``, + }, + { + Ref: "aws_instance.nonexist", + WantErr: `Reference to undeclared resource: A managed resource "aws_instance" "nonexist" has not been declared in the root module.`, + }, + { + Ref: "beep.boop", + WantErr: `Reference to undeclared resource: A managed resource "beep" "boop" has not been declared in the root module. + +Did you mean the data resource data.beep.boop?`, + }, + { + Ref: "aws_instance.no_count[0]", + WantErr: `Unexpected resource instance key: Because aws_instance.no_count does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, + }, + { + Ref: "aws_instance.count.foo", + // In this case we return two errors that are somewhat redundant with + // one another, but we'll accept that because they both report the + // problem from different perspectives and so give the user more + // opportunity to understand what's going on here. + WantErr: `2 problems: + +- Missing resource instance key: Because aws_instance.count has "count" set, its attributes must be accessed on specific instances. 
+ +For example, to correlate with indices of a referring resource, use: + aws_instance.count[count.index] +- Unsupported attribute: This object has no argument, nested block, or exported attribute named "foo".`, + }, + { + Ref: "boop_instance.yep", + WantErr: ``, + }, + { + Ref: "boop_whatever.nope", + WantErr: `Invalid resource type: A managed resource type "boop_whatever" is not supported by provider "registry.opentofu.org/foobar/beep".`, + }, + { + Ref: "data.boop_data.boop_nested", + WantErr: `Reference to scoped resource: The referenced data resource "boop_data" "boop_nested" is not available from this context.`, + }, + { + Ref: "data.boop_data.boop_nested", + WantErr: ``, + Src: addrs.Check{Name: "foo"}, + }, + } + + cfg := testModule(t, "static-validate-refs") + evaluator := &Evaluator{ + Config: cfg, + Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("aws"): { + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{}, + }, + }, + }, + addrs.MustParseProviderSourceString("foobar/beep"): { + ResourceTypes: map[string]providers.Schema{ + // intentional mismatch between resource type prefix and provider type + "boop_instance": { + Block: &configschema.Block{}, + }, + }, + DataSources: map[string]providers.Schema{ + "boop_data": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + }, t), + } + + for _, test := range tests { + t.Run(test.Ref, func(t *testing.T) { + traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(test.Ref), "", hcl.Pos{Line: 1, Column: 1}) + if hclDiags.HasErrors() { + t.Fatal(hclDiags.Error()) + } + + refs, diags := lang.References(addrs.ParseRef, []hcl.Traversal{traversal}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + data := &evaluationStateData{ + Evaluator: evaluator, + } + + diags = 
data.StaticValidateReferences(refs, nil, test.Src) + if diags.HasErrors() { + if test.WantErr == "" { + t.Fatalf("Unexpected diagnostics: %s", diags.Err()) + } + + gotErr := diags.Err().Error() + if gotErr != test.WantErr { + t.Fatalf("Wrong diagnostics\ngot: %s\nwant: %s", gotErr, test.WantErr) + } + return + } + + if test.WantErr != "" { + t.Fatalf("Expected diagnostics, but got none\nwant: %s", test.WantErr) + } + }) + } +} diff --git a/pkg/tofu/execute.go b/pkg/tofu/execute.go new file mode 100644 index 00000000000..88d97d96c80 --- /dev/null +++ b/pkg/tofu/execute.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "github.com/kubegems/opentofu/pkg/tfdiags" + +// GraphNodeExecutable is the interface that graph nodes must implement to +// enable execution. +type GraphNodeExecutable interface { + Execute(EvalContext, walkOperation) tfdiags.Diagnostics +} diff --git a/pkg/tofu/features.go b/pkg/tofu/features.go new file mode 100644 index 00000000000..5f3fe1e9207 --- /dev/null +++ b/pkg/tofu/features.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/pkg/tofu/graph.go b/pkg/tofu/graph.go new file mode 100644 index 00000000000..ae40959c2bd --- /dev/null +++ b/pkg/tofu/graph.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "strings" + + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/tfdiags" + + "github.com/kubegems/opentofu/pkg/addrs" + + "github.com/kubegems/opentofu/pkg/dag" +) + +// Graph represents the graph that OpenTofu uses to represent resources +// and their dependencies. +type Graph struct { + // Graph is the actual DAG. This is embedded so you can call the DAG + // methods directly. + dag.AcyclicGraph + + // Path is the path in the module tree that this Graph represents. + Path addrs.ModuleInstance +} + +func (g *Graph) DirectedGraph() dag.Grapher { + return &g.AcyclicGraph +} + +// Walk walks the graph with the given walker for callbacks. The graph +// will be walked with full parallelism, so the walker should expect +// to be called concurrently. +func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { + return g.walk(walker) +} + +func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { + // The callbacks for enter/exiting a graph + ctx := walker.EvalContext() + + // We explicitly create the panicHandler before + // spawning many go routines for vertex evaluation + // to minimize the performance impact of capturing + // the stack trace. + panicHandler := logging.PanicHandlerWithTraceFn() + + // Walk the graph. + walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) { + // the walkFn is called asynchronously, and needs to be recovered + // separately in the case of a panic. 
+ defer panicHandler() + + log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) + + defer func() { + if diags.HasErrors() { + for _, diag := range diags { + if diag.Severity() == tfdiags.Error { + desc := diag.Description() + log.Printf("[ERROR] vertex %q error: %s", dag.VertexName(v), desc.Summary) + } + } + log.Printf("[TRACE] vertex %q: visit complete, with errors", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) + } + }() + + // vertexCtx is the context that we use when evaluating. This + // is normally the context of our graph but can be overridden + // with a GraphNodeModuleInstance impl. + vertexCtx := ctx + if pn, ok := v.(GraphNodeModuleInstance); ok { + vertexCtx = walker.EnterPath(pn.Path()) + defer walker.ExitPath(pn.Path()) + } + + // If the node is exec-able, then execute it. + if ev, ok := v.(GraphNodeExecutable); ok { + diags = diags.Append(walker.Execute(vertexCtx, ev)) + if diags.HasErrors() { + return + } + } + + // If the node is dynamically expanded, then expand it + if ev, ok := v.(GraphNodeDynamicExpandable); ok { + log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) + + g, err := ev.DynamicExpand(vertexCtx) + diags = diags.Append(err) + if diags.HasErrors() { + log.Printf("[TRACE] vertex %q: failed expanding dynamic subgraph: %s", dag.VertexName(v), err) + return + } + if g != nil { + // The subgraph should always be valid, per our normal acyclic + // graph validation rules. + if err := g.Validate(); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Graph node has invalid dynamic subgraph", + fmt.Sprintf("The internal logic for %q generated an invalid dynamic subgraph: %s.\n\nThis is a bug in OpenTofu. Please report it!", dag.VertexName(v), err), + )) + return + } + // If we passed validation then there is exactly one root node. + // That root node should always be "rootNode", the singleton + // root node value. 
+ if n, err := g.Root(); err != nil || n != dag.Vertex(rootNode) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Graph node has invalid dynamic subgraph", + fmt.Sprintf("The internal logic for %q generated an invalid dynamic subgraph: the root node is %T, which is not a suitable root node type.\n\nThis is a bug in OpenTofu. Please report it!", dag.VertexName(v), n), + )) + return + } + + // Walk the subgraph + log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) + subDiags := g.walk(walker) + diags = diags.Append(subDiags) + if subDiags.HasErrors() { + var errs []string + for _, d := range subDiags { + errs = append(errs, d.Description().Summary) + } + log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors: %s", dag.VertexName(v), strings.Join(errs, ",")) + return + } + log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) + } + } + return + } + + return g.AcyclicGraph.Walk(walkFn) +} diff --git a/pkg/tofu/graph_builder.go b/pkg/tofu/graph_builder.go new file mode 100644 index 00000000000..8f280035cb8 --- /dev/null +++ b/pkg/tofu/graph_builder.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/logging" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// GraphBuilder is an interface that can be implemented and used with +// OpenTofu to build the graph that OpenTofu walks. +type GraphBuilder interface { + // Build builds the graph for the given module path. It is up to + // the interface implementation whether this build should expand + // the graph or not. 
+ Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) +} + +// BasicGraphBuilder is a GraphBuilder that builds a graph out of a +// series of transforms and (optionally) validates the graph is a valid +// structure. +type BasicGraphBuilder struct { + Steps []GraphTransformer + // Optional name to add to the graph debug log + Name string +} + +func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + g := &Graph{Path: path} + + var lastStepStr string + for _, step := range b.Steps { + if step == nil { + continue + } + log.Printf("[TRACE] Executing graph transform %T", step) + + err := step.Transform(g) + + if logging.IsDebugOrHigher() { + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s ------", step, logging.Indent(thisStepStr)) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] Completed graph transform %T (no changes)", step) + } + } + + if err != nil { + if nf, isNF := err.(tfdiags.NonFatalError); isNF { + diags = diags.Append(nf.Diagnostics) + } else { + diags = diags.Append(err) + return g, diags + } + } + } + + if err := g.Validate(); err != nil { + log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) + diags = diags.Append(err) + return nil, diags + } + + return g, diags +} diff --git a/pkg/tofu/graph_builder_apply.go b/pkg/tofu/graph_builder_apply.go new file mode 100644 index 00000000000..c8da09a34d6 --- /dev/null +++ b/pkg/tofu/graph_builder_apply.go @@ -0,0 +1,209 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ApplyGraphBuilder implements GraphBuilder and is responsible for building +// a graph for applying an OpenTofu diff. +// +// Because the graph is built from the diff (vs. the config or state), +// this helps ensure that the apply-time graph doesn't modify any resources +// that aren't explicitly in the diff. There are other scenarios where the +// apply can deviate from the diff, so this is just one layer of protection. +type ApplyGraphBuilder struct { + // Config is the configuration tree that the diff was built from. + Config *configs.Config + + // Changes describes the changes that we need to apply. + Changes *plans.Changes + + // State is the current state + State *states.State + + // RootVariableValues are the root module input variables captured as + // part of the plan object, which we must reproduce in the apply step + // to get a consistent result. + RootVariableValues InputValues + + // Plugins is a library of the plug-in components (providers and + // provisioners) available for use. + Plugins *contextPlugins + + // Targets are resources to target. This is only required to make sure + // unnecessary outputs aren't included in the apply graph. The plan + // builder successfully handles targeting resources. In the future, + // outputs should go into the diff so that this is unnecessary. + Targets []addrs.Targetable + + // ForceReplace are the resource instance addresses that the user + // requested to force replacement for when creating the plan, if any. + // The apply step refers to these as part of verifying that the planned + // actions remain consistent between plan and apply. 
+ ForceReplace []addrs.AbsResourceInstance + + // Plan Operation this graph will be used for. + Operation walkOperation + + // ExternalReferences allows the external caller to pass in references to + // nodes that should not be pruned even if they are not referenced within + // the actual graph. + ExternalReferences []*addrs.Reference +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Name: "ApplyGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &nodeExpandApplyableResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + forceReplace: b.ForceReplace, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config. During apply, + // we use this just to ensure that the whole-resource metadata is + // updated to reflect things such as whether the count argument is + // set in config, or which provider configuration manages each resource. 
+ &ConfigTransformer{ + Concrete: concreteResource, + Config: b.Config, + }, + + // Add dynamic values + &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, + &ModuleVariableTransformer{Config: b.Config}, + &LocalTransformer{Config: b.Config}, + &OutputTransformer{ + Config: b.Config, + Destroying: b.Operation == walkDestroy, + }, + + // Creates all the resource instances represented in the diff, along + // with dependency edges against the whole-resource nodes added by + // ConfigTransformer above. + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: b.State, + Changes: b.Changes, + Config: b.Config, + }, + + // Add nodes and edges for check block assertions. Check block data + // sources were added earlier. + &checkTransformer{ + Config: b.Config, + Operation: b.Operation, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Create orphan output nodes + &OrphanOutputTransformer{Config: b.Config, State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // add providers + transformProviders(concreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, + + // After schema transformer, we can add function references + &ProviderFunctionTransformer{Config: b.Config}, + + // Remove unused providers and proxies + &PruneProviderTransformer{}, + + // Create expansion nodes for all of the module calls. This must + // come after all other transformers that create nodes representing + // objects that can belong to modules. + &ModuleExpansionTransformer{Config: b.Config}, + + // Plug in any external references. 
+ &ExternalReferenceTransformer{ + ExternalReferences: b.ExternalReferences, + }, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + &AttachDependenciesTransformer{}, + + // Nested data blocks should be loaded after every other resource has + // done its thing. + &checkStartTransformer{Config: b.Config, Operation: b.Operation}, + + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. + &ForcedCBDTransformer{}, + + // Destruction ordering + &DestroyEdgeTransformer{ + Changes: b.Changes, + Operation: b.Operation, + }, + &CBDEdgeTransformer{ + Config: b.Config, + State: b.State, + }, + + // We need to remove configuration nodes that are not used at all, as + // they may not be able to evaluate, especially during destroy. + // These include variables, locals, and instance expanders. + &pruneUnusedNodesTransformer{}, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // close the root module + &CloseRootModuleTransformer{ + RootConfig: b.Config, + }, + + // Perform the transitive reduction to make our graph a bit + // more understandable if possible (it usually is possible). + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/pkg/tofu/graph_builder_apply_test.go b/pkg/tofu/graph_builder_apply_test.go new file mode 100644 index 00000000000..c2484156094 --- /dev/null +++ b/pkg/tofu/graph_builder_apply_test.go @@ -0,0 +1,837 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestApplyGraphBuilder_impl(t *testing.T) { + var _ GraphBuilder = new(ApplyGraphBuilder) +} + +func TestApplyGraphBuilder(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.create"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.other"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + { + Addr: mustResourceInstanceAddr("module.child.test_object.create"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: mustResourceInstanceAddr("module.child.test_object.other"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + }, + } + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-basic"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong path %q", g.Path.String()) + } + + got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(testApplyGraphBuilderStr) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} + +// This tests the ordering of two resources where a non-CBD depends +// on a CBD. GH-11349. 
+func TestApplyGraphBuilder_depCbd(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-dep-cbd"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong path %q", g.Path.String()) + } + + // We're going to go hunting for our deposed instance node here, so we + // can find out its key to use in the assertions below. 
+ var dk states.DeposedKey + for _, v := range g.Vertices() { + tv, ok := v.(*NodeDestroyDeposedResourceInstanceObject) + if !ok { + continue + } + if dk != states.NotDeposed { + t.Fatalf("more than one deposed instance node in the graph; want only one") + } + dk = tv.DeposedKey + } + if dk == states.NotDeposed { + t.Fatalf("no deposed instance node in the graph; want one") + } + + destroyName := fmt.Sprintf("test_object.A (destroy deposed %s)", dk) + + // Create A, Modify B, Destroy A + testGraphHappensBefore( + t, g, + "test_object.A", + destroyName, + ) + testGraphHappensBefore( + t, g, + "test_object.A", + "test_object.B", + ) + testGraphHappensBefore( + t, g, + "test_object.B", + destroyName, + ) +} + +// This tests the ordering of two resources that are both CBD that +// require destroy/create. +func TestApplyGraphBuilder_doubleCBD(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + }, + } + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-double-cbd"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong path %q", g.Path.String()) + } + + // We're going to go hunting for our deposed instance node here, so we + // can find out its key to use in the assertions below. 
+ var destroyA, destroyB string + for _, v := range g.Vertices() { + tv, ok := v.(*NodeDestroyDeposedResourceInstanceObject) + if !ok { + continue + } + + switch tv.Addr.Resource.Resource.Name { + case "A": + destroyA = fmt.Sprintf("test_object.A (destroy deposed %s)", tv.DeposedKey) + case "B": + destroyB = fmt.Sprintf("test_object.B (destroy deposed %s)", tv.DeposedKey) + default: + t.Fatalf("unknown instance: %s", tv.Addr) + } + } + + // Create A, Modify B, Destroy A + testGraphHappensBefore( + t, g, + "test_object.A", + destroyA, + ) + testGraphHappensBefore( + t, g, + "test_object.A", + "test_object.B", + ) + testGraphHappensBefore( + t, g, + "test_object.B", + destroyB, + ) +} + +// This tests the ordering of two resources being destroyed that depend +// on each other from only state. GH-11749 +func TestApplyGraphBuilder_destroyStateOnly(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("module.child.test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: mustResourceInstanceAddr("module.child.test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"bar"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.test_object.A")}, + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "empty"), + Changes: changes, + State: state, + Plugins: simpleMockPluginLibrary(), + } + + g, diags := b.Build(addrs.RootModuleInstance) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong path %q", g.Path.String()) + } + + testGraphHappensBefore( + t, g, + "module.child.test_object.B (destroy)", + "module.child.test_object.A (destroy)") +} + +// This tests the ordering of destroying a single count of a resource. +func TestApplyGraphBuilder_destroyCount(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A[1]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + root := state.RootModule() + addrA := mustResourceInstanceAddr("test_object.A[1]") + root.SetResourceInstanceCurrent( + addrA.Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B"}`), + Dependencies: []addrs.ConfigResource{addrA.ContainingResource().Config()}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-count"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if 
g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(testApplyGraphBuilderDestroyCountStr) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} + +func TestApplyGraphBuilder_moduleDestroy(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("module.A.test_object.foo"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: mustResourceInstanceAddr("module.B.test_object.foo"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + }, + } + + state := states.NewState() + modA := state.EnsureModule(addrs.RootModuleInstance.Child("A", addrs.NoKey)) + modA.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + modB := state.EnsureModule(addrs.RootModuleInstance.Child("B", addrs.NoKey)) + modB.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo","value":"foo"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.A.test_object.foo")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-module-destroy"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + testGraphHappensBefore( + t, g, + "module.B.test_object.foo (destroy)", + "module.A.test_object.foo (destroy)", + ) +} + +func TestApplyGraphBuilder_targetModule(t 
*testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.foo"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + { + Addr: mustResourceInstanceAddr("module.child2.test_object.foo"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-target-module"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child2", addrs.NoKey), + }, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + testGraphNotContains(t, g, "module.child1.output.instance_id") +} + +// Ensure that an update resulting from the removal of a resource happens after +// that resource is destroyed. +func TestApplyGraphBuilder_updateFromOrphan(t *testing.T) { + schemas := simpleTestSchemas() + instanceSchema := schemas.Providers[addrs.NewDefaultProvider("test")].ResourceTypes["test_object"] + + bBefore, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("b_id"), + "test_string": cty.StringVal("a_id"), + }), instanceSchema.Block.ImpliedType()) + bAfter, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("b_id"), + "test_string": cty.StringVal("changed"), + }), instanceSchema.Block.ImpliedType()) + + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.a"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.b"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: bBefore, + After: bAfter, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: 
addrs.ManagedResourceMode, + Type: "test_object", + Name: "a", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a_id"}`), + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "b", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b_id","test_string":"a_id"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "a", + }, + Module: root.Addr.Module(), + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-apply-orphan-update"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := strings.TrimSpace(` +test_object.a (destroy) +test_object.b + test_object.a (destroy) +`) + + instanceGraph := filterInstances(g) + got := strings.TrimSpace(instanceGraph.String()) + + if got != expected { + t.Fatalf("expected:\n%s\ngot:\n%s", expected, got) + } +} + +// Ensure that an update resulting from the removal of a resource happens before +// a CBD resource is destroyed. 
+func TestApplyGraphBuilder_updateFromCBDOrphan(t *testing.T) { + schemas := simpleTestSchemas() + instanceSchema := schemas.Providers[addrs.NewDefaultProvider("test")].ResourceTypes["test_object"] + + bBefore, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("b_id"), + "test_string": cty.StringVal("a_id"), + }), instanceSchema.Block.ImpliedType()) + bAfter, _ := plans.NewDynamicValue( + cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("b_id"), + "test_string": cty.StringVal("changed"), + }), instanceSchema.Block.ImpliedType()) + + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.a"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.b"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: bBefore, + After: bAfter, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "a", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a_id"}`), + CreateBeforeDestroy: true, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "b", + }.Instance(addrs.NoKey), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b_id","test_string":"a_id"}`), + Dependencies: []addrs.ConfigResource{ + { + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "a", + }, + Module: root.Addr.Module(), + }, + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, 
"graph-builder-apply-orphan-update"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := strings.TrimSpace(` +test_object.a (destroy) + test_object.b +test_object.b +`) + + instanceGraph := filterInstances(g) + got := strings.TrimSpace(instanceGraph.String()) + + if got != expected { + t.Fatalf("expected:\n%s\ngot:\n%s", expected, got) + } +} + +// The orphan clean up node should not be connected to a provider +func TestApplyGraphBuilder_orphanedWithProvider(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"].foo`), + ) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "graph-builder-orphan-alias"), + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatal(err) + } + + // The cleanup node has no state or config of its own, so would create a + // default provider which we don't want. 
+ testGraphNotContains(t, g, "provider.test") +} + +func TestApplyGraphBuilder_withChecks(t *testing.T) { + awsProvider := mockProviderWithResourceTypeSchema("aws_instance", simpleTestSchema()) + + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("aws_instance.foo"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: mustResourceInstanceAddr("aws_instance.baz"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + }, + }, + { + Addr: mustResourceInstanceAddr("data.aws_data_source.bar"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Read, + }, + ActionReason: plans.ResourceInstanceReadBecauseCheckNested, + }, + }, + } + + plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), + }, nil) + + b := &ApplyGraphBuilder{ + Config: testModule(t, "apply-with-checks"), + Changes: changes, + Plugins: plugins, + State: states.NewState(), + Operation: walkApply, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong path %q", g.Path.String()) + } + + got := strings.TrimSpace(g.String()) + // We're especially looking for the edge here, where aws_instance.bat + // has a dependency on aws_instance.boo + want := strings.TrimSpace(testPlanWithCheckGraphBuilderStr) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n\ndiff:\n%s", got, want, diff) + } + +} + +const testPlanWithCheckGraphBuilderStr = ` +(execute checks) + aws_instance.baz +aws_instance.baz + aws_instance.baz (expand) +aws_instance.baz (expand) + aws_instance.foo +aws_instance.foo + aws_instance.foo (expand) +aws_instance.foo (expand) + provider["registry.opentofu.org/hashicorp/aws"] +check.my_check (expand) + data.aws_data_source.bar +data.aws_data_source.bar + (execute checks) + 
data.aws_data_source.bar (expand) +data.aws_data_source.bar (expand) + provider["registry.opentofu.org/hashicorp/aws"] +provider["registry.opentofu.org/hashicorp/aws"] +provider["registry.opentofu.org/hashicorp/aws"] (close) + data.aws_data_source.bar +root + check.my_check (expand) + provider["registry.opentofu.org/hashicorp/aws"] (close) +` + +const testApplyGraphBuilderStr = ` +module.child (close) + module.child.test_object.other +module.child (expand) +module.child.test_object.create + module.child.test_object.create (expand) +module.child.test_object.create (expand) + module.child (expand) + provider["registry.opentofu.org/hashicorp/test"] +module.child.test_object.other + module.child.test_object.other (expand) +module.child.test_object.other (expand) + module.child.test_object.create +provider["registry.opentofu.org/hashicorp/test"] +provider["registry.opentofu.org/hashicorp/test"] (close) + module.child.test_object.other + test_object.other +root + module.child (close) + provider["registry.opentofu.org/hashicorp/test"] (close) +test_object.create + test_object.create (expand) +test_object.create (expand) + provider["registry.opentofu.org/hashicorp/test"] +test_object.other + test_object.other (expand) +test_object.other (expand) + test_object.create +` + +const testApplyGraphBuilderDestroyCountStr = ` +provider["registry.opentofu.org/hashicorp/test"] +provider["registry.opentofu.org/hashicorp/test"] (close) + test_object.B +root + provider["registry.opentofu.org/hashicorp/test"] (close) +test_object.A (expand) + provider["registry.opentofu.org/hashicorp/test"] +test_object.A[1] (destroy) + provider["registry.opentofu.org/hashicorp/test"] +test_object.B + test_object.A[1] (destroy) + test_object.B (expand) +test_object.B (expand) + test_object.A (expand) +` diff --git a/pkg/tofu/graph_builder_eval.go b/pkg/tofu/graph_builder_eval.go new file mode 100644 index 00000000000..e583be5e54a --- /dev/null +++ b/pkg/tofu/graph_builder_eval.go @@ -0,0 +1,121 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable +// for evaluating in-memory values (input variables, local values, output +// values) in the state without any other side-effects. +// +// This graph is used only in weird cases, such as the "tofu console" +// CLI command, where we need to evaluate expressions against the state +// without taking any other actions. +// +// The generated graph will include nodes for providers, resources, etc +// just to allow indirect dependencies to be resolved, but these nodes will +// not take any actions themselves since we assume that their parts of the +// state, if any, are already complete. +// +// Although the providers are never configured, they must still be available +// in order to obtain schema information used for type checking, etc. +type EvalGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the current state + State *states.State + + // RootVariableValues are the raw input values for root input variables + // given by the caller, which we'll resolve into final values as part + // of the plan walk. + RootVariableValues InputValues + + // Plugins is a library of plug-in components (providers and + // provisioners) available for use. 
+ Plugins *contextPlugins +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Name: "EvalGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Steps() []GraphTransformer { + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeEvalableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Config: b.Config, + }, + + // Add dynamic values + &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, + &ModuleVariableTransformer{Config: b.Config}, + &LocalTransformer{Config: b.Config}, + &OutputTransformer{ + Config: b.Config, + Planning: true, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + transformProviders(concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, + + // After schema transformer, we can add function references + &ProviderFunctionTransformer{Config: b.Config}, + + // Remove unused providers and proxies + &PruneProviderTransformer{}, + + // Create expansion nodes for all of the module calls. This must + // come after all other transformers that create nodes representing + // objects that can belong to modules. + &ModuleExpansionTransformer{Config: b.Config}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. 
+ &ReferenceTransformer{}, + + // Although we don't configure providers, we do still start them up + // to get their schemas, and so we must shut them down again here. + &CloseProviderTransformer{}, + + // Close root module + &CloseRootModuleTransformer{ + RootConfig: b.Config, + }, + + // Remove redundant edges to simplify the graph. + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/pkg/tofu/graph_builder_plan.go b/pkg/tofu/graph_builder_plan.go new file mode 100644 index 00000000000..a08eee4efc7 --- /dev/null +++ b/pkg/tofu/graph_builder_plan.go @@ -0,0 +1,352 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// PlanGraphBuilder is a GraphBuilder implementation that builds a graph for +// planning and for other "plan-like" operations which don't require an +// already-calculated plan as input. +// +// Unlike the apply graph builder, this graph builder: +// +// - Makes its decisions primarily based on the given configuration, which +// represents the desired state. +// +// - Ignores certain lifecycle concerns like create_before_destroy, because +// those are only important once we already know what action we're planning +// to take against a particular resource instance. +type PlanGraphBuilder struct { + // Config is the configuration tree to build a plan from. + Config *configs.Config + + // State is the current state + State *states.State + + // RootVariableValues are the raw input values for root input variables + // given by the caller, which we'll resolve into final values as part + // of the plan walk. 
+ RootVariableValues InputValues + + // Plugins is a library of plug-in components (providers and + // provisioners) available for use. + Plugins *contextPlugins + + // Targets are resources to target + Targets []addrs.Targetable + + // ForceReplace are resource instances where if we would normally have + // generated a NoOp or Update action then we'll force generating a replace + // action instead. Create and Delete actions are not affected. + ForceReplace []addrs.AbsResourceInstance + + // skipRefresh indicates that we should skip refreshing managed resources + skipRefresh bool + + // preDestroyRefresh indicates that we are executing the refresh which + // happens immediately before a destroy plan, which happens to use the + // normal planing mode so skipPlanChanges cannot be set. + preDestroyRefresh bool + + // skipPlanChanges indicates that we should skip the step of comparing + // prior state with configuration and generating planned changes to + // resource instances. (This is for the "refresh only" planning mode, + // where we _only_ do the refresh step.) + skipPlanChanges bool + + ConcreteProvider ConcreteProviderNodeFunc + ConcreteResource ConcreteResourceNodeFunc + ConcreteResourceInstance ConcreteResourceInstanceNodeFunc + ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc + ConcreteResourceInstanceDeposed ConcreteResourceInstanceDeposedNodeFunc + ConcreteModule ConcreteModuleNodeFunc + + // Plan Operation this graph will be used for. + Operation walkOperation + + // ExternalReferences allows the external caller to pass in references to + // nodes that should not be pruned even if they are not referenced within + // the actual graph. + ExternalReferences []*addrs.Reference + + // ImportTargets are the list of resources to import. + ImportTargets []*ImportTarget + + // EndpointsToRemove are the list of resources and modules to forget from + // the state. 
+ EndpointsToRemove []addrs.ConfigRemovable + + // GenerateConfig tells OpenTofu where to write and generated config for + // any import targets that do not already have configuration. + // + // If empty, then config will not be generated. + GenerateConfigPath string +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + log.Printf("[TRACE] building graph for %s", b.Operation) + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Name: "PlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Steps() []GraphTransformer { + switch b.Operation { + case walkPlan: + b.initPlan() + case walkPlanDestroy: + b.initDestroy() + case walkValidate: + b.initValidate() + case walkImport: + b.initImport() + default: + panic("invalid plan operation: " + b.Operation.String()) + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config + &ConfigTransformer{ + Concrete: b.ConcreteResource, + Config: b.Config, + + // Resources are not added from the config on destroy. + skip: b.Operation == walkPlanDestroy, + + importTargets: b.ImportTargets, + + // We only want to generate config during a plan operation. + generateConfigPathForImportTargets: b.GenerateConfigPath, + }, + + // Add dynamic values + &RootVariableTransformer{Config: b.Config, RawValues: b.RootVariableValues}, + &ModuleVariableTransformer{Config: b.Config}, + &LocalTransformer{Config: b.Config}, + &OutputTransformer{ + Config: b.Config, + RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh, + Destroying: b.Operation == walkPlanDestroy, + + // NOTE: We currently treat anything built with the plan graph + // builder as "planning" for our purposes here, because we share + // the same graph node implementation between all of the walk + // types and so the pre-planning walks still think they are + // producing a plan even though we immediately discard it. 
+ Planning: true, + }, + + // Add nodes and edges for the check block assertions. Check block data + // sources were added earlier. + &checkTransformer{ + Config: b.Config, + Operation: b.Operation, + }, + + // Add orphan resources + &OrphanResourceInstanceTransformer{ + Concrete: b.ConcreteResourceOrphan, + State: b.State, + Config: b.Config, + skip: b.Operation == walkPlanDestroy, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can plan to destroy them. (During plan this will + // intentionally skip creating nodes for _current_ objects, since + // ConfigTransformer created nodes that will do that during + // DynamicExpand.) + &StateTransformer{ + ConcreteCurrent: b.ConcreteResourceInstance, + ConcreteDeposed: b.ConcreteResourceInstanceDeposed, + State: b.State, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Create orphan output nodes + &OrphanOutputTransformer{ + Config: b.Config, + State: b.State, + Planning: true, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // add providers + transformProviders(b.ConcreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Plugins: b.Plugins, Config: b.Config}, + + // After schema transformer, we can add function references + &ProviderFunctionTransformer{Config: b.Config}, + + // Remove unused providers and proxies + &PruneProviderTransformer{}, + + // Create expansion nodes for all of the module calls. This must + // come after all other transformers that create nodes representing + // objects that can belong to modules. + &ModuleExpansionTransformer{Concrete: b.ConcreteModule, Config: b.Config}, + + // Plug in any external references. 
+ &ExternalReferenceTransformer{ + ExternalReferences: b.ExternalReferences, + }, + + &ReferenceTransformer{}, + + &AttachDependenciesTransformer{}, + + // Make sure data sources are aware of any depends_on from the + // configuration + &attachDataResourceDependsOnTransformer{}, + + // DestroyEdgeTransformer is only required during a plan so that the + // TargetsTransformer can determine which nodes to keep in the graph. + &DestroyEdgeTransformer{ + Operation: b.Operation, + }, + + &pruneUnusedNodesTransformer{ + skip: b.Operation != walkPlanDestroy, + }, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. + &ForcedCBDTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Close the root module + &CloseRootModuleTransformer{ + RootConfig: b.Config, + }, + + // Perform the transitive reduction to make our graph a bit + // more understandable if possible (it usually is possible). 
+ &TransitiveReductionTransformer{}, + } + + return steps +} + +func (b *PlanGraphBuilder) initPlan() { + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &nodeExpandPlannableResource{ + NodeAbstractResource: a, + skipRefresh: b.skipRefresh, + skipPlanChanges: b.skipPlanChanges, + preDestroyRefresh: b.preDestroyRefresh, + forceReplace: b.ForceReplace, + } + } + + b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + skipRefresh: b.skipRefresh, + skipPlanChanges: b.skipPlanChanges, + EndpointsToRemove: b.EndpointsToRemove, + } + } + + b.ConcreteResourceInstanceDeposed = func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + + skipRefresh: b.skipRefresh, + skipPlanChanges: b.skipPlanChanges, + EndpointsToRemove: b.EndpointsToRemove, + } + } +} + +func (b *PlanGraphBuilder) initDestroy() { + b.initPlan() + + b.ConcreteResourceInstance = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlanDestroyableResourceInstance{ + NodeAbstractResourceInstance: a, + skipRefresh: b.skipRefresh, + } + } +} + +func (b *PlanGraphBuilder) initValidate() { + // Set the provider to the normal provider. This will ask for input. 
+ b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodeValidatableResource{ + NodeAbstractResource: a, + } + } + + b.ConcreteModule = func(n *nodeExpandModule) dag.Vertex { + return &nodeValidateModule{ + nodeExpandModule: *n, + } + } +} + +func (b *PlanGraphBuilder) initImport() { + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &nodeExpandPlannableResource{ + NodeAbstractResource: a, + + // For now we always skip planning changes for import, since we are + // not going to combine importing with other changes. This is + // temporary to try and maintain existing import behaviors, but + // planning will need to be allowed for more complex configurations. + skipPlanChanges: true, + + // We also skip refresh for now, since the plan output is written + // as the new state, and users are not expecting the import process + // to update any other instances in state. + skipRefresh: true, + } + } +} diff --git a/pkg/tofu/graph_builder_plan_test.go b/pkg/tofu/graph_builder_plan_test.go new file mode 100644 index 00000000000..8e14823893d --- /dev/null +++ b/pkg/tofu/graph_builder_plan_test.go @@ -0,0 +1,278 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" +) + +func TestPlanGraphBuilder_impl(t *testing.T) { + var _ GraphBuilder = new(PlanGraphBuilder) +} + +func TestPlanGraphBuilder(t *testing.T) { + awsProvider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "aws_security_group": {Block: simpleTestSchema()}, + "aws_instance": {Block: simpleTestSchema()}, + "aws_load_balancer": {Block: simpleTestSchema()}, + }, + }, + } + openstackProvider := mockProviderWithResourceTypeSchema("openstack_floating_ip", simpleTestSchema()) + plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), + addrs.NewDefaultProvider("openstack"): providers.FactoryFixed(openstackProvider), + }, nil) + + b := &PlanGraphBuilder{ + Config: testModule(t, "graph-builder-plan-basic"), + Plugins: plugins, + Operation: walkPlan, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(testPlanGraphBuilderStr) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} + +func TestPlanGraphBuilder_dynamicBlock(t *testing.T) { + provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "list": {Type: cty.List(cty.String), Computed: true}, + }, + BlockTypes: 
map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, + }) + plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), + }, nil) + + b := &PlanGraphBuilder{ + Config: testModule(t, "graph-builder-plan-dynblock"), + Plugins: plugins, + Operation: walkPlan, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + // This test is here to make sure we properly detect references inside + // the special "dynamic" block construct. The most important thing here + // is that at the end test_thing.c depends on both test_thing.a and + // test_thing.b. Other details might shift over time as other logic in + // the graph builders changes. 
+ got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(` +provider["registry.opentofu.org/hashicorp/test"] +provider["registry.opentofu.org/hashicorp/test"] (close) + test_thing.c (expand) +root + provider["registry.opentofu.org/hashicorp/test"] (close) +test_thing.a (expand) + provider["registry.opentofu.org/hashicorp/test"] +test_thing.b (expand) + provider["registry.opentofu.org/hashicorp/test"] +test_thing.c (expand) + test_thing.a (expand) + test_thing.b (expand) +`) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} + +func TestPlanGraphBuilder_attrAsBlocks(t *testing.T) { + provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "nested": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + Optional: true, + }, + }, + }) + plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), + }, nil) + + b := &PlanGraphBuilder{ + Config: testModule(t, "graph-builder-plan-attr-as-blocks"), + Plugins: plugins, + Operation: walkPlan, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + // This test is here to make sure we properly detect references inside + // the "nested" block that is actually defined in the schema as a + // list-of-objects attribute. This requires some special effort + // inside lang.ReferencesInBlock to make sure it searches blocks of + // type "nested" along with an attribute named "nested". 
+ got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(` +provider["registry.opentofu.org/hashicorp/test"] +provider["registry.opentofu.org/hashicorp/test"] (close) + test_thing.b (expand) +root + provider["registry.opentofu.org/hashicorp/test"] (close) +test_thing.a (expand) + provider["registry.opentofu.org/hashicorp/test"] +test_thing.b (expand) + test_thing.a (expand) +`) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", diff) + } +} + +func TestPlanGraphBuilder_targetModule(t *testing.T) { + b := &PlanGraphBuilder{ + Config: testModule(t, "graph-builder-plan-target-module-provider"), + Plugins: simpleMockPluginLibrary(), + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Child("child2", addrs.NoKey), + }, + Operation: walkPlan, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + t.Logf("Graph: %s", g.String()) + + testGraphNotContains(t, g, `module.child1.provider["registry.opentofu.org/hashicorp/test"]`) + testGraphNotContains(t, g, "module.child1.test_object.foo") +} + +func TestPlanGraphBuilder_forEach(t *testing.T) { + awsProvider := mockProviderWithResourceTypeSchema("aws_instance", simpleTestSchema()) + + plugins := newContextPlugins(map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), + }, nil) + + b := &PlanGraphBuilder{ + Config: testModule(t, "plan-for-each"), + Plugins: plugins, + Operation: walkPlan, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + got := strings.TrimSpace(g.String()) + // We're especially looking for the edge here, where aws_instance.bat + // has a dependency on aws_instance.boo + want := strings.TrimSpace(testPlanGraphBuilderForEachStr) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("wrong result\n%s", 
diff) + } +} + +const testPlanGraphBuilderStr = ` +aws_instance.web (expand) + aws_security_group.firewall (expand) + var.foo +aws_load_balancer.weblb (expand) + aws_instance.web (expand) +aws_security_group.firewall (expand) + provider["registry.opentofu.org/hashicorp/aws"] +local.instance_id (expand) + aws_instance.web (expand) +openstack_floating_ip.random (expand) + provider["registry.opentofu.org/hashicorp/openstack"] +output.instance_id (expand) + local.instance_id (expand) +provider["registry.opentofu.org/hashicorp/aws"] + openstack_floating_ip.random (expand) +provider["registry.opentofu.org/hashicorp/aws"] (close) + aws_load_balancer.weblb (expand) +provider["registry.opentofu.org/hashicorp/openstack"] +provider["registry.opentofu.org/hashicorp/openstack"] (close) + openstack_floating_ip.random (expand) +root + output.instance_id (expand) + provider["registry.opentofu.org/hashicorp/aws"] (close) + provider["registry.opentofu.org/hashicorp/openstack"] (close) +var.foo +` +const testPlanGraphBuilderForEachStr = ` +aws_instance.bar (expand) + provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.bar2 (expand) + provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.bat (expand) + aws_instance.boo (expand) +aws_instance.baz (expand) + provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.boo (expand) + provider["registry.opentofu.org/hashicorp/aws"] +aws_instance.foo (expand) + provider["registry.opentofu.org/hashicorp/aws"] +provider["registry.opentofu.org/hashicorp/aws"] +provider["registry.opentofu.org/hashicorp/aws"] (close) + aws_instance.bar (expand) + aws_instance.bar2 (expand) + aws_instance.bat (expand) + aws_instance.baz (expand) + aws_instance.foo (expand) +root + provider["registry.opentofu.org/hashicorp/aws"] (close) +` diff --git a/pkg/tofu/graph_builder_test.go b/pkg/tofu/graph_builder_test.go new file mode 100644 index 00000000000..61535136340 --- /dev/null +++ b/pkg/tofu/graph_builder_test.go @@ -0,0 +1,69 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + + "github.com/kubegems/opentofu/pkg/dag" +) + +func TestBasicGraphBuilder_impl(t *testing.T) { + var _ GraphBuilder = new(BasicGraphBuilder) +} + +func TestBasicGraphBuilder(t *testing.T) { + b := &BasicGraphBuilder{ + Steps: []GraphTransformer{ + &testBasicGraphBuilderTransform{1}, + }, + } + + g, err := b.Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + if g.Path.String() != addrs.RootModuleInstance.String() { + t.Fatalf("wrong module path %q", g.Path) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testBasicGraphBuilderStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestBasicGraphBuilder_validate(t *testing.T) { + b := &BasicGraphBuilder{ + Steps: []GraphTransformer{ + &testBasicGraphBuilderTransform{1}, + &testBasicGraphBuilderTransform{2}, + }, + } + + _, err := b.Build(addrs.RootModuleInstance) + if err == nil { + t.Fatal("should error") + } +} + +type testBasicGraphBuilderTransform struct { + V dag.Vertex +} + +func (t *testBasicGraphBuilderTransform) Transform(g *Graph) error { + g.Add(t.V) + return nil +} + +const testBasicGraphBuilderStr = ` +1 +` diff --git a/pkg/tofu/graph_dot.go b/pkg/tofu/graph_dot.go new file mode 100644 index 00000000000..9a44328d52a --- /dev/null +++ b/pkg/tofu/graph_dot.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "github.com/kubegems/opentofu/pkg/dag" + +// GraphDot returns the dot formatting of a visual representation of +// the given OpenTofu graph. 
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { + return string(g.Dot(opts)), nil +} diff --git a/pkg/tofu/graph_dot_test.go b/pkg/tofu/graph_dot_test.go new file mode 100644 index 00000000000..33d32655779 --- /dev/null +++ b/pkg/tofu/graph_dot_test.go @@ -0,0 +1,301 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/dag" +) + +func TestGraphDot(t *testing.T) { + cases := []struct { + Name string + Graph testGraphFunc + Opts dag.DotOpts + Expect string + }{ + { + Name: "empty", + Graph: func() *Graph { return &Graph{} }, + Expect: ` +digraph { + compound = "true" + newrank = "true" + subgraph "root" { + } +}`, + }, + { + Name: "three-level", + Graph: func() *Graph { + var g Graph + root := &testDrawableOrigin{"root"} + g.Add(root) + + levelOne := []interface{}{"foo", "bar"} + for i, s := range levelOne { + levelOne[i] = &testDrawable{ + VertexName: s.(string), + } + v := levelOne[i] + + g.Add(v) + g.Connect(dag.BasicEdge(v, root)) + } + + levelTwo := []string{"baz", "qux"} + for i, s := range levelTwo { + v := &testDrawable{ + VertexName: s, + } + + g.Add(v) + g.Connect(dag.BasicEdge(v, levelOne[i])) + } + + return &g + }, + Expect: ` +digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] bar" + "[root] baz" + "[root] foo" + "[root] qux" + "[root] root" + "[root] bar" -> "[root] root" + "[root] baz" -> "[root] foo" + "[root] foo" -> "[root] root" + "[root] qux" -> "[root] bar" + } +} + `, + }, + + { + Name: "cycle", + Opts: dag.DotOpts{ + DrawCycles: true, + }, + Graph: func() *Graph { + var g Graph + root := &testDrawableOrigin{"root"} + g.Add(root) + + vA := g.Add(&testDrawable{ + VertexName: "A", + }) + + vB := g.Add(&testDrawable{ + VertexName: "B", + }) + + vC := g.Add(&testDrawable{ + VertexName: "C", + }) + + 
g.Connect(dag.BasicEdge(vA, root)) + g.Connect(dag.BasicEdge(vA, vC)) + g.Connect(dag.BasicEdge(vB, vA)) + g.Connect(dag.BasicEdge(vC, vB)) + + return &g + }, + Expect: ` +digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] A" + "[root] B" + "[root] C" + "[root] root" + "[root] A" -> "[root] B" [color = "red", penwidth = "2.0"] + "[root] A" -> "[root] C" + "[root] A" -> "[root] root" + "[root] B" -> "[root] A" + "[root] B" -> "[root] C" [color = "red", penwidth = "2.0"] + "[root] C" -> "[root] A" [color = "red", penwidth = "2.0"] + "[root] C" -> "[root] B" + } +} + `, + }, + + { + Name: "subgraphs, no depth restriction", + Opts: dag.DotOpts{ + MaxDepth: -1, + }, + Graph: func() *Graph { + var g Graph + root := &testDrawableOrigin{"root"} + g.Add(root) + + var sub Graph + vSubRoot := sub.Add(&testDrawableOrigin{"sub_root"}) + + var subsub Graph + subsub.Add(&testDrawableOrigin{"subsub_root"}) + vSubV := sub.Add(&testDrawableSubgraph{ + VertexName: "subsub", + SubgraphMock: &subsub, + }) + + vSub := g.Add(&testDrawableSubgraph{ + VertexName: "sub", + SubgraphMock: &sub, + }) + + g.Connect(dag.BasicEdge(vSub, root)) + sub.Connect(dag.BasicEdge(vSubV, vSubRoot)) + + return &g + }, + Expect: ` +digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] root" + "[root] sub" + "[root] sub" -> "[root] root" + } + subgraph "cluster_sub" { + label = "sub" + "[sub] sub_root" + "[sub] subsub" + "[sub] subsub" -> "[sub] sub_root" + } + subgraph "cluster_subsub" { + label = "subsub" + "[subsub] subsub_root" + } +} + `, + }, + + { + Name: "subgraphs, with depth restriction", + Opts: dag.DotOpts{ + MaxDepth: 1, + }, + Graph: func() *Graph { + var g Graph + root := &testDrawableOrigin{"root"} + g.Add(root) + + var sub Graph + rootSub := sub.Add(&testDrawableOrigin{"sub_root"}) + + var subsub Graph + subsub.Add(&testDrawableOrigin{"subsub_root"}) + + subV := sub.Add(&testDrawableSubgraph{ + VertexName: "subsub", + SubgraphMock: &subsub, 
+ }) + vSub := g.Add(&testDrawableSubgraph{ + VertexName: "sub", + SubgraphMock: &sub, + }) + + g.Connect(dag.BasicEdge(vSub, root)) + sub.Connect(dag.BasicEdge(subV, rootSub)) + return &g + }, + Expect: ` +digraph { + compound = "true" + newrank = "true" + subgraph "root" { + "[root] root" + "[root] sub" + "[root] sub" -> "[root] root" + } + subgraph "cluster_sub" { + label = "sub" + "[sub] sub_root" + "[sub] subsub" + "[sub] subsub" -> "[sub] sub_root" + } +} + `, + }, + } + + for _, tc := range cases { + tn := tc.Name + t.Run(tn, func(t *testing.T) { + g := tc.Graph() + actual := string(g.Dot(&tc.Opts)) + expected := strings.TrimSpace(tc.Expect) + "\n" + if actual != expected { + t.Fatalf("%s:\n\nexpected:\n%s\n\ngot:\n%s", tn, expected, actual) + } + }) + } +} + +type testGraphFunc func() *Graph + +type testDrawable struct { + VertexName string + DependentOnMock []string +} + +func (node *testDrawable) Name() string { + return node.VertexName +} +func (node *testDrawable) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{Name: n, Attrs: map[string]string{}} +} +func (node *testDrawable) DependableName() []string { + return []string{node.VertexName} +} +func (node *testDrawable) DependentOn() []string { + return node.DependentOnMock +} + +type testDrawableOrigin struct { + VertexName string +} + +func (node *testDrawableOrigin) Name() string { + return node.VertexName +} +func (node *testDrawableOrigin) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{Name: n, Attrs: map[string]string{}} +} +func (node *testDrawableOrigin) DotOrigin() bool { + return true +} +func (node *testDrawableOrigin) DependableName() []string { + return []string{node.VertexName} +} + +type testDrawableSubgraph struct { + VertexName string + SubgraphMock *Graph + DependentOnMock []string +} + +func (node *testDrawableSubgraph) Name() string { + return node.VertexName +} +func (node *testDrawableSubgraph) Subgraph() dag.Grapher { + return 
node.SubgraphMock +} +func (node *testDrawableSubgraph) DotNode(n string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{Name: n, Attrs: map[string]string{}} +} +func (node *testDrawableSubgraph) DependentOn() []string { + return node.DependentOnMock +} diff --git a/pkg/tofu/graph_interface_subgraph.go b/pkg/tofu/graph_interface_subgraph.go new file mode 100644 index 00000000000..bb0735c372e --- /dev/null +++ b/pkg/tofu/graph_interface_subgraph.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" +) + +// GraphNodeModuleInstance says that a node is part of a graph with a +// different path, and the context should be adjusted accordingly. +type GraphNodeModuleInstance interface { + Path() addrs.ModuleInstance +} + +// GraphNodeModulePath is implemented by all referenceable nodes, to indicate +// their configuration path in unexpanded modules. +type GraphNodeModulePath interface { + ModulePath() addrs.Module +} diff --git a/pkg/tofu/graph_test.go b/pkg/tofu/graph_test.go new file mode 100644 index 00000000000..e66849459cf --- /dev/null +++ b/pkg/tofu/graph_test.go @@ -0,0 +1,61 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/dag" +) + +// testGraphNotContains is an assertion helper that tests that a node is +// NOT contained in the graph. +func testGraphNotContains(t *testing.T, g *Graph, name string) { + for _, v := range g.Vertices() { + if dag.VertexName(v) == name { + t.Fatalf( + "Expected %q to NOT be in:\n\n%s", + name, g.String()) + } + } +} + +// testGraphHappensBefore is an assertion helper that tests that node +// A (dag.VertexName value) happens before node B. 
+func testGraphHappensBefore(t *testing.T, g *Graph, A, B string) { + t.Helper() + // Find the B vertex + var vertexB dag.Vertex + for _, v := range g.Vertices() { + if dag.VertexName(v) == B { + vertexB = v + break + } + } + if vertexB == nil { + t.Fatalf( + "Expected %q before %q. Couldn't find %q in:\n\n%s", + A, B, B, g.String()) + } + + // Look at ancestors + deps, err := g.Ancestors(vertexB) + if err != nil { + t.Fatalf("Error: %s in graph:\n\n%s", err, g.String()) + } + + // Make sure B is in there + for _, v := range deps.List() { + if dag.VertexName(v) == A { + // Success + return + } + } + + t.Fatalf( + "Expected %q before %q in:\n\n%s", + A, B, g.String()) +} diff --git a/pkg/tofu/graph_walk.go b/pkg/tofu/graph_walk.go new file mode 100644 index 00000000000..6ea4e8eddf4 --- /dev/null +++ b/pkg/tofu/graph_walk.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// GraphWalker is an interface that can be implemented that when used +// with Graph.Walk will invoke the given callbacks under certain events. +type GraphWalker interface { + EvalContext() EvalContext + EnterPath(addrs.ModuleInstance) EvalContext + ExitPath(addrs.ModuleInstance) + Execute(EvalContext, GraphNodeExecutable) tfdiags.Diagnostics +} + +// NullGraphWalker is a GraphWalker implementation that does nothing. +// This can be embedded within other GraphWalker implementations for easily +// implementing all the required functions. 
+type NullGraphWalker struct{} + +func (NullGraphWalker) EvalContext() EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} +func (NullGraphWalker) Execute(EvalContext, GraphNodeExecutable) tfdiags.Diagnostics { return nil } diff --git a/pkg/tofu/graph_walk_context.go b/pkg/tofu/graph_walk_context.go new file mode 100644 index 00000000000..66e8990abf9 --- /dev/null +++ b/pkg/tofu/graph_walk_context.go @@ -0,0 +1,148 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "sync" + "time" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/encryption" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/refactoring" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ContextGraphWalker is the GraphWalker implementation used with the +// Context struct to walk and evaluate the graph. 
+type ContextGraphWalker struct { + NullGraphWalker + + // Configurable values + Context *Context + State *states.SyncState // Used for safe concurrent access to state + RefreshState *states.SyncState // Used for safe concurrent access to state + PrevRunState *states.SyncState // Used for safe concurrent access to state + Changes *plans.ChangesSync // Used for safe concurrent writes to changes + Checks *checks.State // Used for safe concurrent writes of checkable objects and their check results + InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances + ImportResolver *ImportResolver // Tracks import targets as they are being resolved + MoveResults refactoring.MoveResults // Read-only record of earlier processing of move statements + Operation walkOperation + StopContext context.Context + RootVariableValues InputValues + Config *configs.Config + PlanTimestamp time.Time + Encryption encryption.Encryption + + // This is an output. Do not set this, nor read it while a graph walk + // is in progress. 
+ NonFatalDiagnostics tfdiags.Diagnostics + + once sync.Once + contextLock sync.Mutex + contexts map[string]*BuiltinEvalContext + + variableValuesLock sync.Mutex + variableValues map[string]map[string]cty.Value + + providerLock sync.Mutex + providerCache map[string]providers.Interface + + provisionerLock sync.Mutex + provisionerCache map[string]provisioners.Interface +} + +func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { + w.contextLock.Lock() + defer w.contextLock.Unlock() + + // If we already have a context for this path cached, use that + key := path.String() + if ctx, ok := w.contexts[key]; ok { + return ctx + } + + ctx := w.EvalContext().WithPath(path) + w.contexts[key] = ctx.(*BuiltinEvalContext) + return ctx +} + +func (w *ContextGraphWalker) EvalContext() EvalContext { + w.once.Do(w.init) + + // Our evaluator shares some locks with the main context and the walker + // so that we can safely run multiple evaluations at once across + // different modules. 
+ evaluator := &Evaluator{ + Meta: w.Context.meta, + Config: w.Config, + Operation: w.Operation, + State: w.State, + Changes: w.Changes, + Plugins: w.Context.plugins, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + PlanTimestamp: w.PlanTimestamp, + } + + ctx := &BuiltinEvalContext{ + StopContext: w.StopContext, + Hooks: w.Context.hooks, + InputValue: w.Context.uiInput, + InstanceExpanderValue: w.InstanceExpander, + Plugins: w.Context.plugins, + MoveResultsValue: w.MoveResults, + ImportResolverValue: w.ImportResolver, + ProviderCache: w.providerCache, + ProviderInputConfig: w.Context.providerInputConfig, + ProviderLock: &w.providerLock, + ProvisionerCache: w.provisionerCache, + ProvisionerLock: &w.provisionerLock, + ChangesValue: w.Changes, + ChecksValue: w.Checks, + StateValue: w.State, + RefreshStateValue: w.RefreshState, + PrevRunStateValue: w.PrevRunState, + Evaluator: evaluator, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + Encryption: w.Encryption, + } + + return ctx +} + +func (w *ContextGraphWalker) init() { + w.contexts = make(map[string]*BuiltinEvalContext) + w.providerCache = make(map[string]providers.Interface) + w.provisionerCache = make(map[string]provisioners.Interface) + w.variableValues = make(map[string]map[string]cty.Value) + + // Populate root module variable values. Other modules will be populated + // during the graph walk. 
+ w.variableValues[""] = make(map[string]cty.Value) + for k, iv := range w.RootVariableValues { + w.variableValues[""][k] = iv.Value + } +} + +func (w *ContextGraphWalker) Execute(ctx EvalContext, n GraphNodeExecutable) tfdiags.Diagnostics { + // Acquire a lock on the semaphore + w.Context.parallelSem.Acquire() + defer w.Context.parallelSem.Release() + + return n.Execute(ctx, w.Operation) +} diff --git a/pkg/tofu/graph_walk_operation.go b/pkg/tofu/graph_walk_operation.go new file mode 100644 index 00000000000..14e999220cf --- /dev/null +++ b/pkg/tofu/graph_walk_operation.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go + +// walkOperation is an enum which tells the walkContext what to do. +type walkOperation byte + +const ( + walkInvalid walkOperation = iota + walkApply + walkPlan + walkPlanDestroy + walkValidate + walkDestroy + walkImport + walkEval // used just to prepare EvalContext for expression evaluation, with no other actions +) diff --git a/pkg/tofu/graph_walk_test.go b/pkg/tofu/graph_walk_test.go new file mode 100644 index 00000000000..9588b1831a5 --- /dev/null +++ b/pkg/tofu/graph_walk_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestNullGraphWalker_impl(t *testing.T) { + var _ GraphWalker = NullGraphWalker{} +} diff --git a/pkg/tofu/hook.go b/pkg/tofu/hook.go new file mode 100644 index 00000000000..e21f5411f7c --- /dev/null +++ b/pkg/tofu/hook.go @@ -0,0 +1,197 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +// HookAction is an enum of actions that can be taken as a result of a hook +// callback. This allows you to modify the behavior of OpenTofu at runtime. +type HookAction byte + +const ( + // HookActionContinue continues with processing as usual. + HookActionContinue HookAction = iota + + // HookActionHalt halts immediately: no more hooks are processed + // and the action that OpenTofu was about to take is cancelled. + HookActionHalt +) + +// Hook is the interface that must be implemented to hook into various +// parts of OpenTofu, allowing you to inspect or change behavior at runtime. +// +// There are MANY hook points into OpenTofu. If you only want to implement +// some hook points, but not all (which is the likely case), then embed the +// NilHook into your struct, which implements all of the interface but does +// nothing. Then, override only the functions you want to implement. +type Hook interface { + // PreApply and PostApply are called before and after an action for a + // single instance is applied. The error argument in PostApply is the + // error, if any, that was returned from the provider Apply call itself. + PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) + + // PreDiff and PostDiff are called before and after a provider is given + // the opportunity to customize the proposed new state to produce the + // planned new state. 
+ PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) + PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + + // The provisioning hooks signal both the overall start and end of + // provisioning for a particular instance and of each of the individual + // configured provisioners for each instance. The sequence of these + // for a given instance might look something like this: + // + // PreProvisionInstance(aws_instance.foo[1], ...) + // PreProvisionInstanceStep(aws_instance.foo[1], "file") + // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) + // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") + // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) + // PostProvisionInstance(aws_instance.foo[1], ...) + // + // ProvisionOutput is called with output sent back by the provisioners. + // This will be called multiple times as output comes in, with each call + // representing one line of output. It cannot control whether the + // provisioner continues running. + PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) + PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) + ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) + + // PreRefresh and PostRefresh are called before and after a single + // resource state is refreshed, respectively. 
+ PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) + PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) + + // PreImportState and PostImportState are called before and after + // (respectively) each state import operation for a given resource address when + // using the legacy import command. + PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PrePlanImport and PostPlanImport are called during a plan before and after planning to import + // a new resource using the configuration-driven import workflow. + PrePlanImport(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostPlanImport(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PreApplyImport and PostApplyImport are called during an apply for each imported resource when + // using the configuration-driven import workflow. + PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) + PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) + + // Stopping is called if an external signal requests that OpenTofu + // gracefully abort an operation in progress. + // + // This notification might suggest that the user wants OpenTofu to exit + // ASAP and in that case it's possible that if OpenTofu runs for too much + // longer then it'll get killed un-gracefully, and so this hook could be + // an opportunity to persist any transient data that would be lost under + // a subsequent kill signal. However, implementations must take care to do + // so in a way that won't cause corruption if the process _is_ killed while + // this hook is still running. 
+ // + // This hook cannot control whether OpenTofu continues, because the + // graceful shutdown process is typically already running by the time this + // function is called. + Stopping() + + // PostStateUpdate is called each time the state is updated. It receives + // a deep copy of the state, which it may therefore access freely without + // any need for locks to protect from concurrent writes from the caller. + PostStateUpdate(new *states.State) (HookAction, error) +} + +// NilHook is a Hook implementation that does nothing. It exists only to +// simplify implementing hooks. You can embed this into your Hook implementation +// and only implement the functions you are interested in. +type NilHook struct{} + +var _ Hook = (*NilHook)(nil) + +func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) 
PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return HookActionContinue, nil +} + +func (h *NilHook) PrePlanImport(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return HookActionContinue, nil +} + +func (h *NilHook) PostPlanImport(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return HookActionContinue, nil +} + +func (h *NilHook) PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + return HookActionContinue, nil +} + +func (h *NilHook) PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) Stopping() { + // Does nothing at all by default +} + +func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { + return HookActionContinue, nil +} diff --git a/pkg/tofu/hook_mock.go b/pkg/tofu/hook_mock.go new file mode 100644 index 00000000000..939059ad4ee --- /dev/null +++ b/pkg/tofu/hook_mock.go @@ -0,0 +1,344 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +// MockHook is an implementation of Hook that can be used for tests. +// It records all of its function calls. +type MockHook struct { + sync.Mutex + + PreApplyCalled bool + PreApplyAddr addrs.AbsResourceInstance + PreApplyGen states.Generation + PreApplyAction plans.Action + PreApplyPriorState cty.Value + PreApplyPlannedState cty.Value + PreApplyReturn HookAction + PreApplyError error + + PostApplyCalled bool + PostApplyAddr addrs.AbsResourceInstance + PostApplyGen states.Generation + PostApplyNewState cty.Value + PostApplyError error + PostApplyReturn HookAction + PostApplyReturnError error + PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) + + PreDiffCalled bool + PreDiffAddr addrs.AbsResourceInstance + PreDiffGen states.Generation + PreDiffPriorState cty.Value + PreDiffProposedState cty.Value + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffAddr addrs.AbsResourceInstance + PostDiffGen states.Generation + PostDiffAction plans.Action + PostDiffPriorState cty.Value + PostDiffPlannedState cty.Value + PostDiffReturn HookAction + PostDiffError error + + PreProvisionInstanceCalled bool + PreProvisionInstanceAddr addrs.AbsResourceInstance + PreProvisionInstanceState cty.Value + PreProvisionInstanceReturn HookAction + PreProvisionInstanceError error + + PostProvisionInstanceCalled bool + PostProvisionInstanceAddr addrs.AbsResourceInstance + PostProvisionInstanceState cty.Value + PostProvisionInstanceReturn HookAction + PostProvisionInstanceError error + + PreProvisionInstanceStepCalled bool + PreProvisionInstanceStepAddr addrs.AbsResourceInstance + PreProvisionInstanceStepProvisionerType string + 
PreProvisionInstanceStepReturn HookAction + PreProvisionInstanceStepError error + + PostProvisionInstanceStepCalled bool + PostProvisionInstanceStepAddr addrs.AbsResourceInstance + PostProvisionInstanceStepProvisionerType string + PostProvisionInstanceStepErrorArg error + PostProvisionInstanceStepReturn HookAction + PostProvisionInstanceStepError error + + ProvisionOutputCalled bool + ProvisionOutputAddr addrs.AbsResourceInstance + ProvisionOutputProvisionerType string + ProvisionOutputMessage string + + PreRefreshCalled bool + PreRefreshAddr addrs.AbsResourceInstance + PreRefreshGen states.Generation + PreRefreshPriorState cty.Value + PreRefreshReturn HookAction + PreRefreshError error + + PostRefreshCalled bool + PostRefreshAddr addrs.AbsResourceInstance + PostRefreshGen states.Generation + PostRefreshPriorState cty.Value + PostRefreshNewState cty.Value + PostRefreshReturn HookAction + PostRefreshError error + + PreImportStateCalled bool + PreImportStateAddr addrs.AbsResourceInstance + PreImportStateID string + PreImportStateReturn HookAction + PreImportStateError error + + PostImportStateCalled bool + PostImportStateAddr addrs.AbsResourceInstance + PostImportStateNewStates []providers.ImportedResource + PostImportStateReturn HookAction + PostImportStateError error + + PrePlanImportCalled bool + PrePlanImportAddr addrs.AbsResourceInstance + PrePlanImportReturn HookAction + PrePlanImportError error + + PostPlanImportAddr addrs.AbsResourceInstance + PostPlanImportCalled bool + PostPlanImportReturn HookAction + PostPlanImportError error + + PreApplyImportCalled bool + PreApplyImportAddr addrs.AbsResourceInstance + PreApplyImportReturn HookAction + PreApplyImportError error + + PostApplyImportCalled bool + PostApplyImportAddr addrs.AbsResourceInstance + PostApplyImportReturn HookAction + PostApplyImportError error + + StoppingCalled bool + + PostStateUpdateCalled bool + PostStateUpdateState *states.State + PostStateUpdateReturn HookAction + PostStateUpdateError error 
+} + +var _ Hook = (*MockHook)(nil) + +func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreApplyCalled = true + h.PreApplyAddr = addr + h.PreApplyGen = gen + h.PreApplyAction = action + h.PreApplyPriorState = priorState + h.PreApplyPlannedState = plannedNewState + return h.PreApplyReturn, h.PreApplyError +} + +func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostApplyCalled = true + h.PostApplyAddr = addr + h.PostApplyGen = gen + h.PostApplyNewState = newState + h.PostApplyError = err + + if h.PostApplyFn != nil { + return h.PostApplyFn(addr, gen, newState, err) + } + + return h.PostApplyReturn, h.PostApplyReturnError +} + +func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreDiffCalled = true + h.PreDiffAddr = addr + h.PreDiffGen = gen + h.PreDiffPriorState = priorState + h.PreDiffProposedState = proposedNewState + return h.PreDiffReturn, h.PreDiffError +} + +func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostDiffCalled = true + h.PostDiffAddr = addr + h.PostDiffGen = gen + h.PostDiffAction = action + h.PostDiffPriorState = priorState + h.PostDiffPlannedState = plannedNewState + return h.PostDiffReturn, h.PostDiffError +} + +func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionInstanceCalled = true + h.PreProvisionInstanceAddr = addr + h.PreProvisionInstanceState = state + return h.PreProvisionInstanceReturn, 
h.PreProvisionInstanceError
}

func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostProvisionInstanceCalled = true
	h.PostProvisionInstanceAddr = addr
	h.PostProvisionInstanceState = state
	return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError
}

func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PreProvisionInstanceStepCalled = true
	h.PreProvisionInstanceStepAddr = addr
	h.PreProvisionInstanceStepProvisionerType = typeName
	return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError
}

func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostProvisionInstanceStepCalled = true
	h.PostProvisionInstanceStepAddr = addr
	h.PostProvisionInstanceStepProvisionerType = typeName
	h.PostProvisionInstanceStepErrorArg = err
	return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError
}

func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
	h.Lock()
	defer h.Unlock()

	h.ProvisionOutputCalled = true
	h.ProvisionOutputAddr = addr
	h.ProvisionOutputProvisionerType = typeName
	h.ProvisionOutputMessage = line
}

func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PreRefreshCalled = true
	h.PreRefreshAddr = addr
	h.PreRefreshGen = gen
	h.PreRefreshPriorState = priorState
	return h.PreRefreshReturn, h.PreRefreshError
}

func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostRefreshCalled = true
	h.PostRefreshAddr = addr
	// Record the generation argument as well. Previously it was silently
	// dropped even though the PostRefreshGen field exists for it (compare
	// PreRefresh, which records PreRefreshGen), so tests could never assert
	// on the post-refresh generation.
	h.PostRefreshGen = gen
	h.PostRefreshPriorState = priorState
	h.PostRefreshNewState = newState
	return h.PostRefreshReturn, h.PostRefreshError
}

func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PreImportStateCalled = true
	h.PreImportStateAddr = addr
	h.PreImportStateID = importID
	return h.PreImportStateReturn, h.PreImportStateError
}

func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostImportStateCalled = true
	h.PostImportStateAddr = addr
	h.PostImportStateNewStates = imported
	return h.PostImportStateReturn, h.PostImportStateError
}

func (h *MockHook) PrePlanImport(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PrePlanImportCalled = true
	h.PrePlanImportAddr = addr
	return h.PrePlanImportReturn, h.PrePlanImportError
}

func (h *MockHook) PostPlanImport(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostPlanImportCalled = true
	h.PostPlanImportAddr = addr
	return h.PostPlanImportReturn, h.PostPlanImportError
}

func (h *MockHook) PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PreApplyImportCalled = true
	h.PreApplyImportAddr = addr
	return h.PreApplyImportReturn, h.PreApplyImportError
}

func (h *MockHook) PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.PostApplyImportCalled = true
	h.PostApplyImportAddr = addr
	return h.PostApplyImportReturn, h.PostApplyImportError
}

func (h *MockHook) Stopping() {
	h.Lock()
	defer h.Unlock()

	h.StoppingCalled = true
}

func (h *MockHook) PostStateUpdate(new
*states.State) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostStateUpdateCalled = true + h.PostStateUpdateState = new + return h.PostStateUpdateReturn, h.PostStateUpdateError +} diff --git a/pkg/tofu/hook_stop.go b/pkg/tofu/hook_stop.go new file mode 100644 index 00000000000..7e35cbd0646 --- /dev/null +++ b/pkg/tofu/hook_stop.go @@ -0,0 +1,120 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "errors" + "sync/atomic" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +// stopHook is a private Hook implementation that OpenTofu uses to +// signal when to stop or cancel actions. +type stopHook struct { + stop uint32 +} + +var _ Hook = (*stopHook)(nil) + +func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) 
PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PrePlanImport(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostPlanImport(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) Stopping() {} + +func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) hook() (HookAction, error) { + if h.Stopped() { + return HookActionHalt, errors.New("execution halted") + } + + return HookActionContinue, nil +} + +// reset should be called within the lock context +func (h *stopHook) Reset() { + atomic.StoreUint32(&h.stop, 0) +} + +func (h *stopHook) Stop() { 
+ atomic.StoreUint32(&h.stop, 1) +} + +func (h *stopHook) Stopped() bool { + return atomic.LoadUint32(&h.stop) == 1 +} diff --git a/pkg/tofu/hook_stop_test.go b/pkg/tofu/hook_stop_test.go new file mode 100644 index 00000000000..4f667d2580c --- /dev/null +++ b/pkg/tofu/hook_stop_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestStopHook_impl(t *testing.T) { + var _ Hook = new(stopHook) +} diff --git a/pkg/tofu/hook_test.go b/pkg/tofu/hook_test.go new file mode 100644 index 00000000000..9f7dd4f5d84 --- /dev/null +++ b/pkg/tofu/hook_test.go @@ -0,0 +1,171 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "sync" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestNilHook_impl(t *testing.T) { + var _ Hook = new(NilHook) +} + +// testHook is a Hook implementation that logs the calls it receives. +// It is intended for testing that core code is emitting the correct hooks +// for a given situation. +type testHook struct { + mu sync.Mutex + Calls []*testHookCall +} + +var _ Hook = (*testHook)(nil) + +// testHookCall represents a single call in testHook. +// This hook just logs string names to make it easy to write "want" expressions +// in tests that can DeepEqual against the real calls. 
+type testHookCall struct { + Action string + InstanceID string +} + +func (h *testHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreApply", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostApply", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreDiff", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostDiff", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreProvisionInstance", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostProvisionInstance", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, 
&testHookCall{"PreProvisionInstanceStep", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostProvisionInstanceStep", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"ProvisionOutput", addr.String()}) +} + +func (h *testHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreRefresh", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostRefresh", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreImportState", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostImportState", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PrePlanImport(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PrePlanImport", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) 
PostPlanImport(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostPlanImport", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PreApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PreApplyImport", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) PostApplyImport(addr addrs.AbsResourceInstance, importing plans.ImportingSrc) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostApplyImport", addr.String()}) + return HookActionContinue, nil +} + +func (h *testHook) Stopping() { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"Stopping", ""}) +} + +func (h *testHook) PostStateUpdate(new *states.State) (HookAction, error) { + h.mu.Lock() + defer h.mu.Unlock() + h.Calls = append(h.Calls, &testHookCall{"PostStateUpdate", ""}) + return HookActionContinue, nil +} diff --git a/pkg/tofu/instance_expanders.go b/pkg/tofu/instance_expanders.go new file mode 100644 index 00000000000..e695aefc243 --- /dev/null +++ b/pkg/tofu/instance_expanders.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// graphNodeExpandsInstances is implemented by nodes that causes instances to +// be registered in the instances.Expander. +type graphNodeExpandsInstances interface { + expandsInstances() +} diff --git a/pkg/tofu/marks.go b/pkg/tofu/marks.go new file mode 100644 index 00000000000..5b46d81dccb --- /dev/null +++ b/pkg/tofu/marks.go @@ -0,0 +1,97 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +// marksEqual compares 2 unordered sets of PathValue marks for equality, with +// the comparison using the cty.PathValueMarks.Equal method. +func marksEqual(a, b []cty.PathValueMarks) bool { + if len(a) == 0 && len(b) == 0 { + return true + } + + if len(a) != len(b) { + return false + } + + less := func(s []cty.PathValueMarks) func(i, j int) bool { + return func(i, j int) bool { + // the sort only needs to be consistent, so use the GoString format + // to get a comparable value + return fmt.Sprintf("%#v", s[i]) < fmt.Sprintf("%#v", s[j]) + } + } + + sort.Slice(a, less(a)) + sort.Slice(b, less(b)) + + for i := 0; i < len(a); i++ { + if !a[i].Equal(b[i]) { + return false + } + } + + return true +} + +func copyPathValueMarks(marks cty.PathValueMarks) cty.PathValueMarks { + newMarks := make(cty.ValueMarks, len(marks.Marks)) + result := cty.PathValueMarks{Path: marks.Path} + for k, v := range marks.Marks { + newMarks[k] = v + } + result.Marks = newMarks + return result +} + +// combinePathValueMarks will combine the marks from two sets of marks with paths, ensuring that we don't duplicate marks +// for the same path, but instead combine the marks for the same path +// This ensures that we don't lose user marks when combining 2 different sets of marks for the same path +func combinePathValueMarks(marks []cty.PathValueMarks, other []cty.PathValueMarks) []cty.PathValueMarks { + // skip some work if we don't have any marks in either of the lists + if len(marks) == 0 { + return other + } + if len(other) == 0 { + return marks + } + + combined := make([]cty.PathValueMarks, 0, len(marks)) + // construct the initial set of marks + combined = append(combined, marks...) + + // check if we've already inserted this by looping over and calling .Equals(). 
+ // This isn't so nice but there is no nice comparison for cty.PathValueMarks + // so we have to do it this way + for _, mark := range other { + exists := false + for i, existing := range combined { + if mark.Path.Equals(existing.Path) { + // if we found a matching path, we should combine the marks and update the existing item + dupe := copyPathValueMarks(existing) + for k, v := range mark.Marks { + dupe.Marks[k] = v + } + combined[i] = dupe + exists = true + break + } + } + // Otherwise we haven't seen this path before, so we should add it to the list + // no merging required + if !exists { + combined = append(combined, mark) + } + } + + return combined +} diff --git a/pkg/tofu/marks_test.go b/pkg/tofu/marks_test.go new file mode 100644 index 00000000000..5d88f6ce41e --- /dev/null +++ b/pkg/tofu/marks_test.go @@ -0,0 +1,206 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/lang/marks" +) + +func TestMarksEqual(t *testing.T) { + for i, tc := range []struct { + a, b []cty.PathValueMarks + equal bool + }{ + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "A"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + false, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + cty.PathValueMarks{Path: 
cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + false, + }, + { + nil, + nil, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + nil, + false, + }, + { + nil, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks(marks.Sensitive)}, + }, + false, + }, + } { + t.Run(fmt.Sprint(i), func(t *testing.T) { + if marksEqual(tc.a, tc.b) != tc.equal { + 
t.Fatalf("marksEqual(\n%#v,\n%#v,\n) != %t\n", tc.a, tc.b, tc.equal) + } + }) + } +} + +func TestCombinePathValueMarks(t *testing.T) { + paths := map[string]cty.PathValueMarks{ + "a.b": { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + "a.c": { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + "[0]": { + Path: cty.Path{cty.IndexStep{Key: cty.NumberIntVal(0)}}, + Marks: cty.NewValueMarks("a"), + }, + "a.b": { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks("a"), + }, + } + + tests := []struct { + name string + LHS []cty.PathValueMarks + RHS []cty.PathValueMarks + Want []cty.PathValueMarks + }{ + { + name: "no marks", + LHS: []cty.PathValueMarks{}, + RHS: []cty.PathValueMarks{}, + Want: []cty.PathValueMarks{}, + }, + { + name: "one mark", + LHS: []cty.PathValueMarks{paths["a.b"]}, + RHS: []cty.PathValueMarks{}, + Want: []cty.PathValueMarks{paths["a.b"]}, + }, + { + name: "one overlapping mark", + LHS: []cty.PathValueMarks{paths["a.b"]}, + RHS: []cty.PathValueMarks{paths["a.b"]}, + Want: []cty.PathValueMarks{paths["a.b"]}, + }, + { + name: "one non-overlapping mark", + LHS: []cty.PathValueMarks{paths["a.b"]}, + RHS: []cty.PathValueMarks{paths["a.c"]}, + Want: []cty.PathValueMarks{paths["a.b"], paths["a.c"]}, + }, + { + name: "one overlapping and two non-overlapping marks", + LHS: []cty.PathValueMarks{paths["a.b"], paths["a.c"], paths["[0]"]}, + RHS: []cty.PathValueMarks{paths["a.c"]}, + Want: []cty.PathValueMarks{paths["a.b"], paths["a.c"], paths["[0]"]}, + }, + { + name: "one overlapping mark with different values", + LHS: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + RHS: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, 
cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks("OTHERMARK"), + }, + }, + Want: []cty.PathValueMarks{ + { + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks(marks.Sensitive, "OTHERMARK"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := combinePathValueMarks(test.LHS, test.RHS) + if len(got) != len(test.Want) { + t.Fatalf("incorrect result length\ngot: %#v\nwant: %#v", got, test.Want) + } + + for i, want := range test.Want { + if !got[i].Equal(want) { + t.Errorf("incorrect result\nindex: %d\ngot: %#v\nwant: %#v", i, got[i], want) + } + } + }) + } +} diff --git a/pkg/tofu/node_check.go b/pkg/tofu/node_check.go new file mode 100644 index 00000000000..a0075bec39b --- /dev/null +++ b/pkg/tofu/node_check.go @@ -0,0 +1,206 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +var ( + _ GraphNodeModulePath = (*nodeReportCheck)(nil) + _ GraphNodeExecutable = (*nodeReportCheck)(nil) +) + +// nodeReportCheck calls the ReportCheckableObjects function for our assertions +// within the check blocks. +// +// We need this to happen before the checks are actually verified and before any +// nested data blocks, so the creator of this structure should make sure this +// node is a parent of any nested data blocks. +// +// This needs to be separate to nodeExpandCheck, because the actual checks +// should happen after referenced data blocks rather than before. 
+type nodeReportCheck struct { + addr addrs.ConfigCheck +} + +func (n *nodeReportCheck) ModulePath() addrs.Module { + return n.addr.Module +} + +func (n *nodeReportCheck) Execute(ctx EvalContext, _ walkOperation) tfdiags.Diagnostics { + exp := ctx.InstanceExpander() + modInsts := exp.ExpandModule(n.ModulePath()) + + instAddrs := addrs.MakeSet[addrs.Checkable]() + for _, modAddr := range modInsts { + instAddrs.Add(n.addr.Check.Absolute(modAddr)) + } + ctx.Checks().ReportCheckableObjects(n.addr, instAddrs) + return nil +} + +func (n *nodeReportCheck) Name() string { + return n.addr.String() + " (report)" +} + +var ( + _ GraphNodeModulePath = (*nodeExpandCheck)(nil) + _ GraphNodeDynamicExpandable = (*nodeExpandCheck)(nil) + _ GraphNodeReferencer = (*nodeExpandCheck)(nil) +) + +// nodeExpandCheck creates child nodes that actually execute the assertions for +// a given check block. +// +// This must happen after any other nodes/resources/data sources that are +// referenced, so we implement GraphNodeReferencer. +// +// This needs to be separate to nodeReportCheck as nodeReportCheck must happen +// first, while nodeExpandCheck must execute after any referenced blocks. 
+type nodeExpandCheck struct { + addr addrs.ConfigCheck + config *configs.Check + + makeInstance func(addrs.AbsCheck, *configs.Check) dag.Vertex +} + +func (n *nodeExpandCheck) ModulePath() addrs.Module { + return n.addr.Module +} + +func (n *nodeExpandCheck) DynamicExpand(ctx EvalContext) (*Graph, error) { + exp := ctx.InstanceExpander() + modInsts := exp.ExpandModule(n.ModulePath()) + + var g Graph + for _, modAddr := range modInsts { + testAddr := n.addr.Check.Absolute(modAddr) + log.Printf("[TRACE] nodeExpandCheck: Node for %s", testAddr) + g.Add(n.makeInstance(testAddr, n.config)) + } + addRootNodeToGraph(&g) + + return &g, nil +} + +func (n *nodeExpandCheck) References() []*addrs.Reference { + var refs []*addrs.Reference + for _, assert := range n.config.Asserts { + // Check blocks reference anything referenced by conditions or messages + // in their check rules. + condition, _ := lang.ReferencesInExpr(addrs.ParseRef, assert.Condition) + message, _ := lang.ReferencesInExpr(addrs.ParseRef, assert.ErrorMessage) + refs = append(refs, condition...) + refs = append(refs, message...) + } + if n.config.DataResource != nil { + // We'll also always reference our nested data block if it exists, as + // there is nothing enforcing that it has to also be referenced by our + // conditions or messages. + // + // We don't need to make this addr absolute, because the check block and + // the data resource are always within the same module/instance. 
+ traversal, _ := hclsyntax.ParseTraversalAbs( + []byte(n.config.DataResource.Addr().String()), + n.config.DataResource.DeclRange.Filename, + n.config.DataResource.DeclRange.Start) + ref, _ := addrs.ParseRef(traversal) + refs = append(refs, ref) + } + return refs +} + +func (n *nodeExpandCheck) Name() string { + return n.addr.String() + " (expand)" +} + +var ( + _ GraphNodeModuleInstance = (*nodeCheckAssert)(nil) + _ GraphNodeExecutable = (*nodeCheckAssert)(nil) +) + +type nodeCheckAssert struct { + addr addrs.AbsCheck + config *configs.Check + + // We only want to actually execute the checks during the plan and apply + // operations, but we still want to validate our config during + // other operations. + executeChecks bool +} + +func (n *nodeCheckAssert) ModulePath() addrs.Module { + return n.Path().Module() +} + +func (n *nodeCheckAssert) Path() addrs.ModuleInstance { + return n.addr.Module +} + +func (n *nodeCheckAssert) Execute(ctx EvalContext, _ walkOperation) tfdiags.Diagnostics { + + // We only want to actually execute the checks during specific + // operations, such as plan and applies. + if n.executeChecks { + if status := ctx.Checks().ObjectCheckStatus(n.addr); status == checks.StatusFail || status == checks.StatusError { + // This check is already failing, so we won't try and evaluate it. + // This typically means there was an error in a data block within + // the check block. + return nil + } + + return evalCheckRules( + addrs.CheckAssertion, + n.config.Asserts, + ctx, + n.addr, + EvalDataForNoInstanceKey, + tfdiags.Warning) + + } + + // Otherwise let's still validate the config and references and return + // diagnostics if references do not exist etc. 
+ var diags tfdiags.Diagnostics + for ix, assert := range n.config.Asserts { + _, _, moreDiags := validateCheckRule(addrs.NewCheckRule(n.addr, addrs.CheckAssertion, ix), assert, ctx, EvalDataForNoInstanceKey) + diags = diags.Append(moreDiags) + } + return diags +} + +func (n *nodeCheckAssert) Name() string { + return n.addr.String() + " (assertions)" +} + +var ( + _ GraphNodeExecutable = (*nodeCheckStart)(nil) +) + +// We need to ensure that any nested data sources execute after all other +// resource changes have been applied. This node acts as a single point of +// dependency that can enforce this ordering. +type nodeCheckStart struct{} + +func (n *nodeCheckStart) Execute(context EvalContext, operation walkOperation) tfdiags.Diagnostics { + // This node doesn't actually do anything, except simplify the underlying + // graph structure. + return nil +} + +func (n *nodeCheckStart) Name() string { + return "(execute checks)" +} diff --git a/pkg/tofu/node_data_destroy.go b/pkg/tofu/node_data_destroy.go new file mode 100644 index 00000000000..70b756fafab --- /dev/null +++ b/pkg/tofu/node_data_destroy.go @@ -0,0 +1,29 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": +// it is ready to be destroyed. 
// NodeDestroyableDataResourceInstance is the graph node for a data resource
// instance that is ready to be "destroyed". Data sources have no real remote
// object, so destroying one is purely a state operation.
type NodeDestroyableDataResourceInstance struct {
	*NodeAbstractResourceInstance
}

var (
	_ GraphNodeExecutable = (*NodeDestroyableDataResourceInstance)(nil)
)

// GraphNodeExecutable
//
// Execute removes the data resource instance from state. Writing a nil
// object for the current generation is how the synchronized state API
// expresses deletion; no provider call is needed for data sources.
func (n *NodeDestroyableDataResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
	log.Printf("[TRACE] NodeDestroyableDataResourceInstance: removing state object for %s", n.Addr)
	ctx.State().SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider)
	return nil
}

// ---- begin pkg/tofu/node_data_destroy_test.go (new file) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"testing"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/states"
)

// TestNodeDataDestroyExecute seeds state with a single data resource
// instance, runs the destroy node, and expects the state to be emptied.
func TestNodeDataDestroyExecute(t *testing.T) {
	state := states.NewState()
	state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent(
		addrs.Resource{
			Mode: addrs.DataResourceMode,
			Type: "test_instance",
			Name: "foo",
		}.Instance(addrs.NoKey),
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"dynamic":{"type":"string","value":"hello"}}`),
		},
		addrs.AbsProviderConfig{
			Provider: addrs.NewDefaultProvider("test"),
			Module:   addrs.RootModule,
		},
	)
	ctx := &MockEvalContext{
		StateState: state.SyncWrapper(),
	}

	node := NodeDestroyableDataResourceInstance{&NodeAbstractResourceInstance{
		Addr: addrs.Resource{
			Mode: addrs.DataResourceMode,
			Type: "test_instance",
			Name: "foo",
		}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
	}}

	diags := node.Execute(ctx, walkApply)
	if diags.HasErrors() {
		t.Fatalf("unexpected error: %v", diags.Err())
	}

	// verify resource removed from state
	// NOTE(review): the instance stored above is a DataResourceMode resource,
	// while HasManagedResourceInstanceObjects (by its name) reports only
	// managed-mode objects — this assertion may be vacuously true even
	// before Execute runs; confirm the intended predicate.
	if state.HasManagedResourceInstanceObjects() {
		t.Fatal("resources still in state after NodeDataDestroy.Execute")
	}
}

// ---- begin pkg/tofu/node_external_reference.go (new file) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import "github.com/kubegems/opentofu/pkg/addrs"

// nodeExternalReference allows external callers (such as the testing framework)
// to provide the list of references they are making into the graph. This
// ensures that OpenTofu will not remove any nodes from the graph that might
// not be referenced from within a module but are referenced by the currently
// executing test file.
//
// This should only be added to the graph if we are executing the
// `tofu test` command.
type nodeExternalReference struct {
	ExternalReferences []*addrs.Reference
}

var (
	_ GraphNodeReferencer = (*nodeExternalReference)(nil)
)

// GraphNodeModulePath
func (n *nodeExternalReference) ModulePath() addrs.Module {
	// The external references are always made from test files, which currently
	// execute as if they are in the root module.
	return addrs.RootModule
}

// GraphNodeReferencer
func (n *nodeExternalReference) References() []*addrs.Reference {
	return n.ExternalReferences
}

// ---- begin pkg/tofu/node_local.go (new file) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// nodeExpandLocal represents a named local value in a configuration module, +// which has not yet been expanded. +type nodeExpandLocal struct { + Addr addrs.LocalValue + Module addrs.Module + Config *configs.Local +} + +var ( + _ GraphNodeReferenceable = (*nodeExpandLocal)(nil) + _ GraphNodeReferencer = (*nodeExpandLocal)(nil) + _ GraphNodeDynamicExpandable = (*nodeExpandLocal)(nil) + _ graphNodeTemporaryValue = (*nodeExpandLocal)(nil) + _ graphNodeExpandsInstances = (*nodeExpandLocal)(nil) +) + +func (n *nodeExpandLocal) expandsInstances() {} + +// graphNodeTemporaryValue +func (n *nodeExpandLocal) temporaryValue() bool { + return true +} + +func (n *nodeExpandLocal) Name() string { + path := n.Module.String() + addr := n.Addr.String() + " (expand)" + + if path != "" { + return path + "." 
+ addr + } + return addr +} + +// GraphNodeModulePath +func (n *nodeExpandLocal) ModulePath() addrs.Module { + return n.Module +} + +// GraphNodeReferenceable +func (n *nodeExpandLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// GraphNodeReferencer +func (n *nodeExpandLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, n.Config.Expr) + return refs +} + +func (n *nodeExpandLocal) DynamicExpand(ctx EvalContext) (*Graph, error) { + var g Graph + expander := ctx.InstanceExpander() + for _, module := range expander.ExpandModule(n.Module) { + o := &NodeLocal{ + Addr: n.Addr.Absolute(module), + Config: n.Config, + } + log.Printf("[TRACE] Expanding local: adding %s as %T", o.Addr.String(), o) + g.Add(o) + } + addRootNodeToGraph(&g) + return &g, nil +} + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state. 
+type NodeLocal struct { + Addr addrs.AbsLocalValue + Config *configs.Local +} + +var ( + _ GraphNodeModuleInstance = (*NodeLocal)(nil) + _ GraphNodeReferenceable = (*NodeLocal)(nil) + _ GraphNodeReferencer = (*NodeLocal)(nil) + _ GraphNodeExecutable = (*NodeLocal)(nil) + _ graphNodeTemporaryValue = (*NodeLocal)(nil) + _ dag.GraphNodeDotter = (*NodeLocal)(nil) +) + +// graphNodeTemporaryValue +func (n *NodeLocal) temporaryValue() bool { + return true +} + +func (n *NodeLocal) Name() string { + return n.Addr.String() +} + +// GraphNodeModuleInstance +func (n *NodeLocal) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeModulePath +func (n *NodeLocal) ModulePath() addrs.Module { + return n.Addr.Module.Module() +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.LocalValue} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(addrs.ParseRef, n.Config.Expr) + return refs +} + +// GraphNodeExecutable +// NodeLocal.Execute is an Execute implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. +func (n *NodeLocal) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + expr := n.Config.Expr + addr := n.Addr.LocalValue + + // We ignore diags here because any problems we might find will be found + // again in EvaluateExpr below. 
+ refs, _ := lang.ReferencesInExpr(addrs.ParseRef, expr) + for _, ref := range refs { + if ref.Subject == addr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referencing local value", + Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", addr), + Subject: ref.SourceRange.ToHCL().Ptr(), + Context: expr.Range().Ptr(), + }) + } + } + if diags.HasErrors() { + return diags + } + + val, moreDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return diags + } + + state := ctx.State() + if state == nil { + diags = diags.Append(fmt.Errorf("cannot write local value to nil state")) + return diags + } + + state.SetLocalValue(addr.Absolute(ctx.Path()), val) + + return diags +} + +// dag.GraphNodeDotter impl. +func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/tofu/node_local_test.go b/pkg/tofu/node_local_test.go new file mode 100644 index 00000000000..4f085fc95d4 --- /dev/null +++ b/pkg/tofu/node_local_test.go @@ -0,0 +1,90 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestNodeLocalExecute(t *testing.T) { + tests := []struct { + Value string + Want interface{} + Err bool + }{ + { + "hello!", + "hello!", + false, + }, + { + "", + "", + false, + }, + { + "Hello, ${local.foo}", + nil, + true, // self-referencing + }, + } + + for _, test := range tests { + t.Run(test.Value, func(t *testing.T) { + expr, diags := hclsyntax.ParseTemplate([]byte(test.Value), "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + n := &NodeLocal{ + Addr: addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + Config: &configs.Local{ + Expr: expr, + }, + } + ctx := &MockEvalContext{ + StateState: states.NewState().SyncWrapper(), + + EvaluateExprResult: hcl2shim.HCL2ValueFromConfigValue(test.Want), + } + + err := n.Execute(ctx, walkApply) + if (err != nil) != test.Err { + if err != nil { + t.Errorf("unexpected error: %s", err) + } else { + t.Errorf("successful Eval; want error") + } + } + + ms := ctx.StateState.Module(addrs.RootModuleInstance) + gotLocals := ms.LocalValues + wantLocals := map[string]cty.Value{} + if test.Want != nil { + wantLocals["foo"] = hcl2shim.HCL2ValueFromConfigValue(test.Want) + } + + if !reflect.DeepEqual(gotLocals, wantLocals) { + t.Errorf( + "wrong locals after Eval\ngot: %swant: %s", + spew.Sdump(gotLocals), spew.Sdump(wantLocals), + ) + } + }) + } + +} diff --git a/pkg/tofu/node_module_expand.go b/pkg/tofu/node_module_expand.go new file mode 100644 index 00000000000..ee21ed986b5 --- /dev/null +++ b/pkg/tofu/node_module_expand.go @@ -0,0 +1,274 @@ +// 
Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +type ConcreteModuleNodeFunc func(n *nodeExpandModule) dag.Vertex + +// nodeExpandModule represents a module call in the configuration that +// might expand into multiple module instances depending on how it is +// configured. +type nodeExpandModule struct { + Addr addrs.Module + Config *configs.Module + ModuleCall *configs.ModuleCall +} + +var ( + _ GraphNodeExecutable = (*nodeExpandModule)(nil) + _ GraphNodeReferencer = (*nodeExpandModule)(nil) + _ GraphNodeReferenceOutside = (*nodeExpandModule)(nil) + _ graphNodeExpandsInstances = (*nodeExpandModule)(nil) +) + +func (n *nodeExpandModule) expandsInstances() {} + +func (n *nodeExpandModule) Name() string { + return n.Addr.String() + " (expand)" +} + +// GraphNodeModulePath implementation +func (n *nodeExpandModule) ModulePath() addrs.Module { + return n.Addr +} + +// GraphNodeReferencer implementation +func (n *nodeExpandModule) References() []*addrs.Reference { + var refs []*addrs.Reference + + if n.ModuleCall == nil { + return nil + } + + refs = append(refs, n.DependsOn()...) + + // Expansion only uses the count and for_each expressions, so this + // particular graph node only refers to those. + // Individual variable values in the module call definition might also + // refer to other objects, but that's handled by + // NodeApplyableModuleVariable. 
+ // + // Because our Path method returns the module instance that contains + // our call, these references will be correctly interpreted as being + // in the calling module's namespace, not the namespaces of any of the + // child module instances we might expand to during our evaluation. + + if n.ModuleCall.Count != nil { + countRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, n.ModuleCall.Count) + refs = append(refs, countRefs...) + } + if n.ModuleCall.ForEach != nil { + forEachRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, n.ModuleCall.ForEach) + refs = append(refs, forEachRefs...) + } + return refs +} + +func (n *nodeExpandModule) DependsOn() []*addrs.Reference { + if n.ModuleCall == nil { + return nil + } + + var refs []*addrs.Reference + for _, traversal := range n.ModuleCall.DependsOn { + ref, diags := addrs.ParseRef(traversal) + if diags.HasErrors() { + // We ignore this here, because this isn't a suitable place to return + // errors. This situation should be caught and rejected during + // validation. + log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err()) + continue + } + + refs = append(refs, ref) + } + + return refs +} + +// GraphNodeReferenceOutside +func (n *nodeExpandModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { + return n.Addr, n.Addr.Parent() +} + +// GraphNodeExecutable +func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + expander := ctx.InstanceExpander() + _, call := n.Addr.Call() + + // nodeExpandModule itself does not have visibility into how its ancestors + // were expanded, so we use the expander here to provide all possible paths + // to our module, and register module instances with each of them. 
+ for _, module := range expander.ExpandModule(n.Addr.Parent()) { + ctx = ctx.WithPath(module) + switch { + case n.ModuleCall.Count != nil: + count, ctDiags := evaluateCountExpression(n.ModuleCall.Count, ctx) + diags = diags.Append(ctDiags) + if diags.HasErrors() { + return diags + } + expander.SetModuleCount(module, call, count) + + case n.ModuleCall.ForEach != nil: + forEach, feDiags := evaluateForEachExpression(n.ModuleCall.ForEach, ctx) + diags = diags.Append(feDiags) + if diags.HasErrors() { + return diags + } + expander.SetModuleForEach(module, call, forEach) + + default: + expander.SetModuleSingle(module, call) + } + } + + return diags + +} + +// nodeCloseModule represents an expanded module during apply, and is visited +// after all other module instance nodes. This node will depend on all module +// instance resource and outputs, and anything depending on the module should +// wait on this node. +// Besides providing a root node for dependency ordering, nodeCloseModule also +// cleans up state after all the module nodes have been evaluated, removing +// empty resources and modules from the state. +// The root module instance also closes any remaining provisioner plugins which +// do not have a lifecycle controlled by individual graph nodes. 
+type nodeCloseModule struct { + Addr addrs.Module + RootConfig *configs.Config +} + +var ( + _ GraphNodeReferenceable = (*nodeCloseModule)(nil) + _ GraphNodeReferenceOutside = (*nodeCloseModule)(nil) + _ GraphNodeExecutable = (*nodeCloseModule)(nil) +) + +func (n *nodeCloseModule) ModulePath() addrs.Module { + return n.Addr +} + +func (n *nodeCloseModule) ReferenceOutside() (selfPath, referencePath addrs.Module) { + return n.Addr.Parent(), n.Addr +} + +func (n *nodeCloseModule) ReferenceableAddrs() []addrs.Referenceable { + _, call := n.Addr.Call() + return []addrs.Referenceable{ + call, + } +} + +func (n *nodeCloseModule) Name() string { + if len(n.Addr) == 0 { + return "root" + } + return n.Addr.String() + " (close)" +} + +func (n *nodeCloseModule) IsOverridden(addr addrs.Module) bool { + if n.RootConfig == nil { + return false + } + + modConfig := n.RootConfig.Descendent(addr) + if modConfig == nil { + return false + } + + return modConfig.Module.IsOverridden +} + +func (n *nodeCloseModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + if !n.Addr.IsRoot() { + return + } + + // If this is the root module, we are cleaning up the walk, so close + // any running provisioners + diags = diags.Append(ctx.CloseProvisioners()) + + switch op { + case walkApply, walkDestroy: + state := ctx.State().Lock() + defer ctx.State().Unlock() + + for modKey, mod := range state.Modules { + // clean out any empty resources + for resKey, res := range mod.Resources { + if len(res.Instances) == 0 { + delete(mod.Resources, resKey) + } + } + + // empty non-root modules are removed normally, + // but if the module is being overridden, it should be kept + if len(mod.Resources) == 0 && !mod.Addr.IsRoot() && !n.IsOverridden(mod.Addr.Module()) { + delete(state.Modules, modKey) + } + } + return nil + default: + return nil + } +} + +// nodeValidateModule wraps a nodeExpand module for validation, ensuring that +// no expansion is attempted during evaluation, when 
count and for_each +// expressions may not be known. +type nodeValidateModule struct { + nodeExpandModule +} + +var _ GraphNodeExecutable = (*nodeValidateModule)(nil) + +// GraphNodeEvalable +func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + _, call := n.Addr.Call() + expander := ctx.InstanceExpander() + + // Modules all evaluate to single instances during validation, only to + // create a proper context within which to evaluate. All parent modules + // will be a single instance, but still get our address in the expected + // manner anyway to ensure they've been registered correctly. + for _, module := range expander.ExpandModule(n.Addr.Parent()) { + ctx = ctx.WithPath(module) + + // Validate our for_each and count expressions at a basic level + // We skip validation on known, because there will be unknown values before + // a full expansion, presuming these errors will be caught in later steps + switch { + case n.ModuleCall.Count != nil: + _, countDiags := evaluateCountExpressionValue(n.ModuleCall.Count, ctx) + diags = diags.Append(countDiags) + + case n.ModuleCall.ForEach != nil: + const unknownsAllowed = true + const tupleNotAllowed = false + _, forEachDiags := evaluateForEachExpressionValue(n.ModuleCall.ForEach, ctx, unknownsAllowed, tupleNotAllowed) + diags = diags.Append(forEachDiags) + } + + diags = diags.Append(validateDependsOn(ctx, n.ModuleCall.DependsOn)) + + // now set our own mode to single + expander.SetModuleSingle(module, call) + } + + return diags +} diff --git a/pkg/tofu/node_module_expand_test.go b/pkg/tofu/node_module_expand_test.go new file mode 100644 index 00000000000..044ad2b481d --- /dev/null +++ b/pkg/tofu/node_module_expand_test.go @@ -0,0 +1,133 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"testing"

	"github.com/hashicorp/hcl/v2/hcltest"
	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/instances"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/zclconf/go-cty/cty"
)

// TestNodeExpandModuleExecute verifies that executing an expand node with a
// count expression registers the expansion with the instance expander.
func TestNodeExpandModuleExecute(t *testing.T) {
	ctx := &MockEvalContext{
		InstanceExpanderExpander: instances.NewExpander(),
	}
	ctx.installSimpleEval()

	node := nodeExpandModule{
		Addr: addrs.Module{"child"},
		ModuleCall: &configs.ModuleCall{
			Count: hcltest.MockExprLiteral(cty.NumberIntVal(2)),
		},
	}

	err := node.Execute(ctx, walkApply)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if !ctx.InstanceExpanderCalled {
		t.Fatal("did not expand")
	}
}

func TestNodeCloseModuleExecute(t *testing.T) {
	t.Run("walkApply", func(t *testing.T) {
		state := states.NewState()
		state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
		ctx := &MockEvalContext{
			StateState: state.SyncWrapper(),
		}
		node := nodeCloseModule{Addr: addrs.Module{"child"}}
		diags := node.Execute(ctx, walkApply)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %s", diags.Err())
		}

		// module.child must still be present at this point: a non-root
		// close node returns early without pruning state; only the root
		// module's close node performs the cleanup.
		if _, ok := state.Modules["module.child"]; !ok {
			t.Fatal("module.child should not be removed from state yet")
		}

		// the root module should do all the module cleanup
		node = nodeCloseModule{Addr: addrs.RootModule}
		diags = node.Execute(ctx, walkApply)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %s", diags.Err())
		}

		// Since module.child has no resources, it should be removed
		if _, ok := state.Modules["module.child"]; ok {
			t.Fatal("module.child was not removed from state")
		}
	})

	// walkImport is a no-op
	t.Run("walkImport", func(t *testing.T) {
		state := states.NewState()
		state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
		ctx := &MockEvalContext{
			StateState: state.SyncWrapper(),
		}
		node := nodeCloseModule{Addr: addrs.Module{"child"}}

		diags := node.Execute(ctx, walkImport)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %s", diags.Err())
		}
		if _, ok := state.Modules["module.child"]; !ok {
			t.Fatal("module.child was removed from state, expected no-op")
		}
	})
}

// TestNodeValidateModuleExecute checks that validation succeeds for a valid
// count expression and reports an error for a non-numeric one.
func TestNodeValidateModuleExecute(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		ctx := &MockEvalContext{
			InstanceExpanderExpander: instances.NewExpander(),
		}
		ctx.installSimpleEval()
		node := nodeValidateModule{
			nodeExpandModule{
				Addr: addrs.Module{"child"},
				ModuleCall: &configs.ModuleCall{
					Count: hcltest.MockExprLiteral(cty.NumberIntVal(2)),
				},
			},
		}

		diags := node.Execute(ctx, walkApply)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %v", diags.Err())
		}
	})

	t.Run("invalid count", func(t *testing.T) {
		ctx := &MockEvalContext{
			InstanceExpanderExpander: instances.NewExpander(),
		}
		ctx.installSimpleEval()
		node := nodeValidateModule{
			nodeExpandModule{
				Addr: addrs.Module{"child"},
				ModuleCall: &configs.ModuleCall{
					Count: hcltest.MockExprLiteral(cty.StringVal("invalid")),
				},
			},
		}

		err := node.Execute(ctx, walkApply)
		if err == nil {
			t.Fatal("expected error, got success")
		}
	})

}

// ---- begin pkg/tofu/node_module_variable.go (new file) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// nodeExpandModuleVariable is the placeholder for an variable that has not yet had +// its module path expanded. +type nodeExpandModuleVariable struct { + Addr addrs.InputVariable + Module addrs.Module + Config *configs.Variable + Expr hcl.Expression +} + +var ( + _ GraphNodeDynamicExpandable = (*nodeExpandModuleVariable)(nil) + _ GraphNodeReferenceOutside = (*nodeExpandModuleVariable)(nil) + _ GraphNodeReferenceable = (*nodeExpandModuleVariable)(nil) + _ GraphNodeReferencer = (*nodeExpandModuleVariable)(nil) + _ graphNodeTemporaryValue = (*nodeExpandModuleVariable)(nil) + _ graphNodeExpandsInstances = (*nodeExpandModuleVariable)(nil) +) + +func (n *nodeExpandModuleVariable) expandsInstances() {} + +func (n *nodeExpandModuleVariable) temporaryValue() bool { + return true +} + +func (n *nodeExpandModuleVariable) DynamicExpand(ctx EvalContext) (*Graph, error) { + var g Graph + + // If this variable has preconditions, we need to report these checks now. + // + // We should only do this during planning as the apply phase starts with + // all the same checkable objects that were registered during the plan. 
+ var checkableAddrs addrs.Set[addrs.Checkable] + if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.Addr.InModule(n.Module)) { + checkableAddrs = addrs.MakeSet[addrs.Checkable]() + } + + expander := ctx.InstanceExpander() + for _, module := range expander.ExpandModule(n.Module) { + addr := n.Addr.Absolute(module) + if checkableAddrs != nil { + checkableAddrs.Add(addr) + } + + o := &nodeModuleVariable{ + Addr: addr, + Config: n.Config, + Expr: n.Expr, + ModuleInstance: module, + } + g.Add(o) + } + addRootNodeToGraph(&g) + + if checkableAddrs != nil { + ctx.Checks().ReportCheckableObjects(n.Addr.InModule(n.Module), checkableAddrs) + } + + return &g, nil +} + +func (n *nodeExpandModuleVariable) Name() string { + return fmt.Sprintf("%s.%s (expand)", n.Module, n.Addr.String()) +} + +// GraphNodeModulePath +func (n *nodeExpandModuleVariable) ModulePath() addrs.Module { + return n.Module +} + +// GraphNodeReferencer +func (n *nodeExpandModuleVariable) References() []*addrs.Reference { + + // If we have no value expression, we cannot depend on anything. + if n.Expr == nil { + return nil + } + + // Variables in the root don't depend on anything, because their values + // are gathered prior to the graph walk and recorded in the context. + if len(n.Module) == 0 { + return nil + } + + // Otherwise, we depend on anything referenced by our value expression. + // We ignore diagnostics here under the assumption that we'll re-eval + // all these things later and catch them then; for our purposes here, + // we only care about valid references. + // + // Due to our GraphNodeReferenceOutside implementation, the addresses + // returned by this function are interpreted in the _parent_ module from + // where our associated variable was declared, which is correct because + // our value expression is assigned within a "module" block in the parent + // module. 
+ refs, _ := lang.ReferencesInExpr(addrs.ParseRef, n.Expr) + return refs +} + +// GraphNodeReferenceOutside implementation +func (n *nodeExpandModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.Module) { + return n.Module, n.Module.Parent() +} + +// GraphNodeReferenceable +func (n *nodeExpandModuleVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// nodeModuleVariable represents a module variable input during +// the apply step. +type nodeModuleVariable struct { + Addr addrs.AbsInputVariableInstance + Config *configs.Variable // Config is the var in the config + Expr hcl.Expression // Expr is the value expression given in the call + // ModuleInstance in order to create the appropriate context for evaluating + // ModuleCallArguments, ex. so count.index and each.key can resolve + ModuleInstance addrs.ModuleInstance +} + +// Ensure that we are implementing all of the interfaces we think we are +// implementing. +var ( + _ GraphNodeModuleInstance = (*nodeModuleVariable)(nil) + _ GraphNodeExecutable = (*nodeModuleVariable)(nil) + _ graphNodeTemporaryValue = (*nodeModuleVariable)(nil) + _ dag.GraphNodeDotter = (*nodeModuleVariable)(nil) +) + +func (n *nodeModuleVariable) temporaryValue() bool { + return true +} + +func (n *nodeModuleVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeModuleInstance +func (n *nodeModuleVariable) Path() addrs.ModuleInstance { + // We execute in the parent scope (above our own module) because + // expressions in our value are resolved in that context. 
	return n.Addr.Module.Parent()
}

// GraphNodeModulePath
func (n *nodeModuleVariable) ModulePath() addrs.Module {
	return n.Addr.Module.Module()
}

// GraphNodeReferencer
func (n *nodeModuleVariable) References() []*addrs.Reference {
	// This is identical to NodeRootVariable.References
	var refs []*addrs.Reference

	if n.Config != nil {
		for _, validation := range n.Config.Validations {
			condFuncs, _ := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.Condition)
			refs = append(refs, condFuncs...)
			errFuncs, _ := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.ErrorMessage)
			refs = append(refs, errFuncs...)
		}
	}

	return refs
}

// GraphNodeExecutable
//
// Execute evaluates the variable's value (with full repetition data, or
// placeholder unknowns during validate), records it as a module call
// argument for later expression evaluation, and then runs any custom
// variable validations.
func (n *nodeModuleVariable) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	log.Printf("[TRACE] nodeModuleVariable: evaluating %s", n.Addr)

	var val cty.Value
	var err error

	switch op {
	case walkValidate:
		val, err = n.evalModuleVariable(ctx, true)
		diags = diags.Append(err)
	default:
		val, err = n.evalModuleVariable(ctx, false)
		diags = diags.Append(err)
	}
	if diags.HasErrors() {
		return diags
	}

	// Set values for arguments of a child module call, for later retrieval
	// during expression evaluation.
	_, call := n.Addr.Module.CallInstance()
	ctx.SetModuleCallArgument(call, n.Addr.Variable, val)

	return evalVariableValidations(n.Addr, n.Config, n.Expr, ctx)
}

// dag.GraphNodeDotter impl.
func (n *nodeModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
	return &dag.DotNode{
		Name: name,
		Attrs: map[string]string{
			"label": n.Name(),
			"shape": "note",
		},
	}
}

// evalModuleVariable produces the value for a particular variable as will
// be used by a child module instance.
//
// The result is a single cty.Value for this variable, already converted
// and checked against the variable's declared type constraint via
// prepareFinalInputVariableValue.
//
// NOTE(review): an earlier revision of this function returned a map keyed
// by local variable name for use with EvalContext.SetModuleCallArguments;
// that description no longer matches the (cty.Value, error) signature below.
//
// validateOnly indicates that this evaluation is only for config
// validation, and we will not have any expansion module instance
// repetition data.
func (n *nodeModuleVariable) evalModuleVariable(ctx EvalContext, validateOnly bool) (cty.Value, error) {
	var diags tfdiags.Diagnostics
	var givenVal cty.Value
	var errSourceRange tfdiags.SourceRange
	if expr := n.Expr; expr != nil {
		var moduleInstanceRepetitionData instances.RepetitionData

		switch {
		case validateOnly:
			// the instance expander does not track unknown expansion values, so we
			// have to assume all RepetitionData is unknown.
			moduleInstanceRepetitionData = instances.RepetitionData{
				CountIndex: cty.UnknownVal(cty.Number),
				EachKey:    cty.UnknownVal(cty.String),
				EachValue:  cty.DynamicVal,
			}

		default:
			// Get the repetition data for this module instance,
			// so we can create the appropriate scope for evaluating our expression
			moduleInstanceRepetitionData = ctx.InstanceExpander().GetModuleInstanceRepetitionData(n.ModuleInstance)
		}

		scope := ctx.EvaluationScope(nil, nil, moduleInstanceRepetitionData)
		val, moreDiags := scope.EvalExpr(expr, cty.DynamicPseudoType)
		diags = diags.Append(moreDiags)
		if moreDiags.HasErrors() {
			return cty.DynamicVal, diags.ErrWithWarnings()
		}
		givenVal = val
		errSourceRange = tfdiags.SourceRangeFromHCL(expr.Range())
	} else {
		// We'll use cty.NilVal to represent the variable not being set at all.
		givenVal = cty.NilVal
		errSourceRange = tfdiags.SourceRangeFromHCL(n.Config.DeclRange) // we use the declaration range as a fallback for an undefined variable
	}

	// We construct a synthetic InputValue here to pretend as if this were
	// a root module variable set from outside, just as a convenience so we
	// can reuse the InputValue type for this.
	rawVal := &InputValue{
		Value:       givenVal,
		SourceType:  ValueFromConfig,
		SourceRange: errSourceRange,
	}

	finalVal, moreDiags := prepareFinalInputVariableValue(n.Addr, rawVal, n.Config)
	diags = diags.Append(moreDiags)

	return finalVal, diags.ErrWithWarnings()
}

// ---- begin pkg/tofu/node_module_variable_test.go (new file) ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"errors"
	"reflect"
	"testing"

	"github.com/go-test/deep"
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/checks"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// TestNodeModuleVariablePath confirms that Path() reports the parent module
// instance (the scope in which the variable's value expression is evaluated).
func TestNodeModuleVariablePath(t *testing.T) {
	n := &nodeModuleVariable{
		Addr: addrs.RootModuleInstance.InputVariable("foo"),
		Config: &configs.Variable{
			Name:           "foo",
			Type:           cty.String,
			ConstraintType: cty.String,
		},
	}

	want := addrs.RootModuleInstance
	got := n.Path()
	if got.String() != want.String() {
		t.Fatalf("wrong module address %s; want %s", got, want)
	}
}

func
TestNodeModuleVariableReferenceableName(t *testing.T) { + n := &nodeExpandModuleVariable{ + Addr: addrs.InputVariable{Name: "foo"}, + Config: &configs.Variable{ + Name: "foo", + Type: cty.String, + ConstraintType: cty.String, + }, + } + + { + expected := []addrs.Referenceable{ + addrs.InputVariable{Name: "foo"}, + } + actual := n.ReferenceableAddrs() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("%#v != %#v", actual, expected) + } + } + + { + gotSelfPath, gotReferencePath := n.ReferenceOutside() + wantSelfPath := addrs.RootModuleInstance + wantReferencePath := addrs.RootModuleInstance + if got, want := gotSelfPath.String(), wantSelfPath.String(); got != want { + t.Errorf("wrong self path\ngot: %s\nwant: %s", got, want) + } + if got, want := gotReferencePath.String(), wantReferencePath.String(); got != want { + t.Errorf("wrong reference path\ngot: %s\nwant: %s", got, want) + } + } + +} + +func TestNodeModuleVariableReference(t *testing.T) { + n := &nodeExpandModuleVariable{ + Addr: addrs.InputVariable{Name: "foo"}, + Module: addrs.RootModule.Child("bar"), + Config: &configs.Variable{ + Name: "foo", + Type: cty.String, + ConstraintType: cty.String, + }, + Expr: &hclsyntax.ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "foo"}, + }, + }, + } + + want := []*addrs.Reference{ + { + Subject: addrs.InputVariable{Name: "foo"}, + }, + } + got := n.References() + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestNodeModuleVariableReference_grandchild(t *testing.T) { + n := &nodeExpandModuleVariable{ + Addr: addrs.InputVariable{Name: "foo"}, + Module: addrs.RootModule.Child("bar"), + Config: &configs.Variable{ + Name: "foo", + Type: cty.String, + ConstraintType: cty.String, + }, + Expr: &hclsyntax.ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{Name: "var"}, + hcl.TraverseAttr{Name: "foo"}, + }, + }, + } + + want := []*addrs.Reference{ + { + Subject: 
addrs.InputVariable{Name: "foo"}, + }, + } + got := n.References() + for _, problem := range deep.Equal(got, want) { + t.Error(problem) + } +} + +func TestNodeModuleVariableConstraints(t *testing.T) { + // This is a little extra convoluted to poke at some edge cases that have cropped up in the past around + // evaluating dependent nodes between the plan -> apply and destroy cycle. + m := testModuleInline(t, map[string]string{ + "main.tf": ` + variable "input" { + type = string + validation { + condition = var.input != "" + error_message = "Input must not be empty." + } + } + + module "child" { + source = "./child" + input = var.input + } + + provider "test" { + alias = "secondary" + test_string = module.child.output + } + + resource "test_object" "resource" { + provider = test.secondary + test_string = "test string" + } + + `, + "child/main.tf": ` + variable "input" { + type = string + validation { + condition = var.input != "" + error_message = "Input must not be empty." + } + } + provider "test" { + test_string = "foo" + } + resource "test_object" "resource" { + test_string = var.input + } + output "output" { + value = test_object.resource.id + } + `, + }) + + checkableObjects := []addrs.Checkable{ + addrs.InputVariable{Name: "input"}.Absolute(addrs.RootModuleInstance), + addrs.InputVariable{Name: "input"}.Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + } + + p := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "test_string": { + Type: cty.String, + Required: true, + }, + }, + }}, + }, + }, + } + p.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) { + if 
req.Config.GetAttr("test_string").IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing test_string value")) + } + return resp + } + + ctxOpts := &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + } + + t.Run("pass", func(t *testing.T) { + ctx := testContext2(t, ctxOpts) + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal("beep"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoDiagnostics(t, diags) + + for _, addr := range checkableObjects { + result := plan.Checks.GetObjectResult(addr) + if result == nil { + t.Fatalf("no check result for %s in the plan", addr) + } + if got, want := result.Status, checks.StatusPass; got != want { + t.Fatalf("wrong check status for %s during planning\ngot: %s\nwant: %s", addr, got, want) + } + } + + state, diags := ctx.Apply(plan, m) + assertNoDiagnostics(t, diags) + for _, addr := range checkableObjects { + result := state.CheckResults.GetObjectResult(addr) + if result == nil { + t.Fatalf("no check result for %s in the final state", addr) + } + if got, want := result.Status, checks.StatusPass; got != want { + t.Errorf("wrong check status for %s after apply\ngot: %s\nwant: %s", addr, got, want) + } + } + + plan, diags = ctx.Plan(m, state, &PlanOpts{ + Mode: plans.DestroyMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal("beep"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoDiagnostics(t, diags) + + state, diags = ctx.Apply(plan, m) + assertNoDiagnostics(t, diags) + for _, addr := range checkableObjects { + result := state.CheckResults.GetObjectResult(addr) + if result == nil { + t.Fatalf("no check result for %s in the final state", addr) + } + if got, want := result.Status, checks.StatusPass; got != want { + t.Errorf("wrong check status for %s after apply\ngot: %s\nwant: 
%s", addr, got, want) + } + } + }) + + t.Run("fail", func(t *testing.T) { + ctx := testContext2(t, ctxOpts) + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "input": &InputValue{ + Value: cty.StringVal(""), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatalf("succeeded; want error") + } + + const wantSummary = "Invalid value for variable" + found := false + for _, diag := range diags { + if diag.Severity() == tfdiags.Error && diag.Description().Summary == wantSummary { + found = true + break + } + } + + if !found { + t.Fatalf("missing expected error\nwant summary: %s\ngot: %s", wantSummary, diags.Err().Error()) + } + }) +} diff --git a/pkg/tofu/node_output.go b/pkg/tofu/node_output.go new file mode 100644 index 00000000000..973865cff03 --- /dev/null +++ b/pkg/tofu/node_output.go @@ -0,0 +1,624 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// nodeExpandOutput is the placeholder for a non-root module output that has +// not yet had its module path expanded. +type nodeExpandOutput struct { + Addr addrs.OutputValue + Module addrs.Module + Config *configs.Output + Destroying bool + RefreshOnly bool + + // Planning is set to true when this node is in a graph that was produced + // by the plan graph builder, as opposed to the apply graph builder. 
+ // This quirk is just because we share the same node type between both + // phases but in practice there are a few small differences in the actions + // we need to take between plan and apply. See method DynamicExpand for + // details. + Planning bool +} + +var ( + _ GraphNodeReferenceable = (*nodeExpandOutput)(nil) + _ GraphNodeReferencer = (*nodeExpandOutput)(nil) + _ GraphNodeReferenceOutside = (*nodeExpandOutput)(nil) + _ GraphNodeDynamicExpandable = (*nodeExpandOutput)(nil) + _ graphNodeTemporaryValue = (*nodeExpandOutput)(nil) + _ graphNodeExpandsInstances = (*nodeExpandOutput)(nil) +) + +func (n *nodeExpandOutput) expandsInstances() {} + +func (n *nodeExpandOutput) temporaryValue() bool { + // non root outputs are temporary + return !n.Module.IsRoot() +} + +func (n *nodeExpandOutput) DynamicExpand(ctx EvalContext) (*Graph, error) { + expander := ctx.InstanceExpander() + changes := ctx.Changes() + + // If this is an output value that participates in custom condition checks + // (i.e. it has preconditions or postconditions) then the check state + // wants to know the addresses of the checkable objects so that it can + // treat them as unknown status if we encounter an error before actually + // visiting the checks. + // + // We must do this only during planning, because the apply phase will start + // with all of the same checkable objects that were registered during the + // planning phase. Consumers of our JSON plan and state formats expect + // that the set of checkable objects will be consistent between the plan + // and any state snapshots created during apply, and that only the statuses + // of those objects will have changed. 
+ var checkableAddrs addrs.Set[addrs.Checkable] + if n.Planning { + if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.Addr.InModule(n.Module)) { + checkableAddrs = addrs.MakeSet[addrs.Checkable]() + } + } + + var g Graph + for _, module := range expander.ExpandModule(n.Module) { + absAddr := n.Addr.Absolute(module) + if checkableAddrs != nil { + checkableAddrs.Add(absAddr) + } + + // Find any recorded change for this output + var change *plans.OutputChangeSrc + var outputChanges []*plans.OutputChangeSrc + if module.IsRoot() { + outputChanges = changes.GetRootOutputChanges() + } else { + parent, call := module.Call() + outputChanges = changes.GetOutputChanges(parent, call) + } + for _, c := range outputChanges { + if c.Addr.String() == absAddr.String() { + change = c + break + } + } + + var node dag.Vertex + switch { + case module.IsRoot() && n.Destroying: + node = &NodeDestroyableOutput{ + Addr: absAddr, + Planning: n.Planning, + } + + default: + node = &NodeApplyableOutput{ + Addr: absAddr, + Config: n.Config, + Change: change, + RefreshOnly: n.RefreshOnly, + DestroyApply: n.Destroying, + Planning: n.Planning, + } + } + + log.Printf("[TRACE] Expanding output: adding %s as %T", absAddr.String(), node) + g.Add(node) + } + addRootNodeToGraph(&g) + + if checkableAddrs != nil { + checkState := ctx.Checks() + checkState.ReportCheckableObjects(n.Addr.InModule(n.Module), checkableAddrs) + } + + return &g, nil +} + +func (n *nodeExpandOutput) Name() string { + path := n.Module.String() + addr := n.Addr.String() + " (expand)" + if path != "" { + return path + "." + addr + } + return addr +} + +// GraphNodeModulePath +func (n *nodeExpandOutput) ModulePath() addrs.Module { + return n.Module +} + +// GraphNodeReferenceable +func (n *nodeExpandOutput) ReferenceableAddrs() []addrs.Referenceable { + // An output in the root module can't be referenced at all. 
+ if n.Module.IsRoot() { + return nil + } + + // the output is referenced through the module call, and via the + // module itself. + _, call := n.Module.Call() + callOutput := addrs.ModuleCallOutput{ + Call: call, + Name: n.Addr.Name, + } + + // Otherwise, we can reference the output via the + // module call itself + return []addrs.Referenceable{call, callOutput} +} + +// GraphNodeReferenceOutside implementation +func (n *nodeExpandOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { + // Output values have their expressions resolved in the context of the + // module where they are defined. + referencePath = n.Module + + // ...but they are referenced in the context of their calling module. + selfPath = referencePath.Parent() + + return // uses named return values +} + +// GraphNodeReferencer +func (n *nodeExpandOutput) References() []*addrs.Reference { + // DestroyNodes do not reference anything. + if n.Module.IsRoot() && n.Destroying { + return nil + } + + return referencesForOutput(n.Config) +} + +// NodeApplyableOutput represents an output that is "applyable": +// it is ready to be applied. +type NodeApplyableOutput struct { + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config + // If this is being evaluated during apply, we may have a change recorded already + Change *plans.OutputChangeSrc + + // Refresh-only mode means that any failing output preconditions are + // reported as warnings rather than errors + RefreshOnly bool + + // DestroyApply indicates that we are applying a destroy plan, and do not + // need to account for conditional blocks. 
+ DestroyApply bool + + Planning bool +} + +var ( + _ GraphNodeModuleInstance = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) + _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) + _ GraphNodeExecutable = (*NodeApplyableOutput)(nil) + _ graphNodeTemporaryValue = (*NodeApplyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) +) + +func (n *NodeApplyableOutput) temporaryValue() bool { + // this must always be evaluated if it is a root module output + return !n.Addr.Module.IsRoot() +} + +func (n *NodeApplyableOutput) Name() string { + return n.Addr.String() +} + +// GraphNodeModuleInstance +func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeModulePath +func (n *NodeApplyableOutput) ModulePath() addrs.Module { + return n.Addr.Module.Module() +} + +func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.Module) { + // Output values have their expressions resolved in the context of the + // module where they are defined. + referencePath = addr.Module.Module() + + // ...but they are referenced in the context of their calling module. + selfPath = addr.Module.Parent().Module() + + return // uses named return values +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.Module) { + return referenceOutsideForOutput(n.Addr) +} + +func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { + // An output in the root module can't be referenced at all. + if addr.Module.IsRoot() { + return nil + } + + // Otherwise, we can be referenced via a reference to our output name + // on the parent module's call, or via a reference to the entire call. + // e.g. module.foo.bar or just module.foo . 
+ // Note that our ReferenceOutside method causes these addresses to be + // relative to the calling module, not the module where the output + // was declared. + _, outp := addr.ModuleCallOutput() + _, call := addr.Module.CallInstance() + + return []addrs.Referenceable{outp, call} +} + +// GraphNodeReferenceable +func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) +} + +func referencesForOutput(c *configs.Output) []*addrs.Reference { + var refs []*addrs.Reference + + impRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, c.Expr) + expRefs, _ := lang.References(addrs.ParseRef, c.DependsOn) + + refs = append(refs, impRefs...) + refs = append(refs, expRefs...) + + for _, check := range c.Preconditions { + condRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, check.Condition) + refs = append(refs, condRefs...) + errRefs, _ := lang.ReferencesInExpr(addrs.ParseRef, check.ErrorMessage) + refs = append(refs, errRefs...) + } + + return refs +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []*addrs.Reference { + return referencesForOutput(n.Config) +} + +// GraphNodeExecutable +func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + state := ctx.State() + if state == nil { + return + } + + changes := ctx.Changes() // may be nil, if we're not working on a changeset + + val := cty.UnknownVal(cty.DynamicPseudoType) + changeRecorded := n.Change != nil + // we we have a change recorded, we don't need to re-evaluate if the value + // was known + if changeRecorded { + change, err := n.Change.Decode() + diags = diags.Append(err) + if err == nil { + val = change.After + } + } + + // Checks are not evaluated during a destroy. The checks may fail, may not + // be valid, or may not have been registered at all. 
+ if !n.DestroyApply { + checkRuleSeverity := tfdiags.Error + if n.RefreshOnly { + checkRuleSeverity = tfdiags.Warning + } + checkDiags := evalCheckRules( + addrs.OutputPrecondition, + n.Config.Preconditions, + ctx, n.Addr, EvalDataForNoInstanceKey, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + return diags // failed preconditions prevent further evaluation + } + } + + // If there was no change recorded, or the recorded change was not wholly + // known, then we need to re-evaluate the output + if !changeRecorded || !val.IsWhollyKnown() { + switch { + // If the module is not being overridden, we proceed normally + case !n.Config.IsOverridden: + // This has to run before we have a state lock, since evaluation also + // reads the state + var evalDiags tfdiags.Diagnostics + val, evalDiags = ctx.EvaluateExpr(n.Config.Expr, cty.DynamicPseudoType, nil) + diags = diags.Append(evalDiags) + + // If the module is being overridden and we have a value to use, + // we just use it + case n.Config.OverrideValue != nil: + val = *n.Config.OverrideValue + + // If the module is being overridden, but we don't have any value to use, + // we just set it to null + default: + val = cty.NilVal + } + + // We'll handle errors below, after we have loaded the module. + // Outputs don't have a separate mode for validation, so validate + // depends_on expressions here too + diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) + + // For root module outputs in particular, an output value must be + // statically declared as sensitive in order to dynamically return + // a sensitive result, to help avoid accidental exposure in the state + // of a sensitive value that the user doesn't want to include there. 
+ if n.Addr.Module.IsRoot() { + if !n.Config.Sensitive && marks.Contains(val, marks.Sensitive) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Output refers to sensitive values", + Detail: `To reduce the risk of accidentally exporting sensitive data that was intended to be only internal, OpenTofu requires that any root module output containing sensitive data be explicitly marked as sensitive, to confirm your intent. + +If you do intend to export this data, annotate the output value as sensitive by adding the following argument: + sensitive = true`, + Subject: n.Config.DeclRange.Ptr(), + }) + } + } + } + + // handling the interpolation error + if diags.HasErrors() { + if flagWarnOutputErrors { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr, diags.Err()) + // if we're continuing, make sure the output is included, and + // marked as unknown. If the evaluator was able to find a type + // for the value in spite of the error then we'll use it. + n.setValue(state, changes, cty.UnknownVal(val.Type())) + + // Keep existing warnings, while converting errors to warnings. + // This is not meant to be the normal path, so there no need to + // make the errors pretty. + var warnings tfdiags.Diagnostics + for _, d := range diags { + switch d.Severity() { + case tfdiags.Warning: + warnings = warnings.Append(d) + case tfdiags.Error: + desc := d.Description() + warnings = warnings.Append(tfdiags.SimpleWarning(fmt.Sprintf("%s:%s", desc.Summary, desc.Detail))) + } + } + + return warnings + } + return diags + } + n.setValue(state, changes, val) + + // If we were able to evaluate a new value, we can update that in the + // refreshed state as well. + if state = ctx.RefreshState(); state != nil && val.IsWhollyKnown() { + // we only need to update the state, do not pass in the changes again + n.setValue(state, nil, val) + } + + return diags +} + +// dag.GraphNodeDotter impl. 
+func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} + +// NodeDestroyableOutput represents an output that is "destroyable": +// its application will remove the output from the state. +type NodeDestroyableOutput struct { + Addr addrs.AbsOutputValue + Planning bool +} + +var ( + _ GraphNodeExecutable = (*NodeDestroyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) +) + +func (n *NodeDestroyableOutput) Name() string { + return fmt.Sprintf("%s (destroy)", n.Addr.String()) +} + +// GraphNodeModulePath +func (n *NodeDestroyableOutput) ModulePath() addrs.Module { + return n.Addr.Module.Module() +} + +func (n *NodeDestroyableOutput) temporaryValue() bool { + // this must always be evaluated if it is a root module output + return !n.Addr.Module.IsRoot() +} + +// GraphNodeExecutable +func (n *NodeDestroyableOutput) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + state := ctx.State() + if state == nil { + return nil + } + + // if this is a root module, try to get a before value from the state for + // the diff + sensitiveBefore := false + before := cty.NullVal(cty.DynamicPseudoType) + mod := state.Module(n.Addr.Module) + if n.Addr.Module.IsRoot() && mod != nil { + if o, ok := mod.OutputValues[n.Addr.OutputValue.Name]; ok { + sensitiveBefore = o.Sensitive + before = o.Value + } else { + // If the output was not in state, a delete change would + // be meaningless, so exit early. 
+ return nil + + } + } + + changes := ctx.Changes() + if changes != nil && n.Planning { + change := &plans.OutputChange{ + Addr: n.Addr, + Sensitive: sensitiveBefore, + Change: plans.Change{ + Action: plans.Delete, + Before: before, + After: cty.NullVal(cty.DynamicPseudoType), + }, + } + + cs, err := change.Encode() + if err != nil { + // Should never happen, since we just constructed this right above + panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err)) + } + log.Printf("[TRACE] NodeDestroyableOutput: Saving %s change for %s in changeset", change.Action, n.Addr) + + changes.RemoveOutputChange(n.Addr) // remove any existing planned change, if present + changes.AppendOutputChange(cs) // add the new planned change + } + + state.RemoveOutputValue(n.Addr) + return nil +} + +// dag.GraphNodeDotter impl. +func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} + +func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { + if changes != nil && n.Planning { + // if this is a root module, try to get a before value from the state for + // the diff + sensitiveBefore := false + before := cty.NullVal(cty.DynamicPseudoType) + + // is this output new to our state? + newOutput := true + + mod := state.Module(n.Addr.Module) + if n.Addr.Module.IsRoot() && mod != nil { + for name, o := range mod.OutputValues { + if name == n.Addr.OutputValue.Name { + before = o.Value + sensitiveBefore = o.Sensitive + newOutput = false + break + } + } + } + + // We will not show the value if either the before or after are marked + // as sensitive. We can show the value again once sensitivity is + // removed from both the config and the state. 
+ sensitiveChange := sensitiveBefore || n.Config.Sensitive + + // strip any marks here just to be sure we don't panic on the True comparison + unmarkedVal, _ := val.UnmarkDeep() + + action := plans.Update + switch { + case val.IsNull() && before.IsNull(): + // This is separate from the NoOp case below, since we can ignore + // sensitivity here when there are only null values. + action = plans.NoOp + + case newOutput: + // This output was just added to the configuration + action = plans.Create + + case val.IsWhollyKnown() && + unmarkedVal.Equals(before).True() && + n.Config.Sensitive == sensitiveBefore: + // Sensitivity must also match to be a NoOp. + // Theoretically marks may not match here, but sensitivity is the + // only one we can act on, and the state will have been loaded + // without any marks to consider. + action = plans.NoOp + } + + change := &plans.OutputChange{ + Addr: n.Addr, + Sensitive: sensitiveChange, + Change: plans.Change{ + Action: action, + Before: before, + After: val, + }, + } + + cs, err := change.Encode() + if err != nil { + // Should never happen, since we just constructed this right above + panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err)) + } + log.Printf("[TRACE] setValue: Saving %s change for %s in changeset", change.Action, n.Addr) + changes.AppendOutputChange(cs) // add the new planned change + } + + if changes != nil && !n.Planning { + // During apply there is no longer any change to track, so we must + // ensure the state is updated and not overridden by a change. + changes.RemoveOutputChange(n.Addr) + } + + // Null outputs must be saved for modules so that they can still be + // evaluated. Null root outputs are removed entirely, which is always fine + // because they can't be referenced by anything else in the configuration. 
+ if n.Addr.Module.IsRoot() && val.IsNull() { + log.Printf("[TRACE] setValue: Removing %s from state (it is now null)", n.Addr) + state.RemoveOutputValue(n.Addr) + return + } + + log.Printf("[TRACE] setValue: Saving value for %s in state", n.Addr) + + // non-root outputs need to keep sensitive marks for evaluation, but are + // not serialized. + if n.Addr.Module.IsRoot() { + val, _ = val.UnmarkDeep() + val = cty.UnknownAsNull(val) + } + + state.SetOutputValue(n.Addr, val, n.Config.Sensitive) +} diff --git a/pkg/tofu/node_output_test.go b/pkg/tofu/node_output_test.go new file mode 100644 index 00000000000..cdb34dfe41e --- /dev/null +++ b/pkg/tofu/node_output_test.go @@ -0,0 +1,192 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestNodeApplyableOutputExecute_knownValue(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = states.NewState().SyncWrapper() + ctx.RefreshStateState = states.NewState().SyncWrapper() + ctx.ChecksState = checks.NewState(nil) + + config := &configs.Output{Name: "map-output"} + addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) + node := &NodeApplyableOutput{Config: config, Addr: addr} + val := cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + }) + ctx.EvaluateExprResult = val + + err := node.Execute(ctx, walkApply) + if err != nil { + t.Fatalf("unexpected execute error: %s", err) + } + + outputVal := ctx.StateState.OutputValue(addr) + if got, want := outputVal.Value, val; !got.RawEquals(want) { + t.Errorf("wrong output value in state\n 
got: %#v\nwant: %#v", got, want) + } + + if !ctx.RefreshStateCalled { + t.Fatal("should have called RefreshState, but didn't") + } + refreshOutputVal := ctx.RefreshStateState.OutputValue(addr) + if got, want := refreshOutputVal.Value, val; !got.RawEquals(want) { + t.Fatalf("wrong output value in refresh state\n got: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableOutputExecute_noState(t *testing.T) { + ctx := new(MockEvalContext) + + config := &configs.Output{Name: "map-output"} + addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) + node := &NodeApplyableOutput{Config: config, Addr: addr} + val := cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + }) + ctx.EvaluateExprResult = val + + err := node.Execute(ctx, walkApply) + if err != nil { + t.Fatalf("unexpected execute error: %s", err) + } +} + +func TestNodeApplyableOutputExecute_invalidDependsOn(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = states.NewState().SyncWrapper() + ctx.ChecksState = checks.NewState(nil) + + config := &configs.Output{ + Name: "map-output", + DependsOn: []hcl.Traversal{ + { + hcl.TraverseRoot{Name: "test_instance"}, + hcl.TraverseAttr{Name: "foo"}, + hcl.TraverseAttr{Name: "bar"}, + }, + }, + } + addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) + node := &NodeApplyableOutput{Config: config, Addr: addr} + val := cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b"), + }) + ctx.EvaluateExprResult = val + + diags := node.Execute(ctx, walkApply) + if !diags.HasErrors() { + t.Fatal("expected execute error, but there was none") + } + if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + t.Errorf("expected error to include %q, but was: %s", want, got) + } +} + +func TestNodeApplyableOutputExecute_sensitiveValueNotOutput(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = states.NewState().SyncWrapper() + ctx.ChecksState = 
checks.NewState(nil) + + config := &configs.Output{Name: "map-output"} + addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) + node := &NodeApplyableOutput{Config: config, Addr: addr} + val := cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b").Mark(marks.Sensitive), + }) + ctx.EvaluateExprResult = val + + diags := node.Execute(ctx, walkApply) + if !diags.HasErrors() { + t.Fatal("expected execute error, but there was none") + } + if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { + t.Errorf("expected error to include %q, but was: %s", want, got) + } +} + +func TestNodeApplyableOutputExecute_sensitiveValueAndOutput(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = states.NewState().SyncWrapper() + ctx.ChecksState = checks.NewState(nil) + + config := &configs.Output{ + Name: "map-output", + Sensitive: true, + } + addr := addrs.OutputValue{Name: config.Name}.Absolute(addrs.RootModuleInstance) + node := &NodeApplyableOutput{Config: config, Addr: addr} + val := cty.MapVal(map[string]cty.Value{ + "a": cty.StringVal("b").Mark(marks.Sensitive), + }) + ctx.EvaluateExprResult = val + + err := node.Execute(ctx, walkApply) + if err != nil { + t.Fatalf("unexpected execute error: %s", err) + } + + // Unmarked value should be stored in state + outputVal := ctx.StateState.OutputValue(addr) + want, _ := val.UnmarkDeep() + if got := outputVal.Value; !got.RawEquals(want) { + t.Errorf("wrong output value in state\n got: %#v\nwant: %#v", got, want) + } +} + +func TestNodeDestroyableOutputExecute(t *testing.T) { + outputAddr := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetOutputValue("foo", cty.StringVal("bar"), false) + state.OutputValue(outputAddr) + + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + } + node := NodeDestroyableOutput{Addr: outputAddr} + + diags := 
node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) + } + if state.OutputValue(outputAddr) != nil { + t.Fatal("Unexpected outputs in state after removal") + } +} + +func TestNodeDestroyableOutputExecute_notInState(t *testing.T) { + outputAddr := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) + + state := states.NewState() + + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + } + node := NodeDestroyableOutput{Addr: outputAddr} + + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) + } + if state.OutputValue(outputAddr) != nil { + t.Fatal("Unexpected outputs in state after removal") + } +} diff --git a/pkg/tofu/node_provider.go b/pkg/tofu/node_provider.go new file mode 100644 index 00000000000..d36c130a2de --- /dev/null +++ b/pkg/tofu/node_provider.go @@ -0,0 +1,184 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// NodeApplyableProvider represents a provider during an apply. 
+type NodeApplyableProvider struct { + *NodeAbstractProvider +} + +var ( + _ GraphNodeExecutable = (*NodeApplyableProvider)(nil) +) + +// GraphNodeExecutable +func (n *NodeApplyableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + _, err := ctx.InitProvider(n.Addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + provider, _, err := getProvider(ctx, n.Addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + switch op { + case walkValidate: + log.Printf("[TRACE] NodeApplyableProvider: validating configuration for %s", n.Addr) + return diags.Append(n.ValidateProvider(ctx, provider)) + case walkPlan, walkPlanDestroy, walkApply, walkDestroy: + log.Printf("[TRACE] NodeApplyableProvider: configuring %s", n.Addr) + return diags.Append(n.ConfigureProvider(ctx, provider, false)) + case walkImport: + log.Printf("[TRACE] NodeApplyableProvider: configuring %s (requiring that configuration is wholly known)", n.Addr) + return diags.Append(n.ConfigureProvider(ctx, provider, true)) + } + return diags +} + +func (n *NodeApplyableProvider) ValidateProvider(ctx EvalContext, provider providers.Interface) (diags tfdiags.Diagnostics) { + + configBody := buildProviderConfig(ctx, n.Addr, n.ProviderConfig()) + + // if a provider config is empty (only an alias), return early and don't continue + // validation. validate doesn't need to fully configure the provider itself, so + // skipping a provider with an implied configuration won't prevent other validation from completing. 
+ _, noConfigDiags := configBody.Content(&hcl.BodySchema{}) + if !noConfigDiags.HasErrors() { + return nil + } + + schemaResp := provider.GetProviderSchema() + diags = diags.Append(schemaResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) + if diags.HasErrors() { + return diags + } + + configSchema := schemaResp.Provider.Block + if configSchema == nil { + // Should never happen in real code, but often comes up in tests where + // mock schemas are being used that tend to be incomplete. + log.Printf("[WARN] ValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) + configSchema = &configschema.Block{} + } + + configVal, _, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + if evalDiags.HasErrors() { + return diags.Append(evalDiags) + } + diags = diags.Append(evalDiags) + + // If our config value contains any marked values, ensure those are + // stripped out before sending this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() + + req := providers.ValidateProviderConfigRequest{ + Config: unmarkedConfigVal, + } + + validateResp := provider.ValidateProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) + + return diags +} + +// ConfigureProvider configures a provider that is already initialized and retrieved. +// If verifyConfigIsKnown is true, ConfigureProvider will return an error if the +// provider configVal is not wholly known and is meant only for use during import. 
+func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider providers.Interface, verifyConfigIsKnown bool) (diags tfdiags.Diagnostics) { + config := n.ProviderConfig() + + configBody := buildProviderConfig(ctx, n.Addr, config) + + resp := provider.GetProviderSchema() + diags = diags.Append(resp.Diagnostics.InConfigBody(configBody, n.Addr.String())) + if diags.HasErrors() { + return diags + } + + configSchema := resp.Provider.Block + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return diags + } + + if verifyConfigIsKnown && !configVal.IsWhollyKnown() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration", + Detail: fmt.Sprintf("The configuration for %s depends on values that cannot be determined until apply.", n.Addr), + Subject: &config.DeclRange, + }) + return diags + } + + // If our config value contains any marked values, ensure those are + // stripped out before sending this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() + + // Allow the provider to validate and insert any defaults into the full + // configuration. + req := providers.ValidateProviderConfigRequest{ + Config: unmarkedConfigVal, + } + + // ValidateProviderConfig is only used for validation. We are intentionally + // ignoring the PreparedConfig field to maintain existing behavior. + validateResp := provider.ValidateProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics.InConfigBody(configBody, n.Addr.String())) + if diags.HasErrors() && config == nil { + // If there isn't an explicit "provider" block in the configuration, + // this error message won't be very clear. Add some detail to the error + // message in this case. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider configuration", + fmt.Sprintf(providerConfigErr, n.Addr.Provider), + )) + } + + if diags.HasErrors() { + return diags + } + + // If the provider returns something different, log a warning to help + // indicate to provider developers that the value is not used. + preparedCfg := validateResp.PreparedConfig + if preparedCfg != cty.NilVal && !preparedCfg.IsNull() && !preparedCfg.RawEquals(unmarkedConfigVal) { + log.Printf("[WARN] ValidateProviderConfig from %q changed the config value, but that value is unused", n.Addr) + } + + configDiags := ctx.ConfigureProvider(n.Addr, unmarkedConfigVal) + diags = diags.Append(configDiags.InConfigBody(configBody, n.Addr.String())) + if diags.HasErrors() && config == nil { + // If there isn't an explicit "provider" block in the configuration, + // this error message won't be very clear. Add some detail to the error + // message in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider configuration", + fmt.Sprintf(providerConfigErr, n.Addr.Provider), + )) + } + return diags +} + +const providerConfigErr = `Provider %q requires explicit configuration. Add a provider block to the root module and configure the provider's required arguments as described in the provider documentation. +` diff --git a/pkg/tofu/node_provider_abstract.go b/pkg/tofu/node_provider_abstract.go new file mode 100644 index 00000000000..70dbdddd007 --- /dev/null +++ b/pkg/tofu/node_provider_abstract.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + + "github.com/kubegems/opentofu/pkg/dag" +) + +// ConcreteProviderNodeFunc is a callback type used to convert an +// abstract provider to a concrete one of some type. +type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex + +// NodeAbstractProvider represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. +type NodeAbstractProvider struct { + Addr addrs.AbsProviderConfig + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *configs.Provider + Schema *configschema.Block +} + +var ( + _ GraphNodeModulePath = (*NodeAbstractProvider)(nil) + _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) + _ GraphNodeProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) +) + +func (n *NodeAbstractProvider) Name() string { + return n.Addr.String() +} + +// GraphNodeModuleInstance +func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { + // Providers cannot be contained inside an expanded module, so this shim + // converts our module path to the correct ModuleInstance. 
+ return n.Addr.Module.UnkeyedInstanceShim() +} + +// GraphNodeModulePath +func (n *NodeAbstractProvider) ModulePath() addrs.Module { + return n.Addr.Module +} + +// GraphNodeReferencer +func (n *NodeAbstractProvider) References() []*addrs.Reference { + if n.Config == nil || n.Schema == nil { + return nil + } + + return ReferencesFromConfig(n.Config.Config, n.Schema) +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.Addr +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { + if n.Config == nil { + return nil + } + + return n.Config +} + +// GraphNodeAttachProvider +func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { + n.Config = c +} + +// GraphNodeAttachProviderConfigSchema impl. +func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { + n.Schema = schema +} + +// GraphNodeDotter impl. +func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} diff --git a/pkg/tofu/node_provider_eval.go b/pkg/tofu/node_provider_eval.go new file mode 100644 index 00000000000..1e4dca21c90 --- /dev/null +++ b/pkg/tofu/node_provider_eval.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "github.com/kubegems/opentofu/pkg/tfdiags" + +// NodeEvalableProvider represents a provider during an "eval" walk. +// This special provider node type just initializes a provider and +// fetches its schema, without configuring it or otherwise interacting +// with it. 
+type NodeEvalableProvider struct { + *NodeAbstractProvider +} + +var _ GraphNodeExecutable = (*NodeEvalableProvider)(nil) + +// GraphNodeExecutable +func (n *NodeEvalableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + _, err := ctx.InitProvider(n.Addr) + return diags.Append(err) +} diff --git a/pkg/tofu/node_provider_test.go b/pkg/tofu/node_provider_test.go new file mode 100644 index 00000000000..268e3c76620 --- /dev/null +++ b/pkg/tofu/node_provider_test.go @@ -0,0 +1,529 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestNodeApplyableProviderExecute(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "user": cty.StringVal("hello"), + }), + } + + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "user": { + Type: cty.String, + Required: true, + }, + "pw": { + Type: cty.String, + Required: true, + }, + }, + } + provider := mockProviderWithConfigSchema(schema) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + ctx.ProviderInputValues = map[string]cty.Value{ + "pw": cty.StringVal("so secret"), + } + + if diags := n.Execute(ctx, walkApply); diags.HasErrors() { + 
t.Fatalf("err: %s", diags.Err()) + } + + if !ctx.ConfigureProviderCalled { + t.Fatal("should be called") + } + + gotObj := ctx.ConfigureProviderConfig + if !gotObj.Type().HasAttribute("user") { + t.Fatal("configuration object does not have \"user\" attribute") + } + if got, want := gotObj.GetAttr("user"), cty.StringVal("hello"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } + + if !gotObj.Type().HasAttribute("pw") { + t.Fatal("configuration object does not have \"pw\" attribute") + } + if got, want := gotObj.GetAttr("pw"), cty.StringVal("so secret"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableProviderExecute_unknownImport(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.UnknownVal(cty.String), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + + diags := n.Execute(ctx, walkImport) + if !diags.HasErrors() { + t.Fatal("expected error, got success") + } + + detail := `Invalid provider configuration: The configuration for provider["registry.opentofu.org/hashicorp/foo"] depends on values that cannot be determined until apply.` + if got, want := diags.Err().Error(), detail; got != want { + t.Errorf("wrong diagnostic detail\n got: %q\nwant: %q", got, want) + } + + if ctx.ConfigureProviderCalled { + t.Fatal("should not be called") + } +} + +func TestNodeApplyableProviderExecute_unknownApply(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": 
cty.UnknownVal(cty.String), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + + if err := n.Execute(ctx, walkApply); err != nil { + t.Fatalf("err: %s", err) + } + + if !ctx.ConfigureProviderCalled { + t.Fatal("should be called") + } + + gotObj := ctx.ConfigureProviderConfig + if !gotObj.Type().HasAttribute("test_string") { + t.Fatal("configuration object does not have \"test_string\" attribute") + } + if got, want := gotObj.GetAttr("test_string"), cty.UnknownVal(cty.String); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableProviderExecute_sensitive(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("hello").Mark(marks.Sensitive), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + if err := n.Execute(ctx, walkApply); err != nil { + t.Fatalf("err: %s", err) + } + + if !ctx.ConfigureProviderCalled { + t.Fatal("should be called") + } + + gotObj := ctx.ConfigureProviderConfig + if !gotObj.Type().HasAttribute("test_string") { + t.Fatal("configuration object does not have \"test_string\" attribute") + } + if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, 
want) + } +} + +func TestNodeApplyableProviderExecute_sensitiveValidate(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("hello").Mark(marks.Sensitive), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + if err := n.Execute(ctx, walkValidate); err != nil { + t.Fatalf("err: %s", err) + } + + if !provider.ValidateProviderConfigCalled { + t.Fatal("should be called") + } + + gotObj := provider.ValidateProviderConfigRequest.Config + if !gotObj.Type().HasAttribute("test_string") { + t.Fatal("configuration object does not have \"test_string\" attribute") + } + if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableProviderExecute_emptyValidate(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Required: true, + }, + }, + }) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + if err := n.Execute(ctx, walkValidate); err != nil { + t.Fatalf("err: %s", err) + } + + if ctx.ConfigureProviderCalled { + t.Fatal("should not be called") + } +} + +func 
TestNodeApplyableProvider_Validate(t *testing.T) {
	provider := mockProviderWithConfigSchema(&configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"region": {
				Type:     cty.String,
				Required: true,
			},
		},
	})
	ctx := &MockEvalContext{ProviderProvider: provider}
	ctx.installSimpleEval()

	// A well-typed value for the required "region" attribute must validate.
	t.Run("valid", func(t *testing.T) {
		config := &configs.Provider{
			Name: "test",
			Config: configs.SynthBody("", map[string]cty.Value{
				"region": cty.StringVal("mars"),
			}),
		}

		node := NodeApplyableProvider{
			NodeAbstractProvider: &NodeAbstractProvider{
				Addr:   mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
				Config: config,
			},
		}

		diags := node.ValidateProvider(ctx, provider)
		if diags.HasErrors() {
			t.Errorf("unexpected error with valid config: %s", diags.Err())
		}
	})

	// A map value where the schema requires a string must be rejected.
	t.Run("invalid", func(t *testing.T) {
		config := &configs.Provider{
			Name: "test",
			Config: configs.SynthBody("", map[string]cty.Value{
				"region": cty.MapValEmpty(cty.String),
			}),
		}

		node := NodeApplyableProvider{
			NodeAbstractProvider: &NodeAbstractProvider{
				Addr:   mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
				Config: config,
			},
		}

		diags := node.ValidateProvider(ctx, provider)
		if !diags.HasErrors() {
			t.Error("missing expected error with invalid config")
		}
	})

	// With no config block attached at all, validation is skipped and
	// succeeds even though "region" is required.
	t.Run("empty config", func(t *testing.T) {
		node := NodeApplyableProvider{
			NodeAbstractProvider: &NodeAbstractProvider{
				Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
			},
		}

		diags := node.ValidateProvider(ctx, provider)
		if diags.HasErrors() {
			t.Errorf("unexpected error with empty config: %s", diags.Err())
		}
	})
}

// This test specifically tests responses from the
// providers.ValidateProviderConfigFn. See
// TestNodeApplyableProvider_ConfigProvider_config_fn_err for
// providers.ConfigureProviderRequest responses.
+func TestNodeApplyableProvider_ConfigProvider(t *testing.T) { + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Optional: true, + }, + }, + }) + // For this test, we're returning an error for an optional argument. This + // can happen for example if an argument is only conditionally required. + provider.ValidateProviderConfigFn = func(req providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) { + region := req.Config.GetAttr("region") + if region.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append( + tfdiags.WholeContainingBody(tfdiags.Error, "value is not found", "you did not supply a required value")) + } + return + } + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + + t.Run("valid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.StringVal("mars"), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if diags.HasErrors() { + t.Errorf("unexpected error with valid config: %s", diags.Err()) + } + }) + + t.Run("missing required config (no config at all)", func(t *testing.T) { + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with nil config") + } + if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { + t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) + } + }) + + t.Run("missing required config", func(t 
*testing.T) { + config := &configs.Provider{ + Name: "test", + Config: hcl.EmptyBody(), + } + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with invalid config") + } + if !strings.Contains(diags.Err().Error(), "value is not found") { + t.Errorf("wrong diagnostic: %s", diags.Err()) + } + }) + +} + +// This test is similar to TestNodeApplyableProvider_ConfigProvider, but tests responses from the providers.ConfigureProviderRequest +func TestNodeApplyableProvider_ConfigProvider_config_fn_err(t *testing.T) { + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Optional: true, + }, + }, + }) + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + // For this test, provider.PrepareConfigFn will succeed every time but the + // ctx.ConfigureProviderFn will return an error if a value is not found. 
+ // + // This is an unlikely but real situation that occurs: + // https://github.com/hashicorp/terraform/issues/23087 + ctx.ConfigureProviderFn = func(addr addrs.AbsProviderConfig, cfg cty.Value) (diags tfdiags.Diagnostics) { + if cfg.IsNull() { + diags = diags.Append(fmt.Errorf("no config provided")) + } else { + region := cfg.GetAttr("region") + if region.IsNull() { + diags = diags.Append(fmt.Errorf("value is not found")) + } + } + return + } + + t.Run("valid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.StringVal("mars"), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if diags.HasErrors() { + t.Errorf("unexpected error with valid config: %s", diags.Err()) + } + }) + + t.Run("missing required config (no config at all)", func(t *testing.T) { + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with nil config") + } + if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { + t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) + } + }) + + t.Run("missing required config", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: hcl.EmptyBody(), + } + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with 
invalid config") + } + if diags.Err().Error() != "value is not found" { + t.Errorf("wrong diagnostic: %s", diags.Err()) + } + }) +} + +func TestGetSchemaError(t *testing.T) { + provider := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Diagnostics: tfdiags.Diagnostics.Append(nil, tfdiags.WholeContainingBody(tfdiags.Error, "oops", "error")), + }, + } + + providerAddr := mustProviderConfig(`provider["terraform.io/some/provider"]`) + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: providerAddr, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + for _, d := range diags { + desc := d.Description() + if desc.Address != providerAddr.String() { + t.Fatalf("missing provider address from diagnostics: %#v", desc) + } + } + +} diff --git a/pkg/tofu/node_resource_abstract.go b/pkg/tofu/node_resource_abstract.go new file mode 100644 index 00000000000..635501a86e7 --- /dev/null +++ b/pkg/tofu/node_resource_abstract.go @@ -0,0 +1,604 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ConcreteResourceNodeFunc is a callback type used to convert an +// abstract resource to a concrete one of some type. +type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex + +// GraphNodeConfigResource is implemented by any nodes that represent a resource. 
// The type of operation cannot be assumed, only that this node represents
// the given resource.
type GraphNodeConfigResource interface {
	ResourceAddr() addrs.ConfigResource
}

// ConcreteResourceInstanceNodeFunc is a callback type used to convert an
// abstract resource instance to a concrete one of some type.
type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex

// GraphNodeResourceInstance is implemented by any nodes that represent
// a resource instance. A single resource may have multiple instances if,
// for example, the "count" or "for_each" argument is used for it in
// configuration.
type GraphNodeResourceInstance interface {
	ResourceInstanceAddr() addrs.AbsResourceInstance

	// StateDependencies returns any inter-resource dependencies that are
	// stored in the state.
	StateDependencies() []addrs.ConfigResource
}

// NodeAbstractResource represents a resource that has no associated
// operations. It registers all the interfaces for a resource that common
// across multiple operation types.
type NodeAbstractResource struct {
	Addr addrs.ConfigResource

	// The fields below will be automatically set using the Attach
	// interfaces if you're running those transforms, but also be explicitly
	// set if you already have that information.

	Schema        *configschema.Block // Schema for processing the configuration body
	SchemaVersion uint64              // Schema version of "Schema", as decided by the provider
	Config        *configs.Resource   // Config is the resource in the config

	// ProviderMetas is the provider_meta configs for the module this resource belongs to
	ProviderMetas map[addrs.Provider]*configs.ProviderMeta

	ProvisionerSchemas map[string]*configschema.Block

	// Set from GraphNodeTargetable
	Targets []addrs.Targetable

	// Set from AttachDataResourceDependsOn
	dependsOn      []addrs.ConfigResource
	forceDependsOn bool

	// The address of the provider this resource will use
	ResolvedProvider addrs.AbsProviderConfig
	// storedProviderConfig is the provider address retrieved from the
	// state. This is defined here for access within the ProvidedBy method, but
	// will be set from the embedding instance type when the state is attached.
	storedProviderConfig addrs.AbsProviderConfig

	// This resource may expand into instances which need to be imported.
	importTargets []*ImportTarget

	// generateConfigPath tells this node which file to write generated config
	// into. If empty, then config should not be generated.
	generateConfigPath string
}

var (
	_ GraphNodeReferenceable               = (*NodeAbstractResource)(nil)
	_ GraphNodeReferencer                  = (*NodeAbstractResource)(nil)
	_ GraphNodeProviderConsumer            = (*NodeAbstractResource)(nil)
	_ GraphNodeProvisionerConsumer         = (*NodeAbstractResource)(nil)
	_ GraphNodeConfigResource              = (*NodeAbstractResource)(nil)
	_ GraphNodeAttachResourceConfig        = (*NodeAbstractResource)(nil)
	_ GraphNodeAttachResourceSchema        = (*NodeAbstractResource)(nil)
	_ GraphNodeAttachProvisionerSchema     = (*NodeAbstractResource)(nil)
	_ GraphNodeAttachProviderMetaConfigs   = (*NodeAbstractResource)(nil)
	_ GraphNodeTargetable                  = (*NodeAbstractResource)(nil)
	_ graphNodeAttachDataResourceDependsOn = (*NodeAbstractResource)(nil)
	_ dag.GraphNodeDotter                  = (*NodeAbstractResource)(nil)
)

// NewNodeAbstractResource creates an abstract resource graph node for
// the given absolute resource address.
func NewNodeAbstractResource(addr addrs.ConfigResource) *NodeAbstractResource {
	return &NodeAbstractResource{
		Addr: addr,
	}
}

var (
	_ GraphNodeModuleInstance            = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeReferenceable             = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeReferencer                = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeRootReferencer            = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeProviderConsumer          = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeProvisionerConsumer       = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeConfigResource            = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeResourceInstance          = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeAttachResourceState       = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeAttachResourceConfig      = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeAttachResourceSchema      = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeAttachProvisionerSchema   = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeAttachProviderMetaConfigs = (*NodeAbstractResourceInstance)(nil)
	_ GraphNodeTargetable                = (*NodeAbstractResourceInstance)(nil)
	_ dag.GraphNodeDotter                = (*NodeAbstractResourceInstance)(nil)
)

// Name returns the config resource address, which uniquely names this node.
func (n *NodeAbstractResource) Name() string {
	return n.ResourceAddr().String()
}

// GraphNodeModulePath
func (n *NodeAbstractResource) ModulePath() addrs.Module {
	return n.Addr.Module
}

// GraphNodeReferenceable
func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
	return []addrs.Referenceable{n.Addr.Resource}
}

// GraphNodeReferencer
//
// References collects every reference made by this resource's configuration:
// depends_on, count/for_each, replace_triggered_by, the main config body,
// connection/provisioner blocks, and pre/postconditions.
func (n *NodeAbstractResource) References() []*addrs.Reference {
	var result []*addrs.Reference
	// If we have a config then we prefer to use that.
	if c := n.Config; c != nil {
		result = append(result, n.DependsOn()...)

		if n.Schema == nil {
			// Should never happen, but we'll log if it does so that we can
			// see this easily when debugging.
			log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
		}

		refs, _ := lang.ReferencesInExpr(addrs.ParseRef, c.Count)
		result = append(result, refs...)
		refs, _ = lang.ReferencesInExpr(addrs.ParseRef, c.ForEach)
		result = append(result, refs...)

		for _, expr := range c.TriggersReplacement {
			refs, _ = lang.ReferencesInExpr(addrs.ParseRef, expr)
			result = append(result, refs...)
		}

		// ReferencesInBlock() requires a schema
		if n.Schema != nil {
			refs, _ = lang.ReferencesInBlock(addrs.ParseRef, c.Config, n.Schema)
			result = append(result, refs...)
		}

		if c.Managed != nil {
			if c.Managed.Connection != nil {
				refs, _ = lang.ReferencesInBlock(addrs.ParseRef, c.Managed.Connection.Config, connectionBlockSupersetSchema)
				result = append(result, refs...)
			}

			for _, p := range c.Managed.Provisioners {
				// Only create-time provisioners contribute references here.
				if p.When != configs.ProvisionerWhenCreate {
					continue
				}
				if p.Connection != nil {
					refs, _ = lang.ReferencesInBlock(addrs.ParseRef, p.Connection.Config, connectionBlockSupersetSchema)
					result = append(result, refs...)
				}

				schema := n.ProvisionerSchemas[p.Type]
				if schema == nil {
					log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name())
				}
				refs, _ = lang.ReferencesInBlock(addrs.ParseRef, p.Config, schema)
				result = append(result, refs...)
			}
		}

		for _, check := range c.Preconditions {
			refs, _ := lang.ReferencesInExpr(addrs.ParseRef, check.Condition)
			result = append(result, refs...)
			refs, _ = lang.ReferencesInExpr(addrs.ParseRef, check.ErrorMessage)
			result = append(result, refs...)
		}
		for _, check := range c.Postconditions {
			refs, _ := lang.ReferencesInExpr(addrs.ParseRef, check.Condition)
			result = append(result, refs...)
			refs, _ = lang.ReferencesInExpr(addrs.ParseRef, check.ErrorMessage)
			result = append(result, refs...)
		}
	}

	return result
}

// referencesInImportAddress find all references relevant to the node in an import target address expression.
// The only references we care about here are the references that exist in the keys of hclsyntax.IndexExpr.
// For example, if the address is module.my_module1[expression1].aws_s3_bucket.bucket[expression2], then we would only
// consider references in expression1 and expression2, as the rest of the expression is the static part of the current
// resource's address
func referencesInImportAddress(expr hcl.Expression) (refs []*addrs.Reference, diags tfdiags.Diagnostics) {
	switch e := expr.(type) {
	case *hclsyntax.IndexExpr:
		// Recurse into the collection, then collect references in the key.
		r, d := referencesInImportAddress(e.Collection)
		diags = diags.Append(d)
		refs = append(refs, r...)

		r, _ = lang.ReferencesInExpr(addrs.ParseRef, e.Key)
		refs = append(refs, r...)
	case *hclsyntax.RelativeTraversalExpr:
		r, d := referencesInImportAddress(e.Source)
		refs = append(refs, r...)
		diags = diags.Append(d)

		// We don't care about the traversal part of the relative expression
		// as it should not contain any references in the index keys
	case *hclsyntax.ScopeTraversalExpr:
		// Static traversals should not contain any references in the index keys
	default:
		// This should not happen, as it should have failed validation earlier, in config.absTraversalForImportToExpr
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid import address expression",
			Detail:   "Import address must be a reference to a resource's address, and only allows for indexing with dynamic keys. For example: module.my_module[expression1].aws_s3_bucket.my_buckets[expression2] for resources inside of modules, or simply aws_s3_bucket.my_bucket for a resource in the root module",
			Subject:  expr.Range().Ptr(),
		})
	}
	return
}

// RootReferences returns references made by this node's import blocks (the
// import "to" address index keys, for_each, and id expressions), which are
// resolved in the root module scope.
func (n *NodeAbstractResource) RootReferences() []*addrs.Reference {
	var root []*addrs.Reference

	for _, importTarget := range n.importTargets {
		// References are only possible in import targets originating from an import block
		if !importTarget.IsFromImportBlock() {
			continue
		}

		refs, _ := referencesInImportAddress(importTarget.Config.To)
		root = append(root, refs...)

		refs, _ = lang.ReferencesInExpr(addrs.ParseRef, importTarget.Config.ForEach)
		root = append(root, refs...)

		refs, _ = lang.ReferencesInExpr(addrs.ParseRef, importTarget.Config.ID)
		root = append(root, refs...)
	}

	return root
}

// DependsOn returns the references declared via the resource's depends_on
// argument.
func (n *NodeAbstractResource) DependsOn() []*addrs.Reference {
	var result []*addrs.Reference
	if c := n.Config; c != nil {

		for _, traversal := range c.DependsOn {
			ref, diags := addrs.ParseRef(traversal)
			if diags.HasErrors() {
				// We ignore this here, because this isn't a suitable place to return
				// errors. This situation should be caught and rejected during
				// validation.
				log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err())
				continue
			}

			result = append(result, ref)
		}
	}
	return result
}

// SetProvider records the fully-resolved provider configuration address for
// this resource.
func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) {
	n.ResolvedProvider = p
}

// GraphNodeProviderConsumer
//
// ProvidedBy returns the provider configuration address this resource wants,
// and whether that address is exact (true) or still needs resolution (false).
// Preference order: already-resolved provider, configuration, stored state,
// import targets, then an implied default.
func (n *NodeAbstractResource) ProvidedBy() (addrs.ProviderConfig, bool) {
	// Once the provider is fully resolved, we can return the known value.
	if n.ResolvedProvider.Provider.Type != "" {
		return n.ResolvedProvider, true
	}

	// If we have a config we prefer that above all else
	if n.Config != nil {
		relAddr := n.Config.ProviderConfigAddr()
		return addrs.LocalProviderConfig{
			LocalName: relAddr.LocalName,
			Alias:     relAddr.Alias,
		}, false
	}

	// See if we have a valid provider config from the state.
	if n.storedProviderConfig.Provider.Type != "" {
		// An address from the state must match exactly, since we must ensure
		// we refresh/destroy a resource with the same provider configuration
		// that created it.
		return n.storedProviderConfig, true
	}

	// We might have an import target that is providing a specific provider,
	// this is okay as we know there is nothing else potentially providing a
	// provider configuration.
	if len(n.importTargets) > 0 {
		// The import targets should either all be defined via config or none
		// of them should be. They should also all have the same provider, so it
		// shouldn't matter which we check here, as they'll all give the same.
		if n.importTargets[0].Config != nil && n.importTargets[0].Config.ProviderConfigRef != nil {
			return addrs.LocalProviderConfig{
				LocalName: n.importTargets[0].Config.ProviderConfigRef.Name,
				Alias:     n.importTargets[0].Config.ProviderConfigRef.Alias,
			}, false
		}
	}

	// No provider configuration found; return a default address
	return addrs.AbsProviderConfig{
		Provider: n.Provider(),
		Module:   n.ModulePath(),
	}, false
}

// GraphNodeProviderConsumer
//
// Provider returns the fully-qualified provider this resource belongs to,
// from config, state, or import targets, falling back to the provider
// implied by the resource type name.
func (n *NodeAbstractResource) Provider() addrs.Provider {
	if n.Config != nil {
		return n.Config.Provider
	}
	if n.storedProviderConfig.Provider.Type != "" {
		return n.storedProviderConfig.Provider
	}

	if len(n.importTargets) > 0 {
		// The import targets should either all be defined via config or none
		// of them should be. They should also all have the same provider, so it
		// shouldn't matter which we check here, as they'll all give the same.
		if n.importTargets[0].Config != nil {
			return n.importTargets[0].Config.Provider
		}
	}

	return addrs.ImpliedProviderForUnqualifiedType(n.Addr.Resource.ImpliedProvider())
}

// GraphNodeProvisionerConsumer
func (n *NodeAbstractResource) ProvisionedBy() []string {
	// If we have no configuration, then we have no provisioners
	if n.Config == nil || n.Config.Managed == nil {
		return nil
	}

	// Build the list of provisioners we need based on the configuration.
	// It is okay to have duplicates here.
	result := make([]string, len(n.Config.Managed.Provisioners))
	for i, p := range n.Config.Managed.Provisioners {
		result[i] = p.Type
	}

	return result
}

// GraphNodeProvisionerConsumer
func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) {
	// Lazily initialize the map; nodes are constructed without it.
	if n.ProvisionerSchemas == nil {
		n.ProvisionerSchemas = make(map[string]*configschema.Block)
	}
	n.ProvisionerSchemas[name] = schema
}

// GraphNodeResource
func (n *NodeAbstractResource) ResourceAddr() addrs.ConfigResource {
	return n.Addr
}

// GraphNodeTargetable
func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
	n.Targets = targets
}

// graphNodeAttachDataResourceDependsOn
func (n *NodeAbstractResource) AttachDataResourceDependsOn(deps []addrs.ConfigResource, force bool) {
	n.dependsOn = deps
	n.forceDependsOn = force
}

// GraphNodeAttachResourceConfig
func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
	n.Config = c
}

// GraphNodeAttachResourceSchema impl
func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) {
	n.Schema = schema
	n.SchemaVersion = version
}

// GraphNodeAttachProviderMetaConfigs impl
func (n *NodeAbstractResource) AttachProviderMetaConfigs(c map[addrs.Provider]*configs.ProviderMeta) {
	n.ProviderMetas = c
}

// GraphNodeDotter impl.
func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
	return &dag.DotNode{
		Name: name,
		Attrs: map[string]string{
			"label": n.Name(),
			"shape": "box",
		},
	}
}

// writeResourceState ensures that a suitable resource-level state record is
// present in the state, if that's required for the "each mode" of that
// resource.
//
// This is important primarily for the situation where count = 0, since this
// eval is the only change we get to set the resource "each mode" to list
// in that case, allowing expression evaluation to see it as a zero-element list
// rather than as not set at all.
func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.AbsResource) (diags tfdiags.Diagnostics) {
	state := ctx.State()

	// We'll record our expansion decision in the shared "expander" object
	// so that later operations (i.e. DynamicExpand and expression evaluation)
	// can refer to it. Since this node represents the abstract module, we need
	// to expand the module here to create all resources.
	expander := ctx.InstanceExpander()

	switch {
	case n.Config != nil && n.Config.Count != nil:
		count, countDiags := evaluateCountExpression(n.Config.Count, ctx)
		diags = diags.Append(countDiags)
		if countDiags.HasErrors() {
			return diags
		}

		state.SetResourceProvider(addr, n.ResolvedProvider)
		expander.SetResourceCount(addr.Module, n.Addr.Resource, count)

	case n.Config != nil && n.Config.ForEach != nil:
		forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx)
		diags = diags.Append(forEachDiags)
		if forEachDiags.HasErrors() {
			return diags
		}

		// This method takes care of all of the business logic of updating this
		// while ensuring that any existing instances are preserved, etc.
		state.SetResourceProvider(addr, n.ResolvedProvider)
		expander.SetResourceForEach(addr.Module, n.Addr.Resource, forEach)

	default:
		state.SetResourceProvider(addr, n.ResolvedProvider)
		expander.SetResourceSingle(addr.Module, n.Addr.Resource)
	}

	return diags
}

// readResourceInstanceState reads the current object for a specific instance in
// the state.
// Returns (nil, nil) when the instance has no current object in state.
// Any stored state is upgraded to the provider's current schema version
// before being decoded.
func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	log.Printf("[TRACE] readResourceInstanceState: reading state for %s", addr)

	src := ctx.State().ResourceInstanceObject(addr, states.CurrentGen)
	if src == nil {
		// Presumably we only have deposed objects, then.
		log.Printf("[TRACE] readResourceInstanceState: no state present for %s", addr)
		return nil, nil
	}

	schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource())
	if schema == nil {
		// Shouldn't happen since we should've failed long ago if no schema is present
		return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in OpenTofu and should be reported", addr))
	}
	src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion)
	if n.Config != nil {
		upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String())
	}
	diags = diags.Append(upgradeDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	obj, err := src.Decode(schema.ImpliedType())
	if err != nil {
		diags = diags.Append(err)
	}

	return obj, diags
}

// readResourceInstanceStateDeposed reads the deposed object for a specific
// instance in the state.
//
// key must not be states.NotDeposed; use readResourceInstanceState for the
// current object. As with the current-object variant, stored state is
// upgraded to the provider's current schema version before decoding.
func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	if err != nil {
		diags = diags.Append(err)
		return nil, diags
	}

	if key == states.NotDeposed {
		return nil, diags.Append(fmt.Errorf("readResourceInstanceStateDeposed used with no instance key; this is a bug in OpenTofu and should be reported"))
	}

	log.Printf("[TRACE] readResourceInstanceStateDeposed: reading state for %s deposed object %s", addr, key)

	src := ctx.State().ResourceInstanceObject(addr, key)
	if src == nil {
		// There is no deposed object with this key, so there's nothing to
		// read.
		log.Printf("[TRACE] readResourceInstanceStateDeposed: no state present for %s deposed object %s", addr, key)
		return nil, diags
	}

	schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource())
	if schema == nil {
		// Shouldn't happen since we should've failed long ago if no schema is present
		return nil, diags.Append(fmt.Errorf("no schema available for %s while reading state; this is a bug in OpenTofu and should be reported", addr))

	}

	src, upgradeDiags := upgradeResourceState(addr, provider, src, schema, currentVersion)
	if n.Config != nil {
		upgradeDiags = upgradeDiags.InConfigBody(n.Config.Config, addr.String())
	}
	diags = diags.Append(upgradeDiags)
	if diags.HasErrors() {
		// Note that we don't have any channel to return warnings here. We'll
		// accept that for now since warnings during a schema upgrade would
		// be pretty weird anyway, since this operation is supposed to seem
		// invisible to the user.
		return nil, diags
	}

	obj, err := src.Decode(schema.ImpliedType())
	if err != nil {
		diags = diags.Append(err)
	}

	return obj, diags
}

// graphNodesAreResourceInstancesInDifferentInstancesOfSameModule is an
// annoyingly-task-specific helper function that returns true if and only if
// the following conditions hold:
//   - Both of the given vertices represent specific resource instances, as
//     opposed to unexpanded resources or any other non-resource-related object.
//   - The module instance addresses for both of the resource instances belong
//     to the same static module.
//   - The module instance addresses for both of the resource instances are
//     not equal, indicating that they belong to different instances of the
//     same module.
//
// This result can be used as a way to compensate for the effects of
// conservative analysis passes in our graph builders which make their
// decisions based only on unexpanded addresses, often so that they can behave
// correctly for interactions between expanded and not-yet-expanded objects.
//
// Callers of this helper function will typically skip adding an edge between
// the two given nodes if this function returns true.
func graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(a, b dag.Vertex) bool {
	aRI, aOK := a.(GraphNodeResourceInstance)
	bRI, bOK := b.(GraphNodeResourceInstance)
	if !(aOK && bOK) {
		return false
	}
	aModInst := aRI.ResourceInstanceAddr().Module
	bModInst := bRI.ResourceInstanceAddr().Module
	aMod := aModInst.Module()
	bMod := bModInst.Module()
	if !aMod.Equal(bMod) {
		return false
	}
	return !aModInst.Equal(bModInst)
}
diff --git a/pkg/tofu/node_resource_abstract_instance.go b/pkg/tofu/node_resource_abstract_instance.go
new file mode 100644
index 00000000000..0e8918af04e
--- /dev/null
+++ b/pkg/tofu/node_resource_abstract_instance.go
@@ -0,0 +1,2597 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/checks"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/encryption"
	"github.com/kubegems/opentofu/pkg/instances"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/plans/objchange"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/provisioners"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// NodeAbstractResourceInstance represents a resource instance with no
// associated operations. It embeds NodeAbstractResource but additionally
// contains an instance key, used to identify one of potentially many
// instances that were created from a resource in configuration, e.g. using
// the "count" or "for_each" arguments.
type NodeAbstractResourceInstance struct {
	NodeAbstractResource
	// Addr is the fully-expanded address of this particular instance.
	Addr addrs.AbsResourceInstance

	// These are set via the AttachState method.
	instanceState *states.ResourceInstance

	// Dependencies holds the most recently calculated dependency set; see
	// StateDependencies for how it interacts with stored state.
	Dependencies []addrs.ConfigResource

	preDestroyRefresh bool

	// During import we may generate configuration for a resource, which needs
	// to be stored in the final change.
	generatedConfigHCL string
}

// NewNodeAbstractResourceInstance creates an abstract resource instance graph
// node for the given absolute resource instance address.
func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
	// Due to the fact that we embed NodeAbstractResource, the given address
	// actually ends up split between the resource address in the embedded
	// object and the InstanceKey field in our own struct. The
	// ResourceInstanceAddr method will stick these back together again on
	// request.
	r := NewNodeAbstractResource(addr.ContainingResource().Config())
	return &NodeAbstractResourceInstance{
		NodeAbstractResource: *r,
		Addr:                 addr,
	}
}

// Name returns the display name of this node: the string form of the
// instance's absolute address.
func (n *NodeAbstractResourceInstance) Name() string {
	return n.ResourceInstanceAddr().String()
}

// Path returns the module instance this resource instance lives in.
func (n *NodeAbstractResourceInstance) Path() addrs.ModuleInstance {
	return n.Addr.Module
}

// GraphNodeReferenceable
func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
	addr := n.ResourceInstanceAddr()
	return []addrs.Referenceable{
		addr.Resource,

		// A resource instance can also be referenced by the address of its
		// containing resource, so that e.g. a reference to aws_instance.foo
		// would match both aws_instance.foo[0] and aws_instance.foo[1].
		addr.ContainingResource().Resource,
	}
}

// GraphNodeReferencer
func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
	// If we have a configuration attached then we'll delegate to our
	// embedded abstract resource, which knows how to extract dependencies
	// from configuration. If there is no config, then the dependencies will
	// be connected during destroy from those stored in the state.
	if n.Config != nil {
		if n.Schema == nil {
			// We'll produce a log message about this out here so that
			// we can include the full instance address, since the equivalent
			// message in NodeAbstractResource.References cannot see it.
			log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
			return nil
		}
		return n.NodeAbstractResource.References()
	}

	// If we have neither config nor state then we have no references.
	return nil
}

// StateDependencies returns the dependencies which will be saved in the state
// for managed resources, or the most current dependencies for data resources.
func (n *NodeAbstractResourceInstance) StateDependencies() []addrs.ConfigResource {
	// Managed resources prefer the stored dependencies, to avoid possible
	// conflicts in ordering when refactoring configuration.
	if s := n.instanceState; s != nil {
		if s.Current != nil {
			return s.Current.Dependencies
		}
	}

	// If there are no stored dependencies, this is either a newly created
	// managed resource, or a data source, and we can use the most recently
	// calculated dependencies.
	return n.Dependencies
}

// GraphNodeResourceInstance
func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
	return n.Addr
}

// GraphNodeAttachResourceState
func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) {
	if s == nil {
		log.Printf("[WARN] attaching nil state to %s", n.Addr)
		return
	}
	log.Printf("[TRACE] NodeAbstractResourceInstance.AttachResourceState for %s", n.Addr)
	n.instanceState = s.Instance(n.Addr.Resource.Key)
	n.storedProviderConfig = s.ProviderConfig
}

// readDiff returns the planned change for a particular resource instance
// object.
//
// Returns (nil, nil) when no change has been recorded for this instance.
func (n *NodeAbstractResourceInstance) readDiff(ctx EvalContext, providerSchema providers.ProviderSchema) (*plans.ResourceInstanceChange, error) {
	changes := ctx.Changes()
	addr := n.ResourceInstanceAddr()

	schema, _ := providerSchema.SchemaForResourceAddr(addr.Resource.Resource)
	if schema == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		return nil, fmt.Errorf("provider does not support resource type %q", addr.Resource.Resource.Type)
	}

	gen := states.CurrentGen
	csrc := changes.GetResourceInstanceChange(addr, gen)
	if csrc == nil {
		log.Printf("[TRACE] readDiff: No planned change recorded for %s", n.Addr)
		return nil, nil
	}

	change, err := csrc.Decode(schema.ImpliedType())
	if err != nil {
		return nil, fmt.Errorf("failed to decode planned changes for %s: %w", n.Addr, err)
	}

	log.Printf("[TRACE] readDiff: Read %s change from plan for %s", change.Action, n.Addr)

	return change, nil
}

// checkPreventDestroy returns an error if the given change would destroy (or
// replace) this instance while lifecycle.prevent_destroy is set in its
// configuration.
func (n *NodeAbstractResourceInstance) checkPreventDestroy(change *plans.ResourceInstanceChange) error {
	if change == nil || n.Config == nil || n.Config.Managed == nil {
		return nil
	}

	preventDestroy := n.Config.Managed.PreventDestroy

	if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy {
		var diags tfdiags.Diagnostics
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Instance cannot be destroyed",
			Detail: fmt.Sprintf(
				"Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.",
				n.Addr.String(),
			),
			Subject: &n.Config.DeclRange,
		})
		return diags.Err()
	}

	return nil
}

// preApplyHook calls the pre-Apply hook
func (n *NodeAbstractResourceInstance) preApplyHook(ctx EvalContext, change *plans.ResourceInstanceChange) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	if change == nil {
		panic(fmt.Sprintf("preApplyHook for %s called with nil Change", n.Addr))
	}

	// Only managed resources have user-visible apply actions.
	if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode {
		priorState := change.Before
		plannedNewState := change.After

		diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PreApply(n.Addr, change.DeposedKey.Generation(), change.Action, priorState, plannedNewState)
		}))
		if diags.HasErrors() {
			return diags
		}
	}

	return nil
}

// postApplyHook calls the post-Apply hook
func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *states.ResourceInstanceObject, err error) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	// Only managed resources have user-visible apply actions.
	if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode {
		// A nil state object means the instance was destroyed; report a
		// null value to the hook in that case.
		var newState cty.Value
		if state != nil {
			newState = state.Value
		} else {
			newState = cty.NullVal(cty.DynamicPseudoType)
		}
		diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostApply(n.Addr, nil, newState, err)
		}))
	}

	return diags
}

// phaseState selects which of the evaluation context's state objects a
// write should target.
type phaseState int

const (
	workingState phaseState = iota
	refreshState
	prevRunState
)

//go:generate go run golang.org/x/tools/cmd/stringer -type phaseState

// writeResourceInstanceState saves the given object as the current object for
// the selected resource instance.
//
// dependencies is a parameter, instead of those directly attached to the
// NodeAbstractResourceInstance, because we don't write dependencies for
// datasources.
//
// targetState determines which context state we're writing to during plan. The
// default is the global working state.
func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
	return n.writeResourceInstanceStateImpl(ctx, states.NotDeposed, obj, targetState)
}

// writeResourceInstanceStateDeposed saves the given object as a deposed
// object of the selected resource instance, under the given deposed key.
func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
	if deposedKey == states.NotDeposed {
		// Bail out to avoid silently doing something other than what the
		// caller seems to have intended.
		panic("trying to write current state object using writeResourceInstanceStateDeposed")
	}
	return n.writeResourceInstanceStateImpl(ctx, deposedKey, obj, targetState)
}

// (This is the private common body of both writeResourceInstanceState and
// writeResourceInstanceStateDeposed. Don't call it directly; instead, use
// one of the two wrappers to be explicit about which of the instance's
// objects you are intending to write.)
func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
	absAddr := n.Addr
	_, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider)
	if err != nil {
		return err
	}
	logFuncName := "NodeAbstractResouceInstance.writeResourceInstanceState"
	if deposedKey == states.NotDeposed {
		log.Printf("[TRACE] %s to %s for %s", logFuncName, targetState, absAddr)
	} else {
		logFuncName = "NodeAbstractResouceInstance.writeResourceInstanceStateDeposed"
		log.Printf("[TRACE] %s to %s for %s (deposed key %s)", logFuncName, targetState, absAddr, deposedKey)
	}

	var state *states.SyncState
	switch targetState {
	case workingState:
		state = ctx.State()
	case refreshState:
		state = ctx.RefreshState()
	case prevRunState:
		state = ctx.PrevRunState()
	default:
		panic(fmt.Sprintf("unsupported phaseState value %#v", targetState))
	}
	if state == nil {
		// Should not happen, because we shouldn't ever try to write to
		// a state that isn't applicable to the current operation.
		// (We can also get in here for unit tests which are using
		// EvalContextMock but not populating PrevRunStateState with
		// a suitable state object.)
		return fmt.Errorf("state of type %s is not applicable to the current operation; this is a bug in OpenTofu", targetState)
	}

	// In spite of the name, this function also handles the non-deposed case
	// via the writeResourceInstanceState wrapper, by setting deposedKey to
	// the NotDeposed value (the zero value of DeposedKey).
	var write func(src *states.ResourceInstanceObjectSrc)
	if deposedKey == states.NotDeposed {
		write = func(src *states.ResourceInstanceObjectSrc) {
			state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider)
		}
	} else {
		write = func(src *states.ResourceInstanceObjectSrc) {
			state.SetResourceInstanceDeposed(absAddr, deposedKey, src, n.ResolvedProvider)
		}
	}

	if obj == nil || obj.Value.IsNull() {
		// No need to encode anything: we'll just write it directly.
		write(nil)
		log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr)
		return nil
	}

	log.Printf("[TRACE] %s: writing state object for %s", logFuncName, absAddr)

	schema, currentVersion := providerSchema.SchemaForResourceAddr(absAddr.ContainingResource().Resource)
	if schema == nil {
		// It shouldn't be possible to get this far in any real scenario
		// without a schema, but we might end up here in contrived tests that
		// fail to set up their world properly.
		return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
	}

	src, err := obj.Encode(schema.ImpliedType(), currentVersion)
	if err != nil {
		return fmt.Errorf("failed to encode %s in state: %w", absAddr, err)
	}

	write(src)
	return nil
}

// planForget returns a removed from state diff.
func (n *NodeAbstractResourceInstance) planForget(ctx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) *plans.ResourceInstanceChange {
	var plan *plans.ResourceInstanceChange

	unmarkedPriorVal, _ := currentState.Value.UnmarkDeep()

	// The config and new value are null to signify that this is a forget
	// operation.
	nullVal := cty.NullVal(unmarkedPriorVal.Type())

	plan = &plans.ResourceInstanceChange{
		Addr:        n.Addr,
		PrevRunAddr: n.prevRunAddr(ctx),
		DeposedKey:  deposedKey,
		Change: plans.Change{
			Action: plans.Forget,
			Before: currentState.Value,
			After:  nullVal,
		},
		ProviderAddr: n.ResolvedProvider,
	}

	return plan
}

// planDestroy returns a plain destroy diff.
func (n *NodeAbstractResourceInstance) planDestroy(ctx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	var plan *plans.ResourceInstanceChange

	absAddr := n.Addr

	if n.ResolvedProvider.Provider.Type == "" {
		if deposedKey == "" {
			panic(fmt.Sprintf("planDestroy for %s does not have ProviderAddr set", absAddr))
		} else {
			panic(fmt.Sprintf("planDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, deposedKey))
		}
	}

	// If there is no state or our attributes object is null then we're already
	// destroyed.
	if currentState == nil || currentState.Value.IsNull() {
		// We still need to generate a NoOp change, because that allows
		// outside consumers of the plan to distinguish between us affirming
		// that we checked something and concluded no changes were needed
		// vs. that something being entirely excluded e.g. due to -target.
		noop := &plans.ResourceInstanceChange{
			Addr:        absAddr,
			PrevRunAddr: n.prevRunAddr(ctx),
			DeposedKey:  deposedKey,
			Change: plans.Change{
				Action: plans.NoOp,
				Before: cty.NullVal(cty.DynamicPseudoType),
				After:  cty.NullVal(cty.DynamicPseudoType),
			},
			ProviderAddr: n.ResolvedProvider,
		}
		return noop, nil
	}

	unmarkedPriorVal, _ := currentState.Value.UnmarkDeep()

	// The config and new value are null to signify that this is a destroy
	// operation.
+ nullVal := cty.NullVal(unmarkedPriorVal.Type()) + + provider, _, err := n.getProvider(ctx, n.ResolvedProvider) + if err != nil { + return plan, diags.Append(err) + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return plan, diags + } + + // Allow the provider to check the destroy plan, and insert any necessary + // private data. + resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Resource.Type, + Config: nullVal, + PriorState: unmarkedPriorVal, + ProposedNewState: nullVal, + PriorPrivate: currentState.Private, + ProviderMeta: metaConfigVal, + }) + + // We may not have a config for all destroys, but we want to reference it in + // the diagnostics if we do. + if n.Config != nil { + resp.Diagnostics = resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String()) + } + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return plan, diags + } + + // Check that the provider returned a null value here, since that is the + // only valid value for a destroy plan. + if !resp.PlannedState.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned a non-null destroy value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider, n.Addr), + ), + ) + return plan, diags + } + + // Plan is always the same for a destroy. + plan = &plans.ResourceInstanceChange{ + Addr: absAddr, + PrevRunAddr: n.prevRunAddr(ctx), + DeposedKey: deposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: currentState.Value, + After: nullVal, + }, + Private: resp.PlannedPrivate, + ProviderAddr: n.ResolvedProvider, + } + + return plan, diags +} + +// writeChange saves a planned change for an instance object into the set of +// global planned changes. 
+func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error {
+	changes := ctx.Changes()
+
+	if change == nil {
+		// Caller sets nil to indicate that we need to remove a change from
+		// the set of changes.
+		gen := states.CurrentGen
+		if deposedKey != states.NotDeposed {
+			gen = deposedKey
+		}
+		changes.RemoveResourceInstanceChange(n.Addr, gen)
+		return nil
+	}
+
+	_, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider)
+	if err != nil {
+		return err
+	}
+
+	// Sanity checks: the change must describe this node's own instance.
+	if change.Addr.String() != n.Addr.String() || change.DeposedKey != deposedKey {
+		// Should never happen, and indicates a bug in the caller.
+		panic("inconsistent address and/or deposed key in writeChange")
+	}
+	if change.PrevRunAddr.Resource.Resource.Type == "" {
+		// Should never happen, and indicates a bug in the caller.
+		// (The change.Encode function actually has its own fixup to just
+		// quietly make this match change.Addr in the incorrect case, but we
+		// intentionally panic here in order to catch incorrect callers where
+		// the stack trace will hopefully be actually useful. The tolerance
+		// at the next layer down is mainly to accommodate sloppy input in
+		// older tests.)
+		panic("unpopulated ResourceInstanceChange.PrevRunAddr in writeChange")
+	}
+
+	ri := n.Addr.Resource
+	schema, _ := providerSchema.SchemaForResourceAddr(ri.Resource)
+	if schema == nil {
+		// Should be caught during validation, so we don't bother with a pretty error here
+		return fmt.Errorf("provider does not support resource type %q", ri.Resource.Type)
+	}
+
+	csrc, err := change.Encode(schema.ImpliedType())
+	if err != nil {
+		return fmt.Errorf("failed to encode planned changes for %s: %w", n.Addr, err)
+	}
+
+	changes.AppendResourceInstanceChange(csrc)
+	if deposedKey == states.NotDeposed {
+		log.Printf("[TRACE] writeChange: recorded %s change for %s", change.Action, n.Addr)
+	} else {
+		log.Printf("[TRACE] writeChange: recorded %s change for %s deposed object %s", change.Action, n.Addr, deposedKey)
+	}
+
+	return nil
+}
+
+// refresh reads the current remote object for this resource instance from its
+// provider and returns an updated copy of the given state object, along with
+// any diagnostics. A nil state is returned unchanged, since there is nothing
+// to refresh. deposedKey selects a deposed object (states.NotDeposed for the
+// current object); it affects only logging and the generation passed to hooks.
+func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	absAddr := n.Addr
+	if deposedKey == states.NotDeposed {
+		log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr)
+	} else {
+		log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s (deposed object %s)", absAddr, deposedKey)
+	}
+	provider, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider)
+	if err != nil {
+		return state, diags.Append(err)
+	}
+	// If we have no state, we don't do any refreshing
+	if state == nil {
+		log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", absAddr)
+		return state, diags
+	}
+
+	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.ContainingResource())
+	if schema == nil {
+		// Should be caught during validation, so we don't bother with a pretty error here
+		diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type))
+		return state, diags
+	}
+
+	metaConfigVal, metaDiags := n.providerMetas(ctx)
+	diags = diags.Append(metaDiags)
+	if diags.HasErrors() {
+		return state, diags
+	}
+
+	// Deposed objects use their deposed key as the "generation" for hooks.
+	hookGen := states.CurrentGen
+	if deposedKey != states.NotDeposed {
+		hookGen = deposedKey
+	}
+
+	// Call pre-refresh hook
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreRefresh(absAddr, hookGen, state.Value)
+	}))
+	if diags.HasErrors() {
+		return state, diags
+	}
+
+	// Refresh!
+	priorVal := state.Value
+
+	// Unmark before sending to provider; the mark paths are kept so the
+	// marks can be reapplied to the refreshed value below.
+	var priorPaths []cty.PathValueMarks
+	if priorVal.ContainsMarked() {
+		priorVal, priorPaths = priorVal.UnmarkDeepWithPaths()
+	}
+
+	providerReq := providers.ReadResourceRequest{
+		TypeName:     n.Addr.Resource.Resource.Type,
+		PriorState:   priorVal,
+		Private:      state.Private,
+		ProviderMeta: metaConfigVal,
+	}
+
+	resp := provider.ReadResource(providerReq)
+	if n.Config != nil {
+		resp.Diagnostics = resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String())
+	}
+
+	diags = diags.Append(resp.Diagnostics)
+	if diags.HasErrors() {
+		return state, diags
+	}
+
+	if resp.NewState == cty.NilVal {
+		// This ought not to happen in real cases since it's not possible to
+		// send NilVal over the plugin RPC channel, but it can come up in
+		// tests due to sloppy mocking.
+		panic("new state is cty.NilVal")
+	}
+
+	for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Provider produced invalid object",
+			fmt.Sprintf(
+				"Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+				n.ResolvedProvider.Provider.String(), absAddr, tfdiags.FormatError(err),
+			),
+		))
+	}
+	if diags.HasErrors() {
+		return state, diags
+	}
+
+	newState := objchange.NormalizeObjectFromLegacySDK(resp.NewState, schema)
+	if !newState.RawEquals(resp.NewState) {
+		// We had to fix up this object in some way, and we still need to
+		// accept any changes for compatibility, so all we can do is log a
+		// warning about the change.
+		log.Printf("[WARN] Provider %q produced an invalid new value containing null blocks for %q during refresh\n", n.ResolvedProvider.Provider, n.Addr)
+	}
+
+	ret := state.DeepCopy()
+	ret.Value = newState
+	ret.Private = resp.Private
+
+	// We have no way to exempt provider using the legacy SDK from this check,
+	// so we can only log inconsistencies with the updated state values.
+	// In most cases these are not errors anyway, and represent "drift" from
+	// external changes which will be handled by the subsequent plan.
+	if errs := objchange.AssertObjectCompatible(schema, priorVal, ret.Value); len(errs) > 0 {
+		var buf strings.Builder
+		fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s during refresh.", n.ResolvedProvider.Provider.String(), absAddr)
+		for _, err := range errs {
+			fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
+		}
+		log.Print(buf.String())
+	}
+
+	// Call post-refresh hook
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostRefresh(absAddr, hookGen, priorVal, ret.Value)
+	}))
+	if diags.HasErrors() {
+		return ret, diags
+	}
+
+	// Bring in the marks from the schema for the value, this will be merged with the marks from the
+	// previous value to preserve user-marked values, for example: someone passing a sensitive arg to a non-sensitive
+	// prop on a resource
+	marks := combinePathValueMarks(priorPaths, schema.ValueMarks(ret.Value, nil))
+
+	// we only want to mark the value if it has marks
+	if len(marks) > 0 {
+		ret.Value = ret.Value.MarkWithPaths(marks)
+	}
+
+	return ret, diags
+}
+
+// plan produces the planned change for this resource instance, along with the
+// planned new state object, the repetition data for its instance key, and any
+// diagnostics. plannedChange, if non-nil, is a change produced by an earlier
+// walk (e.g. the plan phase, when we're now applying) that this call must
+// stay consistent with. forceReplace lists instance addresses the user asked
+// to replace regardless of what the provider decides.
+func (n *NodeAbstractResourceInstance) plan(
+	ctx EvalContext,
+	plannedChange *plans.ResourceInstanceChange,
+	currentState *states.ResourceInstanceObject,
+	createBeforeDestroy bool,
+	forceReplace []addrs.AbsResourceInstance,
+) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	var keyData instances.RepetitionData
+
+	resource := n.Addr.Resource.Resource
+	provider, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider)
+	if err != nil {
+		return nil, nil, keyData, diags.Append(err)
+	}
+
+	schema, _ := providerSchema.SchemaForResourceAddr(resource)
+	if schema == nil {
+		// Should be caught during validation, so we don't bother with a pretty error here
+		diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type))
+		return nil, nil, keyData, diags
+	}
+
+	// We should always have a configuration by this point; return an explicit
+	// error rather than panicking on the dereference below if we don't.
+	if n.Config == nil {
+		// This shouldn't happen. A node that isn't generating config should
+		// have embedded config, and the rest of OpenTofu should enforce this.
+		// If, however, we didn't do things correctly the next line will panic,
+		// so let's not do that and return an error message with more context.
+
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Resource has no configuration",
+			fmt.Sprintf("OpenTofu attempted to process a resource at %s that has no configuration. This is a bug in OpenTofu; please report it!", n.Addr.String())))
+		return nil, nil, keyData, diags
+	}
+
+	config := *n.Config
+
+	// During a pre-destroy refresh, failed check rules are reported as
+	// warnings rather than errors.
+	checkRuleSeverity := tfdiags.Error
+	if n.preDestroyRefresh {
+		checkRuleSeverity = tfdiags.Warning
+	}
+
+	if plannedChange != nil {
+		// If we already planned the action, we stick to that plan
+		createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete
+	}
+
+	// Evaluate the configuration
+	forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
+
+	keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
+
+	checkDiags := evalCheckRules(
+		addrs.ResourcePrecondition,
+		n.Config.Preconditions,
+		ctx, n.Addr, keyData,
+		checkRuleSeverity,
+	)
+	diags = diags.Append(checkDiags)
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags // failed preconditions prevent further evaluation
+	}
+
+	// If we have a previous plan and the action was a noop, then the only
+	// reason we're in this method was to evaluate the preconditions. There's
+	// no need to re-plan this resource.
+	if plannedChange != nil && plannedChange.Action == plans.NoOp {
+		return plannedChange, currentState.DeepCopy(), keyData, diags
+	}
+
+	origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	metaConfigVal, metaDiags := n.providerMetas(ctx)
+	diags = diags.Append(metaDiags)
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	var priorVal cty.Value
+	var priorValTainted cty.Value
+	var priorPrivate []byte
+	if currentState != nil {
+		if currentState.Status != states.ObjectTainted {
+			priorVal = currentState.Value
+			priorPrivate = currentState.Private
+		} else {
+			// If the prior state is tainted then we'll proceed below like
+			// we're creating an entirely new object, but then turn it into
+			// a synthetic "Replace" change at the end, creating the same
+			// result as if the provider had marked at least one argument
+			// change as "requires replacement".
+			priorValTainted = currentState.Value
+			priorVal = cty.NullVal(schema.ImpliedType())
+		}
+	} else {
+		priorVal = cty.NullVal(schema.ImpliedType())
+	}
+
+	log.Printf("[TRACE] Re-validating config for %q", n.Addr)
+	// Allow the provider to validate the final set of values. The config was
+	// statically validated early on, but there may have been unknown values
+	// which the provider could not validate at the time.
+	//
+	// TODO: It would be more correct to validate the config after
+	// ignore_changes has been applied, but the current implementation cannot
+	// exclude computed-only attributes when given the `all` option.
+
+	// we must unmark and use the original config, since the ignore_changes
+	// handling below needs access to the marks.
+	unmarkedConfigVal, _ := origConfigVal.UnmarkDeep()
+	validateResp := provider.ValidateResourceConfig(
+		providers.ValidateResourceConfigRequest{
+			TypeName: n.Addr.Resource.Resource.Type,
+			Config:   unmarkedConfigVal,
+		},
+	)
+	diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	// ignore_changes is meant to only apply to the configuration, so it must
+	// be applied before we generate a plan. This ensures the config used for
+	// the proposed value, the proposed value itself, and the config presented
+	// to the provider in the PlanResourceChange request all agree on the
+	// starting values.
+	// Here we operate on the marked values, so as to revert any changes to the
+	// marks as well as the value.
+	configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal, schema)
+	diags = diags.Append(ignoreChangeDiags)
+	if ignoreChangeDiags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	// Create an unmarked version of our config val and our prior val.
+	// Store the paths for the config val to re-mark after we've sent things
+	// over the wire.
+	unmarkedConfigVal, unmarkedPaths := configValIgnored.UnmarkDeepWithPaths()
+	unmarkedPriorVal, _ := priorVal.UnmarkDeepWithPaths()
+
+	proposedNewVal := objchange.ProposedNew(schema, unmarkedPriorVal, unmarkedConfigVal)
+
+	// Call pre-diff hook
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal)
+	}))
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
+		TypeName:         n.Addr.Resource.Resource.Type,
+		Config:           unmarkedConfigVal,
+		PriorState:       unmarkedPriorVal,
+		ProposedNewState: proposedNewVal,
+		PriorPrivate:     priorPrivate,
+		ProviderMeta:     metaConfigVal,
+	})
+
+	diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	plannedNewVal := resp.PlannedState
+	// Store an unmarked version of our planned new value because the `plan` now marks properties correctly with the config marks
+	unmarkedPlannedNewVal, _ := plannedNewVal.UnmarkDeep()
+	plannedPrivate := resp.PlannedPrivate
+
+	if plannedNewVal == cty.NilVal {
+		// Should never happen. Since real-world providers return via RPC a nil
+		// is always a bug in the client-side stub. This is more likely caused
+		// by an incompletely-configured mock provider in tests, though.
+		panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", n.Addr))
+	}
+
+	// We allow the planned new value to disagree with configuration _values_
+	// here, since that allows the provider to do special logic like a
+	// DiffSuppressFunc, but we still require that the provider produces
+	// a value whose type conforms to the schema.
+	for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Provider produced invalid plan",
+			fmt.Sprintf(
+				"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+				n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()),
+			),
+		))
+	}
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, unmarkedConfigVal, unmarkedPlannedNewVal); len(errs) > 0 {
+		if resp.LegacyTypeSystem {
+			// The shimming of the old type system in the legacy SDK is not precise
+			// enough to pass this consistency check, so we'll give it a pass here,
+			// but we will generate a warning about it so that we are more likely
+			// to notice in the logs if an inconsistency beyond the type system
+			// leads to a downstream provider failure.
+			var buf strings.Builder
+			fmt.Fprintf(&buf,
+				"[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:",
+				n.ResolvedProvider.Provider, n.Addr,
+			)
+			for _, err := range errs {
+				fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
+			}
+			log.Print(buf.String())
+		} else {
+			for _, err := range errs {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Provider produced invalid plan",
+					fmt.Sprintf(
+						"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+						n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()),
+					),
+				))
+			}
+			return nil, nil, keyData, diags
+		}
+	}
+
+	if resp.LegacyTypeSystem {
+		// Because we allow legacy providers to depart from the contract and
+		// return changes to non-computed values, the plan response may have
+		// altered values that were already suppressed with ignore_changes.
+		// A prime example of this is where providers attempt to obfuscate
+		// config data by turning the config value into a hash and storing the
+		// hash value in the state. There are enough cases of this in existing
+		// providers that we must accommodate the behavior for now, so for
+		// ignore_changes to work at all on these values, we will revert the
+		// ignored values once more.
+		// A nil schema is passed to processIgnoreChanges to indicate that we
+		// don't want to fixup a config value according to the schema when
+		// ignoring "all", rather we are reverting provider imposed changes.
+		plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal, nil)
+		diags = diags.Append(ignoreChangeDiags)
+		if ignoreChangeDiags.HasErrors() {
+			return nil, nil, keyData, diags
+		}
+	}
+
+	// Add the marks back to the planned new value -- this must happen after ignore changes
+	// have been processed
+	marks := combinePathValueMarks(unmarkedPaths, schema.ValueMarks(plannedNewVal, nil))
+	if len(marks) > 0 {
+		plannedNewVal = plannedNewVal.MarkWithPaths(marks)
+	}
+
+	// The test assertion error handling above could've changed the plannedNewVal
+	// so we should store the unmarked version before we go ahead and re-mark it again
+	unmarkedPlannedNewVal, _ = plannedNewVal.UnmarkDeep()
+
+	// The provider produces a list of paths to attributes whose changes mean
+	// that we must replace rather than update an existing remote object.
+	// However, we only need to do that if the identified attributes _have_
+	// actually changed -- particularly after we may have undone some of the
+	// changes in processIgnoreChanges -- so now we'll filter that list to
+	// include only where changes are detected.
+	reqRep := cty.NewPathSet()
+	if len(resp.RequiresReplace) > 0 {
+		for _, path := range resp.RequiresReplace {
+			if priorVal.IsNull() {
+				// If prior is null then we don't expect any RequiresReplace at all,
+				// because this is a Create action.
+				continue
+			}
+
+			priorChangedVal, priorPathDiags := hcl.ApplyPath(unmarkedPriorVal, path, nil)
+			plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
+			if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
+				// This means the path was invalid in both the prior and new
+				// values, which is an error with the provider itself.
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Provider produced invalid plan",
+					fmt.Sprintf(
+						"Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+						n.ResolvedProvider.Provider, n.Addr, path,
+					),
+				))
+				continue
+			}
+
+			// Make sure we have valid Values for both values.
+			// Note: if the opposing value was of the type
+			// cty.DynamicPseudoType, the type assigned here may not exactly
+			// match the schema. This is fine here, since we're only going to
+			// check for equality, but if the NullVal is to be used, we need to
+			// check the schema for the true type.
+			switch {
+			case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
+				// this should never happen without ApplyPath errors above
+				panic("requires replace path returned 2 nil values")
+			case priorChangedVal == cty.NilVal:
+				priorChangedVal = cty.NullVal(plannedChangedVal.Type())
+			case plannedChangedVal == cty.NilVal:
+				plannedChangedVal = cty.NullVal(priorChangedVal.Type())
+			}
+
+			// Unmark for this value for the equality test. If only sensitivity has changed,
+			// this does not require an Update or Replace
+			unmarkedPlannedChangedVal, _ := plannedChangedVal.UnmarkDeep()
+			eqV := unmarkedPlannedChangedVal.Equals(priorChangedVal)
+			if !eqV.IsKnown() || eqV.False() {
+				reqRep.Add(path)
+			}
+		}
+		if diags.HasErrors() {
+			return nil, nil, keyData, diags
+		}
+	}
+
+	// The user might also ask us to force replacing a particular resource
+	// instance, regardless of whether the provider thinks it needs replacing.
+	// For example, users typically do this if they learn a particular object
+	// has become degraded in an immutable infrastructure scenario and so
+	// replacing it with a new object is a viable repair path.
+	matchedForceReplace := false
+	for _, candidateAddr := range forceReplace {
+		if candidateAddr.Equal(n.Addr) {
+			matchedForceReplace = true
+			break
+		}
+
+		// For "force replace" purposes we require an exact resource instance
+		// address to match. If a user forgets to include the instance key
+		// for a multi-instance resource then it won't match here, but we
+		// have an earlier check in NodePlannableResource.Execute that should
+		// prevent us from getting here in that case.
+	}
+
+	// Unmark for this test for value equality.
+	eqV := unmarkedPlannedNewVal.Equals(unmarkedPriorVal)
+	eq := eqV.IsKnown() && eqV.True()
+
+	var action plans.Action
+	var actionReason plans.ResourceInstanceChangeActionReason
+	switch {
+	case priorVal.IsNull():
+		action = plans.Create
+	case eq && !matchedForceReplace:
+		action = plans.NoOp
+	case matchedForceReplace || !reqRep.Empty():
+		// If the user "forced replace" of this instance or if there are any
+		// "requires replace" paths left _after our filtering above_ then this
+		// is a replace action.
+		if createBeforeDestroy {
+			action = plans.CreateThenDelete
+		} else {
+			action = plans.DeleteThenCreate
+		}
+		switch {
+		case matchedForceReplace:
+			actionReason = plans.ResourceInstanceReplaceByRequest
+		case !reqRep.Empty():
+			actionReason = plans.ResourceInstanceReplaceBecauseCannotUpdate
+		}
+	default:
+		action = plans.Update
+		// "Delete" is never chosen here, because deletion plans are always
+		// created more directly elsewhere, such as in "orphan" handling.
+	}
+
+	if action.IsReplace() {
+		// In this strange situation we want to produce a change object that
+		// shows our real prior object but has a _new_ object that is built
+		// from a null prior object, since we're going to delete the one
+		// that has all the computed values on it.
+		//
+		// Therefore we'll ask the provider to plan again here, giving it
+		// a null object for the prior, and then we'll meld that with the
+		// _actual_ prior state to produce a correctly-shaped replace change.
+		// The resulting change should show any computed attributes changing
+		// from known prior values to unknown values, unless the provider is
+		// able to predict new values for any of these computed attributes.
+		nullPriorVal := cty.NullVal(schema.ImpliedType())
+
+		// Since there is no prior state to compare after replacement, we need
+		// a new unmarked config from our original with no ignored values.
+		unmarkedConfigVal := origConfigVal
+		if origConfigVal.ContainsMarked() {
+			unmarkedConfigVal, _ = origConfigVal.UnmarkDeep()
+		}
+
+		// create a new proposed value from the null state and the config
+		proposedNewVal = objchange.ProposedNew(schema, nullPriorVal, unmarkedConfigVal)
+
+		resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{
+			TypeName:         n.Addr.Resource.Resource.Type,
+			Config:           unmarkedConfigVal,
+			PriorState:       nullPriorVal,
+			ProposedNewState: proposedNewVal,
+			PriorPrivate:     plannedPrivate,
+			ProviderMeta:     metaConfigVal,
+		})
+		// We need to tread carefully here, since if there are any warnings
+		// in here they probably also came out of our previous call to
+		// PlanResourceChange above, and so we don't want to repeat them.
+		// Consequently, we break from the usual pattern here and only
+		// append these new diagnostics if there's at least one error inside.
+		if resp.Diagnostics.HasErrors() {
+			diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
+			return nil, nil, keyData, diags
+		}
+		plannedNewVal = resp.PlannedState
+		plannedPrivate = resp.PlannedPrivate
+
+		if len(unmarkedPaths) > 0 {
+			plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths)
+		}
+
+		for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Provider produced invalid plan",
+				fmt.Sprintf(
+					"Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+					n.ResolvedProvider.Provider, n.Addr, tfdiags.FormatError(err),
+				),
+			))
+		}
+		if diags.HasErrors() {
+			return nil, nil, keyData, diags
+		}
+	}
+
+	// If our prior value was tainted then we actually want this to appear
+	// as a replace change, even though so far we've been treating it as a
+	// create.
+	if action == plans.Create && !priorValTainted.IsNull() {
+		if createBeforeDestroy {
+			action = plans.CreateThenDelete
+		} else {
+			action = plans.DeleteThenCreate
+		}
+		priorVal = priorValTainted
+		actionReason = plans.ResourceInstanceReplaceBecauseTainted
+	}
+
+	// compare the marks between the prior and the new value, there may have been a change of sensitivity
+	// in the new value that requires an update
+	_, plannedNewValMarks := plannedNewVal.UnmarkDeepWithPaths()
+	_, priorValMarks := priorVal.UnmarkDeepWithPaths()
+
+	marksAreEqual := marksEqual(plannedNewValMarks, priorValMarks)
+
+	// If we plan to update sensitive paths from state,
+	// this is an Update action instead of a NoOp.
+	if action == plans.NoOp && !marksAreEqual {
+		action = plans.Update
+	}
+
+	// As a special case, if we have a previous diff (presumably from the plan
+	// phases, whereas we're now in the apply phase) and it was for a replace,
+	// we've already deleted the original object from state by the time we
+	// get here and so we would've ended up with a _create_ action this time,
+	// which we now need to paper over to get a result consistent with what
+	// we originally intended.
+	if plannedChange != nil {
+		prevChange := *plannedChange
+		if prevChange.Action.IsReplace() && action == plans.Create {
+			log.Printf("[TRACE] plan: %s treating Create change as %s change to match with earlier plan", n.Addr, prevChange.Action)
+			action = prevChange.Action
+			priorVal = prevChange.Before
+		}
+	}
+
+	// Call post-diff hook
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostDiff(n.Addr, states.CurrentGen, action, priorVal, plannedNewVal)
+	}))
+	if diags.HasErrors() {
+		return nil, nil, keyData, diags
+	}
+
+	// Update our return plan
+	plan := &plans.ResourceInstanceChange{
+		Addr:         n.Addr,
+		PrevRunAddr:  n.prevRunAddr(ctx),
+		Private:      plannedPrivate,
+		ProviderAddr: n.ResolvedProvider,
+		Change: plans.Change{
+			Action: action,
+			Before: priorVal,
+			// Pass the marked planned value through in our change
+			// to propagate through evaluation.
+			// Marks will be removed when encoding.
+			After:           plannedNewVal,
+			GeneratedConfig: n.generatedConfigHCL,
+		},
+		ActionReason:    actionReason,
+		RequiredReplace: reqRep,
+	}
+
+	// Update our return state
+	state := &states.ResourceInstanceObject{
+		// We use the special "planned" status here to note that this
+		// object's value is not yet complete. Objects with this status
+		// cannot be used during expression evaluation, so the caller
+		// must _also_ record the returned change in the active plan,
+		// which the expression evaluator will use in preference to this
+		// incomplete value recorded in the state.
+		Status:  states.ObjectPlanned,
+		Value:   plannedNewVal,
+		Private: plannedPrivate,
+	}
+
+	return plan, state, keyData, diags
+}
+
+// processIgnoreChanges applies the resource's ignore_changes settings,
+// returning a config value with ignored attribute changes reverted to their
+// prior values. A nil schema means "ignore all" should return the prior value
+// unfiltered (used to revert legacy-provider-imposed changes).
+func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
+	// ignore_changes only applies when an object already exists, since we
+	// can't ignore changes to a thing we've not created yet.
+	if prior.IsNull() {
+		return config, nil
+	}
+
+	ignoreChanges := traversalsToPaths(n.Config.Managed.IgnoreChanges)
+	ignoreAll := n.Config.Managed.IgnoreAllChanges
+
+	if len(ignoreChanges) == 0 && !ignoreAll {
+		return config, nil
+	}
+
+	if ignoreAll {
+		// Legacy providers need us to clean up their invalid plans and ensure
+		// no changes are passed through, but that also means making an invalid
+		// config with computed values. In that case we just don't supply a
+		// schema and return the prior val directly.
+		if schema == nil {
+			return prior, nil
+		}
+
+		// If we are trying to ignore all attribute changes, we must filter
+		// computed attributes out from the prior state to avoid sending them
+		// to the provider as if they were included in the configuration.
+		ret, _ := cty.Transform(prior, func(path cty.Path, v cty.Value) (cty.Value, error) {
+			attr := schema.AttributeByPath(path)
+			if attr != nil && attr.Computed && !attr.Optional {
+				return cty.NullVal(v.Type()), nil
+			}
+
+			return v, nil
+		})
+
+		return ret, nil
+	}
+
+	if prior.IsNull() || config.IsNull() {
+		// Ignore changes doesn't apply when we're creating for the first time.
+		// Proposed should never be null here, but if it is then we'll just let it be.
+		return config, nil
+	}
+
+	ret, diags := processIgnoreChangesIndividual(prior, config, ignoreChanges)
+
+	return ret, diags
+}
+
+// Convert the hcl.Traversal values we get from the configuration to the
+// cty.Path values we need to operate on the cty.Values
+func traversalsToPaths(traversals []hcl.Traversal) []cty.Path {
+	paths := make([]cty.Path, len(traversals))
+	for i, traversal := range traversals {
+		path := traversalToPath(traversal)
+		paths[i] = path
+	}
+	return paths
+}
+
+// traversalToPath converts a single hcl.Traversal into the equivalent
+// cty.Path. Only root, attribute, and index steps are supported; any other
+// step kind indicates invalid input and panics.
+func traversalToPath(traversal hcl.Traversal) cty.Path {
+	path := make(cty.Path, len(traversal))
+	for si, step := range traversal {
+		switch ts := step.(type) {
+		case hcl.TraverseRoot:
+			path[si] = cty.GetAttrStep{
+				Name: ts.Name,
+			}
+		case hcl.TraverseAttr:
+			path[si] = cty.GetAttrStep{
+				Name: ts.Name,
+			}
+		case hcl.TraverseIndex:
+			path[si] = cty.IndexStep{
+				Key: ts.Key,
+			}
+		default:
+			panic(fmt.Sprintf("unsupported traversal step %#v", step))
+		}
+	}
+	return path
+}
+
+// processIgnoreChangesIndividual reverts changes at the given ignore_changes
+// paths, returning a copy of config with the prior values restored at each
+// path where a difference was found.
+func processIgnoreChangesIndividual(prior, config cty.Value, ignoreChangesPath []cty.Path) (cty.Value, tfdiags.Diagnostics) {
+	type ignoreChange struct {
+		// Path is the full path, minus any trailing map index
+		path cty.Path
+		// Value is the value we are to retain at the above path. If there is a
+		// key value, this must be a map and the desired value will be at the
+		// key index.
+		value cty.Value
+		// Key is the index key if the ignored path ends in a map index.
+		key cty.Value
+	}
+	var ignoredValues []ignoreChange
+
+	// Find the actual changes first and store them in the ignoreChange struct.
+	// If the change was to a map value, and the key doesn't exist in the
+	// config, it would never be visited in the transform walk.
+	for _, icPath := range ignoreChangesPath {
+		key := cty.NullVal(cty.String)
+		// check for a map index, since maps are the only structure where we
+		// could have invalid path steps.
+		last, ok := icPath[len(icPath)-1].(cty.IndexStep)
+		if ok {
+			if last.Key.Type() == cty.String {
+				icPath = icPath[:len(icPath)-1]
+				key = last.Key
+			}
+		}
+
+		// The structure should have been validated already, and we already
+		// trimmed the trailing map index. Any other intermediate index error
+		// means we wouldn't be able to apply the value below, so no need to
+		// record this.
+		p, err := icPath.Apply(prior)
+		if err != nil {
+			continue
+		}
+		c, err := icPath.Apply(config)
+		if err != nil {
+			continue
+		}
+
+		// If this is a map, it is checking the entire map value for equality
+		// rather than the individual key. This means that the change is stored
+		// here even if our ignored key doesn't change. That is OK since it
+		// won't cause any changes in the transformation, but allows us to skip
+		// breaking up the maps and checking for key existence here too.
+		if !p.RawEquals(c) {
+			// there is a change to ignore at this path, store the prior value
+			ignoredValues = append(ignoredValues, ignoreChange{icPath, p, key})
+		}
+	}
+
+	if len(ignoredValues) == 0 {
+		return config, nil
+	}
+
+	ret, _ := cty.Transform(config, func(path cty.Path, v cty.Value) (cty.Value, error) {
+		// Easy path for when we are only matching the entire value. The only
+		// values we break up for inspection are maps.
+		if !v.Type().IsMapType() {
+			for _, ignored := range ignoredValues {
+				if path.Equals(ignored.path) {
+					return ignored.value, nil
+				}
+			}
+			return v, nil
+		}
+		// We now know this must be a map, so we need to accumulate the values
+		// key-by-key.
+
+		if !v.IsNull() && !v.IsKnown() {
+			// since v is not known, we cannot ignore individual keys
+			return v, nil
+		}
+
+		// The map values will remain as cty values, so we only need to store
+		// the marks from the outer map itself
+		v, vMarks := v.Unmark()
+
+		// The configMap is the current configuration value, which we will
+		// mutate based on the ignored paths and the prior map value.
+		var configMap map[string]cty.Value
+		switch {
+		case v.IsNull() || v.LengthInt() == 0:
+			configMap = map[string]cty.Value{}
+		default:
+			configMap = v.AsValueMap()
+		}
+
+		for _, ignored := range ignoredValues {
+			if !path.Equals(ignored.path) {
+				continue
+			}
+
+			if ignored.key.IsNull() {
+				// The map address is confirmed to match at this point,
+				// so if there is no key, we want the entire map and can
+				// stop accumulating values.
+				return ignored.value, nil
+			}
+			// Now we know we are ignoring a specific index of this map, so get
+			// the config map and modify, add, or remove the desired key.
+
+			// We also need to create a prior map, so we can check for
+			// existence while getting the value, because Value.Index will
+			// return null for a key with a null value and for a non-existent
+			// key.
+			var priorMap map[string]cty.Value
+
+			// We need to drop the marks from the ignored map for handling. We
+			// don't need to store these, as we now know the ignored value is
+			// only within the map, not the map itself.
+			ignoredVal, _ := ignored.value.Unmark()
+
+			switch {
+			case ignored.value.IsNull() || ignoredVal.LengthInt() == 0:
+				priorMap = map[string]cty.Value{}
+			default:
+				priorMap = ignoredVal.AsValueMap()
+			}
+
+			key := ignored.key.AsString()
+			priorElem, keep := priorMap[key]
+
+			switch {
+			case !keep:
+				// this didn't exist in the old map value, so we're keeping the
+				// "absence" of the key by removing it from the config
+				delete(configMap, key)
+			default:
+				configMap[key] = priorElem
+			}
+		}
+
+		var newVal cty.Value
+		switch {
+		case len(configMap) > 0:
+			newVal = cty.MapVal(configMap)
+		case v.IsNull():
+			// if the config value was null, and no values remain in the map,
+			// reset the value to null.
+ newVal = v + default: + newVal = cty.MapValEmpty(v.Type().ElementType()) + } + + if len(vMarks) > 0 { + newVal = newVal.WithMarks(vMarks) + } + + return newVal, nil + }) + return ret, nil +} + +type ProviderWithEncryption interface { + ReadDataSourceEncrypted(req providers.ReadDataSourceRequest, path addrs.AbsResourceInstance, enc encryption.Encryption) providers.ReadDataSourceResponse +} + +// readDataSource handles everything needed to call ReadDataSource on the provider. +// A previously evaluated configVal can be passed in, or a new one is generated +// from the resource configuration. +func (n *NodeAbstractResourceInstance) readDataSource(ctx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var newVal cty.Value + + config := *n.Config + + provider, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return newVal, diags + } + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return newVal, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return newVal, diags + } + + // Unmark before sending to provider, will re-mark before returning + var pvm []cty.PathValueMarks + configVal, pvm = configVal.UnmarkDeepWithPaths() + + log.Printf("[TRACE] readDataSource: Re-validating config for %s", n.Addr) + validateResp := provider.ValidateDataResourceConfig( + providers.ValidateDataResourceConfigRequest{ + TypeName: n.Addr.ContainingResource().Resource.Type, + Config: configVal, + }, + ) + diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) + if 
diags.HasErrors() {
+		return newVal, diags
+	}
+
+	// If we get down here then our configuration is complete and we're ready
+	// to actually call the provider to read the data.
+	log.Printf("[TRACE] readDataSource: %s configuration is complete, so reading from provider", n.Addr)
+
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreApply(n.Addr, states.CurrentGen, plans.Read, cty.NullVal(configVal.Type()), configVal)
+	}))
+	if diags.HasErrors() {
+		return newVal, diags
+	}
+
+	req := providers.ReadDataSourceRequest{
+		TypeName:     n.Addr.ContainingResource().Resource.Type,
+		Config:       configVal,
+		ProviderMeta: metaConfigVal,
+	}
+	var resp providers.ReadDataSourceResponse
+	if tfp, ok := provider.(ProviderWithEncryption); ok {
+		// Special case for terraform_remote_state
+		resp = tfp.ReadDataSourceEncrypted(req, n.Addr, ctx.GetEncryption())
+	} else {
+		resp = provider.ReadDataSource(req)
+	}
+	diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String()))
+	if diags.HasErrors() {
+		return newVal, diags
+	}
+	newVal = resp.State
+	if newVal == cty.NilVal {
+		// This can happen with incompletely-configured mocks. We'll allow it
+		// and treat it as an alias for a properly-typed null value.
+ newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + if diags.HasErrors() { + return newVal, diags + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, n.Addr, + ), + )) + } + + if !newVal.IsNull() && !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, n.Addr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. + // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. 
+ newVal = cty.UnknownAsNull(newVal) + } + + if len(pvm) > 0 { + newVal = newVal.MarkWithPaths(pvm) + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, states.CurrentGen, newVal, diags.Err()) + })) + + return newVal, diags +} + +func (n *NodeAbstractResourceInstance) providerMetas(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + metaConfigVal := cty.NullVal(cty.DynamicPseudoType) + + _, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider) + if err != nil { + return metaConfigVal, diags.Append(err) + } + if n.ProviderMetas != nil { + if m, ok := n.ProviderMetas[n.ResolvedProvider.Provider]; ok && m != nil { + // if the provider doesn't support this feature, throw an error + if providerSchema.ProviderMeta.Block == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ResolvedProvider.Provider.String()), + Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr.Resource), + Subject: &m.ProviderRange, + }) + } else { + var configDiags tfdiags.Diagnostics + metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, providerSchema.ProviderMeta.Block, nil, EvalDataForNoInstanceKey) + diags = diags.Append(configDiags) + } + } + } + return metaConfigVal, diags +} + +// planDataSource deals with the main part of the data resource lifecycle: +// either actually reading from the data source or generating a plan to do so. +// +// currentState is the current state for the data source, and the new state is +// returned. While data sources are read-only, we need to start with the prior +// state to determine if we have a change or not. If we needed to read a new +// value, but it still matches the previous state, then we can record a NoNop +// change. 
If the states don't match then we record a Read change so that the +// new value is applied to the state. +func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRuleSeverity tfdiags.Severity, skipPlanChanges bool) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var keyData instances.RepetitionData + var configVal cty.Value + + _, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, nil, keyData, diags.Append(err) + } + + config := *n.Config + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return nil, nil, keyData, diags + } + + objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) + + forEach, _ := evaluateForEachExpression(config.ForEach, ctx) + keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + checkDiags := evalCheckRules( + addrs.ResourcePrecondition, + n.Config.Preconditions, + ctx, n.Addr, keyData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + return nil, nil, keyData, diags // failed preconditions prevent further evaluation + } + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, keyData, diags + } + + check, nested := n.nestedInCheckBlock() + if nested { + // Going forward from this point, the only reason we will fail is + // that the data source fails to load its data. 
Normally, this would + // cancel the entire plan and this error message would bubble its way + // back up to the user. + // + // But, if we are in a check block then we don't want this data block to + // cause the plan to fail. We also need to report a status on the data + // block so the check processing later on knows whether to attempt to + // process the checks. Either we'll report the data block as failed + // if/when we load the data block later, or we want to report it as a + // success overall. + // + // Therefore, we create a deferred function here that will check if the + // status for the check has been updated yet, and if not we will set it + // to be StatusPass. The rest of this function will only update the + // status if it should be StatusFail. + defer func() { + status := ctx.Checks().ObjectCheckStatus(check.Addr().Absolute(n.Addr.Module)) + if status == checks.StatusUnknown { + ctx.Checks().ReportCheckResult(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, checks.StatusPass) + } + }() + } + + configKnown := configVal.IsWhollyKnown() + depsPending := n.dependenciesHavePendingChanges(ctx) + // If our configuration contains any unknown values, or we depend on any + // unknown values then we must defer the read to the apply phase by + // producing a "Read" change for this resource, and a placeholder value for + // it in the state. + if depsPending || !configKnown { + // We can't plan any changes if we're only refreshing, so the only + // value we can set here is whatever was in state previously. 
+ if skipPlanChanges { + plannedNewState := &states.ResourceInstanceObject{ + Value: priorVal, + Status: states.ObjectReady, + } + + return nil, plannedNewState, keyData, diags + } + + var reason plans.ResourceInstanceChangeActionReason + switch { + case !configKnown: + log.Printf("[TRACE] planDataSource: %s configuration not fully known yet, so deferring to apply phase", n.Addr) + reason = plans.ResourceInstanceReadBecauseConfigUnknown + case depsPending: + // NOTE: depsPending can be true at the same time as configKnown + // is false; configKnown takes precedence because it's more + // specific. + log.Printf("[TRACE] planDataSource: %s configuration is fully known, at least one dependency has changes pending", n.Addr) + reason = plans.ResourceInstanceReadBecauseDependencyPending + } + + unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths() + proposedNewVal := objchange.PlannedDataResourceObject(schema, unmarkedConfigVal) + proposedNewVal = proposedNewVal.MarkWithPaths(configMarkPaths) + + // Apply detects that the data source will need to be read by the After + // value containing unknowns from PlanDataResourceObject. + plannedChange := &plans.ResourceInstanceChange{ + Addr: n.Addr, + PrevRunAddr: n.prevRunAddr(ctx), + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, + ActionReason: reason, + } + + plannedNewState := &states.ResourceInstanceObject{ + Value: proposedNewVal, + Status: states.ObjectPlanned, + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Addr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) + })) + + return plannedChange, plannedNewState, keyData, diags + } + + // We have a complete configuration with no dependencies to wait on, so we + // can read the data source into the state. 
+ newVal, readDiags := n.readDataSource(ctx, configVal) + + // Now we've loaded the data, and diags tells us whether we were successful + // or not, we are going to create our plannedChange and our + // proposedNewState. + var plannedChange *plans.ResourceInstanceChange + var plannedNewState *states.ResourceInstanceObject + + // If we are a nested block, then we want to create a plannedChange that + // tells OpenTofu to reload the data block during the apply stage even if + // we managed to get the data now. + // Another consideration is that if we failed to load the data, we need to + // disguise that for a nested block. Nested blocks will report the overall + // check as failed but won't affect the rest of the plan operation or block + // an apply operation. + + if nested { + addr := check.Addr().Absolute(n.Addr.Module) + + // Let's fix things up for a nested data block. + // + // A nested data block doesn't error, and creates a planned change. So, + // if we encountered an error we'll tidy up newVal so it makes sense + // and handle the error. We'll also create the plannedChange if + // appropriate. + + if readDiags.HasErrors() { + // If we had errors, then we can cover that up by marking the new + // state as unknown. + unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths() + newVal = objchange.PlannedDataResourceObject(schema, unmarkedConfigVal) + newVal = newVal.MarkWithPaths(configMarkPaths) + + // We still want to report the check as failed even if we are still + // letting it run again during the apply stage. + ctx.Checks().ReportCheckFailure(addr, addrs.CheckDataResource, 0, readDiags.Err().Error()) + } + + // Any warning or error diagnostics we'll wrap with some special checks + // diagnostics. This is so we can identify them later, and so they'll + // only report as warnings. 
+ readDiags = tfdiags.OverrideAll(readDiags, tfdiags.Warning, func() tfdiags.DiagnosticExtraWrapper { + return &addrs.CheckRuleDiagnosticExtra{ + CheckRule: addrs.NewCheckRule(addr, addrs.CheckDataResource, 0), + } + }) + + if !skipPlanChanges { + // refreshOnly plans cannot produce planned changes, so we only do + // this if skipPlanChanges is false. + plannedChange = &plans.ResourceInstanceChange{ + Addr: n.Addr, + PrevRunAddr: n.prevRunAddr(ctx), + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: newVal, + }, + ActionReason: plans.ResourceInstanceReadBecauseCheckNested, + } + } + } + + diags = diags.Append(readDiags) + if !diags.HasErrors() { + // Finally, let's make our new state. + plannedNewState = &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + } + } + + return plannedChange, plannedNewState, keyData, diags +} + +// nestedInCheckBlock determines if this resource is nested in a Check config +// block. If so, this resource will be loaded during both plan and apply +// operations to make sure the check is always giving the latest information. +func (n *NodeAbstractResourceInstance) nestedInCheckBlock() (*configs.Check, bool) { + if n.Config.Container != nil { + check, ok := n.Config.Container.(*configs.Check) + return check, ok + } + return nil, false +} + +// dependenciesHavePendingChanges determines whether any managed resource the +// receiver depends on has a change pending in the plan, in which case we'd +// need to override the usual behavior of immediately reading from the data +// source where possible, and instead defer the read until the apply step. +func (n *NodeAbstractResourceInstance) dependenciesHavePendingChanges(ctx EvalContext) bool { + nModInst := n.Addr.Module + nMod := nModInst.Module() + + // Check and see if any depends_on dependencies have + // changes, since they won't show up as changes in the + // configuration. 
+ changes := ctx.Changes() + + depsToUse := n.dependsOn + + if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + if n.Config.HasCustomConditions() { + // For a data resource with custom conditions we need to look at + // the full set of resource dependencies -- both direct and + // indirect -- because an upstream update might be what's needed + // in order to make a condition pass. + depsToUse = n.Dependencies + } + } + + for _, d := range depsToUse { + if d.Resource.Mode == addrs.DataResourceMode { + // Data sources have no external side effects, so they pose a need + // to delay this read. If they do have a change planned, it must be + // because of a dependency on a managed resource, in which case + // we'll also encounter it in this list of dependencies. + continue + } + + for _, change := range changes.GetChangesForConfigResource(d) { + changeModInst := change.Addr.Module + changeMod := changeModInst.Module() + + if changeMod.Equal(nMod) && !changeModInst.Equal(nModInst) { + // Dependencies are tracked by configuration address, which + // means we may have changes from other instances of parent + // modules. The actual reference can only take effect within + // the same module instance, so skip any that aren't an exact + // match + continue + } + + if change != nil && change.Action != plans.NoOp { + return true + } + } + } + return false +} + +// apply deals with the main part of the data resource lifecycle: either +// actually reading from the data source or generating a plan to do so. 
+func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var keyData instances.RepetitionData + + _, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, keyData, diags.Append(err) + } + if planned != nil && planned.Action != plans.Read && planned.Action != plans.NoOp { + // If any other action gets in here then that's always a bug; this + // EvalNode only deals with reading. + diags = diags.Append(fmt.Errorf( + "invalid action %s for %s: only Read is supported (this is a bug in OpenTofu; please report it!)", + planned.Action, n.Addr, + )) + return nil, keyData, diags + } + + config := *n.Config + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return nil, keyData, diags + } + + forEach, _ := evaluateForEachExpression(config.ForEach, ctx) + keyData = EvalDataForInstanceKey(n.Addr.Resource.Key, forEach) + + checkDiags := evalCheckRules( + addrs.ResourcePrecondition, + n.Config.Preconditions, + ctx, n.Addr, keyData, + tfdiags.Error, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, states.CurrentGen, planned.Before, diags.Err()) + })) + return nil, keyData, diags // failed preconditions prevent further evaluation + } + + if planned.Action == plans.NoOp { + // If we didn't actually plan to read this then we have nothing more + // to do; we're evaluating this only for incidentals like the + // precondition/postcondition checks. 
+		return nil, keyData, diags
+	}
+
+	configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, keyData, diags
+	}
+
+	newVal, readDiags := n.readDataSource(ctx, configVal)
+	if check, nested := n.nestedInCheckBlock(); nested {
+		addr := check.Addr().Absolute(n.Addr.Module)
+
+		// We're just going to jump in here and hide away any errors for nested
+		// data blocks.
+		if readDiags.HasErrors() {
+			ctx.Checks().ReportCheckFailure(addr, addrs.CheckDataResource, 0, readDiags.Err().Error())
+			diags = diags.Append(tfdiags.OverrideAll(readDiags, tfdiags.Warning, func() tfdiags.DiagnosticExtraWrapper {
+				return &addrs.CheckRuleDiagnosticExtra{
+					CheckRule: addrs.NewCheckRule(addr, addrs.CheckDataResource, 0),
+				}
+			}))
+			return nil, keyData, diags
+		}
+
+		// Even though we know there are no errors here, we still want to
+		// identify these diags as having been generated from a check block.
+		readDiags = tfdiags.OverrideAll(readDiags, tfdiags.Warning, func() tfdiags.DiagnosticExtraWrapper {
+			return &addrs.CheckRuleDiagnosticExtra{
+				CheckRule: addrs.NewCheckRule(addr, addrs.CheckDataResource, 0),
+			}
+		})
+
+		// If no errors, just remember to report this as a success and continue
+		// as normal.
+		ctx.Checks().ReportCheckResult(addr, addrs.CheckDataResource, 0, checks.StatusPass)
+	}
+
+	diags = diags.Append(readDiags)
+	if readDiags.HasErrors() {
+		return nil, keyData, diags
+	}
+
+	state := &states.ResourceInstanceObject{
+		Value:  newVal,
+		Status: states.ObjectReady,
+	}
+
+	return state, keyData, diags
+}
+
+// evalApplyProvisioners determines if provisioners need to be run, and if so
+// executes the provisioners for a resource and returns an updated error if
+// provisioning fails.
+func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if state == nil { + log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) + return nil + } + if when == configs.ProvisionerWhenCreate && !createNew { + // If we're not creating a new resource, then don't run provisioners + log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) + return nil + } + if state.Status == states.ObjectTainted { + // No point in provisioning an object that is already tainted, since + // it's going to get recreated on the next apply anyway. + log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) + return nil + } + + provs := filterProvisioners(n.Config, when) + if len(provs) == 0 { + // We have no provisioners, so don't do anything + return nil + } + + // Call pre hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstance(n.Addr, state.Value) + })) + if diags.HasErrors() { + return diags + } + + // If there are no errors, then we append it to our output error + // if we have one, otherwise we just output it. + diags = diags.Append(n.applyProvisioners(ctx, state, when, provs)) + if diags.HasErrors() { + log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr) + return diags + } + + // Call post hook + return diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstance(n.Addr, state.Value) + })) +} + +// filterProvisioners filters the provisioners on the resource to only +// the provisioners specified by the "when" option. 
+func filterProvisioners(config *configs.Resource, when configs.ProvisionerWhen) []*configs.Provisioner { + // Fast path the zero case + if config == nil || config.Managed == nil { + return nil + } + + if len(config.Managed.Provisioners) == 0 { + return nil + } + + result := make([]*configs.Provisioner, 0, len(config.Managed.Provisioners)) + for _, p := range config.Managed.Provisioners { + if p.When == when { + result = append(result, p) + } + } + + return result +} + +// applyProvisioners executes the provisioners for a resource. +func (n *NodeAbstractResourceInstance) applyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, when configs.ProvisionerWhen, provs []*configs.Provisioner) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // this self is only used for destroy provisioner evaluation, and must + // refer to the last known value of the resource. + self := state.Value + + var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics) + switch when { + case configs.ProvisionerWhenDestroy: + evalScope = n.evalDestroyProvisionerConfig + default: + evalScope = n.evalProvisionerConfig + } + + // If there's a connection block defined directly inside the resource block + // then it'll serve as a base connection configuration for all of the + // provisioners. + var baseConn hcl.Body + if n.Config.Managed != nil && n.Config.Managed.Connection != nil { + baseConn = n.Config.Managed.Connection.Config + } + + for _, prov := range provs { + log.Printf("[TRACE] applyProvisioners: provisioning %s with %q", n.Addr, prov.Type) + + // Get the provisioner + provisioner, err := ctx.Provisioner(prov.Type) + if err != nil { + return diags.Append(err) + } + + schema, err := ctx.ProvisionerSchema(prov.Type) + if err != nil { + // This error probably won't be a great diagnostic, but in practice + // we typically catch this problem long before we get here, so + // it should be rare to return via this codepath. 
+ diags = diags.Append(err) + return diags + } + + config, configDiags := evalScope(ctx, prov.Config, self, schema) + diags = diags.Append(configDiags) + if diags.HasErrors() { + return diags + } + + // If the provisioner block contains a connection block of its own then + // it can override the base connection configuration, if any. + var localConn hcl.Body + if prov.Connection != nil { + localConn = prov.Connection.Config + } + + var connBody hcl.Body + switch { + case baseConn != nil && localConn != nil: + // Our standard merging logic applies here, similar to what we do + // with _override.tf configuration files: arguments from the + // base connection block will be masked by any arguments of the + // same name in the local connection block. + connBody = configs.MergeBodies(baseConn, localConn) + case baseConn != nil: + connBody = baseConn + case localConn != nil: + connBody = localConn + } + + // start with an empty connInfo + connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) + + if connBody != nil { + var connInfoDiags tfdiags.Diagnostics + connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema) + diags = diags.Append(connInfoDiags) + if diags.HasErrors() { + return diags + } + } + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstanceStep(n.Addr, prov.Type) + }) + if err != nil { + return diags.Append(err) + } + } + + // The output function + outputFn := func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Addr, prov.Type, msg) + return HookActionContinue, nil + }) + } + + // If our config or connection info contains any marked values, ensure + // those are stripped out before sending to the provisioner. Unlike + // resources, we have no need to capture the marked paths and reapply + // later. 
+ unmarkedConfig, configMarks := config.UnmarkDeep() + unmarkedConnInfo, _ := connInfo.UnmarkDeep() + + // Marks on the config might result in leaking sensitive values through + // provisioner logging, so we conservatively suppress all output in + // this case. This should not apply to connection info values, which + // provisioners ought not to be logging anyway. + if len(configMarks) > 0 { + outputFn = func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Addr, prov.Type, "(output suppressed due to sensitive value in config)") + return HookActionContinue, nil + }) + } + } + + output := CallbackUIOutput{OutputFn: outputFn} + resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: unmarkedConfig, + Connection: unmarkedConnInfo, + UIOutput: &output, + }) + applyDiags := resp.Diagnostics.InConfigBody(prov.Config, n.Addr.String()) + + // Call post hook + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstanceStep(n.Addr, prov.Type, applyDiags.Err()) + }) + + switch prov.OnFailure { + case configs.ProvisionerOnFailureContinue: + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) + } else { + // Maybe there are warnings that we still want to see + diags = diags.Append(applyDiags) + } + default: + diags = diags.Append(applyDiags) + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) + return diags + } + } + + // Deal with the hook + if hookErr != nil { + return diags.Append(hookErr) + } + } + + return diags +} + +func (n *NodeAbstractResourceInstance) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) + diags = 
diags.Append(forEachDiags) + + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + config, _, configDiags := ctx.EvaluateBlock(body, schema, n.ResourceInstanceAddr().Resource, keyData) + diags = diags.Append(configDiags) + + return config, diags +} + +// during destroy a provisioner can only evaluate within the scope of the parent resource +func (n *NodeAbstractResourceInstance) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // For a destroy-time provisioner forEach is intentionally nil here, + // which EvalDataForInstanceKey responds to by not populating EachValue + // in its result. That's okay because each.value is prohibited for + // destroy-time provisioners. + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, nil) + + evalScope := ctx.EvaluationScope(n.ResourceInstanceAddr().Resource, nil, keyData) + config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData) + diags = diags.Append(evalDiags) + + return config, diags +} + +// apply accepts an applyConfig, instead of using n.Config, so destroy plans can +// send a nil config. The keyData information can be empty if the config is +// nil, since it is only used to evaluate the configuration. +func (n *NodeAbstractResourceInstance) apply( + ctx EvalContext, + state *states.ResourceInstanceObject, + change *plans.ResourceInstanceChange, + applyConfig *configs.Resource, + keyData instances.RepetitionData, + createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + + var diags tfdiags.Diagnostics + if state == nil { + state = &states.ResourceInstanceObject{} + } + + if change.Action == plans.NoOp { + // If this is a no-op change then we don't want to actually change + // anything, so we'll just echo back the state we were given and + // let our internal checks and updates proceed. 
+ log.Printf("[TRACE] NodeAbstractResourceInstance.apply: skipping %s because it has no planned action", n.Addr) + return state, diags + } + + provider, providerSchema, err := n.getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, diags.Append(err) + } + schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) + return nil, diags + } + + log.Printf("[INFO] Starting apply for %s", n.Addr) + + configVal := cty.NullVal(cty.DynamicPseudoType) + if applyConfig != nil { + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags + } + } + + if !configVal.IsWhollyKnown() { + // We don't have a pretty format function for a path, but since this is + // such a rare error, we can just drop the raw GoString values in here + // to make sure we have something to debug with. 
+ var unknownPaths []string + cty.Transform(configVal, func(p cty.Path, v cty.Value) (cty.Value, error) { + if !v.IsKnown() { + unknownPaths = append(unknownPaths, fmt.Sprintf("%#v", p)) + } + return v, nil + }) + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Configuration contains unknown value", + fmt.Sprintf("configuration for %s still contains unknown values during apply (this is a bug in OpenTofu; please report it!)\n"+ + "The following paths in the resource configuration are unknown:\n%s", + n.Addr, + strings.Join(unknownPaths, "\n"), + ), + )) + return nil, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return nil, diags + } + + log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action) + + // If our config, Before or After value contain any marked values, + // ensure those are stripped out before sending + // this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() + unmarkedBefore, beforePaths := change.Before.UnmarkDeepWithPaths() + unmarkedAfter, afterPaths := change.After.UnmarkDeepWithPaths() + + // If we have an Update action, our before and after values are equal, + // and only differ on their sensitivity, the newVal is the after val + // and we should not communicate with the provider. We do need to update + // the state with this new value, to ensure the sensitivity change is + // persisted. 
+ eqV := unmarkedBefore.Equals(unmarkedAfter) + eq := eqV.IsKnown() && eqV.True() + if change.Action == plans.Update && eq && !marksEqual(beforePaths, afterPaths) { + // Copy the previous state, changing only the value + newState := &states.ResourceInstanceObject{ + CreateBeforeDestroy: state.CreateBeforeDestroy, + Dependencies: state.Dependencies, + Private: state.Private, + Status: state.Status, + Value: change.After, + } + return newState, diags + } + + resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: n.Addr.Resource.Resource.Type, + PriorState: unmarkedBefore, + Config: unmarkedConfigVal, + PlannedState: unmarkedAfter, + PlannedPrivate: change.Private, + ProviderMeta: metaConfigVal, + }) + + applyDiags := resp.Diagnostics + if applyConfig != nil { + applyDiags = applyDiags.InConfigBody(applyConfig.Config, n.Addr.String()) + } + diags = diags.Append(applyDiags) + + // Even if there are errors in the returned diagnostics, the provider may + // have returned a _partial_ state for an object that already exists but + // failed to fully configure, and so the remaining code must always run + // to completion but must be defensive against the new value being + // incomplete. + newVal := resp.NewState + + // If we have paths to mark, mark those on this new value + if len(afterPaths) > 0 { + newVal = newVal.MarkWithPaths(afterPaths) + } + + if newVal == cty.NilVal { + // Providers are supposed to return a partial new value even when errors + // occur, but sometimes they don't and so in that case we'll patch that up + // by just using the prior state, so we'll at least keep track of the + // object for the user to retry. + newVal = change.Before + + // As a special case, we'll set the new value to null if it looks like + // we were trying to execute a delete, because the provider in this case + // probably left the newVal unset intending it to be interpreted as "null". 
+ if change.After.IsNull() { + newVal = cty.NullVal(schema.ImpliedType()) + } + + if !diags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid nil value after apply for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.String(), n.Addr.String(), + ), + )) + } + } + + var conformDiags tfdiags.Diagnostics + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + conformDiags = conformDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the OpenTofu state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.String(), tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + diags = diags.Append(conformDiags) + if conformDiags.HasErrors() { + // Bail early in this particular case, because an object that doesn't + // conform to the schema can't be saved in the state anyway -- the + // serializer will reject it. + return nil, diags + } + + // After this point we have a type-conforming result object and so we + // must always run to completion to ensure it can be saved. If n.Error + // is set then we must not return a non-nil error, in order to allow + // evaluation to continue to a later point where our state object will + // be saved. + + // By this point there must not be any unknown values remaining in our + // object, because we've applied the change and we can't save unknowns + // in our persistent state. If any are present then we will indicate an + // error (which is always a bug in the provider) but we will also replace + // them with nulls so that we can successfully save the portions of the + // returned value that are known. 
+ if !newVal.IsWhollyKnown() { + // To generate better error messages, we'll go for a walk through the + // value and make a separate diagnostic for each unknown value we + // find. + cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { + if !val.IsKnown() { + pathStr := tfdiags.FormatCtyPath(path) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. OpenTofu will still save the other known object values in the state.", + n.Addr, pathStr, + ), + )) + } + return true, nil + }) + + // NOTE: This operation can potentially be lossy if there are multiple + // elements in a set that differ only by unknown values: after + // replacing with null these will be merged together into a single set + // element. Since we can only get here in the presence of a provider + // bug, we accept this because storing a result here is always a + // best-effort sort of thing. + newVal = cty.UnknownAsNull(newVal) + } + + if change.Action != plans.Delete && !diags.HasErrors() { + // Only values that were marked as unknown in the planned value are allowed + // to change during the apply operation. (We do this after the unknown-ness + // check above so that we also catch anything that became unknown after + // being known during plan.) + // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. 
+ if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ResolvedProvider.String(), n.Addr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. 
+ + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.Addr, n.ResolvedProvider.String(), tfdiags.FormatError(err), + ), + )) + } + } + } + } + + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like OpenTofu is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. OpenTofu will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr, + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", + change.Action, n.Addr, + ), + )) + } + } + + switch { + case diags.HasErrors() && newVal.IsNull(): + // Sometimes providers return a null value when an operation fails for + // some reason, but we'd rather keep the prior state so that the error + // can be corrected on a subsequent run. 
We must only do this for null
+		// new value though, or else we may discard partial updates the
+		// provider was able to complete. Otherwise, we'll continue using the
+		// prior state as the new value, making this effectively a no-op. If
+		// the item really _has_ been deleted then our next refresh will detect
+		// that and fix it up.
+		return state.DeepCopy(), diags
+
+	case diags.HasErrors() && !newVal.IsNull():
+		// if we have an error, make sure we restore the object status in the new state
+		newState := &states.ResourceInstanceObject{
+			Status:              state.Status,
+			Value:               newVal,
+			Private:             resp.Private,
+			CreateBeforeDestroy: createBeforeDestroy,
+		}
+
+		// if the resource was being deleted, the dependencies are not going to
+		// be recalculated and we need to restore those as well.
+		if change.Action == plans.Delete {
+			newState.Dependencies = state.Dependencies
+		}
+
+		return newState, diags
+
+	case !newVal.IsNull():
+		// Non error case with a new state
+		newState := &states.ResourceInstanceObject{
+			Status:              states.ObjectReady,
+			Value:               newVal,
+			Private:             resp.Private,
+			CreateBeforeDestroy: createBeforeDestroy,
+		}
+		return newState, diags
+
+	default:
+		// Non error case, where the object was deleted
+		return nil, diags
+	}
+}
+
+// prevRunAddr returns the address this resource instance had at the end of
+// the previous run, consulting the move-results table recorded in the
+// evaluation context.
+func (n *NodeAbstractResourceInstance) prevRunAddr(ctx EvalContext) addrs.AbsResourceInstance {
+	return resourceInstancePrevRunAddr(ctx, n.Addr)
+}
+
+// resourceInstancePrevRunAddr maps currentAddr back to its previous-run
+// address using the context's move results; if no "moved" block applies,
+// OldAddr returns the address unchanged.
+func resourceInstancePrevRunAddr(ctx EvalContext, currentAddr addrs.AbsResourceInstance) addrs.AbsResourceInstance {
+	table := ctx.MoveResults()
+	return table.OldAddr(currentAddr)
+}
+
+// getProvider returns the provider instance and schema for the given
+// resolved provider config. When this resource's configuration is overridden
+// (n.Config.IsOverridden, used by the test framework), the underlying
+// provider is wrapped so that this single resource is served the override
+// values instead of real provider results.
+func (n *NodeAbstractResourceInstance) getProvider(ctx EvalContext, addr addrs.AbsProviderConfig) (providers.Interface, providers.ProviderSchema, error) {
+	underlyingProvider, schema, err := getProvider(ctx, addr)
+	if err != nil {
+		return nil, providers.ProviderSchema{}, err
+	}
+
+	if n.Config == nil || !n.Config.IsOverridden {
+		return underlyingProvider, schema, nil
+	}
+
+	providerForTest := 
newProviderForTestWithSchema(underlyingProvider, schema) + + providerForTest.setSingleResource(n.Addr.Resource.Resource, n.Config.OverrideValues) + + return providerForTest, schema, nil +} diff --git a/pkg/tofu/node_resource_abstract_instance_test.go b/pkg/tofu/node_resource_abstract_instance_test.go new file mode 100644 index 00000000000..973df76f37a --- /dev/null +++ b/pkg/tofu/node_resource_abstract_instance_test.go @@ -0,0 +1,189 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestNodeAbstractResourceInstanceProvider(t *testing.T) { + tests := []struct { + Addr addrs.AbsResourceInstance + Config *configs.Resource + StoredProviderConfig addrs.AbsProviderConfig + Want addrs.Provider + }{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "null", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "terraform_remote_state", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Want: addrs.Provider{ + // As a special case, the type prefix "terraform_" maps to + // the builtin provider, not the default one. 
+ Hostname: addrs.BuiltInProviderHost, + Namespace: addrs.BuiltInProviderNamespace, + Type: "terraform", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Config: &configs.Resource{ + // Just enough configs.Resource for the Provider method. Not + // actually valid for general use. + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + // The config overrides the default behavior. + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "terraform_remote_state", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Config: &configs.Resource{ + // Just enough configs.Resource for the Provider method. Not + // actually valid for general use. + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + // The config overrides the default behavior. + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_resource", + Name: "baz", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Config: nil, + StoredProviderConfig: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "null", + }, + }, + // The stored provider config overrides the default behavior. 
+ Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "null", + }, + }, + } + + for _, test := range tests { + var name string + if test.Config != nil { + name = fmt.Sprintf("%s with configured %s", test.Addr, test.Config.Provider) + } else { + name = fmt.Sprintf("%s with no configuration", test.Addr) + } + t.Run(name, func(t *testing.T) { + node := &NodeAbstractResourceInstance{ + // Just enough NodeAbstractResourceInstance for the Provider + // function. (This would not be valid for some other functions.) + Addr: test.Addr, + NodeAbstractResource: NodeAbstractResource{ + Addr: test.Addr.ConfigResource(), + Config: test.Config, + storedProviderConfig: test.StoredProviderConfig, + }, + } + got := node.Provider() + if got != test.Want { + t.Errorf("wrong result\naddr: %s\nconfig: %#v\ngot: %s\nwant: %s", test.Addr, test.Config, got, test.Want) + } + }) + } +} + +func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) { + state := states.NewState() + ctx := new(MockEvalContext) + ctx.StateState = state.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + + obj := &states.ResourceInstanceObject{ + Value: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-abc123"), + }), + Status: states.ObjectReady, + } + + node := &NodeAbstractResourceInstance{ + Addr: mustResourceInstanceAddr("aws_instance.foo"), + // instanceState: obj, + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + ctx.ProviderProvider = mockProvider + ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema() + + err := node.writeResourceInstanceState(ctx, obj, workingState) + if err != nil { + t.Fatalf("unexpected error: 
%s", err.Error()) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = i-abc123 + provider = provider["registry.opentofu.org/hashicorp/aws"] + `) +} diff --git a/pkg/tofu/node_resource_abstract_test.go b/pkg/tofu/node_resource_abstract_test.go new file mode 100644 index 00000000000..2c7e8222ef7 --- /dev/null +++ b/pkg/tofu/node_resource_abstract_test.go @@ -0,0 +1,317 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +func TestNodeAbstractResourceProvider(t *testing.T) { + tests := []struct { + Addr addrs.ConfigResource + Config *configs.Resource + Want addrs.Provider + }{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }.InModule(addrs.RootModule), + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "hashicorp", + Type: "null", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "terraform_remote_state", + Name: "baz", + }.InModule(addrs.RootModule), + Want: addrs.Provider{ + // As a special case, the type prefix "terraform_" maps to + // the builtin provider, not the default one. + Hostname: addrs.BuiltInProviderHost, + Namespace: addrs.BuiltInProviderNamespace, + Type: "terraform", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }.InModule(addrs.RootModule), + Config: &configs.Resource{ + // Just enough configs.Resource for the Provider method. Not + // actually valid for general use. 
+ Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + // The config overrides the default behavior. + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "terraform_remote_state", + Name: "baz", + }.InModule(addrs.RootModule), + Config: &configs.Resource{ + // Just enough configs.Resource for the Provider method. Not + // actually valid for general use. + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + // The config overrides the default behavior. + Want: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + } + + for _, test := range tests { + var name string + if test.Config != nil { + name = fmt.Sprintf("%s with configured %s", test.Addr, test.Config.Provider) + } else { + name = fmt.Sprintf("%s with no configuration", test.Addr) + } + t.Run(name, func(t *testing.T) { + node := &NodeAbstractResource{ + // Just enough NodeAbstractResource for the Provider function. + // (This would not be valid for some other functions.) + Addr: test.Addr, + Config: test.Config, + } + got := node.Provider() + if got != test.Want { + t.Errorf("wrong result\naddr: %s\nconfig: %#v\ngot: %s\nwant: %s", test.Addr, test.Config, got, test.Want) + } + }) + } +} + +// Make sure ProvideBy returns the final resolved provider +func TestNodeAbstractResourceSetProvider(t *testing.T) { + node := &NodeAbstractResource{ + + // Just enough NodeAbstractResource for the Provider function. + // (This would not be valid for some other functions.) 
+ Addr: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "terraform_remote_state", + Name: "baz", + }.InModule(addrs.RootModule), + Config: &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "terraform_remote_state", + Name: "baz", + // Just enough configs.Resource for the Provider method. Not + // actually valid for general use. + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + }, + } + + p, exact := node.ProvidedBy() + if exact { + t.Fatalf("no exact provider should be found from this confniguration, got %q\n", p) + } + + // the implied non-exact provider should be "terraform" + lpc, ok := p.(addrs.LocalProviderConfig) + if !ok { + t.Fatalf("expected LocalProviderConfig, got %#v\n", p) + } + + if lpc.LocalName != "terraform" { + t.Fatalf("expected non-exact provider of 'terraform', got %q", lpc.LocalName) + } + + // now set a resolved provider for the resource + resolved := addrs.AbsProviderConfig{ + Provider: addrs.Provider{ + Hostname: addrs.DefaultProviderRegistryHost, + Namespace: "awesomecorp", + Type: "happycloud", + }, + Module: addrs.RootModule, + Alias: "test", + } + + node.SetProvider(resolved) + p, exact = node.ProvidedBy() + if !exact { + t.Fatalf("exact provider should be found, got %q\n", p) + } + + apc, ok := p.(addrs.AbsProviderConfig) + if !ok { + t.Fatalf("expected AbsProviderConfig, got %#v\n", p) + } + + if apc.String() != resolved.String() { + t.Fatalf("incorrect resolved config: got %#v, wanted %#v\n", apc, resolved) + } +} + +func TestNodeAbstractResource_ReadResourceInstanceState(t *testing.T) { + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + // This test does not configure the provider, but the mock provider will + // check that this was called and report errors. 
+ mockProvider.ConfigureProviderCalled = true + + tests := map[string]struct { + State *states.State + Node *NodeAbstractResource + ExpectedInstanceId string + }{ + "ReadState gets primary instance state": { + State: states.BuildState(func(s *states.SyncState) { + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + } + oneAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance) + s.SetResourceProvider(oneAddr, providerAddr) + s.SetResourceInstanceCurrent(oneAddr.Instance(addrs.NoKey), &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, providerAddr) + }), + Node: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("aws_instance.bar"), + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + ExpectedInstanceId: "i-abc123", + }, + } + + for k, test := range tests { + t.Run(k, func(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = test.State.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema() + + ctx.ProviderProvider = providers.Interface(mockProvider) + + got, readDiags := test.Node.readResourceInstanceState(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) + if readDiags.HasErrors() { + t.Fatalf("[%s] Got err: %#v", k, readDiags.Err()) + } + + expected := test.ExpectedInstanceId + + if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { + t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) + } + }) + } +} + +func TestNodeAbstractResource_ReadResourceInstanceStateDeposed(t *testing.T) { + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, 
+ }, + }, + }) + // This test does not configure the provider, but the mock provider will + // check that this was called and report errors. + mockProvider.ConfigureProviderCalled = true + + tests := map[string]struct { + State *states.State + Node *NodeAbstractResource + ExpectedInstanceId string + }{ + "ReadStateDeposed gets deposed instance": { + State: states.BuildState(func(s *states.SyncState) { + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + } + oneAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance) + s.SetResourceProvider(oneAddr, providerAddr) + s.SetResourceInstanceDeposed(oneAddr.Instance(addrs.NoKey), states.DeposedKey("00000001"), &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, providerAddr) + }), + Node: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("aws_instance.bar"), + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + ExpectedInstanceId: "i-abc123", + }, + } + for k, test := range tests { + t.Run(k, func(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = test.State.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema() + ctx.ProviderProvider = providers.Interface(mockProvider) + + key := states.DeposedKey("00000001") // shim from legacy state assigns 0th deposed index this key + + got, readDiags := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), key) + if readDiags.HasErrors() { + t.Fatalf("[%s] Got err: %#v", k, readDiags.Err()) + } + + expected := test.ExpectedInstanceId + + if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { + t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) + 
} + }) + } +} diff --git a/pkg/tofu/node_resource_apply.go b/pkg/tofu/node_resource_apply.go new file mode 100644 index 00000000000..f82eb302d66 --- /dev/null +++ b/pkg/tofu/node_resource_apply.go @@ -0,0 +1,61 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// nodeExpandApplyableResource handles the first layer of resource +// expansion during apply. Even though the resource instances themselves are +// already expanded from the plan, we still need to expand the +// NodeApplyableResource nodes into their respective modules. +type nodeExpandApplyableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeReferenceable = (*nodeExpandApplyableResource)(nil) + _ GraphNodeReferencer = (*nodeExpandApplyableResource)(nil) + _ GraphNodeConfigResource = (*nodeExpandApplyableResource)(nil) + _ GraphNodeAttachResourceConfig = (*nodeExpandApplyableResource)(nil) + _ graphNodeExpandsInstances = (*nodeExpandApplyableResource)(nil) + _ GraphNodeTargetable = (*nodeExpandApplyableResource)(nil) +) + +func (n *nodeExpandApplyableResource) expandsInstances() { +} + +func (n *nodeExpandApplyableResource) References() []*addrs.Reference { + refs := n.NodeAbstractResource.References() + + // The expand node needs to connect to the individual resource instances it + // references, but cannot refer to it's own instances without causing + // cycles. It would be preferable to entirely disallow self references + // without the `self` identifier, but those were allowed in provisioners + // for compatibility with legacy configuration. We also can't always just + // filter them out for all resource node types, because the only method we + // have for catching certain invalid configurations are the cycles that + // result from these inter-instance references. 
+ return filterSelfRefs(n.Addr.Resource, refs) +} + +func (n *nodeExpandApplyableResource) Name() string { + return n.NodeAbstractResource.Name() + " (expand)" +} + +func (n *nodeExpandApplyableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + expander := ctx.InstanceExpander() + moduleInstances := expander.ExpandModule(n.Addr.Module) + for _, module := range moduleInstances { + ctx = ctx.WithPath(module) + diags = diags.Append(n.writeResourceState(ctx, n.Addr.Resource.Absolute(module))) + } + + return diags +} diff --git a/pkg/tofu/node_resource_apply_instance.go b/pkg/tofu/node_resource_apply_instance.go new file mode 100644 index 00000000000..7c8cd748554 --- /dev/null +++ b/pkg/tofu/node_resource_apply_instance.go @@ -0,0 +1,492 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/plans/objchange" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodeApplyableResourceInstance represents a resource instance that is +// "applyable": it is ready to be applied and is represented by a diff. +// +// This node is for a specific instance of a resource. It will usually be +// accompanied in the graph by a NodeApplyableResource representing its +// containing resource, and should depend on that node to ensure that the +// state is properly prepared to receive changes to instances. 
type NodeApplyableResourceInstance struct {
	*NodeAbstractResourceInstance

	graphNodeDeposer // implementation of GraphNodeDeposerConfig

	// If this node is forced to be CreateBeforeDestroy, we need to record that
	// in the state too.
	ForceCreateBeforeDestroy bool

	// forceReplace are resource instance addresses where the user wants to
	// force generating a replace action. This set isn't pre-filtered, so
	// it might contain addresses that have nothing to do with the resource
	// that this node represents, which the node itself must therefore ignore.
	forceReplace []addrs.AbsResourceInstance
}

// Compile-time assertions that NodeApplyableResourceInstance implements the
// graph interfaces the apply graph builder expects of it.
var (
	_ GraphNodeConfigResource     = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeResourceInstance   = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeCreator            = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeReferencer         = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeDeposer            = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeExecutable         = (*NodeApplyableResourceInstance)(nil)
	_ GraphNodeAttachDependencies = (*NodeApplyableResourceInstance)(nil)
)

// CreateBeforeDestroy returns this node's CreateBeforeDestroy status.
+func (n *NodeApplyableResourceInstance) CreateBeforeDestroy() bool { + if n.ForceCreateBeforeDestroy { + return n.ForceCreateBeforeDestroy + } + + if n.Config != nil && n.Config.Managed != nil { + return n.Config.Managed.CreateBeforeDestroy + } + + return false +} + +func (n *NodeApplyableResourceInstance) ModifyCreateBeforeDestroy(v bool) error { + n.ForceCreateBeforeDestroy = v + return nil +} + +// GraphNodeCreator +func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeReferencer, overriding NodeAbstractResourceInstance +func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { + // Start with the usual resource instance implementation + ret := n.NodeAbstractResourceInstance.References() + + // Applying a resource must also depend on the destruction of any of its + // dependencies, since this may for example affect the outcome of + // evaluating an entire list of resources with "count" set (by reducing + // the count). + // + // However, we can't do this in create_before_destroy mode because that + // would create a dependency cycle. We make a compromise here of requiring + // changes to be updated across two applies in this case, since the first + // plan will use the old values. 
+ if !n.CreateBeforeDestroy() { + for _, ref := range ret { + switch tr := ref.Subject.(type) { + case addrs.ResourceInstance: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + case addrs.Resource: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + } + } + } + + return ret +} + +// GraphNodeAttachDependencies +func (n *NodeApplyableResourceInstance) AttachDependencies(deps []addrs.ConfigResource) { + n.Dependencies = deps +} + +// GraphNodeExecutable +func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + addr := n.ResourceInstanceAddr() + + if n.Config == nil { + // If there is no config, and there is no change, then we have nothing + // to do and the change was left in the plan for informational + // purposes only. + changes := ctx.Changes() + csrc := changes.GetResourceInstanceChange(n.ResourceInstanceAddr(), states.CurrentGen) + if csrc == nil || csrc.Action == plans.NoOp { + log.Printf("[DEBUG] NodeApplyableResourceInstance: No config or planned change recorded for %s", n.Addr) + return nil + } + + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource node has no configuration attached", + fmt.Sprintf( + "The graph node for %s has no configuration attached to it. 
This suggests a bug in OpenTofu's apply graph builder; please report it!", + addr, + ), + )) + return diags + } + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case addrs.ManagedResourceMode: + return n.managedResourceExecute(ctx) + case addrs.DataResourceMode: + return n.dataResourceExecute(ctx) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + change, err := n.readDiff(ctx, providerSchema) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + // Stop early if we don't actually have a diff + if change == nil { + return diags + } + if change.Action != plans.Read && change.Action != plans.NoOp { + diags = diags.Append(fmt.Errorf("nonsensical planned action %#v for %s; this is a bug in OpenTofu", change.Action, n.Addr)) + } + + // In this particular call to applyDataSource we include our planned + // change, which signals that we expect this read to complete fully + // with no unknown values; it'll produce an error if not. + state, repeatData, applyDiags := n.applyDataSource(ctx, change) + diags = diags.Append(applyDiags) + if diags.HasErrors() { + return diags + } + + if state != nil { + // If n.applyDataSource returned a nil state object with no accompanying + // errors then it determined that the given change doesn't require + // actually reading the data (e.g. because it was already read during + // the plan phase) and so we're only running through here to get the + // extra details like precondition/postcondition checks. 
+ diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState)) + if diags.HasErrors() { + return diags + } + } + + diags = diags.Append(n.writeChange(ctx, nil, "")) + + diags = diags.Append(updateStateHook(ctx)) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), + repeatData, + tfdiags.Error, + ) + diags = diags.Append(checkDiags) + + return diags +} + +func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var state *states.ResourceInstanceObject + var createBeforeDestroyEnabled bool + var deposedKey states.DeposedKey + + addr := n.ResourceInstanceAddr().Resource + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + // Get the saved diff for apply + diffApply, err := n.readDiff(ctx, providerSchema) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + // We don't want to do any destroys + // (these are handled by NodeDestroyResourceInstance instead) + if diffApply == nil || diffApply.Action == plans.Delete { + return diags + } + if diffApply.Action == plans.Read { + diags = diags.Append(fmt.Errorf("nonsensical planned action %#v for %s; this is a bug in OpenTofu", diffApply.Action, n.Addr)) + } + + destroy := (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) + // Get the stored action for CBD if we have a plan already + createBeforeDestroyEnabled = diffApply.Change.Action == plans.CreateThenDelete + + if destroy && 
n.CreateBeforeDestroy() { + createBeforeDestroyEnabled = true + } + + if createBeforeDestroyEnabled { + state := ctx.State() + if n.PreallocatedDeposedKey == states.NotDeposed { + deposedKey = state.DeposeResourceInstanceObject(n.Addr) + } else { + deposedKey = n.PreallocatedDeposedKey + state.DeposeResourceInstanceObjectForceKey(n.Addr, deposedKey) + } + log.Printf("[TRACE] managedResourceExecute: prior object for %s now deposed with key %s", n.Addr, deposedKey) + } + + state, readDiags := n.readResourceInstanceState(ctx, n.ResourceInstanceAddr()) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return diags + } + + // Get the saved diff + diff, err := n.readDiff(ctx, providerSchema) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + // Make a new diff, in case we've learned new values in the state + // during apply which we can now incorporate. + diffApply, _, repeatData, planDiags := n.plan(ctx, diff, state, false, n.forceReplace) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } + + // Compare the diffs + diags = diags.Append(n.checkPlannedChange(ctx, diff, diffApply, providerSchema)) + if diags.HasErrors() { + return diags + } + + diffApply = reducePlan(addr, diffApply, false) + // reducePlan may have simplified our planned change + // into a NoOp if it only requires destroying, since destroying + // is handled by NodeDestroyResourceInstance. If so, we'll + // still run through most of the logic here because we do still + // need to deal with other book-keeping such as marking the + // change as "complete", and running the author's postconditions. + + diags = diags.Append(n.preApplyHook(ctx, diffApply)) + if diags.HasErrors() { + return diags + } + + // If there is no change, there was nothing to apply, and we don't need to + // re-write the state, but we do need to re-evaluate postconditions. 
+ if diffApply.Action == plans.NoOp { + return diags.Append(n.managedResourcePostconditions(ctx, repeatData)) + } + + state, applyDiags := n.apply(ctx, state, diffApply, n.Config, repeatData, n.CreateBeforeDestroy()) + diags = diags.Append(applyDiags) + + // We clear the change out here so that future nodes don't see a change + // that is already complete. + err = n.writeChange(ctx, nil, "") + if err != nil { + return diags.Append(err) + } + + state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) + + if state != nil { + // dependencies are always updated to match the configuration during apply + state.Dependencies = n.Dependencies + } + err = n.writeResourceInstanceState(ctx, state, workingState) + if err != nil { + return diags.Append(err) + } + + // Run Provisioners + createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace()) + applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate) + // the provisioner errors count as port of the apply error, so we can bundle the diags + diags = diags.Append(applyProvisionersDiags) + + state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) + + err = n.writeResourceInstanceState(ctx, state, workingState) + if err != nil { + return diags.Append(err) + } + + if createBeforeDestroyEnabled && diags.HasErrors() { + if deposedKey == states.NotDeposed { + // This should never happen, and so it always indicates a bug. + // We should evaluate this node only if we've previously deposed + // an object as part of the same operation. + if diffApply != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Attempt to restore non-existent deposed object", + fmt.Sprintf( + "OpenTofu has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. 
This is a bug in OpenTofu; please report it!", + addr, diffApply.Action, + ), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Attempt to restore non-existent deposed object", + fmt.Sprintf( + "OpenTofu has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This is a bug in OpenTofu; please report it!", + addr, + ), + )) + } + } else { + restored := ctx.State().MaybeRestoreResourceInstanceDeposed(addr.Absolute(ctx.Path()), deposedKey) + if restored { + log.Printf("[TRACE] managedResourceExecute: %s deposed object %s was restored as the current object", addr, deposedKey) + } else { + log.Printf("[TRACE] managedResourceExecute: %s deposed object %s remains deposed", addr, deposedKey) + } + } + } + + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + diags = diags.Append(updateStateHook(ctx)) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + return diags.Append(n.managedResourcePostconditions(ctx, repeatData)) +} + +func (n *NodeApplyableResourceInstance) managedResourcePostconditions(ctx EvalContext, repeatData instances.RepetitionData) (diags tfdiags.Diagnostics) { + + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), repeatData, + tfdiags.Error, + ) + return diags.Append(checkDiags) +} + +// checkPlannedChange produces errors if the _actual_ expected value is not +// compatible with what was recorded in the plan. +// +// Errors here are most often indicative of a bug in the provider, so our error +// messages will report with that in mind. It's also possible that there's a bug +// in OpenTofu's Core's own "proposed new value" code in EvalDiff. 
+func (n *NodeApplyableResourceInstance) checkPlannedChange(ctx EvalContext, plannedChange, actualChange *plans.ResourceInstanceChange, providerSchema providers.ProviderSchema) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + addr := n.ResourceInstanceAddr().Resource + + schema, _ := providerSchema.SchemaForResourceAddr(addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support %q", addr.Resource.Type)) + return diags + } + + absAddr := addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] checkPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all. 
+ log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + + case (plannedChange.Action == plans.CreateThenDelete && actualChange.Action == plans.DeleteThenCreate) || + (plannedChange.Action == plans.DeleteThenCreate && actualChange.Action == plans.CreateThenDelete): + // If the order of replacement changed, then that is a bug in tofu + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "OpenTofu produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, the planned action changed from %s to %s.\n\nThis is a bug in OpenTofu and should be reported.", + absAddr, plannedChange.Action, actualChange.Action, + ), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ResolvedProvider.Provider.String(), + plannedChange.Action, actualChange.Action, + ), + )) + } + } + + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ResolvedProvider.Provider.String(), tfdiags.FormatError(err), + ), + )) + } + return diags +} + +// maybeTainted takes the resource addr, new value, planned change, and possible +// error from an apply operation and return a new instance object marked as +// 
tainted if it appears that a create operation has failed. +func maybeTainted(addr addrs.AbsResourceInstance, state *states.ResourceInstanceObject, change *plans.ResourceInstanceChange, err error) *states.ResourceInstanceObject { + if state == nil || change == nil || err == nil { + return state + } + if state.Status == states.ObjectTainted { + log.Printf("[TRACE] maybeTainted: %s was already tainted, so nothing to do", addr) + return state + } + if change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. + // If there _were_ changes prior to the error, it's the provider's + // responsibility to record the effect of those changes in the + // object value it returned. + log.Printf("[TRACE] maybeTainted: %s encountered an error during creation, so it is now marked as tainted", addr) + return state.AsTainted() + } + return state +} diff --git a/pkg/tofu/node_resource_apply_test.go b/pkg/tofu/node_resource_apply_test.go new file mode 100644 index 00000000000..e1492230aa0 --- /dev/null +++ b/pkg/tofu/node_resource_apply_test.go @@ -0,0 +1,74 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"testing"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/instances"
	"github.com/kubegems/opentofu/pkg/states"
)

// TestNodeExpandApplyableResourceExecute exercises
// nodeExpandApplyableResource.Execute both without configuration (which must
// leave the state empty) and with a minimal managed resource configuration
// (which must register the resource in the state).
func TestNodeExpandApplyableResourceExecute(t *testing.T) {
	state := states.NewState()
	t.Run("no config", func(t *testing.T) {
		ctx := &MockEvalContext{
			StateState:               state.SyncWrapper(),
			InstanceExpanderExpander: instances.NewExpander(),
		}

		node := &nodeExpandApplyableResource{
			NodeAbstractResource: &NodeAbstractResource{
				Addr:   mustConfigResourceAddr("test_instance.foo"),
				Config: nil,
			},
		}
		diags := node.Execute(ctx, walkApply)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %s", diags.Err())
		}

		// With no configuration the node should not have recorded anything.
		state.PruneResourceHusks()
		if !state.Empty() {
			t.Fatalf("expected no state, got:\n %s", state.String())
		}
	})

	t.Run("simple", func(t *testing.T) {
		ctx := &MockEvalContext{
			StateState:               state.SyncWrapper(),
			InstanceExpanderExpander: instances.NewExpander(),
		}

		node := &nodeExpandApplyableResource{
			NodeAbstractResource: &NodeAbstractResource{
				Addr: mustConfigResourceAddr("test_instance.foo"),
				Config: &configs.Resource{
					Mode: addrs.ManagedResourceMode,
					Type: "test_instance",
					Name: "foo",
				},
				ResolvedProvider: addrs.AbsProviderConfig{
					Provider: addrs.NewDefaultProvider("test"),
					Module:   addrs.RootModule,
				},
			},
		}
		diags := node.Execute(ctx, walkApply)
		if diags.HasErrors() {
			t.Fatalf("unexpected error: %s", diags.Err())
		}
		// Executing with config must have registered the resource in state.
		if state.Empty() {
			t.Fatal("expected resources in state, got empty state")
		}
		r := state.Resource(mustAbsResourceAddr("test_instance.foo"))
		if r == nil {
			t.Fatal("test_instance.foo not found in state")
		}
	})
}
diff --git a/pkg/tofu/node_resource_deposed.go b/pkg/tofu/node_resource_deposed.go
new file mode 100644
index 00000000000..1a79b00253e
--- /dev/null
+++ b/pkg/tofu/node_resource_deposed.go
@@
-0,0 +1,416 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert +// an abstract resource instance to a concrete one of some type that has +// an associated deposed object key. +type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex + +type GraphNodeDeposedResourceInstanceObject interface { + DeposedInstanceObjectKey() states.DeposedKey +} + +// NodePlanDeposedResourceInstanceObject represents deposed resource +// instance objects during plan. These are distinct from the primary object +// for each resource instance since the only valid operation to do with them +// is to destroy them. +// +// This node type is also used during the refresh walk to ensure that the +// record of a deposed object is up-to-date before we plan to destroy it. +type NodePlanDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey + + // skipRefresh indicates that we should skip refreshing individual instances + skipRefresh bool + + // skipPlanChanges indicates we should skip trying to plan change actions + // for any instances. + skipPlanChanges bool + + // EndpointsToRemove are resource instance addresses where the user wants to + // forget from the state. This set isn't pre-filtered, so + // it might contain addresses that have nothing to do with the resource + // that this node represents, which the node itself must therefore ignore. 
	EndpointsToRemove []addrs.ConfigRemovable
}

// Compile-time assertions for the graph interfaces this node implements.
var (
	_ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeConfigResource                = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeResourceInstance              = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeReferenceable                 = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeReferencer                    = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeExecutable                    = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeProviderConsumer              = (*NodePlanDeposedResourceInstanceObject)(nil)
	_ GraphNodeProvisionerConsumer           = (*NodePlanDeposedResourceInstanceObject)(nil)
)

// Name returns the instance address annotated with the deposed key, for use
// in graph traces and logs.
func (n *NodePlanDeposedResourceInstanceObject) Name() string {
	return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey)
}

// DeposedInstanceObjectKey implements GraphNodeDeposedResourceInstanceObject.
func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
	return n.DeposedKey
}

// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
	// Deposed objects don't participate in references.
	return nil
}

// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference {
	// We don't evaluate configuration for deposed objects, so they effectively
	// make no references.
	return nil
}

// GraphNodeEvalable impl.
+func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + log.Printf("[TRACE] NodePlanDeposedResourceInstanceObject: planning %s deposed object %s", n.Addr, n.DeposedKey) + + // Read the state for the deposed resource instance + state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + // Note any upgrades that readResourceInstanceState might've done in the + // prevRunState, so that it'll conform to current schema. + diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, prevRunState)) + if diags.HasErrors() { + return diags + } + // Also the refreshState, because that should still reflect schema upgrades + // even if not refreshing. + diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, refreshState)) + if diags.HasErrors() { + return diags + } + + // We don't refresh during the planDestroy walk, since that is only adding + // the destroy changes to the plan and the provider will not be configured + // at this point. The other nodes use separate types for plan and destroy, + // while deposed instances are always a destroy operation, so the logic + // here is a bit overloaded. + if !n.skipRefresh && op != walkPlanDestroy { + // Refresh this object even though it is going to be destroyed, in + // case it's already been deleted outside OpenTofu. If this is a + // normal plan, providers expect a Read request to remove missing + // resources from the plan before apply, and may not handle a missing + // resource during Delete correctly. 
If this is a simple refresh, + // OpenTofu is expected to remove the missing resource from the state + // entirely + refreshedState, refreshDiags := n.refresh(ctx, n.DeposedKey, state) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, refreshedState, refreshState)) + if diags.HasErrors() { + return diags + } + + // If we refreshed then our subsequent planning should be in terms of + // the new object, not the original object. + state = refreshedState + } + + if !n.skipPlanChanges { + var change *plans.ResourceInstanceChange + var planDiags tfdiags.Diagnostics + + shouldForget := false + + for _, etf := range n.EndpointsToRemove { + if etf.TargetContains(n.Addr) { + shouldForget = true + } + } + + if shouldForget { + change = n.planForget(ctx, state, n.DeposedKey) + } else { + change, planDiags = n.planDestroy(ctx, state, n.DeposedKey) + } + + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } + + // NOTE: We don't check prevent_destroy for deposed objects, even + // though we would do so here for a "current" object, because + // if we've reached a point where an object is already deposed then + // we've already planned and partially-executed a create_before_destroy + // replace and we would've checked prevent_destroy at that point. We're + // now just need to get the deposed object destroyed, because there + // should be a new object already serving as its replacement. + + diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey)) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, nil, workingState)) + } else { + // The working state should at least be updated with the result + // of upgrading and refreshing from above. 
+ diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, workingState)) + } + + return diags +} + +// NodeDestroyDeposedResourceInstanceObject represents deposed resource +// instance objects during apply. Nodes of this type are inserted by +// DiffTransformer when the planned changeset contains "delete" changes for +// deposed instance objects, and its only supported operation is to destroy +// and then forget the associated object. +type NodeDestroyDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeConfigResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeExecutable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) +) + +func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) +} + +func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. 
	return nil
}

// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference {
	// We don't evaluate configuration for deposed objects, so they effectively
	// make no references.
	return nil
}

// GraphNodeDestroyer: the destroy address is simply this node's own
// resource instance address.
func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance {
	addr := n.ResourceInstanceAddr()
	return &addr
}

// GraphNodeDestroyerCBD
func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool {
	// A deposed instance is always CreateBeforeDestroy by definition, since
	// we use deposed only to handle create-before-destroy.
	return true
}

// GraphNodeDestroyerCBD
func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error {
	if !v {
		// Should never happen: deposed instances are _always_ create_before_destroy.
		return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance")
	}
	return nil
}

// GraphNodeExecutable impl.
+func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + var change *plans.ResourceInstanceChange + + // Read the state for the deposed resource instance + state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) + if err != nil { + return diags.Append(err) + } + + if state == nil { + diags = diags.Append(fmt.Errorf("missing deposed state for %s (%s)", n.Addr, n.DeposedKey)) + return diags + } + + change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey) + diags = diags.Append(destroyPlanDiags) + if diags.HasErrors() { + return diags + } + + // Call pre-apply hook + diags = diags.Append(n.preApplyHook(ctx, change)) + if diags.HasErrors() { + return diags + } + + // we pass a nil configuration to apply because we are destroying + state, applyDiags := n.apply(ctx, state, change, nil, instances.RepetitionData{}, false) + diags = diags.Append(applyDiags) + // don't return immediately on errors, we need to handle the state + + // Always write the resource back to the state deposed. If it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. + writeDiags := n.writeResourceInstanceState(ctx, state) + diags.Append(writeDiags) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + + return diags.Append(updateStateHook(ctx)) +} + +// GraphNodeDeposer is an optional interface implemented by graph nodes that +// might create a single new deposed object for a specific associated resource +// instance, allowing a caller to optionally pre-allocate a DeposedKey for +// it. +type GraphNodeDeposer interface { + // SetPreallocatedDeposedKey will be called during graph construction + // if a particular node must use a pre-allocated deposed key if/when it + // "deposes" the current object of its associated resource instance. 
	SetPreallocatedDeposedKey(key states.DeposedKey)
}

// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer.
// Embed it in a node type to get automatic support for it, and then access
// the field PreallocatedDeposedKey to access any pre-allocated key.
type graphNodeDeposer struct {
	PreallocatedDeposedKey states.DeposedKey
}

// SetPreallocatedDeposedKey records the key chosen during graph construction.
func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) {
	n.PreallocatedDeposedKey = key
}

// writeResourceInstanceState persists (or, when obj is nil, removes) the
// deposed object for this node's instance under its deposed key.
// It refuses to run without a deposed key, since that would clobber the
// instance's current object.
func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject) error {
	absAddr := n.Addr
	key := n.DeposedKey
	state := ctx.State()

	if key == states.NotDeposed {
		// should never happen
		return fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in OpenTofu that should be reported", absAddr)
	}

	if obj == nil {
		// No need to encode anything: we'll just write it directly.
		state.SetResourceInstanceDeposed(absAddr, key, nil, n.ResolvedProvider)
		log.Printf("[TRACE] writeResourceInstanceStateDeposed: removing state object for %s deposed %s", absAddr, key)
		return nil
	}

	// Encoding requires the provider schema for this resource type.
	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	if err != nil {
		return err
	}

	schema, currentVersion := providerSchema.SchemaForResourceAddr(absAddr.ContainingResource().Resource)
	if schema == nil {
		// It shouldn't be possible to get this far in any real scenario
		// without a schema, but we might end up here in contrived tests that
		// fail to set up their world properly.
		return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
	}
	src, err := obj.Encode(schema.ImpliedType(), currentVersion)
	if err != nil {
		return fmt.Errorf("failed to encode %s in state: %w", absAddr, err)
	}

	log.Printf("[TRACE] writeResourceInstanceStateDeposed: writing state object for %s deposed %s", absAddr, key)
	state.SetResourceInstanceDeposed(absAddr, key, src, n.ResolvedProvider)
	return nil
}

// NodeForgetDeposedResourceInstanceObject represents deposed resource
// instance objects during apply. Nodes of this type are inserted by
// DiffTransformer when the planned changeset contains "forget" changes for
// deposed instance objects, and its only supported operation is to forget
// the associated object from the state.
type NodeForgetDeposedResourceInstanceObject struct {
	*NodeAbstractResourceInstance
	// DeposedKey identifies which deposed object of the instance to forget.
	DeposedKey states.DeposedKey
}

var (
	_ GraphNodeDeposedResourceInstanceObject = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeConfigResource                = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeResourceInstance              = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeReferenceable                 = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeReferencer                    = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeExecutable                    = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeProviderConsumer              = (*NodeForgetDeposedResourceInstanceObject)(nil)
	_ GraphNodeProvisionerConsumer           = (*NodeForgetDeposedResourceInstanceObject)(nil)
)

// Name returns the graph-node display name, including the deposed key.
func (n *NodeForgetDeposedResourceInstanceObject) Name() string {
	return fmt.Sprintf("%s (forget deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey)
}

func (n *NodeForgetDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
	return n.DeposedKey
}

// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
func (n *NodeForgetDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
	// Deposed objects don't participate in references.
	return nil
}

// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
func (n *NodeForgetDeposedResourceInstanceObject) References() []*addrs.Reference {
	// We don't evaluate configuration for deposed objects, so they effectively
	// make no references.
	return nil
}

// GraphNodeExecutable impl.
//
// Execute drops the deposed object from state without destroying the remote
// object. The read is only used to warn when there is nothing to forget.
func (n *NodeForgetDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	// Read the state for the deposed resource instance
	state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
	if err != nil {
		return diags.Append(err)
	}

	if state == nil {
		log.Printf("[WARN] NodeForgetDeposedResourceInstanceObject for %s (%s) with no state", n.Addr, n.DeposedKey)
	}

	contextState := ctx.State()
	contextState.ForgetResourceInstanceDeposed(n.Addr, n.DeposedKey)

	return diags.Append(updateStateHook(ctx))
}
diff --git a/pkg/tofu/node_resource_deposed_test.go b/pkg/tofu/node_resource_deposed_test.go
new file mode 100644
index 00000000000..615f8d16130
--- /dev/null
+++ b/pkg/tofu/node_resource_deposed_test.go
@@ -0,0 +1,298 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"testing"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/zclconf/go-cty/cty"
)

// TestNodePlanDeposedResourceInstanceObject_Execute verifies that planning a
// deposed object produces Delete normally, and Forget when the instance (or
// its containing module) is covered by a "removed" endpoint.
func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) {
	// NOTE(review): the `description` field is never populated or used below;
	// either fill it in per case or drop it.
	tests := []struct {
		description           string
		nodeAddress           string
		nodeEndpointsToRemove []addrs.ConfigRemovable
		wantAction            plans.Action
	}{
		{
			nodeAddress:           "test_instance.foo",
			nodeEndpointsToRemove: make([]addrs.ConfigRemovable, 0),
			wantAction:            plans.Delete,
		},
		{
			// Removed endpoint targets a different resource: still Delete.
			// NOTE(review): the interface{}(...).(addrs.ConfigRemovable)
			// conversions here are redundant if the concrete types already
			// implement ConfigRemovable — TODO confirm and simplify.
			nodeAddress: "test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(mustConfigResourceAddr("test_instance.bar")).(addrs.ConfigRemovable),
			},
			wantAction: plans.Delete,
		},
		{
			// Removed endpoint targets a different module: still Delete.
			nodeAddress: "test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable),
			},
			wantAction: plans.Delete,
		},
		{
			// Endpoint matches the resource itself: Forget.
			nodeAddress: "test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(mustConfigResourceAddr("test_instance.foo")).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
		{
			// Instance-keyed address still matched by the config address.
			nodeAddress: "test_instance.foo[1]",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(mustConfigResourceAddr("test_instance.foo")).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
		{
			nodeAddress: "module.boop.test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(mustConfigResourceAddr("module.boop.test_instance.foo")).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
		{
			// Module-instance keys are ignored when matching config addresses.
			nodeAddress: "module.boop[1].test_instance.foo[1]",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(mustConfigResourceAddr("module.boop.test_instance.foo")).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
		{
			// A removed endpoint for the whole module forgets its resources.
			nodeAddress: "module.boop.test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
		{
			nodeAddress: "module.boop[1].test_instance.foo",
			nodeEndpointsToRemove: []addrs.ConfigRemovable{
				interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable),
			},
			wantAction: plans.Forget,
		},
	}

	for _, test := range tests {
		deposedKey := states.NewDeposedKey()
		absResource := mustResourceInstanceAddr(test.nodeAddress)

		ctx, p := initMockEvalContext(test.nodeAddress, deposedKey)

		node := NodePlanDeposedResourceInstanceObject{
			NodeAbstractResourceInstance: &NodeAbstractResourceInstance{
				Addr: absResource,
				NodeAbstractResource: NodeAbstractResource{
					ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
				},
			},
			DeposedKey:        deposedKey,
			EndpointsToRemove: test.nodeEndpointsToRemove,
		}

		// Execute returns tfdiags.Diagnostics; a nil slice means no problems.
		err := node.Execute(ctx, walkPlan)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		if !p.UpgradeResourceStateCalled {
			t.Errorf("UpgradeResourceState wasn't called; should've been called to upgrade the previous run's object")
		}
		if !p.ReadResourceCalled {
			t.Errorf("ReadResource wasn't called; should've been called to refresh the deposed object")
		}

		change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey)
		if got, want := change.ChangeSrc.Action, test.wantAction; got != want {
			t.Fatalf("wrong planned action\ngot: %s\nwant: %s", got, want)
		}
	}
}

func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) {
	deposedKey := states.NewDeposedKey()
	// NOTE(review): this `state` is never connected to `ctx` —
	// initMockEvalContext builds its own state internally — so the
	// state.Empty() assertion below always passes trivially. TODO: assert
	// against the state actually used by the mock context.
	state := states.NewState()
	absResourceAddr := "test_instance.foo"
	ctx, _ := initMockEvalContext(absResourceAddr, deposedKey)

	absResource := mustResourceInstanceAddr(absResourceAddr)
	node := NodeDestroyDeposedResourceInstanceObject{
		NodeAbstractResourceInstance: &NodeAbstractResourceInstance{
			Addr: absResource,
			NodeAbstractResource: NodeAbstractResource{
				ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
			},
		},
		DeposedKey: deposedKey,
	}
	err := node.Execute(ctx, walkApply)

	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if !state.Empty() {
		t.Fatalf("resources left in state after destroy")
	}
}

// TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState
// checks that a non-nil object is encoded and stored under the deposed key.
func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *testing.T) {
	state := states.NewState()
	ctx := new(MockEvalContext)
	ctx.StateState = state.SyncWrapper()
	ctx.PathPath = addrs.RootModuleInstance
	mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"id": {
				Type:     cty.String,
				Optional: true,
			},
		},
	})
	ctx.ProviderProvider = mockProvider
	ctx.ProviderSchemaSchema = mockProvider.GetProviderSchema()

	obj := &states.ResourceInstanceObject{
		Value: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("i-abc123"),
		}),
		Status: states.ObjectReady,
	}
	node := &NodeDestroyDeposedResourceInstanceObject{
		NodeAbstractResourceInstance: &NodeAbstractResourceInstance{
			NodeAbstractResource: NodeAbstractResource{
				ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`),
			},
			Addr: mustResourceInstanceAddr("aws_instance.foo"),
		},
		DeposedKey: states.NewDeposedKey(),
	}
	err := node.writeResourceInstanceState(ctx, obj)
	if err != nil {
		t.Fatalf("unexpected error: %s", err.Error())
	}

	checkStateString(t, state, `
aws_instance.foo: (1 deposed)
  ID = 
  provider = provider["registry.opentofu.org/hashicorp/aws"]
  Deposed ID 1 = i-abc123
	`)
}

// Destroying a deposed object with no corresponding state must fail.
func TestNodeDestroyDeposedResourceInstanceObject_ExecuteMissingState(t *testing.T) {
	p := simpleMockProvider()
	ctx := &MockEvalContext{
		StateState:           states.NewState().SyncWrapper(),
		ProviderProvider:     simpleMockProvider(),
		ProviderSchemaSchema: p.GetProviderSchema(),
		ChangesChanges:       plans.NewChanges().SyncWrapper(),
	}

	node := NodeDestroyDeposedResourceInstanceObject{
		NodeAbstractResourceInstance: &NodeAbstractResourceInstance{
			Addr: mustResourceInstanceAddr("test_object.foo"),
			NodeAbstractResource: NodeAbstractResource{
				ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
			},
		},
		DeposedKey: states.NewDeposedKey(),
	}
	err := node.Execute(ctx, walkApply)

	if err == nil {
		t.Fatal("expected error")
	}
}

func TestNodeForgetDeposedResourceInstanceObject_Execute(t *testing.T) {
	deposedKey := states.NewDeposedKey()
	// NOTE(review): same issue as the destroy test above — `state` is not the
	// state inside `ctx`, so the Empty() check below cannot fail.
	state := states.NewState()
	absResourceAddr := "test_instance.foo"
	ctx, _ := initMockEvalContext(absResourceAddr, deposedKey)

	absResource := mustResourceInstanceAddr(absResourceAddr)
	node := NodeForgetDeposedResourceInstanceObject{
		NodeAbstractResourceInstance: &NodeAbstractResourceInstance{
			Addr: absResource,
			NodeAbstractResource: NodeAbstractResource{
				ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
			},
		},
		DeposedKey: deposedKey,
	}
	err := node.Execute(ctx, walkApply)

	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	if !state.Empty() {
		t.Fatalf("resources left in state after forget")
	}
}

// initMockEvalContext builds a MockEvalContext whose state contains one
// tainted deposed object at resourceAddrs under deposedKey, backed by a mock
// "test" provider with a minimal test_instance schema.
func initMockEvalContext(resourceAddrs string, deposedKey states.DeposedKey) (*MockEvalContext, *MockProvider) {
	state := states.NewState()
	absResource := mustResourceInstanceAddr(resourceAddrs)

	// Ensure the containing module instance exists before writing into it.
	if !absResource.Module.Module().Equal(addrs.RootModule) {
		state.EnsureModule(addrs.RootModuleInstance.Child(absResource.Module[0].Name, absResource.Module[0].InstanceKey))
	}

	state.Module(absResource.Module).SetResourceInstanceDeposed(
		absResource.Resource,
		deposedKey,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectTainted,
			AttrsJSON: []byte(`{"id":"bar"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)

	schema := providers.ProviderSchema{
		ResourceTypes: map[string]providers.Schema{
			"test_instance": {
				Block: &configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"id": {
							Type:     cty.String,
							Computed: true,
						},
					},
				},
			},
		},
	}

	p := testProvider("test")
	// NOTE(review): ConfigureProvider's response (and any diagnostics in it)
	// is discarded here — acceptable for a mock, but worth confirming.
	p.ConfigureProvider(providers.ConfigureProviderRequest{})
	p.GetProviderSchemaResponse = &schema

	p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{
		UpgradedState: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("bar"),
		}),
	}
	return &MockEvalContext{
		PrevRunStateState:    state.DeepCopy().SyncWrapper(),
		RefreshStateState:    state.DeepCopy().SyncWrapper(),
		StateState:           state.SyncWrapper(),
		ProviderProvider:     p,
		ProviderSchemaSchema: schema,
		ChangesChanges:       plans.NewChanges().SyncWrapper(),
	}, p
}
diff --git a/pkg/tofu/node_resource_destroy.go b/pkg/tofu/node_resource_destroy.go
new file mode 100644
index 00000000000..bc1e780a172
--- /dev/null
+++ b/pkg/tofu/node_resource_destroy.go
@@ -0,0 +1,239 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"

	"github.com/kubegems/opentofu/pkg/instances"
	"github.com/kubegems/opentofu/pkg/plans"
	"github.com/kubegems/opentofu/pkg/tfdiags"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/states"
)

// NodeDestroyResourceInstance represents a resource instance that is to be
// destroyed.
type NodeDestroyResourceInstance struct {
	*NodeAbstractResourceInstance

	// If DeposedKey is set to anything other than states.NotDeposed then
	// this node destroys a deposed object of the associated instance
	// rather than its current object.
	DeposedKey states.DeposedKey
}

var (
	_ GraphNodeModuleInstance      = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeConfigResource      = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeResourceInstance    = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeDestroyer           = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeDestroyerCBD        = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeReferenceable       = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeReferencer          = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeExecutable          = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeProviderConsumer    = (*NodeDestroyResourceInstance)(nil)
	_ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil)
)

// Name returns the graph-node display name, tagging deposed destroys.
func (n *NodeDestroyResourceInstance) Name() string {
	if n.DeposedKey != states.NotDeposed {
		return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey)
	}
	return n.ResourceInstanceAddr().String() + " (destroy)"
}

// ProvidedBy reports the provider configuration this node needs; data
// resources are only removed from state on destroy, so they need no
// configured provider at all.
func (n *NodeDestroyResourceInstance) ProvidedBy() (addr addrs.ProviderConfig, exact bool) {
	if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode {
		// indicate that this node does not require a configured provider
		return nil, true
	}
	return n.NodeAbstractResourceInstance.ProvidedBy()
}

// GraphNodeDestroyer
func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
	addr := n.ResourceInstanceAddr()
	return &addr
}

// GraphNodeDestroyerCBD
func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool {
	// State takes precedence during destroy.
	// If the resource was removed, there is no config to check.
	// If CBD was forced from descendent, it should be saved in the state
	// already.
	if s := n.instanceState; s != nil {
		if s.Current != nil {
			return s.Current.CreateBeforeDestroy
		}
	}

	// Fall back to configuration when there is no recorded state value.
	if n.Config != nil && n.Config.Managed != nil {
		return n.Config.Managed.CreateBeforeDestroy
	}

	return false
}

// GraphNodeDestroyerCBD
//
// ModifyCreateBeforeDestroy is a no-op here: the CBD decision is taken from
// state/config in CreateBeforeDestroy above.
func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error {
	return nil
}

// GraphNodeReferenceable, overriding NodeAbstractResource
//
// Destroy nodes advertise phase-specific addresses so that destroy-time
// references are distinguished from create/update references.
func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
	normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs()
	destroyAddrs := make([]addrs.Referenceable, len(normalAddrs))

	phaseType := addrs.ResourceInstancePhaseDestroy
	if n.CreateBeforeDestroy() {
		phaseType = addrs.ResourceInstancePhaseDestroyCBD
	}

	for i, normalAddr := range normalAddrs {
		switch ta := normalAddr.(type) {
		case addrs.Resource:
			destroyAddrs[i] = ta.Phase(phaseType)
		case addrs.ResourceInstance:
			destroyAddrs[i] = ta.Phase(phaseType)
		default:
			destroyAddrs[i] = normalAddr
		}
	}

	return destroyAddrs
}

// GraphNodeReferencer, overriding NodeAbstractResource
//
// A destroy node only references what its destroy-time provisioners need.
func (n *NodeDestroyResourceInstance) References() []*addrs.Reference {
	// If we have a config, then we need to include destroy-time dependencies
	if c := n.Config; c != nil && c.Managed != nil {
		var result []*addrs.Reference

		// We include conn info and config for destroy time provisioners
		// as dependencies that we have.
		for _, p := range c.Managed.Provisioners {
			schema := n.ProvisionerSchemas[p.Type]

			if p.When == configs.ProvisionerWhenDestroy {
				if p.Connection != nil {
					result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...)
				}
				result = append(result, ReferencesFromConfig(p.Config, schema)...)
			}
		}

		return result
	}

	return nil
}

// GraphNodeExecutable
//
// Execute dispatches on resource mode: managed resources run the full
// destroy flow, data resources are simply dropped from state.
func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	addr := n.ResourceInstanceAddr()

	// Eval info is different depending on what kind of resource this is
	switch addr.Resource.Resource.Mode {
	case addrs.ManagedResourceMode:
		return n.managedResourceExecute(ctx)
	case addrs.DataResourceMode:
		return n.dataResourceExecute(ctx)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}

// managedResourceExecute reads the planned destroy change, runs destroy-time
// provisioners, applies the destroy, and finalizes state, invoking the
// pre/post-apply hooks around the operation.
func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	addr := n.ResourceInstanceAddr()

	// Get our state
	is := n.instanceState
	if is == nil {
		log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr)
	}

	// These vars are updated through pointers at various stages below.
	var changeApply *plans.ResourceInstanceChange
	var state *states.ResourceInstanceObject

	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}

	changeApply, err = n.readDiff(ctx, providerSchema)
	diags = diags.Append(err)
	if changeApply == nil || diags.HasErrors() {
		return diags
	}

	changeApply = reducePlan(addr.Resource, changeApply, true)
	// reducePlan may have simplified our planned change
	// into a NoOp if it does not require destroying.
	if changeApply == nil || changeApply.Action == plans.NoOp {
		return diags
	}

	state, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}

	// Exit early if the state object is null after reading the state
	if state == nil || state.Value.IsNull() {
		return diags
	}

	diags = diags.Append(n.preApplyHook(ctx, changeApply))
	if diags.HasErrors() {
		return diags
	}

	// Run destroy provisioners if not tainted
	if state.Status != states.ObjectTainted {
		applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy)
		diags = diags.Append(applyProvisionersDiags)
		// keep the diags separate from the main set until we handle the cleanup

		if diags.HasErrors() {
			// If we have a provisioning error, then we just call
			// the post-apply hook now.
			diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
			return diags
		}
	}

	// Managed resources need to be destroyed, while data sources
	// are only removed from state.
	// we pass a nil configuration to apply because we are destroying
	s, d := n.apply(ctx, state, changeApply, nil, instances.RepetitionData{}, false)
	state, diags = s, diags.Append(d)
	// we don't return immediately here on error, so that the state can be
	// finalized

	err = n.writeResourceInstanceState(ctx, state, workingState)
	if err != nil {
		return diags.Append(err)
	}

	// create the err value for postApplyHook
	diags = diags.Append(n.postApplyHook(ctx, state, diags.Err()))
	diags = diags.Append(updateStateHook(ctx))
	return diags
}

// dataResourceExecute removes the data resource's state entry; there is no
// remote object to destroy for a data source.
func (n *NodeDestroyResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	log.Printf("[TRACE] NodeDestroyResourceInstance: removing state object for %s", n.Addr)
	ctx.State().SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider)
	return diags.Append(updateStateHook(ctx))
}
diff --git a/pkg/tofu/node_resource_forget.go b/pkg/tofu/node_resource_forget.go
new file mode 100644
index 00000000000..8e389390336
--- /dev/null
+++ b/pkg/tofu/node_resource_forget.go
@@ -0,0 +1,75 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"

	"github.com/kubegems/opentofu/pkg/tfdiags"

	"github.com/kubegems/opentofu/pkg/states"
)

// NodeForgetResourceInstance represents a resource instance that is to be
// forgotten from the state.
type NodeForgetResourceInstance struct {
	*NodeAbstractResourceInstance

	// If DeposedKey is set to anything other than states.NotDeposed then
	// this node forgets a deposed object of the associated instance
	// rather than its current object.
	DeposedKey states.DeposedKey
}

var (
	_ GraphNodeModuleInstance      = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeConfigResource      = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeResourceInstance    = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeReferenceable       = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeReferencer          = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeExecutable          = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeProviderConsumer    = (*NodeForgetResourceInstance)(nil)
	_ GraphNodeProvisionerConsumer = (*NodeForgetResourceInstance)(nil)
)

// Name returns the graph-node display name, tagging deposed forgets.
func (n *NodeForgetResourceInstance) Name() string {
	if n.DeposedKey != states.NotDeposed {
		return fmt.Sprintf("%s (forget deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey)
	}
	return n.ResourceInstanceAddr().String() + " (forget)"
}

// GraphNodeExecutable
//
// Execute removes every object of this instance from state without
// destroying anything remotely.
func (n *NodeForgetResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	addr := n.ResourceInstanceAddr()

	// Get our state
	is := n.instanceState
	if is == nil {
		log.Printf("[WARN] NodeForgetResourceInstance for %s with no state", addr)
	}

	// NOTE(review): this var declaration is immediately followed by a `:=`
	// that reuses `state`; the explicit declaration is redundant.
	var state *states.ResourceInstanceObject

	state, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}

	// Exit early if the state object is null after reading the state
	if state == nil || state.Value.IsNull() {
		return diags
	}

	contextState := ctx.State()
	contextState.ForgetResourceInstanceAll(n.Addr)

	diags = diags.Append(updateStateHook(ctx))

	return diags
}
diff --git a/pkg/tofu/node_resource_import.go b/pkg/tofu/node_resource_import.go
new file mode 100644
index 00000000000..cc2d6d43bc0
--- /dev/null
+++ b/pkg/tofu/node_resource_import.go
@@ -0,0 +1,281 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/providers"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// graphNodeImportState is the graph node for the "import" operation: it asks
// the provider to import an ID and then expands into sub-nodes that refresh
// and record each imported object.
type graphNodeImportState struct {
	Addr             addrs.AbsResourceInstance // Addr is the resource address to import into
	ID               string                    // ID is the ID to import as
	ProviderAddr     addrs.AbsProviderConfig   // Provider address given by the user, or implied by the resource type
	ResolvedProvider addrs.AbsProviderConfig   // provider node address after resolution

	Schema        *configschema.Block // Schema for processing the configuration body
	SchemaVersion uint64              // Schema version of "Schema", as decided by the provider
	Config        *configs.Resource   // Config is the resource in the config

	// states holds the provider's import results; populated by Execute and
	// consumed by DynamicExpand.
	states []providers.ImportedResource
}

var (
	_ GraphNodeModulePath        = (*graphNodeImportState)(nil)
	_ GraphNodeExecutable        = (*graphNodeImportState)(nil)
	_ GraphNodeProviderConsumer  = (*graphNodeImportState)(nil)
	_ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil)
)

func (n *graphNodeImportState) Name() string {
	return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID)
}

// GraphNodeProviderConsumer
func (n *graphNodeImportState) ProvidedBy() (addrs.ProviderConfig, bool) {
	// We assume that n.ProviderAddr has been properly populated here.
	// It's the responsibility of the code creating a graphNodeImportState
	// to populate this, possibly by calling DefaultProviderConfig() on the
	// resource address to infer an implied provider from the resource type
	// name.
	return n.ProviderAddr, false
}

// GraphNodeProviderConsumer
func (n *graphNodeImportState) Provider() addrs.Provider {
	// We assume that n.ProviderAddr has been properly populated here.
	// It's the responsibility of the code creating a graphNodeImportState
	// to populate this, possibly by calling DefaultProviderConfig() on the
	// resource address to infer an implied provider from the resource type
	// name.
	return n.ProviderAddr.Provider
}

// GraphNodeProviderConsumer
func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) {
	n.ResolvedProvider = addr
}

// GraphNodeModuleInstance
func (n *graphNodeImportState) Path() addrs.ModuleInstance {
	return n.Addr.Module
}

// GraphNodeModulePath
func (n *graphNodeImportState) ModulePath() addrs.Module {
	return n.Addr.Module.Module()
}

// GraphNodeExecutable impl.
//
// Execute calls the provider's ImportResourceState for n.ID and stores the
// resulting objects in n.states, wrapping the call in pre/post import hooks.
func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	// Reset our states
	n.states = nil

	provider, _, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}

	// import state
	absAddr := n.Addr.Resource.Absolute(ctx.Path())

	// Call pre-import hook
	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PreImportState(absAddr, n.ID)
	}))
	if diags.HasErrors() {
		return diags
	}

	resp := provider.ImportResourceState(providers.ImportResourceStateRequest{
		TypeName: n.Addr.Resource.Resource.Type,
		ID:       n.ID,
	})
	diags = diags.Append(resp.Diagnostics)
	if diags.HasErrors() {
		return diags
	}

	imported := resp.ImportedResources
	for _, obj := range imported {
		log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName)
	}
	n.states = imported

	// Call post-import hook
	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PostImportState(absAddr, imported)
	}))
	return diags
}

// GraphNodeDynamicExpandable impl.
//
// We use DynamicExpand as a way to generate the subgraph of refreshes
// and state inserts we need to do for our import state. Since they're new
// resources they don't depend on anything else and refreshes are isolated
// so this is nearly a perfect use case for dynamic expand.
func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
	var diags tfdiags.Diagnostics

	g := &Graph{Path: ctx.Path()}

	// nameCounter is used to de-dup names in the state.
	nameCounter := make(map[string]int)

	// Compile the list of addresses that we'll be inserting into the state.
	// We do this ahead of time so we can verify that we aren't importing
	// something that already exists.
	// NOTE(review): this local slice shadows the imported `addrs` package for
	// the remainder of the function (the RHS still resolves to the package,
	// since the new identifier is only in scope after the declaration).
	addrs := make([]addrs.AbsResourceInstance, len(n.states))
	for i, state := range n.states {
		addr := n.Addr
		if t := state.TypeName; t != "" {
			addr.Resource.Resource.Type = t
		}

		// Determine if we need to suffix the name to de-dup
		key := addr.String()
		count, ok := nameCounter[key]
		if ok {
			count++
			addr.Resource.Resource.Name += fmt.Sprintf("-%d", count)
		}
		nameCounter[key] = count

		// Add it to our list
		addrs[i] = addr
	}

	// Verify that all the addresses are clear
	state := ctx.State()
	for _, addr := range addrs {
		existing := state.ResourceInstance(addr)
		if existing != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Resource already managed by OpenTofu",
				fmt.Sprintf("OpenTofu is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr),
			))
			continue
		}
	}
	if diags.HasErrors() {
		// Bail out early, then.
		return nil, diags.Err()
	}

	// For each of the states, we add a node to handle the refresh/add to state.
	// "n.states" is populated by our own Execute with the result of
	// ImportState. Since DynamicExpand is always called after Execute, this is
	// safe.
	for i, state := range n.states {
		g.Add(&graphNodeImportStateSub{
			TargetAddr:       addrs[i],
			State:            state,
			ResolvedProvider: n.ResolvedProvider,
			Schema:           n.Schema,
			SchemaVersion:    n.SchemaVersion,
			Config:           n.Config,
		})
	}

	addRootNodeToGraph(g)

	// Done!
	return g, diags.Err()
}

// graphNodeImportStateSub is the sub-node of graphNodeImportState
// and is part of the subgraph. This node is responsible for refreshing
// and adding a resource to the state once it is imported.
type graphNodeImportStateSub struct {
	TargetAddr       addrs.AbsResourceInstance
	State            providers.ImportedResource
	ResolvedProvider addrs.AbsProviderConfig

	Schema        *configschema.Block // Schema for processing the configuration body
	SchemaVersion uint64              // Schema version of "Schema", as decided by the provider
	Config        *configs.Resource   // Config is the resource in the config

}

var (
	_ GraphNodeModuleInstance = (*graphNodeImportStateSub)(nil)
	_ GraphNodeExecutable     = (*graphNodeImportStateSub)(nil)
)

func (n *graphNodeImportStateSub) Name() string {
	return fmt.Sprintf("import %s result", n.TargetAddr)
}

func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance {
	return n.TargetAddr.Module
}

// GraphNodeExecutable impl.
// Execute refreshes a single imported object, verifies the remote object
// actually exists, re-applies any configuration-derived marks (e.g.
// sensitivity), and writes the result into the working state.
func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	// If the Ephemeral type isn't set, then it is an error
	if n.State.TypeName == "" {
		diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()))
		return diags
	}

	state := n.State.AsInstanceObject()

	// Refresh
	riNode := &NodeAbstractResourceInstance{
		Addr: n.TargetAddr,
		NodeAbstractResource: NodeAbstractResource{
			ResolvedProvider: n.ResolvedProvider,
		},
	}
	state, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state)
	diags = diags.Append(refreshDiags)
	if diags.HasErrors() {
		return diags
	}

	// Verify the existence of the imported resource
	if state.Value.IsNull() {
		// NOTE(review): this inner `var diags` shadows the named return, so
		// any warnings accumulated above are dropped by the return below —
		// TODO confirm whether that is intentional.
		var diags tfdiags.Diagnostics
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Cannot import non-existent remote object",
			fmt.Sprintf(
				"While attempting to import an existing object to %q, "+
					"the provider detected that no object exists with the given id. "+
					"Only pre-existing objects can be imported; check that the id "+
					"is correct and that it is associated with the provider's "+
					"configured region or endpoint, or use \"tofu apply\" to "+
					"create a new remote object for this resource.",
				n.TargetAddr,
			),
		))
		return diags
	}

	// Insert marks from configuration
	if n.Config != nil {
		// Since the import command allows importing a resource with an incomplete configuration, we ignore diagnostics here
		valueWithConfigurationSchemaMarks, _, _ := ctx.EvaluateBlock(n.Config.Config, n.Schema, nil, EvalDataForNoInstanceKey)

		// Merge marks carried on the refreshed state with marks implied by
		// the configuration, then re-mark the state value with the union.
		_, stateValueMarks := state.Value.UnmarkDeepWithPaths()
		_, valueWithConfigurationSchemaMarksPaths := valueWithConfigurationSchemaMarks.UnmarkDeepWithPaths()
		combined := combinePathValueMarks(stateValueMarks, valueWithConfigurationSchemaMarksPaths)
		state.Value = state.Value.MarkWithPaths(combined)
	}

	diags = diags.Append(riNode.writeResourceInstanceState(ctx, state, workingState))
	return diags
}
diff --git a/pkg/tofu/node_resource_plan.go b/pkg/tofu/node_resource_plan.go
new file mode 100644
index 00000000000..a31b5694c05
--- /dev/null
+++ b/pkg/tofu/node_resource_plan.go
@@ -0,0 +1,432 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"strings"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/dag"
	"github.com/kubegems/opentofu/pkg/states"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// nodeExpandPlannableResource represents an addrs.ConfigResource and implements
// DynamicExpand to a subgraph containing all of the addrs.AbsResourceInstance
// resulting from both the containing module and resource-specific expansion.
+type nodeExpandPlannableResource struct {
+	*NodeAbstractResource
+
+	// ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD
+	// during graph construction, if dependencies require us to force this
+	// on regardless of what the configuration says.
+	ForceCreateBeforeDestroy *bool
+
+	// skipRefresh indicates that we should skip refreshing individual instances
+	skipRefresh bool
+
+	// preDestroyRefresh presumably marks the refresh walk that precedes a
+	// destroy plan; it is propagated to each per-instance node — TODO confirm.
+	preDestroyRefresh bool
+
+	// skipPlanChanges indicates we should skip trying to plan change actions
+	// for any instances.
+	skipPlanChanges bool
+
+	// forceReplace are resource instance addresses where the user wants to
+	// force generating a replace action. This set isn't pre-filtered, so
+	// it might contain addresses that have nothing to do with the resource
+	// that this node represents, which the node itself must therefore ignore.
+	forceReplace []addrs.AbsResourceInstance
+
+	// We attach dependencies to the Resource during refresh, since the
+	// instances are instantiated during DynamicExpand.
+	// FIXME: These would be better off converted to a generic Set data
+	// structure in the future, as we need to compare for equality and take the
+	// union of multiple groups of dependencies.
+	dependencies []addrs.ConfigResource
+}
+
+var (
+	_ GraphNodeDestroyerCBD         = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeDynamicExpandable    = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeReferenceable        = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeReferencer           = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeConfigResource       = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeAttachResourceConfig = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeAttachDependencies   = (*nodeExpandPlannableResource)(nil)
+	_ GraphNodeTargetable           = (*nodeExpandPlannableResource)(nil)
+	_ graphNodeExpandsInstances     = (*nodeExpandPlannableResource)(nil)
+)
+
+// Name returns the abstract resource's name annotated to show this is the
+// pre-expansion node.
+func (n *nodeExpandPlannableResource) Name() string {
+	return n.NodeAbstractResource.Name() + " (expand)"
+}
+
+// expandsInstances is a marker method implementing graphNodeExpandsInstances.
+func (n *nodeExpandPlannableResource) expandsInstances() {
+}
+
+// GraphNodeAttachDependencies
+func (n *nodeExpandPlannableResource) AttachDependencies(deps []addrs.ConfigResource) {
+	n.dependencies = deps
+}
+
+// GraphNodeDestroyerCBD
+func (n *nodeExpandPlannableResource) CreateBeforeDestroy() bool {
+	// A value forced during graph construction overrides the configuration.
+	if n.ForceCreateBeforeDestroy != nil {
+		return *n.ForceCreateBeforeDestroy
+	}
+
+	// If we have no config, we just assume no
+	if n.Config == nil || n.Config.Managed == nil {
+		return false
+	}
+
+	return n.Config.Managed.CreateBeforeDestroy
+}
+
+// GraphNodeDestroyerCBD
+func (n *nodeExpandPlannableResource) ModifyCreateBeforeDestroy(v bool) error {
+	n.ForceCreateBeforeDestroy = &v
+	return nil
+}
+
+// DynamicExpand implements GraphNodeDynamicExpandable, producing a subgraph
+// with one node per resource instance: orphans found in state plus all
+// instances implied by the current configuration's expansion.
+func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	var g Graph
+
+	expander := ctx.InstanceExpander()
+	moduleInstances := expander.ExpandModule(n.Addr.Module)
+
+	// Lock the state while we inspect it
+	state := ctx.State().Lock()
+
+	var orphans []*states.Resource
+	for _, res := range state.Resources(n.Addr) {
+		found := false
+		for _, m := range moduleInstances {
+			if m.Equal(res.Addr.Module) {
+				found = true
+				break
+			}
+		}
+		// The module instance of the resource in the state doesn't exist
+		// in the current config, so this whole resource is orphaned.
+		if !found {
+			orphans = append(orphans, res)
+		}
+	}
+
+	// We'll no longer use the state directly here, and the other functions
+	// we'll call below may use it so we'll release the lock.
+	state = nil
+	ctx.State().Unlock()
+
+	// The concrete resource factory we'll use for orphans
+	concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+		a.Schema = n.Schema
+		a.ProvisionerSchemas = n.ProvisionerSchemas
+		a.ProviderMetas = n.ProviderMetas
+		a.Dependencies = n.dependencies
+
+		return &NodePlannableResourceInstanceOrphan{
+			NodeAbstractResourceInstance: a,
+			skipRefresh:                  n.skipRefresh,
+			skipPlanChanges:              n.skipPlanChanges,
+		}
+	}
+
+	for _, res := range orphans {
+		for key := range res.Instances {
+			addr := res.Addr.Instance(key)
+			abs := NewNodeAbstractResourceInstance(addr)
+			abs.AttachResourceState(res)
+			n := concreteResourceOrphan(abs)
+			g.Add(n)
+		}
+	}
+
+	// Resolve addresses and IDs of all import targets that originate from import blocks
+	// We do it here before expanding the resources in the modules, to avoid running this resolution multiple times
+	importResolver := ctx.ImportResolver()
+	var diags tfdiags.Diagnostics
+	for _, importTarget := range n.importTargets {
+		if importTarget.IsFromImportBlock() {
+			err := importResolver.ExpandAndResolveImport(importTarget, ctx)
+			diags = diags.Append(err)
+		}
+	}
+
+	// The above dealt with the expansion of the containing module, so now
+	// we need to deal with the expansion of the resource itself across all
+	// instances of the module.
+	//
+	// We'll gather up all of the leaf instances we learn about along the way
+	// so that we can inform the checks subsystem of which instances it should
+	// be expecting check results for, below.
+	instAddrs := addrs.MakeSet[addrs.Checkable]()
+	for _, module := range moduleInstances {
+		resAddr := n.Addr.Resource.Absolute(module)
+		err := n.expandResourceInstances(ctx, resAddr, &g, instAddrs)
+		diags = diags.Append(err)
+	}
+	if diags.HasErrors() {
+		return nil, diags.ErrWithWarnings()
+	}
+
+	// If this is a resource that participates in custom condition checks
+	// (i.e. it has preconditions or postconditions) then the check state
+	// wants to know the addresses of the checkable objects so that it can
+	// treat them as unknown status if we encounter an error before actually
+	// visiting the checks.
+	if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
+		checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, instAddrs)
+	}
+
+	addRootNodeToGraph(&g)
+
+	return &g, diags.ErrWithWarnings()
+}
+
+// expandResourceInstances calculates the dynamic expansion for the resource
+// itself in the context of a particular module instance.
+//
+// It has several side-effects:
+// - Adds a node to Graph g for each leaf resource instance it discovers, whether present or orphaned.
+// - Registers the expansion of the resource in the "expander" object embedded inside EvalContext ctx.
+// - Adds each present (non-orphaned) resource instance address to instAddrs (guaranteed to always be addrs.AbsResourceInstance, despite being declared as addrs.Checkable).
+//
+// After calling this for each of the module instances the resource appears
+// within, the caller must register the final superset instAddrs with the
+// checks subsystem so that it knows the fully expanded set of checkable
+// object instances for this resource instance.
+func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error {
+	var diags tfdiags.Diagnostics
+
+	// The rest of our work here needs to know which module instance it's
+	// working in, so that it can evaluate expressions in the appropriate scope.
+	moduleCtx := globalCtx.WithPath(resAddr.Module)
+
+	// writeResourceState is responsible for informing the expander of what
+	// repetition mode this resource has, which allows expander.ExpandResource
+	// to work below.
+	moreDiags := n.writeResourceState(moduleCtx, resAddr)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return diags.ErrWithWarnings()
+	}
+
+	// Before we expand our resource into potentially many resource instances,
+	// we'll verify that any mention of this resource in n.forceReplace is
+	// consistent with the repetition mode of the resource. In other words,
+	// we're aiming to catch a situation where naming a particular resource
+	// instance would require an instance key but the given address has none.
+	expander := moduleCtx.InstanceExpander()
+	instanceAddrs := expander.ExpandResource(resAddr)
+
+	// If there's a number of instances other than 1 then we definitely need
+	// an index.
+	mustHaveIndex := len(instanceAddrs) != 1
+	// If there's only one instance then we might still need an index, if the
+	// instance address has one.
+	if len(instanceAddrs) == 1 && instanceAddrs[0].Resource.Key != addrs.NoKey {
+		mustHaveIndex = true
+	}
+	if mustHaveIndex {
+		for _, candidateAddr := range n.forceReplace {
+			if candidateAddr.Resource.Key == addrs.NoKey {
+				if n.Addr.Resource.Equal(candidateAddr.Resource.Resource) {
+					switch {
+					case len(instanceAddrs) == 0:
+						// In this case there _are_ no instances to replace, so
+						// there isn't any alternative address for us to suggest.
+						diags = diags.Append(tfdiags.Sourceless(
+							tfdiags.Warning,
+							"Incompletely-matched force-replace resource instance",
+							fmt.Sprintf(
+								"Your force-replace request for %s doesn't match any resource instances because this resource doesn't have any instances.",
+								candidateAddr,
+							),
+						))
+					case len(instanceAddrs) == 1:
+						diags = diags.Append(tfdiags.Sourceless(
+							tfdiags.Warning,
+							"Incompletely-matched force-replace resource instance",
+							fmt.Sprintf(
+								"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of the single declared instance, use the following option instead:\n  -replace=%q",
+								candidateAddr, instanceAddrs[0],
+							),
+						))
+					default:
+						var possibleValidOptions strings.Builder
+						for _, addr := range instanceAddrs {
+							fmt.Fprintf(&possibleValidOptions, "\n  -replace=%q", addr)
+						}
+
+						diags = diags.Append(tfdiags.Sourceless(
+							tfdiags.Warning,
+							"Incompletely-matched force-replace resource instance",
+							fmt.Sprintf(
+								"Your force-replace request for %s doesn't match any resource instances because it lacks an instance key.\n\nTo force replacement of particular instances, use one or more of the following options instead:%s",
+								candidateAddr, possibleValidOptions.String(),
+							),
+						))
+					}
+				}
+			}
+		}
+	}
+	// NOTE: The actual interpretation of n.forceReplace to produce replace
+	// actions is in the per-instance function we're about to call, because
+	// we need to evaluate it on a per-instance basis.
+
+	for _, addr := range instanceAddrs {
+		// If this resource is participating in the "checks" mechanism then our
+		// caller will need to know all of our expanded instance addresses as
+		// checkable object instances.
+		// (NOTE: instAddrs probably already has other instance addresses in it
+		// from earlier calls to this function with different resource addresses,
+		// because its purpose is to aggregate them all together into a single set.)
+		instAddrs.Add(addr)
+	}
+
+	// Our graph builder mechanism expects to always be constructing new
+	// graphs rather than adding to existing ones, so we'll first
+	// construct a subgraph just for this individual module's instances and
+	// then we'll steal all of its nodes and edges to incorporate into our
+	// main graph which contains all of the resource instances together.
+	instG, err := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs)
+	if err != nil {
+		diags = diags.Append(err)
+		return diags.ErrWithWarnings()
+	}
+	g.Subsume(&instG.AcyclicGraph.Graph)
+
+	return diags.ErrWithWarnings()
+}
+
+// resourceInstanceSubgraph builds the per-module-instance subgraph for this
+// resource: one node per instance address (plus count/for_each orphans),
+// wired up with state attachment, targeting, and reference edges.
+func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) {
+	var diags tfdiags.Diagnostics
+
+	var commandLineImportTargets []CommandLineImportTarget
+
+	for _, importTarget := range n.importTargets {
+		if importTarget.IsFromImportCommandLine() {
+			commandLineImportTargets = append(commandLineImportTargets, *importTarget.CommandLineImportTarget)
+		}
+	}
+
+	// Our graph transformers require access to the full state, so we'll
+	// temporarily lock it while we work on this.
+	state := ctx.State().Lock()
+	defer ctx.State().Unlock()
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
+		var m *NodePlannableResourceInstance
+
+		// If we're in the `tofu import` CLI command, we only need
+		// to return the import node, not a plannable resource node.
+		for _, c := range commandLineImportTargets {
+			if c.Addr.Equal(a.Addr) {
+				return &graphNodeImportState{
+					Addr:             c.Addr,
+					ID:               c.ID,
+					ResolvedProvider: n.ResolvedProvider,
+					Schema:           n.Schema,
+					SchemaVersion:    n.SchemaVersion,
+					Config:           n.Config,
+				}
+			}
+		}
+
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+		a.Schema = n.Schema
+		a.ProvisionerSchemas = n.ProvisionerSchemas
+		a.ProviderMetas = n.ProviderMetas
+		a.dependsOn = n.dependsOn
+		a.Dependencies = n.dependencies
+		a.preDestroyRefresh = n.preDestroyRefresh
+		a.generateConfigPath = n.generateConfigPath
+
+		m = &NodePlannableResourceInstance{
+			NodeAbstractResourceInstance: a,
+
+			// By the time we're walking, we've figured out whether we need
+			// to force on CreateBeforeDestroy due to dependencies on other
+			// nodes that have it.
+			ForceCreateBeforeDestroy: n.CreateBeforeDestroy(),
+			skipRefresh:              n.skipRefresh,
+			skipPlanChanges:          n.skipPlanChanges,
+			forceReplace:             n.forceReplace,
+		}
+
+		resolvedImportTarget := ctx.ImportResolver().GetImport(a.Addr)
+		if resolvedImportTarget != nil {
+			m.importTarget = *resolvedImportTarget
+		}
+
+		return m
+	}
+
+	// The concrete resource factory we'll use for orphans
+	concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+		a.Schema = n.Schema
+		a.ProvisionerSchemas = n.ProvisionerSchemas
+		a.ProviderMetas = n.ProviderMetas
+
+		return &NodePlannableResourceInstanceOrphan{
+			NodeAbstractResourceInstance: a,
+			skipRefresh:                  n.skipRefresh,
+			skipPlanChanges:              n.skipPlanChanges,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count or for_each (if present)
+		&ResourceCountTransformer{
+			Concrete:      concreteResource,
+			Schema:        n.Schema,
+			Addr:          n.ResourceAddr(),
+			InstanceAddrs: instanceAddrs,
+		},
+
+		// Add the count/for_each orphans
+		&OrphanResourceInstanceCountTransformer{
+			Concrete:      concreteResourceOrphan,
+			Addr:          addr,
+			InstanceAddrs: instanceAddrs,
+			State:         state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{Targets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps: steps,
+		Name:  "nodeExpandPlannableResource",
+	}
+	graph, diags := b.Build(addr.Module)
+	return graph, diags.ErrWithWarnings()
+}
diff --git a/pkg/tofu/node_resource_plan_destroy.go b/pkg/tofu/node_resource_plan_destroy.go
new file mode 100644
index 00000000000..3d1e162bc69
--- /dev/null
+++ b/pkg/tofu/node_resource_plan_destroy.go
@@ -0,0 +1,127 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tofu
+
+import (
+	"fmt"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/plans"
+	"github.com/kubegems/opentofu/pkg/states"
+	"github.com/kubegems/opentofu/pkg/tfdiags"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// NodePlanDestroyableResourceInstance represents a resource that is ready
+// to be planned for destruction.
+type NodePlanDestroyableResourceInstance struct {
+	*NodeAbstractResourceInstance
+
+	// skipRefresh indicates that we should skip refreshing
+	skipRefresh bool
+}
+
+var (
+	_ GraphNodeModuleInstance       = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeReferenceable        = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeReferencer           = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeDestroyer            = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeConfigResource       = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeResourceInstance     = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeAttachResourceState  = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeExecutable           = (*NodePlanDestroyableResourceInstance)(nil)
+	_ GraphNodeProviderConsumer     = (*NodePlanDestroyableResourceInstance)(nil)
+)
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
+	addr := n.ResourceInstanceAddr()
+	return &addr
+}
+
+// GraphNodeExecutable impl, dispatching by resource mode.
+func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+	addr := n.ResourceInstanceAddr()
+
+	switch addr.Resource.Resource.Mode {
+	case addrs.ManagedResourceMode:
+		return n.managedResourceExecute(ctx, op)
+	case addrs.DataResourceMode:
+		return n.dataResourceExecute(ctx, op)
+	default:
+		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+	}
+}
+
+// managedResourceExecute plans the destruction of a managed resource
+// instance: it reads the prior state, optionally re-writes it to compensate
+// for a skipped refresh, plans the destroy change, records the change, and
+// finally checks prevent_destroy.
+func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+	addr := n.ResourceInstanceAddr()
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. These are written to by address in the EvalNodes we
+	// declare below.
+	var change *plans.ResourceInstanceChange
+	var state *states.ResourceInstanceObject
+
+	state, err := n.readResourceInstanceState(ctx, addr)
+	diags = diags.Append(err)
+	if diags.HasErrors() {
+		return diags
+	}
+
+	// If we are in the "skip refresh" mode then we will have skipped over our
+	// usual opportunity to update the previous run state and refresh state
+	// with the result of any provider schema upgrades, so we'll compensate
+	// by doing that here.
+	//
+	// NOTE: this is coupled with logic in Context.destroyPlan which skips
+	// running a normal plan walk when refresh is enabled. These two
+	// conditionals must agree (be exactly opposite) in order to get the
+	// correct behavior in both cases.
+	if n.skipRefresh {
+		diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
+		if diags.HasErrors() {
+			return diags
+		}
+		diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
+		if diags.HasErrors() {
+			return diags
+		}
+	}
+
+	change, destroyPlanDiags := n.planDestroy(ctx, state, "")
+	diags = diags.Append(destroyPlanDiags)
+	if diags.HasErrors() {
+		return diags
+	}
+
+	diags = diags.Append(n.writeChange(ctx, change, ""))
+	if diags.HasErrors() {
+		return diags
+	}
+
+	diags = diags.Append(n.checkPreventDestroy(change))
+	return diags
+}
+
+// dataResourceExecute plans the removal of a data source, which is simply a
+// Delete change from a null prior value to a null new value.
+func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+
+	// We may not be able to read a prior data source from the state if the
+	// schema was upgraded and we are destroying before ever refreshing that
+	// data source. Regardless, a data source "destroy" is simply writing a
+	// null state, which we can do with a null prior state too.
+ change := &plans.ResourceInstanceChange{ + Addr: n.ResourceInstanceAddr(), + PrevRunAddr: n.prevRunAddr(ctx), + Change: plans.Change{ + Action: plans.Delete, + Before: cty.NullVal(cty.DynamicPseudoType), + After: cty.NullVal(cty.DynamicPseudoType), + }, + ProviderAddr: n.ResolvedProvider, + } + return diags.Append(n.writeChange(ctx, change, "")) +} diff --git a/pkg/tofu/node_resource_plan_instance.go b/pkg/tofu/node_resource_plan_instance.go new file mode 100644 index 00000000000..634ae9a3b93 --- /dev/null +++ b/pkg/tofu/node_resource_plan_instance.go @@ -0,0 +1,740 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "path/filepath" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/genconfig" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodePlannableResourceInstance represents a _single_ resource +// instance that is plannable. This means this represents a single +// count index, for example. +type NodePlannableResourceInstance struct { + *NodeAbstractResourceInstance + ForceCreateBeforeDestroy bool + + // skipRefresh indicates that we should skip refreshing individual instances + skipRefresh bool + + // skipPlanChanges indicates we should skip trying to plan change actions + // for any instances. + skipPlanChanges bool + + // forceReplace are resource instance addresses where the user wants to + // force generating a replace action. 
This set isn't pre-filtered, so + // it might contain addresses that have nothing to do with the resource + // that this node represents, which the node itself must therefore ignore. + forceReplace []addrs.AbsResourceInstance + + // replaceTriggeredBy stores references from replace_triggered_by which + // triggered this instance to be replaced. + replaceTriggeredBy []*addrs.Reference + + // importTarget, if populated, contains the information necessary to plan + // an import of this resource. + importTarget EvaluatedConfigImportTarget +} + +// EvaluatedConfigImportTarget is a target that we need to import. It's created when an import target originated from +// an import block, after everything regarding the configuration has been evaluated. +// At this point, the import target is of a single resource instance +type EvaluatedConfigImportTarget struct { + // Config is the original import block for this import. This might be null + // if the import did not originate in config. + Config *configs.Import + + // Addr is the actual address of the resource instance that we should import into. At this point, the address + // should be fully evaluated + Addr addrs.AbsResourceInstance + + // ID is the string ID of the resource to import. This is resource-instance specific. 
+ ID string +} + +var ( + _ GraphNodeModuleInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) + _ GraphNodeConfigResource = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) + _ GraphNodeExecutable = (*NodePlannableResourceInstance)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.managedResourceExecute(ctx) + case addrs.DataResourceMode: + return n.dataResourceExecute(ctx) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + config := n.Config + addr := n.ResourceInstanceAddr() + + var change *plans.ResourceInstanceChange + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) + if diags.HasErrors() { + return diags + } + + checkRuleSeverity := tfdiags.Error + if n.skipPlanChanges || n.preDestroyRefresh { + checkRuleSeverity = tfdiags.Warning + } + + change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } + + // write the data source into both the refresh state and the + // working state + diags = 
diags.Append(n.writeResourceInstanceState(ctx, state, refreshState)) + if diags.HasErrors() { + return diags + } + diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState)) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.writeChange(ctx, change, "")) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, addr, repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + + return diags +} + +func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + config := n.Config + addr := n.ResourceInstanceAddr() + + var instanceRefreshState *states.ResourceInstanceObject + + checkRuleSeverity := tfdiags.Error + if n.skipPlanChanges || n.preDestroyRefresh { + checkRuleSeverity = tfdiags.Warning + } + + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + if config != nil { + diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) + if diags.HasErrors() { + return diags + } + } + + importing := n.shouldImport(ctx) + + if importing && n.Config == nil && len(n.generateConfigPath) == 0 { + // Then the user wrote an import target to a target that didn't exist. + if n.Addr.Module.IsRoot() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import block target does not exist", + Detail: "The target for the given import block does not exist. If you wish to automatically generate config for this resource, use the -generate-config-out option within tofu plan. Otherwise, make sure the target resource exists within your configuration. 
For example:\n\n tofu plan -generate-config-out=generated.tf", + Subject: n.importTarget.Config.DeclRange.Ptr(), + }) + } else { + // You can't generate config for a resource that is inside a + // module, so we will present a different error message for + // this case. + diags = diags.Append(importResourceWithoutConfigDiags(n.Addr.String(), n.importTarget.Config)) + } + return diags + } + + // If the resource is to be imported, we now ask the provider for an Import + // and a Refresh, and save the resulting state to instanceRefreshState. + if importing { + instanceRefreshState, diags = n.importState(ctx, addr, n.importTarget.ID, provider, providerSchema) + } else { + var readDiags tfdiags.Diagnostics + instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return diags + } + } + + // We'll save a snapshot of what we just read from the state into the + // prevRunState before we do anything else, since this will capture the + // result of any schema upgrading that readResourceInstanceState just did, + // but not include any out-of-band changes we might detect in in the + // refresh step below. + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState)) + if diags.HasErrors() { + return diags + } + // Also the refreshState, because that should still reflect schema upgrades + // even if it doesn't reflect upstream changes. + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return diags + } + + // In 0.13 we could be refreshing a resource with no config. 
+ // We should be operating on managed resource, but check here to be certain + if n.Config == nil || n.Config.Managed == nil { + log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr) + } else { + if instanceRefreshState != nil { + instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy + } + } + + // Refresh, maybe + // The import process handles its own refresh + if !n.skipRefresh && !importing { + s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags + } + + instanceRefreshState = s + + if instanceRefreshState != nil { + // When refreshing we start by merging the stored dependencies and + // the configured dependencies. The configured dependencies will be + // stored to state once the changes are applied. If the plan + // results in no changes, we will re-write these dependencies + // below. 
+ instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies) + } + + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return diags + } + } + + // Plan the instance, unless we're in the refresh-only mode + if !n.skipPlanChanges { + + // add this instance to n.forceReplace if replacement is triggered by + // another change + repData := instances.RepetitionData{} + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + repData.CountIndex = k.Value() + case addrs.StringKey: + repData.EachKey = k.Value() + repData.EachValue = cty.DynamicVal + } + + diags = diags.Append(n.replaceTriggered(ctx, repData)) + if diags.HasErrors() { + return diags + } + + change, instancePlanState, repeatData, planDiags := n.plan( + ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, + ) + diags = diags.Append(planDiags) + if diags.HasErrors() { + // If we are importing and generating a configuration, we need to + // ensure the change is written out so the configuration can be + // captured. + if len(n.generateConfigPath) > 0 { + // Update our return plan + change := &plans.ResourceInstanceChange{ + Addr: n.Addr, + PrevRunAddr: n.prevRunAddr(ctx), + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + // we only need a placeholder, so this will be a NoOp + Action: plans.NoOp, + Before: instanceRefreshState.Value, + After: instanceRefreshState.Value, + GeneratedConfig: n.generatedConfigHCL, + }, + } + diags = diags.Append(n.writeChange(ctx, change, "")) + } + + return diags + } + + if importing { + change.Importing = &plans.Importing{ID: n.importTarget.ID} + } + + // FIXME: here we udpate the change to reflect the reason for + // replacement, but we still overload forceReplace to get the correct + // change planned. 
+ if len(n.replaceTriggeredBy) > 0 { + change.ActionReason = plans.ResourceInstanceReplaceByTriggers + } + + // FIXME: it is currently important that we write resource changes to + // the plan (n.writeChange) before we write the corresponding state + // (n.writeResourceInstanceState). + // + // This is because the planned resource state will normally have the + // status of states.ObjectPlanned, which causes later logic to refer to + // the contents of the plan to retrieve the resource data. Because + // there is no shared lock between these two data structures, reversing + // the order of these writes will cause a brief window of inconsistency + // which can lead to a failed safety check. + // + // Future work should adjust these APIs such that it is impossible to + // update these two data structures incorrectly through any objects + // reachable via the tofu.EvalContext API. + diags = diags.Append(n.writeChange(ctx, change, "")) + if diags.HasErrors() { + return diags + } + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState)) + if diags.HasErrors() { + return diags + } + + // If this plan resulted in a NoOp, then apply won't have a chance to make + // any changes to the stored dependencies. Since this is a NoOp we know + // that the stored dependencies will have no effect during apply, and we can + // write them out now. + if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) { + // the refresh state will be the final state for this resource, so + // finalize the dependencies here if they need to be updated. + instanceRefreshState.Dependencies = n.Dependencies + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + if diags.HasErrors() { + return diags + } + } + + // Post-conditions might block completion. 
We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + checkDiags := evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr(), repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + } else { + // In refresh-only mode we need to evaluate the for-each expression in + // order to supply the value to the pre- and post-condition check + // blocks. This has the unfortunate edge case of a refresh-only plan + // executing with a for-each map which has the same keys but different + // values, which could result in a post-condition check relying on that + // value being inaccurate. Unless we decide to store the value of the + // for-each expression in state, this is unavoidable. + forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) + repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + checkDiags := evalCheckRules( + addrs.ResourcePrecondition, + n.Config.Preconditions, + ctx, addr, repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + + // Even if we don't plan changes, we do still need to at least update + // the working state to reflect the refresh result. If not, then e.g. + // any output values referring to this will not react to the drift. + // (Even if we didn't actually refresh above, this will still save + // the result of any schema upgrading we did in readResourceInstanceState.) 
+ diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState)) + if diags.HasErrors() { + return diags + } + + // Here we also evaluate post-conditions after updating the working + // state, because we want to check against the result of the refresh. + // Unlike in normal planning mode, these checks are still evaluated + // even if pre-conditions generated diagnostics, because we have no + // planned changes to block. + checkDiags = evalCheckRules( + addrs.ResourcePostcondition, + n.Config.Postconditions, + ctx, addr, repeatData, + checkRuleSeverity, + ) + diags = diags.Append(checkDiags) + } + + return diags +} + +// replaceTriggered checks if this instance needs to be replace due to a change +// in a replace_triggered_by reference. If replacement is required, the +// instance address is added to forceReplace +func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repData instances.RepetitionData) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if n.Config == nil { + return diags + } + + for _, expr := range n.Config.TriggersReplacement { + ref, replace, evalDiags := ctx.EvaluateReplaceTriggeredBy(expr, repData) + diags = diags.Append(evalDiags) + if diags.HasErrors() { + continue + } + + if replace { + // FIXME: forceReplace accomplishes the same goal, however we may + // want to communicate more information about which resource + // triggered the replacement in the plan. + // Rather than further complicating the plan method with more + // options, we can refactor both of these features later. 
+ n.forceReplace = append(n.forceReplace, n.Addr) + log.Printf("[DEBUG] ReplaceTriggeredBy forcing replacement of %s due to change in %s", n.Addr, ref.DisplayString()) + + n.replaceTriggeredBy = append(n.replaceTriggeredBy, ref) + break + } + } + + return diags +} + +func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + absAddr := addr.Resource.Absolute(ctx.Path()) + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PrePlanImport(absAddr, importId) + })) + if diags.HasErrors() { + return nil, diags + } + + resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + ID: importId, + }) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags + } + + imported := resp.ImportedResources + + if len(imported) == 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Import returned no resources", + fmt.Sprintf("While attempting to import with ID %s, the provider "+ + "returned no instance states.", + importId, + ), + )) + return nil, diags + } + for _, obj := range imported { + log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), importId, obj.TypeName) + } + if len(imported) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Multiple import states not supported", + fmt.Sprintf("While attempting to import with ID %s, the provider "+ + "returned multiple resource instance states. 
This "+ + "is not currently supported.", + importId, + ), + )) + return nil, diags + } + + // call post-import hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostPlanImport(absAddr, imported) + })) + + if imported[0].TypeName == "" { + diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String())) + return nil, diags + } + + importedState := imported[0].AsInstanceObject() + + if importedState.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Import returned null resource", + fmt.Sprintf("While attempting to import with ID %s, the provider "+ + "returned an instance with no state.", + n.importTarget.ID, + ), + )) + } + + // refresh + riNode := &NodeAbstractResourceInstance{ + Addr: n.Addr, + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: n.ResolvedProvider, + }, + } + instanceRefreshState, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return instanceRefreshState, diags + } + + // verify the existence of the imported resource + if instanceRefreshState.Value.IsNull() { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %q, "+ + "the provider detected that no object exists with the given id. 
"+ + "Only pre-existing objects can be imported; check that the id "+ + "is correct and that it is associated with the provider's "+ + "configured region or endpoint, or use \"tofu apply\" to "+ + "create a new remote object for this resource.", + n.Addr, + ), + )) + return instanceRefreshState, diags + } + + // Insert marks from configuration + if n.Config != nil { + keyData := EvalDataForNoInstanceKey + + switch { + case n.Config.Count != nil: + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + case n.Config.ForEach != nil: + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.UnknownVal(cty.DynamicPseudoType), + } + } + + valueWithConfigurationSchemaMarks, _, configDiags := ctx.EvaluateBlock(n.Config.Config, n.Schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return instanceRefreshState, diags + } + + _, marks := instanceRefreshState.Value.UnmarkDeepWithPaths() + _, configSchemaMarks := valueWithConfigurationSchemaMarks.UnmarkDeepWithPaths() + merged := combinePathValueMarks(marks, configSchemaMarks) + + instanceRefreshState.Value = instanceRefreshState.Value.MarkWithPaths(merged) + } + + // If we're importing and generating config, generate it now. + if len(n.generateConfigPath) > 0 { + if n.Config != nil { + return instanceRefreshState, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr)) + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", n.Addr)) + return instanceRefreshState, diags + } + + // Generate the HCL string first, then parse the HCL body from it. + // First we generate the contents of the resource block for use within + // the planning node. 
Then we wrap it in an enclosing resource block to + // pass into the plan for rendering. + generatedHCLAttributes, generatedDiags := n.generateHCLStringAttributes(n.Addr, instanceRefreshState, schema) + diags = diags.Append(generatedDiags) + + n.generatedConfigHCL = genconfig.WrapResourceContents(n.Addr, generatedHCLAttributes) + + // parse the "file" as HCL to get the hcl.Body + synthHCLFile, hclDiags := hclsyntax.ParseConfig([]byte(generatedHCLAttributes), filepath.Base(n.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return instanceRefreshState, diags + } + + // We have to do a kind of mini parsing of the content here to correctly + // mark attributes like 'provider' as hidden. We only care about the + // resulting content, so it's remain that gets passed into the resource + // as the config. + _, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema) + diags = diags.Append(resourceDiags) + if resourceDiags.HasErrors() { + return instanceRefreshState, diags + } + + n.Config = &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: n.Addr.Resource.Resource.Type, + Name: n.Addr.Resource.Resource.Name, + Config: remain, + Managed: &configs.ManagedResource{}, + Provider: n.ResolvedProvider.Provider, + } + } + + diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState)) + return instanceRefreshState, diags +} + +func (n *NodePlannableResourceInstance) shouldImport(ctx EvalContext) bool { + if n.importTarget.ID == "" { + return false + } + + // If the import target already has a state - we should not attempt to import it, but instead run a normal plan + // for it + state := ctx.State() + return state.ResourceInstance(n.ResourceInstanceAddr()) == nil +} + +// generateHCLStringAttributes produces a string in HCL format for the given +// resource state and schema without the surrounding block. 
+func (n *NodePlannableResourceInstance) generateHCLStringAttributes(addr addrs.AbsResourceInstance, state *states.ResourceInstanceObject, schema *configschema.Block) (string, tfdiags.Diagnostics) { + filteredSchema := schema.Filter( + configschema.FilterOr( + configschema.FilterReadOnlyAttribute, + configschema.FilterDeprecatedAttribute, + + // The legacy SDK adds an Optional+Computed "id" attribute to the + // resource schema even if not defined in provider code. + // During validation, however, the presence of an extraneous "id" + // attribute in config will cause an error. + // Remove this attribute so we do not generate an "id" attribute + // where there is a risk that it is not in the real resource schema. + // + // TRADEOFF: Resources in which there actually is an + // Optional+Computed "id" attribute in the schema will have that + // attribute missing from generated config. + configschema.FilterHelperSchemaIdAttribute, + ), + configschema.FilterDeprecatedBlock, + ) + + providerAddr := addrs.LocalProviderConfig{ + LocalName: n.ResolvedProvider.Provider.Type, + Alias: n.ResolvedProvider.Alias, + } + + return genconfig.GenerateResourceContents(addr, filteredSchema, providerAddr, state.Value) +} + +// mergeDeps returns the union of 2 sets of dependencies +func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource { + switch { + case len(a) == 0: + return b + case len(b) == 0: + return a + } + + set := make(map[string]addrs.ConfigResource) + + for _, dep := range a { + set[dep.String()] = dep + } + + for _, dep := range b { + set[dep.String()] = dep + } + + newDeps := make([]addrs.ConfigResource, 0, len(set)) + for _, dep := range set { + newDeps = append(newDeps, dep) + } + + return newDeps +} + +func depsEqual(a, b []addrs.ConfigResource) bool { + if len(a) != len(b) { + return false + } + + // Because we need to sort the deps to compare equality, make shallow + // copies to prevent concurrently modifying the array values on + // dependencies shared 
between expanded instances. + copyA, copyB := make([]addrs.ConfigResource, len(a)), make([]addrs.ConfigResource, len(b)) + copy(copyA, a) + copy(copyB, b) + a, b = copyA, copyB + + less := func(s []addrs.ConfigResource) func(i, j int) bool { + return func(i, j int) bool { + return s[i].String() < s[j].String() + } + } + + sort.Slice(a, less(a)) + sort.Slice(b, less(b)) + + for i := range a { + if !a[i].Equal(b[i]) { + return false + } + } + return true +} diff --git a/pkg/tofu/node_resource_plan_orphan.go b/pkg/tofu/node_resource_plan_orphan.go new file mode 100644 index 00000000000..13e0f3182e0 --- /dev/null +++ b/pkg/tofu/node_resource_plan_orphan.go @@ -0,0 +1,311 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. +type NodePlannableResourceInstanceOrphan struct { + *NodeAbstractResourceInstance + + // skipRefresh indicates that we should skip refreshing individual instances + skipRefresh bool + + // skipPlanChanges indicates we should skip trying to plan change actions + // for any instances. + skipPlanChanges bool + + // EndpointsToRemove are resource instance addresses where the user wants to + // forget from the state. This set isn't pre-filtered, so + // it might contain addresses that have nothing to do with the resource + // that this node represents, which the node itself must therefore ignore. 
+ EndpointsToRemove []addrs.ConfigRemovable +} + +var ( + _ GraphNodeModuleInstance = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeConfigResource = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeExecutable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeProviderConsumer = (*NodePlannableResourceInstanceOrphan)(nil) +) + +func (n *NodePlannableResourceInstanceOrphan) Name() string { + return n.ResourceInstanceAddr().String() + " (orphan)" +} + +// GraphNodeExecutable +func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.managedResourceExecute(ctx) + case addrs.DataResourceMode: + return n.dataResourceExecute(ctx) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstanceOrphan) ProvidedBy() (addr addrs.ProviderConfig, exact bool) { + if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + // indicate that this node does not require a configured provider + return nil, true + } + return n.NodeAbstractResourceInstance.ProvidedBy() +} + +func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(ctx EvalContext) tfdiags.Diagnostics { + // A data source that is no longer in the config is removed from the state + log.Printf("[TRACE] NodePlannableResourceInstanceOrphan: removing state object for %s", n.Addr) + + // we need to update both the 
refresh state to refresh the current data + // source, and the working state for plan-time evaluations. + refreshState := ctx.RefreshState() + refreshState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) + + workingState := ctx.State() + workingState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) + return nil +} + +func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + addr := n.ResourceInstanceAddr() + + oldState, readDiags := n.readResourceInstanceState(ctx, addr) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return diags + } + + // Note any upgrades that readResourceInstanceState might've done in the + // prevRunState, so that it'll conform to current schema. + diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, prevRunState)) + if diags.HasErrors() { + return diags + } + // Also the refreshState, because that should still reflect schema upgrades + // even if not refreshing. + diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, refreshState)) + if diags.HasErrors() { + return diags + } + + if !n.skipRefresh { + // Refresh this instance even though it is going to be destroyed, in + // order to catch missing resources. If this is a normal plan, + // providers expect a Read request to remove missing resources from the + // plan before apply, and may not handle a missing resource during + // Delete correctly. If this is a simple refresh, OpenTofu is + // expected to remove the missing resource from the state entirely + refreshedState, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.writeResourceInstanceState(ctx, refreshedState, refreshState)) + if diags.HasErrors() { + return diags + } + + // If we refreshed then our subsequent planning should be in terms of + // the new object, not the original object. 
+ oldState = refreshedState + } + + // If we're skipping planning, all we need to do is write the state. If the + // refresh indicates the instance no longer exists, there is also nothing + // to plan because there is no longer any state and it doesn't exist in the + // config. + if n.skipPlanChanges || oldState == nil || oldState.Value.IsNull() { + return diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState)) + } + + var change *plans.ResourceInstanceChange + var planDiags tfdiags.Diagnostics + + shouldForget := false + + for _, etf := range n.EndpointsToRemove { + if etf.TargetContains(n.Addr) { + shouldForget = true + } + } + + if shouldForget { + change = n.planForget(ctx, oldState, "") + } else { + change, planDiags = n.planDestroy(ctx, oldState, "") + } + + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags + } + + // We might be able to offer an approximate reason for why we are + // planning to delete this object. (This is best-effort; we might + // sometimes not have a reason.) + change.ActionReason = n.deleteActionReason(ctx) + + diags = diags.Append(n.writeChange(ctx, change, "")) + if diags.HasErrors() { + return diags + } + + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return diags + } + + return diags.Append(n.writeResourceInstanceState(ctx, nil, workingState)) +} + +func (n *NodePlannableResourceInstanceOrphan) deleteActionReason(ctx EvalContext) plans.ResourceInstanceChangeActionReason { + cfg := n.Config + if cfg == nil { + if !n.Addr.Equal(n.prevRunAddr(ctx)) { + // This means the resource was moved - see also + // ResourceInstanceChange.Moved() which calculates + // this the same way. 
+ return plans.ResourceInstanceDeleteBecauseNoMoveTarget + } + + return plans.ResourceInstanceDeleteBecauseNoResourceConfig + } + + // If this is a resource instance inside a module instance that's no + // longer declared then we will have a config (because config isn't + // instance-specific) but the expander will know that our resource + // address's module path refers to an undeclared module instance. + if expander := ctx.InstanceExpander(); expander != nil { // (sometimes nil in MockEvalContext in tests) + validModuleAddr := expander.GetDeepestExistingModuleInstance(n.Addr.Module) + if len(validModuleAddr) != len(n.Addr.Module) { + // If we get here then at least one step in the resource's module + // path is to a module instance that doesn't exist at all, and + // so a missing module instance is the delete reason regardless + // of whether there might _also_ be a change to the resource + // configuration inside the module. (Conceptually the configurations + // inside the non-existing module instance don't exist at all, + // but they end up existing just as an artifact of the + // implementation detail that we detect module instance orphans + // only dynamically.) 
+ return plans.ResourceInstanceDeleteBecauseNoModule + } + } + + switch n.Addr.Resource.Key.(type) { + case nil: // no instance key at all + if cfg.Count != nil || cfg.ForEach != nil { + return plans.ResourceInstanceDeleteBecauseWrongRepetition + } + case addrs.IntKey: + if cfg.Count == nil { + // This resource isn't using "count" at all, then + return plans.ResourceInstanceDeleteBecauseWrongRepetition + } + + expander := ctx.InstanceExpander() + if expander == nil { + break // only for tests that produce an incomplete MockEvalContext + } + insts := expander.ExpandResource(n.Addr.ContainingResource()) + + declared := false + for _, inst := range insts { + if n.Addr.Equal(inst) { + declared = true + } + } + if !declared { + // This instance key is outside of the configured range + return plans.ResourceInstanceDeleteBecauseCountIndex + } + case addrs.StringKey: + if cfg.ForEach == nil { + // This resource isn't using "for_each" at all, then + return plans.ResourceInstanceDeleteBecauseWrongRepetition + } + + expander := ctx.InstanceExpander() + if expander == nil { + break // only for tests that produce an incomplete MockEvalContext + } + insts := expander.ExpandResource(n.Addr.ContainingResource()) + + declared := false + for _, inst := range insts { + if n.Addr.Equal(inst) { + declared = true + } + } + if !declared { + // This instance key is outside of the configured range + return plans.ResourceInstanceDeleteBecauseEachKey + } + } + + // If we get here then the instance key type matches the configured + // repetition mode, and so we need to consider whether the key itself + // is within the range of the repetition construct. + if expander := ctx.InstanceExpander(); expander != nil { // (sometimes nil in MockEvalContext in tests) + // First we'll check whether our containing module instance still + // exists, so we can talk about that differently in the reason. 
+ declared := false + for _, inst := range expander.ExpandModule(n.Addr.Module.Module()) { + if n.Addr.Module.Equal(inst) { + declared = true + break + } + } + if !declared { + return plans.ResourceInstanceDeleteBecauseNoModule + } + + // Now we've proven that we're in a still-existing module instance, + // we'll see if our instance key matches something actually declared. + declared = false + for _, inst := range expander.ExpandResource(n.Addr.ContainingResource()) { + if n.Addr.Equal(inst) { + declared = true + break + } + } + if !declared { + // Because we already checked that the key _type_ was correct + // above, we can assume that any mismatch here is a range error, + // and thus we just need to decide which of the two range + // errors we're going to return. + switch n.Addr.Resource.Key.(type) { + case addrs.IntKey: + return plans.ResourceInstanceDeleteBecauseCountIndex + case addrs.StringKey: + return plans.ResourceInstanceDeleteBecauseEachKey + } + } + } + + // If we didn't find any specific reason to report, we'll report "no reason" + // as a fallback, which means the UI should just state it'll be deleted + // without any explicit reasoning. + return plans.ResourceInstanceChangeNoReason +} diff --git a/pkg/tofu/node_resource_plan_orphan_test.go b/pkg/tofu/node_resource_plan_orphan_test.go new file mode 100644 index 00000000000..2858227b479 --- /dev/null +++ b/pkg/tofu/node_resource_plan_orphan_test.go @@ -0,0 +1,314 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/configs/configschema" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +func TestNodeResourcePlanOrphan_Execute(t *testing.T) { + tests := []struct { + description string + nodeAddress string + nodeEndpointsToRemove []addrs.ConfigRemovable + wantAction plans.Action + }{ + { + nodeAddress: "test_instance.foo", + nodeEndpointsToRemove: make([]addrs.ConfigRemovable, 0), + wantAction: plans.Delete, + }, + { + nodeAddress: "test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("test_instance.bar")).(addrs.ConfigRemovable), + }, + wantAction: plans.Delete, + }, + { + nodeAddress: "test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable), + }, + wantAction: plans.Delete, + }, + { + nodeAddress: "test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("test_instance.foo")).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + { + nodeAddress: "test_instance.foo[1]", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("test_instance.foo")).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + { + nodeAddress: "module.boop.test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(mustConfigResourceAddr("module.boop.test_instance.foo")).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + { + nodeAddress: "module.boop[1].test_instance.foo[1]", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + 
interface{}(mustConfigResourceAddr("module.boop.test_instance.foo")).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + { + nodeAddress: "module.boop.test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + { + nodeAddress: "module.boop[1].test_instance.foo", + nodeEndpointsToRemove: []addrs.ConfigRemovable{ + interface{}(addrs.Module{"boop"}).(addrs.ConfigRemovable), + }, + wantAction: plans.Forget, + }, + } + + for _, test := range tests { + state := states.NewState() + absResource := mustResourceInstanceAddr(test.nodeAddress) + + if !absResource.Module.Module().Equal(addrs.RootModule) { + state.EnsureModule(addrs.RootModuleInstance.Child(absResource.Module[0].Name, absResource.Module[0].InstanceKey)) + } + + state.Module(absResource.Module).SetResourceInstanceCurrent( + absResource.Resource, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "test_string": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + + schema := providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + } + + p := simpleMockProvider() + p.ConfigureProvider(providers.ConfigureProviderRequest{}) + p.GetProviderSchemaResponse = &schema + + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + RefreshStateState: state.DeepCopy().SyncWrapper(), + PrevRunStateState: state.DeepCopy().SyncWrapper(), + InstanceExpanderExpander: instances.NewExpander(), + ProviderProvider: p, + ProviderSchemaSchema: schema, + ChangesChanges: plans.NewChanges().SyncWrapper(), + } + + node := NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: 
&NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + Addr: absResource, + }, + EndpointsToRemove: test.nodeEndpointsToRemove, + } + + err := node.Execute(ctx, walkPlan) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + change := ctx.Changes().GetResourceInstanceChange(absResource, states.NotDeposed) + if got, want := change.ChangeSrc.Action, test.wantAction; got != want { + t.Fatalf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + + if !state.Empty() { + t.Fatalf("expected empty state, got %s", state.String()) + } + } +} + +func TestNodeResourcePlanOrphanExecute_alreadyDeleted(t *testing.T) { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetResourceInstanceCurrent( + addr.Resource, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "test_string": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + refreshState := state.DeepCopy() + prevRunState := state.DeepCopy() + changes := plans.NewChanges() + + p := simpleMockProvider() + p.ConfigureProvider(providers.ConfigureProviderRequest{}) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_string"].Block.ImpliedType()), + } + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + RefreshStateState: refreshState.SyncWrapper(), + PrevRunStateState: prevRunState.SyncWrapper(), + InstanceExpanderExpander: instances.NewExpander(), + ProviderProvider: p, + ProviderSchemaSchema: providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + 
"test_object": { + Block: simpleTestSchema(), + }, + }, + }, + ChangesChanges: changes.SyncWrapper(), + } + + node := NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + Addr: mustResourceInstanceAddr("test_object.foo"), + }, + } + diags := node.Execute(ctx, walkPlan) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + if !state.Empty() { + t.Fatalf("expected empty state, got %s", state.String()) + } + + if got := prevRunState.ResourceInstance(addr); got == nil { + t.Errorf("no entry for %s in the prev run state; should still be present", addr) + } + if got := refreshState.ResourceInstance(addr); got != nil { + t.Errorf("refresh state has entry for %s; should've been removed", addr) + } + if got := changes.ResourceInstance(addr); got != nil { + t.Errorf("there should be no change for the %s instance, got %s", addr, got.Action) + } +} + +// This test describes a situation which should not be possible, as this node +// should never work on deposed instances. However, a bug elsewhere resulted in +// this code path being exercised and triggered a panic. As a result, the +// assertions at the end of the test are minimal, as the behaviour (aside from +// not panicking) is unspecified. 
+func TestNodeResourcePlanOrphanExecute_deposed(t *testing.T) { + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetResourceInstanceDeposed( + addr.Resource, + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "test_string": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + refreshState := state.DeepCopy() + prevRunState := state.DeepCopy() + changes := plans.NewChanges() + + p := simpleMockProvider() + p.ConfigureProvider(providers.ConfigureProviderRequest{}) + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetProviderSchemaResponse.ResourceTypes["test_string"].Block.ImpliedType()), + } + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + RefreshStateState: refreshState.SyncWrapper(), + PrevRunStateState: prevRunState.SyncWrapper(), + InstanceExpanderExpander: instances.NewExpander(), + ProviderProvider: p, + ProviderSchemaSchema: providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "test_object": { + Block: simpleTestSchema(), + }, + }, + }, + ChangesChanges: changes.SyncWrapper(), + } + + node := NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + }, + Addr: mustResourceInstanceAddr("test_object.foo"), + }, + } + diags := node.Execute(ctx, walkPlan) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } +} diff --git a/pkg/tofu/node_resource_validate.go b/pkg/tofu/node_resource_validate.go new file mode 100644 index 
00000000000..4549342cdf0 --- /dev/null +++ b/pkg/tofu/node_resource_validate.go @@ -0,0 +1,610 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/didyoumean" + "github.com/kubegems/opentofu/pkg/instances" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodeValidatableResource represents a resource that is used for validation +// only. +type NodeValidatableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeModuleInstance = (*NodeValidatableResource)(nil) + _ GraphNodeExecutable = (*NodeValidatableResource)(nil) + _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) + _ GraphNodeReferencer = (*NodeValidatableResource)(nil) + _ GraphNodeConfigResource = (*NodeValidatableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) + _ GraphNodeAttachProviderMetaConfigs = (*NodeValidatableResource)(nil) +) + +func (n *NodeValidatableResource) Path() addrs.ModuleInstance { + // There is no expansion during validation, so we evaluate everything as + // single module instances. 
+ return n.Addr.Module.UnkeyedInstanceShim() +} + +// GraphNodeEvalable +func (n *NodeValidatableResource) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + if n.Config == nil { + return diags + } + + diags = diags.Append(n.validateResource(ctx)) + + diags = diags.Append(n.validateCheckRules(ctx, n.Config)) + + if managed := n.Config.Managed; managed != nil { + // Validate all the provisioners + for _, p := range managed.Provisioners { + // Create a local shallow copy of the provisioner + provisioner := *p + + if p.Connection == nil { + provisioner.Connection = n.Config.Managed.Connection + } else if n.Config.Managed.Connection != nil { + // Merge the connection with n.Config.Managed.Connection, but only in + // our local provisioner, as it will only be used by + // "validateProvisioner" + connection := &configs.Connection{} + *connection = *p.Connection + connection.Config = configs.MergeBodies(n.Config.Managed.Connection.Config, connection.Config) + provisioner.Connection = connection + } + + // Validate Provisioner Config + diags = diags.Append(n.validateProvisioner(ctx, &provisioner)) + if diags.HasErrors() { + return diags + } + } + } + return diags +} + +// validateProvisioner validates the configuration of a provisioner belonging to +// a resource. The provisioner config is expected to contain the merged +// connection configurations. 
+func (n *NodeValidatableResource) validateProvisioner(ctx EvalContext, p *configs.Provisioner) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + provisioner, err := ctx.Provisioner(p.Type) + if err != nil { + diags = diags.Append(err) + return diags + } + + if provisioner == nil { + return diags.Append(fmt.Errorf("provisioner %s not initialized", p.Type)) + } + provisionerSchema, err := ctx.ProvisionerSchema(p.Type) + if err != nil { + return diags.Append(fmt.Errorf("failed to read schema for provisioner %s: %w", p.Type, err)) + } + if provisionerSchema == nil { + return diags.Append(fmt.Errorf("provisioner %s has no schema", p.Type)) + } + + // Validate the provisioner's own config first + configVal, _, configDiags := n.evaluateBlock(ctx, p.Config, provisionerSchema) + diags = diags.Append(configDiags) + + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return diags.Append(fmt.Errorf("EvaluateBlock returned nil value")) + } + + // Use unmarked value for validate request + unmarkedConfigVal, _ := configVal.UnmarkDeep() + req := provisioners.ValidateProvisionerConfigRequest{ + Config: unmarkedConfigVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) + + if p.Connection != nil { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. 
+ _, _, connDiags := n.evaluateBlock(ctx, p.Connection.Config, connectionBlockSupersetSchema) + diags = diags.Append(connDiags) + } + return diags +} + +func (n *NodeValidatableResource) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + keyData, selfAddr := n.stubRepetitionData(n.Config.Count != nil, n.Config.ForEach != nil) + + return ctx.EvaluateBlock(body, schema, selfAddr, keyData) +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. Once that is done, we can remove +// this and use a type-specific schema from the communicator to validate +// exactly what is expected for a given connection type. +var connectionBlockSupersetSchema = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // NOTE: "type" is not included here because it's treated special + // by the config loader and stored away in a separate field. 
+ + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.Number, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + // For type=ssh only (enforced in ssh communicator) + "target_platform": { + Type: cty.String, + Optional: true, + }, + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "proxy_scheme": { + Type: cty.String, + Optional: true, + }, + "proxy_host": { + Type: cty.String, + Optional: true, + }, + "proxy_port": { + Type: cty.Number, + Optional: true, + }, + "proxy_user_name": { + Type: cty.String, + Optional: true, + }, + "proxy_user_password": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + "bastion_certificate": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: cty.Bool, + Optional: true, + }, + }, +} + +func (n *NodeValidatableResource) 
validateResource(ctx EvalContext) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + + keyData := EvalDataForNoInstanceKey + + switch { + case n.Config.Count != nil: + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + + case n.Config.ForEach != nil: + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.UnknownVal(cty.DynamicPseudoType), + } + + // Evaluate the for_each expression here so we can expose the diagnostics + forEachDiags := validateForEach(ctx, n.Config.ForEach) + diags = diags.Append(forEachDiags) + } + + diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) + + // Validate the provider_meta block for the provider this resource + // belongs to, if there is one. + // + // Note: this will return an error for every resource a provider + // uses in a module, if the provider_meta for that module is + // incorrect. The only way to solve this that we've found is to + // insert a new ProviderMeta graph node in the graph, and make all + // that provider's resources in the module depend on the node. That's + // an awful heavy hammer to swing for this feature, which should be + // used only in limited cases with heavy coordination with the + // OpenTofu team, so we're going to defer that solution for a future + // enhancement to this functionality. 
+ /* + if n.ProviderMetas != nil { + if m, ok := n.ProviderMetas[n.ProviderAddr.ProviderConfig.Type]; ok && m != nil { + // if the provider doesn't support this feature, throw an error + if (*n.ProviderSchema).ProviderMeta == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", cfg.ProviderConfigAddr()), + Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), + Subject: &m.ProviderRange, + }) + } else { + _, _, metaDiags := ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) + diags = diags.Append(metaDiags) + } + } + } + */ + // BUG(paddy): we're not validating provider_meta blocks on EvalValidate right now + // because the ProviderAddr for the resource isn't available on the EvalValidate + // struct. + + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. + switch n.Config.Mode { + case addrs.ManagedResourceMode: + schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) + if schema == nil { + var suggestion string + if dSchema, _ := providerSchema.SchemaForResourceType(addrs.DataResourceMode, n.Config.Type); dSchema != nil { + suggestion = fmt.Sprintf("\n\nDid you intend to use the data source %q? 
If so, declare this using a \"data\" block instead of a \"resource\" block.", n.Config.Type) + } else if len(providerSchema.ResourceTypes) > 0 { + suggestions := make([]string, 0, len(providerSchema.ResourceTypes)) + for name := range providerSchema.ResourceTypes { + suggestions = append(suggestions, name) + } + if suggestion = didyoumean.NameSuggestion(n.Config.Type, suggestions); suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.%s", n.Provider().ForDisplay(), n.Config.Type, suggestion), + Subject: &n.Config.TypeRange, + }) + return diags + } + + configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return diags + } + + if n.Config.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range n.Config.Managed.IgnoreChanges { + // validate the ignore_changes traversals apply. + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + + // ignore_changes cannot be used for Computed attributes, + // unless they are also Optional. + // If the traversal was valid, convert it to a cty.Path and + // use that to check whether the Attribute is Computed and + // non-Optional. + if !diags.HasErrors() { + path := traversalToPath(traversal) + + attrSchema := schema.AttributeByPath(path) + + if attrSchema != nil && !attrSchema.Optional && attrSchema.Computed { + // ignore_changes uses absolute traversal syntax in config despite + // using relative traversals, so we strip the leading "." added by + // FormatCtyPath for a better error message. 
+ attrDisplayPath := strings.TrimPrefix(tfdiags.FormatCtyPath(path), ".") + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Redundant ignore_changes element", + Detail: fmt.Sprintf("Adding an attribute name to ignore_changes tells OpenTofu to ignore future changes to the argument in configuration after the object has been created, retaining the value originally configured.\n\nThe attribute %s is decided by the provider alone and therefore there can be no configured value to compare with. Including this attribute in ignore_changes has no effect. Remove the attribute from ignore_changes to quiet this warning.", attrDisplayPath), + Subject: &n.Config.TypeRange, + }) + } + } + } + } + + // Use unmarked value for validate request + unmarkedConfigVal, _ := configVal.UnmarkDeep() + req := providers.ValidateResourceConfigRequest{ + TypeName: n.Config.Type, + Config: unmarkedConfigVal, + } + + resp := provider.ValidateResourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String())) + + case addrs.DataResourceMode: + schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) + if schema == nil { + var suggestion string + if dSchema, _ := providerSchema.SchemaForResourceType(addrs.ManagedResourceMode, n.Config.Type); dSchema != nil { + suggestion = fmt.Sprintf("\n\nDid you intend to use the managed resource type %q? 
If so, declare this using a \"resource\" block instead of a \"data\" block.", n.Config.Type) + } else if len(providerSchema.DataSources) > 0 { + suggestions := make([]string, 0, len(providerSchema.DataSources)) + for name := range providerSchema.DataSources { + suggestions = append(suggestions, name) + } + if suggestion = didyoumean.NameSuggestion(n.Config.Type, suggestions); suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.%s", n.Provider().ForDisplay(), n.Config.Type, suggestion), + Subject: &n.Config.TypeRange, + }) + return diags + } + + configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return diags + } + + // Use unmarked value for validate request + unmarkedConfigVal, _ := configVal.UnmarkDeep() + req := providers.ValidateDataResourceConfigRequest{ + TypeName: n.Config.Type, + Config: unmarkedConfigVal, + } + + resp := provider.ValidateDataResourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config, n.Addr.String())) + } + + return diags +} + +func (n *NodeValidatableResource) evaluateExpr(ctx EvalContext, expr hcl.Expression, wantTy cty.Type, self addrs.Referenceable, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + refs, refDiags := lang.ReferencesInExpr(addrs.ParseRef, expr) + diags = diags.Append(refDiags) + + scope := ctx.EvaluationScope(self, nil, keyData) + + hclCtx, moreDiags := scope.EvalContext(refs) + diags = diags.Append(moreDiags) + + result, hclDiags := expr.Value(hclCtx) + diags = diags.Append(hclDiags) + + return result, diags +} + +func (n *NodeValidatableResource) stubRepetitionData(hasCount, hasForEach bool) (instances.RepetitionData, addrs.Referenceable) 
{ + keyData := EvalDataForNoInstanceKey + selfAddr := n.ResourceAddr().Resource.Instance(addrs.NoKey) + + if n.Config.Count != nil { + // For a resource that has count, we allow count.index but don't + // know at this stage what it will return. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // "self" can't point to an unknown key, but we'll force it to be + // key 0 here, which should return an unknown value of the + // expected type since none of these elements are known at this + // point anyway. + selfAddr = n.ResourceAddr().Resource.Instance(addrs.IntKey(0)) + } else if n.Config.ForEach != nil { + // For a resource that has for_each, we allow each.value and each.key + // but don't know at this stage what it will return. + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.DynamicVal, + } + + // "self" can't point to an unknown key, but we'll force it to be + // key "" here, which should return an unknown value of the + // expected type since none of these elements are known at + // this point anyway. 
+ selfAddr = n.ResourceAddr().Resource.Instance(addrs.StringKey("")) + } + + return keyData, selfAddr +} + +func (n *NodeValidatableResource) validateCheckRules(ctx EvalContext, config *configs.Resource) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + keyData, selfAddr := n.stubRepetitionData(n.Config.Count != nil, n.Config.ForEach != nil) + + for _, cr := range config.Preconditions { + _, conditionDiags := n.evaluateExpr(ctx, cr.Condition, cty.Bool, nil, keyData) + diags = diags.Append(conditionDiags) + + _, errorMessageDiags := n.evaluateExpr(ctx, cr.ErrorMessage, cty.Bool, nil, keyData) + diags = diags.Append(errorMessageDiags) + } + + for _, cr := range config.Postconditions { + _, conditionDiags := n.evaluateExpr(ctx, cr.Condition, cty.Bool, selfAddr, keyData) + diags = diags.Append(conditionDiags) + + _, errorMessageDiags := n.evaluateExpr(ctx, cr.ErrorMessage, cty.Bool, selfAddr, keyData) + diags = diags.Append(errorMessageDiags) + } + + return diags +} + +func validateCount(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + val, countDiags := evaluateCountExpressionValue(expr, ctx) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !val.IsKnown() { + return diags + } + + if countDiags.HasErrors() { + diags = diags.Append(countDiags) + } + + return diags +} + +func validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + const unknownsAllowed = true + const tupleNotAllowed = false + + val, forEachDiags := evaluateForEachExpressionValue(expr, ctx, unknownsAllowed, tupleNotAllowed) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !val.IsKnown() { + return diags + } + + if forEachDiags.HasErrors() { + diags = diags.Append(forEachDiags) + } + + return diags +} + +func validateDependsOn(ctx EvalContext, dependsOn []hcl.Traversal) (diags 
tfdiags.Diagnostics) { + for _, traversal := range dependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if !refDiags.HasErrors() && len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + return diags +} diff --git a/pkg/tofu/node_resource_validate_test.go b/pkg/tofu/node_resource_validate_test.go new file mode 100644 index 00000000000..68bffc315b3 --- /dev/null +++ b/pkg/tofu/node_resource_validate_test.go @@ -0,0 +1,640 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "errors" + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +func TestNodeValidatableResource_ValidateProvisioner_valid(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + Connection: &configs.Connection{ + Config: configs.SynthBody("", map[string]cty.Value{ + "host": cty.StringVal("localhost"), + "type": cty.StringVal("ssh"), + "port": cty.NumberIntVal(10022), + }), + }, + } + + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + diags := node.validateProvisioner(ctx, pc) + if diags.HasErrors() { + t.Fatalf("node.Eval failed: %s", diags.Err()) + } + if !mp.ValidateProvisionerConfigCalled { + t.Fatalf("p.ValidateProvisionerConfig not called") + } +} + +func TestNodeValidatableResource_ValidateProvisioner__warning(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + } + + rc := &configs.Resource{ + 
Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{}, + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.SimpleWarning("foo is deprecated")) + mp.ValidateProvisionerConfigResponse = provisioners.ValidateProvisionerConfigResponse{ + Diagnostics: diags, + } + } + + diags := node.validateProvisioner(ctx, pc) + if len(diags) != 1 { + t.Fatalf("wrong number of diagnostics in %s; want one warning", diags.ErrWithWarnings()) + } + + if got, want := diags[0].Description().Summary, mp.ValidateProvisionerConfigResponse.Diagnostics[0].Description().Summary; got != want { + t.Fatalf("wrong warning %q; want %q", got, want) + } +} + +func TestNodeValidatableResource_ValidateProvisioner__connectionInvalid(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + Connection: &configs.Connection{ + Config: configs.SynthBody("", map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "bananananananana": cty.StringVal("foo"), + "bazaz": cty.StringVal("bar"), + }), + }, + } + + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{}, + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + diags := node.validateProvisioner(ctx, pc) + if !diags.HasErrors() { + t.Fatalf("node.Eval succeeded; want error") + } + if len(diags) != 3 { + t.Fatalf("wrong number of 
diagnostics; want two errors\n\n%s", diags.Err()) + } + + errStr := diags.Err().Error() + if !(strings.Contains(errStr, "bananananananana") && strings.Contains(errStr, "bazaz")) { + t.Fatalf("wrong errors %q; want something about each of our invalid connInfo keys", errStr) + } +} + +func TestNodeValidatableResource_ValidateResource_managedResource(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + if got, want := req.TypeName, "test_object"; got != want { + t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) + } + if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { + t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) + } + if got, want := req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { + t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateResourceConfigResponse{} + } + + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("bar"), + "test_number": cty.NumberIntVal(2).Mark(marks.Sensitive), + }), + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + err := node.validateResource(ctx) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !mp.ValidateResourceConfigCalled { + t.Fatal("Expected ValidateResourceConfig to be called, but it was not!") + } +} + +func 
TestNodeValidatableResource_ValidateResource_managedResourceCount(t *testing.T) { + // Setup + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + if got, want := req.TypeName, "test_object"; got != want { + t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) + } + if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { + t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateResourceConfigResponse{} + } + + p := providers.Interface(mp) + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + tests := []struct { + name string + count hcl.Expression + }{ + { + "simple count", + hcltest.MockExprLiteral(cty.NumberIntVal(2)), + }, + { + "marked count value", + hcltest.MockExprLiteral(cty.NumberIntVal(3).Mark("marked")), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Count: test.count, + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("bar"), + }), + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + if !mp.ValidateResourceConfigCalled { + t.Fatal("Expected ValidateResourceConfig to be called, but it was not!") + } + }) + } +} + +func TestNodeValidatableResource_ValidateResource_dataSource(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateDataResourceConfigFn = func(req providers.ValidateDataResourceConfigRequest) 
providers.ValidateDataResourceConfigResponse { + if got, want := req.TypeName, "test_object"; got != want { + t.Fatalf("wrong resource type\ngot: %#v\nwant: %#v", got, want) + } + if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { + t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) + } + if got, want := req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { + t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateDataResourceConfigResponse{} + } + + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.DataResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("bar"), + "test_number": cty.NumberIntVal(2).Mark(marks.Sensitive), + }), + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + if !mp.ValidateDataResourceConfigCalled { + t.Fatal("Expected ValidateDataSourceConfig to be called, but it was not!") + } +} + +func TestNodeValidatableResource_ValidateResource_valid(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return providers.ValidateResourceConfigResponse{} + } + + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + node := 
NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_object.foo"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } +} + +func TestNodeValidatableResource_ValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.SimpleWarning("warn")) + diags = diags.Append(errors.New("err")) + return providers.ValidateResourceConfigResponse{ + Diagnostics: diags, + } + } + + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if !diags.HasErrors() { + t.Fatal("unexpected success; want error") + } + + bySeverity := map[tfdiags.Severity]tfdiags.Diagnostics{} + for _, diag := range diags { + bySeverity[diag.Severity()] = append(bySeverity[diag.Severity()], diag) + } + if len(bySeverity[tfdiags.Warning]) != 1 || bySeverity[tfdiags.Warning][0].Description().Summary != "warn" { + t.Errorf("Expected 1 warning 'warn', got: %s", 
bySeverity[tfdiags.Warning].ErrWithWarnings()) + } + if len(bySeverity[tfdiags.Error]) != 1 || bySeverity[tfdiags.Error][0].Description().Summary != "err" { + t.Errorf("Expected 1 error 'err', got: %s", bySeverity[tfdiags.Error].Err()) + } +} + +func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return providers.ValidateResourceConfigResponse{} + } + + // We'll check a _valid_ config first, to make sure we're not failing + // for some other reason, and then make it invalid. + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + DependsOn: []hcl.Traversal{ + // Depending on path.module is pointless, since it is immediately + // available, but we allow all of the referenceable addrs here + // for consistency: referencing them is harmless, and avoids the + // need for us to document a different subset of addresses that + // are valid in depends_on. + // For the sake of this test, it's a valid address we can use that + // doesn't require something else to exist in the configuration. 
+ { + hcl.TraverseRoot{ + Name: "path", + }, + hcl.TraverseAttr{ + Name: "module", + }, + }, + }, + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) + } + + // Now we'll make it invalid by adding additional traversal steps at + // the end of what we're referencing. This is intended to catch the + // situation where the user tries to depend on e.g. a specific resource + // attribute, rather than the whole resource, like aws_instance.foo.id. + rc.DependsOn = append(rc.DependsOn, hcl.Traversal{ + hcl.TraverseRoot{ + Name: "path", + }, + hcl.TraverseAttr{ + Name: "module", + }, + hcl.TraverseAttr{ + Name: "extra", + }, + }) + + diags = node.validateResource(ctx) + if !diags.HasErrors() { + t.Fatal("no error for invalid depends_on") + } + if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) + } + + // Test for handling an unknown root without attribute, like a + // typo that omits the dot in between "path.module". 
+ rc.DependsOn = append(rc.DependsOn, hcl.Traversal{ + hcl.TraverseRoot{ + Name: "pathmodule", + }, + }) + + diags = node.validateResource(ctx) + if !diags.HasErrors() { + t.Fatal("no error for invalid depends_on") + } + if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) + } +} + +func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesNonexistent(t *testing.T) { + mp := simpleMockProvider() + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return providers.ValidateResourceConfigResponse{} + } + + // We'll check a _valid_ config first, to make sure we're not failing + // for some other reason, and then make it invalid. + p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{ + IgnoreChanges: []hcl.Traversal{ + { + hcl.TraverseAttr{ + Name: "test_string", + }, + }, + }, + }, + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) + } + + // Now we'll make it invalid by attempting to ignore a nonexistent + // attribute. 
+ rc.Managed.IgnoreChanges = append(rc.Managed.IgnoreChanges, hcl.Traversal{ + hcl.TraverseAttr{ + Name: "nonexistent", + }, + }) + + diags = node.validateResource(ctx) + if !diags.HasErrors() { + t.Fatal("no error for invalid ignore_changes") + } + if got, want := diags.Err().Error(), "Unsupported attribute: This object has no argument, nested block, or exported attribute named \"nonexistent\""; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) + } +} + +func TestNodeValidatableResource_ValidateResource_invalidIgnoreChangesComputed(t *testing.T) { + // construct a schema with a computed attribute + ms := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Optional: true, + }, + "computed_string": { + Type: cty.String, + Computed: true, + Optional: false, + }, + }, + } + + mp := &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: ms}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: ms}, + }, + }, + } + + mp.ValidateResourceConfigFn = func(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return providers.ValidateResourceConfigResponse{} + } + + // We'll check a _valid_ config first, to make sure we're not failing + // for some other reason, and then make it invalid. 
+ p := providers.Interface(mp) + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{ + IgnoreChanges: []hcl.Traversal{ + { + hcl.TraverseAttr{ + Name: "test_string", + }, + }, + }, + }, + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + }, + } + + ctx := &MockEvalContext{} + ctx.installSimpleEval() + + ctx.ProviderSchemaSchema = mp.GetProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) + } + + // Now we'll make it invalid by attempting to ignore a computed + // attribute. + rc.Managed.IgnoreChanges = append(rc.Managed.IgnoreChanges, hcl.Traversal{ + hcl.TraverseAttr{ + Name: "computed_string", + }, + }) + + diags = node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("got unexpected error: %s", diags.ErrWithWarnings()) + } + if got, want := diags.ErrWithWarnings().Error(), `Redundant ignore_changes element: Adding an attribute name to ignore_changes tells OpenTofu to ignore future changes to the argument in configuration after the object has been created, retaining the value originally configured. + +The attribute computed_string is decided by the provider alone and therefore there can be no configured value to compare with. Including this attribute in ignore_changes has no effect. 
Remove the attribute from ignore_changes to quiet this warning.`; !strings.Contains(got, want) { + t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) + } +} diff --git a/pkg/tofu/node_root_variable.go b/pkg/tofu/node_root_variable.go new file mode 100644 index 00000000000..313b99bc851 --- /dev/null +++ b/pkg/tofu/node_root_variable.go @@ -0,0 +1,146 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// NodeRootVariable represents a root variable input. +type NodeRootVariable struct { + Addr addrs.InputVariable + Config *configs.Variable + + // RawValue is the value for the variable set from outside OpenTofu + // Core, such as on the command line, or from an environment variable, + // or similar. This is the raw value that was provided, not yet + // converted or validated, and can be nil for a variable that isn't + // set at all. 
+ RawValue *InputValue +} + +var ( + _ GraphNodeExecutable = (*NodeRootVariable)(nil) + _ GraphNodeModuleInstance = (*NodeRootVariable)(nil) + _ GraphNodeReferenceable = (*NodeRootVariable)(nil) +) + +func (n *NodeRootVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeModuleInstance +func (n *NodeRootVariable) Path() addrs.ModuleInstance { + return addrs.RootModuleInstance +} + +func (n *NodeRootVariable) ModulePath() addrs.Module { + return addrs.RootModule +} + +// GraphNodeReferenceable +func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// GraphNodeReferencer +func (n *NodeRootVariable) References() []*addrs.Reference { + // This is identical to nodeModuleVariable.References + var refs []*addrs.Reference + + if n.Config != nil { + for _, validation := range n.Config.Validations { + condFuncs, _ := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.Condition) + refs = append(refs, condFuncs...) + errFuncs, _ := lang.ProviderFunctionsInExpr(addrs.ParseRef, validation.ErrorMessage) + refs = append(refs, errFuncs...) + } + } + + return refs +} + +// GraphNodeExecutable +func (n *NodeRootVariable) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { + // Root module variables are special in that they are provided directly + // by the caller (usually, the CLI layer) and so we don't really need to + // evaluate them in the usual sense, but we do need to process the raw + // values given by the caller to match what the module is expecting, and + // make sure the values are valid. + var diags tfdiags.Diagnostics + + addr := addrs.RootModuleInstance.InputVariable(n.Addr.Name) + log.Printf("[TRACE] NodeRootVariable: evaluating %s", addr) + + if n.Config == nil { + // Because we build NodeRootVariable from configuration in the normal + // case it's strange to get here, but we tolerate it to allow for + // tests that might not populate the inputs fully. 
+ return nil + } + + givenVal := n.RawValue + if givenVal == nil { + // We'll use cty.NilVal to represent the variable not being set at + // all, which for historical reasons is unfortunately different than + // explicitly setting it to null in some cases. In normal code we + // should never get here because all variables should have raw + // values, but we can get here in some historical tests that call + // in directly and don't necessarily obey the rules. + givenVal = &InputValue{ + Value: cty.NilVal, + SourceType: ValueFromUnknown, + } + } + + if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.Addr.InModule(addrs.RootModule)) { + ctx.Checks().ReportCheckableObjects( + n.Addr.InModule(addrs.RootModule), + addrs.MakeSet[addrs.Checkable](n.Addr.Absolute(addrs.RootModuleInstance))) + } + + finalVal, moreDiags := prepareFinalInputVariableValue( + addr, + givenVal, + n.Config, + ) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + // No point in proceeding to validations then, because they'll + // probably fail trying to work with a value of the wrong type. + return diags + } + + ctx.SetRootModuleArgument(addr.Variable, finalVal) + + moreDiags = evalVariableValidations( + addrs.RootModuleInstance.InputVariable(n.Addr.Name), + n.Config, + nil, // not set for root module variables + ctx, + ) + diags = diags.Append(moreDiags) + return diags +} + +// dag.GraphNodeDotter impl. +func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/pkg/tofu/node_root_variable_test.go b/pkg/tofu/node_root_variable_test.go new file mode 100644 index 00000000000..b87be6bdaed --- /dev/null +++ b/pkg/tofu/node_root_variable_test.go @@ -0,0 +1,196 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/checks" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/lang" +) + +func TestNodeRootVariableExecute(t *testing.T) { + t.Run("type conversion", func(t *testing.T) { + ctx := new(MockEvalContext) + + n := &NodeRootVariable{ + Addr: addrs.InputVariable{Name: "foo"}, + Config: &configs.Variable{ + Name: "foo", + Type: cty.String, + ConstraintType: cty.String, + }, + RawValue: &InputValue{ + Value: cty.True, + SourceType: ValueFromUnknown, + }, + } + + ctx.ChecksState = checks.NewState(&configs.Config{ + Module: &configs.Module{ + Variables: map[string]*configs.Variable{ + "foo": n.Config, + }, + }, + }) + + diags := n.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + if !ctx.SetRootModuleArgumentCalled { + t.Fatalf("ctx.SetRootModuleArgument wasn't called") + } + if got, want := ctx.SetRootModuleArgumentAddr.String(), "var.foo"; got != want { + t.Errorf("wrong address for ctx.SetRootModuleArgument\ngot: %s\nwant: %s", got, want) + } + if got, want := ctx.SetRootModuleArgumentValue, cty.StringVal("true"); !want.RawEquals(got) { + // NOTE: The given value was cty.Bool but the type constraint was + // cty.String, so it was NodeRootVariable's responsibility to convert + // as part of preparing the "final value". + t.Errorf("wrong value for ctx.SetRootModuleArgument\ngot: %#v\nwant: %#v", got, want) + } + }) + t.Run("validation", func(t *testing.T) { + ctx := new(MockEvalContext) + + // The variable validation function gets called with OpenTofu's + // built-in functions available, so we need a minimal scope just for + // it to get the functions from. 
+ ctx.EvaluationScopeScope = &lang.Scope{} + + // We need to reimplement a _little_ bit of EvalContextBuiltin logic + // here to get a similar effect with EvalContextMock just to get the + // value to flow through here in a realistic way that'll make this test + // useful. + var finalVal cty.Value + ctx.SetRootModuleArgumentFunc = func(addr addrs.InputVariable, v cty.Value) { + if addr.Name == "foo" { + t.Logf("set %s to %#v", addr.String(), v) + finalVal = v + } + } + ctx.GetVariableValueFunc = func(addr addrs.AbsInputVariableInstance) cty.Value { + if addr.String() != "var.foo" { + return cty.NilVal + } + t.Logf("reading final val for %s (%#v)", addr.String(), finalVal) + return finalVal + } + + n := &NodeRootVariable{ + Addr: addrs.InputVariable{Name: "foo"}, + Config: &configs.Variable{ + Name: "foo", + Type: cty.Number, + ConstraintType: cty.Number, + Validations: []*configs.CheckRule{ + { + Condition: fakeHCLExpressionFunc(func(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // This returns true only if the given variable value + // is exactly cty.Number, which allows us to verify + // that we were given the value _after_ type + // conversion. + // This had previously not been handled correctly, + // as reported in: + // https://github.com/hashicorp/terraform/issues/29899 + vars := ctx.Variables["var"] + if vars == cty.NilVal || !vars.Type().IsObjectType() || !vars.Type().HasAttribute("foo") { + t.Logf("var.foo isn't available") + return cty.False, nil + } + val := vars.GetAttr("foo") + if val == cty.NilVal || val.Type() != cty.Number { + t.Logf("var.foo is %#v; want a number", val) + return cty.False, nil + } + return cty.True, nil + }), + ErrorMessage: hcltest.MockExprLiteral(cty.StringVal("Must be a number.")), + }, + }, + }, + RawValue: &InputValue{ + // Note: This is a string, but the variable's type constraint + // is number so it should be converted before use. 
+ Value: cty.StringVal("5"), + SourceType: ValueFromUnknown, + }, + } + + ctx.ChecksState = checks.NewState(&configs.Config{ + Module: &configs.Module{ + Variables: map[string]*configs.Variable{ + "foo": n.Config, + }, + }, + }) + + diags := n.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) + } + + if !ctx.SetRootModuleArgumentCalled { + t.Fatalf("ctx.SetRootModuleArgument wasn't called") + } + if got, want := ctx.SetRootModuleArgumentAddr.String(), "var.foo"; got != want { + t.Errorf("wrong address for ctx.SetRootModuleArgument\ngot: %s\nwant: %s", got, want) + } + if got, want := ctx.SetRootModuleArgumentValue, cty.NumberIntVal(5); !want.RawEquals(got) { + // NOTE: The given value was cty.String but the type constraint was + // cty.Number, so it was NodeRootVariable's responsibility to convert + // as part of preparing the "final value". + t.Errorf("wrong value for ctx.SetRootModuleArgument\ngot: %#v\nwant: %#v", got, want) + } + if status := ctx.Checks().ObjectCheckStatus(n.Addr.Absolute(addrs.RootModuleInstance)); status != checks.StatusPass { + t.Errorf("expected checks to pass but go %s instead", status) + } + }) +} + +// fakeHCLExpressionFunc is a fake implementation of hcl.Expression that just +// directly produces a value with direct Go code. +// +// An expression of this type has no references and so it cannot access any +// variables from the EvalContext unless something else arranges for them +// to be guaranteed available. For example, custom variable validations just +// unconditionally have access to the variable they are validating regardless +// of references. 
+type fakeHCLExpressionFunc func(*hcl.EvalContext) (cty.Value, hcl.Diagnostics) + +var _ hcl.Expression = fakeHCLExpressionFunc(nil) + +func (f fakeHCLExpressionFunc) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return f(ctx) +} + +func (f fakeHCLExpressionFunc) Variables() []hcl.Traversal { + return nil +} + +func (f fakeHCLExpressionFunc) Functions() []hcl.Traversal { + return nil +} + +func (f fakeHCLExpressionFunc) Range() hcl.Range { + return hcl.Range{ + Filename: "fake", + Start: hcl.InitialPos, + End: hcl.InitialPos, + } +} + +func (f fakeHCLExpressionFunc) StartRange() hcl.Range { + return f.Range() +} diff --git a/pkg/tofu/node_value.go b/pkg/tofu/node_value.go new file mode 100644 index 00000000000..55b1fb46e2c --- /dev/null +++ b/pkg/tofu/node_value.go @@ -0,0 +1,15 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// graphNodeTemporaryValue is implemented by nodes that may represent temporary +// values, which are those not saved to the state file. This includes locals, +// variables, and non-root outputs. +// A boolean return value allows a node which may need to be saved to +// conditionally do so. +type graphNodeTemporaryValue interface { + temporaryValue() bool +} diff --git a/pkg/tofu/opentf_test.go b/pkg/tofu/opentf_test.go new file mode 100644 index 00000000000..642e296587f --- /dev/null +++ b/pkg/tofu/opentf_test.go @@ -0,0 +1,1096 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "flag" + "io" + "os" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/initwd" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/provisioners" + "github.com/kubegems/opentofu/pkg/registry" + "github.com/kubegems/opentofu/pkg/states" + + _ "github.com/kubegems/opentofu/pkg/logging" +) + +// This is the directory where our test fixtures are. +const fixtureDir = "./testdata" + +func TestMain(m *testing.M) { + flag.Parse() + + // We have fmt.Stringer implementations on lots of objects that hide + // details that we very often want to see in tests, so we just disable + // spew's use of String methods globally on the assumption that spew + // usage implies an intent to see the raw values and ignore any + // abstractions. + spew.Config.DisableMethods = true + + os.Exit(m.Run()) +} + +func testModule(t *testing.T, name string) *configs.Config { + t.Helper() + c, _ := testModuleWithSnapshot(t, name) + return c +} + +func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, *configload.Snapshot) { + t.Helper() + + dir := filepath.Join(fixtureDir, name) + // FIXME: We're not dealing with the cleanup function here because + // this testModule function is used all over and so we don't want to + // change its interface at this late stage. + loader, _ := configload.NewLoaderForTests(t) + + // We need to be able to exercise experimental features in our integration tests. 
+ loader.AllowLanguageExperiments(true) + + // Test modules usually do not refer to remote sources, and for local + // sources only this ultimately just records all of the module paths + // in a JSON file so that we can load them below. + inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + _, instDiags := inst.InstallModules(context.Background(), dir, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + config, snap, diags := loader.LoadConfigWithSnapshot(dir, configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + return config, snap +} + +// testModuleInline takes a map of path -> config strings and yields a config +// structure with those files loaded from disk +func testModuleInline(t *testing.T, sources map[string]string) *configs.Config { + t.Helper() + + cfgPath := t.TempDir() + + for path, configStr := range sources { + dir := filepath.Dir(path) + if dir != "." { + err := os.MkdirAll(filepath.Join(cfgPath, dir), os.FileMode(0777)) + if err != nil { + t.Fatalf("Error creating subdir: %s", err) + } + } + // Write the configuration + cfgF, err := os.Create(filepath.Join(cfgPath, path)) + if err != nil { + t.Fatalf("Error creating temporary file for config: %s", err) + } + + _, err = io.Copy(cfgF, strings.NewReader(configStr)) + cfgF.Close() + if err != nil { + t.Fatalf("Error creating temporary file for config: %s", err) + } + } + + loader, cleanup := configload.NewLoaderForTests(t) + defer cleanup() + + // We need to be able to exercise experimental features in our integration tests. 
+ loader.AllowLanguageExperiments(true) + + // Test modules usually do not refer to remote sources, and for local + // sources only this ultimately just records all of the module paths + // in a JSON file so that we can load them below. + inst := initwd.NewModuleInstaller(loader.ModulesDir(), loader, registry.NewClient(nil, nil)) + _, instDiags := inst.InstallModules(context.Background(), cfgPath, "tests", true, false, initwd.ModuleInstallHooksImpl{}, configs.RootModuleCallForTesting()) + if instDiags.HasErrors() { + t.Fatal(instDiags.Err()) + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + config, diags := loader.LoadConfigWithTests(cfgPath, "tests", configs.RootModuleCallForTesting()) + if diags.HasErrors() { + t.Fatal(diags.Error()) + } + + return config +} + +// testSetResourceInstanceCurrent is a helper function for tests that sets a Current, +// Ready resource instance for the given module. +func testSetResourceInstanceCurrent(module *states.Module, resource, attrsJson, provider string) { + module.SetResourceInstanceCurrent( + mustResourceInstanceAddr(resource).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(attrsJson), + }, + mustProviderConfig(provider), + ) +} + +// testSetResourceInstanceTainted is a helper function for tests that sets a Current, +// Tainted resource instance for the given module. 
+func testSetResourceInstanceTainted(module *states.Module, resource, attrsJson, provider string) { + module.SetResourceInstanceCurrent( + mustResourceInstanceAddr(resource).Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(attrsJson), + }, + mustProviderConfig(provider), + ) +} + +func testProviderFuncFixed(rp providers.Interface) providers.Factory { + if p, ok := rp.(*MockProvider); ok { + // make sure none of the methods were "called" on this new instance + p.GetProviderSchemaCalled = false + p.ValidateProviderConfigCalled = false + p.ValidateResourceConfigCalled = false + p.ValidateDataResourceConfigCalled = false + p.UpgradeResourceStateCalled = false + p.ConfigureProviderCalled = false + p.StopCalled = false + p.ReadResourceCalled = false + p.PlanResourceChangeCalled = false + p.ApplyResourceChangeCalled = false + p.ImportResourceStateCalled = false + p.ReadDataSourceCalled = false + p.CloseCalled = false + } + + return func() (providers.Interface, error) { + return rp, nil + } +} + +func testProvisionerFuncFixed(rp *MockProvisioner) provisioners.Factory { + // make sure this provisioner has not been closed + rp.CloseCalled = false + + return func() (provisioners.Interface, error) { + return rp, nil + } +} + +func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance { + addr, diags := addrs.ParseAbsResourceInstanceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +func mustConfigResourceAddr(s string) addrs.ConfigResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr.Config() +} + +func mustAbsResourceAddr(s string) addrs.AbsResource { + addr, diags := addrs.ParseAbsResourceStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return addr +} + +func mustProviderConfig(s string) addrs.AbsProviderConfig { + p, diags := addrs.ParseAbsProviderConfigStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + 
return p +} + +func mustReference(s string) *addrs.Reference { + p, diags := addrs.ParseRefStr(s) + if diags.HasErrors() { + panic(diags.Err()) + } + return p +} + +// HookRecordApplyOrder is a test hook that records the order of applies +// by recording the PreApply event. +type HookRecordApplyOrder struct { + NilHook + + Active bool + + l sync.Mutex + IDs []string + States []cty.Value + Diffs []*plans.Change +} + +func (h *HookRecordApplyOrder) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + if plannedNewState.RawEquals(priorState) { + return HookActionContinue, nil + } + + if h.Active { + h.l.Lock() + defer h.l.Unlock() + + h.IDs = append(h.IDs, addr.String()) + h.Diffs = append(h.Diffs, &plans.Change{ + Action: action, + Before: priorState, + After: plannedNewState, + }) + h.States = append(h.States, priorState) + } + + return HookActionContinue, nil +} + +// Below are all the constant strings that are the expected output for +// various tests. 
+ +const testTofuInputProviderOnlyStr = ` +aws_instance.foo: + ID = + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = us-west-2 + type = +` + +const testTofuApplyStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyDataBasicStr = ` +data.null_data_source.testing: + ID = yo + provider = provider["registry.opentofu.org/hashicorp/null"] +` + +const testTofuApplyRefCountStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = 3 + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyProviderAliasStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"].bar + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyProviderAliasConfigStr = ` +another_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/another"].two + type = another_instance +another_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/another"] + type = another_instance +` + +const testTofuApplyEmptyModuleStr = ` + +Outputs: + +end = XXXX +` + +const testTofuApplyDependsCreateBeforeStr = ` +aws_instance.lb: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] + instance = foo + type = aws_instance + + 
Dependencies: + aws_instance.web +aws_instance.web: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = ami-new + type = aws_instance +` + +const testTofuApplyCreateBeforeStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = xyz + type = aws_instance +` + +const testTofuApplyCreateBeforeUpdateStr = ` +aws_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = baz + type = aws_instance +` + +const testTofuApplyCancelStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + value = 2 +` + +const testTofuApplyComputeStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = computed_value + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + compute = value + compute_value = 1 + num = 2 + type = aws_instance + value = computed_value +` + +const testTofuApplyCountDecStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo.0: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo.1: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +` + +const testTofuApplyCountDecToOneStr = ` +aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +` + +const testTofuApplyCountDecToOneCorruptedStr = ` +aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +` + +const testTofuApplyCountDecToOneCorruptedPlanStr = ` +DIFF: + +DESTROY: aws_instance.foo[0] + id: "baz" => "" + type: "aws_instance" => 
"" + + + +STATE: + +aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo.0: + ID = baz + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyCountVariableStr = ` +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +` + +const testTofuApplyCountVariableRefStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = 2 + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` +const testTofuApplyForEachVariableStr = ` +aws_instance.foo["b15c6d616d6143248c575900dff57325eb1de498"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo["c3de47d34b0a9f13918dd705c141d579dd6555fd"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.foo["e30a7edcc42a846684f2a4eea5f3cd261d33c46d"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + type = aws_instance +aws_instance.one["a"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.one["b"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.two["a"]: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Dependencies: + aws_instance.one +aws_instance.two["b"]: + ID = foo + provider = 
provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Dependencies: + aws_instance.one` +const testTofuApplyMinimalStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyModuleStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + +module.child: + aws_instance.baz: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +` + +const testTofuApplyModuleBoolStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = true + type = aws_instance +` + +const testTofuApplyModuleDestroyOrderStr = ` + +` + +const testTofuApplyMultiProviderStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +do_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/do"] + num = 2 + type = do_instance +` + +const testTofuApplyModuleOnlyProviderStr = ` + +module.child: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + test_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/test"] + type = test_instance +` + +const testTofuApplyModuleProviderAliasStr = ` + +module.child: + aws_instance.foo: + ID = foo + provider = module.child.provider["registry.opentofu.org/hashicorp/aws"].eu + type = aws_instance +` + +const testTofuApplyModuleVarRefExistingStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance + 
+module.child: + aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + value = bar + + Dependencies: + aws_instance.foo +` + +const testTofuApplyOutputOrphanStr = ` + +Outputs: + +foo = bar +` + +const testTofuApplyOutputOrphanModuleStr = ` + +` + +const testTofuApplyProvisionerStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + compute = value + compute_value = 1 + num = 2 + type = aws_instance + value = computed_value +` + +const testTofuApplyProvisionerModuleStr = ` + +module.child: + aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyProvisionerFailStr = ` +aws_instance.bar: (tainted) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyProvisionerFailCreateStr = ` +aws_instance.bar: (tainted) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyProvisionerFailCreateNoIdStr = ` + +` + +const testTofuApplyProvisionerFailCreateBeforeDestroyStr = ` +aws_instance.bar: (tainted) (1 deposed) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = xyz + type = aws_instance + Deposed ID 1 = bar +` + +const testTofuApplyProvisionerResourceRefStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyProvisionerSelfRefStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +` + 
+const testTofuApplyProvisionerMultiSelfRefStr = ` +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 0 + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 1 + type = aws_instance +aws_instance.foo.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 2 + type = aws_instance +` + +const testTofuApplyProvisionerMultiSelfRefSingleStr = ` +aws_instance.foo.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 0 + type = aws_instance +aws_instance.foo.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 1 + type = aws_instance +aws_instance.foo.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = number 2 + type = aws_instance +` + +const testTofuApplyProvisionerDiffStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +` + +const testTofuApplyProvisionerSensitiveStr = ` +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance +` + +const testTofuApplyDestroyStr = ` + +` + +const testTofuApplyErrorStr = ` +aws_instance.bar: (tainted) + ID = + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = 2 + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + value = 2 +` + +const testTofuApplyErrorCreateBeforeDestroyStr = ` +aws_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = abc + type = aws_instance +` + +const testTofuApplyErrorDestroyCreateBeforeDestroyStr = ` +aws_instance.bar: (1 deposed) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + require_new = xyz + type = aws_instance + 
Deposed ID 1 = bar +` + +const testTofuApplyErrorPartialStr = ` +aws_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + type = aws_instance + value = 2 +` + +const testTofuApplyResourceDependsOnModuleStr = ` +aws_instance.a: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = parent + type = aws_instance + + Dependencies: + module.child.aws_instance.child + +module.child: + aws_instance.child: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = child + type = aws_instance +` + +const testTofuApplyResourceDependsOnModuleDeepStr = ` +aws_instance.a: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = parent + type = aws_instance + + Dependencies: + module.child.module.grandchild.aws_instance.c + +module.child.grandchild: + aws_instance.c: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = grandchild + type = aws_instance +` + +const testTofuApplyResourceDependsOnModuleInModuleStr = ` + +module.child: + aws_instance.b: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = child + type = aws_instance + + Dependencies: + module.child.module.grandchild.aws_instance.c +module.child.grandchild: + aws_instance.c: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + ami = grandchild + type = aws_instance +` + +const testTofuApplyTaintStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyTaintDepStr = ` +aws_instance.bar: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + num = 2 + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = 
provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyTaintDepRequireNewStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo + require_new = yes + type = aws_instance + + Dependencies: + aws_instance.foo +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyOutputStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + +Outputs: + +foo_num = 2 +` + +const testTofuApplyOutputAddStr = ` +aws_instance.test.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo0 + type = aws_instance +aws_instance.test.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = foo1 + type = aws_instance + +Outputs: + +firstOutput = foo0 +secondOutput = foo1 +` + +const testTofuApplyOutputListStr = ` +aws_instance.bar.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + +Outputs: + +foo_num = [bar,bar,bar] +` + +const testTofuApplyOutputMultiStr = ` +aws_instance.bar.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance 
+aws_instance.bar.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + +Outputs: + +foo_num = bar,bar,bar +` + +const testTofuApplyOutputMultiIndexStr = ` +aws_instance.bar.0: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.1: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.bar.2: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance + +Outputs: + +foo_num = bar +` + +const testTofuApplyUnknownAttrStr = ` +aws_instance.foo: (tainted) + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + num = 2 + type = aws_instance +` + +const testTofuApplyVarsStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + bar = override + baz = override + foo = us-east-1 +aws_instance.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + bar = baz + list.# = 2 + list.0 = Hello + list.1 = World + map.Baz = Foo + map.Foo = Bar + map.Hello = World + num = 2 +` + +const testTofuApplyVarsEnvStr = ` +aws_instance.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/aws"] + list.# = 2 + list.0 = Hello + list.1 = World + map.Baz = Foo + map.Foo = Bar + map.Hello = World + string = baz + type = aws_instance +` + +const testTofuRefreshDataRefDataStr = ` +data.null_data_source.bar: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/null"] + bar = yes +data.null_data_source.foo: + ID = foo + provider = provider["registry.opentofu.org/hashicorp/null"] + foo = yes +` diff --git 
a/pkg/tofu/phasestate_string.go b/pkg/tofu/phasestate_string.go new file mode 100644 index 00000000000..0ba4633fc61 --- /dev/null +++ b/pkg/tofu/phasestate_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type phaseState"; DO NOT EDIT. + +package tofu + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[workingState-0] + _ = x[refreshState-1] + _ = x[prevRunState-2] +} + +const _phaseState_name = "workingStaterefreshStateprevRunState" + +var _phaseState_index = [...]uint8{0, 12, 24, 36} + +func (i phaseState) String() string { + if i < 0 || i >= phaseState(len(_phaseState_index)-1) { + return "phaseState(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _phaseState_name[_phaseState_index[i]:_phaseState_index[i+1]] +} diff --git a/pkg/tofu/provider_for_test_framework.go b/pkg/tofu/provider_for_test_framework.go new file mode 100644 index 00000000000..4820502409e --- /dev/null +++ b/pkg/tofu/provider_for_test_framework.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "hash/fnv" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +var _ providers.Interface = &providerForTest{} + +// providerForTest is a wrapper around a real provider to allow certain resources to be overridden +// (by address) or mocked (by provider and resource type) for testing framework. +type providerForTest struct { + // providers.Interface is not embedded to make it safer to extend + // the interface without silently breaking providerForTest functionality. 
+ internal providers.Interface + schema providers.ProviderSchema + + managedResources resourceForTestByType + dataResources resourceForTestByType +} + +func newProviderForTestWithSchema(internal providers.Interface, schema providers.ProviderSchema) *providerForTest { + return &providerForTest{ + internal: internal, + schema: schema, + managedResources: make(resourceForTestByType), + dataResources: make(resourceForTestByType), + } +} + +func newProviderForTest(internal providers.Interface, res []*configs.MockResource) (providers.Interface, error) { + schema := internal.GetProviderSchema() + if schema.Diagnostics.HasErrors() { + return nil, fmt.Errorf("getting provider schema for test wrapper: %w", schema.Diagnostics.Err()) + } + + p := newProviderForTestWithSchema(internal, schema) + + p.addMockResources(res) + + return p, nil +} + +func (p *providerForTest) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + var resp providers.ReadResourceResponse + + resSchema, _ := p.schema.SchemaForResourceType(addrs.ManagedResourceMode, r.TypeName) + + overrideValues := p.managedResources.getOverrideValues(r.TypeName) + + resp.NewState, resp.Diagnostics = newMockValueComposer(r.TypeName). + ComposeBySchema(resSchema, r.ProviderMeta, overrideValues) + + return resp +} + +func (p *providerForTest) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + if r.Config.IsNull() { + return providers.PlanResourceChangeResponse{ + PlannedState: r.ProposedNewState, // null + } + } + + resSchema, _ := p.schema.SchemaForResourceType(addrs.ManagedResourceMode, r.TypeName) + + overrideValues := p.managedResources.getOverrideValues(r.TypeName) + + var resp providers.PlanResourceChangeResponse + + resp.PlannedState, resp.Diagnostics = newMockValueComposer(r.TypeName). 
+ ComposeBySchema(resSchema, r.Config, overrideValues) + + return resp +} + +func (p *providerForTest) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + return providers.ApplyResourceChangeResponse{ + NewState: r.PlannedState, + } +} + +func (p *providerForTest) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + resSchema, _ := p.schema.SchemaForResourceType(addrs.DataResourceMode, r.TypeName) + + var resp providers.ReadDataSourceResponse + + overrideValues := p.dataResources.getOverrideValues(r.TypeName) + + resp.State, resp.Diagnostics = newMockValueComposer(r.TypeName). + ComposeBySchema(resSchema, r.Config, overrideValues) + + return resp +} + +// Calling the internal provider ensures providerForTest has the same behaviour as if +// it wasn't overridden or mocked. The only exception is ImportResourceState, which panics +// if called via providerForTest because importing is not supported in testing framework. + +func (p *providerForTest) GetProviderSchema() providers.GetProviderSchemaResponse { + return p.internal.GetProviderSchema() +} + +func (p *providerForTest) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse { + return p.internal.ValidateProviderConfig(r) +} + +func (p *providerForTest) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse { + return p.internal.ValidateResourceConfig(r) +} + +func (p *providerForTest) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse { + return p.internal.ValidateDataResourceConfig(r) +} + +func (p *providerForTest) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + return p.internal.UpgradeResourceState(r) +} + +// providerForTest doesn't configure its internal provider because it is mocked. 
+func (p *providerForTest) ConfigureProvider(_ providers.ConfigureProviderRequest) providers.ConfigureProviderResponse { + return providers.ConfigureProviderResponse{} +} + +func (p *providerForTest) Stop() error { + return p.internal.Stop() +} + +func (p *providerForTest) GetFunctions() providers.GetFunctionsResponse { + return p.internal.GetFunctions() +} + +func (p *providerForTest) CallFunction(r providers.CallFunctionRequest) providers.CallFunctionResponse { + return p.internal.CallFunction(r) +} + +func (p *providerForTest) Close() error { + return p.internal.Close() +} + +func (p *providerForTest) ImportResourceState(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + panic("Importing is not supported in testing context. providerForTest must not be used to call ImportResourceState") +} + +func (p *providerForTest) setSingleResource(addr addrs.Resource, overrides map[string]cty.Value) { + res := resourceForTest{ + overrideValues: overrides, + } + + switch addr.Mode { + case addrs.ManagedResourceMode: + p.managedResources[addr.Type] = res + case addrs.DataResourceMode: + p.dataResources[addr.Type] = res + case addrs.InvalidResourceMode: + panic("BUG: invalid mock resource mode") + default: + panic("BUG: unsupported resource mode: " + addr.Mode.String()) + } +} + +func (p *providerForTest) addMockResources(mockResources []*configs.MockResource) { + for _, mockRes := range mockResources { + var resources resourceForTestByType + + switch mockRes.Mode { + case addrs.ManagedResourceMode: + resources = p.managedResources + case addrs.DataResourceMode: + resources = p.dataResources + case addrs.InvalidResourceMode: + panic("BUG: invalid mock resource mode") + default: + panic("BUG: unsupported mock resource mode: " + mockRes.Mode.String()) + } + + resources[mockRes.Type] = resourceForTest{ + overrideValues: mockRes.Defaults, + } + } +} + +type resourceForTest struct { + overrideValues map[string]cty.Value +} + +type resourceForTestByType 
map[string]resourceForTest + +func (m resourceForTestByType) getOverrideValues(typeName string) map[string]cty.Value { + return m[typeName].overrideValues +} + +func newMockValueComposer(typeName string) hcl2shim.MockValueComposer { + hash := fnv.New64() + hash.Write([]byte(typeName)) + return hcl2shim.NewMockValueComposer(int64(hash.Sum64())) +} diff --git a/pkg/tofu/provider_mock.go b/pkg/tofu/provider_mock.go new file mode 100644 index 00000000000..d9459f71170 --- /dev/null +++ b/pkg/tofu/provider_mock.go @@ -0,0 +1,567 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" + + "github.com/kubegems/opentofu/pkg/configs/hcl2shim" + "github.com/kubegems/opentofu/pkg/providers" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetProviderSchemaCalled bool + GetProviderSchemaResponse *providers.GetProviderSchemaResponse + + ValidateProviderConfigCalled bool + ValidateProviderConfigResponse *providers.ValidateProviderConfigResponse + ValidateProviderConfigRequest providers.ValidateProviderConfigRequest + ValidateProviderConfigFn func(providers.ValidateProviderConfigRequest) providers.ValidateProviderConfigResponse + + ValidateResourceConfigCalled bool + ValidateResourceConfigTypeName string + ValidateResourceConfigResponse *providers.ValidateResourceConfigResponse + ValidateResourceConfigRequest providers.ValidateResourceConfigRequest + ValidateResourceConfigFn func(providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse + + ValidateDataResourceConfigCalled bool + ValidateDataResourceConfigTypeName string + ValidateDataResourceConfigResponse *providers.ValidateDataResourceConfigResponse + ValidateDataResourceConfigRequest providers.ValidateDataResourceConfigRequest + ValidateDataResourceConfigFn func(providers.ValidateDataResourceConfigRequest) providers.ValidateDataResourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse *providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureProviderCalled bool + ConfigureProviderResponse *providers.ConfigureProviderResponse + ConfigureProviderRequest providers.ConfigureProviderRequest + ConfigureProviderFn func(providers.ConfigureProviderRequest) providers.ConfigureProviderResponse + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse *providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + 
	PlanResourceChangeCalled   bool
	PlanResourceChangeResponse *providers.PlanResourceChangeResponse
	PlanResourceChangeRequest  providers.PlanResourceChangeRequest
	PlanResourceChangeFn       func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse

	ApplyResourceChangeCalled   bool
	ApplyResourceChangeResponse *providers.ApplyResourceChangeResponse
	ApplyResourceChangeRequest  providers.ApplyResourceChangeRequest
	ApplyResourceChangeFn       func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse

	ImportResourceStateCalled   bool
	ImportResourceStateResponse *providers.ImportResourceStateResponse
	ImportResourceStateRequest  providers.ImportResourceStateRequest
	ImportResourceStateFn       func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse

	ReadDataSourceCalled   bool
	ReadDataSourceResponse *providers.ReadDataSourceResponse
	ReadDataSourceRequest  providers.ReadDataSourceRequest
	ReadDataSourceFn       func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse

	GetFunctionsCalled   bool
	GetFunctionsResponse *providers.GetFunctionsResponse
	GetFunctionsFn       func() providers.GetFunctionsResponse

	CallFunctionCalled   bool
	CallFunctionResponse *providers.CallFunctionResponse
	CallFunctionRequest  providers.CallFunctionRequest
	CallFunctionFn       func(providers.CallFunctionRequest) providers.CallFunctionResponse

	CloseCalled bool
	CloseError  error
}

// GetProviderSchema records the call under lock and returns the configured
// schema response (or a default empty one).
func (p *MockProvider) GetProviderSchema() providers.GetProviderSchemaResponse {
	p.Lock()
	defer p.Unlock()
	p.GetProviderSchemaCalled = true
	return p.getProviderSchema()
}

func (p *MockProvider) getProviderSchema() providers.GetProviderSchemaResponse {
	// This version of getProviderSchema doesn't do any locking, so it's
	// suitable to call from other methods of this mock as long as they are
	// already holding the lock.
	if p.GetProviderSchemaResponse != nil {
		return *p.GetProviderSchemaResponse
	}

	// Default: an empty provider schema with no resource types or data
	// sources, so lookups fail cleanly rather than panicking on nil maps.
	return providers.GetProviderSchemaResponse{
		Provider:      providers.Schema{},
		DataSources:   map[string]providers.Schema{},
		ResourceTypes: map[string]providers.Schema{},
	}
}

// ValidateProviderConfig records the call; the Fn stub takes precedence over
// the canned Response. By default it echoes the config back as PreparedConfig.
func (p *MockProvider) ValidateProviderConfig(r providers.ValidateProviderConfigRequest) (resp providers.ValidateProviderConfigResponse) {
	p.Lock()
	defer p.Unlock()

	p.ValidateProviderConfigCalled = true
	p.ValidateProviderConfigRequest = r
	if p.ValidateProviderConfigFn != nil {
		return p.ValidateProviderConfigFn(r)
	}

	if p.ValidateProviderConfigResponse != nil {
		return *p.ValidateProviderConfigResponse
	}

	resp.PreparedConfig = r.Config
	return resp
}

// ValidateResourceConfig records the call and checks that the config is
// serializable against the mock schema before delegating to any stub.
func (p *MockProvider) ValidateResourceConfig(r providers.ValidateResourceConfigRequest) (resp providers.ValidateResourceConfigResponse) {
	p.Lock()
	defer p.Unlock()

	p.ValidateResourceConfigCalled = true
	p.ValidateResourceConfigRequest = r

	// Marshal the value to replicate behavior by the GRPC protocol,
	// and return any relevant errors
	resourceSchema, ok := p.getProviderSchema().ResourceTypes[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName))
		return resp
	}

	_, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	if p.ValidateResourceConfigFn != nil {
		return p.ValidateResourceConfigFn(r)
	}

	if p.ValidateResourceConfigResponse != nil {
		return *p.ValidateResourceConfigResponse
	}

	return resp
}

// ValidateDataResourceConfig mirrors ValidateResourceConfig for data sources.
func (p *MockProvider) ValidateDataResourceConfig(r providers.ValidateDataResourceConfigRequest) (resp providers.ValidateDataResourceConfigResponse) {
	p.Lock()
	defer p.Unlock()

	p.ValidateDataResourceConfigCalled = true
	p.ValidateDataResourceConfigRequest = r

	// Marshal the value to replicate behavior by the GRPC protocol
	dataSchema, ok := p.getProviderSchema().DataSources[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName))
		return resp
	}
	_, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	if p.ValidateDataResourceConfigFn != nil {
		return p.ValidateDataResourceConfigFn(r)
	}

	if p.ValidateDataResourceConfigResponse != nil {
		return *p.ValidateDataResourceConfigResponse
	}

	return resp
}

// UpgradeResourceState requires Configure to have been called first. The
// default behavior decodes the raw state (flatmap or JSON) against the
// current schema without applying any real upgrade logic.
func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
	p.Lock()
	defer p.Unlock()

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before UpgradeResourceState %q", r.TypeName))
		return resp
	}

	schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName))
		return resp
	}

	schemaType := schema.Block.ImpliedType()

	p.UpgradeResourceStateCalled = true
	p.UpgradeResourceStateRequest = r

	if p.UpgradeResourceStateFn != nil {
		return p.UpgradeResourceStateFn(r)
	}

	if p.UpgradeResourceStateResponse != nil {
		return *p.UpgradeResourceStateResponse
	}

	switch {
	case r.RawStateFlatmap != nil:
		v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		resp.UpgradedState = v
	case len(r.RawStateJSON) > 0:
		v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)

		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		resp.UpgradedState = v
	}

	return resp
}

// ConfigureProvider records the call; later mock methods check
// ConfigureProviderCalled to enforce call ordering.
func (p *MockProvider) ConfigureProvider(r providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
	p.Lock()
	defer p.Unlock()

	p.ConfigureProviderCalled = true
	p.ConfigureProviderRequest = r

	if p.ConfigureProviderFn != nil {
		return p.ConfigureProviderFn(r)
	}

	if p.ConfigureProviderResponse != nil {
		return *p.ConfigureProviderResponse
	}

	return resp
}

func (p *MockProvider) Stop() error {
	// We intentionally don't lock in this one because the whole point of this
	// method is to be called concurrently with another operation that can
	// be cancelled. The provider itself is responsible for handling
	// any concurrency concerns in this case.

	p.StopCalled = true
	if p.StopFn != nil {
		return p.StopFn()
	}

	return p.StopResponse
}

// ReadResource defaults to returning the prior state unchanged; a canned
// response is coerced to the schema first to keep older tests honest.
func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
	p.Lock()
	defer p.Unlock()

	p.ReadResourceCalled = true
	p.ReadResourceRequest = r

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ReadResource %q", r.TypeName))
		return resp
	}

	if p.ReadResourceFn != nil {
		return p.ReadResourceFn(r)
	}

	if p.ReadResourceResponse != nil {
		resp = *p.ReadResourceResponse

		// Make sure the NewState conforms to the schema.
		// This isn't always the case for the existing tests.
		schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName]
		if !ok {
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName))
			return resp
		}

		newState, err := schema.Block.CoerceValue(resp.NewState)
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
		}
		resp.NewState = newState
		return resp
	}

	// otherwise just return the same state we received
	resp.NewState = r.PriorState
	resp.Private = r.Private
	return resp
}

// PlanResourceChange implements a minimal default plan: accept the proposed
// state, marking computed-and-unset attributes as unknown.
func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
	p.Lock()
	defer p.Unlock()

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before PlanResourceChange %q", r.TypeName))
		return resp
	}

	p.PlanResourceChangeCalled = true
	p.PlanResourceChangeRequest = r

	if p.PlanResourceChangeFn != nil {
		return p.PlanResourceChangeFn(r)
	}

	if p.PlanResourceChangeResponse != nil {
		return *p.PlanResourceChangeResponse
	}

	// this is a destroy plan,
	if r.ProposedNewState.IsNull() {
		resp.PlannedState = r.ProposedNewState
		resp.PlannedPrivate = r.PriorPrivate
		return resp
	}

	schema, ok := p.getProviderSchema().ResourceTypes[r.TypeName]
	if !ok {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName))
		return resp
	}

	// The default plan behavior is to accept the proposed value, and mark all
	// nil computed attributes as unknown.
	val, err := cty.Transform(r.ProposedNewState, func(path cty.Path, v cty.Value) (cty.Value, error) {
		// We're only concerned with known null values, which can be computed
		// by the provider.
		if !v.IsKnown() {
			return v, nil
		}

		attrSchema := schema.Block.AttributeByPath(path)
		if attrSchema == nil {
			// this is an intermediate path which does not represent an attribute
			return v, nil
		}

		// get the current configuration value, to detect when a
		// computed+optional attributes has become unset
		configVal, err := path.Apply(r.Config)
		if err != nil {
			return v, err
		}

		switch {
		case attrSchema.Computed && !attrSchema.Optional && v.IsNull():
			// this is the easy path, this value is not yet set, and _must_ be computed
			return cty.UnknownVal(v.Type()), nil

		case attrSchema.Computed && attrSchema.Optional && !v.IsNull() && configVal.IsNull():
			// If an optional+computed value has gone from set to unset, it
			// becomes computed. (this was not possible to do with legacy
			// providers)
			return cty.UnknownVal(v.Type()), nil
		}

		return v, nil
	})
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	resp.PlannedPrivate = r.PriorPrivate
	resp.PlannedState = val

	return resp
}

// ApplyResourceChange defaults to materializing the planned state, replacing
// any unknown (computed) values with type-appropriate zero values.
func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
	p.Lock()
	defer p.Unlock()
	p.ApplyResourceChangeCalled = true
	p.ApplyResourceChangeRequest = r

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ApplyResourceChange %q", r.TypeName))
		return resp
	}

	if p.ApplyResourceChangeFn != nil {
		return p.ApplyResourceChangeFn(r)
	}

	if p.ApplyResourceChangeResponse != nil {
		return *p.ApplyResourceChangeResponse
	}

	// if the value is nil, we return that directly to correspond to a delete
	if r.PlannedState.IsNull() {
		resp.NewState = r.PlannedState
		return resp
	}

	// the default behavior will be to create the minimal valid apply value by
	// setting unknowns (which correspond to computed attributes) to a zero
	// value.
	val, _ := cty.Transform(r.PlannedState, func(path cty.Path, v cty.Value) (cty.Value, error) {
		if !v.IsKnown() {
			ty := v.Type()
			switch {
			case ty == cty.String:
				return cty.StringVal(""), nil
			case ty == cty.Number:
				return cty.NumberIntVal(0), nil
			case ty == cty.Bool:
				return cty.False, nil
			case ty.IsMapType():
				return cty.MapValEmpty(ty.ElementType()), nil
			case ty.IsListType():
				return cty.ListValEmpty(ty.ElementType()), nil
			default:
				return cty.NullVal(ty), nil
			}
		}
		return v, nil
	})

	resp.NewState = val
	resp.Private = r.PlannedPrivate

	return resp
}

// ImportResourceState returns a schema-coerced copy of the canned imported
// resources, never the canned slice itself.
func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
	p.Lock()
	defer p.Unlock()

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ImportResourceState %q", r.TypeName))
		return resp
	}

	p.ImportResourceStateCalled = true
	p.ImportResourceStateRequest = r
	if p.ImportResourceStateFn != nil {
		return p.ImportResourceStateFn(r)
	}

	if p.ImportResourceStateResponse != nil {
		// There's no guarantee that the imported resources slice isn't being
		// read somewhere else. As such, any changes we make on it (including
		// through pointers) would lead to data races.
		// To avoid that, copy and make changes on the copy
		resp.ImportedResources = make([]providers.ImportedResource, len(p.ImportResourceStateResponse.ImportedResources))

		// fixup the cty value to match the schema
		for i, res := range p.ImportResourceStateResponse.ImportedResources {
			schema, ok := p.getProviderSchema().ResourceTypes[res.TypeName]
			if !ok {
				resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", res.TypeName))
				return resp
			}

			var err error
			res.State, err = schema.Block.CoerceValue(res.State)
			if err != nil {
				resp.Diagnostics = resp.Diagnostics.Append(err)
				return resp
			}

			resp.ImportedResources[i] = res
		}
	}

	return resp
}

// ReadDataSource records the call and returns the stubbed response, if any.
func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
	p.Lock()
	defer p.Unlock()

	if !p.ConfigureProviderCalled {
		resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Configure not called before ReadDataSource %q", r.TypeName))
		return resp
	}

	p.ReadDataSourceCalled = true
	p.ReadDataSourceRequest = r

	if p.ReadDataSourceFn != nil {
		return p.ReadDataSourceFn(r)
	}

	if p.ReadDataSourceResponse != nil {
		resp = *p.ReadDataSourceResponse
	}

	return resp
}

// GetFunctions records the call and returns the stubbed response, if any.
func (p *MockProvider) GetFunctions() (resp providers.GetFunctionsResponse) {
	p.Lock()
	defer p.Unlock()

	p.GetFunctionsCalled = true

	if p.GetFunctionsFn != nil {
		return p.GetFunctionsFn()
	}

	if p.GetFunctionsResponse != nil {
		resp = *p.GetFunctionsResponse
	}
	return resp
}

// CallFunction records the call and returns the stubbed response, if any.
func (p *MockProvider) CallFunction(r providers.CallFunctionRequest) (resp providers.CallFunctionResponse) {
	p.Lock()
	defer p.Unlock()

	p.CallFunctionCalled = true
	p.CallFunctionRequest = r

	if p.CallFunctionFn != nil {
		return p.CallFunctionFn(r)
	}

	if p.CallFunctionResponse != nil {
		resp = *p.CallFunctionResponse
	}
	return resp
}

// Close records the call and returns the configured CloseError.
func (p *MockProvider) Close() error {
	p.Lock()
	defer p.Unlock()

	p.CloseCalled = true
	return p.CloseError
}

// ---- file: pkg/tofu/provisioner_mock.go ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"sync"

	"github.com/kubegems/opentofu/pkg/provisioners"
)

var _ provisioners.Interface = (*MockProvisioner)(nil)

// MockProvisioner implements provisioners.Interface but mocks out all the
// calls for testing purposes.
type MockProvisioner struct {
	sync.Mutex
	// Anything you want, in case you need to store extra data with the mock.
	Meta interface{}

	GetSchemaCalled   bool
	GetSchemaResponse provisioners.GetSchemaResponse

	ValidateProvisionerConfigCalled   bool
	ValidateProvisionerConfigRequest  provisioners.ValidateProvisionerConfigRequest
	ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse
	ValidateProvisionerConfigFn       func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse

	ProvisionResourceCalled   bool
	ProvisionResourceRequest  provisioners.ProvisionResourceRequest
	ProvisionResourceResponse provisioners.ProvisionResourceResponse
	ProvisionResourceFn       func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse

	StopCalled   bool
	StopResponse error
	StopFn       func() error

	CloseCalled   bool
	CloseResponse error
	CloseFn       func() error
}

func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse {
	p.Lock()
	defer p.Unlock()

	p.GetSchemaCalled = true
	return p.getSchema()
}

// getSchema is the implementation of GetSchema, which can be called from other
// methods on MockProvisioner that may already be holding the lock.
func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
	return p.GetSchemaResponse
}

// ValidateProvisionerConfig records the call; the Fn stub takes precedence
// over the canned Response.
func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
	p.Lock()
	defer p.Unlock()

	p.ValidateProvisionerConfigCalled = true
	p.ValidateProvisionerConfigRequest = r
	if p.ValidateProvisionerConfigFn != nil {
		return p.ValidateProvisionerConfigFn(r)
	}
	return p.ValidateProvisionerConfigResponse
}

// ProvisionResource records the call; the Fn stub takes precedence over the
// canned Response.
func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
	p.Lock()
	defer p.Unlock()

	p.ProvisionResourceCalled = true
	p.ProvisionResourceRequest = r
	if p.ProvisionResourceFn != nil {
		fn := p.ProvisionResourceFn
		return fn(r)
	}

	return p.ProvisionResourceResponse
}

func (p *MockProvisioner) Stop() error {
	// We intentionally don't lock in this one because the whole point of this
	// method is to be called concurrently with another operation that can
	// be cancelled. The provisioner itself is responsible for handling
	// any concurrency concerns in this case.

	p.StopCalled = true
	if p.StopFn != nil {
		return p.StopFn()
	}

	return p.StopResponse
}

// Close records the call; the Fn stub takes precedence over CloseResponse.
func (p *MockProvisioner) Close() error {
	p.Lock()
	defer p.Unlock()

	p.CloseCalled = true
	if p.CloseFn != nil {
		return p.CloseFn()
	}

	return p.CloseResponse
}

// ---- file: pkg/tofu/provisioner_mock_test.go ----

// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/provisioners" +) + +// simpleMockProvisioner returns a MockProvisioner that is pre-configured +// with schema for its own config, with the same content as returned by +// function simpleTestSchema. +// +// For most reasonable uses the returned provisioner must be registered in a +// componentFactory under the name "test". Use simpleMockComponentFactory +// to obtain a pre-configured componentFactory containing the result of +// this function along with simpleMockProvider, both registered as "test". +// +// The returned provisioner has no other behaviors by default, but the caller +// may modify it in order to stub any other required functionality, or modify +// the default schema stored in the field GetSchemaReturn. Each new call to +// simpleTestProvisioner produces entirely new instances of all of the nested +// objects so that callers can mutate without affecting mock objects. +func simpleMockProvisioner() *MockProvisioner { + return &MockProvisioner{ + GetSchemaResponse: provisioners.GetSchemaResponse{ + Provisioner: simpleTestSchema(), + }, + } +} diff --git a/pkg/tofu/reduce_plan.go b/pkg/tofu/reduce_plan.go new file mode 100644 index 00000000000..b41faecdfe3 --- /dev/null +++ b/pkg/tofu/reduce_plan.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +// reducePlan takes a planned resource instance change as might be produced by +// Plan or PlanDestroy and "simplifies" it to a single atomic action to be +// performed by a specific graph node. +// +// Callers must specify whether they are a destroy node or a regular apply node. 
+// If the result is NoOp then the given change requires no action for the +// specific graph node calling this and so evaluation of the that graph node +// should exit early and take no action. +// +// The returned object may either be identical to the input change or a new +// change object derived from the input. Because of the former case, the caller +// must not mutate the object returned in OutChange. +func reducePlan(addr addrs.ResourceInstance, in *plans.ResourceInstanceChange, destroy bool) *plans.ResourceInstanceChange { + out := in.Simplify(destroy) + if out.Action != in.Action { + if destroy { + log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for destroy node", addr, in.Action, out.Action) + } else { + log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for apply node", addr, in.Action, out.Action) + } + } + return out +} diff --git a/pkg/tofu/reduce_plan_test.go b/pkg/tofu/reduce_plan_test.go new file mode 100644 index 00000000000..4eb2d40028e --- /dev/null +++ b/pkg/tofu/reduce_plan_test.go @@ -0,0 +1,448 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"testing"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

// TestProcessIgnoreChangesIndividual exercises processIgnoreChangesIndividual
// with a table of (old, new, ignore-traversals) triples, asserting the merged
// result: ignored paths keep their old value while everything else takes the
// new value. Cases cover attributes, lists, maps (by key and whole), objects,
// unknowns, null/missing collections, and marked values.
func TestProcessIgnoreChangesIndividual(t *testing.T) {
	tests := map[string]struct {
		Old, New cty.Value
		Ignore   []string
		Want     cty.Value
	}{
		"string": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("a value"),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("new a value"),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("a value"),
				"b": cty.StringVal("new b value"),
			}),
		},
		"changed type": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("a value"),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.NumberIntVal(1),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("a value"),
				"b": cty.StringVal("new b value"),
			}),
		},
		"list": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("a0 value"),
					cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("new a0 value"),
					cty.StringVal("new a1 value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("a0 value"),
					cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
		},
		"list_index": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("a0 value"),
					cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("new a0 value"),
					cty.StringVal("new a1 value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a[1]"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ListVal([]cty.Value{
					cty.StringVal("new a0 value"),
					cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
		},
		"map": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("new a0 value"),
					"a1": cty.UnknownVal(cty.String),
				}),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"map_index": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("new a0 value"),
					"a1": cty.StringVal("new a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a["a1"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("new a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"map_index_no_config": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.NullVal(cty.Map(cty.String)),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a["a1"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"map_index_unknown_value": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.UnknownVal(cty.String),
				}),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a["a1"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"map_index_multiple_keys": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
					"a2": cty.StringVal("a2 value"),
					"a3": cty.StringVal("a3 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.NullVal(cty.Map(cty.String)),
				"b": cty.StringVal("new b value"),
			}),
			[]string{`a["a1"]`, `a["a2"]`, `a["a3"]`, `b`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a1": cty.StringVal("a1 value"),
					"a2": cty.StringVal("a2 value"),
					"a3": cty.StringVal("a3 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"map_index_redundant": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
					"a2": cty.StringVal("a2 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.NullVal(cty.Map(cty.String)),
				"b": cty.StringVal("new b value"),
			}),
			[]string{`a["a1"]`, `a`, `b`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
					"a2": cty.StringVal("a2 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"missing_map_index": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapValEmpty(cty.String),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a["a1"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a1": cty.StringVal("a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"missing_map_index_empty": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapValEmpty(cty.String),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a": cty.StringVal("a0 value"),
				}),
			}),
			[]string{`a["a"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapValEmpty(cty.String),
			}),
		},
		"missing_map_index_to_object": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a": cty.ObjectVal(map[string]cty.Value{
						"a": cty.StringVal("aa0"),
						"b": cty.StringVal("ab0"),
					}),
					"b": cty.ObjectVal(map[string]cty.Value{
						"a": cty.StringVal("ba0"),
						"b": cty.StringVal("bb0"),
					}),
				}),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapValEmpty(
					cty.Object(map[string]cty.Type{
						"a": cty.String,
						"b": cty.String,
					}),
				),
			}),
			// we expect the config to be used here, as the ignore changes was
			// `a["a"].b`, but the change was larger than that removing
			// `a["a"]` entirely.
			[]string{`a["a"].b`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapValEmpty(
					cty.Object(map[string]cty.Type{
						"a": cty.String,
						"b": cty.String,
					}),
				),
			}),
		},
		"missing_prior_map_index": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
					"a1": cty.StringVal("new a1 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			[]string{`a["a1"]`},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.MapVal(map[string]cty.Value{
					"a0": cty.StringVal("a0 value"),
				}),
				"b": cty.StringVal("b value"),
			}),
		},
		"object attribute": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("a.foo value"),
					"bar": cty.StringVal("a.bar value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("new a.foo value"),
					"bar": cty.StringVal("new a.bar value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a.bar"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("new a.foo value"),
					"bar": cty.StringVal("a.bar value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
		},
		"unknown_object_attribute": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("a.foo value"),
					"bar": cty.StringVal("a.bar value"),
				}),
				"b": cty.StringVal("b value"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("new a.foo value"),
					"bar": cty.UnknownVal(cty.String),
				}),
				"b": cty.StringVal("new b value"),
			}),
			[]string{"a.bar"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.ObjectVal(map[string]cty.Value{
					"foo": cty.StringVal("new a.foo value"),
					"bar": cty.StringVal("a.bar value"),
				}),
				"b": cty.StringVal("new b value"),
			}),
		},
		"null_map": {
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("ok"),
				"list": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"s":   cty.StringVal("ok"),
						"map": cty.NullVal(cty.Map(cty.String)),
					}),
				}),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.NullVal(cty.String),
				"list": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"s":   cty.StringVal("ok"),
						"map": cty.NullVal(cty.Map(cty.String)),
					}),
				}),
			}),
			[]string{"a"},
			cty.ObjectVal(map[string]cty.Value{
				"a": cty.StringVal("ok"),
				"list": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"s":   cty.StringVal("ok"),
						"map": cty.NullVal(cty.Map(cty.String)),
					}),
				}),
			}),
		},
		"marked_map": {
			cty.ObjectVal(map[string]cty.Value{
				"map": cty.MapVal(map[string]cty.Value{
					"key": cty.StringVal("val"),
				}).Mark("marked"),
			}),
			cty.ObjectVal(map[string]cty.Value{
				"map": cty.MapVal(map[string]cty.Value{
					"key": cty.StringVal("new val"),
				}).Mark("marked"),
			}),
			[]string{`map["key"]`},
			cty.ObjectVal(map[string]cty.Value{
				"map": cty.MapVal(map[string]cty.Value{
					"key": cty.StringVal("val"),
				}).Mark("marked"),
			}),
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			// Parse each ignore string into an absolute HCL traversal so the
			// test table can use natural `a["a1"]`-style syntax.
			ignore := make([]hcl.Traversal, len(test.Ignore))
			for i, ignoreStr := range test.Ignore {
				trav, diags := hclsyntax.ParseTraversalAbs([]byte(ignoreStr), "", hcl.Pos{Line: 1, Column: 1})
				if diags.HasErrors() {
					t.Fatalf("failed to parse %q: %s", ignoreStr, diags.Error())
				}
				ignore[i] = trav
			}

			ret, diags := processIgnoreChangesIndividual(test.Old, test.New, traversalsToPaths(ignore))
			if diags.HasErrors() {
				t.Fatal(diags.Err())
			}

			if got, want := ret, test.Want; !want.RawEquals(got) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want)
			}
		})
	}
}
a/pkg/tofu/resource_provider_mock_test.go b/pkg/tofu/resource_provider_mock_test.go new file mode 100644 index 00000000000..1a4759b2120 --- /dev/null +++ b/pkg/tofu/resource_provider_mock_test.go @@ -0,0 +1,142 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +// mockProviderWithConfigSchema is a test helper to concisely create a mock +// provider with the given schema for its own configuration. +func mockProviderWithConfigSchema(schema *configschema.Block) *MockProvider { + return &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: schema}, + }, + } +} + +// mockProviderWithResourceTypeSchema is a test helper to concisely create a mock +// provider with a schema containing a single resource type. +func mockProviderWithResourceTypeSchema(name string, schema *configschema.Block) *MockProvider { + return &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "list": { + Type: cty.List(cty.String), + Optional: true, + }, + "root": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, + }, + }, + ResourceTypes: map[string]providers.Schema{ + name: providers.Schema{Block: schema}, + }, + }, + } +} + +// simpleMockProvider returns a MockProvider that is pre-configured +// with schema for its own config, for a resource type called "test_object" and +// for a data source also called "test_object". +// +// All three schemas have the same content as returned by function +// simpleTestSchema. 
+// +// For most reasonable uses the returned provider must be registered in a +// componentFactory under the name "test". Use simpleMockComponentFactory +// to obtain a pre-configured componentFactory containing the result of +// this function along with simpleMockProvisioner, both registered as "test". +// +// The returned provider has no other behaviors by default, but the caller may +// modify it in order to stub any other required functionality, or modify +// the default schema stored in the field GetSchemaReturn. Each new call to +// simpleTestProvider produces entirely new instances of all of the nested +// objects so that callers can mutate without affecting mock objects. +func simpleMockProvider() *MockProvider { + return &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, + }, + DataSources: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, + }, + }, + } +} + +// ProviderSchema is a helper to convert from the internal GetProviderSchemaResponse to +// a ProviderSchema. 
+func (p *MockProvider) ProviderSchema() *ProviderSchema { + resp := p.getProviderSchema() + + schema := &ProviderSchema{ + Provider: resp.Provider.Block, + ProviderMeta: resp.ProviderMeta.Block, + ResourceTypes: map[string]*configschema.Block{}, + DataSources: map[string]*configschema.Block{}, + ResourceTypeSchemaVersions: map[string]uint64{}, + } + + for resType, s := range resp.ResourceTypes { + schema.ResourceTypes[resType] = s.Block + schema.ResourceTypeSchemaVersions[resType] = uint64(s.Version) + } + + for dataSource, s := range resp.DataSources { + schema.DataSources[dataSource] = s.Block + } + + return schema +} + +// the type was refactored out with all the functionality handled within the +// provider package, but we keep this here for a shim in existing tests. +type ProviderSchema struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + ResourceTypeSchemaVersions map[string]uint64 + DataSources map[string]*configschema.Block +} + +// getProviderSchemaResponseFromProviderSchema is a test helper to convert a +// ProviderSchema to a GetProviderSchemaResponse for use when building a mock provider. 
+func getProviderSchemaResponseFromProviderSchema(providerSchema *ProviderSchema) *providers.GetProviderSchemaResponse { + resp := &providers.GetProviderSchemaResponse{ + Provider: providers.Schema{Block: providerSchema.Provider}, + ProviderMeta: providers.Schema{Block: providerSchema.ProviderMeta}, + ResourceTypes: map[string]providers.Schema{}, + DataSources: map[string]providers.Schema{}, + } + + for name, schema := range providerSchema.ResourceTypes { + resp.ResourceTypes[name] = providers.Schema{ + Block: schema, + Version: int64(providerSchema.ResourceTypeSchemaVersions[name]), + } + } + + for name, schema := range providerSchema.DataSources { + resp.DataSources[name] = providers.Schema{Block: schema} + } + + return resp +} diff --git a/pkg/tofu/schemas.go b/pkg/tofu/schemas.go new file mode 100644 index 00000000000..5d49b39545b --- /dev/null +++ b/pkg/tofu/schemas.go @@ -0,0 +1,179 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// Schemas is a container for various kinds of schema that OpenTofu needs +// during processing. +type Schemas struct { + Providers map[addrs.Provider]providers.ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. 
+func (ss *Schemas) ProviderSchema(provider addrs.Provider) providers.ProviderSchema { + return ss.Providers[provider] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { + return ss.ProviderSchema(provider).Provider.Block +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil if no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though in many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(provider) + if ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil if no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// loadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. 
Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. +func loadSchemas(config *configs.Config, state *states.State, plugins *contextPlugins) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, plugins) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, plugins) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[addrs.Provider]providers.ProviderSchema, config *configs.Config, state *states.State, plugins *contextPlugins) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(fqn addrs.Provider) { + name := fqn.String() + + if _, exists := schemas[fqn]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) + schema, err := plugins.ProviderSchema(fqn) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls, which would then repeat the same error message + // multiple times. 
+ schemas[fqn] = providers.ProviderSchema{} + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Failed to obtain provider schema", + fmt.Sprintf("Could not load the schema for provider %s: %s.", fqn, err), + ), + ) + return + } + + schemas[fqn] = schema + } + + if config != nil { + for _, fqn := range config.ProviderTypes() { + ensure(fqn) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeAddr := range needed { + ensure(typeAddr) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, plugins *contextPlugins) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + schema, err := plugins.ProvisionerSchema(name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls, which would then repeat the same error message + // multiple times. + schemas[name] = &configschema.Block{} + diags = diags.Append( + tfdiags.Sourceless( + tfdiags.Error, + "Failed to obtain provisioner schema", + fmt.Sprintf("Could not load the schema for provisioner %q: %s.", name, err), + ), + ) + return + } + + schemas[name] = schema + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. 
+ for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, plugins) + diags = diags.Append(childDiags) + } + } + + return diags +} diff --git a/pkg/tofu/schemas_test.go b/pkg/tofu/schemas_test.go new file mode 100644 index 00000000000..7fbe92e778c --- /dev/null +++ b/pkg/tofu/schemas_test.go @@ -0,0 +1,54 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" +) + +func simpleTestSchemas() *Schemas { + provider := simpleMockProvider() + provisioner := simpleMockProvisioner() + + return &Schemas{ + Providers: map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("test"): provider.GetProviderSchema(), + }, + Provisioners: map[string]*configschema.Block{ + "test": provisioner.GetSchemaResponse.Provisioner, + }, + } +} + +// schemaOnlyProvidersForTesting is a testing helper that constructs a +// plugin library that contains a set of providers that only know how to +// return schema, and will exhibit undefined behavior if used for any other +// purpose. +// +// The intended use for this is in testing components that use schemas to +// drive other behavior, such as reference analysis during graph construction, +// but that don't actually need to interact with providers otherwise. 
+func schemaOnlyProvidersForTesting(schemas map[addrs.Provider]providers.ProviderSchema, t *testing.T) *contextPlugins { + factories := make(map[addrs.Provider]providers.Factory, len(schemas)) + + for providerAddr, schema := range schemas { + schema := schema + + provider := &MockProvider{ + GetProviderSchemaResponse: &schema, + } + + factories[providerAddr] = func() (providers.Interface, error) { + return provider, nil + } + } + + return newContextPlugins(factories, nil) +} diff --git a/pkg/tofu/test_context.go b/pkg/tofu/test_context.go new file mode 100644 index 00000000000..6e4985d32ba --- /dev/null +++ b/pkg/tofu/test_context.go @@ -0,0 +1,251 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + "sync" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/moduletest" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// TestContext wraps a Context, and adds in direct values for the current state, +// most recent plan, and configuration. +// +// This combination allows functions called on the TestContext to create a +// complete scope to evaluate test assertions. +type TestContext struct { + *Context + + Config *configs.Config + State *states.State + Plan *plans.Plan + Variables InputValues +} + +// TestContext creates a TestContext structure that can evaluate test assertions +// against the provided state and plan. 
+func (c *Context) TestContext(config *configs.Config, state *states.State, plan *plans.Plan, variables InputValues) *TestContext { + return &TestContext{ + Context: c, + Config: config, + State: state, + Plan: plan, + Variables: variables, + } +} + +// EvaluateAgainstState processes the assertions inside the provided +// configs.TestRun against the embedded state. +// +// The provided plan is important as it is needed to evaluate the `plantimestamp` +// function, but no data or changes from the embedded plan are referenced in +// this function. +func (ctx *TestContext) EvaluateAgainstState(run *moduletest.Run) { + defer ctx.acquireRun("evaluate")() + ctx.evaluate(ctx.State.SyncWrapper(), plans.NewChanges().SyncWrapper(), run, walkApply) +} + +// EvaluateAgainstPlan processes the assertions inside the provided +// configs.TestRun against the embedded plan and state. +func (ctx *TestContext) EvaluateAgainstPlan(run *moduletest.Run) { + defer ctx.acquireRun("evaluate")() + ctx.evaluate(ctx.State.SyncWrapper(), ctx.Plan.Changes.SyncWrapper(), run, walkPlan) +} + +func (ctx *TestContext) evaluate(state *states.SyncState, changes *plans.ChangesSync, run *moduletest.Run, operation walkOperation) { + // The state does not include modules that have no resources, making their outputs unusable. + // synchronizeStates function synchronizes the state with the planned state, ensuring inclusion of all modules. 
+ if ctx.Plan != nil && ctx.Plan.PlannedState != nil && + len(ctx.State.Modules) != len(ctx.Plan.PlannedState.Modules) { + state = synchronizeStates(ctx.State, ctx.Plan.PlannedState) + } + + data := &evaluationStateData{ + Evaluator: &Evaluator{ + Operation: operation, + Meta: ctx.meta, + Config: ctx.Config, + Plugins: ctx.plugins, + State: state, + Changes: changes, + VariableValues: func() map[string]map[string]cty.Value { + variables := map[string]map[string]cty.Value{ + addrs.RootModule.String(): make(map[string]cty.Value), + } + for name, variable := range ctx.Variables { + variables[addrs.RootModule.String()][name] = variable.Value + } + return variables + }(), + VariableValuesLock: new(sync.Mutex), + PlanTimestamp: ctx.Plan.Timestamp, + }, + ModulePath: nil, // nil for the root module + InstanceKeyData: EvalDataForNoInstanceKey, + Operation: operation, + } + + var providerInstanceLock sync.Mutex + providerInstances := make(map[addrs.Provider]providers.Interface) + defer func() { + for addr, inst := range providerInstances { + log.Printf("[INFO] Shutting down test provider %s", addr) + inst.Close() + } + }() + + providerSupplier := func(addr addrs.AbsProviderConfig) providers.Interface { + providerInstanceLock.Lock() + defer providerInstanceLock.Unlock() + + if inst, ok := providerInstances[addr.Provider]; ok { + return inst + } + + factory, ok := ctx.plugins.providerFactories[addr.Provider] + if !ok { + log.Printf("[WARN] Unable to find provider %s in test context", addr) + providerInstances[addr.Provider] = nil + return nil + } + log.Printf("[INFO] Starting test provider %s", addr) + inst, err := factory() + if err != nil { + log.Printf("[WARN] Unable to start provider %s in test context", addr) + providerInstances[addr.Provider] = nil + return nil + } else { + log.Printf("[INFO] Shutting down test provider %s", addr) + providerInstances[addr.Provider] = inst + return inst + } + } + + scope := &lang.Scope{ + Data: data, + BaseDir: ".", + PureOnly: operation 
!= walkApply, + PlanTimestamp: ctx.Plan.Timestamp, + ProviderFunctions: func(pf addrs.ProviderFunction, rng tfdiags.SourceRange) (*function.Function, tfdiags.Diagnostics) { + return evalContextProviderFunction(providerSupplier, ctx.Config, walkPlan, pf, rng) + }, + } + + // We're going to assume the run has passed, and then if anything fails this + // value will be updated. + run.Status = run.Status.Merge(moduletest.Pass) + + // Now validate all the assertions within this run block. + for _, rule := range run.Config.CheckRules { + var diags tfdiags.Diagnostics + + refs, moreDiags := lang.ReferencesInExpr(addrs.ParseRefFromTestingScope, rule.Condition) + diags = diags.Append(moreDiags) + moreRefs, moreDiags := lang.ReferencesInExpr(addrs.ParseRefFromTestingScope, rule.ErrorMessage) + diags = diags.Append(moreDiags) + refs = append(refs, moreRefs...) + + hclCtx, moreDiags := scope.EvalContext(refs) + diags = diags.Append(moreDiags) + + errorMessage, moreDiags := evalCheckErrorMessage(rule.ErrorMessage, hclCtx) + diags = diags.Append(moreDiags) + + runVal, hclDiags := rule.Condition.Value(hclCtx) + diags = diags.Append(hclDiags) + + run.Diagnostics = run.Diagnostics.Append(diags) + if diags.HasErrors() { + run.Status = run.Status.Merge(moduletest.Error) + continue + } + + // The condition result may be marked if the expression refers to a + // sensitive value. 
+ runVal, _ = runVal.Unmark() + + if runVal.IsNull() { + run.Status = run.Status.Merge(moduletest.Error) + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid condition run", + Detail: "Condition expression must return either true or false, not null.", + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + + if !runVal.IsKnown() { + run.Status = run.Status.Merge(moduletest.Error) + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unknown condition run", + Detail: "Condition expression could not be evaluated at this time.", + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + + var err error + if runVal, err = convert.Convert(runVal, cty.Bool); err != nil { + run.Status = run.Status.Merge(moduletest.Error) + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid condition run", + Detail: fmt.Sprintf("Invalid condition run value: %s.", tfdiags.FormatError(err)), + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + + if runVal.False() { + run.Status = run.Status.Merge(moduletest.Fail) + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Test assertion failed", + Detail: errorMessage, + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + } +} + +// synchronizeStates compares the planned state to the current state and incorporates any missing modules +// from the planned state into the current state. +// +// If a module has no resources, it is included in the current state to ensure that its output variables are usable. 
+func synchronizeStates(state, plannedState *states.State) *states.SyncState { + newState := state.DeepCopy() + for key, value := range plannedState.Modules { + if _, exists := newState.Modules[key]; !exists { + newState.Modules[key] = value + } + } + return newState.SyncWrapper() +} diff --git a/pkg/tofu/test_context_test.go b/pkg/tofu/test_context_test.go new file mode 100644 index 00000000000..e5f0d28d373 --- /dev/null +++ b/pkg/tofu/test_context_test.go @@ -0,0 +1,598 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang/marks" + "github.com/kubegems/opentofu/pkg/moduletest" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +func TestTestContext_EvaluateAgainstState(t *testing.T) { + tcs := map[string]struct { + configs map[string]string + state *states.State + variables InputValues + provider *MockProvider + + expectedDiags []tfdiags.Description + expectedStatus moduletest.Status + }{ + "basic_passing": { + configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + value = "Hello, world!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.value == "Hello, world!" 
+ error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: encodeCtyValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Pass, + }, + "basic_passing_with_sensitive_value": { + configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + sensitive_value = "Shhhhh!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.sensitive_value == "Shhhhh!" 
+ error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: encodeCtyValue(t, cty.ObjectVal(map[string]cty.Value{ + "sensitive_value": cty.StringVal("Shhhhh!"), + })), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("sensitive_value"), + Marks: cty.NewValueMarks(marks.Sensitive), + }, + }, + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "sensitive_value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Pass, + }, + "with_variables": { + configs: map[string]string{ + "main.tf": ` +variable "value" { + type = string +} + +resource "test_resource" "a" { + value = var.value +} +`, + "main.tftest.hcl": ` +variables { + value = "Hello, world!" 
+} + +run "test_case" { + assert { + condition = test_resource.a.value == var.value + error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: encodeCtyValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + variables: InputValues{ + "value": { + Value: cty.StringVal("Hello, world!"), + }, + }, + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Pass, + }, + "basic_failing": { + configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + value = "Hello, world!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.value == "incorrect!" 
+ error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: encodeCtyValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Fail, + expectedDiags: []tfdiags.Description{ + { + Summary: "Test assertion failed", + Detail: "invalid value", + }, + }, + }, + "two_failing_assertions": { + configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + value = "Hello, world!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.value == "incorrect!" + error_message = "invalid value" + } + + assert { + condition = test_resource.a.value == "also incorrect!" 
+ error_message = "still invalid" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: encodeCtyValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Fail, + expectedDiags: []tfdiags.Description{ + { + Summary: "Test assertion failed", + Detail: "invalid value", + }, + { + Summary: "Test assertion failed", + Detail: "still invalid", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + config := testModuleInline(t, tc.configs) + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(tc.provider), + }, + }) + + run := moduletest.Run{ + Config: config.Module.Tests["main.tftest.hcl"].Runs[0], + Name: "test_case", + } + + tctx := ctx.TestContext(config, tc.state, &plans.Plan{}, tc.variables) + tctx.EvaluateAgainstState(&run) + + if expected, actual := tc.expectedStatus, run.Status; expected != actual { + t.Errorf("expected status \"%s\" but got \"%s\"", expected, actual) + } + + compareDiagnosticsFromTestResult(t, tc.expectedDiags, run.Diagnostics) + }) + } +} + +func TestTestContext_EvaluateAgainstPlan(t *testing.T) { + tcs := map[string]struct { + configs 
map[string]string + state *states.State + plan *plans.Plan + variables InputValues + provider *MockProvider + + expectedDiags []tfdiags.Description + expectedStatus moduletest.Status + }{ + "basic_passing": { + configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + value = "Hello, world!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.value == "Hello, world!" + error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectPlanned, + AttrsJSON: encodeCtyValue(t, cty.NullVal(cty.Object(map[string]cty.Type{ + "value": cty.String, + }))), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + plan: &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: encodeDynamicValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + }, + }, + }, + }, + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Pass, + }, + "basic_failing": { + 
configs: map[string]string{ + "main.tf": ` +resource "test_resource" "a" { + value = "Hello, world!" +} +`, + "main.tftest.hcl": ` +run "test_case" { + assert { + condition = test_resource.a.value == "incorrect!" + error_message = "invalid value" + } +} +`, + }, + state: states.BuildState(func(state *states.SyncState) { + state.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectPlanned, + AttrsJSON: encodeCtyValue(t, cty.NullVal(cty.Object(map[string]cty.Type{ + "value": cty.String, + }))), + }, + addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }) + }), + plan: &plans.Plan{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("test"), + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Create, + Before: nil, + After: encodeDynamicValue(t, cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("Hello, world!"), + })), + }, + }, + }, + }, + }, + provider: &MockProvider{ + GetProviderSchemaResponse: &providers.GetProviderSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_resource": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + }, + }, + expectedStatus: moduletest.Fail, + expectedDiags: []tfdiags.Description{ + { + Summary: "Test assertion failed", + Detail: "invalid value", + }, + }, + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + config := testModuleInline(t, tc.configs) + ctx := 
testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(tc.provider), + }, + }) + + run := moduletest.Run{ + Config: config.Module.Tests["main.tftest.hcl"].Runs[0], + Name: "test_case", + } + + tctx := ctx.TestContext(config, tc.state, tc.plan, tc.variables) + tctx.EvaluateAgainstPlan(&run) + + if expected, actual := tc.expectedStatus, run.Status; expected != actual { + t.Errorf("expected status \"%s\" but got \"%s\"", expected, actual) + } + + compareDiagnosticsFromTestResult(t, tc.expectedDiags, run.Diagnostics) + }) + } +} + +func compareDiagnosticsFromTestResult(t *testing.T, expected []tfdiags.Description, actual tfdiags.Diagnostics) { + if len(expected) != len(actual) { + t.Errorf("found invalid number of diagnostics, expected %d but found %d", len(expected), len(actual)) + } + + length := len(expected) + if len(actual) > length { + length = len(actual) + } + + for ix := 0; ix < length; ix++ { + if ix >= len(expected) { + t.Errorf("found extra diagnostic at %d:\n%v", ix, actual[ix].Description()) + } else if ix >= len(actual) { + t.Errorf("missing diagnostic at %d:\n%v", ix, expected[ix]) + } else { + expected := expected[ix] + actual := actual[ix].Description() + if diff := cmp.Diff(expected, actual); len(diff) > 0 { + t.Errorf("found different diagnostics at %d:\nexpected:\n%s\nactual:\n%s\ndiff:%s", ix, expected, actual, diff) + } + } + } +} + +func encodeDynamicValue(t *testing.T, value cty.Value) []byte { + data, err := ctymsgpack.Marshal(value, value.Type()) + if err != nil { + t.Fatalf("failed to marshal JSON: %s", err) + } + return data +} + +func encodeCtyValue(t *testing.T, value cty.Value) []byte { + data, err := ctyjson.Marshal(value, value.Type()) + if err != nil { + t.Fatalf("failed to marshal JSON: %s", err) + } + return data +} diff --git a/pkg/tofu/testdata/apply-blank/main.tf b/pkg/tofu/testdata/apply-blank/main.tf new file mode 100644 index 
00000000000..0081db1861a --- /dev/null +++ b/pkg/tofu/testdata/apply-blank/main.tf @@ -0,0 +1 @@ +// Nothing! diff --git a/pkg/tofu/testdata/apply-cancel-block/main.tf b/pkg/tofu/testdata/apply-cancel-block/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/apply-cancel-block/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/apply-cancel-provisioner/main.tf b/pkg/tofu/testdata/apply-cancel-provisioner/main.tf new file mode 100644 index 00000000000..dadabd882c0 --- /dev/null +++ b/pkg/tofu/testdata/apply-cancel-provisioner/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" + + provisioner "shell" { + foo = "bar" + } +} diff --git a/pkg/tofu/testdata/apply-cancel/main.tf b/pkg/tofu/testdata/apply-cancel/main.tf new file mode 100644 index 00000000000..7c4af5f71a4 --- /dev/null +++ b/pkg/tofu/testdata/apply-cancel/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + value = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.value}" +} diff --git a/pkg/tofu/testdata/apply-cbd-count/main.tf b/pkg/tofu/testdata/apply-cbd-count/main.tf new file mode 100644 index 00000000000..058d3382c53 --- /dev/null +++ b/pkg/tofu/testdata/apply-cbd-count/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "bar" { + count = 2 + foo = "bar" + + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-cbd-cycle/main.tf b/pkg/tofu/testdata/apply-cbd-cycle/main.tf new file mode 100644 index 00000000000..5ac53107ebe --- /dev/null +++ b/pkg/tofu/testdata/apply-cbd-cycle/main.tf @@ -0,0 +1,19 @@ +resource "test_instance" "a" { + foo = test_instance.b.id + require_new = "changed" + + lifecycle { + create_before_destroy = true + } +} + +resource "test_instance" "b" { + foo = test_instance.c.id + require_new = "changed" +} + + +resource "test_instance" "c" { + require_new = "changed" +} + diff --git 
a/pkg/tofu/testdata/apply-cbd-depends-non-cbd/main.tf b/pkg/tofu/testdata/apply-cbd-depends-non-cbd/main.tf new file mode 100644 index 00000000000..6ba1b983fb8 --- /dev/null +++ b/pkg/tofu/testdata/apply-cbd-depends-non-cbd/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + require_new = "yes" +} + +resource "aws_instance" "bar" { + require_new = "yes" + value = "${aws_instance.foo.id}" + + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-cbd-deposed-only/main.tf b/pkg/tofu/testdata/apply-cbd-deposed-only/main.tf new file mode 100644 index 00000000000..0d2e2d3f92b --- /dev/null +++ b/pkg/tofu/testdata/apply-cbd-deposed-only/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "bar" { + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-compute/main.tf b/pkg/tofu/testdata/apply-compute/main.tf new file mode 100644 index 00000000000..e785294ab44 --- /dev/null +++ b/pkg/tofu/testdata/apply-compute/main.tf @@ -0,0 +1,13 @@ +variable "value" { + default = "" +} + +resource "aws_instance" "foo" { + num = "2" + compute = "value" + compute_value = "${var.value}" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.value}" +} diff --git a/pkg/tofu/testdata/apply-count-dec-one/main.tf b/pkg/tofu/testdata/apply-count-dec-one/main.tf new file mode 100644 index 00000000000..3b0fd942859 --- /dev/null +++ b/pkg/tofu/testdata/apply-count-dec-one/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + foo = "foo" +} diff --git a/pkg/tofu/testdata/apply-count-dec/main.tf b/pkg/tofu/testdata/apply-count-dec/main.tf new file mode 100644 index 00000000000..f18748c3b5c --- /dev/null +++ b/pkg/tofu/testdata/apply-count-dec/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + foo = "foo" + count = 2 +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-count-tainted/main.tf b/pkg/tofu/testdata/apply-count-tainted/main.tf new file mode 100644 index 
00000000000..ba35b034377 --- /dev/null +++ b/pkg/tofu/testdata/apply-count-tainted/main.tf @@ -0,0 +1,4 @@ +resource "aws_instance" "foo" { + count = 2 + foo = "foo" +} diff --git a/pkg/tofu/testdata/apply-count-variable-ref/main.tf b/pkg/tofu/testdata/apply-count-variable-ref/main.tf new file mode 100644 index 00000000000..8e9e4526612 --- /dev/null +++ b/pkg/tofu/testdata/apply-count-variable-ref/main.tf @@ -0,0 +1,11 @@ +variable "foo" { + default = "2" +} + +resource "aws_instance" "foo" { + count = "${var.foo}" +} + +resource "aws_instance" "bar" { + foo = length(aws_instance.foo) +} diff --git a/pkg/tofu/testdata/apply-count-variable/main.tf b/pkg/tofu/testdata/apply-count-variable/main.tf new file mode 100644 index 00000000000..6f322f2187f --- /dev/null +++ b/pkg/tofu/testdata/apply-count-variable/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + default = "2" +} + +resource "aws_instance" "foo" { + foo = "foo" + count = "${var.foo}" +} diff --git a/pkg/tofu/testdata/apply-data-basic/main.tf b/pkg/tofu/testdata/apply-data-basic/main.tf new file mode 100644 index 00000000000..0c3bd8817ec --- /dev/null +++ b/pkg/tofu/testdata/apply-data-basic/main.tf @@ -0,0 +1 @@ +data "null_data_source" "testing" {} diff --git a/pkg/tofu/testdata/apply-data-sensitive/main.tf b/pkg/tofu/testdata/apply-data-sensitive/main.tf new file mode 100644 index 00000000000..c248a7c3316 --- /dev/null +++ b/pkg/tofu/testdata/apply-data-sensitive/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + sensitive = true + default = "foo" +} + +data "null_data_source" "testing" { + foo = var.foo +} diff --git a/pkg/tofu/testdata/apply-depends-create-before/main.tf b/pkg/tofu/testdata/apply-depends-create-before/main.tf new file mode 100644 index 00000000000..63478d893d9 --- /dev/null +++ b/pkg/tofu/testdata/apply-depends-create-before/main.tf @@ -0,0 +1,10 @@ +resource "aws_instance" "web" { + require_new = "ami-new" + lifecycle { + create_before_destroy = true + } +} + +resource "aws_instance" "lb" { + 
instance = aws_instance.web.id +} diff --git a/pkg/tofu/testdata/apply-destroy-cbd/main.tf b/pkg/tofu/testdata/apply-destroy-cbd/main.tf new file mode 100644 index 00000000000..3c7a46f7c17 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-cbd/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { } +resource "aws_instance" "bar" { + depends_on = ["aws_instance.foo"] + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-destroy-computed/child/main.tf b/pkg/tofu/testdata/apply-destroy-computed/child/main.tf new file mode 100644 index 00000000000..5cd1f02b666 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-computed/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +resource "aws_instance" "bar" { + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/apply-destroy-computed/main.tf b/pkg/tofu/testdata/apply-destroy-computed/main.tf new file mode 100644 index 00000000000..768c9680d80 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-computed/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "foo" {} + +module "child" { + source = "./child" + value = "${aws_instance.foo.output}" +} diff --git a/pkg/tofu/testdata/apply-destroy-cross-providers/child/main.tf b/pkg/tofu/testdata/apply-destroy-cross-providers/child/main.tf new file mode 100644 index 00000000000..048b26dec80 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-cross-providers/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +resource "aws_vpc" "bar" { + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/apply-destroy-cross-providers/main.tf b/pkg/tofu/testdata/apply-destroy-cross-providers/main.tf new file mode 100644 index 00000000000..1ff123a73b5 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-cross-providers/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "shared" { +} + +module "child" { + source = "./child" + value = "${aws_instance.shared.id}" +} diff --git a/pkg/tofu/testdata/apply-destroy-data-cycle/main.tf 
b/pkg/tofu/testdata/apply-destroy-data-cycle/main.tf new file mode 100644 index 00000000000..591af82004a --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-data-cycle/main.tf @@ -0,0 +1,14 @@ +locals { + l = data.null_data_source.d.id +} + +data "null_data_source" "d" { +} + +resource "null_resource" "a" { + count = local.l == "NONE" ? 1 : 0 +} + +provider "test" { + foo = data.null_data_source.d.id +} diff --git a/pkg/tofu/testdata/apply-destroy-data-resource/main.tf b/pkg/tofu/testdata/apply-destroy-data-resource/main.tf new file mode 100644 index 00000000000..0d941a70774 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-data-resource/main.tf @@ -0,0 +1,3 @@ +data "null_data_source" "testing" { + foo = "yes" +} diff --git a/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/main.tf b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/main.tf new file mode 100644 index 00000000000..3694951f572 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/main.tf @@ -0,0 +1,3 @@ +module "subchild" { + source = "./subchild" +} diff --git a/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf new file mode 100644 index 00000000000..d31b87e0c64 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/main.tf @@ -0,0 +1,5 @@ +/* +module "subsubchild" { + source = "./subsubchild" +} +*/ diff --git a/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf new file mode 100644 index 00000000000..6ff716a4d4c --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/child/subchild/subsubchild/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/apply-destroy-deeply-nested-module/main.tf 
b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/main.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-deeply-nested-module/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-destroy-depends-on/main.tf b/pkg/tofu/testdata/apply-destroy-depends-on/main.tf new file mode 100644 index 00000000000..3c3ee656f5b --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-depends-on/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { + depends_on = ["aws_instance.bar"] +} + +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf new file mode 100644 index 00000000000..6a4f91d5e90 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/child2/main.tf @@ -0,0 +1,5 @@ +variable "mod_count_child2" { } + +resource "aws_instance" "foo" { + count = "${var.mod_count_child2}" +} diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf new file mode 100644 index 00000000000..28b52679580 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/child/main.tf @@ -0,0 +1,8 @@ +variable "mod_count_child" { } + +module "child2" { + source = "./child2" + mod_count_child2 = "${var.mod_count_child}" +} + +resource "aws_instance" "foo" { } diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/main.tf new file mode 100644 index 00000000000..58600cdb94a --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-and-count-nested/main.tf @@ -0,0 +1,9 @@ +variable "mod_count_root" { + type = string + default = "3" +} + +module "child" { + source = "./child" + mod_count_child = var.mod_count_root +} diff --git 
a/pkg/tofu/testdata/apply-destroy-mod-var-and-count/child/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-and-count/child/main.tf new file mode 100644 index 00000000000..67dac02a275 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-and-count/child/main.tf @@ -0,0 +1,5 @@ +variable "mod_count" { } + +resource "aws_instance" "foo" { + count = "${var.mod_count}" +} diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-and-count/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-and-count/main.tf new file mode 100644 index 00000000000..918b40d0671 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-and-count/main.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + mod_count = "3" +} diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/child/child.tf b/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/child/child.tf new file mode 100644 index 00000000000..6544cf6cb45 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/child/child.tf @@ -0,0 +1,7 @@ +variable "input" {} + +provider "aws" { + region = "us-east-${var.input}" +} + +resource "aws_instance" "foo" { } diff --git a/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/main.tf b/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/main.tf new file mode 100644 index 00000000000..1e2dfb3521d --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-mod-var-provider-config/main.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + input = "1" +} diff --git a/pkg/tofu/testdata/apply-destroy-module-resource-prefix/child/main.tf b/pkg/tofu/testdata/apply-destroy-module-resource-prefix/child/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-module-resource-prefix/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-destroy-module-resource-prefix/main.tf b/pkg/tofu/testdata/apply-destroy-module-resource-prefix/main.tf new file mode 100644 
index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-module-resource-prefix/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-destroy-module-with-attrs/child/main.tf b/pkg/tofu/testdata/apply-destroy-module-with-attrs/child/main.tf new file mode 100644 index 00000000000..55fa601707f --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-module-with-attrs/child/main.tf @@ -0,0 +1,9 @@ +variable "vpc_id" {} + +resource "aws_instance" "child" { + vpc_id = var.vpc_id +} + +output "modout" { + value = aws_instance.child.id +} diff --git a/pkg/tofu/testdata/apply-destroy-module-with-attrs/main.tf b/pkg/tofu/testdata/apply-destroy-module-with-attrs/main.tf new file mode 100644 index 00000000000..9b2d46db741 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-module-with-attrs/main.tf @@ -0,0 +1,10 @@ +resource "aws_instance" "vpc" { } + +module "child" { + source = "./child" + vpc_id = aws_instance.vpc.id +} + +output "out" { + value = module.child.modout +} diff --git a/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf new file mode 100644 index 00000000000..b5db44ee33e --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf @@ -0,0 +1,5 @@ +variable bottom_param {} + +resource "null_resource" "bottom" { + value = "${var.bottom_param}" +} diff --git a/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf new file mode 100644 index 00000000000..76652ee443d --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/middle/middle.tf @@ -0,0 +1,10 @@ +variable param {} + +module "bottom" { + source = "./bottom" + bottom_param = "${var.param}" +} + +resource "null_resource" "middle" { + value = "${var.param}" +} diff --git 
a/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/top.tf b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/top.tf new file mode 100644 index 00000000000..1b631f4d5c0 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module-with-attrs/top.tf @@ -0,0 +1,4 @@ +module "middle" { + source = "./middle" + param = "foo" +} diff --git a/pkg/tofu/testdata/apply-destroy-nested-module/child/main.tf b/pkg/tofu/testdata/apply-destroy-nested-module/child/main.tf new file mode 100644 index 00000000000..852bce8b9f3 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module/child/main.tf @@ -0,0 +1,3 @@ +module "subchild" { + source = "./subchild" +} diff --git a/pkg/tofu/testdata/apply-destroy-nested-module/child/subchild/main.tf b/pkg/tofu/testdata/apply-destroy-nested-module/child/subchild/main.tf new file mode 100644 index 00000000000..6ff716a4d4c --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module/child/subchild/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/apply-destroy-nested-module/main.tf b/pkg/tofu/testdata/apply-destroy-nested-module/main.tf new file mode 100644 index 00000000000..8a5a1b2e5be --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-nested-module/main.tf @@ -0,0 +1,5 @@ +/* +module "child" { + source = "./child" +} +*/ diff --git a/pkg/tofu/testdata/apply-destroy-outputs/main.tf b/pkg/tofu/testdata/apply-destroy-outputs/main.tf new file mode 100644 index 00000000000..8a0384798ea --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-outputs/main.tf @@ -0,0 +1,34 @@ +data "test_data_source" "bar" { + for_each = { + a = "b" + } + foo = "zing" +} + +data "test_data_source" "foo" { + for_each = data.test_data_source.bar + foo = "ok" +} + +locals { + l = [ + { + name = data.test_data_source.foo["a"].id + val = "null" + }, + ] + + m = { for v in local.l : + v.name => v + } +} + +resource "test_instance" "bar" { + for_each = local.m + foo = format("%s", each.value.name) + dep = 
each.value.val +} + +output "out" { + value = test_instance.bar +} diff --git a/pkg/tofu/testdata/apply-destroy-provisider-refs/main.tf b/pkg/tofu/testdata/apply-destroy-provisider-refs/main.tf new file mode 100644 index 00000000000..e7ef6f0b5d2 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-provisider-refs/main.tf @@ -0,0 +1,15 @@ +provider "null" { + value = "" +} + +module "mod" { + source = "./mod" +} + +provider "test" { + value = module.mod.output +} + +resource "test_instance" "bar" { +} + diff --git a/pkg/tofu/testdata/apply-destroy-provisider-refs/mod/main.tf b/pkg/tofu/testdata/apply-destroy-provisider-refs/mod/main.tf new file mode 100644 index 00000000000..c9ecec002bc --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-provisider-refs/mod/main.tf @@ -0,0 +1,9 @@ +data "null_data_source" "foo" { + count = 1 +} + + +output "output" { + value = data.null_data_source.foo[0].output +} + diff --git a/pkg/tofu/testdata/apply-destroy-provisioner/main.tf b/pkg/tofu/testdata/apply-destroy-provisioner/main.tf new file mode 100644 index 00000000000..51b29c72a08 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-provisioner/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/apply-destroy-tainted/main.tf b/pkg/tofu/testdata/apply-destroy-tainted/main.tf new file mode 100644 index 00000000000..48f4f13783e --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-tainted/main.tf @@ -0,0 +1,17 @@ +resource "test_instance" "a" { + foo = "a" +} + +resource "test_instance" "b" { + foo = "b" + lifecycle { + create_before_destroy = true + } +} + +resource "test_instance" "c" { + foo = "c" + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-destroy-targeted-count/main.tf b/pkg/tofu/testdata/apply-destroy-targeted-count/main.tf new file mode 100644 index 00000000000..680d30ffaa3 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-targeted-count/main.tf @@ -0,0 +1,7 @@ 
+resource "aws_instance" "foo" { + count = 3 +} + +resource "aws_instance" "bar" { + foo = ["${aws_instance.foo.*.id}"] +} diff --git a/pkg/tofu/testdata/apply-destroy-with-locals/main.tf b/pkg/tofu/testdata/apply-destroy-with-locals/main.tf new file mode 100644 index 00000000000..1ab75187155 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy-with-locals/main.tf @@ -0,0 +1,8 @@ +locals { + name = "test-${aws_instance.foo.id}" +} +resource "aws_instance" "foo" {} + +output "name" { + value = "${local.name}" +} diff --git a/pkg/tofu/testdata/apply-destroy/main.tf b/pkg/tofu/testdata/apply-destroy/main.tf new file mode 100644 index 00000000000..1b6cdae67b0 --- /dev/null +++ b/pkg/tofu/testdata/apply-destroy/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/apply-empty-module/child/main.tf b/pkg/tofu/testdata/apply-empty-module/child/main.tf new file mode 100644 index 00000000000..6db38ea162c --- /dev/null +++ b/pkg/tofu/testdata/apply-empty-module/child/main.tf @@ -0,0 +1,11 @@ +output "aws_route53_zone_id" { + value = "XXXX" +} + +output "aws_access_key" { + value = "YYYYY" +} + +output "aws_secret_key" { + value = "ZZZZ" +} diff --git a/pkg/tofu/testdata/apply-empty-module/main.tf b/pkg/tofu/testdata/apply-empty-module/main.tf new file mode 100644 index 00000000000..50ce84f0bc3 --- /dev/null +++ b/pkg/tofu/testdata/apply-empty-module/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +output "end" { + value = "${module.child.aws_route53_zone_id}" +} diff --git a/pkg/tofu/testdata/apply-error-create-before/main.tf b/pkg/tofu/testdata/apply-error-create-before/main.tf new file mode 100644 index 00000000000..c7c2776eb77 --- /dev/null +++ b/pkg/tofu/testdata/apply-error-create-before/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "bar" { + require_new = "xyz" + lifecycle { + create_before_destroy = true + } +} diff --git 
a/pkg/tofu/testdata/apply-error/main.tf b/pkg/tofu/testdata/apply-error/main.tf new file mode 100644 index 00000000000..7c4af5f71a4 --- /dev/null +++ b/pkg/tofu/testdata/apply-error/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + value = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.value}" +} diff --git a/pkg/tofu/testdata/apply-escape/main.tf b/pkg/tofu/testdata/apply-escape/main.tf new file mode 100644 index 00000000000..bca2c9b7e27 --- /dev/null +++ b/pkg/tofu/testdata/apply-escape/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "bar" { + foo = "${"\"bar\""}" +} diff --git a/pkg/tofu/testdata/apply-good-create-before-update/main.tf b/pkg/tofu/testdata/apply-good-create-before-update/main.tf new file mode 100644 index 00000000000..d0a2fc93766 --- /dev/null +++ b/pkg/tofu/testdata/apply-good-create-before-update/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "bar" { + foo = "baz" + + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-good-create-before/main.tf b/pkg/tofu/testdata/apply-good-create-before/main.tf new file mode 100644 index 00000000000..c7c2776eb77 --- /dev/null +++ b/pkg/tofu/testdata/apply-good-create-before/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "bar" { + require_new = "xyz" + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-good/main.tf b/pkg/tofu/testdata/apply-good/main.tf new file mode 100644 index 00000000000..5c22c19d109 --- /dev/null +++ b/pkg/tofu/testdata/apply-good/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = 2 +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-idattr/main.tf b/pkg/tofu/testdata/apply-idattr/main.tf new file mode 100644 index 00000000000..1c49f397555 --- /dev/null +++ b/pkg/tofu/testdata/apply-idattr/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = 42 +} diff --git a/pkg/tofu/testdata/apply-ignore-changes-all/main.tf 
b/pkg/tofu/testdata/apply-ignore-changes-all/main.tf new file mode 100644 index 00000000000..a89889a09be --- /dev/null +++ b/pkg/tofu/testdata/apply-ignore-changes-all/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + required_field = "set" + + lifecycle { + ignore_changes = all + } +} diff --git a/pkg/tofu/testdata/apply-ignore-changes-create/main.tf b/pkg/tofu/testdata/apply-ignore-changes-create/main.tf new file mode 100644 index 00000000000..d470660ec1c --- /dev/null +++ b/pkg/tofu/testdata/apply-ignore-changes-create/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + required_field = "set" + + lifecycle { + ignore_changes = ["required_field"] + } +} diff --git a/pkg/tofu/testdata/apply-ignore-changes-dep/main.tf b/pkg/tofu/testdata/apply-ignore-changes-dep/main.tf new file mode 100644 index 00000000000..097d4894283 --- /dev/null +++ b/pkg/tofu/testdata/apply-ignore-changes-dep/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + count = 2 + ami = "ami-bcd456" + lifecycle { + ignore_changes = ["ami"] + } +} + +resource "aws_eip" "foo" { + count = 2 + instance = "${aws_instance.foo.*.id[count.index]}" +} diff --git a/pkg/tofu/testdata/apply-inconsistent-with-plan/main.tf b/pkg/tofu/testdata/apply-inconsistent-with-plan/main.tf new file mode 100644 index 00000000000..9284072dc9c --- /dev/null +++ b/pkg/tofu/testdata/apply-inconsistent-with-plan/main.tf @@ -0,0 +1,2 @@ +resource "test" "foo" { +} diff --git a/pkg/tofu/testdata/apply-interpolated-count/main.tf b/pkg/tofu/testdata/apply-interpolated-count/main.tf new file mode 100644 index 00000000000..527a0b84205 --- /dev/null +++ b/pkg/tofu/testdata/apply-interpolated-count/main.tf @@ -0,0 +1,11 @@ +variable "instance_count" { + default = 1 +} + +resource "aws_instance" "test" { + count = "${var.instance_count}" +} + +resource "aws_instance" "dependent" { + count = "${length(aws_instance.test)}" +} diff --git a/pkg/tofu/testdata/apply-invalid-index/main.tf 
b/pkg/tofu/testdata/apply-invalid-index/main.tf new file mode 100644 index 00000000000..8ea02d77384 --- /dev/null +++ b/pkg/tofu/testdata/apply-invalid-index/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "a" { + count = 0 +} + +resource "test_instance" "b" { + value = test_instance.a[0].value +} diff --git a/pkg/tofu/testdata/apply-issue19908/issue19908.tf b/pkg/tofu/testdata/apply-issue19908/issue19908.tf new file mode 100644 index 00000000000..0c802fb653f --- /dev/null +++ b/pkg/tofu/testdata/apply-issue19908/issue19908.tf @@ -0,0 +1,3 @@ +resource "test" "foo" { + baz = "updated" +} diff --git a/pkg/tofu/testdata/apply-local-val/child/child.tf b/pkg/tofu/testdata/apply-local-val/child/child.tf new file mode 100644 index 00000000000..f7febc42f65 --- /dev/null +++ b/pkg/tofu/testdata/apply-local-val/child/child.tf @@ -0,0 +1,4 @@ + +output "result" { + value = "hello" +} diff --git a/pkg/tofu/testdata/apply-local-val/main.tf b/pkg/tofu/testdata/apply-local-val/main.tf new file mode 100644 index 00000000000..51ca2dedcf3 --- /dev/null +++ b/pkg/tofu/testdata/apply-local-val/main.tf @@ -0,0 +1,10 @@ + +module "child" { + source = "./child" +} + +locals { + result_1 = "${module.child.result}" + result_2 = "${local.result_1}" + result_3 = "${local.result_2} world" +} diff --git a/pkg/tofu/testdata/apply-local-val/outputs.tf b/pkg/tofu/testdata/apply-local-val/outputs.tf new file mode 100644 index 00000000000..f0078c190b3 --- /dev/null +++ b/pkg/tofu/testdata/apply-local-val/outputs.tf @@ -0,0 +1,9 @@ +# These are in a separate file to make sure config merging is working properly + +output "result_1" { + value = "${local.result_1}" +} + +output "result_3" { + value = "${local.result_3}" +} diff --git a/pkg/tofu/testdata/apply-map-var-through-module/amodule/main.tf b/pkg/tofu/testdata/apply-map-var-through-module/amodule/main.tf new file mode 100644 index 00000000000..a5284966ed0 --- /dev/null +++ b/pkg/tofu/testdata/apply-map-var-through-module/amodule/main.tf @@ 
-0,0 +1,9 @@ +variable "amis" { + type = map(string) +} + +resource "null_resource" "noop" {} + +output "amis_out" { + value = var.amis +} diff --git a/pkg/tofu/testdata/apply-map-var-through-module/main.tf b/pkg/tofu/testdata/apply-map-var-through-module/main.tf new file mode 100644 index 00000000000..4cec4a678b0 --- /dev/null +++ b/pkg/tofu/testdata/apply-map-var-through-module/main.tf @@ -0,0 +1,19 @@ +variable "amis_in" { + type = map(string) + default = { + "us-west-1" = "ami-123456" + "us-west-2" = "ami-456789" + "eu-west-1" = "ami-789012" + "eu-west-2" = "ami-989484" + } +} + +module "test" { + source = "./amodule" + + amis = var.amis_in +} + +output "amis_from_module" { + value = module.test.amis_out +} diff --git a/pkg/tofu/testdata/apply-minimal/main.tf b/pkg/tofu/testdata/apply-minimal/main.tf new file mode 100644 index 00000000000..88002d078a1 --- /dev/null +++ b/pkg/tofu/testdata/apply-minimal/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { +} + +resource "aws_instance" "bar" { +} diff --git a/pkg/tofu/testdata/apply-module-bool/child/main.tf b/pkg/tofu/testdata/apply-module-bool/child/main.tf new file mode 100644 index 00000000000..d2a38434c29 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-bool/child/main.tf @@ -0,0 +1,7 @@ +variable "leader" { + default = false +} + +output "leader" { + value = "${var.leader}" +} diff --git a/pkg/tofu/testdata/apply-module-bool/main.tf b/pkg/tofu/testdata/apply-module-bool/main.tf new file mode 100644 index 00000000000..1d40cd4f4ae --- /dev/null +++ b/pkg/tofu/testdata/apply-module-bool/main.tf @@ -0,0 +1,8 @@ +module "child" { + source = "./child" + leader = true +} + +resource "aws_instance" "bar" { + foo = "${module.child.leader}" +} diff --git a/pkg/tofu/testdata/apply-module-depends-on/main.tf b/pkg/tofu/testdata/apply-module-depends-on/main.tf new file mode 100644 index 00000000000..9f7102d531c --- /dev/null +++ b/pkg/tofu/testdata/apply-module-depends-on/main.tf @@ -0,0 +1,32 @@ +module "moda" { + 
source = "./moda" + depends_on = [test_instance.a, module.modb] +} + +resource "test_instance" "a" { + depends_on = [module.modb] + num = 4 + foo = test_instance.aa.id +} + +resource "test_instance" "aa" { + num = 3 + foo = module.modb.out +} + +module "modb" { + source = "./modb" + depends_on = [test_instance.b] +} + +resource "test_instance" "b" { + num = 1 +} + +output "moda_data" { + value = module.moda.out +} + +output "modb_resource" { + value = module.modb.out +} diff --git a/pkg/tofu/testdata/apply-module-depends-on/moda/main.tf b/pkg/tofu/testdata/apply-module-depends-on/moda/main.tf new file mode 100644 index 00000000000..e60d300bae2 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-depends-on/moda/main.tf @@ -0,0 +1,11 @@ +resource "test_instance" "a" { + num = 5 +} + +data "test_data_source" "a" { + foo = "a" +} + +output "out" { + value = data.test_data_source.a.id +} diff --git a/pkg/tofu/testdata/apply-module-depends-on/modb/main.tf b/pkg/tofu/testdata/apply-module-depends-on/modb/main.tf new file mode 100644 index 00000000000..961c5d560bd --- /dev/null +++ b/pkg/tofu/testdata/apply-module-depends-on/modb/main.tf @@ -0,0 +1,11 @@ +resource "test_instance" "b" { + num = 2 +} + +data "test_data_source" "b" { + foo = "b" +} + +output "out" { + value = test_instance.b.id +} diff --git a/pkg/tofu/testdata/apply-module-destroy-order/child/main.tf b/pkg/tofu/testdata/apply-module-destroy-order/child/main.tf new file mode 100644 index 00000000000..0b2a8bc07dd --- /dev/null +++ b/pkg/tofu/testdata/apply-module-destroy-order/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "a" { + id = "a" +} + +output "a_output" { + value = "${aws_instance.a.id}" +} diff --git a/pkg/tofu/testdata/apply-module-destroy-order/main.tf b/pkg/tofu/testdata/apply-module-destroy-order/main.tf new file mode 100644 index 00000000000..2c47edadff9 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-destroy-order/main.tf @@ -0,0 +1,8 @@ +module "child" { + source = "./child" +} + 
+resource "aws_instance" "b" { + id = "b" + blah = "${module.child.a_output}" +} diff --git a/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/grandchild/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/main.tf b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/main.tf new file mode 100644 index 00000000000..b422300ec98 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/child/main.tf @@ -0,0 +1,3 @@ +module "grandchild" { + source = "./grandchild" +} diff --git a/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/main.tf b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/main.tf new file mode 100644 index 00000000000..25d0993d1e4 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-grandchild-provider-inherit/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + value = "foo" +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-module-only-provider/child/main.tf b/pkg/tofu/testdata/apply-module-only-provider/child/main.tf new file mode 100644 index 00000000000..e15099c171b --- /dev/null +++ b/pkg/tofu/testdata/apply-module-only-provider/child/main.tf @@ -0,0 +1,2 @@ +resource "aws_instance" "foo" {} +resource "test_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-module-only-provider/main.tf b/pkg/tofu/testdata/apply-module-only-provider/main.tf new file mode 100644 index 00000000000..2276b5f36ca --- /dev/null +++ b/pkg/tofu/testdata/apply-module-only-provider/main.tf @@ -0,0 +1,5 @@ +provider "aws" {} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-module-orphan-provider-inherit/main.tf 
b/pkg/tofu/testdata/apply-module-orphan-provider-inherit/main.tf new file mode 100644 index 00000000000..e334ff2c77b --- /dev/null +++ b/pkg/tofu/testdata/apply-module-orphan-provider-inherit/main.tf @@ -0,0 +1,3 @@ +provider "aws" { + value = "foo" +} diff --git a/pkg/tofu/testdata/apply-module-provider-alias/child/main.tf b/pkg/tofu/testdata/apply-module-provider-alias/child/main.tf new file mode 100644 index 00000000000..ee923f255ae --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-alias/child/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "eu" +} + +resource "aws_instance" "foo" { + provider = "aws.eu" +} diff --git a/pkg/tofu/testdata/apply-module-provider-alias/main.tf b/pkg/tofu/testdata/apply-module-provider-alias/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-alias/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-module-provider-close-nested/child/main.tf b/pkg/tofu/testdata/apply-module-provider-close-nested/child/main.tf new file mode 100644 index 00000000000..852bce8b9f3 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-close-nested/child/main.tf @@ -0,0 +1,3 @@ +module "subchild" { + source = "./subchild" +} diff --git a/pkg/tofu/testdata/apply-module-provider-close-nested/child/subchild/main.tf b/pkg/tofu/testdata/apply-module-provider-close-nested/child/subchild/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-close-nested/child/subchild/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-module-provider-close-nested/main.tf b/pkg/tofu/testdata/apply-module-provider-close-nested/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-close-nested/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git 
a/pkg/tofu/testdata/apply-module-provider-inherit-alias-orphan/main.tf b/pkg/tofu/testdata/apply-module-provider-inherit-alias-orphan/main.tf new file mode 100644 index 00000000000..4332b9adb72 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-inherit-alias-orphan/main.tf @@ -0,0 +1,6 @@ +provider "aws" { +} + +provider "aws" { + alias = "eu" +} diff --git a/pkg/tofu/testdata/apply-module-provider-inherit-alias/child/main.tf b/pkg/tofu/testdata/apply-module-provider-inherit-alias/child/main.tf new file mode 100644 index 00000000000..2db7c4ee88b --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-inherit-alias/child/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "eu" +} + +resource "aws_instance" "foo" { + provider = "aws.eu" +} diff --git a/pkg/tofu/testdata/apply-module-provider-inherit-alias/main.tf b/pkg/tofu/testdata/apply-module-provider-inherit-alias/main.tf new file mode 100644 index 00000000000..a018d1468f1 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-provider-inherit-alias/main.tf @@ -0,0 +1,15 @@ +provider "aws" { + root = 1 +} + +provider "aws" { + value = "eu" + alias = "eu" +} + +module "child" { + source = "./child" + providers = { + "aws.eu" = "aws.eu" + } +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle-cbd/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/main.tf new file mode 100644 index 00000000000..6393231d685 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/main.tf @@ -0,0 +1,8 @@ +module "a" { + source = "./mod1" +} + +module "b" { + source = "./mod2" + ids = module.a.ids +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod1/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod1/main.tf new file mode 100644 index 00000000000..2ade442bfd3 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod1/main.tf @@ -0,0 +1,10 @@ +resource "aws_instance" "a" { + require_new = "new" + lifecycle { + create_before_destroy = true + } +} + +output 
"ids" { + value = [aws_instance.a.id] +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod2/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod2/main.tf new file mode 100644 index 00000000000..83fb1dcd467 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle-cbd/mod2/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "b" { + count = length(var.ids) + require_new = var.ids[count.index] +} + +variable "ids" { + type = list(string) +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle/main.tf new file mode 100644 index 00000000000..6393231d685 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle/main.tf @@ -0,0 +1,8 @@ +module "a" { + source = "./mod1" +} + +module "b" { + source = "./mod2" + ids = module.a.ids +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle/mod1/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle/mod1/main.tf new file mode 100644 index 00000000000..3dd26cb8e7e --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle/mod1/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "a" { + require_new = "new" +} + +output "ids" { + value = [aws_instance.a.id] +} diff --git a/pkg/tofu/testdata/apply-module-replace-cycle/mod2/main.tf b/pkg/tofu/testdata/apply-module-replace-cycle/mod2/main.tf new file mode 100644 index 00000000000..83fb1dcd467 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-replace-cycle/mod2/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "b" { + count = length(var.ids) + require_new = var.ids[count.index] +} + +variable "ids" { + type = list(string) +} diff --git a/pkg/tofu/testdata/apply-module-var-resource-count/child/main.tf b/pkg/tofu/testdata/apply-module-var-resource-count/child/main.tf new file mode 100644 index 00000000000..1a19910e8f3 --- /dev/null +++ b/pkg/tofu/testdata/apply-module-var-resource-count/child/main.tf @@ -0,0 +1,6 @@ +variable "num" { +} + +resource "aws_instance" "foo" { + count = 
"${var.num}" +} diff --git a/pkg/tofu/testdata/apply-module-var-resource-count/main.tf b/pkg/tofu/testdata/apply-module-var-resource-count/main.tf new file mode 100644 index 00000000000..6f7d20c48bf --- /dev/null +++ b/pkg/tofu/testdata/apply-module-var-resource-count/main.tf @@ -0,0 +1,7 @@ +variable "num" { +} + +module "child" { + source = "./child" + num = "${var.num}" +} diff --git a/pkg/tofu/testdata/apply-module/child/main.tf b/pkg/tofu/testdata/apply-module/child/main.tf new file mode 100644 index 00000000000..f279d9b80bf --- /dev/null +++ b/pkg/tofu/testdata/apply-module/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "baz" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-module/main.tf b/pkg/tofu/testdata/apply-module/main.tf new file mode 100644 index 00000000000..f9119a109eb --- /dev/null +++ b/pkg/tofu/testdata/apply-module/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-multi-depose-create-before-destroy/main.tf b/pkg/tofu/testdata/apply-multi-depose-create-before-destroy/main.tf new file mode 100644 index 00000000000..e5a723b3a49 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-depose-create-before-destroy/main.tf @@ -0,0 +1,12 @@ +variable "require_new" { + type = string +} + +resource "aws_instance" "web" { + // require_new is a special attribute recognized by testDiffFn that forces + // a new resource on every apply + require_new = var.require_new + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-multi-provider-destroy-child/child/main.tf b/pkg/tofu/testdata/apply-multi-provider-destroy-child/child/main.tf new file mode 100644 index 00000000000..ae1bc8ee4c2 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-provider-destroy-child/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "bar" { + foo = "bar" +} diff --git 
a/pkg/tofu/testdata/apply-multi-provider-destroy-child/main.tf b/pkg/tofu/testdata/apply-multi-provider-destroy-child/main.tf new file mode 100644 index 00000000000..9b799979b13 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-provider-destroy-child/main.tf @@ -0,0 +1,9 @@ +resource "vault_instance" "foo" {} + +provider "aws" { + value = "${vault_instance.foo.id}" +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-multi-provider-destroy/main.tf b/pkg/tofu/testdata/apply-multi-provider-destroy/main.tf new file mode 100644 index 00000000000..dd3041bb5d4 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-provider-destroy/main.tf @@ -0,0 +1,9 @@ +resource "vault_instance" "foo" {} + +provider "aws" { + addr = "${vault_instance.foo.id}" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-multi-provider/main.tf b/pkg/tofu/testdata/apply-multi-provider/main.tf new file mode 100644 index 00000000000..4ee94a3bfe6 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-provider/main.tf @@ -0,0 +1,7 @@ +resource "do_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-multi-ref/main.tf b/pkg/tofu/testdata/apply-multi-ref/main.tf new file mode 100644 index 00000000000..2a6a6715217 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-ref/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "create" { + bar = "abc" +} + +resource "aws_instance" "other" { + var = "${aws_instance.create.id}" + foo = "${aws_instance.create.bar}" +} diff --git a/pkg/tofu/testdata/apply-multi-var-comprehensive/child/child.tf b/pkg/tofu/testdata/apply-multi-var-comprehensive/child/child.tf new file mode 100644 index 00000000000..8fe7df7c232 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-comprehensive/child/child.tf @@ -0,0 +1,29 @@ +variable "num" { +} + +variable "source_ids" { + type = list(string) +} + +variable "source_names" { + type = list(string) +} + +resource 
"test_thing" "multi_count_var" { + count = var.num + + key = "child.multi_count_var.${count.index}" + + # Can pluck a single item out of a multi-var + source_id = var.source_ids[count.index] +} + +resource "test_thing" "whole_splat" { + key = "child.whole_splat" + + # Can "splat" the ids directly into an attribute of type list. + source_ids = var.source_ids + source_names = var.source_names + source_ids_wrapped = ["${var.source_ids}"] + source_names_wrapped = ["${var.source_names}"] +} diff --git a/pkg/tofu/testdata/apply-multi-var-comprehensive/root.tf b/pkg/tofu/testdata/apply-multi-var-comprehensive/root.tf new file mode 100644 index 00000000000..64ada6be6f2 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-comprehensive/root.tf @@ -0,0 +1,74 @@ +variable "num" { +} + +resource "test_thing" "source" { + count = var.num + + key = "source.${count.index}" + + # The diffFunc in the test exports "name" here too, which we can use + # to test values that are known during plan. +} + +resource "test_thing" "multi_count_var" { + count = var.num + + key = "multi_count_var.${count.index}" + + # Can pluck a single item out of a multi-var + source_id = test_thing.source.*.id[count.index] + source_name = test_thing.source.*.name[count.index] +} + +resource "test_thing" "multi_count_derived" { + # Can use the source to get the count + count = length(test_thing.source) + + key = "multi_count_derived.${count.index}" + + source_id = test_thing.source.*.id[count.index] + source_name = test_thing.source.*.name[count.index] +} + +resource "test_thing" "whole_splat" { + key = "whole_splat" + + # Can "splat" the ids directly into an attribute of type list. + source_ids = test_thing.source.*.id + source_names = test_thing.source.*.name + + # Accessing through a function should work. + source_ids_from_func = split(" ", join(" ", test_thing.source.*.id)) + source_names_from_func = split(" ", join(" ", test_thing.source.*.name)) + + # A common pattern of selecting with a default. 
+ first_source_id = element(concat(test_thing.source.*.id, ["default"]), 0) + first_source_name = element(concat(test_thing.source.*.name, ["default"]), 0) + + # Prior to v0.12 we were handling lists containing list interpolations as + # a special case, flattening the result, for compatibility with behavior + # prior to v0.10. This deprecated handling is now removed, and so these + # each produce a list of lists. We're still using the interpolation syntax + # here, rather than the splat expression directly, to properly mimic how + # this would've looked prior to v0.12 to be explicit about what the new + # behavior is for this old syntax. + source_ids_wrapped = ["${test_thing.source.*.id}"] + source_names_wrapped = ["${test_thing.source.*.name}"] + +} + +module "child" { + source = "./child" + + num = var.num + source_ids = test_thing.source.*.id + source_names = test_thing.source.*.name +} + +output "source_ids" { + value = test_thing.source.*.id +} + +output "source_names" { + value = test_thing.source.*.name +} diff --git a/pkg/tofu/testdata/apply-multi-var-count-dec/main.tf b/pkg/tofu/testdata/apply-multi-var-count-dec/main.tf new file mode 100644 index 00000000000..40476512fa0 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-count-dec/main.tf @@ -0,0 +1,12 @@ +variable "num" {} + +resource "aws_instance" "foo" { + count = "${var.num}" + value = "foo" +} + +resource "aws_instance" "bar" { + ami = "special" + + value = "${join(",", aws_instance.foo.*.id)}" +} diff --git a/pkg/tofu/testdata/apply-multi-var-missing-state/child/child.tf b/pkg/tofu/testdata/apply-multi-var-missing-state/child/child.tf new file mode 100644 index 00000000000..b5df05d0e24 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-missing-state/child/child.tf @@ -0,0 +1,15 @@ + +# This resource gets visited first on the apply walk, but since it DynamicExpands +# to an empty subgraph it ends up being a no-op, leaving the module state +# uninitialized. 
+resource "test_thing" "a" { + count = 0 +} + +# This resource is visited second. During its eval walk we try to build the +# array for the null_resource.a.*.id interpolation, which involves iterating +# over all of the resource in the state. This should succeed even though the +# module state will be nil when evaluating the variable. +resource "test_thing" "b" { + a_ids = "${join(" ", test_thing.a.*.id)}" +} diff --git a/pkg/tofu/testdata/apply-multi-var-missing-state/root.tf b/pkg/tofu/testdata/apply-multi-var-missing-state/root.tf new file mode 100644 index 00000000000..25a0a1f9b49 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-missing-state/root.tf @@ -0,0 +1,7 @@ +// We test this in a child module, since the root module state exists +// very early on, even before any resources are created in it, but that is not +// true for child modules. + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-multi-var-order-interp/main.tf b/pkg/tofu/testdata/apply-multi-var-order-interp/main.tf new file mode 100644 index 00000000000..6cc2e29d9ad --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-order-interp/main.tf @@ -0,0 +1,17 @@ +variable "num" { + default = 15 +} + +resource "aws_instance" "bar" { + count = "${var.num}" + foo = "index-${count.index}" +} + +resource "aws_instance" "baz" { + count = "${var.num}" + foo = "baz-${element(aws_instance.bar.*.foo, count.index)}" +} + +output "should-be-11" { + value = "${element(aws_instance.baz.*.foo, 11)}" +} diff --git a/pkg/tofu/testdata/apply-multi-var-order/main.tf b/pkg/tofu/testdata/apply-multi-var-order/main.tf new file mode 100644 index 00000000000..7ffefb6f349 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var-order/main.tf @@ -0,0 +1,12 @@ +variable "num" { + default = 15 +} + +resource "aws_instance" "bar" { + count = "${var.num}" + foo = "index-${count.index}" +} + +output "should-be-11" { + value = "${element(aws_instance.bar.*.foo, 11)}" +} diff --git 
a/pkg/tofu/testdata/apply-multi-var/main.tf b/pkg/tofu/testdata/apply-multi-var/main.tf new file mode 100644 index 00000000000..c7ed45c6a81 --- /dev/null +++ b/pkg/tofu/testdata/apply-multi-var/main.tf @@ -0,0 +1,10 @@ +variable "num" {} + +resource "aws_instance" "bar" { + count = "${var.num}" + foo = "bar${count.index}" +} + +output "output" { + value = "${join(",", aws_instance.bar.*.foo)}" +} diff --git a/pkg/tofu/testdata/apply-nullable-variables/main.tf b/pkg/tofu/testdata/apply-nullable-variables/main.tf new file mode 100644 index 00000000000..ed4b6c7f26f --- /dev/null +++ b/pkg/tofu/testdata/apply-nullable-variables/main.tf @@ -0,0 +1,28 @@ +module "mod" { + source = "./mod" + nullable_null_default = null + nullable_non_null_default = null + nullable_no_default = null + non_nullable_default = null + non_nullable_no_default = "ok" +} + +output "nullable_null_default" { + value = module.mod.nullable_null_default +} + +output "nullable_non_null_default" { + value = module.mod.nullable_non_null_default +} + +output "nullable_no_default" { + value = module.mod.nullable_no_default +} + +output "non_nullable_default" { + value = module.mod.non_nullable_default +} + +output "non_nullable_no_default" { + value = module.mod.non_nullable_no_default +} diff --git a/pkg/tofu/testdata/apply-nullable-variables/mod/main.tf b/pkg/tofu/testdata/apply-nullable-variables/mod/main.tf new file mode 100644 index 00000000000..fcac3ba3726 --- /dev/null +++ b/pkg/tofu/testdata/apply-nullable-variables/mod/main.tf @@ -0,0 +1,59 @@ +// optional, and this can take null as an input +variable "nullable_null_default" { + // This is implied now as the default, and probably should be implied even + // when nullable=false is the default, so we're leaving this unset for the test. + // nullable = true + + default = null +} + +// assigning null can still override the default. 
+variable "nullable_non_null_default" { + nullable = true + default = "ok" +} + +// required, and assigning null is valid. +variable "nullable_no_default" { + nullable = true +} + + +// this combination is invalid +//variable "non_nullable_null_default" { +// nullable = false +// default = null +//} + + +// assigning null will take the default +variable "non_nullable_default" { + nullable = false + default = "ok" +} + +// required, but null is not a valid value +variable "non_nullable_no_default" { + nullable = false +} + +output "nullable_null_default" { + value = var.nullable_null_default +} + +output "nullable_non_null_default" { + value = var.nullable_non_null_default +} + +output "nullable_no_default" { + value = var.nullable_no_default +} + +output "non_nullable_default" { + value = var.non_nullable_default +} + +output "non_nullable_no_default" { + value = var.non_nullable_no_default +} + diff --git a/pkg/tofu/testdata/apply-orphan-resource/main.tf b/pkg/tofu/testdata/apply-orphan-resource/main.tf new file mode 100644 index 00000000000..3e093ac83f5 --- /dev/null +++ b/pkg/tofu/testdata/apply-orphan-resource/main.tf @@ -0,0 +1,7 @@ +resource "test_thing" "zero" { + count = 0 +} + +resource "test_thing" "one" { + count = 1 +} diff --git a/pkg/tofu/testdata/apply-output-add-after/main.tf b/pkg/tofu/testdata/apply-output-add-after/main.tf new file mode 100644 index 00000000000..1c10eaafc57 --- /dev/null +++ b/pkg/tofu/testdata/apply-output-add-after/main.tf @@ -0,0 +1,6 @@ +provider "aws" {} + +resource "aws_instance" "test" { + foo = "${format("foo%d", count.index)}" + count = 2 +} diff --git a/pkg/tofu/testdata/apply-output-add-after/outputs.tf.json b/pkg/tofu/testdata/apply-output-add-after/outputs.tf.json new file mode 100644 index 00000000000..32e96b0ee07 --- /dev/null +++ b/pkg/tofu/testdata/apply-output-add-after/outputs.tf.json @@ -0,0 +1,10 @@ +{ + "output": { + "firstOutput": { + "value": "${aws_instance.test.0.foo}" + }, + "secondOutput": { + "value": 
"${aws_instance.test.1.foo}" + } + } +} diff --git a/pkg/tofu/testdata/apply-output-add-before/main.tf b/pkg/tofu/testdata/apply-output-add-before/main.tf new file mode 100644 index 00000000000..1c10eaafc57 --- /dev/null +++ b/pkg/tofu/testdata/apply-output-add-before/main.tf @@ -0,0 +1,6 @@ +provider "aws" {} + +resource "aws_instance" "test" { + foo = "${format("foo%d", count.index)}" + count = 2 +} diff --git a/pkg/tofu/testdata/apply-output-add-before/outputs.tf.json b/pkg/tofu/testdata/apply-output-add-before/outputs.tf.json new file mode 100644 index 00000000000..238668ef3d1 --- /dev/null +++ b/pkg/tofu/testdata/apply-output-add-before/outputs.tf.json @@ -0,0 +1,7 @@ +{ + "output": { + "firstOutput": { + "value": "${aws_instance.test.0.foo}" + } + } +} diff --git a/pkg/tofu/testdata/apply-output-list/main.tf b/pkg/tofu/testdata/apply-output-list/main.tf new file mode 100644 index 00000000000..11b8107dffd --- /dev/null +++ b/pkg/tofu/testdata/apply-output-list/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + count = 3 +} + +output "foo_num" { + value = ["${join(",", aws_instance.bar.*.foo)}"] +} diff --git a/pkg/tofu/testdata/apply-output-multi-index/main.tf b/pkg/tofu/testdata/apply-output-multi-index/main.tf new file mode 100644 index 00000000000..c7ede94d5a8 --- /dev/null +++ b/pkg/tofu/testdata/apply-output-multi-index/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + count = 3 +} + +output "foo_num" { + value = "${aws_instance.bar.0.foo}" +} diff --git a/pkg/tofu/testdata/apply-output-multi/main.tf b/pkg/tofu/testdata/apply-output-multi/main.tf new file mode 100644 index 00000000000..a70e334b16b --- /dev/null +++ b/pkg/tofu/testdata/apply-output-multi/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + count = 3 +} + +output "foo_num" { + 
value = "${join(",", aws_instance.bar.*.foo)}" +} diff --git a/pkg/tofu/testdata/apply-output-orphan-module/child/main.tf b/pkg/tofu/testdata/apply-output-orphan-module/child/main.tf new file mode 100644 index 00000000000..ae32f8aa13b --- /dev/null +++ b/pkg/tofu/testdata/apply-output-orphan-module/child/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "bar" +} diff --git a/pkg/tofu/testdata/apply-output-orphan-module/main.tf b/pkg/tofu/testdata/apply-output-orphan-module/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-output-orphan-module/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-output-orphan/main.tf b/pkg/tofu/testdata/apply-output-orphan/main.tf new file mode 100644 index 00000000000..ae32f8aa13b --- /dev/null +++ b/pkg/tofu/testdata/apply-output-orphan/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "bar" +} diff --git a/pkg/tofu/testdata/apply-output/main.tf b/pkg/tofu/testdata/apply-output/main.tf new file mode 100644 index 00000000000..1f91a40f150 --- /dev/null +++ b/pkg/tofu/testdata/apply-output/main.tf @@ -0,0 +1,11 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" +} + +output "foo_num" { + value = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/apply-plan-connection-refs/main.tf b/pkg/tofu/testdata/apply-plan-connection-refs/main.tf new file mode 100644 index 00000000000..d20191f33b1 --- /dev/null +++ b/pkg/tofu/testdata/apply-plan-connection-refs/main.tf @@ -0,0 +1,18 @@ +variable "msg" { + default = "ok" +} + +resource "test_instance" "a" { + foo = "a" +} + + +resource "test_instance" "b" { + foo = "b" + provisioner "shell" { + command = "echo ${var.msg}" + } + connection { + host = test_instance.a.id + } +} diff --git a/pkg/tofu/testdata/apply-provider-alias-configure/main.tf b/pkg/tofu/testdata/apply-provider-alias-configure/main.tf new file mode 100644 index 
00000000000..4487e4573ab --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-alias-configure/main.tf @@ -0,0 +1,14 @@ +provider "another" { + foo = "bar" +} + +provider "another" { + alias = "two" + foo = "bar" +} + +resource "another_instance" "foo" {} + +resource "another_instance" "bar" { + provider = "another.two" +} diff --git a/pkg/tofu/testdata/apply-provider-alias/main.tf b/pkg/tofu/testdata/apply-provider-alias/main.tf new file mode 100644 index 00000000000..19fd985abf2 --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-alias/main.tf @@ -0,0 +1,12 @@ +provider "aws" { + alias = "bar" +} + +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + provider = "aws.bar" +} diff --git a/pkg/tofu/testdata/apply-provider-computed/main.tf b/pkg/tofu/testdata/apply-provider-computed/main.tf new file mode 100644 index 00000000000..81acf7cfaa9 --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-computed/main.tf @@ -0,0 +1,9 @@ +provider "aws" { + value = test_instance.foo.id +} + +resource "aws_instance" "bar" {} + +resource "test_instance" "foo" { + value = "yes" +} diff --git a/pkg/tofu/testdata/apply-provider-configure-disabled/child/main.tf b/pkg/tofu/testdata/apply-provider-configure-disabled/child/main.tf new file mode 100644 index 00000000000..c421bf743c3 --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-configure-disabled/child/main.tf @@ -0,0 +1,5 @@ +provider "aws" { + value = "foo" +} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-provider-configure-disabled/main.tf b/pkg/tofu/testdata/apply-provider-configure-disabled/main.tf new file mode 100644 index 00000000000..dbfc52745d6 --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-configure-disabled/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + foo = "bar" +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-provider-warning/main.tf b/pkg/tofu/testdata/apply-provider-warning/main.tf new file mode 100644 
index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/apply-provider-warning/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-provisioner-compute/main.tf b/pkg/tofu/testdata/apply-provisioner-compute/main.tf new file mode 100644 index 00000000000..598296501d0 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-compute/main.tf @@ -0,0 +1,13 @@ +variable "value" {} + +resource "aws_instance" "foo" { + num = "2" + compute = "value" + compute_value = "${var.value}" +} + +resource "aws_instance" "bar" { + provisioner "shell" { + command = "${aws_instance.foo.value}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-destroy-continue/main.tf b/pkg/tofu/testdata/apply-provisioner-destroy-continue/main.tf new file mode 100644 index 00000000000..0be0d331e51 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-destroy-continue/main.tf @@ -0,0 +1,15 @@ +resource "aws_instance" "foo" { + foo = "bar" + + provisioner "shell" { + command = "one" + when = "destroy" + on_failure = "continue" + } + + provisioner "shell" { + command = "two" + when = "destroy" + on_failure = "continue" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-destroy-fail/main.tf b/pkg/tofu/testdata/apply-provisioner-destroy-fail/main.tf new file mode 100644 index 00000000000..14ad1258293 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-destroy-fail/main.tf @@ -0,0 +1,14 @@ +resource "aws_instance" "foo" { + foo = "bar" + + provisioner "shell" { + command = "one" + when = "destroy" + on_failure = "continue" + } + + provisioner "shell" { + command = "two" + when = "destroy" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-destroy/main.tf b/pkg/tofu/testdata/apply-provisioner-destroy/main.tf new file mode 100644 index 00000000000..8804f649524 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-destroy/main.tf @@ -0,0 +1,18 @@ +resource "aws_instance" "foo" { + for_each = var.input + foo = "bar" + + provisioner "shell" 
{ + command = "create ${each.key} ${each.value}" + } + + provisioner "shell" { + when = "destroy" + command = "destroy ${each.key} ${self.foo}" + } +} + +variable "input" { + type = map(string) + default = {} +} diff --git a/pkg/tofu/testdata/apply-provisioner-diff/main.tf b/pkg/tofu/testdata/apply-provisioner-diff/main.tf new file mode 100644 index 00000000000..ac4f38e97a9 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-diff/main.tf @@ -0,0 +1,4 @@ +resource "aws_instance" "bar" { + foo = "bar" + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/apply-provisioner-explicit-self-ref/main.tf b/pkg/tofu/testdata/apply-provisioner-explicit-self-ref/main.tf new file mode 100644 index 00000000000..7ceca47db81 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-explicit-self-ref/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + foo = "bar" + + provisioner "shell" { + command = "${aws_instance.foo.foo}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-fail-continue/main.tf b/pkg/tofu/testdata/apply-provisioner-fail-continue/main.tf new file mode 100644 index 00000000000..39587984e66 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-fail-continue/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + foo = "bar" + + provisioner "shell" { + on_failure = "continue" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-fail-create-before/main.tf b/pkg/tofu/testdata/apply-provisioner-fail-create-before/main.tf new file mode 100644 index 00000000000..00d32cbc24f --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-fail-create-before/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "bar" { + require_new = "xyz" + provisioner "shell" {} + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-fail-create/main.tf b/pkg/tofu/testdata/apply-provisioner-fail-create/main.tf new file mode 100644 index 00000000000..c1dcd222c0b --- /dev/null +++ 
b/pkg/tofu/testdata/apply-provisioner-fail-create/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "bar" { + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/apply-provisioner-fail/main.tf b/pkg/tofu/testdata/apply-provisioner-fail/main.tf new file mode 100644 index 00000000000..4aacf4b5b16 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-fail/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/apply-provisioner-for-each-self/main.tf b/pkg/tofu/testdata/apply-provisioner-for-each-self/main.tf new file mode 100644 index 00000000000..f3e1d58df26 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-for-each-self/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + for_each = toset(["a", "b", "c"]) + foo = "number ${each.value}" + + provisioner "shell" { + command = "${self.foo}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf b/pkg/tofu/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf new file mode 100644 index 00000000000..337129e61b0 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-interp-count/provisioner-interp-count.tf @@ -0,0 +1,17 @@ +variable "num" { + default = 3 +} + +resource "aws_instance" "a" { + count = var.num +} + +resource "aws_instance" "b" { + provisioner "local-exec" { + # Since we're in a provisioner block here, this expression is + # resolved during the apply walk and so the resource count must + # be known during that walk, even though apply walk doesn't + # do DynamicExpand. 
+ command = "echo ${length(aws_instance.a)}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-module/child/main.tf b/pkg/tofu/testdata/apply-provisioner-module/child/main.tf new file mode 100644 index 00000000000..85b58ff94dc --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-module/child/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "bar" { + provisioner "shell" { + foo = "bar" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-module/main.tf b/pkg/tofu/testdata/apply-provisioner-module/main.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-module/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-provisioner-multi-self-ref-single/main.tf b/pkg/tofu/testdata/apply-provisioner-multi-self-ref-single/main.tf new file mode 100644 index 00000000000..d6c995115ea --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-multi-self-ref-single/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { + count = 3 + foo = "number ${count.index}" + + provisioner "shell" { + command = aws_instance.foo[0].foo + order = count.index + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-multi-self-ref/main.tf b/pkg/tofu/testdata/apply-provisioner-multi-self-ref/main.tf new file mode 100644 index 00000000000..72a1e792007 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-multi-self-ref/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + count = 3 + foo = "number ${count.index}" + + provisioner "shell" { + command = "${self.foo}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-resource-ref/main.tf b/pkg/tofu/testdata/apply-provisioner-resource-ref/main.tf new file mode 100644 index 00000000000..25da37781cc --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-resource-ref/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "bar" { + num = "2" + + provisioner "shell" { + command = "${aws_instance.bar.num}" + } +} diff --git 
a/pkg/tofu/testdata/apply-provisioner-self-ref/main.tf b/pkg/tofu/testdata/apply-provisioner-self-ref/main.tf new file mode 100644 index 00000000000..5f401f7c07f --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-self-ref/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + foo = "bar" + + provisioner "shell" { + command = "${self.foo}" + } +} diff --git a/pkg/tofu/testdata/apply-provisioner-sensitive/main.tf b/pkg/tofu/testdata/apply-provisioner-sensitive/main.tf new file mode 100644 index 00000000000..99ec4a290b7 --- /dev/null +++ b/pkg/tofu/testdata/apply-provisioner-sensitive/main.tf @@ -0,0 +1,18 @@ +variable "password" { + type = string + sensitive = true +} + +resource "aws_instance" "foo" { + connection { + host = "localhost" + type = "telnet" + user = "superuser" + port = 2222 + password = var.password + } + + provisioner "shell" { + command = "echo ${var.password} > secrets" + } +} diff --git a/pkg/tofu/testdata/apply-ref-count/main.tf b/pkg/tofu/testdata/apply-ref-count/main.tf new file mode 100644 index 00000000000..1ce2ffe21f5 --- /dev/null +++ b/pkg/tofu/testdata/apply-ref-count/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + count = 3 +} + +resource "aws_instance" "bar" { + foo = length(aws_instance.foo) +} diff --git a/pkg/tofu/testdata/apply-ref-existing/child/main.tf b/pkg/tofu/testdata/apply-ref-existing/child/main.tf new file mode 100644 index 00000000000..cd1e56eec90 --- /dev/null +++ b/pkg/tofu/testdata/apply-ref-existing/child/main.tf @@ -0,0 +1,5 @@ +variable "var" {} + +resource "aws_instance" "foo" { + value = "${var.var}" +} diff --git a/pkg/tofu/testdata/apply-ref-existing/main.tf b/pkg/tofu/testdata/apply-ref-existing/main.tf new file mode 100644 index 00000000000..a05056c52e5 --- /dev/null +++ b/pkg/tofu/testdata/apply-ref-existing/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { + foo = "bar" +} + +module "child" { + source = "./child" + + var = "${aws_instance.foo.foo}" +} diff --git 
a/pkg/tofu/testdata/apply-resource-count-one-list/main.tf b/pkg/tofu/testdata/apply-resource-count-one-list/main.tf new file mode 100644 index 00000000000..0aeb75b1afa --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-count-one-list/main.tf @@ -0,0 +1,7 @@ +resource "null_resource" "foo" { + count = 1 +} + +output "test" { + value = "${sort(null_resource.foo.*.id)}" +} diff --git a/pkg/tofu/testdata/apply-resource-count-zero-list/main.tf b/pkg/tofu/testdata/apply-resource-count-zero-list/main.tf new file mode 100644 index 00000000000..6d9b4d55d28 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-count-zero-list/main.tf @@ -0,0 +1,7 @@ +resource "null_resource" "foo" { + count = 0 +} + +output "test" { + value = "${sort(null_resource.foo.*.id)}" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/child/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/child/main.tf new file mode 100644 index 00000000000..77203263df4 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "c" { + ami = "grandchild" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/main.tf new file mode 100644 index 00000000000..6cbe350a795 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/child/main.tf @@ -0,0 +1,3 @@ +module "grandchild" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-deep/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/main.tf new file mode 100644 index 00000000000..1a7862b0a3f --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-deep/main.tf @@ -0,0 +1,9 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "a" { + ami = "parent" + + depends_on = ["module.child"] +} diff --git 
a/pkg/tofu/testdata/apply-resource-depends-on-module-empty/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-empty/main.tf new file mode 100644 index 00000000000..f2316bd73ad --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-empty/main.tf @@ -0,0 +1 @@ +# Empty! diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf new file mode 100644 index 00000000000..77203263df4 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "c" { + ami = "grandchild" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/main.tf new file mode 100644 index 00000000000..a816cae90e5 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/child/main.tf @@ -0,0 +1,8 @@ +module "grandchild" { + source = "./child" +} + +resource "aws_instance" "b" { + ami = "child" + depends_on = ["module.grandchild"] +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module-in-module/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module/child/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module/child/main.tf new file mode 100644 index 00000000000..949d8e1b5e6 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "child" { + ami = "child" +} diff --git a/pkg/tofu/testdata/apply-resource-depends-on-module/main.tf b/pkg/tofu/testdata/apply-resource-depends-on-module/main.tf 
new file mode 100644 index 00000000000..1a7862b0a3f --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-depends-on-module/main.tf @@ -0,0 +1,9 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "a" { + ami = "parent" + + depends_on = ["module.child"] +} diff --git a/pkg/tofu/testdata/apply-resource-scale-in/main.tf b/pkg/tofu/testdata/apply-resource-scale-in/main.tf new file mode 100644 index 00000000000..8cb38473e16 --- /dev/null +++ b/pkg/tofu/testdata/apply-resource-scale-in/main.tf @@ -0,0 +1,13 @@ +variable "instance_count" {} + +resource "aws_instance" "one" { + count = var.instance_count +} + +locals { + one_id = element(concat(aws_instance.one.*.id, [""]), 0) +} + +resource "aws_instance" "two" { + value = local.one_id +} diff --git a/pkg/tofu/testdata/apply-stop/apply-stop.tf b/pkg/tofu/testdata/apply-stop/apply-stop.tf new file mode 100644 index 00000000000..003ca67b3bf --- /dev/null +++ b/pkg/tofu/testdata/apply-stop/apply-stop.tf @@ -0,0 +1,23 @@ +terraform { + required_providers { + indefinite = { + source = "terraform.io/test/indefinite" + } + } +} + +# The TestContext2Apply_stop test arranges for "indefinite"'s +# ApplyResourceChange to just block indefinitely until the operation +# is cancelled using Context.Stop. +resource "indefinite" "foo" { +} + +resource "indefinite" "bar" { + # Should never get here during apply because we're going to interrupt the + # run during indefinite.foo's ApplyResourceChange. 
+ depends_on = [indefinite.foo] +} + +output "result" { + value = indefinite.foo.result +} diff --git a/pkg/tofu/testdata/apply-taint-dep-requires-new/main.tf b/pkg/tofu/testdata/apply-taint-dep-requires-new/main.tf new file mode 100644 index 00000000000..f964fe46e9d --- /dev/null +++ b/pkg/tofu/testdata/apply-taint-dep-requires-new/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.id}" + require_new = "yes" +} diff --git a/pkg/tofu/testdata/apply-taint-dep/main.tf b/pkg/tofu/testdata/apply-taint-dep/main.tf new file mode 100644 index 00000000000..164db2d18ae --- /dev/null +++ b/pkg/tofu/testdata/apply-taint-dep/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + num = "2" + foo = "${aws_instance.foo.id}" +} diff --git a/pkg/tofu/testdata/apply-taint/main.tf b/pkg/tofu/testdata/apply-taint/main.tf new file mode 100644 index 00000000000..801ddbaf9b3 --- /dev/null +++ b/pkg/tofu/testdata/apply-taint/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "bar" { + num = "2" +} diff --git a/pkg/tofu/testdata/apply-tainted-targets/main.tf b/pkg/tofu/testdata/apply-tainted-targets/main.tf new file mode 100644 index 00000000000..8f6b317d5bd --- /dev/null +++ b/pkg/tofu/testdata/apply-tainted-targets/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "ifailedprovisioners" { } + +resource "aws_instance" "iambeingadded" { } diff --git a/pkg/tofu/testdata/apply-targeted-count/main.tf b/pkg/tofu/testdata/apply-targeted-count/main.tf new file mode 100644 index 00000000000..cd861898f20 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-count/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + count = 3 +} + +resource "aws_instance" "bar" { + count = 3 +} diff --git a/pkg/tofu/testdata/apply-targeted-module-dep/child/main.tf b/pkg/tofu/testdata/apply-targeted-module-dep/child/main.tf new file mode 100644 index 00000000000..90a7c407b94 --- 
/dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-dep/child/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "mod" { } + +output "output" { + value = "${aws_instance.mod.id}" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-dep/main.tf b/pkg/tofu/testdata/apply-targeted-module-dep/main.tf new file mode 100644 index 00000000000..754219c3e3f --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-dep/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + foo = "${module.child.output}" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-recursive/child/main.tf b/pkg/tofu/testdata/apply-targeted-module-recursive/child/main.tf new file mode 100644 index 00000000000..852bce8b9f3 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-recursive/child/main.tf @@ -0,0 +1,3 @@ +module "subchild" { + source = "./subchild" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-recursive/child/subchild/main.tf b/pkg/tofu/testdata/apply-targeted-module-recursive/child/subchild/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-recursive/child/subchild/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-recursive/main.tf b/pkg/tofu/testdata/apply-targeted-module-recursive/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-recursive/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-resource/child/main.tf b/pkg/tofu/testdata/apply-targeted-module-resource/child/main.tf new file mode 100644 index 00000000000..7872c90fcf5 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-resource/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + num = "2" +} diff --git 
a/pkg/tofu/testdata/apply-targeted-module-resource/main.tf b/pkg/tofu/testdata/apply-targeted-module-resource/main.tf new file mode 100644 index 00000000000..88bf07f6995 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-resource/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf new file mode 100644 index 00000000000..cffe3829e79 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child1/main.tf @@ -0,0 +1,17 @@ +variable "instance_id" { +} + +output "instance_id" { + # The instance here isn't targeted, so this output shouldn't get updated. + # But it already has an existing value in state (specified within the + # test code) so we expect this to remain unchanged afterwards. + value = "${aws_instance.foo.id}" +} + +output "given_instance_id" { + value = "${var.instance_id}" +} + +resource "aws_instance" "foo" { + foo = "${var.instance_id}" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf new file mode 100644 index 00000000000..d8aa6cf3535 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/child2/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { +} + +output "instance_id" { + # Even though we're targeting just the resource above, this should still + # be populated because outputs are implicitly targeted when their + # dependencies are + value = "${aws_instance.foo.id}" +} diff --git a/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/main.tf b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/main.tf new file mode 100644 index 00000000000..11700723769 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module-unrelated-outputs/main.tf @@ -0,0 
+1,37 @@ +resource "aws_instance" "foo" {} + +module "child1" { + source = "./child1" + instance_id = "${aws_instance.foo.id}" +} + +module "child2" { + source = "./child2" +} + +output "child1_id" { + value = "${module.child1.instance_id}" +} + +output "child1_given_id" { + value = "${module.child1.given_instance_id}" +} + +output "child2_id" { + # This should get updated even though we're targeting specifically + # module.child2, because outputs are implicitly targeted when their + # dependencies are. + value = "${module.child2.instance_id}" +} + +output "all_ids" { + # Here we are intentionally referencing values covering three different scenarios: + # - not targeted and not already in state + # - not targeted and already in state + # - targeted + # This is important because this output must appear in the graph after + # target filtering in case the targeted node changes its value, but we must + # therefore silently ignore the failure that results from trying to + # interpolate the un-targeted, not-in-state node. 
+ value = "${aws_instance.foo.id} ${module.child1.instance_id} ${module.child2.instance_id}" +} diff --git a/pkg/tofu/testdata/apply-targeted-module/child/main.tf b/pkg/tofu/testdata/apply-targeted-module/child/main.tf new file mode 100644 index 00000000000..7872c90fcf5 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + num = "2" +} diff --git a/pkg/tofu/testdata/apply-targeted-module/main.tf b/pkg/tofu/testdata/apply-targeted-module/main.tf new file mode 100644 index 00000000000..938ce3a5606 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-module/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + foo = "bar" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-targeted-resource-orphan-module/child/main.tf b/pkg/tofu/testdata/apply-targeted-resource-orphan-module/child/main.tf new file mode 100644 index 00000000000..6ff716a4d4c --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-resource-orphan-module/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/apply-targeted-resource-orphan-module/main.tf b/pkg/tofu/testdata/apply-targeted-resource-orphan-module/main.tf new file mode 100644 index 00000000000..0c15c4bb2e1 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted-resource-orphan-module/main.tf @@ -0,0 +1,5 @@ +//module "child" { +// source = "./child" +//} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/apply-targeted/main.tf b/pkg/tofu/testdata/apply-targeted/main.tf new file mode 100644 index 00000000000..b07fc97f4d4 --- /dev/null +++ b/pkg/tofu/testdata/apply-targeted/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/apply-tf-workspace/main.tf 
b/pkg/tofu/testdata/apply-tf-workspace/main.tf new file mode 100644 index 00000000000..0a6e640cd9e --- /dev/null +++ b/pkg/tofu/testdata/apply-tf-workspace/main.tf @@ -0,0 +1,3 @@ +output "output" { + value = terraform.workspace +} diff --git a/pkg/tofu/testdata/apply-tofu-workspace/main.tf b/pkg/tofu/testdata/apply-tofu-workspace/main.tf new file mode 100644 index 00000000000..161e5e93abd --- /dev/null +++ b/pkg/tofu/testdata/apply-tofu-workspace/main.tf @@ -0,0 +1,3 @@ +output "output" { + value = tofu.workspace +} diff --git a/pkg/tofu/testdata/apply-unknown-interpolate/child/main.tf b/pkg/tofu/testdata/apply-unknown-interpolate/child/main.tf new file mode 100644 index 00000000000..1caedabc458 --- /dev/null +++ b/pkg/tofu/testdata/apply-unknown-interpolate/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +resource "aws_instance" "bar" { + foo = "${var.value}" +} diff --git a/pkg/tofu/testdata/apply-unknown-interpolate/main.tf b/pkg/tofu/testdata/apply-unknown-interpolate/main.tf new file mode 100644 index 00000000000..1ee7dd6cbc4 --- /dev/null +++ b/pkg/tofu/testdata/apply-unknown-interpolate/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "foo" {} + +module "child" { + source = "./child" + value = "${aws_instance.foo.nope}" +} diff --git a/pkg/tofu/testdata/apply-unknown/main.tf b/pkg/tofu/testdata/apply-unknown/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/apply-unknown/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/apply-unstable/main.tf b/pkg/tofu/testdata/apply-unstable/main.tf new file mode 100644 index 00000000000..32754bb4664 --- /dev/null +++ b/pkg/tofu/testdata/apply-unstable/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "foo" { + random = "${uuid()}" +} diff --git a/pkg/tofu/testdata/apply-vars-env/main.tf b/pkg/tofu/testdata/apply-vars-env/main.tf new file mode 100644 index 00000000000..1b62ad63382 --- /dev/null +++ 
b/pkg/tofu/testdata/apply-vars-env/main.tf @@ -0,0 +1,20 @@ +variable "string" { + default = "foo" + type = string +} + +variable "list" { + default = [] + type = list(string) +} + +variable "map" { + default = {} + type = map(string) +} + +resource "aws_instance" "bar" { + string = var.string + list = var.list + map = var.map +} diff --git a/pkg/tofu/testdata/apply-vars/main.tf b/pkg/tofu/testdata/apply-vars/main.tf new file mode 100644 index 00000000000..dc413c0be4c --- /dev/null +++ b/pkg/tofu/testdata/apply-vars/main.tf @@ -0,0 +1,33 @@ +variable "amis" { + default = { + us-east-1 = "foo" + us-west-2 = "foo" + } +} + +variable "test_list" { + type = list(string) +} + +variable "test_map" { + type = map(string) +} + +variable "bar" { + default = "baz" +} + +variable "foo" {} + +resource "aws_instance" "foo" { + num = "2" + bar = var.bar + list = var.test_list + map = var.test_map +} + +resource "aws_instance" "bar" { + foo = var.foo + bar = var.amis[var.foo] + baz = var.amis["us-east-1"] +} diff --git a/pkg/tofu/testdata/apply-with-checks/main.tf b/pkg/tofu/testdata/apply-with-checks/main.tf new file mode 100644 index 00000000000..0064a4b4aec --- /dev/null +++ b/pkg/tofu/testdata/apply-with-checks/main.tf @@ -0,0 +1,20 @@ + +resource "aws_instance" "foo" { + test_string = "Hello, world!" 
+} + +resource "aws_instance" "baz" { + test_string = aws_instance.foo.test_string +} + +check "my_check" { + data "aws_data_source" "bar" { + id = "UI098L" + } + + assert { + condition = data.aws_data_source.bar.foo == "valid value" + error_message = "invalid value" + } + +} diff --git a/pkg/tofu/testdata/context-required-version/main.tf b/pkg/tofu/testdata/context-required-version/main.tf new file mode 100644 index 00000000000..75db792903e --- /dev/null +++ b/pkg/tofu/testdata/context-required-version/main.tf @@ -0,0 +1 @@ +terraform {} diff --git a/pkg/tofu/testdata/data-source-read-with-plan-error/main.tf b/pkg/tofu/testdata/data-source-read-with-plan-error/main.tf new file mode 100644 index 00000000000..2559406f7ab --- /dev/null +++ b/pkg/tofu/testdata/data-source-read-with-plan-error/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { +} + +// this will be postponed until apply +data "aws_data_source" "foo" { + foo = aws_instance.foo.id +} + +// this will cause an error in the final plan +resource "test_instance" "bar" { + foo = "error" +} diff --git a/pkg/tofu/testdata/destroy-module-with-provider/main.tf b/pkg/tofu/testdata/destroy-module-with-provider/main.tf new file mode 100644 index 00000000000..3b183ecac49 --- /dev/null +++ b/pkg/tofu/testdata/destroy-module-with-provider/main.tf @@ -0,0 +1,11 @@ +// this is the provider that should actually be used by orphaned resources +provider "aws" { + alias = "bar" +} + +module "mod" { + source = "./mod" + providers = { + aws.foo = "aws.bar" + } +} diff --git a/pkg/tofu/testdata/destroy-module-with-provider/mod/main.tf b/pkg/tofu/testdata/destroy-module-with-provider/mod/main.tf new file mode 100644 index 00000000000..3e360ee4604 --- /dev/null +++ b/pkg/tofu/testdata/destroy-module-with-provider/mod/main.tf @@ -0,0 +1,6 @@ +provider "aws" { + alias = "foo" +} + +// removed module configuration referencing aws.foo, which was passed in by the +// root module diff --git 
a/pkg/tofu/testdata/destroy-targeted/child/main.tf b/pkg/tofu/testdata/destroy-targeted/child/main.tf new file mode 100644 index 00000000000..47ef076b12d --- /dev/null +++ b/pkg/tofu/testdata/destroy-targeted/child/main.tf @@ -0,0 +1,10 @@ +variable "in" { +} + +resource "aws_instance" "b" { + foo = var.in +} + +output "out" { + value = var.in +} diff --git a/pkg/tofu/testdata/destroy-targeted/main.tf b/pkg/tofu/testdata/destroy-targeted/main.tf new file mode 100644 index 00000000000..70048b50c01 --- /dev/null +++ b/pkg/tofu/testdata/destroy-targeted/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "a" { + foo = "bar" +} + +module "child" { + source = "./child" + in = aws_instance.a.id +} + +output "out" { + value = aws_instance.a.id +} diff --git a/pkg/tofu/testdata/empty/main.tf b/pkg/tofu/testdata/empty/main.tf new file mode 100644 index 00000000000..8974d9ed254 --- /dev/null +++ b/pkg/tofu/testdata/empty/main.tf @@ -0,0 +1 @@ +# Empty, use this for any test that requires a module but no config. 
diff --git a/pkg/tofu/testdata/eval-context-basic/child/main.tf b/pkg/tofu/testdata/eval-context-basic/child/main.tf new file mode 100644 index 00000000000..e24069df759 --- /dev/null +++ b/pkg/tofu/testdata/eval-context-basic/child/main.tf @@ -0,0 +1,7 @@ +variable "list" { +} + + +output "result" { + value = length(var.list) +} diff --git a/pkg/tofu/testdata/eval-context-basic/main.tf b/pkg/tofu/testdata/eval-context-basic/main.tf new file mode 100644 index 00000000000..2dc96ad8635 --- /dev/null +++ b/pkg/tofu/testdata/eval-context-basic/main.tf @@ -0,0 +1,39 @@ +variable "number" { + default = 3 +} + +variable "string" { + default = "Hello, World" +} + +variable "map" { + type = map(string) + default = { + "foo" = "bar", + "baz" = "bat", + } +} + +locals { + result = length(var.list) +} + +variable "list" { + type = list(string) + default = ["red", "orange", "yellow", "green", "blue", "purple"] +} + +resource "test_resource" "example" { + for_each = var.map + name = each.key + tag = each.value +} + +module "child" { + source = "./child" + list = var.list +} + +output "result" { + value = module.child.result +} diff --git a/pkg/tofu/testdata/graph-basic/main.tf b/pkg/tofu/testdata/graph-basic/main.tf new file mode 100644 index 00000000000..a40802cc98e --- /dev/null +++ b/pkg/tofu/testdata/graph-basic/main.tf @@ -0,0 +1,24 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +provider "aws" { + foo = "${openstack_floating_ip.random.value}" +} + +resource "openstack_floating_ip" "random" {} + +resource "aws_security_group" "firewall" {} + +resource "aws_instance" "web" { + ami = "${var.foo}" + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}" + ] +} + +resource "aws_load_balancer" "weblb" { + members = "${aws_instance.web.id_list}" +} diff --git a/pkg/tofu/testdata/graph-builder-apply-basic/child/main.tf b/pkg/tofu/testdata/graph-builder-apply-basic/child/main.tf new file mode 100644 index 00000000000..79be97bf161 --- /dev/null +++ 
b/pkg/tofu/testdata/graph-builder-apply-basic/child/main.tf @@ -0,0 +1,7 @@ +resource "test_object" "create" { + provisioner "test" {} +} + +resource "test_object" "other" { + test_string = "${test_object.create.test_string}" +} diff --git a/pkg/tofu/testdata/graph-builder-apply-basic/main.tf b/pkg/tofu/testdata/graph-builder-apply-basic/main.tf new file mode 100644 index 00000000000..b42bd439e40 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-basic/main.tf @@ -0,0 +1,9 @@ +module "child" { + source = "./child" +} + +resource "test_object" "create" {} + +resource "test_object" "other" { + test_string = "${test_object.create.test_string}" +} diff --git a/pkg/tofu/testdata/graph-builder-apply-count/main.tf b/pkg/tofu/testdata/graph-builder-apply-count/main.tf new file mode 100644 index 00000000000..dee4eb41259 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-count/main.tf @@ -0,0 +1,7 @@ +resource "test_object" "A" { + count = 1 +} + +resource "test_object" "B" { + test_list = test_object.A.*.test_string +} diff --git a/pkg/tofu/testdata/graph-builder-apply-dep-cbd/main.tf b/pkg/tofu/testdata/graph-builder-apply-dep-cbd/main.tf new file mode 100644 index 00000000000..df6f2908cf3 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-dep-cbd/main.tf @@ -0,0 +1,9 @@ +resource "test_object" "A" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + test_list = test_object.A.*.test_string +} diff --git a/pkg/tofu/testdata/graph-builder-apply-double-cbd/main.tf b/pkg/tofu/testdata/graph-builder-apply-double-cbd/main.tf new file mode 100644 index 00000000000..cb1f7342267 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-double-cbd/main.tf @@ -0,0 +1,13 @@ +resource "test_object" "A" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + test_list = test_object.A.*.test_string + + lifecycle { + create_before_destroy = true + } +} diff --git 
a/pkg/tofu/testdata/graph-builder-apply-module-destroy/A/main.tf b/pkg/tofu/testdata/graph-builder-apply-module-destroy/A/main.tf new file mode 100644 index 00000000000..2c427f5c3b2 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-module-destroy/A/main.tf @@ -0,0 +1,9 @@ +variable "input" {} + +resource "test_object" "foo" { + test_string = var.input +} + +output "output" { + value = test_object.foo.id +} diff --git a/pkg/tofu/testdata/graph-builder-apply-module-destroy/main.tf b/pkg/tofu/testdata/graph-builder-apply-module-destroy/main.tf new file mode 100644 index 00000000000..3c566646d13 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-module-destroy/main.tf @@ -0,0 +1,13 @@ +variable "input" { + default = "value" +} + +module "A" { + source = "./A" + input = var.input +} + +module "B" { + source = "./A" + input = module.A.output +} diff --git a/pkg/tofu/testdata/graph-builder-apply-orphan-update/main.tf b/pkg/tofu/testdata/graph-builder-apply-orphan-update/main.tf new file mode 100644 index 00000000000..22e7ae0f1a1 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-orphan-update/main.tf @@ -0,0 +1,3 @@ +resource "test_object" "b" { + test_string = "changed" +} diff --git a/pkg/tofu/testdata/graph-builder-apply-provisioner/main.tf b/pkg/tofu/testdata/graph-builder-apply-provisioner/main.tf new file mode 100644 index 00000000000..1ea5d2122ee --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-provisioner/main.tf @@ -0,0 +1,3 @@ +resource "test_object" "foo" { + provisioner "test" {} +} diff --git a/pkg/tofu/testdata/graph-builder-apply-target-module/child1/main.tf b/pkg/tofu/testdata/graph-builder-apply-target-module/child1/main.tf new file mode 100644 index 00000000000..7ac75f5edb9 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-target-module/child1/main.tf @@ -0,0 +1,11 @@ +variable "instance_id" {} + +output "instance_id" { + value = "${var.instance_id}" +} + +resource "test_object" "foo" { + triggers = { + instance_id 
= "${var.instance_id}" + } +} diff --git a/pkg/tofu/testdata/graph-builder-apply-target-module/child2/main.tf b/pkg/tofu/testdata/graph-builder-apply-target-module/child2/main.tf new file mode 100644 index 00000000000..0afe7efac64 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-target-module/child2/main.tf @@ -0,0 +1 @@ +resource "test_object" "foo" {} diff --git a/pkg/tofu/testdata/graph-builder-apply-target-module/main.tf b/pkg/tofu/testdata/graph-builder-apply-target-module/main.tf new file mode 100644 index 00000000000..994d8fca17d --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-apply-target-module/main.tf @@ -0,0 +1,10 @@ +resource "test_object" "foo" {} + +module "child1" { + source = "./child1" + instance_id = "${test_object.foo.id}" +} + +module "child2" { + source = "./child2" +} diff --git a/pkg/tofu/testdata/graph-builder-orphan-alias/main.tf b/pkg/tofu/testdata/graph-builder-orphan-alias/main.tf new file mode 100644 index 00000000000..039881847c5 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-orphan-alias/main.tf @@ -0,0 +1,3 @@ +provider "test" { + alias = "foo" +} diff --git a/pkg/tofu/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf b/pkg/tofu/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf new file mode 100644 index 00000000000..d154cc26421 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-plan-attr-as-blocks/attr-as-blocks.tf @@ -0,0 +1,8 @@ +resource "test_thing" "a" { +} + +resource "test_thing" "b" { + nested { + foo = test_thing.a.id + } +} diff --git a/pkg/tofu/testdata/graph-builder-plan-basic/main.tf b/pkg/tofu/testdata/graph-builder-plan-basic/main.tf new file mode 100644 index 00000000000..df74468a190 --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-plan-basic/main.tf @@ -0,0 +1,33 @@ +variable "foo" { + default = "bar" + description = "bar" +} + +provider "aws" { + test_string = "${openstack_floating_ip.random.test_string}" +} + +resource "openstack_floating_ip" "random" {} + +resource 
"aws_security_group" "firewall" {} + +resource "aws_instance" "web" { + test_string = var.foo + + test_list = [ + "foo", + aws_security_group.firewall.test_string, + ] +} + +resource "aws_load_balancer" "weblb" { + test_list = aws_instance.web.test_list +} + +locals { + instance_id = "${aws_instance.web.test_string}" +} + +output "instance_id" { + value = "${local.instance_id}" +} diff --git a/pkg/tofu/testdata/graph-builder-plan-dynblock/dynblock.tf b/pkg/tofu/testdata/graph-builder-plan-dynblock/dynblock.tf new file mode 100644 index 00000000000..8946969775c --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-plan-dynblock/dynblock.tf @@ -0,0 +1,14 @@ +resource "test_thing" "a" { +} + +resource "test_thing" "b" { +} + +resource "test_thing" "c" { + dynamic "nested" { + for_each = test_thing.a.list + content { + foo = test_thing.b.id + } + } +} diff --git a/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child1/main.tf b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child1/main.tf new file mode 100644 index 00000000000..f95800f7a0d --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child1/main.tf @@ -0,0 +1,7 @@ +variable "key" {} + +provider "test" { + test_string = "${var.key}" +} + +resource "test_object" "foo" {} diff --git a/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child2/main.tf b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child2/main.tf new file mode 100644 index 00000000000..f95800f7a0d --- /dev/null +++ b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/child2/main.tf @@ -0,0 +1,7 @@ +variable "key" {} + +provider "test" { + test_string = "${var.key}" +} + +resource "test_object" "foo" {} diff --git a/pkg/tofu/testdata/graph-builder-plan-target-module-provider/main.tf b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/main.tf new file mode 100644 index 00000000000..d5a01db9a0d --- /dev/null +++ 
b/pkg/tofu/testdata/graph-builder-plan-target-module-provider/main.tf @@ -0,0 +1,9 @@ +module "child1" { + source = "./child1" + key = "!" +} + +module "child2" { + source = "./child2" + key = "!" +} diff --git a/pkg/tofu/testdata/import-id-data-source/main.tf b/pkg/tofu/testdata/import-id-data-source/main.tf new file mode 100644 index 00000000000..ad524fe5fa1 --- /dev/null +++ b/pkg/tofu/testdata/import-id-data-source/main.tf @@ -0,0 +1,11 @@ +data "aws_subnet" "bar" { + vpc_id = "abc" + cidr_block = "10.0.1.0/24" +} + +import { + to = aws_subnet.bar + id = data.aws_subnet.bar.id +} + +resource "aws_subnet" "bar" {} diff --git a/pkg/tofu/testdata/import-id-func/main.tf b/pkg/tofu/testdata/import-id-func/main.tf new file mode 100644 index 00000000000..f861fae0694 --- /dev/null +++ b/pkg/tofu/testdata/import-id-func/main.tf @@ -0,0 +1,8 @@ +import { + to = aws_instance.foo + id = substr("hmm123", "2", "3") +} + +resource "aws_instance" "foo" { + +} diff --git a/pkg/tofu/testdata/import-id-invalid-null/main.tf b/pkg/tofu/testdata/import-id-invalid-null/main.tf new file mode 100644 index 00000000000..5de128d1ec6 --- /dev/null +++ b/pkg/tofu/testdata/import-id-invalid-null/main.tf @@ -0,0 +1,10 @@ +variable "the_id" { + type = string +} + +import { + to = test_resource.foo + id = var.the_id +} + +resource "test_resource" "foo" {} diff --git a/pkg/tofu/testdata/import-id-invalid-unknown/main.tf b/pkg/tofu/testdata/import-id-invalid-unknown/main.tf new file mode 100644 index 00000000000..317e99f0061 --- /dev/null +++ b/pkg/tofu/testdata/import-id-invalid-unknown/main.tf @@ -0,0 +1,12 @@ +resource "test_resource" "foo" { + +} + +import { + to = test_resource.bar + id = test_resource.foo.id +} + +resource "test_resource" "bar" { + +} diff --git a/pkg/tofu/testdata/import-id-module/child/main.tf b/pkg/tofu/testdata/import-id-module/child/main.tf new file mode 100644 index 00000000000..251f582ae5b --- /dev/null +++ b/pkg/tofu/testdata/import-id-module/child/main.tf @@ -0,0 
+1,3 @@ +output "lb_id" { + value = 1 +} diff --git a/pkg/tofu/testdata/import-id-module/main.tf b/pkg/tofu/testdata/import-id-module/main.tf new file mode 100644 index 00000000000..ef75c78ec19 --- /dev/null +++ b/pkg/tofu/testdata/import-id-module/main.tf @@ -0,0 +1,10 @@ +module "child" { + source = "./child" +} + +import { + to = aws_lb.foo + id = module.child.lb_id +} + +resource "aws_lb" "foo" {} diff --git a/pkg/tofu/testdata/import-id-reference/main.tf b/pkg/tofu/testdata/import-id-reference/main.tf new file mode 100644 index 00000000000..d7d3f541862 --- /dev/null +++ b/pkg/tofu/testdata/import-id-reference/main.tf @@ -0,0 +1,13 @@ +variable "the_id" { + default = "123" +} + +module "refmod" { + source = "./mod" +} + +import { + to = module.refmod.aws_instance.foo + id = var.the_id +} + diff --git a/pkg/tofu/testdata/import-id-reference/mod/mod.tf b/pkg/tofu/testdata/import-id-reference/mod/mod.tf new file mode 100644 index 00000000000..b626e60c824 --- /dev/null +++ b/pkg/tofu/testdata/import-id-reference/mod/mod.tf @@ -0,0 +1,2 @@ +resource "aws_instance" "foo" { +} diff --git a/pkg/tofu/testdata/import-id-variable/main.tf b/pkg/tofu/testdata/import-id-variable/main.tf new file mode 100644 index 00000000000..85056efa8a7 --- /dev/null +++ b/pkg/tofu/testdata/import-id-variable/main.tf @@ -0,0 +1,11 @@ +variable "the_id" { + default = "123" +} + +import { + to = aws_instance.foo + id = var.the_id +} + +resource "aws_instance" "foo" { +} diff --git a/pkg/tofu/testdata/import-module/child/main.tf b/pkg/tofu/testdata/import-module/child/main.tf new file mode 100644 index 00000000000..8a8164b3b24 --- /dev/null +++ b/pkg/tofu/testdata/import-module/child/main.tf @@ -0,0 +1,10 @@ +# Empty +provider "aws" {} + +resource "aws_instance" "foo" { + id = "bar" +} + +module "nested" { + source = "./submodule" +} diff --git a/pkg/tofu/testdata/import-module/child/submodule/main.tf b/pkg/tofu/testdata/import-module/child/submodule/main.tf new file mode 100644 index 
00000000000..93c90158bb1 --- /dev/null +++ b/pkg/tofu/testdata/import-module/child/submodule/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + id = "baz" +} diff --git a/pkg/tofu/testdata/import-module/main.tf b/pkg/tofu/testdata/import-module/main.tf new file mode 100644 index 00000000000..c899a2c510e --- /dev/null +++ b/pkg/tofu/testdata/import-module/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + foo = "bar" +} + +module "child" { + count = 1 + source = "./child" + providers = { + aws = aws + } +} diff --git a/pkg/tofu/testdata/import-provider-locals/main.tf b/pkg/tofu/testdata/import-provider-locals/main.tf new file mode 100644 index 00000000000..a83512ccd98 --- /dev/null +++ b/pkg/tofu/testdata/import-provider-locals/main.tf @@ -0,0 +1,13 @@ +variable "foo" {} + +locals { + baz = "baz-${var.foo}" +} + +provider "aws" { + foo = "${local.baz}" +} + +resource "aws_instance" "foo" { + id = "bar" +} diff --git a/pkg/tofu/testdata/import-provider-resources/main.tf b/pkg/tofu/testdata/import-provider-resources/main.tf new file mode 100644 index 00000000000..a99ee5e9416 --- /dev/null +++ b/pkg/tofu/testdata/import-provider-resources/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + value = "${test_instance.bar.id}" +} + +resource "aws_instance" "foo" { + bar = "value" +} + +resource "test_instance" "bar" { + value = "yes" +} diff --git a/pkg/tofu/testdata/import-provider-vars/main.tf b/pkg/tofu/testdata/import-provider-vars/main.tf new file mode 100644 index 00000000000..6a88bc926b8 --- /dev/null +++ b/pkg/tofu/testdata/import-provider-vars/main.tf @@ -0,0 +1,9 @@ +variable "foo" {} + +provider "aws" { + foo = "${var.foo}" +} + +resource "aws_instance" "foo" { + id = "bar" +} diff --git a/pkg/tofu/testdata/import-provider/main.tf b/pkg/tofu/testdata/import-provider/main.tf new file mode 100644 index 00000000000..5d41fb3e616 --- /dev/null +++ b/pkg/tofu/testdata/import-provider/main.tf @@ -0,0 +1,6 @@ +provider "aws" { + foo = "bar" +} + +resource "aws_instance" 
"foo" { +} diff --git a/pkg/tofu/testdata/input-interpolate-var/child/main.tf b/pkg/tofu/testdata/input-interpolate-var/child/main.tf new file mode 100644 index 00000000000..beb8c098c09 --- /dev/null +++ b/pkg/tofu/testdata/input-interpolate-var/child/main.tf @@ -0,0 +1,6 @@ +variable "length" { } + +resource "template_file" "temp" { + count = var.length + template = "foo" +} diff --git a/pkg/tofu/testdata/input-interpolate-var/main.tf b/pkg/tofu/testdata/input-interpolate-var/main.tf new file mode 100644 index 00000000000..4e68495e7b9 --- /dev/null +++ b/pkg/tofu/testdata/input-interpolate-var/main.tf @@ -0,0 +1,7 @@ +module "source" { + source = "./source" +} +module "child" { + source = "./child" + length = module.source.length +} diff --git a/pkg/tofu/testdata/input-interpolate-var/source/main.tf b/pkg/tofu/testdata/input-interpolate-var/source/main.tf new file mode 100644 index 00000000000..1405fe296d7 --- /dev/null +++ b/pkg/tofu/testdata/input-interpolate-var/source/main.tf @@ -0,0 +1,3 @@ +output "length" { + value = 3 +} diff --git a/pkg/tofu/testdata/input-module-data-vars/child/main.tf b/pkg/tofu/testdata/input-module-data-vars/child/main.tf new file mode 100644 index 00000000000..aa5d69bd5f8 --- /dev/null +++ b/pkg/tofu/testdata/input-module-data-vars/child/main.tf @@ -0,0 +1,5 @@ +variable "in" {} + +output "out" { + value = "${var.in}" +} diff --git a/pkg/tofu/testdata/input-module-data-vars/main.tf b/pkg/tofu/testdata/input-module-data-vars/main.tf new file mode 100644 index 00000000000..0a327b10247 --- /dev/null +++ b/pkg/tofu/testdata/input-module-data-vars/main.tf @@ -0,0 +1,8 @@ +data "null_data_source" "bar" { + foo = ["a", "b"] +} + +module "child" { + source = "./child" + in = "${data.null_data_source.bar.foo[1]}" +} diff --git a/pkg/tofu/testdata/input-provider-multi/main.tf b/pkg/tofu/testdata/input-provider-multi/main.tf new file mode 100644 index 00000000000..db49fd3b0a7 --- /dev/null +++ b/pkg/tofu/testdata/input-provider-multi/main.tf @@ 
-0,0 +1,9 @@ +provider "aws" { + alias = "east" +} + +resource "aws_instance" "foo" { + provider = aws.east +} + +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/input-provider-once/child/main.tf b/pkg/tofu/testdata/input-provider-once/child/main.tf new file mode 100644 index 00000000000..ca39ff5e561 --- /dev/null +++ b/pkg/tofu/testdata/input-provider-once/child/main.tf @@ -0,0 +1,2 @@ +provider "aws" {} +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/input-provider-once/main.tf b/pkg/tofu/testdata/input-provider-once/main.tf new file mode 100644 index 00000000000..006a74087c5 --- /dev/null +++ b/pkg/tofu/testdata/input-provider-once/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" {} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/input-provider-vars/main.tf b/pkg/tofu/testdata/input-provider-vars/main.tf new file mode 100644 index 00000000000..692bfb30f3b --- /dev/null +++ b/pkg/tofu/testdata/input-provider-vars/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "aws_instance" "foo" { + foo = "${var.foo}" +} diff --git a/pkg/tofu/testdata/input-provider-with-vars-and-module/child/main.tf b/pkg/tofu/testdata/input-provider-with-vars-and-module/child/main.tf new file mode 100644 index 00000000000..7ec25bda0c9 --- /dev/null +++ b/pkg/tofu/testdata/input-provider-with-vars-and-module/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" { } diff --git a/pkg/tofu/testdata/input-provider-with-vars-and-module/main.tf b/pkg/tofu/testdata/input-provider-with-vars-and-module/main.tf new file mode 100644 index 00000000000..c5112dca05f --- /dev/null +++ b/pkg/tofu/testdata/input-provider-with-vars-and-module/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + access_key = "abc123" +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/input-provider-with-vars/main.tf b/pkg/tofu/testdata/input-provider-with-vars/main.tf new file mode 100644 index 00000000000..d8f9311150e --- /dev/null 
+++ b/pkg/tofu/testdata/input-provider-with-vars/main.tf @@ -0,0 +1,7 @@ +variable "foo" {} + +provider "aws" { + foo = "${var.foo}" +} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/input-provider/main.tf b/pkg/tofu/testdata/input-provider/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/input-provider/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/input-submodule-count/main.tf b/pkg/tofu/testdata/input-submodule-count/main.tf new file mode 100644 index 00000000000..723a15c6d5e --- /dev/null +++ b/pkg/tofu/testdata/input-submodule-count/main.tf @@ -0,0 +1,4 @@ +module "mod" { + source = "./mod" + instance_count = 2 +} diff --git a/pkg/tofu/testdata/input-submodule-count/mod/main.tf b/pkg/tofu/testdata/input-submodule-count/mod/main.tf new file mode 100644 index 00000000000..dd7cf3d9a84 --- /dev/null +++ b/pkg/tofu/testdata/input-submodule-count/mod/main.tf @@ -0,0 +1,11 @@ +variable "instance_count" { +} + +resource "aws_instance" "foo" { + count = "${var.instance_count}" +} + +module "submod" { + source = "./submod" + list = ["${aws_instance.foo.*.id}"] +} diff --git a/pkg/tofu/testdata/input-submodule-count/mod/submod/main.tf b/pkg/tofu/testdata/input-submodule-count/mod/submod/main.tf new file mode 100644 index 00000000000..732ce43b1ab --- /dev/null +++ b/pkg/tofu/testdata/input-submodule-count/mod/submod/main.tf @@ -0,0 +1,7 @@ +variable "list" { + type = list(string) +} + +resource "aws_instance" "bar" { + count = var.list[0] +} diff --git a/pkg/tofu/testdata/input-variables/main.tf b/pkg/tofu/testdata/input-variables/main.tf new file mode 100644 index 00000000000..9d6d49aa398 --- /dev/null +++ b/pkg/tofu/testdata/input-variables/main.tf @@ -0,0 +1,30 @@ +# Required +variable "foo" { +} + +# Optional +variable "bar" { + default = "baz" +} + +# Mapping +variable "map" { + default = { + foo = "bar" + } +} + +# Complex Object Types +variable "object_map" 
{ + type = map(object({ + foo = string, + bar = any + })) +} + +variable "object_list" { + type = list(object({ + foo = string, + bar = any + })) +} diff --git a/pkg/tofu/testdata/insufficient-features-blocks-aliased-provider/main.tf b/pkg/tofu/testdata/insufficient-features-blocks-aliased-provider/main.tf new file mode 100644 index 00000000000..55d7bc6e44b --- /dev/null +++ b/pkg/tofu/testdata/insufficient-features-blocks-aliased-provider/main.tf @@ -0,0 +1,16 @@ +terraform { + required_providers { + test = { + source = "registry.opentofu.org/hashicorp/test" + } + } +} + +provider "test" { + alias = "test2" + test_string = "config" + features {} +} + +resource "test_object" "a" { +} \ No newline at end of file diff --git a/pkg/tofu/testdata/insufficient-features-blocks-nested_module/main.tf b/pkg/tofu/testdata/insufficient-features-blocks-nested_module/main.tf new file mode 100644 index 00000000000..8e004da2ad4 --- /dev/null +++ b/pkg/tofu/testdata/insufficient-features-blocks-nested_module/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + test = { + source = "registry.opentofu.org/hashicorp/test" + } + } +} + +module "nested" { + source = "./nested" +} diff --git a/pkg/tofu/testdata/insufficient-features-blocks-nested_module/nested/main.tf b/pkg/tofu/testdata/insufficient-features-blocks-nested_module/nested/main.tf new file mode 100644 index 00000000000..b70dd91d716 --- /dev/null +++ b/pkg/tofu/testdata/insufficient-features-blocks-nested_module/nested/main.tf @@ -0,0 +1,8 @@ +provider "test" { + alias = "test2" + test_string = "config" + features {} +} + +resource "test_object" "a" { +} \ No newline at end of file diff --git a/pkg/tofu/testdata/insufficient-features-blocks-no-feats/main.tf b/pkg/tofu/testdata/insufficient-features-blocks-no-feats/main.tf new file mode 100644 index 00000000000..42e12b2f123 --- /dev/null +++ b/pkg/tofu/testdata/insufficient-features-blocks-no-feats/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + test = 
{ + source = "registry.opentofu.org/hashicorp/test" + } + } +} + +provider "test" { + test_string = "config" +} + +resource "test_object" "a" { +} \ No newline at end of file diff --git a/pkg/tofu/testdata/issue-33572/main.tf b/pkg/tofu/testdata/issue-33572/main.tf new file mode 100644 index 00000000000..2718b62996a --- /dev/null +++ b/pkg/tofu/testdata/issue-33572/main.tf @@ -0,0 +1,14 @@ +provider "aws" {} + +resource "aws_instance" "foo" {} + +check "aws_instance_exists" { + data "aws_data_source" "bar" { + id = "baz" + } + + assert { + condition = data.aws_data_source.bar.foo == "Hello, world!" + error_message = "incorrect value" + } +} diff --git a/pkg/tofu/testdata/issue-5254/step-0/main.tf b/pkg/tofu/testdata/issue-5254/step-0/main.tf new file mode 100644 index 00000000000..dd666eba18c --- /dev/null +++ b/pkg/tofu/testdata/issue-5254/step-0/main.tf @@ -0,0 +1,12 @@ +variable "c" { + default = 1 +} + +resource "template_file" "parent" { + count = var.c + template = "Hi" +} + +resource "template_file" "child" { + template = "${join(",", template_file.parent.*.template)} ok" +} diff --git a/pkg/tofu/testdata/issue-5254/step-1/main.tf b/pkg/tofu/testdata/issue-5254/step-1/main.tf new file mode 100644 index 00000000000..3510fe1c4b4 --- /dev/null +++ b/pkg/tofu/testdata/issue-5254/step-1/main.tf @@ -0,0 +1,13 @@ +variable "c" { + default = 1 +} + +resource "template_file" "parent" { + count = var.c + template = "Hi" +} + +resource "template_file" "child" { + template = join(",", template_file.parent.*.template) + __template_requires_new = true +} diff --git a/pkg/tofu/testdata/issue-7824/main.tf b/pkg/tofu/testdata/issue-7824/main.tf new file mode 100644 index 00000000000..ec76bc39223 --- /dev/null +++ b/pkg/tofu/testdata/issue-7824/main.tf @@ -0,0 +1,6 @@ +variable "test" { + type = map(string) + default = { + "test" = "1" + } +} \ No newline at end of file diff --git a/pkg/tofu/testdata/issue-9549/main.tf b/pkg/tofu/testdata/issue-9549/main.tf new file mode 
100644 index 00000000000..5bf28c66d0c --- /dev/null +++ b/pkg/tofu/testdata/issue-9549/main.tf @@ -0,0 +1,11 @@ +module "mod" { + source = "./mod" +} + +output "out" { + value = module.mod.base_config["base_template"] +} + +resource "template_instance" "root_template" { + foo = module.mod.base_config["base_template"] +} diff --git a/pkg/tofu/testdata/issue-9549/mod/main.tf b/pkg/tofu/testdata/issue-9549/mod/main.tf new file mode 100644 index 00000000000..aedf9f003ed --- /dev/null +++ b/pkg/tofu/testdata/issue-9549/mod/main.tf @@ -0,0 +1,10 @@ +resource "template_instance" "example" { + compute_value = "template text" + compute = "value" +} + +output "base_config" { + value = { + base_template = template_instance.example.value + } +} diff --git a/pkg/tofu/testdata/nested-resource-count-plan/main.tf b/pkg/tofu/testdata/nested-resource-count-plan/main.tf new file mode 100644 index 00000000000..f803fd1f654 --- /dev/null +++ b/pkg/tofu/testdata/nested-resource-count-plan/main.tf @@ -0,0 +1,11 @@ +resource "aws_instance" "foo" { + count = 2 +} + +resource "aws_instance" "bar" { + count = "${length(aws_instance.foo.*.id)}" +} + +resource "aws_instance" "baz" { + count = "${length(aws_instance.bar.*.id)}" +} diff --git a/pkg/tofu/testdata/plan-block-nesting-group/block-nesting-group.tf b/pkg/tofu/testdata/plan-block-nesting-group/block-nesting-group.tf new file mode 100644 index 00000000000..9284072dc9c --- /dev/null +++ b/pkg/tofu/testdata/plan-block-nesting-group/block-nesting-group.tf @@ -0,0 +1,2 @@ +resource "test" "foo" { +} diff --git a/pkg/tofu/testdata/plan-cbd-depends-datasource/main.tf b/pkg/tofu/testdata/plan-cbd-depends-datasource/main.tf new file mode 100644 index 00000000000..b523204a8de --- /dev/null +++ b/pkg/tofu/testdata/plan-cbd-depends-datasource/main.tf @@ -0,0 +1,14 @@ +resource "aws_instance" "foo" { + count = 2 + num = "2" + computed = data.aws_vpc.bar[count.index].id + + lifecycle { + create_before_destroy = true + } +} + +data "aws_vpc" "bar" { + 
count = 2 + foo = count.index +} diff --git a/pkg/tofu/testdata/plan-cbd-maintain-root/main.tf b/pkg/tofu/testdata/plan-cbd-maintain-root/main.tf new file mode 100644 index 00000000000..99c96b9eee4 --- /dev/null +++ b/pkg/tofu/testdata/plan-cbd-maintain-root/main.tf @@ -0,0 +1,19 @@ +resource "aws_instance" "foo" { + count = "2" + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_instance" "bar" { + count = "2" + + lifecycle { + create_before_destroy = true + } +} + +output "out" { + value = "${aws_instance.foo.0.id}" +} diff --git a/pkg/tofu/testdata/plan-cbd/main.tf b/pkg/tofu/testdata/plan-cbd/main.tf new file mode 100644 index 00000000000..83d173a5357 --- /dev/null +++ b/pkg/tofu/testdata/plan-cbd/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/plan-close-module-provider/main.tf b/pkg/tofu/testdata/plan-close-module-provider/main.tf new file mode 100644 index 00000000000..ba846846994 --- /dev/null +++ b/pkg/tofu/testdata/plan-close-module-provider/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/pkg/tofu/testdata/plan-close-module-provider/mod/main.tf b/pkg/tofu/testdata/plan-close-module-provider/mod/main.tf new file mode 100644 index 00000000000..3ce1991f202 --- /dev/null +++ b/pkg/tofu/testdata/plan-close-module-provider/mod/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "mod" +} + +resource "aws_instance" "bar" { + provider = "aws.mod" +} diff --git a/pkg/tofu/testdata/plan-computed-attr-ref-type-mismatch/main.tf b/pkg/tofu/testdata/plan-computed-attr-ref-type-mismatch/main.tf new file mode 100644 index 00000000000..41761b2d5db --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-attr-ref-type-mismatch/main.tf @@ -0,0 +1,10 @@ +resource "aws_ami_list" "foo" { + # assume this has a computed attr called "ids" +} + +resource "aws_instance" "foo" { + # this is erroneously referencing the list of all ids. 
The value of this + # is unknown during plan, but we should still know that the unknown value + # is a list of strings and so catch this during plan. + ami = "${aws_ami_list.foo.ids}" +} diff --git a/pkg/tofu/testdata/plan-computed-data-count/main.tf b/pkg/tofu/testdata/plan-computed-data-count/main.tf new file mode 100644 index 00000000000..2d014045271 --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-data-count/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { + num = "2" + compute = "foo" +} + +data "aws_vpc" "bar" { + count = 3 + foo = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-computed-data-resource/main.tf b/pkg/tofu/testdata/plan-computed-data-resource/main.tf new file mode 100644 index 00000000000..aff26ebde5e --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-data-resource/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" + compute = "foo" +} + +data "aws_vpc" "bar" { + foo = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-computed-in-function/main.tf b/pkg/tofu/testdata/plan-computed-in-function/main.tf new file mode 100644 index 00000000000..554394de6aa --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-in-function/main.tf @@ -0,0 +1,7 @@ +data "aws_data_source" "foo" { + +} + +resource "aws_instance" "bar" { + attr = "${length(data.aws_data_source.foo.computed)}" +} diff --git a/pkg/tofu/testdata/plan-computed-list/main.tf b/pkg/tofu/testdata/plan-computed-list/main.tf new file mode 100644 index 00000000000..aeec6ba9350 --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-list/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" + compute = "list.#" +} + +resource "aws_instance" "bar" { + foo = aws_instance.foo.list.0 +} diff --git a/pkg/tofu/testdata/plan-computed-multi-index/main.tf b/pkg/tofu/testdata/plan-computed-multi-index/main.tf new file mode 100644 index 00000000000..2d8a799d058 --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-multi-index/main.tf @@ -0,0 
+1,9 @@ +resource "aws_instance" "foo" { + count = 2 + compute = "ip.#" +} + +resource "aws_instance" "bar" { + count = 1 + foo = "${aws_instance.foo.*.ip[count.index]}" +} diff --git a/pkg/tofu/testdata/plan-computed-value-in-map/main.tf b/pkg/tofu/testdata/plan-computed-value-in-map/main.tf new file mode 100644 index 00000000000..ef2cf08099a --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-value-in-map/main.tf @@ -0,0 +1,16 @@ +resource "aws_computed_source" "intermediates" {} + +module "test_mod" { + source = "./mod" + + services = [ + { + "exists" = "true" + "elb" = "${aws_computed_source.intermediates.computed_read_only}" + }, + { + "otherexists" = " true" + "elb" = "${aws_computed_source.intermediates.computed_read_only}" + }, + ] +} diff --git a/pkg/tofu/testdata/plan-computed-value-in-map/mod/main.tf b/pkg/tofu/testdata/plan-computed-value-in-map/mod/main.tf new file mode 100644 index 00000000000..f6adccf40da --- /dev/null +++ b/pkg/tofu/testdata/plan-computed-value-in-map/mod/main.tf @@ -0,0 +1,8 @@ +variable "services" { + type = list(map(string)) +} + +resource "aws_instance" "inner2" { + looked_up = var.services[0]["elb"] +} + diff --git a/pkg/tofu/testdata/plan-computed/main.tf b/pkg/tofu/testdata/plan-computed/main.tf new file mode 100644 index 00000000000..71809138b12 --- /dev/null +++ b/pkg/tofu/testdata/plan-computed/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" + compute = "foo" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-count-computed-module/child/main.tf b/pkg/tofu/testdata/plan-count-computed-module/child/main.tf new file mode 100644 index 00000000000..f80d699d9c3 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-computed-module/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +resource "aws_instance" "bar" { + count = "${var.value}" +} diff --git a/pkg/tofu/testdata/plan-count-computed-module/main.tf 
b/pkg/tofu/testdata/plan-count-computed-module/main.tf new file mode 100644 index 00000000000..c87beb5f896 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-computed-module/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + compute = "foo" +} + +module "child" { + source = "./child" + value = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-count-computed/main.tf b/pkg/tofu/testdata/plan-count-computed/main.tf new file mode 100644 index 00000000000..8a029236b1e --- /dev/null +++ b/pkg/tofu/testdata/plan-count-computed/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + num = "2" + compute = "foo" +} + +resource "aws_instance" "bar" { + count = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-count-dec/main.tf b/pkg/tofu/testdata/plan-count-dec/main.tf new file mode 100644 index 00000000000..7837f58655f --- /dev/null +++ b/pkg/tofu/testdata/plan-count-dec/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + foo = "foo" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/plan-count-inc/main.tf b/pkg/tofu/testdata/plan-count-inc/main.tf new file mode 100644 index 00000000000..3c7fdb9fff7 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-inc/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + foo = "foo" + count = 3 +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/plan-count-index/main.tf b/pkg/tofu/testdata/plan-count-index/main.tf new file mode 100644 index 00000000000..9a0d1ebbcc2 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-index/main.tf @@ -0,0 +1,4 @@ +resource "aws_instance" "foo" { + count = 2 + foo = "${count.index}" +} diff --git a/pkg/tofu/testdata/plan-count-module-static-grandchild/child/child/main.tf b/pkg/tofu/testdata/plan-count-module-static-grandchild/child/child/main.tf new file mode 100644 index 00000000000..5b75831fdc1 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-module-static-grandchild/child/child/main.tf @@ 
-0,0 +1,5 @@ +variable "value" {} + +resource "aws_instance" "foo" { + count = "${var.value}" +} diff --git a/pkg/tofu/testdata/plan-count-module-static-grandchild/child/main.tf b/pkg/tofu/testdata/plan-count-module-static-grandchild/child/main.tf new file mode 100644 index 00000000000..4dff927d51e --- /dev/null +++ b/pkg/tofu/testdata/plan-count-module-static-grandchild/child/main.tf @@ -0,0 +1,6 @@ +variable "value" {} + +module "child" { + source = "./child" + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/plan-count-module-static-grandchild/main.tf b/pkg/tofu/testdata/plan-count-module-static-grandchild/main.tf new file mode 100644 index 00000000000..b2c7ca66e7a --- /dev/null +++ b/pkg/tofu/testdata/plan-count-module-static-grandchild/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + default = "3" +} + +module "child" { + source = "./child" + value = "${var.foo}" +} diff --git a/pkg/tofu/testdata/plan-count-module-static/child/main.tf b/pkg/tofu/testdata/plan-count-module-static/child/main.tf new file mode 100644 index 00000000000..5b75831fdc1 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-module-static/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +resource "aws_instance" "foo" { + count = "${var.value}" +} diff --git a/pkg/tofu/testdata/plan-count-module-static/main.tf b/pkg/tofu/testdata/plan-count-module-static/main.tf new file mode 100644 index 00000000000..b2c7ca66e7a --- /dev/null +++ b/pkg/tofu/testdata/plan-count-module-static/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + default = "3" +} + +module "child" { + source = "./child" + value = "${var.foo}" +} diff --git a/pkg/tofu/testdata/plan-count-one-index/main.tf b/pkg/tofu/testdata/plan-count-one-index/main.tf new file mode 100644 index 00000000000..58d4acf7113 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-one-index/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + count = 1 + foo = "foo" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.0.foo}" +} diff --git 
a/pkg/tofu/testdata/plan-count-splat-reference/main.tf b/pkg/tofu/testdata/plan-count-splat-reference/main.tf new file mode 100644 index 00000000000..76834e2555c --- /dev/null +++ b/pkg/tofu/testdata/plan-count-splat-reference/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { + name = "foo ${count.index}" + count = 3 +} + +resource "aws_instance" "bar" { + foo_name = "${aws_instance.foo.*.name[count.index]}" + count = 3 +} diff --git a/pkg/tofu/testdata/plan-count-var/main.tf b/pkg/tofu/testdata/plan-count-var/main.tf new file mode 100644 index 00000000000..8b8a04333e3 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-var/main.tf @@ -0,0 +1,10 @@ +variable "instance_count" {} + +resource "aws_instance" "foo" { + count = var.instance_count + foo = "foo" +} + +resource "aws_instance" "bar" { + foo = join(",", aws_instance.foo.*.foo) +} diff --git a/pkg/tofu/testdata/plan-count-zero/main.tf b/pkg/tofu/testdata/plan-count-zero/main.tf new file mode 100644 index 00000000000..4845cbb0bf2 --- /dev/null +++ b/pkg/tofu/testdata/plan-count-zero/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + count = 0 + foo = "foo" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.*.foo}" +} diff --git a/pkg/tofu/testdata/plan-count/main.tf b/pkg/tofu/testdata/plan-count/main.tf new file mode 100644 index 00000000000..276670ce447 --- /dev/null +++ b/pkg/tofu/testdata/plan-count/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + count = 5 + foo = "foo" +} + +resource "aws_instance" "bar" { + foo = "${join(",", aws_instance.foo.*.foo)}" +} diff --git a/pkg/tofu/testdata/plan-data-depends-on/main.tf b/pkg/tofu/testdata/plan-data-depends-on/main.tf new file mode 100644 index 00000000000..c7332ad291e --- /dev/null +++ b/pkg/tofu/testdata/plan-data-depends-on/main.tf @@ -0,0 +1,14 @@ +resource "test_resource" "a" { +} + +data "test_data" "d" { + count = 1 + depends_on = [ + test_resource.a + ] +} + +resource "test_resource" "b" { + count = 1 + foo = 
data.test_data.d[count.index].compute +} diff --git a/pkg/tofu/testdata/plan-data-resource-becomes-computed/main.tf b/pkg/tofu/testdata/plan-data-resource-becomes-computed/main.tf new file mode 100644 index 00000000000..3f07be3522b --- /dev/null +++ b/pkg/tofu/testdata/plan-data-resource-becomes-computed/main.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "foo" { +} + +data "aws_data_source" "foo" { + foo = "${aws_instance.foo.computed}" +} diff --git a/pkg/tofu/testdata/plan-destroy-interpolated-count/main.tf b/pkg/tofu/testdata/plan-destroy-interpolated-count/main.tf new file mode 100644 index 00000000000..ac0dadbf81f --- /dev/null +++ b/pkg/tofu/testdata/plan-destroy-interpolated-count/main.tf @@ -0,0 +1,20 @@ +variable "list" { + default = ["1", "2"] +} + +resource "aws_instance" "a" { + count = length(var.list) +} + +locals { + ids = aws_instance.a[*].id +} + +module "empty" { + source = "./mod" + input = zipmap(var.list, local.ids) +} + +output "out" { + value = aws_instance.a[*].id +} diff --git a/pkg/tofu/testdata/plan-destroy-interpolated-count/mod/main.tf b/pkg/tofu/testdata/plan-destroy-interpolated-count/mod/main.tf new file mode 100644 index 00000000000..682e0f0db76 --- /dev/null +++ b/pkg/tofu/testdata/plan-destroy-interpolated-count/mod/main.tf @@ -0,0 +1,2 @@ +variable "input" { +} diff --git a/pkg/tofu/testdata/plan-destroy/main.tf b/pkg/tofu/testdata/plan-destroy/main.tf new file mode 100644 index 00000000000..1b6cdae67b0 --- /dev/null +++ b/pkg/tofu/testdata/plan-destroy/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/plan-diffvar/main.tf b/pkg/tofu/testdata/plan-diffvar/main.tf new file mode 100644 index 00000000000..eccc16ff2c3 --- /dev/null +++ b/pkg/tofu/testdata/plan-diffvar/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "3" +} + +resource "aws_instance" "bar" { + num = aws_instance.foo.num +} diff 
--git a/pkg/tofu/testdata/plan-empty/main.tf b/pkg/tofu/testdata/plan-empty/main.tf new file mode 100644 index 00000000000..88002d078a1 --- /dev/null +++ b/pkg/tofu/testdata/plan-empty/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { +} + +resource "aws_instance" "bar" { +} diff --git a/pkg/tofu/testdata/plan-escaped-var/main.tf b/pkg/tofu/testdata/plan-escaped-var/main.tf new file mode 100644 index 00000000000..5a017207ccf --- /dev/null +++ b/pkg/tofu/testdata/plan-escaped-var/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + foo = "bar-$${baz}" +} diff --git a/pkg/tofu/testdata/plan-for-each-unknown-value/main.tf b/pkg/tofu/testdata/plan-for-each-unknown-value/main.tf new file mode 100644 index 00000000000..933ed5f4c32 --- /dev/null +++ b/pkg/tofu/testdata/plan-for-each-unknown-value/main.tf @@ -0,0 +1,20 @@ +# expressions with variable reference +variable "foo" { + type = string +} + +resource "aws_instance" "foo" { + for_each = toset( + [for i in range(0,3) : sha1("${i}${var.foo}")] + ) + foo = "foo" +} + +# referencing another resource, which means it has some unknown values in it +resource "aws_instance" "one" { + for_each = toset(["a", "b"]) +} + +resource "aws_instance" "two" { + for_each = aws_instance.one +} diff --git a/pkg/tofu/testdata/plan-for-each/main.tf b/pkg/tofu/testdata/plan-for-each/main.tf new file mode 100644 index 00000000000..94572e20a47 --- /dev/null +++ b/pkg/tofu/testdata/plan-for-each/main.tf @@ -0,0 +1,35 @@ +# maps +resource "aws_instance" "foo" { + for_each = { + a = "thing" + b = "another thing" + c = "yet another thing" + } + num = "3" +} + +# sets +resource "aws_instance" "bar" { + for_each = toset([]) +} +resource "aws_instance" "bar2" { + for_each = toset(["z", "y", "x"]) +} + +# an empty map should generate no resource +resource "aws_instance" "baz" { + for_each = {} +} + +# references +resource "aws_instance" "boo" { + foo = aws_instance.foo["a"].num +} + +resource "aws_instance" "bat" { + for_each = { + 
my_key = aws_instance.boo.foo + } + foo = each.value +} + diff --git a/pkg/tofu/testdata/plan-good/main.tf b/pkg/tofu/testdata/plan-good/main.tf new file mode 100644 index 00000000000..1b6cdae67b0 --- /dev/null +++ b/pkg/tofu/testdata/plan-good/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf b/pkg/tofu/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf new file mode 100644 index 00000000000..75adcac5c3d --- /dev/null +++ b/pkg/tofu/testdata/plan-ignore-changes-in-map/ignore-changes-in-map.tf @@ -0,0 +1,13 @@ + +resource "test_ignore_changes_map" "foo" { + tags = { + ignored = "from config" + other = "from config" + } + + lifecycle { + ignore_changes = [ + tags["ignored"], + ] + } +} diff --git a/pkg/tofu/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf b/pkg/tofu/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf new file mode 100644 index 00000000000..1f6cc98aced --- /dev/null +++ b/pkg/tofu/testdata/plan-ignore-changes-sensitive/ignore-changes-sensitive.tf @@ -0,0 +1,11 @@ +variable "foo" { + sensitive = true +} + +resource "aws_instance" "foo" { + ami = var.foo + + lifecycle { + ignore_changes = [ami] + } +} diff --git a/pkg/tofu/testdata/plan-ignore-changes-wildcard/main.tf b/pkg/tofu/testdata/plan-ignore-changes-wildcard/main.tf new file mode 100644 index 00000000000..ac594a9eb84 --- /dev/null +++ b/pkg/tofu/testdata/plan-ignore-changes-wildcard/main.tf @@ -0,0 +1,13 @@ +variable "foo" {} + +variable "bar" {} + +resource "aws_instance" "foo" { + ami = "${var.foo}" + instance = "${var.bar}" + foo = "bar" + + lifecycle { + ignore_changes = all + } +} diff --git a/pkg/tofu/testdata/plan-ignore-changes-with-flatmaps/main.tf b/pkg/tofu/testdata/plan-ignore-changes-with-flatmaps/main.tf new file mode 100644 index 00000000000..f61a3d42fc4 --- 
/dev/null +++ b/pkg/tofu/testdata/plan-ignore-changes-with-flatmaps/main.tf @@ -0,0 +1,15 @@ +resource "aws_instance" "foo" { + user_data = "x" + require_new = "yes" + + set = [{ + a = "1" + b = "2" + }] + + lst = ["j", "k"] + + lifecycle { + ignore_changes = ["require_new"] + } +} diff --git a/pkg/tofu/testdata/plan-ignore-changes/main.tf b/pkg/tofu/testdata/plan-ignore-changes/main.tf new file mode 100644 index 00000000000..ed17c634497 --- /dev/null +++ b/pkg/tofu/testdata/plan-ignore-changes/main.tf @@ -0,0 +1,9 @@ +variable "foo" {} + +resource "aws_instance" "foo" { + ami = var.foo + + lifecycle { + ignore_changes = [ami] + } +} diff --git a/pkg/tofu/testdata/plan-list-order/main.tf b/pkg/tofu/testdata/plan-list-order/main.tf new file mode 100644 index 00000000000..77db3d0597e --- /dev/null +++ b/pkg/tofu/testdata/plan-list-order/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "a" { + foo = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20] +} + +resource "aws_instance" "b" { + foo = "${aws_instance.a.foo}" +} diff --git a/pkg/tofu/testdata/plan-local-value-count/main.tf b/pkg/tofu/testdata/plan-local-value-count/main.tf new file mode 100644 index 00000000000..34aad96ad65 --- /dev/null +++ b/pkg/tofu/testdata/plan-local-value-count/main.tf @@ -0,0 +1,8 @@ + +locals { + count = 3 +} + +resource "test_resource" "foo" { + count = "${local.count}" +} diff --git a/pkg/tofu/testdata/plan-module-cycle/child/main.tf b/pkg/tofu/testdata/plan-module-cycle/child/main.tf new file mode 100644 index 00000000000..e2e60c1f086 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-cycle/child/main.tf @@ -0,0 +1,5 @@ +variable "in" {} + +output "out" { + value = "${var.in}" +} diff --git a/pkg/tofu/testdata/plan-module-cycle/main.tf b/pkg/tofu/testdata/plan-module-cycle/main.tf new file mode 100644 index 00000000000..e9c459721f5 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-cycle/main.tf @@ -0,0 +1,12 @@ +module "a" { + source = "./child" + in = "${aws_instance.b.id}" +} + +resource 
"aws_instance" "b" {} + +resource "aws_instance" "c" { + some_input = "${module.a.out}" + + depends_on = ["aws_instance.b"] +} diff --git a/pkg/tofu/testdata/plan-module-deadlock/child/main.tf b/pkg/tofu/testdata/plan-module-deadlock/child/main.tf new file mode 100644 index 00000000000..2451bf0542f --- /dev/null +++ b/pkg/tofu/testdata/plan-module-deadlock/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + count = "${length("abc")}" + + lifecycle { + create_before_destroy = true + } +} diff --git a/pkg/tofu/testdata/plan-module-deadlock/main.tf b/pkg/tofu/testdata/plan-module-deadlock/main.tf new file mode 100644 index 00000000000..1f95749fa7e --- /dev/null +++ b/pkg/tofu/testdata/plan-module-deadlock/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/plan-module-destroy-gh-1835/a/main.tf b/pkg/tofu/testdata/plan-module-destroy-gh-1835/a/main.tf new file mode 100644 index 00000000000..ca44c757d01 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy-gh-1835/a/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "a" {} + +output "a_output" { + value = "${aws_instance.a.id}" +} diff --git a/pkg/tofu/testdata/plan-module-destroy-gh-1835/b/main.tf b/pkg/tofu/testdata/plan-module-destroy-gh-1835/b/main.tf new file mode 100644 index 00000000000..3b0cc666450 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy-gh-1835/b/main.tf @@ -0,0 +1,5 @@ +variable "a_id" {} + +resource "aws_instance" "b" { + foo = "echo ${var.a_id}" +} diff --git a/pkg/tofu/testdata/plan-module-destroy-gh-1835/main.tf b/pkg/tofu/testdata/plan-module-destroy-gh-1835/main.tf new file mode 100644 index 00000000000..c2f72c45e32 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy-gh-1835/main.tf @@ -0,0 +1,8 @@ +module "a_module" { + source = "./a" +} + +module "b_module" { + source = "./b" + a_id = "${module.a_module.a_output}" +} diff --git a/pkg/tofu/testdata/plan-module-destroy-multivar/child/main.tf 
b/pkg/tofu/testdata/plan-module-destroy-multivar/child/main.tf new file mode 100644 index 00000000000..6a496f06f6a --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy-multivar/child/main.tf @@ -0,0 +1,8 @@ +variable "instance_count" { + default = "1" +} + +resource "aws_instance" "foo" { + count = "${var.instance_count}" + bar = "bar" +} diff --git a/pkg/tofu/testdata/plan-module-destroy-multivar/main.tf b/pkg/tofu/testdata/plan-module-destroy-multivar/main.tf new file mode 100644 index 00000000000..2f965b68cc1 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy-multivar/main.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + instance_count = "2" +} diff --git a/pkg/tofu/testdata/plan-module-destroy/child/main.tf b/pkg/tofu/testdata/plan-module-destroy/child/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-module-destroy/main.tf b/pkg/tofu/testdata/plan-module-destroy/main.tf new file mode 100644 index 00000000000..428f89834db --- /dev/null +++ b/pkg/tofu/testdata/plan-module-destroy/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-module-input-computed/child/main.tf b/pkg/tofu/testdata/plan-module-input-computed/child/main.tf new file mode 100644 index 00000000000..c1a00c5a326 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input-computed/child/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "aws_instance" "foo" { + foo = "${var.input}" +} diff --git a/pkg/tofu/testdata/plan-module-input-computed/main.tf b/pkg/tofu/testdata/plan-module-input-computed/main.tf new file mode 100644 index 00000000000..3a0576434fb --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input-computed/main.tf @@ -0,0 +1,8 @@ +module "child" { + input = "${aws_instance.bar.foo}" + 
source = "./child" +} + +resource "aws_instance" "bar" { + compute = "foo" +} diff --git a/pkg/tofu/testdata/plan-module-input-var/child/main.tf b/pkg/tofu/testdata/plan-module-input-var/child/main.tf new file mode 100644 index 00000000000..c1a00c5a326 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input-var/child/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "aws_instance" "foo" { + foo = "${var.input}" +} diff --git a/pkg/tofu/testdata/plan-module-input-var/main.tf b/pkg/tofu/testdata/plan-module-input-var/main.tf new file mode 100644 index 00000000000..3fba315ee2f --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input-var/main.tf @@ -0,0 +1,10 @@ +variable "foo" {} + +module "child" { + input = "${var.foo}" + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "2" +} diff --git a/pkg/tofu/testdata/plan-module-input/child/main.tf b/pkg/tofu/testdata/plan-module-input/child/main.tf new file mode 100644 index 00000000000..c1a00c5a326 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input/child/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "aws_instance" "foo" { + foo = "${var.input}" +} diff --git a/pkg/tofu/testdata/plan-module-input/main.tf b/pkg/tofu/testdata/plan-module-input/main.tf new file mode 100644 index 00000000000..2ad8ec0ca10 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-input/main.tf @@ -0,0 +1,8 @@ +module "child" { + input = "42" + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "2" +} diff --git a/pkg/tofu/testdata/plan-module-map-literal/child/main.tf b/pkg/tofu/testdata/plan-module-map-literal/child/main.tf new file mode 100644 index 00000000000..912431922a7 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-map-literal/child/main.tf @@ -0,0 +1,12 @@ +variable "amap" { + type = map(string) +} + +variable "othermap" { + type = map(string) +} + +resource "aws_instance" "foo" { + tags = "${var.amap}" + meta = "${var.othermap}" +} diff --git 
a/pkg/tofu/testdata/plan-module-map-literal/main.tf b/pkg/tofu/testdata/plan-module-map-literal/main.tf new file mode 100644 index 00000000000..90235ed7a2f --- /dev/null +++ b/pkg/tofu/testdata/plan-module-map-literal/main.tf @@ -0,0 +1,9 @@ +module "child" { + source = "./child" + + amap = { + foo = "bar" + } + + othermap = {} +} diff --git a/pkg/tofu/testdata/plan-module-multi-var/child/main.tf b/pkg/tofu/testdata/plan-module-multi-var/child/main.tf new file mode 100644 index 00000000000..ad8dd6073e5 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-multi-var/child/main.tf @@ -0,0 +1,10 @@ +variable "things" {} + +resource "aws_instance" "bar" { + baz = "baz" + count = 2 +} + +resource "aws_instance" "foo" { + foo = "${join(",",aws_instance.bar.*.baz)}" +} diff --git a/pkg/tofu/testdata/plan-module-multi-var/main.tf b/pkg/tofu/testdata/plan-module-multi-var/main.tf new file mode 100644 index 00000000000..40c7618fe09 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-multi-var/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "parent" { + count = 2 +} + +module "child" { + source = "./child" + things = "${join(",", aws_instance.parent.*.id)}" +} + diff --git a/pkg/tofu/testdata/plan-module-provider-defaults-var/child/main.tf b/pkg/tofu/testdata/plan-module-provider-defaults-var/child/main.tf new file mode 100644 index 00000000000..5ce4f55fe84 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-defaults-var/child/main.tf @@ -0,0 +1,8 @@ +provider "aws" { + from = "child" + to = "child" +} + +resource "aws_instance" "foo" { + from = "child" +} diff --git a/pkg/tofu/testdata/plan-module-provider-defaults-var/main.tf b/pkg/tofu/testdata/plan-module-provider-defaults-var/main.tf new file mode 100644 index 00000000000..d3c34908bd1 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-defaults-var/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +provider "aws" { + from = "${var.foo}" +} + +resource "aws_instance" "foo" {} + +variable "foo" 
{} diff --git a/pkg/tofu/testdata/plan-module-provider-defaults/child/main.tf b/pkg/tofu/testdata/plan-module-provider-defaults/child/main.tf new file mode 100644 index 00000000000..5ce4f55fe84 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-defaults/child/main.tf @@ -0,0 +1,8 @@ +provider "aws" { + from = "child" + to = "child" +} + +resource "aws_instance" "foo" { + from = "child" +} diff --git a/pkg/tofu/testdata/plan-module-provider-defaults/main.tf b/pkg/tofu/testdata/plan-module-provider-defaults/main.tf new file mode 100644 index 00000000000..5b08577c6e4 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-defaults/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +provider "aws" { + from = "root" +} + +resource "aws_instance" "foo" { + from = "root" +} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit-deep/A/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit-deep/A/main.tf new file mode 100644 index 00000000000..efe683c318e --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-inherit-deep/A/main.tf @@ -0,0 +1,3 @@ +module "B" { + source = "../B" +} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit-deep/B/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit-deep/B/main.tf new file mode 100644 index 00000000000..29cba7fc3b0 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-inherit-deep/B/main.tf @@ -0,0 +1,3 @@ +module "C" { + source = "../C" +} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit-deep/C/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit-deep/C/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-inherit-deep/C/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit-deep/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit-deep/main.tf new file mode 100644 index 00000000000..12677b69b22 --- /dev/null +++ 
b/pkg/tofu/testdata/plan-module-provider-inherit-deep/main.tf @@ -0,0 +1,7 @@ +module "A" { + source = "./A" +} + +provider "aws" { + from = "root" +} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit/child/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit/child/main.tf new file mode 100644 index 00000000000..2e890bbc09c --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-inherit/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + from = "child" +} diff --git a/pkg/tofu/testdata/plan-module-provider-inherit/main.tf b/pkg/tofu/testdata/plan-module-provider-inherit/main.tf new file mode 100644 index 00000000000..5b08577c6e4 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-inherit/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +provider "aws" { + from = "root" +} + +resource "aws_instance" "foo" { + from = "root" +} diff --git a/pkg/tofu/testdata/plan-module-provider-var/child/main.tf b/pkg/tofu/testdata/plan-module-provider-var/child/main.tf new file mode 100644 index 00000000000..599cb99db5b --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-var/child/main.tf @@ -0,0 +1,9 @@ +variable "foo" {} + +provider "aws" { + value = "${var.foo}" +} + +resource "aws_instance" "test" { + value = "hello" +} diff --git a/pkg/tofu/testdata/plan-module-provider-var/main.tf b/pkg/tofu/testdata/plan-module-provider-var/main.tf new file mode 100644 index 00000000000..43675f913c4 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-provider-var/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + default = "bar" +} + +module "child" { + source = "./child" + foo = "${var.foo}" +} diff --git a/pkg/tofu/testdata/plan-module-var-computed/child/main.tf b/pkg/tofu/testdata/plan-module-var-computed/child/main.tf new file mode 100644 index 00000000000..20a301330bc --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var-computed/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + compute = "foo" +} + +output "num" { + 
value = "${aws_instance.foo.foo}" +} diff --git a/pkg/tofu/testdata/plan-module-var-computed/main.tf b/pkg/tofu/testdata/plan-module-var-computed/main.tf new file mode 100644 index 00000000000..b38f538a237 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var-computed/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "${module.child.num}" +} diff --git a/pkg/tofu/testdata/plan-module-var-with-default-value/inner/main.tf b/pkg/tofu/testdata/plan-module-var-with-default-value/inner/main.tf new file mode 100644 index 00000000000..5b5cf6cdfc5 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var-with-default-value/inner/main.tf @@ -0,0 +1,12 @@ +variable "im_a_string" { + type = string +} + +variable "service_region_ami" { + type = map(string) + default = { + us-east-1 = "ami-e4c9db8e" + } +} + +resource "null_resource" "noop" {} diff --git a/pkg/tofu/testdata/plan-module-var-with-default-value/main.tf b/pkg/tofu/testdata/plan-module-var-with-default-value/main.tf new file mode 100644 index 00000000000..96b27418a03 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var-with-default-value/main.tf @@ -0,0 +1,7 @@ +resource "null_resource" "noop" {} + +module "test" { + source = "./inner" + + im_a_string = "hello" +} diff --git a/pkg/tofu/testdata/plan-module-var/child/main.tf b/pkg/tofu/testdata/plan-module-var/child/main.tf new file mode 100644 index 00000000000..c7b1d283e3a --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var/child/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +output "num" { + value = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/plan-module-var/main.tf b/pkg/tofu/testdata/plan-module-var/main.tf new file mode 100644 index 00000000000..942bdba9269 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-var/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "${module.child.num}" +} diff --git 
a/pkg/tofu/testdata/plan-module-variable-from-splat/main.tf b/pkg/tofu/testdata/plan-module-variable-from-splat/main.tf new file mode 100644 index 00000000000..be900a3c4a7 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-variable-from-splat/main.tf @@ -0,0 +1,9 @@ +module "mod1" { + source = "./mod" + param = ["this", "one", "works"] +} + +module "mod2" { + source = "./mod" + param = [module.mod1.out_from_splat[0]] +} diff --git a/pkg/tofu/testdata/plan-module-variable-from-splat/mod/main.tf b/pkg/tofu/testdata/plan-module-variable-from-splat/mod/main.tf new file mode 100644 index 00000000000..66127d36b0a --- /dev/null +++ b/pkg/tofu/testdata/plan-module-variable-from-splat/mod/main.tf @@ -0,0 +1,12 @@ +variable "param" { + type = list(string) +} + +resource "aws_instance" "test" { + count = "2" + thing = "doesnt" +} + +output "out_from_splat" { + value = aws_instance.test.*.thing +} diff --git a/pkg/tofu/testdata/plan-module-wrong-var-type-nested/inner/main.tf b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/inner/main.tf new file mode 100644 index 00000000000..dabe507fe57 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/inner/main.tf @@ -0,0 +1,13 @@ +variable "inner_in" { + type = map(string) + default = { + us-west-1 = "ami-12345" + us-west-2 = "ami-67890" + } +} + +resource "null_resource" "inner_noop" {} + +output "inner_out" { + value = lookup(var.inner_in, "us-west-1") +} diff --git a/pkg/tofu/testdata/plan-module-wrong-var-type-nested/main.tf b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/main.tf new file mode 100644 index 00000000000..8f9fdcc5651 --- /dev/null +++ b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/main.tf @@ -0,0 +1,3 @@ +module "middle" { + source = "./middle" +} diff --git a/pkg/tofu/testdata/plan-module-wrong-var-type-nested/middle/main.tf b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/middle/main.tf new file mode 100644 index 00000000000..eb989fe9360 --- /dev/null +++ 
b/pkg/tofu/testdata/plan-module-wrong-var-type-nested/middle/main.tf @@ -0,0 +1,19 @@ +variable "middle_in" { + type = map(string) + default = { + eu-west-1 = "ami-12345" + eu-west-2 = "ami-67890" + } +} + +module "inner" { + source = "../inner" + + inner_in = "hello" +} + +resource "null_resource" "middle_noop" {} + +output "middle_out" { + value = lookup(var.middle_in, "us-west-1") +} diff --git a/pkg/tofu/testdata/plan-module-wrong-var-type/inner/main.tf b/pkg/tofu/testdata/plan-module-wrong-var-type/inner/main.tf new file mode 100644 index 00000000000..7782d1b844d --- /dev/null +++ b/pkg/tofu/testdata/plan-module-wrong-var-type/inner/main.tf @@ -0,0 +1,13 @@ +variable "map_in" { + type = map(string) + + default = { + us-west-1 = "ami-12345" + us-west-2 = "ami-67890" + } +} + +// We have to reference it so it isn't pruned +output "output" { + value = var.map_in +} diff --git a/pkg/tofu/testdata/plan-module-wrong-var-type/main.tf b/pkg/tofu/testdata/plan-module-wrong-var-type/main.tf new file mode 100644 index 00000000000..5a39cd5d5ae --- /dev/null +++ b/pkg/tofu/testdata/plan-module-wrong-var-type/main.tf @@ -0,0 +1,10 @@ +variable "input" { + type = string + default = "hello world" +} + +module "test" { + source = "./inner" + + map_in = var.input +} diff --git a/pkg/tofu/testdata/plan-modules-expand/child/main.tf b/pkg/tofu/testdata/plan-modules-expand/child/main.tf new file mode 100644 index 00000000000..612478f79d5 --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-expand/child/main.tf @@ -0,0 +1,12 @@ +variable "foo" {} +variable "bar" {} + +resource "aws_instance" "foo" { + count = 2 + num = var.foo + bar = "baz" #var.bar +} + +output "out" { + value = aws_instance.foo[0].id +} diff --git a/pkg/tofu/testdata/plan-modules-expand/main.tf b/pkg/tofu/testdata/plan-modules-expand/main.tf new file mode 100644 index 00000000000..023709596c6 --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-expand/main.tf @@ -0,0 +1,29 @@ +locals { + val = 2 + bar = "baz" + m = { 
+ "a" = "b" + } +} + +variable "myvar" { + default = "baz" +} + +module "count_child" { + count = local.val + foo = count.index + bar = var.myvar + source = "./child" +} + +module "for_each_child" { + for_each = aws_instance.foo + foo = 2 + bar = each.key + source = "./child" +} + +resource "aws_instance" "foo" { + for_each = local.m +} diff --git a/pkg/tofu/testdata/plan-modules-remove-provisioners/main.tf b/pkg/tofu/testdata/plan-modules-remove-provisioners/main.tf new file mode 100644 index 00000000000..ce9a3886646 --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-remove-provisioners/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "top" {} + +# module "test" { +# source = "./parent" +# } diff --git a/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/child/main.tf b/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/child/main.tf new file mode 100644 index 00000000000..b626e60c824 --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/child/main.tf @@ -0,0 +1,2 @@ +resource "aws_instance" "foo" { +} diff --git a/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/main.tf b/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/main.tf new file mode 100644 index 00000000000..fbc1aa09c1e --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-remove-provisioners/parent/main.tf @@ -0,0 +1,7 @@ +module "childone" { + source = "./child" +} + +module "childtwo" { + source = "./child" +} diff --git a/pkg/tofu/testdata/plan-modules-remove/main.tf b/pkg/tofu/testdata/plan-modules-remove/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/plan-modules-remove/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-modules/child/main.tf b/pkg/tofu/testdata/plan-modules/child/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/plan-modules/child/main.tf @@ -0,0 +1,3 @@ +resource 
"aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-modules/main.tf b/pkg/tofu/testdata/plan-modules/main.tf new file mode 100644 index 00000000000..dcdb236a1d3 --- /dev/null +++ b/pkg/tofu/testdata/plan-modules/main.tf @@ -0,0 +1,11 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/plan-orphan/main.tf b/pkg/tofu/testdata/plan-orphan/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/plan-orphan/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-path-var/main.tf b/pkg/tofu/testdata/plan-path-var/main.tf new file mode 100644 index 00000000000..13012569882 --- /dev/null +++ b/pkg/tofu/testdata/plan-path-var/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { + cwd = "${path.cwd}/barpath" + module = "${path.module}/foopath" + root = "${path.root}/barpath" +} diff --git a/pkg/tofu/testdata/plan-prevent-destroy-bad/main.tf b/pkg/tofu/testdata/plan-prevent-destroy-bad/main.tf new file mode 100644 index 00000000000..19077c1a651 --- /dev/null +++ b/pkg/tofu/testdata/plan-prevent-destroy-bad/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + require_new = "yes" + + lifecycle { + prevent_destroy = true + } +} diff --git a/pkg/tofu/testdata/plan-prevent-destroy-count-bad/main.tf b/pkg/tofu/testdata/plan-prevent-destroy-count-bad/main.tf new file mode 100644 index 00000000000..818f93e7020 --- /dev/null +++ b/pkg/tofu/testdata/plan-prevent-destroy-count-bad/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + count = "1" + current = "${count.index}" + + lifecycle { + prevent_destroy = true + } +} diff --git a/pkg/tofu/testdata/plan-prevent-destroy-count-good/main.tf b/pkg/tofu/testdata/plan-prevent-destroy-count-good/main.tf new file mode 100644 index 00000000000..b6b47907850 --- /dev/null 
+++ b/pkg/tofu/testdata/plan-prevent-destroy-count-good/main.tf @@ -0,0 +1,4 @@ +resource "aws_instance" "foo" { + count = "1" + current = "${count.index}" +} diff --git a/pkg/tofu/testdata/plan-prevent-destroy-good/main.tf b/pkg/tofu/testdata/plan-prevent-destroy-good/main.tf new file mode 100644 index 00000000000..a88b9e3e101 --- /dev/null +++ b/pkg/tofu/testdata/plan-prevent-destroy-good/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" { + lifecycle { + prevent_destroy = true + } +} diff --git a/pkg/tofu/testdata/plan-provider/main.tf b/pkg/tofu/testdata/plan-provider/main.tf new file mode 100644 index 00000000000..8010f70aef9 --- /dev/null +++ b/pkg/tofu/testdata/plan-provider/main.tf @@ -0,0 +1,7 @@ +variable "foo" {} + +provider "aws" { + foo = "${var.foo}" +} + +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/plan-provisioner-cycle/main.tf b/pkg/tofu/testdata/plan-provisioner-cycle/main.tf new file mode 100644 index 00000000000..ed65c0918ca --- /dev/null +++ b/pkg/tofu/testdata/plan-provisioner-cycle/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + count = 3 + + provisioner "local-exec" { + command = "echo ${aws_instance.foo.0.id} ${aws_instance.foo.1.id} ${aws_instance.foo.2.id}" + } +} diff --git a/pkg/tofu/testdata/plan-required-output/main.tf b/pkg/tofu/testdata/plan-required-output/main.tf new file mode 100644 index 00000000000..227b5c1530c --- /dev/null +++ b/pkg/tofu/testdata/plan-required-output/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "root" { + required = module.mod.object.id +} + +module "mod" { + source = "./mod" +} diff --git a/pkg/tofu/testdata/plan-required-output/mod/main.tf b/pkg/tofu/testdata/plan-required-output/mod/main.tf new file mode 100644 index 00000000000..772f1645f3e --- /dev/null +++ b/pkg/tofu/testdata/plan-required-output/mod/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "for_output" { + required = "val" +} + +output "object" { + value = test_resource.for_output +} diff --git 
a/pkg/tofu/testdata/plan-required-whole-mod/main.tf b/pkg/tofu/testdata/plan-required-whole-mod/main.tf new file mode 100644 index 00000000000..9deb3c5a162 --- /dev/null +++ b/pkg/tofu/testdata/plan-required-whole-mod/main.tf @@ -0,0 +1,17 @@ +resource "test_resource" "root" { + required = local.object.id +} + +locals { + # This indirection is here to force the evaluator to produce the whole + # module object here rather than just fetching the single "object" output. + # This makes this fixture different than plan-required-output, which just + # accesses module.mod.object.id directly and thus visits a different + # codepath in the evaluator. + mod = module.mod + object = local.mod.object +} + +module "mod" { + source = "./mod" +} diff --git a/pkg/tofu/testdata/plan-required-whole-mod/mod/main.tf b/pkg/tofu/testdata/plan-required-whole-mod/mod/main.tf new file mode 100644 index 00000000000..772f1645f3e --- /dev/null +++ b/pkg/tofu/testdata/plan-required-whole-mod/mod/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "for_output" { + required = "val" +} + +output "object" { + value = test_resource.for_output +} diff --git a/pkg/tofu/testdata/plan-requires-replace/main.tf b/pkg/tofu/testdata/plan-requires-replace/main.tf new file mode 100644 index 00000000000..23cee56b3b8 --- /dev/null +++ b/pkg/tofu/testdata/plan-requires-replace/main.tf @@ -0,0 +1,3 @@ +resource "test_thing" "foo" { + v = "goodbye" +} diff --git a/pkg/tofu/testdata/plan-self-ref-multi-all/main.tf b/pkg/tofu/testdata/plan-self-ref-multi-all/main.tf new file mode 100644 index 00000000000..d3a9857f7bd --- /dev/null +++ b/pkg/tofu/testdata/plan-self-ref-multi-all/main.tf @@ -0,0 +1,4 @@ +resource "aws_instance" "web" { + foo = "${aws_instance.web.*.foo}" + count = 4 +} diff --git a/pkg/tofu/testdata/plan-self-ref-multi/main.tf b/pkg/tofu/testdata/plan-self-ref-multi/main.tf new file mode 100644 index 00000000000..5b27cac7150 --- /dev/null +++ b/pkg/tofu/testdata/plan-self-ref-multi/main.tf @@ -0,0 +1,4 
@@ +resource "aws_instance" "web" { + foo = "${aws_instance.web.0.foo}" + count = 4 +} diff --git a/pkg/tofu/testdata/plan-self-ref/main.tf b/pkg/tofu/testdata/plan-self-ref/main.tf new file mode 100644 index 00000000000..f2bf91d77bf --- /dev/null +++ b/pkg/tofu/testdata/plan-self-ref/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "web" { + foo = "${aws_instance.web.foo}" +} diff --git a/pkg/tofu/testdata/plan-shadow-uuid/main.tf b/pkg/tofu/testdata/plan-shadow-uuid/main.tf new file mode 100644 index 00000000000..2b6ec72a001 --- /dev/null +++ b/pkg/tofu/testdata/plan-shadow-uuid/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "test" { + value = "${uuid()}" +} diff --git a/pkg/tofu/testdata/plan-taint-ignore-changes/main.tf b/pkg/tofu/testdata/plan-taint-ignore-changes/main.tf new file mode 100644 index 00000000000..ff95d6596dc --- /dev/null +++ b/pkg/tofu/testdata/plan-taint-ignore-changes/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + vars = "foo" + + lifecycle { + ignore_changes = ["vars"] + } +} diff --git a/pkg/tofu/testdata/plan-taint-interpolated-count/main.tf b/pkg/tofu/testdata/plan-taint-interpolated-count/main.tf new file mode 100644 index 00000000000..91d8b65c81c --- /dev/null +++ b/pkg/tofu/testdata/plan-taint-interpolated-count/main.tf @@ -0,0 +1,7 @@ +variable "instance_count" { + default = 3 +} + +resource "aws_instance" "foo" { + count = "${var.instance_count}" +} diff --git a/pkg/tofu/testdata/plan-taint/main.tf b/pkg/tofu/testdata/plan-taint/main.tf new file mode 100644 index 00000000000..1b6cdae67b0 --- /dev/null +++ b/pkg/tofu/testdata/plan-taint/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/pkg/tofu/testdata/plan-targeted-cross-module/A/main.tf b/pkg/tofu/testdata/plan-targeted-cross-module/A/main.tf new file mode 100644 index 00000000000..4c014aa2234 --- /dev/null +++ 
b/pkg/tofu/testdata/plan-targeted-cross-module/A/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + foo = "bar" +} + +output "value" { + value = "${aws_instance.foo.id}" +} diff --git a/pkg/tofu/testdata/plan-targeted-cross-module/B/main.tf b/pkg/tofu/testdata/plan-targeted-cross-module/B/main.tf new file mode 100644 index 00000000000..c3aeb7b76e3 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-cross-module/B/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "aws_instance" "bar" { + foo = "${var.input}" +} diff --git a/pkg/tofu/testdata/plan-targeted-cross-module/main.tf b/pkg/tofu/testdata/plan-targeted-cross-module/main.tf new file mode 100644 index 00000000000..e6a83b2a02b --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-cross-module/main.tf @@ -0,0 +1,8 @@ +module "A" { + source = "./A" +} + +module "B" { + source = "./B" + input = "${module.A.value}" +} diff --git a/pkg/tofu/testdata/plan-targeted-module-orphan/main.tf b/pkg/tofu/testdata/plan-targeted-module-orphan/main.tf new file mode 100644 index 00000000000..2b33fedaed1 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-orphan/main.tf @@ -0,0 +1,6 @@ +# Once upon a time, there was a child module here +/* +module "child" { + source = "./child" +} +*/ diff --git a/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/child/main.tf b/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/child/main.tf new file mode 100644 index 00000000000..f7b424b8415 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/child/main.tf @@ -0,0 +1,5 @@ +variable "id" {} + +resource "aws_instance" "mod" { + value = "${var.id}" +} diff --git a/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/main.tf b/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/main.tf new file mode 100644 index 00000000000..90e44dceba6 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-untargeted-variable/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "blue" { } 
+resource "aws_instance" "green" { } + +module "blue_mod" { + source = "./child" + id = "${aws_instance.blue.id}" +} + +module "green_mod" { + source = "./child" + id = "${aws_instance.green.id}" +} diff --git a/pkg/tofu/testdata/plan-targeted-module-with-provider/child1/main.tf b/pkg/tofu/testdata/plan-targeted-module-with-provider/child1/main.tf new file mode 100644 index 00000000000..c9aaff5f724 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-with-provider/child1/main.tf @@ -0,0 +1,7 @@ +variable "key" {} + +provider "null" { + key = "${var.key}" +} + +resource "null_resource" "foo" {} diff --git a/pkg/tofu/testdata/plan-targeted-module-with-provider/child2/main.tf b/pkg/tofu/testdata/plan-targeted-module-with-provider/child2/main.tf new file mode 100644 index 00000000000..c9aaff5f724 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-with-provider/child2/main.tf @@ -0,0 +1,7 @@ +variable "key" {} + +provider "null" { + key = "${var.key}" +} + +resource "null_resource" "foo" {} diff --git a/pkg/tofu/testdata/plan-targeted-module-with-provider/main.tf b/pkg/tofu/testdata/plan-targeted-module-with-provider/main.tf new file mode 100644 index 00000000000..0fa7bcffdd7 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-module-with-provider/main.tf @@ -0,0 +1,9 @@ +module "child1" { + source = "./child1" + key = "value" +} + +module "child2" { + source = "./child2" + key = "value" +} diff --git a/pkg/tofu/testdata/plan-targeted-orphan/main.tf b/pkg/tofu/testdata/plan-targeted-orphan/main.tf new file mode 100644 index 00000000000..f2020858b14 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-orphan/main.tf @@ -0,0 +1,6 @@ +# This resource was previously "created" and the fixture represents +# it being destroyed subsequently + +/*resource "aws_instance" "orphan" {*/ + /*foo = "bar"*/ +/*}*/ diff --git a/pkg/tofu/testdata/plan-targeted-over-ten/main.tf b/pkg/tofu/testdata/plan-targeted-over-ten/main.tf new file mode 100644 index 
00000000000..1c7bc8769e0 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted-over-ten/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + count = 13 +} diff --git a/pkg/tofu/testdata/plan-targeted/main.tf b/pkg/tofu/testdata/plan-targeted/main.tf new file mode 100644 index 00000000000..ab00a845fa5 --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = aws_instance.foo.num +} + +module "mod" { + source = "./mod" + count = 1 +} diff --git a/pkg/tofu/testdata/plan-targeted/mod/main.tf b/pkg/tofu/testdata/plan-targeted/mod/main.tf new file mode 100644 index 00000000000..98f5ee87e9f --- /dev/null +++ b/pkg/tofu/testdata/plan-targeted/mod/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + num = "2" +} diff --git a/pkg/tofu/testdata/plan-untargeted-resource-output/main.tf b/pkg/tofu/testdata/plan-untargeted-resource-output/main.tf new file mode 100644 index 00000000000..9d4a1c882d1 --- /dev/null +++ b/pkg/tofu/testdata/plan-untargeted-resource-output/main.tf @@ -0,0 +1,8 @@ +module "mod" { + source = "./mod" +} + + +resource "aws_instance" "c" { + name = "${module.mod.output}" +} diff --git a/pkg/tofu/testdata/plan-untargeted-resource-output/mod/main.tf b/pkg/tofu/testdata/plan-untargeted-resource-output/mod/main.tf new file mode 100644 index 00000000000..dd6d791cba4 --- /dev/null +++ b/pkg/tofu/testdata/plan-untargeted-resource-output/mod/main.tf @@ -0,0 +1,15 @@ +locals { + one = 1 +} + +resource "aws_instance" "a" { + count = "${local.one}" +} + +resource "aws_instance" "b" { + count = "${local.one}" +} + +output "output" { + value = "${join("", coalescelist(aws_instance.a.*.id, aws_instance.b.*.id))}" +} diff --git a/pkg/tofu/testdata/plan-var-list-err/main.tf b/pkg/tofu/testdata/plan-var-list-err/main.tf new file mode 100644 index 00000000000..6303064c9f6 --- /dev/null +++ b/pkg/tofu/testdata/plan-var-list-err/main.tf @@ -0,0 +1,16 @@ 
+provider "aws" { + access_key = "a" + secret_key = "b" + region = "us-east-1" +} + +resource "aws_instance" "foo" { + ami = "ami-foo" + instance_type = "t2.micro" + security_groups = "${aws_security_group.foo.name}" +} + +resource "aws_security_group" "foo" { + name = "foobar" + description = "foobar" +} diff --git a/pkg/tofu/testdata/plan-variable-sensitivity-module/child/main.tf b/pkg/tofu/testdata/plan-variable-sensitivity-module/child/main.tf new file mode 100644 index 00000000000..e34751aa9b6 --- /dev/null +++ b/pkg/tofu/testdata/plan-variable-sensitivity-module/child/main.tf @@ -0,0 +1,13 @@ +variable "foo" { + type = string +} + +// "bar" is defined as sensitive by both the parent and the child +variable "bar" { + sensitive = true +} + +resource "aws_instance" "foo" { + foo = var.foo + value = var.bar +} diff --git a/pkg/tofu/testdata/plan-variable-sensitivity-module/main.tf b/pkg/tofu/testdata/plan-variable-sensitivity-module/main.tf new file mode 100644 index 00000000000..69bdbb4cbed --- /dev/null +++ b/pkg/tofu/testdata/plan-variable-sensitivity-module/main.tf @@ -0,0 +1,14 @@ +variable "sensitive_var" { + default = "foo" + sensitive = true +} + +variable "another_var" { + sensitive = true +} + +module "child" { + source = "./child" + foo = var.sensitive_var + bar = var.another_var +} diff --git a/pkg/tofu/testdata/plan-variable-sensitivity/main.tf b/pkg/tofu/testdata/plan-variable-sensitivity/main.tf new file mode 100644 index 00000000000..00a4b1ef9ee --- /dev/null +++ b/pkg/tofu/testdata/plan-variable-sensitivity/main.tf @@ -0,0 +1,8 @@ +variable "sensitive_var" { + default = "foo" + sensitive = true +} + +resource "aws_instance" "foo" { + foo = var.sensitive_var +} \ No newline at end of file diff --git a/pkg/tofu/testdata/provider-meta-data-set/main.tf b/pkg/tofu/testdata/provider-meta-data-set/main.tf new file mode 100644 index 00000000000..ef7acd957b3 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-data-set/main.tf @@ -0,0 +1,13 @@ +data 
"test_data_source" "foo" { + foo = "bar" +} + +terraform { + provider_meta "test" { + baz = "quux" + } +} + +module "my_module" { + source = "./my-module" +} diff --git a/pkg/tofu/testdata/provider-meta-data-set/my-module/main.tf b/pkg/tofu/testdata/provider-meta-data-set/my-module/main.tf new file mode 100644 index 00000000000..61a97706935 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-data-set/my-module/main.tf @@ -0,0 +1,9 @@ +data "test_file" "foo" { + id = "bar" +} + +terraform { + provider_meta "test" { + baz = "quux-submodule" + } +} diff --git a/pkg/tofu/testdata/provider-meta-data-unset/main.tf b/pkg/tofu/testdata/provider-meta-data-unset/main.tf new file mode 100644 index 00000000000..c4091f37b13 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-data-unset/main.tf @@ -0,0 +1,7 @@ +data "test_data_source" "foo" { + foo = "bar" +} + +module "my_module" { + source = "./my-module" +} diff --git a/pkg/tofu/testdata/provider-meta-data-unset/my-module/main.tf b/pkg/tofu/testdata/provider-meta-data-unset/my-module/main.tf new file mode 100644 index 00000000000..7e0ea46b6b7 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-data-unset/my-module/main.tf @@ -0,0 +1,3 @@ +data "test_file" "foo" { + id = "bar" +} diff --git a/pkg/tofu/testdata/provider-meta-set/main.tf b/pkg/tofu/testdata/provider-meta-set/main.tf new file mode 100644 index 00000000000..a3e9f804bee --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-set/main.tf @@ -0,0 +1,13 @@ +resource "test_instance" "bar" { + foo = "bar" +} + +terraform { + provider_meta "test" { + baz = "quux" + } +} + +module "my_module" { + source = "./my-module" +} diff --git a/pkg/tofu/testdata/provider-meta-set/my-module/main.tf b/pkg/tofu/testdata/provider-meta-set/my-module/main.tf new file mode 100644 index 00000000000..2a89dd51f34 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-set/my-module/main.tf @@ -0,0 +1,9 @@ +resource "test_resource" "bar" { + value = "bar" +} + +terraform { + provider_meta "test" { + 
baz = "quux-submodule" + } +} diff --git a/pkg/tofu/testdata/provider-meta-unset/main.tf b/pkg/tofu/testdata/provider-meta-unset/main.tf new file mode 100644 index 00000000000..0ae85d39fa2 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-unset/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "bar" { + foo = "bar" +} + +module "my_module" { + source = "./my-module" +} diff --git a/pkg/tofu/testdata/provider-meta-unset/my-module/main.tf b/pkg/tofu/testdata/provider-meta-unset/my-module/main.tf new file mode 100644 index 00000000000..ec9701f9560 --- /dev/null +++ b/pkg/tofu/testdata/provider-meta-unset/my-module/main.tf @@ -0,0 +1,3 @@ +resource "test_resource" "bar" { + value = "bar" +} diff --git a/pkg/tofu/testdata/provider-with-locals/main.tf b/pkg/tofu/testdata/provider-with-locals/main.tf new file mode 100644 index 00000000000..3a7db0f8772 --- /dev/null +++ b/pkg/tofu/testdata/provider-with-locals/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + region = "${local.foo}" +} + +locals { + foo = "bar" +} + +resource "aws_instance" "foo" { + value = "${local.foo}" +} diff --git a/pkg/tofu/testdata/refresh-basic/main.tf b/pkg/tofu/testdata/refresh-basic/main.tf new file mode 100644 index 00000000000..64cbf623665 --- /dev/null +++ b/pkg/tofu/testdata/refresh-basic/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/refresh-data-count/refresh-data-count.tf b/pkg/tofu/testdata/refresh-data-count/refresh-data-count.tf new file mode 100644 index 00000000000..ccabdb2c689 --- /dev/null +++ b/pkg/tofu/testdata/refresh-data-count/refresh-data-count.tf @@ -0,0 +1,6 @@ +resource "test" "foo" { +} + +data "test" "foo" { + count = length(test.foo.things) +} diff --git a/pkg/tofu/testdata/refresh-data-module-var/child/main.tf b/pkg/tofu/testdata/refresh-data-module-var/child/main.tf new file mode 100644 index 00000000000..64d21beda04 --- /dev/null +++ b/pkg/tofu/testdata/refresh-data-module-var/child/main.tf @@ -0,0 +1,6 @@ +variable "key" {} + 
+data "aws_data_source" "foo" { + id = "${var.key}" +} + diff --git a/pkg/tofu/testdata/refresh-data-module-var/main.tf b/pkg/tofu/testdata/refresh-data-module-var/main.tf new file mode 100644 index 00000000000..a371831bd23 --- /dev/null +++ b/pkg/tofu/testdata/refresh-data-module-var/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "A" { + foo = "bar" +} + +module "child" { + source = "./child" + key = "${aws_instance.A.id}" +} diff --git a/pkg/tofu/testdata/refresh-data-ref-data/main.tf b/pkg/tofu/testdata/refresh-data-ref-data/main.tf new file mode 100644 index 00000000000..5512be23321 --- /dev/null +++ b/pkg/tofu/testdata/refresh-data-ref-data/main.tf @@ -0,0 +1,7 @@ +data "null_data_source" "foo" { + foo = "yes" +} + +data "null_data_source" "bar" { + bar = "${data.null_data_source.foo.foo}" +} diff --git a/pkg/tofu/testdata/refresh-data-resource-basic/main.tf b/pkg/tofu/testdata/refresh-data-resource-basic/main.tf new file mode 100644 index 00000000000..cb16d9f3414 --- /dev/null +++ b/pkg/tofu/testdata/refresh-data-resource-basic/main.tf @@ -0,0 +1,5 @@ +data "null_data_source" "testing" { + inputs = { + test = "yes" + } +} diff --git a/pkg/tofu/testdata/refresh-dynamic/main.tf b/pkg/tofu/testdata/refresh-dynamic/main.tf new file mode 100644 index 00000000000..5c857a2f459 --- /dev/null +++ b/pkg/tofu/testdata/refresh-dynamic/main.tf @@ -0,0 +1,3 @@ +resource "test_instance" "foo" { + dynamic = {} +} diff --git a/pkg/tofu/testdata/refresh-module-computed-var/child/main.tf b/pkg/tofu/testdata/refresh-module-computed-var/child/main.tf new file mode 100644 index 00000000000..38260d6373c --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-computed-var/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +output "value" { + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/refresh-module-computed-var/main.tf b/pkg/tofu/testdata/refresh-module-computed-var/main.tf new file mode 100644 index 00000000000..a8573327b15 --- /dev/null +++ 
b/pkg/tofu/testdata/refresh-module-computed-var/main.tf @@ -0,0 +1,8 @@ +module "child" { + source = "./child" + value = "${join(" ", aws_instance.test.*.id)}" +} + +resource "aws_instance" "test" { + value = "yes" +} diff --git a/pkg/tofu/testdata/refresh-module-input-computed-output/child/main.tf b/pkg/tofu/testdata/refresh-module-input-computed-output/child/main.tf new file mode 100644 index 00000000000..ebc1e3ffc14 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-input-computed-output/child/main.tf @@ -0,0 +1,11 @@ +variable "input" { + type = string +} + +resource "aws_instance" "foo" { + foo = var.input +} + +output "foo" { + value = aws_instance.foo.foo +} diff --git a/pkg/tofu/testdata/refresh-module-input-computed-output/main.tf b/pkg/tofu/testdata/refresh-module-input-computed-output/main.tf new file mode 100644 index 00000000000..5827a5da25e --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-input-computed-output/main.tf @@ -0,0 +1,8 @@ +module "child" { + input = aws_instance.bar.foo + source = "./child" +} + +resource "aws_instance" "bar" { + compute = "foo" +} diff --git a/pkg/tofu/testdata/refresh-module-orphan/child/grandchild/main.tf b/pkg/tofu/testdata/refresh-module-orphan/child/grandchild/main.tf new file mode 100644 index 00000000000..942e93dbc48 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-orphan/child/grandchild/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "baz" {} + +output "id" { value = "${aws_instance.baz.id}" } diff --git a/pkg/tofu/testdata/refresh-module-orphan/child/main.tf b/pkg/tofu/testdata/refresh-module-orphan/child/main.tf new file mode 100644 index 00000000000..7c3fc842f34 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-orphan/child/main.tf @@ -0,0 +1,10 @@ +module "grandchild" { + source = "./grandchild" +} + +resource "aws_instance" "bar" { + grandchildid = "${module.grandchild.id}" +} + +output "id" { value = "${aws_instance.bar.id}" } +output "grandchild_id" { value = "${module.grandchild.id}" } diff 
--git a/pkg/tofu/testdata/refresh-module-orphan/main.tf b/pkg/tofu/testdata/refresh-module-orphan/main.tf new file mode 100644 index 00000000000..244374d9d16 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-orphan/main.tf @@ -0,0 +1,10 @@ +/* +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + childid = "${module.child.id}" + grandchildid = "${module.child.grandchild_id}" +} +*/ diff --git a/pkg/tofu/testdata/refresh-module-var-module/bar/main.tf b/pkg/tofu/testdata/refresh-module-var-module/bar/main.tf new file mode 100644 index 00000000000..46ea37f14f2 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-var-module/bar/main.tf @@ -0,0 +1,3 @@ +variable "value" {} + +resource "aws_instance" "bar" {} diff --git a/pkg/tofu/testdata/refresh-module-var-module/foo/main.tf b/pkg/tofu/testdata/refresh-module-var-module/foo/main.tf new file mode 100644 index 00000000000..2ee798058d3 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-var-module/foo/main.tf @@ -0,0 +1,7 @@ +output "output" { + value = "${aws_instance.foo.foo}" +} + +resource "aws_instance" "foo" { + compute = "foo" +} diff --git a/pkg/tofu/testdata/refresh-module-var-module/main.tf b/pkg/tofu/testdata/refresh-module-var-module/main.tf new file mode 100644 index 00000000000..76775e3e6d0 --- /dev/null +++ b/pkg/tofu/testdata/refresh-module-var-module/main.tf @@ -0,0 +1,8 @@ +module "foo" { + source = "./foo" +} + +module "bar" { + source = "./bar" + value = "${module.foo.output}" +} diff --git a/pkg/tofu/testdata/refresh-modules/child/main.tf b/pkg/tofu/testdata/refresh-modules/child/main.tf new file mode 100644 index 00000000000..64cbf623665 --- /dev/null +++ b/pkg/tofu/testdata/refresh-modules/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/refresh-modules/main.tf b/pkg/tofu/testdata/refresh-modules/main.tf new file mode 100644 index 00000000000..6b4520ec0f4 --- /dev/null +++ b/pkg/tofu/testdata/refresh-modules/main.tf @@ -0,0 
+1,5 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/refresh-no-state/main.tf b/pkg/tofu/testdata/refresh-no-state/main.tf new file mode 100644 index 00000000000..76c0f87671c --- /dev/null +++ b/pkg/tofu/testdata/refresh-no-state/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "" +} diff --git a/pkg/tofu/testdata/refresh-output-partial/main.tf b/pkg/tofu/testdata/refresh-output-partial/main.tf new file mode 100644 index 00000000000..36ce289a34b --- /dev/null +++ b/pkg/tofu/testdata/refresh-output-partial/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" {} + +resource "aws_instance" "web" {} + +output "foo" { + value = "${aws_instance.web.foo}" +} diff --git a/pkg/tofu/testdata/refresh-output/main.tf b/pkg/tofu/testdata/refresh-output/main.tf new file mode 100644 index 00000000000..42a01bd5ca1 --- /dev/null +++ b/pkg/tofu/testdata/refresh-output/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "web" {} + +output "foo" { + value = "${aws_instance.web.foo}" +} diff --git a/pkg/tofu/testdata/refresh-schema-upgrade/main.tf b/pkg/tofu/testdata/refresh-schema-upgrade/main.tf new file mode 100644 index 00000000000..ee0590e3c2d --- /dev/null +++ b/pkg/tofu/testdata/refresh-schema-upgrade/main.tf @@ -0,0 +1,2 @@ +resource "test_thing" "bar" { +} diff --git a/pkg/tofu/testdata/refresh-targeted-count/main.tf b/pkg/tofu/testdata/refresh-targeted-count/main.tf new file mode 100644 index 00000000000..f564b629c1a --- /dev/null +++ b/pkg/tofu/testdata/refresh-targeted-count/main.tf @@ -0,0 +1,9 @@ +resource "aws_vpc" "metoo" {} +resource "aws_instance" "notme" { } +resource "aws_instance" "me" { + vpc_id = "${aws_vpc.metoo.id}" + count = 3 +} +resource "aws_elb" "meneither" { + instances = ["${aws_instance.me.*.id}"] +} diff --git a/pkg/tofu/testdata/refresh-targeted/main.tf b/pkg/tofu/testdata/refresh-targeted/main.tf new file mode 100644 index 00000000000..3a76184647f --- /dev/null +++ 
b/pkg/tofu/testdata/refresh-targeted/main.tf @@ -0,0 +1,8 @@ +resource "aws_vpc" "metoo" {} +resource "aws_instance" "notme" { } +resource "aws_instance" "me" { + vpc_id = "${aws_vpc.metoo.id}" +} +resource "aws_elb" "meneither" { + instances = ["${aws_instance.me.*.id}"] +} diff --git a/pkg/tofu/testdata/refresh-unknown-provider/main.tf b/pkg/tofu/testdata/refresh-unknown-provider/main.tf new file mode 100644 index 00000000000..8a29fddd086 --- /dev/null +++ b/pkg/tofu/testdata/refresh-unknown-provider/main.tf @@ -0,0 +1,4 @@ +resource "unknown_instance" "foo" { + num = "2" + compute = "foo" +} diff --git a/pkg/tofu/testdata/refresh-vars/main.tf b/pkg/tofu/testdata/refresh-vars/main.tf new file mode 100644 index 00000000000..86cd6ace372 --- /dev/null +++ b/pkg/tofu/testdata/refresh-vars/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "web" {} + +resource "aws_instance" "db" { + ami = "${aws_instance.web.id}" +} diff --git a/pkg/tofu/testdata/static-validate-refs/static-validate-refs.tf b/pkg/tofu/testdata/static-validate-refs/static-validate-refs.tf new file mode 100644 index 00000000000..2f71e21713d --- /dev/null +++ b/pkg/tofu/testdata/static-validate-refs/static-validate-refs.tf @@ -0,0 +1,32 @@ +terraform { + required_providers { + boop = { + source = "foobar/beep" # intentional mismatch between local name and type + } + } +} + +resource "aws_instance" "no_count" { +} + +resource "aws_instance" "count" { + count = 1 +} + +resource "boop_instance" "yep" { +} + +resource "boop_whatever" "nope" { +} + +data "beep" "boop" { +} + +check "foo" { + data "boop_data" "boop_nested" {} + + assert { + condition = data.boop_data.boop_nested.id == null + error_message = "check failed" + } +} diff --git a/pkg/tofu/testdata/transform-attach-config/child/main.tf b/pkg/tofu/testdata/transform-attach-config/child/main.tf new file mode 100644 index 00000000000..9702079cd91 --- /dev/null +++ b/pkg/tofu/testdata/transform-attach-config/child/main.tf @@ -0,0 +1,6 @@ +data 
"aws_instance" "child_data_instance" { + data_config = 30 +} +resource "aws_instance" "child_resource_instance" { + data_config = 40 +} diff --git a/pkg/tofu/testdata/transform-attach-config/main.tf b/pkg/tofu/testdata/transform-attach-config/main.tf new file mode 100644 index 00000000000..750061a4367 --- /dev/null +++ b/pkg/tofu/testdata/transform-attach-config/main.tf @@ -0,0 +1,9 @@ +data "aws_instance" "data_instance" { + data_config = 10 +} +resource "aws_instance" "resource_instance" { + resource_config = 20 +} +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/transform-cbd-destroy-edge-both-count/main.tf b/pkg/tofu/testdata/transform-cbd-destroy-edge-both-count/main.tf new file mode 100644 index 00000000000..c19e78eaa2f --- /dev/null +++ b/pkg/tofu/testdata/transform-cbd-destroy-edge-both-count/main.tf @@ -0,0 +1,11 @@ +resource "test_object" "A" { + count = 2 + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + count = 2 + test_string = test_object.A[*].test_string[count.index] +} diff --git a/pkg/tofu/testdata/transform-cbd-destroy-edge-count/main.tf b/pkg/tofu/testdata/transform-cbd-destroy-edge-count/main.tf new file mode 100644 index 00000000000..775900fcdd8 --- /dev/null +++ b/pkg/tofu/testdata/transform-cbd-destroy-edge-count/main.tf @@ -0,0 +1,10 @@ +resource "test_object" "A" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + count = 2 + test_string = test_object.A.test_string +} diff --git a/pkg/tofu/testdata/transform-config-mode-data/main.tf b/pkg/tofu/testdata/transform-config-mode-data/main.tf new file mode 100644 index 00000000000..3c3e7e50d55 --- /dev/null +++ b/pkg/tofu/testdata/transform-config-mode-data/main.tf @@ -0,0 +1,3 @@ +data "aws_ami" "foo" {} + +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/transform-destroy-cbd-edge-basic/main.tf b/pkg/tofu/testdata/transform-destroy-cbd-edge-basic/main.tf new file mode 100644 index 
00000000000..a17d8b4e35c --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-cbd-edge-basic/main.tf @@ -0,0 +1,9 @@ +resource "test_object" "A" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + test_string = "${test_object.A.id}" +} diff --git a/pkg/tofu/testdata/transform-destroy-cbd-edge-multi/main.tf b/pkg/tofu/testdata/transform-destroy-cbd-edge-multi/main.tf new file mode 100644 index 00000000000..964bc44cfd8 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-cbd-edge-multi/main.tf @@ -0,0 +1,15 @@ +resource "test_object" "A" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "B" { + lifecycle { + create_before_destroy = true + } +} + +resource "test_object" "C" { + test_string = "${test_object.A.id}-${test_object.B.id}" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-basic/main.tf b/pkg/tofu/testdata/transform-destroy-edge-basic/main.tf new file mode 100644 index 00000000000..8afeda4feed --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-basic/main.tf @@ -0,0 +1,5 @@ +resource "test_object" "A" {} + +resource "test_object" "B" { + test_string = "${test_object.A.test_string}" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-module-only/child/main.tf b/pkg/tofu/testdata/transform-destroy-edge-module-only/child/main.tf new file mode 100644 index 00000000000..242bb335904 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-module-only/child/main.tf @@ -0,0 +1,9 @@ +resource "test_object" "a" {} + +resource "test_object" "b" { + test_string = "${test_object.a.test_string}" +} + +resource "test_object" "c" { + test_string = "${test_object.b.test_string}" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-module-only/main.tf b/pkg/tofu/testdata/transform-destroy-edge-module-only/main.tf new file mode 100644 index 00000000000..919351443d2 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-module-only/main.tf @@ -0,0 +1,4 @@ +module "child" { 
+ source = "./child" + count = 2 +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-module/child/main.tf b/pkg/tofu/testdata/transform-destroy-edge-module/child/main.tf new file mode 100644 index 00000000000..337bbe754e7 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-module/child/main.tf @@ -0,0 +1,7 @@ +resource "test_object" "b" { + test_string = "foo" +} + +output "output" { + value = "${test_object.b.test_string}" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-module/main.tf b/pkg/tofu/testdata/transform-destroy-edge-module/main.tf new file mode 100644 index 00000000000..2a42635e4f5 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-module/main.tf @@ -0,0 +1,7 @@ +resource "test_object" "a" { + test_string = "${module.child.output}" +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-multi/main.tf b/pkg/tofu/testdata/transform-destroy-edge-multi/main.tf new file mode 100644 index 00000000000..3474bf60a42 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-multi/main.tf @@ -0,0 +1,9 @@ +resource "test_object" "A" {} + +resource "test_object" "B" { + test_string = "${test_object.A.test_string}" +} + +resource "test_object" "C" { + test_string = "${test_object.B.test_string}" +} diff --git a/pkg/tofu/testdata/transform-destroy-edge-self-ref/main.tf b/pkg/tofu/testdata/transform-destroy-edge-self-ref/main.tf new file mode 100644 index 00000000000..d91e024c475 --- /dev/null +++ b/pkg/tofu/testdata/transform-destroy-edge-self-ref/main.tf @@ -0,0 +1,5 @@ +resource "test" "A" { + provisioner "foo" { + command = "${test.A.id}" + } +} diff --git a/pkg/tofu/testdata/transform-module-var-basic/child/main.tf b/pkg/tofu/testdata/transform-module-var-basic/child/main.tf new file mode 100644 index 00000000000..53f3cd731d6 --- /dev/null +++ b/pkg/tofu/testdata/transform-module-var-basic/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +output "result" { + value = "${var.value}" 
+} diff --git a/pkg/tofu/testdata/transform-module-var-basic/main.tf b/pkg/tofu/testdata/transform-module-var-basic/main.tf new file mode 100644 index 00000000000..0adb513f10e --- /dev/null +++ b/pkg/tofu/testdata/transform-module-var-basic/main.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + value = "foo" +} diff --git a/pkg/tofu/testdata/transform-module-var-nested/child/child/main.tf b/pkg/tofu/testdata/transform-module-var-nested/child/child/main.tf new file mode 100644 index 00000000000..53f3cd731d6 --- /dev/null +++ b/pkg/tofu/testdata/transform-module-var-nested/child/child/main.tf @@ -0,0 +1,5 @@ +variable "value" {} + +output "result" { + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/transform-module-var-nested/child/main.tf b/pkg/tofu/testdata/transform-module-var-nested/child/main.tf new file mode 100644 index 00000000000..b8c7f0bac24 --- /dev/null +++ b/pkg/tofu/testdata/transform-module-var-nested/child/main.tf @@ -0,0 +1,6 @@ +variable "value" {} + +module "child" { + source = "./child" + value = "${var.value}" +} diff --git a/pkg/tofu/testdata/transform-module-var-nested/main.tf b/pkg/tofu/testdata/transform-module-var-nested/main.tf new file mode 100644 index 00000000000..2c20f197927 --- /dev/null +++ b/pkg/tofu/testdata/transform-module-var-nested/main.tf @@ -0,0 +1,4 @@ +module "child" { + source = "./child" + value = "foo" +} diff --git a/pkg/tofu/testdata/transform-orphan-basic/main.tf b/pkg/tofu/testdata/transform-orphan-basic/main.tf new file mode 100644 index 00000000000..64cbf623665 --- /dev/null +++ b/pkg/tofu/testdata/transform-orphan-basic/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/transform-orphan-count-empty/main.tf b/pkg/tofu/testdata/transform-orphan-count-empty/main.tf new file mode 100644 index 00000000000..e8045d6fce1 --- /dev/null +++ b/pkg/tofu/testdata/transform-orphan-count-empty/main.tf @@ -0,0 +1 @@ +# Purposefully empty diff --git 
a/pkg/tofu/testdata/transform-orphan-count/main.tf b/pkg/tofu/testdata/transform-orphan-count/main.tf new file mode 100644 index 00000000000..acef373b35d --- /dev/null +++ b/pkg/tofu/testdata/transform-orphan-count/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + count = 3 +} diff --git a/pkg/tofu/testdata/transform-orphan-modules/main.tf b/pkg/tofu/testdata/transform-orphan-modules/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/transform-orphan-modules/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/transform-provider-basic/main.tf b/pkg/tofu/testdata/transform-provider-basic/main.tf new file mode 100644 index 00000000000..8a44e1dcbb5 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-basic/main.tf @@ -0,0 +1,2 @@ +provider "aws" {} +resource "aws_instance" "web" {} diff --git a/pkg/tofu/testdata/transform-provider-fqns-module/child/main.tf b/pkg/tofu/testdata/transform-provider-fqns-module/child/main.tf new file mode 100644 index 00000000000..5c56b769397 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-fqns-module/child/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + your-aws = { + source = "hashicorp/aws" + } + } +} + +resource "aws_instance" "web" { + provider = "your-aws" +} diff --git a/pkg/tofu/testdata/transform-provider-fqns-module/main.tf b/pkg/tofu/testdata/transform-provider-fqns-module/main.tf new file mode 100644 index 00000000000..dd582c0634b --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-fqns-module/main.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + my-aws = { + source = "hashicorp/aws" + } + } +} + +resource "aws_instance" "web" { + provider = "my-aws" +} diff --git a/pkg/tofu/testdata/transform-provider-fqns/main.tf b/pkg/tofu/testdata/transform-provider-fqns/main.tf new file mode 100644 index 00000000000..dd582c0634b --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-fqns/main.tf @@ -0,0 
+1,11 @@ +terraform { + required_providers { + my-aws = { + source = "hashicorp/aws" + } + } +} + +resource "aws_instance" "web" { + provider = "my-aws" +} diff --git a/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf b/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf new file mode 100644 index 00000000000..58363ef0c08 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/grandchild/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "baz" +} + +resource "aws_instance" "baz" { + provider = "aws.baz" +} diff --git a/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/main.tf b/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/main.tf new file mode 100644 index 00000000000..7ec80343de7 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-grandchild-inherit/child/main.tf @@ -0,0 +1,10 @@ +provider "aws" { + alias = "bar" +} + +module "grandchild" { + source = "./grandchild" + providers = { + aws.baz = aws.bar + } +} diff --git a/pkg/tofu/testdata/transform-provider-grandchild-inherit/main.tf b/pkg/tofu/testdata/transform-provider-grandchild-inherit/main.tf new file mode 100644 index 00000000000..cb9a2f9de98 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-grandchild-inherit/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + alias = "foo" + value = "config" +} + +module "child" { + source = "./child" + providers = { + aws.bar = aws.foo + } +} diff --git a/pkg/tofu/testdata/transform-provider-inherit/child/main.tf b/pkg/tofu/testdata/transform-provider-inherit/child/main.tf new file mode 100644 index 00000000000..b1f07068461 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-inherit/child/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "bar" +} + +resource "aws_instance" "thing" { + provider = aws.bar +} diff --git a/pkg/tofu/testdata/transform-provider-inherit/main.tf b/pkg/tofu/testdata/transform-provider-inherit/main.tf new file mode 
100644 index 00000000000..cb9a2f9de98 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-inherit/main.tf @@ -0,0 +1,11 @@ +provider "aws" { + alias = "foo" + value = "config" +} + +module "child" { + source = "./child" + providers = { + aws.bar = aws.foo + } +} diff --git a/pkg/tofu/testdata/transform-provider-missing-grandchild/main.tf b/pkg/tofu/testdata/transform-provider-missing-grandchild/main.tf new file mode 100644 index 00000000000..385674a891e --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-missing-grandchild/main.tf @@ -0,0 +1,3 @@ +module "sub" { + source = "./sub" +} diff --git a/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/main.tf b/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/main.tf new file mode 100644 index 00000000000..65adf2d1ccc --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/main.tf @@ -0,0 +1,5 @@ +provider "foo" {} + +module "subsub" { + source = "./subsub" +} diff --git a/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf b/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf new file mode 100644 index 00000000000..fd865a52501 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-missing-grandchild/sub/subsub/main.tf @@ -0,0 +1,2 @@ +resource "foo_instance" "one" {} +resource "bar_instance" "two" {} diff --git a/pkg/tofu/testdata/transform-provider-missing/main.tf b/pkg/tofu/testdata/transform-provider-missing/main.tf new file mode 100644 index 00000000000..976f3e5af84 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-missing/main.tf @@ -0,0 +1,3 @@ +provider "aws" {} +resource "aws_instance" "web" {} +resource "foo_instance" "web" {} diff --git a/pkg/tofu/testdata/transform-provider-prune/main.tf b/pkg/tofu/testdata/transform-provider-prune/main.tf new file mode 100644 index 00000000000..986f8840bf9 --- /dev/null +++ b/pkg/tofu/testdata/transform-provider-prune/main.tf @@ -0,0 +1,2 @@ +provider "aws" {} 
+resource "foo_instance" "web" {} diff --git a/pkg/tofu/testdata/transform-provisioner-basic/main.tf b/pkg/tofu/testdata/transform-provisioner-basic/main.tf new file mode 100644 index 00000000000..3898ac4dbe1 --- /dev/null +++ b/pkg/tofu/testdata/transform-provisioner-basic/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "web" { + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/transform-provisioner-module/child/main.tf b/pkg/tofu/testdata/transform-provisioner-module/child/main.tf new file mode 100644 index 00000000000..51b29c72a08 --- /dev/null +++ b/pkg/tofu/testdata/transform-provisioner-module/child/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/transform-provisioner-module/main.tf b/pkg/tofu/testdata/transform-provisioner-module/main.tf new file mode 100644 index 00000000000..a825a449eb1 --- /dev/null +++ b/pkg/tofu/testdata/transform-provisioner-module/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + provisioner "shell" {} +} + +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/transform-root-basic/main.tf b/pkg/tofu/testdata/transform-root-basic/main.tf new file mode 100644 index 00000000000..e4ff4b3e905 --- /dev/null +++ b/pkg/tofu/testdata/transform-root-basic/main.tf @@ -0,0 +1,5 @@ +provider "aws" {} +resource "aws_instance" "foo" {} + +provider "do" {} +resource "do_droplet" "bar" {} diff --git a/pkg/tofu/testdata/transform-targets-basic/main.tf b/pkg/tofu/testdata/transform-targets-basic/main.tf new file mode 100644 index 00000000000..47edc2a7fef --- /dev/null +++ b/pkg/tofu/testdata/transform-targets-basic/main.tf @@ -0,0 +1,22 @@ +resource "aws_vpc" "me" {} + +resource "aws_subnet" "me" { + depends_on = [ + aws_vpc.me, + ] +} + +resource "aws_instance" "me" { + depends_on = [ + aws_subnet.me, + ] +} + +resource "aws_vpc" "notme" {} +resource "aws_subnet" "notme" {} +resource "aws_instance" "notme" {} +resource "aws_instance" "notmeeither" { 
+ depends_on = [ + aws_instance.me, + ] +} diff --git a/pkg/tofu/testdata/transform-targets-downstream/child/child.tf b/pkg/tofu/testdata/transform-targets-downstream/child/child.tf new file mode 100644 index 00000000000..6548b794930 --- /dev/null +++ b/pkg/tofu/testdata/transform-targets-downstream/child/child.tf @@ -0,0 +1,14 @@ +resource "aws_instance" "foo" { +} + +module "grandchild" { + source = "./grandchild" +} + +output "id" { + value = "${aws_instance.foo.id}" +} + +output "grandchild_id" { + value = "${module.grandchild.id}" +} diff --git a/pkg/tofu/testdata/transform-targets-downstream/child/grandchild/grandchild.tf b/pkg/tofu/testdata/transform-targets-downstream/child/grandchild/grandchild.tf new file mode 100644 index 00000000000..3ad8fd07701 --- /dev/null +++ b/pkg/tofu/testdata/transform-targets-downstream/child/grandchild/grandchild.tf @@ -0,0 +1,6 @@ +resource "aws_instance" "foo" { +} + +output "id" { + value = "${aws_instance.foo.id}" +} diff --git a/pkg/tofu/testdata/transform-targets-downstream/main.tf b/pkg/tofu/testdata/transform-targets-downstream/main.tf new file mode 100644 index 00000000000..b732fdad7ea --- /dev/null +++ b/pkg/tofu/testdata/transform-targets-downstream/main.tf @@ -0,0 +1,18 @@ +resource "aws_instance" "foo" { +} + +module "child" { + source = "./child" +} + +output "root_id" { + value = "${aws_instance.foo.id}" +} + +output "child_id" { + value = "${module.child.id}" +} + +output "grandchild_id" { + value = "${module.child.grandchild_id}" +} diff --git a/pkg/tofu/testdata/transform-trans-reduce-basic/main.tf b/pkg/tofu/testdata/transform-trans-reduce-basic/main.tf new file mode 100644 index 00000000000..4fb97c7a7b9 --- /dev/null +++ b/pkg/tofu/testdata/transform-trans-reduce-basic/main.tf @@ -0,0 +1,10 @@ +resource "aws_instance" "A" {} + +resource "aws_instance" "B" { + A = "${aws_instance.A.id}" +} + +resource "aws_instance" "C" { + A = "${aws_instance.A.id}" + B = "${aws_instance.B.id}" +} diff --git 
a/pkg/tofu/testdata/update-resource-provider/main.tf b/pkg/tofu/testdata/update-resource-provider/main.tf new file mode 100644 index 00000000000..6c082d54081 --- /dev/null +++ b/pkg/tofu/testdata/update-resource-provider/main.tf @@ -0,0 +1,7 @@ +provider "aws" { + alias = "foo" +} + +resource "aws_instance" "bar" { + provider = "aws.foo" +} diff --git a/pkg/tofu/testdata/validate-bad-count/main.tf b/pkg/tofu/testdata/validate-bad-count/main.tf new file mode 100644 index 00000000000..a582e5ee39e --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-count/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + count = "${list}" +} diff --git a/pkg/tofu/testdata/validate-bad-module-output/child/main.tf b/pkg/tofu/testdata/validate-bad-module-output/child/main.tf new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/tofu/testdata/validate-bad-module-output/main.tf b/pkg/tofu/testdata/validate-bad-module-output/main.tf new file mode 100644 index 00000000000..bda34f51a4e --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-module-output/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "${module.child.bad}" +} diff --git a/pkg/tofu/testdata/validate-bad-pc/main.tf b/pkg/tofu/testdata/validate-bad-pc/main.tf new file mode 100644 index 00000000000..70ad701e6cb --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-pc/main.tf @@ -0,0 +1,5 @@ +provider "aws" { + foo = "bar" +} + +resource "aws_instance" "test" {} diff --git a/pkg/tofu/testdata/validate-bad-prov-conf/main.tf b/pkg/tofu/testdata/validate-bad-prov-conf/main.tf new file mode 100644 index 00000000000..af12124b3fa --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-prov-conf/main.tf @@ -0,0 +1,9 @@ +provider "aws" { + foo = "bar" +} + +resource "aws_instance" "test" { + provisioner "shell" { + test_string = "foo" + } +} diff --git a/pkg/tofu/testdata/validate-bad-prov-connection/main.tf b/pkg/tofu/testdata/validate-bad-prov-connection/main.tf new 
file mode 100644 index 00000000000..550714ff1d1 --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-prov-connection/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + provisioner "shell" { + test_string = "test" + connection { + user = "test" + } + } +} diff --git a/pkg/tofu/testdata/validate-bad-rc/main.tf b/pkg/tofu/testdata/validate-bad-rc/main.tf new file mode 100644 index 00000000000..152a23e0d86 --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-rc/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "test" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/validate-bad-resource-connection/main.tf b/pkg/tofu/testdata/validate-bad-resource-connection/main.tf new file mode 100644 index 00000000000..46a16717591 --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-resource-connection/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" { + connection { + user = "test" + } + provisioner "shell" { + test_string = "test" + } +} diff --git a/pkg/tofu/testdata/validate-bad-resource-count/main.tf b/pkg/tofu/testdata/validate-bad-resource-count/main.tf new file mode 100644 index 00000000000..f852a447ead --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-resource-count/main.tf @@ -0,0 +1,22 @@ +// a resource named "aws_security_groups" does not exist in the schema +variable "sg_ports" { + type = list(number) + description = "List of ingress ports" + default = [8200, 8201, 8300, 9200, 9500] +} + + +resource "aws_security_groups" "dynamicsg" { + name = "dynamicsg" + description = "Ingress for Vault" + + dynamic "ingress" { + for_each = var.sg_ports + content { + from_port = ingress.value + to_port = ingress.value + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + } +} diff --git a/pkg/tofu/testdata/validate-bad-var/main.tf b/pkg/tofu/testdata/validate-bad-var/main.tf new file mode 100644 index 00000000000..50028453d41 --- /dev/null +++ b/pkg/tofu/testdata/validate-bad-var/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource 
"aws_instance" "bar" { + foo = "${var.foo}" +} diff --git a/pkg/tofu/testdata/validate-computed-in-function/main.tf b/pkg/tofu/testdata/validate-computed-in-function/main.tf new file mode 100644 index 00000000000..504e1942612 --- /dev/null +++ b/pkg/tofu/testdata/validate-computed-in-function/main.tf @@ -0,0 +1,7 @@ +data "aws_data_source" "foo" { + optional_attr = "value" +} + +resource "aws_instance" "bar" { + attr = "${length(data.aws_data_source.foo.computed)}" +} diff --git a/pkg/tofu/testdata/validate-computed-module-var-ref/dest/main.tf b/pkg/tofu/testdata/validate-computed-module-var-ref/dest/main.tf new file mode 100644 index 00000000000..44095ea7542 --- /dev/null +++ b/pkg/tofu/testdata/validate-computed-module-var-ref/dest/main.tf @@ -0,0 +1,5 @@ +variable "destin" { } + +resource "aws_instance" "dest" { + attr = "${var.destin}" +} diff --git a/pkg/tofu/testdata/validate-computed-module-var-ref/main.tf b/pkg/tofu/testdata/validate-computed-module-var-ref/main.tf new file mode 100644 index 00000000000..d7c799cc8b6 --- /dev/null +++ b/pkg/tofu/testdata/validate-computed-module-var-ref/main.tf @@ -0,0 +1,8 @@ +module "source" { + source = "./source" +} + +module "dest" { + source = "./dest" + destin = "${module.source.sourceout}" +} diff --git a/pkg/tofu/testdata/validate-computed-module-var-ref/source/main.tf b/pkg/tofu/testdata/validate-computed-module-var-ref/source/main.tf new file mode 100644 index 00000000000..d2edc9e0f17 --- /dev/null +++ b/pkg/tofu/testdata/validate-computed-module-var-ref/source/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "source" { + attr = "foo" +} + +output "sourceout" { + value = "${aws_instance.source.attr}" +} diff --git a/pkg/tofu/testdata/validate-computed-var/main.tf b/pkg/tofu/testdata/validate-computed-var/main.tf new file mode 100644 index 00000000000..81acf7cfaa9 --- /dev/null +++ b/pkg/tofu/testdata/validate-computed-var/main.tf @@ -0,0 +1,9 @@ +provider "aws" { + value = test_instance.foo.id +} + +resource 
"aws_instance" "bar" {} + +resource "test_instance" "foo" { + value = "yes" +} diff --git a/pkg/tofu/testdata/validate-count-computed/main.tf b/pkg/tofu/testdata/validate-count-computed/main.tf new file mode 100644 index 00000000000..e7de125f226 --- /dev/null +++ b/pkg/tofu/testdata/validate-count-computed/main.tf @@ -0,0 +1,7 @@ +data "aws_data_source" "foo" { + compute = "value" +} + +resource "aws_instance" "bar" { + count = "${data.aws_data_source.foo.value}" +} diff --git a/pkg/tofu/testdata/validate-count-negative/main.tf b/pkg/tofu/testdata/validate-count-negative/main.tf new file mode 100644 index 00000000000..d5bb046533d --- /dev/null +++ b/pkg/tofu/testdata/validate-count-negative/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "test" { + count = "-5" +} diff --git a/pkg/tofu/testdata/validate-count-variable/main.tf b/pkg/tofu/testdata/validate-count-variable/main.tf new file mode 100644 index 00000000000..9c892ac2eac --- /dev/null +++ b/pkg/tofu/testdata/validate-count-variable/main.tf @@ -0,0 +1,6 @@ +variable "foo" {} + +resource "aws_instance" "foo" { + foo = "foo" + count = "${var.foo}" +} diff --git a/pkg/tofu/testdata/validate-good-module/child/main.tf b/pkg/tofu/testdata/validate-good-module/child/main.tf new file mode 100644 index 00000000000..17d8c60a772 --- /dev/null +++ b/pkg/tofu/testdata/validate-good-module/child/main.tf @@ -0,0 +1,3 @@ +output "good" { + value = "great" +} diff --git a/pkg/tofu/testdata/validate-good-module/main.tf b/pkg/tofu/testdata/validate-good-module/main.tf new file mode 100644 index 00000000000..439d20210c4 --- /dev/null +++ b/pkg/tofu/testdata/validate-good-module/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +resource "aws_instance" "bar" { + foo = "${module.child.good}" +} diff --git a/pkg/tofu/testdata/validate-good/main.tf b/pkg/tofu/testdata/validate-good/main.tf new file mode 100644 index 00000000000..fe44019b7da --- /dev/null +++ b/pkg/tofu/testdata/validate-good/main.tf @@ -0,0 +1,8 
@@ +resource "aws_instance" "foo" { + num = "2" + foo = "bar" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/pkg/tofu/testdata/validate-module-bad-rc/child/main.tf b/pkg/tofu/testdata/validate-module-bad-rc/child/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-bad-rc/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/validate-module-bad-rc/main.tf b/pkg/tofu/testdata/validate-module-bad-rc/main.tf new file mode 100644 index 00000000000..0f6991c536c --- /dev/null +++ b/pkg/tofu/testdata/validate-module-bad-rc/main.tf @@ -0,0 +1,3 @@ +module "child" { + source = "./child" +} diff --git a/pkg/tofu/testdata/validate-module-deps-cycle/a/main.tf b/pkg/tofu/testdata/validate-module-deps-cycle/a/main.tf new file mode 100644 index 00000000000..3d3b01634eb --- /dev/null +++ b/pkg/tofu/testdata/validate-module-deps-cycle/a/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "a" { } + +output "output" { + value = "${aws_instance.a.id}" +} diff --git a/pkg/tofu/testdata/validate-module-deps-cycle/b/main.tf b/pkg/tofu/testdata/validate-module-deps-cycle/b/main.tf new file mode 100644 index 00000000000..0f8fc9116e6 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-deps-cycle/b/main.tf @@ -0,0 +1,5 @@ +variable "input" {} + +resource "aws_instance" "b" { + id = "${var.input}" +} diff --git a/pkg/tofu/testdata/validate-module-deps-cycle/main.tf b/pkg/tofu/testdata/validate-module-deps-cycle/main.tf new file mode 100644 index 00000000000..11ddb64bfa7 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-deps-cycle/main.tf @@ -0,0 +1,8 @@ +module "a" { + source = "./a" +} + +module "b" { + source = "./b" + input = "${module.a.output}" +} diff --git a/pkg/tofu/testdata/validate-module-pc-inherit-unused/child/main.tf b/pkg/tofu/testdata/validate-module-pc-inherit-unused/child/main.tf new file mode 100644 index 00000000000..919f140bba6 --- /dev/null +++ 
b/pkg/tofu/testdata/validate-module-pc-inherit-unused/child/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/validate-module-pc-inherit-unused/main.tf b/pkg/tofu/testdata/validate-module-pc-inherit-unused/main.tf new file mode 100644 index 00000000000..32c8a38f1e6 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-pc-inherit-unused/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +provider "aws" { + foo = "set" +} diff --git a/pkg/tofu/testdata/validate-module-pc-inherit/child/main.tf b/pkg/tofu/testdata/validate-module-pc-inherit/child/main.tf new file mode 100644 index 00000000000..37189c1ffb6 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-pc-inherit/child/main.tf @@ -0,0 +1,3 @@ +provider "aws" {} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/validate-module-pc-inherit/main.tf b/pkg/tofu/testdata/validate-module-pc-inherit/main.tf new file mode 100644 index 00000000000..8976f4aa9f1 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-pc-inherit/main.tf @@ -0,0 +1,9 @@ +module "child" { + source = "./child" +} + +provider "aws" { + set = true +} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/validate-module-pc-vars/child/main.tf b/pkg/tofu/testdata/validate-module-pc-vars/child/main.tf new file mode 100644 index 00000000000..380cd465a39 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-pc-vars/child/main.tf @@ -0,0 +1,7 @@ +variable "value" {} + +provider "aws" { + foo = var.value +} + +resource "aws_instance" "foo" {} diff --git a/pkg/tofu/testdata/validate-module-pc-vars/main.tf b/pkg/tofu/testdata/validate-module-pc-vars/main.tf new file mode 100644 index 00000000000..5e239b40665 --- /dev/null +++ b/pkg/tofu/testdata/validate-module-pc-vars/main.tf @@ -0,0 +1,7 @@ +variable "provider_var" {} + +module "child" { + source = "./child" + + value = var.provider_var +} diff --git a/pkg/tofu/testdata/validate-required-provider-config/main.tf 
b/pkg/tofu/testdata/validate-required-provider-config/main.tf new file mode 100644 index 00000000000..898a23fdf25 --- /dev/null +++ b/pkg/tofu/testdata/validate-required-provider-config/main.tf @@ -0,0 +1,20 @@ +# This test verifies that the provider local name, local config and fqn map +# together properly when the local name does not match the type. + +terraform { + required_providers { + arbitrary = { + source = "hashicorp/aws" + } + } +} + +# hashicorp/test has required provider config attributes. This "arbitrary" +# provider configuration block should map to hashicorp/test. +provider "arbitrary" { + required_attribute = "bloop" +} + +resource "aws_instance" "test" { + provider = "arbitrary" +} diff --git a/pkg/tofu/testdata/validate-required-var/main.tf b/pkg/tofu/testdata/validate-required-var/main.tf new file mode 100644 index 00000000000..bd55ea11bf7 --- /dev/null +++ b/pkg/tofu/testdata/validate-required-var/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "aws_instance" "web" { + ami = "${var.foo}" +} diff --git a/pkg/tofu/testdata/validate-sensitive-provisioner-config/main.tf b/pkg/tofu/testdata/validate-sensitive-provisioner-config/main.tf new file mode 100644 index 00000000000..88a37275a83 --- /dev/null +++ b/pkg/tofu/testdata/validate-sensitive-provisioner-config/main.tf @@ -0,0 +1,11 @@ +variable "secret" { + type = string + default = " password123" + sensitive = true +} + +resource "aws_instance" "foo" { + provisioner "test" { + test_string = var.secret + } +} diff --git a/pkg/tofu/testdata/validate-skipped-pc-empty/main.tf b/pkg/tofu/testdata/validate-skipped-pc-empty/main.tf new file mode 100644 index 00000000000..1ad9ade8948 --- /dev/null +++ b/pkg/tofu/testdata/validate-skipped-pc-empty/main.tf @@ -0,0 +1 @@ +resource "aws_instance" "test" {} diff --git a/pkg/tofu/testdata/validate-targeted/main.tf b/pkg/tofu/testdata/validate-targeted/main.tf new file mode 100644 index 00000000000..a1e847d9a0e --- /dev/null +++ 
b/pkg/tofu/testdata/validate-targeted/main.tf @@ -0,0 +1,9 @@ +resource "aws_instance" "foo" { + num = "2" + provisioner "shell" {} +} + +resource "aws_instance" "bar" { + foo = "bar" + provisioner "shell" {} +} diff --git a/pkg/tofu/testdata/validate-var-no-default-explicit-type/main.tf b/pkg/tofu/testdata/validate-var-no-default-explicit-type/main.tf new file mode 100644 index 00000000000..5953eab4da9 --- /dev/null +++ b/pkg/tofu/testdata/validate-var-no-default-explicit-type/main.tf @@ -0,0 +1,5 @@ +variable "maybe_a_map" { + type = map(string) + + // No default +} diff --git a/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf b/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf new file mode 100644 index 00000000000..05027f75ade --- /dev/null +++ b/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf @@ -0,0 +1,8 @@ +variable "test" { + type = string + + validation { + condition = var.test != "nope" + error_message = "Value must not be \"nope\"." 
+ } +} diff --git a/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf b/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf new file mode 100644 index 00000000000..4f436db11a3 --- /dev/null +++ b/pkg/tofu/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf @@ -0,0 +1,10 @@ +variable "test" { + sensitive = true + default = "nope" +} + +module "child" { + source = "./child" + + test = var.test +} diff --git a/pkg/tofu/testdata/validate-variable-custom-validations-child/child/child.tf b/pkg/tofu/testdata/validate-variable-custom-validations-child/child/child.tf new file mode 100644 index 00000000000..05027f75ade --- /dev/null +++ b/pkg/tofu/testdata/validate-variable-custom-validations-child/child/child.tf @@ -0,0 +1,8 @@ +variable "test" { + type = string + + validation { + condition = var.test != "nope" + error_message = "Value must not be \"nope\"." 
+ } +} diff --git a/pkg/tofu/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf b/pkg/tofu/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf new file mode 100644 index 00000000000..8b8111e675c --- /dev/null +++ b/pkg/tofu/testdata/validate-variable-custom-validations-child/validate-variable-custom-validations.tf @@ -0,0 +1,5 @@ +module "child" { + source = "./child" + + test = "nope" +} diff --git a/pkg/tofu/testdata/validate-variable-ref/main.tf b/pkg/tofu/testdata/validate-variable-ref/main.tf new file mode 100644 index 00000000000..3bc9860b602 --- /dev/null +++ b/pkg/tofu/testdata/validate-variable-ref/main.tf @@ -0,0 +1,5 @@ +variable "foo" {} + +resource "aws_instance" "bar" { + foo = "${var.foo}" +} diff --git a/pkg/tofu/testdata/vars-basic-bool/main.tf b/pkg/tofu/testdata/vars-basic-bool/main.tf new file mode 100644 index 00000000000..52d90595a27 --- /dev/null +++ b/pkg/tofu/testdata/vars-basic-bool/main.tf @@ -0,0 +1,10 @@ +// At the time of writing Terraform doesn't formally support a boolean +// type, but historically this has magically worked. Lots of TF code +// relies on this so we test it now. +variable "a" { + default = true +} + +variable "b" { + default = false +} diff --git a/pkg/tofu/testdata/vars-basic/main.tf b/pkg/tofu/testdata/vars-basic/main.tf new file mode 100644 index 00000000000..af3ba5cc695 --- /dev/null +++ b/pkg/tofu/testdata/vars-basic/main.tf @@ -0,0 +1,14 @@ +variable "a" { + default = "foo" + type = string +} + +variable "b" { + default = [] + type = list(string) +} + +variable "c" { + default = {} + type = map(string) +} diff --git a/pkg/tofu/transform.go b/pkg/tofu/transform.go new file mode 100644 index 00000000000..0aabded1c06 --- /dev/null +++ b/pkg/tofu/transform.go @@ -0,0 +1,60 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/logging" +) + +// GraphTransformer is the interface that transformers implement. This +// interface is only for transforms that need entire graph visibility. +type GraphTransformer interface { + Transform(*Graph) error +} + +// GraphVertexTransformer is an interface that transforms a single +// Vertex within with graph. This is a specialization of GraphTransformer +// that makes it easy to do vertex replacement. +// +// The GraphTransformer that runs through the GraphVertexTransformers is +// VertexTransformer. +type GraphVertexTransformer interface { + Transform(dag.Vertex) (dag.Vertex, error) +} + +type graphTransformerMulti struct { + Transforms []GraphTransformer +} + +func (t *graphTransformerMulti) Transform(g *Graph) error { + var lastStepStr string + for _, t := range t.Transforms { + log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) + if err := t.Transform(g); err != nil { + return err + } + + if logging.IsDebugOrHigher() { + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s ------", t, logging.Indent(thisStepStr)) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) + } + } + } + + return nil +} + +// GraphTransformMulti combines multiple graph transformers into a single +// GraphTransformer that runs all the individual graph transformers. 
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { + return &graphTransformerMulti{Transforms: ts} +} diff --git a/pkg/tofu/transform_attach_config_provider.go b/pkg/tofu/transform_attach_config_provider.go new file mode 100644 index 00000000000..01638c8a3c1 --- /dev/null +++ b/pkg/tofu/transform_attach_config_provider.go @@ -0,0 +1,21 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// GraphNodeAttachProvider is an interface that must be implemented by nodes +// that want provider configurations attached. +type GraphNodeAttachProvider interface { + // ProviderName with no module prefix. Example: "aws". + ProviderAddr() addrs.AbsProviderConfig + + // Sets the configuration + AttachProvider(*configs.Provider) +} diff --git a/pkg/tofu/transform_attach_config_provider_meta.go b/pkg/tofu/transform_attach_config_provider_meta.go new file mode 100644 index 00000000000..0541d84600c --- /dev/null +++ b/pkg/tofu/transform_attach_config_provider_meta.go @@ -0,0 +1,20 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// GraphNodeAttachProviderMetaConfigs is an interface that must be implemented +// by nodes that want provider meta configurations attached. 
+type GraphNodeAttachProviderMetaConfigs interface { + GraphNodeConfigResource + + // Sets the configuration + AttachProviderMetaConfigs(map[addrs.Provider]*configs.ProviderMeta) +} diff --git a/pkg/tofu/transform_attach_config_resource.go b/pkg/tofu/transform_attach_config_resource.go new file mode 100644 index 00000000000..8f5cf90bf4b --- /dev/null +++ b/pkg/tofu/transform_attach_config_resource.go @@ -0,0 +1,78 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes +// that want resource configurations attached. +type GraphNodeAttachResourceConfig interface { + GraphNodeConfigResource + + // Sets the configuration + AttachResourceConfig(*configs.Resource) +} + +// AttachResourceConfigTransformer goes through the graph and attaches +// resource configuration structures to nodes that implement +// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. +// +// The attached configuration structures are directly from the configuration. +// If they're going to be modified, a copy should be made. +type AttachResourceConfigTransformer struct { + Config *configs.Config // Config is the root node in the config tree +} + +func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { + + // Go through and find GraphNodeAttachResource + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachResource implementations + arn, ok := v.(GraphNodeAttachResourceConfig) + if !ok { + continue + } + + // Determine what we're looking for + addr := arn.ResourceAddr() + + // Get the configuration. 
+ config := t.Config.Descendent(addr.Module) + if config == nil { + log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) + continue + } + var m map[string]*configs.Resource + if addr.Resource.Mode == addrs.ManagedResourceMode { + m = config.Module.ManagedResources + } else if addr.Resource.Mode == addrs.DataResourceMode { + m = config.Module.DataResources + } else { + panic("unknown resource mode: " + addr.Resource.Mode.String()) + } + coord := addr.Resource.String() + if r, ok := m[coord]; ok && r.Addr() == addr.Resource { + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + if gnapmc, ok := v.(GraphNodeAttachProviderMetaConfigs); ok { + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching provider meta configs to %s", dag.VertexName(v)) + if config.Module.ProviderMetas != nil { + gnapmc.AttachProviderMetaConfigs(config.Module.ProviderMetas) + } else { + log.Printf("[TRACE] AttachResourceConfigTransformer: no provider meta configs available to attach to %s", dag.VertexName(v)) + } + } + } + } + + return nil +} diff --git a/pkg/tofu/transform_attach_config_resource_test.go b/pkg/tofu/transform_attach_config_resource_test.go new file mode 100644 index 00000000000..52032c92e07 --- /dev/null +++ b/pkg/tofu/transform_attach_config_resource_test.go @@ -0,0 +1,71 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/zclconf/go-cty/cty/gocty" +) + +func TestModuleTransformAttachConfigTransformer(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + module := testModule(t, "transform-attach-config") + + err := (&ConfigTransformer{Config: module}).Transform(&g) + if err != nil { + t.Fatal(err) + } + + err = (&AttachResourceConfigTransformer{Config: module}).Transform(&g) + if err != nil { + t.Fatal(err) + } + + verts := g.Vertices() + + if len(verts) != 4 { + t.Fatalf("Expected 4 vertices, got %v", len(verts)) + } + + expected := map[string]map[string]int{ + "data.aws_instance.data_instance": map[string]int{ + "data_config": 10, + }, + "aws_instance.resource_instance": map[string]int{ + "resource_config": 20, + }, + "module.child.data.aws_instance.child_data_instance": map[string]int{ + "data_config": 30, + }, + "module.child.aws_instance.child_resource_instance": map[string]int{ + "data_config": 40, + }, + } + + got := make(map[string]map[string]int) + for _, v := range verts { + ar := v.(*NodeAbstractResource) + attrs, _ := ar.Config.Config.JustAttributes() + + values := make(map[string]int) + for _, attr := range attrs { + val, _ := attr.Expr.Value(nil) + var target int + gocty.FromCtyValue(val, &target) + values[attr.Name] = target + } + + got[ar.ResourceAddr().String()] = values + } + + if !reflect.DeepEqual(expected, got) { + t.Fatalf("Expected %s, got %s", spew.Sdump(expected), spew.Sdump(got)) + } +} diff --git a/pkg/tofu/transform_attach_schema.go b/pkg/tofu/transform_attach_schema.go new file mode 100644 index 00000000000..9a816bced3b --- /dev/null +++ b/pkg/tofu/transform_attach_schema.go @@ -0,0 +1,114 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/dag" +) + +// GraphNodeAttachResourceSchema is an interface implemented by node types +// that need a resource schema attached. +type GraphNodeAttachResourceSchema interface { + GraphNodeConfigResource + GraphNodeProviderConsumer + + AttachResourceSchema(schema *configschema.Block, version uint64) +} + +// GraphNodeAttachProviderConfigSchema is an interface implemented by node types +// that need a provider configuration schema attached. +type GraphNodeAttachProviderConfigSchema interface { + GraphNodeProvider + + AttachProviderConfigSchema(*configschema.Block) +} + +// GraphNodeAttachProvisionerSchema is an interface implemented by node types +// that need one or more provisioner schemas attached. +type GraphNodeAttachProvisionerSchema interface { + ProvisionedBy() []string + + // SetProvisionerSchema is called during transform for each provisioner + // type returned from ProvisionedBy, providing the configuration schema + // for each provisioner in turn. The implementer should save these for + // later use in evaluating provisioner configuration blocks. + AttachProvisionerSchema(name string, schema *configschema.Block) +} + +// AttachSchemaTransformer finds nodes that implement +// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or +// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each +// and then passes them to a method implemented by the node. +type AttachSchemaTransformer struct { + Plugins *contextPlugins + Config *configs.Config +} + +func (t *AttachSchemaTransformer) Transform(g *Graph) error { + if t.Plugins == nil { + // Should never happen with a reasonable caller, but we'll return a + // proper error here anyway so that we'll fail gracefully. 
+ return fmt.Errorf("AttachSchemaTransformer used with nil Plugins") + } + + for _, v := range g.Vertices() { + + if tv, ok := v.(GraphNodeAttachResourceSchema); ok { + addr := tv.ResourceAddr() + mode := addr.Resource.Mode + typeName := addr.Resource.Type + providerFqn := tv.Provider() + + schema, version, err := t.Plugins.ResourceTypeSchema(providerFqn, mode, typeName) + if err != nil { + return fmt.Errorf("failed to read schema for %s in %s: %w", addr, providerFqn, err) + } + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) + tv.AttachResourceSchema(schema, version) + } + + if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { + providerAddr := tv.ProviderAddr() + schema, err := t.Plugins.ProviderConfigSchema(providerAddr.Provider) + if err != nil { + return fmt.Errorf("failed to read provider configuration schema for %s: %w", providerAddr.Provider, err) + } + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) + tv.AttachProviderConfigSchema(schema) + } + + if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { + names := tv.ProvisionedBy() + for _, name := range names { + schema, err := t.Plugins.ProvisionerSchema(name) + if err != nil { + return fmt.Errorf("failed to read provisioner configuration schema for %q: %w", name, err) + } + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) + tv.AttachProvisionerSchema(name, schema) + } + } + } + + return nil +} diff --git 
a/pkg/tofu/transform_attach_state.go b/pkg/tofu/transform_attach_state.go new file mode 100644 index 00000000000..339af3a94bb --- /dev/null +++ b/pkg/tofu/transform_attach_state.go @@ -0,0 +1,73 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" +) + +// GraphNodeAttachResourceState is an interface that can be implemented +// to request that a ResourceState is attached to the node. +// +// Due to a historical naming inconsistency, the type ResourceState actually +// represents the state for a particular _instance_, while InstanceState +// represents the values for that instance during a particular phase +// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState +// is supported only for nodes that represent resource instances, even though +// the name might suggest it is for containing resources. +type GraphNodeAttachResourceState interface { + GraphNodeResourceInstance + + // Sets the state + AttachResourceState(*states.Resource) +} + +// AttachStateTransformer goes through the graph and attaches +// state to nodes that implement the interfaces above. +type AttachStateTransformer struct { + State *states.State // State is the root state +} + +func (t *AttachStateTransformer) Transform(g *Graph) error { + // If no state, then nothing to do + if t.State == nil { + log.Printf("[DEBUG] Not attaching any node states: overall state is nil") + return nil + } + + for _, v := range g.Vertices() { + // Nodes implement this interface to request state attachment. 
+ an, ok := v.(GraphNodeAttachResourceState) + if !ok { + continue + } + addr := an.ResourceInstanceAddr() + + rs := t.State.Resource(addr.ContainingResource()) + if rs == nil { + log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + is := rs.Instance(addr.Resource.Key) + if is == nil { + // We don't actually need this here, since we'll attach the whole + // resource state, but we still check because it'd be weird + // for the specific instance we're attaching to not to exist. + log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + // make sure to attach a copy of the state, so instances can modify the + // same ResourceState. + an.AttachResourceState(rs.DeepCopy()) + } + + return nil +} diff --git a/pkg/tofu/transform_check.go b/pkg/tofu/transform_check.go new file mode 100644 index 00000000000..f18e04fed1f --- /dev/null +++ b/pkg/tofu/transform_check.go @@ -0,0 +1,147 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +type checkTransformer struct { + // Config for the entire module. + Config *configs.Config + + // Operation is the current operation this node will be part of. + Operation walkOperation +} + +var _ GraphTransformer = (*checkTransformer)(nil) + +func (t *checkTransformer) Transform(graph *Graph) error { + return t.transform(graph, t.Config, graph.Vertices()) +} + +func (t *checkTransformer) transform(g *Graph, cfg *configs.Config, allNodes []dag.Vertex) error { + + if t.Operation == walkDestroy || t.Operation == walkPlanDestroy { + // Don't include anything about checks during destroy operations. 
+ // + // For other plan and normal apply operations we do everything, for + // destroy operations we do nothing. For any other operations we still + // include the check nodes, but we don't actually execute the checks + // instead we still validate their references and make sure their + // conditions make sense etc. + return nil + } + + moduleAddr := cfg.Path + + for _, check := range cfg.Module.Checks { + configAddr := check.Addr().InModule(moduleAddr) + + // We want to create a node for each check block. This node will execute + // after anything it references, and will update the checks object + // embedded in the plan and/or state. + + log.Printf("[TRACE] checkTransformer: Nodes and edges for %s", configAddr) + expand := &nodeExpandCheck{ + addr: configAddr, + config: check, + makeInstance: func(addr addrs.AbsCheck, cfg *configs.Check) dag.Vertex { + return &nodeCheckAssert{ + addr: addr, + config: cfg, + executeChecks: t.ExecuteChecks(), + } + }, + } + g.Add(expand) + + // We also need to report the checks we are going to execute before we + // try and execute them. + if t.ReportChecks() { + report := &nodeReportCheck{ + addr: configAddr, + } + g.Add(report) + + // Make sure we report our checks before we start executing the + // actual checks. + g.Connect(dag.BasicEdge(expand, report)) + + if check.DataResource != nil { + // If we have a nested data source, we need to make sure we + // also report the check before the data source executes. + // + // We loop through all the nodes in the graph to find the one + // that contains our data source and connect it. + for _, other := range allNodes { + if resource, isResource := other.(GraphNodeConfigResource); isResource { + resourceAddr := resource.ResourceAddr() + if !resourceAddr.Module.Equal(moduleAddr) { + // This resource isn't in the same module as our check + // so skip it. 
+ continue + } + + resourceCfg := cfg.Module.ResourceByAddr(resourceAddr.Resource) + if resourceCfg != nil && resourceCfg.Container != nil && resourceCfg.Container.Accessible(check.Addr()) { + // Make sure we report our checks before we execute any + // embedded data resource. + g.Connect(dag.BasicEdge(other, report)) + + // There's at most one embedded data source, and + // we've found it so stop looking. + break + } + } + } + } + } + } + + for _, child := range cfg.Children { + if err := t.transform(g, child, allNodes); err != nil { + return err + } + } + + return nil +} + +// ReportChecks returns true if this operation should report any check blocks +// that it is about to execute. +// +// This is true for planning operations, as apply operations recreate the +// expected checks from the plan. +// +// We'll also report the checks during an import operation. We still execute +// our check blocks during an import operation so they need to be reported +// first. +func (t *checkTransformer) ReportChecks() bool { + return t.Operation == walkPlan || t.Operation == walkImport +} + +// ExecuteChecks returns true if this operation should actually execute any +// check blocks in the config. +// +// If this returns false we will still create and execute check nodes in the +// graph, but they will only validate things like references and syntax. +func (t *checkTransformer) ExecuteChecks() bool { + switch t.Operation { + case walkPlan, walkApply, walkImport: + // We only actually execute the checks for plan and apply operations. + return true + default: + // For everything else, we still want to validate the checks make sense + // logically and syntactically, but we won't actually resolve the check + // conditions. 
+ return false + } +} diff --git a/pkg/tofu/transform_check_starter.go b/pkg/tofu/transform_check_starter.go new file mode 100644 index 00000000000..dc8a805e9f9 --- /dev/null +++ b/pkg/tofu/transform_check_starter.go @@ -0,0 +1,131 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +var _ GraphTransformer = (*checkStartTransformer)(nil) + +// checkStartTransformer checks if the configuration has any data blocks nested +// within check blocks, and if it does then it introduces a nodeCheckStart +// vertex that ensures all resources have been applied before it starts loading +// the nested data sources. +type checkStartTransformer struct { + // Config for the entire module. + Config *configs.Config + + // Operation is the current operation this node will be part of. + Operation walkOperation +} + +func (s *checkStartTransformer) Transform(graph *Graph) error { + if s.Operation != walkApply && s.Operation != walkPlan { + // We only actually execute the checks during plan apply operations + // so if we are doing something else we can just skip this and + // leave the graph alone. + return nil + } + + var resources []dag.Vertex + var nested []dag.Vertex + + // We're going to step through all the vertices and pull out the relevant + // resources and data sources. + for _, vertex := range graph.Vertices() { + if node, isResource := vertex.(GraphNodeCreator); isResource { + addr := node.CreateAddr() + + if addr.Resource.Resource.Mode == addrs.ManagedResourceMode { + // This is a resource, so we want to make sure it executes + // before any nested data sources. 
+ + // We can reduce the number of additional edges we write into + // the graph by only including "leaf" resources, that is + // resources that aren't referenced by other resources. If a + // resource is referenced by another resource then we know that + // it will execute before that resource so we only need to worry + // about the referencing resource. + + leafResource := true + for _, other := range graph.UpEdges(vertex) { + if otherResource, isResource := other.(GraphNodeCreator); isResource { + otherAddr := otherResource.CreateAddr() + if otherAddr.Resource.Resource.Mode == addrs.ManagedResourceMode { + // Then this resource is being referenced so skip + // it. + leafResource = false + break + } + } + } + + if leafResource { + resources = append(resources, vertex) + } + + // We've handled the resource so move to the next vertex. + continue + } + + // Now, we know we are processing a data block. + + config := s.Config + if !addr.Module.IsRoot() { + config = s.Config.Descendent(addr.Module.Module()) + } + if config == nil { + // might have been deleted, so it won't be subject to any checks + // anyway. + continue + } + + resource := config.Module.ResourceByAddr(addr.Resource.Resource) + if resource == nil { + // might have been deleted, so it won't be subject to any checks + // anyway. + continue + } + + if _, ok := resource.Container.(*configs.Check); ok { + // Then this is a data source within a check block, so let's + // make a note of it. + nested = append(nested, vertex) + } + + // Otherwise, it's just a normal data source. From a check block we + // don't really care when OpenTofu is loading non-nested data + // sources so we'll just forget about it and move on. + } + } + + if len(nested) > 0 { + + // We don't need to do any of this if we don't have any nested data + // sources, so we check that first. + // + // Otherwise we introduce a vertex that can act as a pauser between + // our nested data sources and leaf resources. 
+ + check := &nodeCheckStart{} + graph.Add(check) + + // Finally, connect everything up so it all executes in order. + + for _, vertex := range nested { + graph.Connect(dag.BasicEdge(vertex, check)) + } + + for _, vertex := range resources { + graph.Connect(dag.BasicEdge(check, vertex)) + } + } + + return nil +} diff --git a/pkg/tofu/transform_config.go b/pkg/tofu/transform_config.go new file mode 100644 index 00000000000..136639b087a --- /dev/null +++ b/pkg/tofu/transform_config.go @@ -0,0 +1,200 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +// ConfigTransformer is a GraphTransformer that adds all the resources +// from the configuration to the graph. +// +// The module used to configure this transformer must be the root module. +// +// Only resources are added to the graph. Variables, outputs, and +// providers must be added via other transforms. +// +// Unlike ConfigTransformerOld, this transformer creates a graph with +// all resources including module resources, rather than creating module +// nodes that are then "flattened". +type ConfigTransformer struct { + Concrete ConcreteResourceNodeFunc + + // Module is the module to add resources from. + Config *configs.Config + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode addrs.ResourceMode + + // Do not apply this transformer. + skip bool + + // importTargets specifies a slice of addresses that will have state + // imported for them. + importTargets []*ImportTarget + + // generateConfigPathForImportTargets tells the graph where to write any + // generated config for import targets that are not contained within config. 
+ // + // If this is empty and an import target has no config, the graph will + // simply import the state for the target and any follow-up operations will + // try to delete the imported resource unless the config is updated + // manually. + generateConfigPathForImportTargets string +} + +func (t *ConfigTransformer) Transform(g *Graph) error { + if t.skip { + return nil + } + + // If no configuration is available, we don't do anything + if t.Config == nil { + return nil + } + + // Start the transformation process + return t.transform(g, t.Config, t.generateConfigPathForImportTargets) +} + +func (t *ConfigTransformer) transform(g *Graph, config *configs.Config, generateConfigPath string) error { + // If no config, do nothing + if config == nil { + return nil + } + + // If the module is being overridden, do nothing. We don't want to create anything + // from the underlying module. + if config.Module.IsOverridden { + return nil + } + + // Add our resources + if err := t.transformSingle(g, config, generateConfigPath); err != nil { + return err + } + + // Transform all the children without generating config. + for _, c := range config.Children { + if err := t.transform(g, c, ""); err != nil { + return err + } + } + + return nil +} + +func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config, generateConfigPath string) error { + path := config.Path + module := config.Module + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) + + allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) + for _, r := range module.ManagedResources { + allResources = append(allResources, r) + } + for _, r := range module.DataResources { + allResources = append(allResources, r) + } + + // Take a copy of the import targets, so we can edit them as we go. + // Only include import targets that are targeting the current module. 
+ var importTargets []*ImportTarget + for _, target := range t.importTargets { + if targetModule := target.StaticAddr().Module; targetModule.Equal(config.Path) { + importTargets = append(importTargets, target) + } + } + + for _, r := range allResources { + relAddr := r.Addr() + + if t.ModeFilter && relAddr.Mode != t.Mode { + // Skip non-matching modes + continue + } + + // If any of the import targets can apply to this node's instances, + // filter them down to the applicable addresses. + var imports []*ImportTarget + configAddr := relAddr.InModule(path) + + var matchedIndices []int + for ix, i := range importTargets { + if target := i.StaticAddr(); target.Equal(configAddr) { + // This import target has been claimed by an actual resource, + // let's make a note of this to remove it from the targets. + matchedIndices = append(matchedIndices, ix) + imports = append(imports, i) + } + } + + for ix := len(matchedIndices) - 1; ix >= 0; ix-- { + tIx := matchedIndices[ix] + + // We do this backwards, since it means we don't have to adjust the + // later indices as we change the length of import targets. + // + // We need to do this separately, as a single resource could match + // multiple import targets. + importTargets = append(importTargets[:tIx], importTargets[tIx+1:]...) + } + + abstract := &NodeAbstractResource{ + Addr: addrs.ConfigResource{ + Resource: relAddr, + Module: path, + }, + importTargets: imports, + } + + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + // If any import targets were not claimed by resources, then let's add them + // into the graph now. 
+ // + // We should only reach this point if config generation is enabled, since we validate that all import targets have + // a resource in validateImportTargets when config generation is disabled + // + // We'll add the nodes that we know will fail, and catch them again later + // in the processing when we are in a position to raise a much more helpful + // error message. + for _, i := range importTargets { + if len(generateConfigPath) > 0 { + // Create a node with the resource and import target. This node will take care of the config generation + abstract := &NodeAbstractResource{ + // We've already validated in validateImportTargets that the address is fully resolvable + Addr: i.ResolvedAddr().ConfigResource(), + importTargets: []*ImportTarget{i}, + generateConfigPath: generateConfigPath, + } + + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } else { + // Technically we shouldn't reach this point, as we've already validated that a resource exists + // in validateImportTargets + return importResourceWithoutConfigDiags(i.StaticAddr().String(), i.Config) + } + } + + return nil +} diff --git a/pkg/tofu/transform_config_test.go b/pkg/tofu/transform_config_test.go new file mode 100644 index 00000000000..65de5cb6fb7 --- /dev/null +++ b/pkg/tofu/transform_config_test.go @@ -0,0 +1,91 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestConfigTransformer_nilModule(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + tf := &ConfigTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + if len(g.Vertices()) > 0 { + t.Fatalf("graph is not empty: %s", g.String()) + } +} + +func TestConfigTransformer(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + tf := &ConfigTransformer{Config: testModule(t, "graph-basic")} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testConfigTransformerGraphBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestConfigTransformer_mode(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + tf := &ConfigTransformer{ + Config: testModule(t, "transform-config-mode-data"), + ModeFilter: true, + Mode: addrs.DataResourceMode, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +data.aws_ami.foo +`) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestConfigTransformer_nonUnique(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(NewNodeAbstractResource( + addrs.RootModule.Resource( + addrs.ManagedResourceMode, "aws_instance", "web", + ), + )) + tf := &ConfigTransformer{Config: testModule(t, "graph-basic")} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +aws_instance.web +aws_instance.web +aws_load_balancer.weblb +aws_security_group.firewall +openstack_floating_ip.random +`) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +const testConfigTransformerGraphBasicStr = ` 
+aws_instance.web +aws_load_balancer.weblb +aws_security_group.firewall +openstack_floating_ip.random +` diff --git a/pkg/tofu/transform_destroy_cbd.go b/pkg/tofu/transform_destroy_cbd.go new file mode 100644 index 00000000000..4f594af1fc5 --- /dev/null +++ b/pkg/tofu/transform_destroy_cbd.go @@ -0,0 +1,155 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" +) + +// GraphNodeDestroyerCBD must be implemented by nodes that might be +// create-before-destroy destroyers, or might plan a create-before-destroy +// action. +type GraphNodeDestroyerCBD interface { + // CreateBeforeDestroy returns true if this node represents a node + // that is doing a CBD. + CreateBeforeDestroy() bool + + // ModifyCreateBeforeDestroy is called when the CBD state of a node + // is changed dynamically. This can return an error if this isn't + // allowed. + ModifyCreateBeforeDestroy(bool) error +} + +// ForcedCBDTransformer detects when a particular CBD-able graph node has +// dependencies with another that has create_before_destroy set that require +// it to be forced on, and forces it on. +// +// This must be used in the plan graph builder to ensure that +// create_before_destroy settings are properly propagated before constructing +// the planned changes. This requires that the plannable resource nodes +// implement GraphNodeDestroyerCBD. +type ForcedCBDTransformer struct { +} + +func (t *ForcedCBDTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + // If there are no CBD decendent (dependent nodes), then we + // do nothing here. 
+ if !t.hasCBDDescendent(g, v) { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) + continue + } + + // If this isn't naturally a CBD node, this means that an descendent is + // and we need to auto-upgrade this node to CBD. We do this because + // a CBD node depending on non-CBD will result in cycles. To avoid this, + // we always attempt to upgrade it. + log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) + if err := dn.ModifyCreateBeforeDestroy(true); err != nil { + return fmt.Errorf( + "%s: must have create before destroy enabled because "+ + "a dependent resource has CBD enabled. However, when "+ + "attempting to automatically do this, an error occurred: %w", + dag.VertexName(v), err) + } + } else { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) + } + } + return nil +} + +// hasCBDDescendent returns true if any descendent (node that depends on this) +// has CBD set. +func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { + s, _ := g.Descendents(v) + if s == nil { + return true + } + + for _, ov := range s { + dn, ok := ov.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if dn.CreateBeforeDestroy() { + // some descendent is CreateBeforeDestroy, so we need to follow suit + log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) + return true + } + } + + return false +} + +// CBDEdgeTransformer modifies the edges of create-before-destroy ("CBD") nodes +// that went through the DestroyEdgeTransformer so that they will have the +// correct dependencies. There are two parts to this: +// +// 1. With CBD, the destroy edge is inverted: the destroy depends on +// the creation. +// +// 2. Destroy for A must depend on resources that depend on A. 
This is to +// allow the destroy to only happen once nodes that depend on A successfully +// update to A. Example: adding a web server updates the load balancer +// before deleting the old web server. +// +// This transformer requires that a previous transformer has already forced +// create_before_destroy on for nodes that are depended on by explicit CBD +// nodes. This is the logic in ForcedCBDTransformer, though in practice we +// will get here by recording the CBD-ness of each change in the plan during +// the plan walk and then forcing the nodes into the appropriate setting during +// DiffTransformer when building the apply graph. +type CBDEdgeTransformer struct { + // Module and State are only needed to look up dependencies in + // any way possible. Either can be nil if not availabile. + Config *configs.Config + State *states.State +} + +func (t *CBDEdgeTransformer) Transform(g *Graph) error { + // Go through and reverse any destroy edges + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + if _, ok = v.(GraphNodeDestroyer); !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + continue + } + + // Find the resource edges + for _, e := range g.EdgesTo(v) { + src := e.Source() + + // If source is a create node, invert the edge. + // This covers both the node's own creator, as well as reversing + // any dependants' edges. + if _, ok := src.(GraphNodeCreator); ok { + log.Printf("[TRACE] CBDEdgeTransformer: reversing edge %s -> %s", dag.VertexName(src), dag.VertexName(v)) + g.RemoveEdge(e) + g.Connect(dag.BasicEdge(v, src)) + } + } + } + return nil +} diff --git a/pkg/tofu/transform_destroy_cbd_test.go b/pkg/tofu/transform_destroy_cbd_test.go new file mode 100644 index 00000000000..26fc7c2b254 --- /dev/null +++ b/pkg/tofu/transform_destroy_cbd_test.go @@ -0,0 +1,373 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "regexp" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" +) + +func cbdTestGraph(t *testing.T, mod string, changes *plans.Changes, state *states.State) *Graph { + module := testModule(t, mod) + + applyBuilder := &ApplyGraphBuilder{ + Config: module, + Changes: changes, + Plugins: simpleMockPluginLibrary(), + State: state, + } + g, err := (&BasicGraphBuilder{ + Steps: cbdTestSteps(applyBuilder.Steps()), + Name: "ApplyGraphBuilder", + }).Build(addrs.RootModuleInstance) + if err != nil { + t.Fatalf("err: %s", err) + } + + return filterInstances(g) +} + +// override the apply graph builder to halt the process after CBD +func cbdTestSteps(steps []GraphTransformer) []GraphTransformer { + found := false + var i int + var t GraphTransformer + for i, t = range steps { + if _, ok := t.(*CBDEdgeTransformer); ok { + found = true + break + } + } + + if !found { + panic("CBDEdgeTransformer not found") + } + + // re-add the root node so we have a valid graph for a walk, then reduce + // the graph for less output + steps = append(steps[:i+1], &CloseRootModuleTransformer{}) + steps = append(steps, &TransitiveReductionTransformer{}) + + return steps +} + +// remove extra nodes for easier test comparisons +func filterInstances(g *Graph) *Graph { + for _, v := range g.Vertices() { + if _, ok := v.(GraphNodeResourceInstance); !ok { + // connect around the node to remove it without breaking deps + for _, down := range g.DownEdges(v) { + for _, up := range g.UpEdges(v) { + g.Connect(dag.BasicEdge(up, down)) + } + } + + g.Remove(v) + } + + } + return g +} + +func TestCBDEdgeTransformer(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: 
plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + g := cbdTestGraph(t, "transform-destroy-cbd-edge-basic", changes, state) + g = filterInstances(g) + + actual := strings.TrimSpace(g.String()) + expected := regexp.MustCompile(strings.TrimSpace(` +(?m)test_object.A +test_object.A \(destroy deposed \w+\) + test_object.B +test_object.B + test_object.A +`)) + + if !expected.MatchString(actual) { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestCBDEdgeTransformerMulti(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.C"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.C").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"C","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{ + mustConfigResourceAddr("test_object.A"), + mustConfigResourceAddr("test_object.B"), + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + g := cbdTestGraph(t, "transform-destroy-cbd-edge-multi", changes, state) + g = filterInstances(g) + + actual := strings.TrimSpace(g.String()) + expected := regexp.MustCompile(strings.TrimSpace(` +(?m)test_object.A +test_object.A \(destroy deposed \w+\) + test_object.C +test_object.B +test_object.B \(destroy deposed \w+\) + test_object.C +test_object.C + test_object.A + test_object.B +`)) + + if !expected.MatchString(actual) { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestCBDEdgeTransformer_depNonCBDCount(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B[0]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B[1]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + 
root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + g := cbdTestGraph(t, "transform-cbd-destroy-edge-count", changes, state) + + actual := strings.TrimSpace(g.String()) + expected := regexp.MustCompile(strings.TrimSpace(` +(?m)test_object.A +test_object.A \(destroy deposed \w+\) + test_object.B\[0\] + test_object.B\[1\] +test_object.B\[0\] + test_object.A +test_object.B\[1\] + test_object.A`)) + + if !expected.MatchString(actual) { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestCBDEdgeTransformer_depNonCBDCountBoth(t *testing.T) { + changes := &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: mustResourceInstanceAddr("test_object.A[0]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.A[1]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.CreateThenDelete, + }, + }, + { + Addr: 
mustResourceInstanceAddr("test_object.B[0]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + { + Addr: mustResourceInstanceAddr("test_object.B[1]"), + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + }, + }, + }, + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_list":["x"]}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + g := cbdTestGraph(t, "transform-cbd-destroy-edge-both-count", changes, state) + + actual := strings.TrimSpace(g.String()) + expected := regexp.MustCompile(strings.TrimSpace(` +test_object.A\[0\] +test_object.A\[0\] \(destroy deposed \w+\) + test_object.B\[0\] + test_object.B\[1\] +test_object.A\[1\] +test_object.A\[1\] \(destroy deposed \w+\) + 
test_object.B\[0\] + test_object.B\[1\] +test_object.B\[0\] + test_object.A\[0\] + test_object.A\[1\] +test_object.B\[1\] + test_object.A\[0\] + test_object.A\[1\] +`)) + + if !expected.MatchString(actual) { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} diff --git a/pkg/tofu/transform_destroy_edge.go b/pkg/tofu/transform_destroy_edge.go new file mode 100644 index 00000000000..45ada1e628b --- /dev/null +++ b/pkg/tofu/transform_destroy_edge.go @@ -0,0 +1,391 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/plans" +) + +// GraphNodeDestroyer must be implemented by nodes that destroy resources. +type GraphNodeDestroyer interface { + dag.Vertex + + // DestroyAddr is the address of the resource that is being + // destroyed by this node. If this returns nil, then this node + // is not destroying anything. + DestroyAddr() *addrs.AbsResourceInstance +} + +// GraphNodeCreator must be implemented by nodes that create OR update resources. +type GraphNodeCreator interface { + // CreateAddr is the address of the resource being created or updated + CreateAddr() *addrs.AbsResourceInstance +} + +// DestroyEdgeTransformer is a GraphTransformer that creates the proper +// references for destroy resources. Destroy resources are more complex +// in that they must be depend on the destruction of resources that +// in turn depend on the CREATION of the node being destroy. +// +// That is complicated. Visually: +// +// B_d -> A_d -> A -> B +// +// Notice that A destroy depends on B destroy, while B create depends on +// A create. They're inverted. This must be done for example because often +// dependent resources will block parent resources from deleting. 
Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+	// FIXME: GraphNodeCreators are not always applying changes, and should not
+	// participate in the destroy graph if there are no operations which could
+	// interact with destroy nodes. We need Changes for now to detect the
+	// action type, but perhaps this should be indicated somehow by the
+	// DiffTransformer which was intended to be the only transformer operating
+	// from the change set.
+	Changes *plans.Changes
+
+	// FIXME: Operation will not be needed here once we can better track
+	// inter-provider dependencies and remove the cycle checks in
+	// tryInterProviderDestroyEdge.
+	Operation walkOperation
+}
+
+// tryInterProviderDestroyEdge checks if we're inserting a destroy edge
+// across a provider boundary, and only adds the edge if it results in no cycles.
+//
+// FIXME: The cycles can arise in valid configurations when a provider depends
+// on resources from another provider. In the future we may want to inspect
+// the dependencies of the providers themselves, to avoid needing to use the
+// blunt hammer of checking for cycles.
+//
+// A reduced example of this dependency problem looks something like:
+/*
+
+createA <- createB
+ | \ / |
+ | providerB <- |
+ v \ v
+destroyA -------------> destroyB
+
+*/
+//
+// The edge from destroyA to destroyB would be skipped in this case, but there
+// are still other combinations of changes which could connect the A and B
+// groups around providerB in various ways.
+//
+// The most difficult problem here happens during a full destroy operation.
+// That creates a special case where resources on which a provider depends must
+// exist for evaluation before they are destroyed. This means that any provider
+// dependencies must wait until all that provider's resources have first been
+// destroyed.
This is where these cross-provider edges are still required to +// ensure the correct order. +func (t *DestroyEdgeTransformer) tryInterProviderDestroyEdge(g *Graph, from, to dag.Vertex) { + e := dag.BasicEdge(from, to) + g.Connect(e) + + // If this is a complete destroy operation, then there are no create/update + // nodes to worry about and we can accept the edge without deeper inspection. + if t.Operation == walkDestroy || t.Operation == walkPlanDestroy { + return + } + + // getComparableProvider inspects the node to try and get the most precise + // description of the provider being used to help determine if 2 nodes are + // from the same provider instance. + getComparableProvider := func(pc GraphNodeProviderConsumer) string { + ps := pc.Provider().String() + + // we don't care about `exact` here, since we're only looking for any + // clue that the providers may differ. + p, _ := pc.ProvidedBy() + switch p := p.(type) { + case addrs.AbsProviderConfig: + ps = p.String() + case addrs.LocalProviderConfig: + ps = p.String() + } + + return ps + } + + pc, ok := from.(GraphNodeProviderConsumer) + if !ok { + return + } + fromProvider := getComparableProvider(pc) + + pc, ok = to.(GraphNodeProviderConsumer) + if !ok { + return + } + toProvider := getComparableProvider(pc) + + // Check for cycles, and back out the edge if there are any. + // The cycles we are looking for only appears between providers, so don't + // waste time checking for cycles if both nodes use the same provider. + if fromProvider != toProvider && len(g.Cycles()) > 0 { + log.Printf("[DEBUG] DestroyEdgeTransformer: skipping inter-provider edge %s->%s which creates a cycle", + dag.VertexName(from), dag.VertexName(to)) + g.RemoveEdge(e) + } +} + +func (t *DestroyEdgeTransformer) Transform(g *Graph) error { + // Build a map of what is being destroyed (by address string) to + // the list of destroyers. 
+ destroyers := make(map[string][]GraphNodeDestroyer) + + // Record the creators, which will need to depend on the destroyers if they + // are only being updated. + creators := make(map[string][]GraphNodeCreator) + + // destroyersByResource records each destroyer by the ConfigResource + // address. We use this because dependencies are only referenced as + // resources and have no index or module instance information, but we will + // want to connect all the individual instances for correct ordering. + destroyersByResource := make(map[string][]GraphNodeDestroyer) + for _, v := range g.Vertices() { + switch n := v.(type) { + case GraphNodeDestroyer: + addrP := n.DestroyAddr() + if addrP == nil { + log.Printf("[WARN] DestroyEdgeTransformer: %q (%T) has no destroy address", dag.VertexName(n), v) + continue + } + addr := *addrP + + key := addr.String() + log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(n), v, key) + destroyers[key] = append(destroyers[key], n) + + resAddr := addr.ContainingResource().Config().String() + destroyersByResource[resAddr] = append(destroyersByResource[resAddr], n) + case GraphNodeCreator: + addr := n.CreateAddr() + cfgAddr := addr.ContainingResource().Config().String() + + if t.Changes == nil { + // unit tests may not have changes + creators[cfgAddr] = append(creators[cfgAddr], n) + break + } + + // NoOp changes should not participate in the destroy dependencies. + rc := t.Changes.ResourceInstance(*addr) + if rc != nil && rc.Action != plans.NoOp { + creators[cfgAddr] = append(creators[cfgAddr], n) + } + } + } + + // If we aren't destroying anything, there will be no edges to make + // so just exit early and avoid future work. + if len(destroyers) == 0 { + return nil + } + + // Go through and connect creators to destroyers. 
Going along with + // our example, this makes: A_d => A + for _, v := range g.Vertices() { + cn, ok := v.(GraphNodeCreator) + if !ok { + continue + } + + addr := cn.CreateAddr() + if addr == nil { + continue + } + + for _, d := range destroyers[addr.String()] { + // For illustrating our example + a_d := d.(dag.Vertex) + a := v + + log.Printf( + "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", + dag.VertexName(a), dag.VertexName(a_d)) + + g.Connect(dag.BasicEdge(a, a_d)) + } + } + + // connect creators to any destroyers on which they may depend + for _, cs := range creators { + for _, c := range cs { + ri, ok := c.(GraphNodeResourceInstance) + if !ok { + continue + } + + for _, resAddr := range ri.StateDependencies() { + for _, desDep := range destroyersByResource[resAddr.String()] { + if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(c, desDep) { + log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(c), dag.VertexName(desDep)) + g.Connect(dag.BasicEdge(c, desDep)) + } else { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(c), dag.VertexName(desDep)) + } + } + } + } + } + + // Connect destroy dependencies as stored in the state + for _, ds := range destroyers { + for _, des := range ds { + ri, ok := des.(GraphNodeResourceInstance) + if !ok { + continue + } + + for _, resAddr := range ri.StateDependencies() { + for _, desDep := range destroyersByResource[resAddr.String()] { + if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(desDep, des) { + log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des)) + t.tryInterProviderDestroyEdge(g, desDep, des) + } else { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(desDep), dag.VertexName(des)) + } + } + + // We can have some 
create or update nodes which were
+				// dependents of the destroy node. If they have no destroyer
+				// themselves, make the connection directly from the creator.
+				for _, createDep := range creators[resAddr.String()] {
+					if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(createDep, des) {
+						log.Printf("[DEBUG] DestroyEdgeTransformer2: %s has stored dependency of %s\n", dag.VertexName(createDep), dag.VertexName(des))
+						t.tryInterProviderDestroyEdge(g, createDep, des)
+					} else {
+						log.Printf("[TRACE] DestroyEdgeTransformer2: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(createDep), dag.VertexName(des))
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// pruneUnusedNodesTransformer removes any nodes that aren't needed when destroying modules.
+// Variables, outputs, locals, and expanders may not be able to evaluate
+// correctly, so we can remove these if nothing depends on them. The module
+// closers also need to disable their use of expansion if the module itself is
+// no longer present.
+type pruneUnusedNodesTransformer struct {
+	// The plan graph builder will skip this transformer except during a full
+	// destroy. Planning normally involves all nodes, but during a destroy plan
+	// we may need to prune things which are in the configuration but do not
+	// exist in state to evaluate.
+	skip bool
+}
+
+func (t *pruneUnusedNodesTransformer) Transform(g *Graph) error {
+	if t.skip {
+		return nil
+	}
+
+	// We need a reverse depth first walk of modules, processing them in order
+	// from the leaf modules to the root. This allows us to remove unneeded
+	// dependencies from child modules, freeing up nodes in the parent module
+	// to also be removed.
+ + nodes := g.Vertices() + + for removed := true; removed; { + removed = false + + for i := 0; i < len(nodes); i++ { + // run this in a closure, so we can return early rather than + // dealing with complex looping and labels + func() { + n := nodes[i] + switch n := n.(type) { + case graphNodeTemporaryValue: + // root module outputs indicate they are not temporary by + // returning false here. + if !n.temporaryValue() { + return + } + + // temporary values, which consist of variables, locals, + // and outputs, must be kept if anything refers to them. + for _, v := range g.UpEdges(n) { + // keep any value which is connected through a + // reference + if _, ok := v.(GraphNodeReferencer); ok { + return + } + } + + case graphNodeExpandsInstances: + // Any nodes that expand instances are kept when their + // instances may need to be evaluated. + for _, v := range g.UpEdges(n) { + switch v.(type) { + case graphNodeExpandsInstances: + // Root module output values (which the following + // condition matches) are exempt because we know + // there is only ever exactly one instance of the + // root module, and so it's not actually important + // to expand it and so this lets us do a bit more + // pruning than we'd be able to do otherwise. + if tmp, ok := v.(graphNodeTemporaryValue); ok && !tmp.temporaryValue() { + continue + } + + // expanders can always depend on module expansion + // themselves + return + case GraphNodeResourceInstance: + // resource instances always depend on their + // resource node, which is an expander + return + } + } + + case GraphNodeProvider: + // Only keep providers for evaluation if they have + // resources to handle. + // The provider transformers removed most unused providers + // earlier, however there may be more to prune now based on + // targeting or a destroy with no related instances in the + // state. + // TODO: consider replacing this with an actual "references" check instead of the simple type check below. 
+ // Due to provider functions, many provider references through GraphNodeReferencer still are required. + des, _ := g.Descendents(n) + for _, v := range des { + switch v.(type) { + case GraphNodeProviderConsumer: + return + case GraphNodeReferencer: + return + } + } + + default: + return + } + + log.Printf("[DEBUG] pruneUnusedNodes: %s is no longer needed, removing", dag.VertexName(n)) + g.Remove(n) + removed = true + + // remove the node from our iteration as well + last := len(nodes) - 1 + nodes[i], nodes[last] = nodes[last], nodes[i] + nodes = nodes[:last] + }() + } + } + + return nil +} diff --git a/pkg/tofu/transform_destroy_edge_test.go b/pkg/tofu/transform_destroy_edge_test.go new file mode 100644 index 00000000000..c18f630928c --- /dev/null +++ b/pkg/tofu/transform_destroy_edge_test.go @@ -0,0 +1,600 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestDestroyEdgeTransformer_basic(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testDestroyNode("test_object.A")) + g.Add(testDestroyNode("test_object.B")) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: 
[]byte(`{"id":"B","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDestroyEdgeBasicStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestDestroyEdgeTransformer_multi(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testDestroyNode("test_object.A")) + g.Add(testDestroyNode("test_object.B")) + g.Add(testDestroyNode("test_object.C")) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.C").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"C","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{ + mustConfigResourceAddr("test_object.A"), + mustConfigResourceAddr("test_object.B"), + }, + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDestroyEdgeMultiStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestDestroyEdgeTransformer_selfRef(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testDestroyNode("test_object.A")) + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDestroyEdgeSelfRefStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestDestroyEdgeTransformer_module(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testDestroyNode("module.child.test_object.b")) + g.Add(testDestroyNode("test_object.a")) + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("module.child.test_object.b")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b","test_string":"x"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + 
+ if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDestroyEdgeModuleStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestDestroyEdgeTransformer_moduleOnly(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + + state := states.NewState() + for moduleIdx := 0; moduleIdx < 2; moduleIdx++ { + g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.a", moduleIdx))) + g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.b", moduleIdx))) + g.Add(testDestroyNode(fmt.Sprintf("module.child[%d].test_object.c", moduleIdx))) + + child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.IntKey(moduleIdx))) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.b").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"b","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{ + mustConfigResourceAddr("module.child.test_object.a"), + }, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + child.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.c").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"c","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{ + mustConfigResourceAddr("module.child.test_object.a"), + mustConfigResourceAddr("module.child.test_object.b"), + }, + }, 
+ mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + } + + if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + // The analyses done in the destroy edge transformer are between + // not-yet-expanded objects, which is conservative and so it will generate + // edges that aren't strictly necessary. As a special case we filter out + // any edges that are between resources instances that are in different + // instances of the same module, because those edges are never needed + // (one instance of a module cannot depend on another instance of the + // same module) and including them can, in complex cases, cause cycles due + // to unnecessary interactions between destroyed and created module + // instances in the same plan. + // + // Therefore below we expect to see the dependencies within each instance + // of module.child reflected, but we should not see any dependencies + // _between_ instances of module.child. 
+ + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +module.child[0].test_object.a (destroy) + module.child[0].test_object.b (destroy) + module.child[0].test_object.c (destroy) +module.child[0].test_object.b (destroy) + module.child[0].test_object.c (destroy) +module.child[0].test_object.c (destroy) +module.child[1].test_object.a (destroy) + module.child[1].test_object.b (destroy) + module.child[1].test_object.c (destroy) +module.child[1].test_object.b (destroy) + module.child[1].test_object.c (destroy) +module.child[1].test_object.c (destroy) +`) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestDestroyEdgeTransformer_destroyThenUpdate(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testUpdateNode("test_object.A")) + g.Add(testDestroyNode("test_object.B")) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A","test_string":"old"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + expected := strings.TrimSpace(` +test_object.A + test_object.B (destroy) +test_object.B (destroy) +`) + actual := strings.TrimSpace(g.String()) + + if 
actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestPruneUnusedNodesTransformer_rootModuleOutputValues(t *testing.T) { + // This is a kinda-weird test case covering the very narrow situation + // where a root module output value depends on a resource, where we + // need to make sure that the output value doesn't block pruning of + // the resource from the graph. This special case exists because although + // root module objects are "expanders", they in practice always expand + // to exactly one instance and so don't have the usual requirement of + // needing to stick around in order to support downstream expanders + // when there are e.g. nested expanding modules. + + // In order to keep this test focused on the pruneUnusedNodesTransformer + // as much as possible we're using a minimal graph construction here which + // is just enough to get the nodes we need, but this does mean that this + // test might be invalidated by future changes to the apply graph builder, + // and so if something seems off here it might help to compare the + // following with the real apply graph transformer and verify whether + // this smaller construction is still realistic enough to be a valid test. + // It might be valid to change or remove this test to "make it work", as + // long as you verify that there is still _something_ upholding the + // invariant that a root module output value should not block a resource + // node from being pruned from the graph. 
+ + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &nodeExpandApplyableResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + resourceInstAddr := mustResourceInstanceAddr("test.a") + providerCfgAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString("foo/test"), + } + emptyObjDynamicVal, err := plans.NewDynamicValue(cty.EmptyObjectVal, cty.EmptyObject) + if err != nil { + t.Fatal(err) + } + nullObjDynamicVal, err := plans.NewDynamicValue(cty.NullVal(cty.EmptyObject), cty.EmptyObject) + if err != nil { + t.Fatal(err) + } + + config := testModuleInline(t, map[string]string{ + "main.tf": ` + resource "test" "a" { + } + + output "test" { + value = test.a.foo + } + `, + }) + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + resourceInstAddr, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + providerCfgAddr, + ) + }) + changes := plans.NewChanges() + changes.SyncWrapper().AppendResourceInstanceChange(&plans.ResourceInstanceChangeSrc{ + Addr: resourceInstAddr, + PrevRunAddr: resourceInstAddr, + ProviderAddr: providerCfgAddr, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Delete, + Before: emptyObjDynamicVal, + After: nullObjDynamicVal, + }, + }) + + builder := &BasicGraphBuilder{ + Steps: []GraphTransformer{ + &ConfigTransformer{ + Concrete: concreteResource, + Config: config, + }, + &OutputTransformer{ + Config: config, + }, + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: state, + Changes: changes, + }, + &ReferenceTransformer{}, + &AttachDependenciesTransformer{}, + &pruneUnusedNodesTransformer{}, + &CloseRootModuleTransformer{}, + }, + } + graph, diags := builder.Build(addrs.RootModuleInstance) + assertNoDiagnostics(t, diags) + 
+ // At this point, thanks to pruneUnusedNodesTransformer, we should still + // have the node for the output value, but the "test.a (expand)" node + // should've been pruned in recognition of the fact that we're performing + // a destroy and therefore we only need the "test.a (destroy)" node. + + nodesByName := make(map[string]dag.Vertex) + nodesByResourceExpand := make(map[string]dag.Vertex) + for _, n := range graph.Vertices() { + name := dag.VertexName(n) + if _, exists := nodesByName[name]; exists { + t.Fatalf("multiple nodes have name %q", name) + } + nodesByName[name] = n + + if exp, ok := n.(*nodeExpandApplyableResource); ok { + addr := exp.Addr + if _, exists := nodesByResourceExpand[addr.String()]; exists { + t.Fatalf("multiple nodes are expanders for %s", addr) + } + nodesByResourceExpand[addr.String()] = exp + } + } + + // NOTE: The following is sensitive to the current name string formats we + // use for these particular node types. These names are not contractual + // so if this breaks in future it is fine to update these names to the new + // names as long as you verify first that the new names correspond to + // the same meaning as what we're assuming below. + if _, exists := nodesByName["test.a (destroy)"]; !exists { + t.Errorf("missing destroy node for resource instance test.a") + } + if _, exists := nodesByName["output.test (expand)"]; !exists { + t.Errorf("missing expand for output value 'test'") + } + + // We _must not_ have any node that expands a resource. 
+ if len(nodesByResourceExpand) != 0 { + t.Errorf("resource expand nodes remain the graph after transform; should've been pruned\n%s", spew.Sdump(nodesByResourceExpand)) + } +} + +// NoOp changes should not be participating in the destroy sequence +func TestDestroyEdgeTransformer_noOp(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(testDestroyNode("test_object.A")) + g.Add(testUpdateNode("test_object.B")) + g.Add(testDestroyNode("test_object.C")) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.A").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"A"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.B").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"B","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_object.C").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"C","test_string":"x"}`), + Dependencies: []addrs.ConfigResource{mustConfigResourceAddr("test_object.A"), + mustConfigResourceAddr("test_object.B")}, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`), + ) + + if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil { + t.Fatal(err) + } + + tf := &DestroyEdgeTransformer{ + // We only need a minimal object to indicate GraphNodeCreator change is + // a NoOp here. 
		Changes: &plans.Changes{
			Resources: []*plans.ResourceInstanceChangeSrc{
				{
					Addr:      mustResourceInstanceAddr("test_object.B"),
					ChangeSrc: plans.ChangeSrc{Action: plans.NoOp},
				},
			},
		},
	}
	if err := tf.Transform(&g); err != nil {
		t.Fatalf("err: %s", err)
	}

	expected := strings.TrimSpace(`
test_object.A (destroy)
  test_object.C (destroy)
test_object.B
test_object.C (destroy)`)

	actual := strings.TrimSpace(g.String())
	if actual != expected {
		t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected)
	}
}

func TestDestroyEdgeTransformer_dataDependsOn(t *testing.T) {
	g := Graph{Path: addrs.RootModuleInstance}

	addrA := mustResourceInstanceAddr("test_object.A")
	instA := NewNodeAbstractResourceInstance(addrA)
	a := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: instA}
	g.Add(a)

	// B here represents a data source, which is effectively an update during
	// apply, but won't have dependencies stored in the state.
	addrB := mustResourceInstanceAddr("test_object.B")
	instB := NewNodeAbstractResourceInstance(addrB)
	instB.Dependencies = append(instB.Dependencies, addrA.ConfigResource())
	b := &NodeApplyableResourceInstance{NodeAbstractResourceInstance: instB}

	g.Add(b)

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		mustResourceInstanceAddr("test_object.A").Resource,
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"A"}`),
		},
		mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"]`),
	)

	if err := (&AttachStateTransformer{State: state}).Transform(&g); err != nil {
		t.Fatal(err)
	}

	tf := &DestroyEdgeTransformer{}
	if err := tf.Transform(&g); err != nil {
		t.Fatalf("err: %s", err)
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(`
test_object.A (destroy)
test_object.B
  test_object.A (destroy)
`)
	if actual != expected {
+ t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func testDestroyNode(addrString string) GraphNodeDestroyer { + instAddr := mustResourceInstanceAddr(addrString) + inst := NewNodeAbstractResourceInstance(instAddr) + return &NodeDestroyResourceInstance{NodeAbstractResourceInstance: inst} +} + +func testUpdateNode(addrString string) GraphNodeCreator { + instAddr := mustResourceInstanceAddr(addrString) + inst := NewNodeAbstractResourceInstance(instAddr) + return &NodeApplyableResourceInstance{NodeAbstractResourceInstance: inst} +} + +const testTransformDestroyEdgeBasicStr = ` +test_object.A (destroy) + test_object.B (destroy) +test_object.B (destroy) +` + +const testTransformDestroyEdgeMultiStr = ` +test_object.A (destroy) + test_object.B (destroy) + test_object.C (destroy) +test_object.B (destroy) + test_object.C (destroy) +test_object.C (destroy) +` + +const testTransformDestroyEdgeSelfRefStr = ` +test_object.A (destroy) +` + +const testTransformDestroyEdgeModuleStr = ` +module.child.test_object.b (destroy) + test_object.a (destroy) +test_object.a (destroy) +` diff --git a/pkg/tofu/transform_diff.go b/pkg/tofu/transform_diff.go new file mode 100644 index 00000000000..30313f8a7b4 --- /dev/null +++ b/pkg/tofu/transform_diff.go @@ -0,0 +1,241 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/plans" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// DiffTransformer is a GraphTransformer that adds graph nodes representing +// each of the resource changes described in the given Changes object. 
type DiffTransformer struct {
	// Concrete, when set, wraps each abstract resource instance node to
	// produce the concrete node type that is added to the graph.
	Concrete ConcreteResourceInstanceNodeFunc
	State    *states.State
	Changes  *plans.Changes
	Config   *configs.Config
}

// hasConfigConditions returns true if the given resource instance has either
// Preconditions or Postconditions defined in the configuration.
func (t *DiffTransformer) hasConfigConditions(addr addrs.AbsResourceInstance) bool {
	// unit tests may have no config
	if t.Config == nil {
		return false
	}

	cfg := t.Config.DescendentForInstance(addr.Module)
	if cfg == nil {
		return false
	}

	res := cfg.Module.ResourceByAddr(addr.ConfigResource().Resource)
	if res == nil {
		return false
	}

	return len(res.Preconditions) > 0 || len(res.Postconditions) > 0
}

func (t *DiffTransformer) Transform(g *Graph) error {
	if t.Changes == nil || len(t.Changes.Resources) == 0 {
		// Nothing to do!
		return nil
	}

	// Go through all the modules in the diff.
	log.Printf("[TRACE] DiffTransformer starting")

	var diags tfdiags.Diagnostics
	state := t.State
	changes := t.Changes

	// DiffTransformer creates resource _instance_ nodes. If there are any
	// whole-resource nodes already in the graph, we must ensure that they
	// get evaluated before any of the corresponding instances by creating
	// dependency edges, so we'll do some prep work here to ensure we'll only
	// create connections to nodes that existed before we started here.
	resourceNodes := map[string][]GraphNodeConfigResource{}
	for _, node := range g.Vertices() {
		rn, ok := node.(GraphNodeConfigResource)
		if !ok {
			continue
		}
		// We ignore any instances that _also_ implement
		// GraphNodeResourceInstance, since in the unlikely event that they
		// do exist we'd probably end up creating cycles by connecting them.
+ if _, ok := node.(GraphNodeResourceInstance); ok { + continue + } + + addr := rn.ResourceAddr().String() + resourceNodes[addr] = append(resourceNodes[addr], rn) + } + + for _, rc := range changes.Resources { + addr := rc.Addr + dk := rc.DeposedKey + + log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) + + // Depending on the action we'll need some different combinations of + // nodes, because destroying uses a special node type separate from + // other actions. + var update, delete, forget, createBeforeDestroy bool + switch rc.Action { + case plans.NoOp: + // For a no-op change we don't take any action but we still + // run any condition checks associated with the object, to + // make sure that they still hold when considering the + // results of other changes. + update = t.hasConfigConditions(addr) + case plans.Delete: + delete = true + case plans.Forget: + forget = true + case plans.DeleteThenCreate, plans.CreateThenDelete: + update = true + delete = true + createBeforeDestroy = (rc.Action == plans.CreateThenDelete) + default: + update = true + } + + // A deposed instance may only have a change of Delete, Forget or NoOp. + // A NoOp can happen if the provider shows it no longer exists during + // the most recent ReadResource operation. + if dk != states.NotDeposed && !(rc.Action == plans.Delete || rc.Action == plans.NoOp || rc.Action == plans.Forget) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change for deposed object", + fmt.Sprintf("The plan contains a non-removal change for %s deposed object %s. 
The only valid actions for a deposed object is to destroy it or forget it, so this is a bug in OpenTofu.", addr, dk), + )) + continue + } + + // If we're going to do a create_before_destroy Replace operation then + // we need to allocate a DeposedKey to use to retain the + // not-yet-destroyed prior object, so that the delete node can destroy + // _that_ rather than the newly-created node, which will be current + // by the time the delete node is visited. + if update && delete && createBeforeDestroy { + // In this case, variable dk will be the _pre-assigned_ DeposedKey + // that must be used if the update graph node deposes the current + // instance, which will then align with the same key we pass + // into the destroy node to ensure we destroy exactly the deposed + // object we expect. + if state != nil { + ris := state.ResourceInstance(addr) + if ris == nil { + // Should never happen, since we don't plan to replace an + // instance that doesn't exist yet. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change", + fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in OpenTofu.", addr), + )) + continue + } + + // Allocating a deposed key separately from using it can be racy + // in general, but we assume here that nothing except the apply + // node we instantiate below will actually make new deposed objects + // in practice, and so the set of already-used keys will not change + // between now and then. + dk = ris.FindUnusedDeposedKey() + } else { + // If we have no state at all yet then we can use _any_ + // DeposedKey. 
				dk = states.NewDeposedKey()
			}
		}

		if update {
			// All actions other than destroy use the node type chosen by
			// t.Concrete.
			abstract := NewNodeAbstractResourceInstance(addr)
			var node dag.Vertex = abstract
			if f := t.Concrete; f != nil {
				node = f(abstract)
			}

			if createBeforeDestroy {
				// We'll attach our pre-allocated DeposedKey to the node if
				// it supports that. NodeApplyableResourceInstance is the
				// specific concrete node type we are looking for here really,
				// since that's the only node type that might depose objects.
				if dn, ok := node.(GraphNodeDeposer); ok {
					dn.SetPreallocatedDeposedKey(dk)
				}
				log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk)
			} else {
				log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node))
			}

			g.Add(node)
			rsrcAddr := addr.ContainingResource().String()
			for _, rsrcNode := range resourceNodes[rsrcAddr] {
				g.Connect(dag.BasicEdge(node, rsrcNode))
			}
		}

		if delete {
			// Destroying always uses a destroy-specific node type, though
			// which one depends on whether we're destroying a current object
			// or a deposed object.
+ var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeDestroyResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } else { + node = &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } + if dk == states.NotDeposed { + log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) + } else { + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) + } + g.Add(node) + } + + if forget { + var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeForgetResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + log.Printf("[TRACE] DiffTransformer: %s will be represented for removal from the state by %s", addr, dag.VertexName(node)) + } else { + node = &NodeForgetDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for removal from the state by %s", addr, dk, dag.VertexName(node)) + } + + g.Add(node) + } + + } + + log.Printf("[TRACE] DiffTransformer complete") + + return diags.Err() +} diff --git a/pkg/tofu/transform_diff_test.go b/pkg/tofu/transform_diff_test.go new file mode 100644 index 00000000000..010da1a3a1b --- /dev/null +++ b/pkg/tofu/transform_diff_test.go @@ -0,0 +1,172 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/plans" +) + +func TestDiffTransformer_nilDiff(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + tf := &DiffTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + if len(g.Vertices()) > 0 { + t.Fatal("graph should be empty") + } +} + +func TestDiffTransformer(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + + beforeVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) + if err != nil { + t.Fatal(err) + } + afterVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) + if err != nil { + t.Fatal(err) + } + + tf := &DiffTransformer{ + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + Action: plans.Update, + Before: beforeVal, + After: afterVal, + }, + }, + }, + }, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDiffBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestDiffTransformer_noOpChange(t *testing.T) { + // "No-op" changes are how we record explicitly in a plan that we did + // indeed visit a particular resource instance during the planning phase + // and concluded that no changes were needed, as opposed to the resource + // instance not existing at all or having been excluded from planning + // entirely. 
+ // + // We must include nodes for resource instances with no-op changes in the + // apply graph, even though they won't take any external actions, because + // there are some secondary effects such as precondition/postcondition + // checks that can refer to objects elsewhere and so might have their + // results changed even if the resource instance they are attached to + // didn't actually change directly itself. + + // aws_instance.foo has a precondition, so should be included in the final + // graph. aws_instance.bar has no conditions, so there is nothing to + // execute during apply and it should not be included in the graph. + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "aws_instance" "bar" { +} + +resource "aws_instance" "foo" { + test_string = "ok" + + lifecycle { + precondition { + condition = self.test_string != "" + error_message = "resource error" + } + } +} +`}) + + g := Graph{Path: addrs.RootModuleInstance} + + beforeVal, err := plans.NewDynamicValue(cty.StringVal(""), cty.String) + if err != nil { + t.Fatal(err) + } + + tf := &DiffTransformer{ + Config: m, + Changes: &plans.Changes{ + Resources: []*plans.ResourceInstanceChangeSrc{ + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + // A "no-op" change has the no-op action and has the + // same object as both Before and After. 
+ Action: plans.NoOp, + Before: beforeVal, + After: beforeVal, + }, + }, + { + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProviderAddr: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ChangeSrc: plans.ChangeSrc{ + // A "no-op" change has the no-op action and has the + // same object as both Before and After. + Action: plans.NoOp, + Before: beforeVal, + After: beforeVal, + }, + }, + }, + }, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformDiffBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +const testTransformDiffBasicStr = ` +aws_instance.foo +` diff --git a/pkg/tofu/transform_expand.go b/pkg/tofu/transform_expand.go new file mode 100644 index 00000000000..b13d4720b4e --- /dev/null +++ b/pkg/tofu/transform_expand.go @@ -0,0 +1,22 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// GraphNodeDynamicExpandable is an interface that nodes can implement +// to signal that they can be expanded at eval-time (hence dynamic). +// These nodes are given the eval context and are expected to return +// a new subgraph. +type GraphNodeDynamicExpandable interface { + // DynamicExpand returns a new graph which will be treated as the dynamic + // subgraph of the receiving node. + // + // The second return value is of type error for historical reasons; + // it's valid (and most ideal) for DynamicExpand to return the result + // of calling ErrWithWarnings on a tfdiags.Diagnostics value instead, + // in which case the caller will unwrap it and gather the individual + // diagnostics. 
+ DynamicExpand(EvalContext) (*Graph, error) +} diff --git a/pkg/tofu/transform_external_reference.go b/pkg/tofu/transform_external_reference.go new file mode 100644 index 00000000000..c8190e0a85e --- /dev/null +++ b/pkg/tofu/transform_external_reference.go @@ -0,0 +1,28 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "github.com/kubegems/opentofu/pkg/addrs" + +// ExternalReferenceTransformer will add a GraphNodeReferencer into the graph +// that makes no changes to the graph itself but, by referencing the addresses +// within ExternalReferences, ensures that any temporary nodes that are required +// by an external caller, such as the tofu testing framework, are not +// skipped because they are not referenced from within the module. +type ExternalReferenceTransformer struct { + ExternalReferences []*addrs.Reference +} + +func (t *ExternalReferenceTransformer) Transform(g *Graph) error { + if len(t.ExternalReferences) == 0 { + return nil + } + + g.Add(&nodeExternalReference{ + ExternalReferences: t.ExternalReferences, + }) + return nil +} diff --git a/pkg/tofu/transform_import_state_test.go b/pkg/tofu/transform_import_state_test.go new file mode 100644 index 00000000000..b5d43b05adf --- /dev/null +++ b/pkg/tofu/transform_import_state_test.go @@ -0,0 +1,176 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/zclconf/go-cty/cty" +) + +func TestGraphNodeImportStateExecute(t *testing.T) { + state := states.NewState() + provider := testProvider("aws") + provider.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, + }, + } + provider.ConfigureProvider(providers.ConfigureProviderRequest{}) + + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + ProviderProvider: provider, + } + + // Import a new aws_instance.foo, this time with ID=bar. The original + // aws_instance.foo object should be removed from state and replaced with + // the new. + node := graphNodeImportState{ + Addr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ID: "bar", + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + } + + diags := node.Execute(ctx, walkImport) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) + } + + if len(node.states) != 1 { + t.Fatalf("Wrong result! Expected one imported resource, got %d", len(node.states)) + } + // Verify the ID for good measure + id := node.states[0].State.GetAttr("id") + if !id.RawEquals(cty.StringVal("bar")) { + t.Fatalf("Wrong result! 
Expected id \"bar\", got %q", id.AsString()) + } +} + +func TestGraphNodeImportStateSubExecute(t *testing.T) { + state := states.NewState() + provider := testProvider("aws") + provider.ConfigureProvider(providers.ConfigureProviderRequest{}) + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + ProviderProvider: provider, + ProviderSchemaSchema: providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + } + + importedResource := providers.ImportedResource{ + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("bar")}), + } + + node := graphNodeImportStateSub{ + TargetAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + State: importedResource, + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + } + diags := node.Execute(ctx, walkImport) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) + } + + // check for resource in state + actual := strings.TrimSpace(state.String()) + expected := `aws_instance.foo: + ID = bar + provider = provider["registry.opentofu.org/hashicorp/aws"]` + if actual != expected { + t.Fatalf("bad state after import: \n%s", actual) + } +} + +func TestGraphNodeImportStateSubExecuteNull(t *testing.T) { + state := states.NewState() + provider := testProvider("aws") + provider.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // return null indicating that the requested resource does not exist + resp.NewState = cty.NullVal(cty.Object(map[string]cty.Type{ + "id": cty.String, + })) + return resp + } + + ctx := &MockEvalContext{ + StateState: state.SyncWrapper(), + 
ProviderProvider: provider, + ProviderSchemaSchema: providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }, + }, + } + + importedResource := providers.ImportedResource{ + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("bar")}), + } + + node := graphNodeImportStateSub{ + TargetAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + State: importedResource, + ResolvedProvider: addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + } + diags := node.Execute(ctx, walkImport) + if !diags.HasErrors() { + t.Fatal("expected error for non-existent resource") + } +} diff --git a/pkg/tofu/transform_local.go b/pkg/tofu/transform_local.go new file mode 100644 index 00000000000..e381116f455 --- /dev/null +++ b/pkg/tofu/transform_local.go @@ -0,0 +1,47 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. 
+type LocalTransformer struct { + Config *configs.Config +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Config) +} + +func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { + if c == nil { + // Can't have any locals if there's no config + return nil + } + + for _, local := range c.Module.Locals { + addr := addrs.LocalValue{Name: local.Name} + node := &nodeExpandLocal{ + Addr: addr, + Module: c.Path, + Config: local, + } + g.Add(node) + } + + // Also populate locals for child modules + for _, cc := range c.Children { + if err := t.transformModule(g, cc); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/tofu/transform_module_expansion.go b/pkg/tofu/transform_module_expansion.go new file mode 100644 index 00000000000..6ce8cb16ae7 --- /dev/null +++ b/pkg/tofu/transform_module_expansion.go @@ -0,0 +1,236 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +// ModuleExpansionTransformer is a GraphTransformer that adds graph nodes +// representing the possible expansion of each module call in the configuration, +// and ensures that any nodes representing objects declared within a module +// are dependent on the expansion node so that they will be visited only +// after the module expansion has been decided. +// +// This transform must be applied only after all nodes representing objects +// that can be contained within modules have already been added. +type ModuleExpansionTransformer struct { + Config *configs.Config + + // Concrete allows injection of a wrapped module node by the graph builder + // to alter the evaluation behavior. 
+ Concrete ConcreteModuleNodeFunc + + closers map[string]*nodeCloseModule +} + +func (t *ModuleExpansionTransformer) Transform(g *Graph) error { + t.closers = make(map[string]*nodeCloseModule) + + // Construct a tree for fast lookups of Vertices based on their ModulePath. + tree := &pathTree{ + children: make(map[string]*pathTree), + } + + for _, v := range g.Vertices() { + tree.addVertex(v) + } + + // The root module is always a singleton and so does not need expansion + // processing, but any descendent modules do. We'll process them + // recursively using t.transform. + for _, cfg := range t.Config.Children { + err := t.transform(g, cfg, tree, nil) + if err != nil { + return err + } + } + + // Now go through and connect all nodes to their respective module closers. + // This is done all at once here, because orphaned modules were already + // handled by the RemovedModuleTransformer, and those module closers are in + // the graph already, and need to be connected to their parent closers. + for _, v := range g.Vertices() { + switch v.(type) { + case GraphNodeDestroyer: + // Destroy nodes can only be ordered relative to other resource + // instances. + continue + case *nodeCloseModule: + // a module closer cannot connect to itself + continue + } + + // any node that executes within the scope of a module should be a + // GraphNodeModulePath + pather, ok := v.(GraphNodeModulePath) + if !ok { + continue + } + if closer, ok := t.closers[pather.ModulePath().String()]; ok { + // The module closer depends on each child resource instance, since + // during apply the module expansion will complete before the + // individual instances are applied. + g.Connect(dag.BasicEdge(closer, v)) + } + } + + // Modules implicitly depend on their child modules, so connect closers to + // other which contain their path. 
+ for _, c := range t.closers { + // For a closer c with address ["module.foo", "module.bar", "module.baz"], + // we'll look up all potential parent modules: + // + // - t.closers["module.foo"] + // - t.closers["module.foo.module.bar"] + // + // And connect the parent module to c. + // + // We skip i=0 because c.Addr[0:0] == [], and the root module should not exist in t.closers. + for i := 1; i < len(c.Addr); i++ { + parentAddr := c.Addr[0:i].String() + if parent, ok := t.closers[parentAddr]; ok { + g.Connect(dag.BasicEdge(parent, c)) + } + } + } + + return nil +} + +func (t *ModuleExpansionTransformer) transform(g *Graph, c *configs.Config, tree *pathTree, parentNode dag.Vertex) error { + _, call := c.Path.Call() + modCall := c.Parent.Module.ModuleCalls[call.Name] + + n := &nodeExpandModule{ + Addr: c.Path, + Config: c.Module, + ModuleCall: modCall, + } + var expander dag.Vertex = n + if t.Concrete != nil { + expander = t.Concrete(n) + } + + g.Add(expander) + tree.addVertex(expander) + log.Printf("[TRACE] ModuleExpansionTransformer: Added %s as %T", c.Path, expander) + + if parentNode != nil { + log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(expander), dag.VertexName(parentNode)) + g.Connect(dag.BasicEdge(expander, parentNode)) + } + + // Add the closer (which acts as the root module node) to provide a + // single exit point for the expanded module. + closer := &nodeCloseModule{ + Addr: c.Path, + } + g.Add(closer) + tree.addVertex(closer) + g.Connect(dag.BasicEdge(closer, expander)) + t.closers[c.Path.String()] = closer + + for _, childV := range tree.findModule(c.Path) { + // don't connect a node to itself + if childV == expander { + continue + } + + var path addrs.Module + switch t := childV.(type) { + case GraphNodeDestroyer: + // skip destroyers, as they can only depend on other resources. 
+ continue + + case GraphNodeModulePath: + path = t.ModulePath() + default: + continue + } + + if path.Equal(c.Path) { + log.Printf("[TRACE] ModuleExpansionTransformer: %s must wait for expansion of %s", dag.VertexName(childV), c.Path) + g.Connect(dag.BasicEdge(childV, expander)) + } + } + + // Also visit child modules, recursively. + for _, cc := range c.Children { + if err := t.transform(g, cc, tree, expander); err != nil { + return err + } + } + + return nil +} + +// pathTree is a tree containing a dag.Set of dag.Vertex per addrs.Module +// +// Given V = vertices in the graph and M = modules in the graph, constructing +// the tree takes ~O(V*log(M)) time to insert all the vertices, which gives +// us ~O(log(M)) access time to find all vertices that are part of a module. +// +// The previous implementation iterated over every node for each module, which made +// Transform() take O(V * M). +// +// This improves that to O(V*log(M) + M). +type pathTree struct { + children map[string]*pathTree + leaves dag.Set +} + +func (t *pathTree) addVertex(v dag.Vertex) { + mp, ok := v.(GraphNodeModulePath) + if !ok { + return + } + + t.add(v, mp.ModulePath()) +} + +func (t *pathTree) add(v dag.Vertex, addr []string) { + if len(addr) == 0 { + if t.leaves == nil { + t.leaves = make(dag.Set) + } + t.leaves.Add(v) + return + } + + next, addr := addr[0], addr[1:] + child, ok := t.children[next] + if !ok { + child = &pathTree{ + children: make(map[string]*pathTree), + } + t.children[next] = child + } + + child.add(v, addr) +} + +func (t *pathTree) findModule(p addrs.Module) dag.Set { + return t.find(p) +} + +func (t *pathTree) find(addr []string) dag.Set { + if len(addr) == 0 { + return t.leaves + } + + next, addr := addr[0], addr[1:] + child, ok := t.children[next] + if !ok { + return nil + } + + return child.find(addr) +} diff --git a/pkg/tofu/transform_module_expansion_test.go b/pkg/tofu/transform_module_expansion_test.go new file mode 100644 index 00000000000..c10e2c347af --- 
/dev/null +++ b/pkg/tofu/transform_module_expansion_test.go @@ -0,0 +1,66 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestModuleExpansionTransformer(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + module := testModule(t, "transform-module-var-basic") + + { + tf := &ModuleExpansionTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformModuleExpBasicStr) + if actual != expected { + t.Fatalf("want:\n\n%s\n\ngot:\n\n%s", expected, actual) + } +} + +func TestModuleExpansionTransformer_nested(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + module := testModule(t, "transform-module-var-nested") + + { + tf := &ModuleExpansionTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformModuleExpNestedStr) + if actual != expected { + t.Fatalf("want:\n\n%s\n\ngot:\n\n%s", expected, actual) + } +} + +const testTransformModuleExpBasicStr = ` +module.child (close) + module.child (expand) +module.child (expand) +` + +const testTransformModuleExpNestedStr = ` +module.child (close) + module.child (expand) + module.child.module.child (close) +module.child (expand) +module.child.module.child (close) + module.child.module.child (expand) +module.child.module.child (expand) + module.child (expand) +` diff --git a/pkg/tofu/transform_module_variable.go b/pkg/tofu/transform_module_variable.go new file mode 100644 index 00000000000..c97ce08ee63 --- /dev/null +++ b/pkg/tofu/transform_module_variable.go @@ -0,0 +1,119 @@ +// Copyright (c) The OpenTofu Authors +// 
SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/tfdiags" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/configs" +) + +// ModuleVariableTransformer is a GraphTransformer that adds all the variables +// in the configuration to the graph. +// +// Any "variable" block present in any non-root module is included here, even +// if a particular variable is not referenced from anywhere. +// +// The transform will produce errors if a call to a module does not conform +// to the expected set of arguments, but this transformer is not in a good +// position to return errors and so the validate walk should include specific +// steps for validating module blocks, separate from this transform. +type ModuleVariableTransformer struct { + Config *configs.Config +} + +func (t *ModuleVariableTransformer) Transform(g *Graph) error { + return t.transform(g, nil, t.Config) +} + +func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { + // We can have no variables if we have no configuration. + if c == nil { + return nil + } + + // Transform all the children first. + for _, cc := range c.Children { + if err := t.transform(g, c, cc); err != nil { + return err + } + } + + // If we're processing anything other than the root module then we'll + // add graph nodes for variables defined inside. (Variables for the root + // module are dealt with in RootVariableTransformer). + // If we have a parent, we can determine if a module variable is being + // used, so we transform this. 
+ if parent != nil { + if err := t.transformSingle(g, parent, c); err != nil { + return err + } + } + + return nil +} + +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { + _, call := c.Path.Call() + + // Find the call in the parent module configuration, so we can get the + // expressions given for each input variable at the call site. + callConfig, exists := parent.Module.ModuleCalls[call.Name] + if !exists { + // This should never happen, since it indicates an improperly-constructed + // configuration tree. + panic(fmt.Errorf("no module call block found for %s", c.Path)) + } + + // We need to construct a schema for the expected call arguments based on + // the configured variables in our config, which we can then use to + // decode the content of the call block. + schema := &hcl.BodySchema{} + for _, v := range c.Module.Variables { + schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ + Name: v.Name, + Required: v.Default == cty.NilVal, + }) + } + + content, contentDiags := callConfig.Config.Content(schema) + if contentDiags.HasErrors() { + // Validation code elsewhere should deal with any errors before we + // get in here, but we'll report them out here just in case, to + // avoid crashes. 
+ var diags tfdiags.Diagnostics + diags = diags.Append(contentDiags) + return diags.Err() + } + + for _, v := range c.Module.Variables { + var expr hcl.Expression + if attr := content.Attributes[v.Name]; attr != nil { + expr = attr.Expr + } + + // Add a plannable node, as the variable may expand + // during module expansion + node := &nodeExpandModuleVariable{ + Addr: addrs.InputVariable{ + Name: v.Name, + }, + Module: c.Path, + Config: v, + Expr: expr, + } + g.Add(node) + } + + return nil +} diff --git a/pkg/tofu/transform_module_variable_test.go b/pkg/tofu/transform_module_variable_test.go new file mode 100644 index 00000000000..02d3ff9d20e --- /dev/null +++ b/pkg/tofu/transform_module_variable_test.go @@ -0,0 +1,72 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestModuleVariableTransformer(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + module := testModule(t, "transform-module-var-basic") + + { + tf := &RootVariableTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &ModuleVariableTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformModuleVarBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestModuleVariableTransformer_nested(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + module := testModule(t, "transform-module-var-nested") + + { + tf := &RootVariableTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &ModuleVariableTransformer{Config: module} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", 
err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformModuleVarNestedStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +const testTransformModuleVarBasicStr = ` +module.child.var.value (expand) +` + +const testTransformModuleVarNestedStr = ` +module.child.module.child.var.value (expand) +module.child.var.value (expand) +` diff --git a/pkg/tofu/transform_orphan_count.go b/pkg/tofu/transform_orphan_count.go new file mode 100644 index 00000000000..b5b49db908c --- /dev/null +++ b/pkg/tofu/transform_orphan_count.go @@ -0,0 +1,66 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" +) + +// OrphanResourceInstanceCountTransformer is a GraphTransformer that adds orphans +// for an expanded count to the graph. The determination of this depends +// on the count argument given. +// +// Orphans are found by comparing the count to what is found in the state. +// This transform assumes that if an element in the state is within the count +// bounds given, that it is not an orphan. +type OrphanResourceInstanceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + Addr addrs.AbsResource // Addr of the resource to look for orphans + InstanceAddrs []addrs.AbsResourceInstance // Addresses that currently exist in config + State *states.State // Full global state +} + +func (t *OrphanResourceInstanceCountTransformer) Transform(g *Graph) error { + rs := t.State.Resource(t.Addr) + if rs == nil { + return nil // Resource doesn't exist in state, so nothing to do! 
+ } + + // This is an O(n*m) analysis, which we accept for now because the + // number of instances of a single resource ought to always be small in any + // reasonable OpenTofu configuration. +Have: + for key, inst := range rs.Instances { + // Instances which have no current objects (only one or more + // deposed objects) will be taken care of separately + if inst.Current == nil { + continue + } + + thisAddr := rs.Addr.Instance(key) + for _, wantAddr := range t.InstanceAddrs { + if wantAddr.Equal(thisAddr) { + continue Have + } + } + // If thisAddr is not in t.InstanceAddrs then we've found an "orphan" + + abstract := NewNodeAbstractResourceInstance(thisAddr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceCountTransformer: adding %s as %T", thisAddr, node) + g.Add(node) + } + + return nil +} diff --git a/pkg/tofu/transform_orphan_count_test.go b/pkg/tofu/transform_orphan_count_test.go new file mode 100644 index 00000000000..8797c29267b --- /dev/null +++ b/pkg/tofu/transform_orphan_count_test.go @@ -0,0 +1,311 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestOrphanResourceCountTransformer(t *testing.T) { + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + g := Graph{Path: addrs.RootModuleInstance} + + { + tf := &OrphanResourceInstanceCountTransformer{ + Concrete: testOrphanResourceConcreteFunc, + Addr: addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, + State: state, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceCountTransformer_zero(t *testing.T) { + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + 
&states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + g := Graph{Path: addrs.RootModuleInstance} + + { + tf := &OrphanResourceInstanceCountTransformer{ + Concrete: testOrphanResourceConcreteFunc, + Addr: addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + InstanceAddrs: []addrs.AbsResourceInstance{}, + State: state, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountZeroStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceCountTransformer_oneIndex(t *testing.T) { + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + 
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + g := Graph{Path: addrs.RootModuleInstance} + + { + tf := &OrphanResourceInstanceCountTransformer{ + Concrete: testOrphanResourceConcreteFunc, + Addr: addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, + State: state, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountOneIndexStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceCountTransformer_deposed(t *testing.T) { + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + root.SetResourceInstanceDeposed( + 
mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + g := Graph{Path: addrs.RootModuleInstance} + + { + tf := &OrphanResourceInstanceCountTransformer{ + Concrete: testOrphanResourceConcreteFunc, + Addr: addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, + State: state, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountDeposedStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +// When converting from a NoEach mode to an EachMap via a switch to for_each, +// an edge is necessary to ensure that the map-key'd instances +// are evaluated after the NoKey resource, because the final instance evaluated +// sets the whole resource's EachMode. 
+func TestOrphanResourceCountTransformer_ForEachEdgesAdded(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + // "bar" key'd resource + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.StringKey("bar")).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + + // NoKey'd resource + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"]`), + ) + }) + + g := Graph{Path: addrs.RootModuleInstance} + + { + tf := &OrphanResourceInstanceCountTransformer{ + Concrete: testOrphanResourceConcreteFunc, + Addr: addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + InstanceAddrs: []addrs.AbsResourceInstance{}, + State: state, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceForEachStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +const testTransformOrphanResourceCountBasicStr = ` +aws_instance.foo[2] (orphan) +` + +const testTransformOrphanResourceCountZeroStr = ` +aws_instance.foo[0] (orphan) +aws_instance.foo[2] (orphan) +` + +const testTransformOrphanResourceCountOneIndexStr = ` +aws_instance.foo[1] (orphan) +` + +const testTransformOrphanResourceCountDeposedStr = ` +aws_instance.foo[1] (orphan) +` + +const testTransformOrphanResourceForEachStr = ` +aws_instance.foo 
(orphan) +aws_instance.foo["bar"] (orphan) +` diff --git a/pkg/tofu/transform_orphan_output.go b/pkg/tofu/transform_orphan_output.go new file mode 100644 index 00000000000..14f610ebb67 --- /dev/null +++ b/pkg/tofu/transform_orphan_output.go @@ -0,0 +1,67 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states" +) + +// OrphanOutputTransformer finds the outputs that aren't present +// in the given config that are in the state and adds them to the graph +// for deletion. +type OrphanOutputTransformer struct { + Config *configs.Config // Root of config tree + State *states.State // State is the root state + Planning bool +} + +func (t *OrphanOutputTransformer) Transform(g *Graph) error { + if t.State == nil { + log.Printf("[DEBUG] No state, no orphan outputs") + return nil + } + + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + return nil +} + +func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the config for this path, which is nil if the entire module has been + // removed. + var outputs map[string]*configs.Output + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + outputs = c.Module.Outputs + } + + // An output is "orphaned" if it's present in the state but not declared + // in the configuration. 
+ for name := range ms.OutputValues { + if _, exists := outputs[name]; exists { + continue + } + + g.Add(&NodeDestroyableOutput{ + Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), + Planning: t.Planning, + }) + } + + return nil +} diff --git a/pkg/tofu/transform_orphan_resource.go b/pkg/tofu/transform_orphan_resource.go new file mode 100644 index 00000000000..ec0c6d6a464 --- /dev/null +++ b/pkg/tofu/transform_orphan_resource.go @@ -0,0 +1,113 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" +) + +// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned +// resource instances to the graph. An "orphan" is an instance that is present +// in the state but belongs to a resource that is no longer present in the +// configuration. +// +// This is not the transformer that deals with "count orphans" (instances that +// are no longer covered by a resource's "count" or "for_each" setting); that's +// handled instead by OrphanResourceCountTransformer. +type OrphanResourceInstanceTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + // State is the global state. We require the global state to + // properly find module orphans at our path. + State *states.State + + // Config is the root node in the configuration tree. We'll look up + // the appropriate note in this tree using the path in each node. 
+ Config *configs.Config + + // Do not apply this transformer + skip bool +} + +func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { + if t.skip { + return nil + } + + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any OpenTofu operations + // without at least an empty configuration. + panic("OrphanResourceInstanceTransformer used without setting Config") + } + + // Go through the modules and for each module transform in order + // to add the orphan. + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + + return nil +} + +func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the configuration for this module. The configuration might be + // nil if the module was removed from the configuration. This is okay, + // this just means that every resource is an orphan. + var m *configs.Module + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + m = c.Module + } + + // An "orphan" is a resource that is in the state but not the configuration, + // so we'll walk the state resources and try to correlate each of them + // with a configuration block. Each orphan gets a node in the graph whose + // type is decided by t.Concrete. + // + // We don't handle orphans related to changes in the "count" and "for_each" + // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. 
+ for _, rs := range ms.Resources { + if m != nil { + if r := m.ResourceByAddr(rs.Addr.Resource); r != nil { + continue + } + } + + for key, inst := range rs.Instances { + // Instances which have no current objects (only one or more + // deposed objects) will be taken care of separately + if inst.Current == nil { + continue + } + + addr := rs.Addr.Instance(key) + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) + g.Add(node) + } + } + + return nil +} diff --git a/pkg/tofu/transform_orphan_resource_test.go b/pkg/tofu/transform_orphan_resource_test.go new file mode 100644 index 00000000000..2884540d4fa --- /dev/null +++ b/pkg/tofu/transform_orphan_resource_test.go @@ -0,0 +1,331 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestOrphanResourceInstanceTransformer(t *testing.T) { + mod := testModule(t, "transform-orphan-basic") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + // The orphan + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "db", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + + // A deposed orphan should not be handled by this transformer + s.SetResourceInstanceDeposed( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "deposed", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + states.NewDeposedKey(), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &OrphanResourceInstanceTransformer{ + 
Concrete: testOrphanResourceConcreteFunc, + State: state, + Config: mod, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceBasicStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceInstanceTransformer_countGood(t *testing.T) { + mod := testModule(t, "transform-orphan-count") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + }) + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &OrphanResourceInstanceTransformer{ + Concrete: testOrphanResourceConcreteFunc, + State: state, + Config: mod, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceInstanceTransformer_countBad(t *testing.T) { + mod := 
testModule(t, "transform-orphan-count-empty") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.IntKey(1)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + }) + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &OrphanResourceInstanceTransformer{ + Concrete: testOrphanResourceConcreteFunc, + State: state, + Config: mod, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformOrphanResourceCountBadStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestOrphanResourceInstanceTransformer_modules(t *testing.T) { + mod := testModule(t, "transform-orphan-modules") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, 
+ addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "web", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), + &states.ResourceInstanceObjectSrc{ + AttrsFlat: map[string]string{ + "id": "foo", + }, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + }, + ) + }) + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + tf := &OrphanResourceInstanceTransformer{ + Concrete: testOrphanResourceConcreteFunc, + State: state, + Config: mod, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(testTransformOrphanResourceModulesStr) + if got != want { + t.Fatalf("wrong state result\ngot:\n%s\n\nwant:\n%s", got, want) + } +} + +const testTransformOrphanResourceBasicStr = ` +aws_instance.db (orphan) +aws_instance.web +` + +const testTransformOrphanResourceCountStr = ` +aws_instance.foo +` + +const testTransformOrphanResourceCountBadStr = ` +aws_instance.foo[0] (orphan) +aws_instance.foo[1] (orphan) +` + +const testTransformOrphanResourceModulesStr = ` +aws_instance.foo +module.child.aws_instance.web (orphan) +` + +func testOrphanResourceConcreteFunc(a *NodeAbstractResourceInstance) dag.Vertex { + return &testOrphanResourceInstanceConcrete{a} +} + +type testOrphanResourceInstanceConcrete struct { + *NodeAbstractResourceInstance +} + +func (n *testOrphanResourceInstanceConcrete) Name() string { + return fmt.Sprintf("%s (orphan)", n.NodeAbstractResourceInstance.Name()) +} diff --git a/pkg/tofu/transform_output.go b/pkg/tofu/transform_output.go new file mode 100644 index 
00000000000..adae1654e3f
--- /dev/null
+++ b/pkg/tofu/transform_output.go
@@ -0,0 +1,73 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"log"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
)

// OutputTransformer is a GraphTransformer that adds all the outputs
// in the configuration to the graph.
//
// This is done for the apply graph builder even if dependent nodes
// aren't changing since there is no downside: the state will be available
// even if the dependent items aren't changing.
type OutputTransformer struct {
	Config *configs.Config

	// Refresh-only mode means that any failing output preconditions are
	// reported as warnings rather than errors
	RefreshOnly bool

	// Planning must be set to true only when we're building a planning graph.
	// It must be set to false whenever we're building an apply graph.
	Planning bool

	// If this is a planned destroy, root outputs are still in the configuration
	// so we need to record that we wish to remove them.
	Destroying bool
}

// Transform implements GraphTransformer by walking the whole configuration
// tree rooted at t.Config.
func (t *OutputTransformer) Transform(g *Graph) error {
	return t.transform(g, t.Config)
}

// transform recursively adds one nodeExpandOutput per output value declared
// in the module config c and each of its descendant modules.
func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error {
	// If we have no config then there can be no outputs.
	if c == nil {
		return nil
	}

	// Transform all the children. We must do this first because
	// we can reference module outputs and they must show up in the
	// reference map.
	for _, cc := range c.Children {
		if err := t.transform(g, cc); err != nil {
			return err
		}
	}

	for _, o := range c.Module.Outputs {
		addr := addrs.OutputValue{Name: o.Name}

		// Each output gets an "expand" node; the mode flags on the
		// transformer are carried through to the node verbatim.
		node := &nodeExpandOutput{
			Addr:        addr,
			Module:      c.Path,
			Config:      o,
			Destroying:  t.Destroying,
			RefreshOnly: t.RefreshOnly,
			Planning:    t.Planning,
		}

		log.Printf("[TRACE] OutputTransformer: adding %s as %T", o.Name, node)
		g.Add(node)
	}

	return nil
}
diff --git a/pkg/tofu/transform_provider.go b/pkg/tofu/transform_provider.go
new file mode 100644
index 00000000000..a445a242e5b
--- /dev/null
+++ b/pkg/tofu/transform_provider.go
@@ -0,0 +1,865 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs"
	"github.com/kubegems/opentofu/pkg/dag"
	"github.com/kubegems/opentofu/pkg/tfdiags"
)

// transformProviders returns the composite transformer that adds, connects,
// and (eventually) prunes provider nodes for the given configuration.
func transformProviders(concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer {
	return GraphTransformMulti(
		// Add providers from the config
		&ProviderConfigTransformer{
			Config:   config,
			Concrete: concrete,
		},
		// Add any remaining missing providers
		&MissingProviderTransformer{
			Config:   config,
			Concrete: concrete,
		},
		// Connect the providers
		&ProviderTransformer{
			Config: config,
		},
		// The following comment shows what must be added to the transformer list after the schema transformer
		// After schema transformer, we can add function references
		// &ProviderFunctionTransformer{Config: config},
		// Remove unused providers and proxies
		// &PruneProviderTransformer{},
	)
}

// GraphNodeProvider is an interface that nodes that can be a provider
// must implement.
//
// ProviderAddr returns the address of the provider configuration this
// satisfies, which is relative to the path returned by method Path().
//
// Name returns the full name of the provider in the config.
type GraphNodeProvider interface {
	GraphNodeModulePath
	ProviderAddr() addrs.AbsProviderConfig
	Name() string
}

// GraphNodeCloseProvider is an interface that nodes that can be a close
// provider must implement. The CloseProviderName returned is the name of
// the provider they satisfy.
type GraphNodeCloseProvider interface {
	GraphNodeModulePath
	CloseProviderAddr() addrs.AbsProviderConfig
}

// GraphNodeProviderConsumer is an interface that nodes that require
// a provider must implement. ProvidedBy must return the address of the provider
// to use, which will be resolved to a configuration either in the same module
// or in an ancestor module, with the resulting absolute address passed to
// SetProvider.
type GraphNodeProviderConsumer interface {
	GraphNodeModulePath
	// ProvidedBy returns the address of the provider configuration the node
	// refers to, if available. The following value types may be returned:
	//
	//   nil + exact true: the node does not require a provider
	// * addrs.LocalProviderConfig: the provider was set in the resource config
	// * addrs.AbsProviderConfig + exact true: the provider configuration was
	//   taken from the instance state.
	// * addrs.AbsProviderConfig + exact false: no config or state; the returned
	//   value is a default provider configuration address for the resource's
	//   Provider
	ProvidedBy() (addr addrs.ProviderConfig, exact bool)

	// Provider() returns the Provider FQN for the node.
	Provider() (provider addrs.Provider)

	// Set the resolved provider address for this resource.
	SetProvider(addrs.AbsProviderConfig)
}

// ProviderTransformer is a GraphTransformer that maps resources to providers
// within the graph. This will error if there are any resources that don't map
// to proper resources.
type ProviderTransformer struct {
	Config *configs.Config
}

func (t *ProviderTransformer) Transform(g *Graph) error {
	// We need to find a provider configuration address for each resource
	// either directly represented by a node or referenced by a node in
	// the graph, and then create graph edges from provider to provider user
	// so that the providers will get initialized first.

	var diags tfdiags.Diagnostics

	// To start, we'll collect the _requested_ provider addresses for each
	// node, which we'll then resolve (handling provider inheritance, etc) in
	// the next step.
	// Our "requested" map is from graph vertices to string representations of
	// provider config addresses (for deduping) to requests.
	type ProviderRequest struct {
		Addr  addrs.AbsProviderConfig
		Exact bool // If true, inheritance from parent modules is not attempted
	}
	requested := map[dag.Vertex]map[string]ProviderRequest{}
	needConfigured := map[string]addrs.AbsProviderConfig{}
	for _, v := range g.Vertices() {
		// Does the vertex _directly_ use a provider?
		if pv, ok := v.(GraphNodeProviderConsumer); ok {
			providerAddr, exact := pv.ProvidedBy()
			if providerAddr == nil && exact {
				// no provider is required
				continue
			}

			requested[v] = make(map[string]ProviderRequest)

			var absPc addrs.AbsProviderConfig

			switch p := providerAddr.(type) {
			case addrs.AbsProviderConfig:
				// ProvidedBy() returns an AbsProviderConfig when the provider
				// configuration is set in state, so we do not need to verify
				// the FQN matches.
				absPc = p

				if exact {
					log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), absPc)
				}

			case addrs.LocalProviderConfig:
				// ProvidedBy() return a LocalProviderConfig when the resource
				// contains a `provider` attribute
				absPc.Provider = pv.Provider()
				modPath := pv.ModulePath()
				if t.Config == nil {
					absPc.Module = modPath
					absPc.Alias = p.Alias
					break
				}

				absPc.Module = modPath
				absPc.Alias = p.Alias

			default:
				// This should never happen; the case statements are meant to be exhaustive
				panic(fmt.Sprintf("%s: provider for %s couldn't be determined", dag.VertexName(v), absPc))
			}

			requested[v][absPc.String()] = ProviderRequest{
				Addr:  absPc,
				Exact: exact,
			}

			// Direct references need the provider configured as well as initialized
			needConfigured[absPc.String()] = absPc
		}
	}

	// Now we'll go through all the requested addresses we just collected and
	// figure out which _actual_ config address each belongs to, after resolving
	// for provider inheritance and passing.
	m := providerVertexMap(g)
	for v, reqs := range requested {
		for key, req := range reqs {
			p := req.Addr
			target := m[key]

			_, ok := v.(GraphNodeModulePath)
			if !ok && target == nil {
				// No target and no path to traverse up from
				diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p))
				continue
			}

			if target != nil {
				// Providers with configuration will already exist within the graph and can be directly referenced
				log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v))
			}

			// if we don't have a provider at this level, walk up the path looking for one,
			// unless we were told to be exact.
			if target == nil && !req.Exact {
				for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() {
					key := pp.String()
					target = m[key]
					if target != nil {
						log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp)
						break
					}
					log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v))
				}
			}

			// If this provider doesn't need to be configured then we can just
			// stub it out with an init-only provider node, which will just
			// start up the provider and fetch its schema.
			if _, exists := needConfigured[key]; target == nil && !exists {
				stubAddr := addrs.AbsProviderConfig{
					Module:   addrs.RootModule,
					Provider: p.Provider,
				}
				stub := &NodeEvalableProvider{
					&NodeAbstractProvider{
						Addr: stubAddr,
					},
				}
				m[stubAddr.String()] = stub
				log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr)
				target = stub
				g.Add(target)
			}

			if target == nil {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Provider configuration not present",
					fmt.Sprintf(
						"To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.",
						dag.VertexName(v), p, dag.VertexName(v),
					),
				))
				break
			}

			// see if this is a proxy provider pointing to another concrete config
			if p, ok := target.(*graphNodeProxyProvider); ok {
				g.Remove(p)
				target = p.Target()
			}

			log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target))
			if pv, ok := v.(GraphNodeProviderConsumer); ok {
				pv.SetProvider(target.ProviderAddr())
			}
			g.Connect(dag.BasicEdge(v, target))
		}
	}

	return diags.Err()
}

// ProviderFunctionTransformer is a GraphTransformer that maps nodes which reference functions to providers
// within the graph. This will error if there are any provider functions that don't map to known providers.
type ProviderFunctionTransformer struct {
	Config *configs.Config
}

func (t *ProviderFunctionTransformer) Transform(g *Graph) error {
	var diags tfdiags.Diagnostics

	if t.Config == nil {
		// This is probably a test case, inherited from ProviderTransformer
		log.Printf("[WARN] Skipping provider function transformer due to missing config")
		return nil
	}

	// Locate all providers in the graph
	providers := providerVertexMap(g)

	type providerReference struct {
		path  string
		name  string
		alias string
	}
	// LuT of provider reference -> provider vertex
	providerReferences := make(map[providerReference]dag.Vertex)

	for _, v := range g.Vertices() {
		// Provider function references
		if nr, ok := v.(GraphNodeReferencer); ok && t.Config != nil {
			for _, ref := range nr.References() {
				if pf, ok := ref.Subject.(addrs.ProviderFunction); ok {
					key := providerReference{
						path:  nr.ModulePath().String(),
						name:  pf.ProviderName,
						alias: pf.ProviderAlias,
					}

					// We already know about this provider and can link directly
					if provider, ok := providerReferences[key]; ok {
						// Is it worth skipping if we have already connected this provider?
						g.Connect(dag.BasicEdge(v, provider))
						continue
					}

					// Find the config that this node belongs to
					mc := t.Config.Descendent(nr.ModulePath())
					if mc == nil {
						// I don't think this is possible
						diags = diags.Append(&hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary:  "Unknown Descendent Module",
							Detail:   nr.ModulePath().String(),
							Subject:  ref.SourceRange.ToHCL().Ptr(),
						})
						continue
					}

					// Find the provider type from required_providers
					pr, ok := mc.Module.ProviderRequirements.RequiredProviders[pf.ProviderName]
					if !ok {
						diags = diags.Append(&hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary:  "Unknown function provider",
							Detail:   fmt.Sprintf("Provider %q does not exist within the required_providers of this module", pf.ProviderName),
							Subject:  ref.SourceRange.ToHCL().Ptr(),
						})
						continue
					}

					// Build fully qualified provider address
					absPc := addrs.AbsProviderConfig{
						Provider: pr.Type,
						Module:   nr.ModulePath(),
						Alias:    pf.ProviderAlias,
					}

					log.Printf("[TRACE] ProviderFunctionTransformer: %s in %s is provided by %s", pf, dag.VertexName(v), absPc)

					// Lookup provider via full address
					provider := providers[absPc.String()]

					if provider != nil {
						// Providers with configuration will already exist within the graph and can be directly referenced
						log.Printf("[TRACE] ProviderFunctionTransformer: exact match for %s serving %s", absPc, dag.VertexName(v))
					} else {
						// If this provider doesn't need to be configured then we can just
						// stub it out with an init-only provider node, which will just
						// start up the provider and fetch its schema.
						stubAddr := addrs.AbsProviderConfig{
							Module:   addrs.RootModule,
							Provider: absPc.Provider,
						}
						if provider, ok = providers[stubAddr.String()]; !ok {
							stub := &NodeEvalableProvider{
								&NodeAbstractProvider{
									Addr: stubAddr,
								},
							}
							providers[stubAddr.String()] = stub
							log.Printf("[TRACE] ProviderFunctionTransformer: creating init-only node for %s", stubAddr)
							provider = stub
							g.Add(provider)
						}
					}

					// see if this is a proxy provider pointing to another concrete config
					if p, ok := provider.(*graphNodeProxyProvider); ok {
						g.Remove(p)
						provider = p.Target()
					}

					log.Printf("[DEBUG] ProviderFunctionTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(provider))
					g.Connect(dag.BasicEdge(v, provider))

					// Save for future lookups
					providerReferences[key] = provider
				}
			}
		}
	}

	return diags.Err()
}

// CloseProviderTransformer is a GraphTransformer that adds nodes to the
// graph that will close open provider connections that aren't needed anymore.
// A provider connection is not needed anymore once all depended resources
// in the graph are evaluated.
type CloseProviderTransformer struct{}

func (t *CloseProviderTransformer) Transform(g *Graph) error {
	pm := providerVertexMap(g)
	cpm := make(map[string]*graphNodeCloseProvider)
	var err error

	for _, p := range pm {
		key := p.ProviderAddr().String()

		// get the close provider of this type if we already created it
		closer := cpm[key]

		if closer == nil {
			// create a closer for this provider type
			closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()}
			g.Add(closer)
			cpm[key] = closer
		}

		// Close node depends on the provider itself
		// this is added unconditionally, so it will connect to all instances
		// of the provider. Extra edges will be removed by transitive
		// reduction.
		g.Connect(dag.BasicEdge(closer, p))

		// connect all the provider's resources to the close node
		for _, s := range g.UpEdges(p) {
			if _, ok := s.(GraphNodeProviderConsumer); ok {
				g.Connect(dag.BasicEdge(closer, s))
			} else if _, ok := s.(GraphNodeReferencer); ok {
				g.Connect(dag.BasicEdge(closer, s))
			}
		}
	}

	return err
}

// MissingProviderTransformer is a GraphTransformer that adds to the graph
// a node for each default provider configuration that is referenced by another
// node but not already present in the graph.
//
// These "default" nodes are always added to the root module, regardless of
// where they are requested. This is important because our inheritance
// resolution behavior in ProviderTransformer will then treat these as a
// last-ditch fallback after walking up the tree, rather than preferring them
// as it would if they were placed in the same module as the requester.
//
// This transformer may create extra nodes that are not needed in practice,
// due to overriding provider configurations in child modules.
// PruneProviderTransformer can then remove these once ProviderTransformer
// has resolved all of the inheritance, etc.
type MissingProviderTransformer struct {
	// MissingProviderTransformer needs the config to rule out _implied_ default providers
	Config *configs.Config

	// Concrete, if set, overrides how the providers are made.
	Concrete ConcreteProviderNodeFunc
}

func (t *MissingProviderTransformer) Transform(g *Graph) error {
	// Initialize factory
	if t.Concrete == nil {
		t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
			return a
		}
	}

	var err error
	m := providerVertexMap(g)
	for _, v := range g.Vertices() {
		pv, ok := v.(GraphNodeProviderConsumer)
		if !ok {
			continue
		}

		// For our work here we actually care only about the provider type and
		// we plan to place all default providers in the root module.
		providerFqn := pv.Provider()

		// We're going to create an implicit _default_ configuration for the
		// referenced provider type in the _root_ module, ignoring all other
		// aspects of the resource's declared provider address.
		defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(providerFqn)
		key := defaultAddr.String()
		provider := m[key]

		if provider != nil {
			// There's already an explicit default configuration for this
			// provider type in the root module, so we have nothing to do.
			continue
		}

		log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v))

		// create the missing top-level provider
		provider = t.Concrete(&NodeAbstractProvider{
			Addr: defaultAddr,
		}).(GraphNodeProvider)

		g.Add(provider)
		m[key] = provider
	}

	return err
}

// PruneProviderTransformer removes any providers that are not actually used by
// anything, and provider proxies. This avoids the provider being initialized
// and configured. This both saves resources but also avoids errors since
// configuration may imply initialization which may require auth.
type PruneProviderTransformer struct{}

func (t *PruneProviderTransformer) Transform(g *Graph) error {
	for _, v := range g.Vertices() {
		// We only care about providers
		_, ok := v.(GraphNodeProvider)
		if !ok {
			continue
		}

		// ProxyProviders will have up edges, but we're now done with them in the graph
		if _, ok := v.(*graphNodeProxyProvider); ok {
			log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v))
			g.Remove(v)
		}

		// Remove providers with no dependencies.
		if g.UpEdges(v).Len() == 0 {
			log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v))
			g.Remove(v)
		}
	}

	return nil
}

// providerVertexMap indexes every GraphNodeProvider currently in the graph by
// the string form of its absolute provider configuration address.
func providerVertexMap(g *Graph) map[string]GraphNodeProvider {
	m := make(map[string]GraphNodeProvider)
	for _, v := range g.Vertices() {
		if pv, ok := v.(GraphNodeProvider); ok {
			addr := pv.ProviderAddr()
			m[addr.String()] = pv
		}
	}

	return m
}

type graphNodeCloseProvider struct {
	Addr addrs.AbsProviderConfig
}

var (
	_ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil)
	_ GraphNodeExecutable    = (*graphNodeCloseProvider)(nil)
)

func (n *graphNodeCloseProvider) Name() string {
	return n.Addr.String() + " (close)"
}

// GraphNodeModulePath
func (n *graphNodeCloseProvider) ModulePath() addrs.Module {
	return n.Addr.Module
}

// GraphNodeExecutable impl.
func (n *graphNodeCloseProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	return diags.Append(ctx.CloseProvider(n.Addr))
}

func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig {
	return n.Addr
}

// GraphNodeDotter impl.
func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
	if !opts.Verbose {
		return nil
	}
	return &dag.DotNode{
		Name: name,
		Attrs: map[string]string{
			"label": n.Name(),
			"shape": "diamond",
		},
	}
}

// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to
// store the name and value of a provider node for inheritance between modules.
// These nodes are only used to store the data while loading the provider
// configurations, and are removed after all the resources have been connected
// to their providers.
type graphNodeProxyProvider struct {
	addr   addrs.AbsProviderConfig
	target GraphNodeProvider
}

var (
	_ GraphNodeModulePath = (*graphNodeProxyProvider)(nil)
	_ GraphNodeProvider   = (*graphNodeProxyProvider)(nil)
)

func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig {
	return n.addr
}

func (n *graphNodeProxyProvider) ModulePath() addrs.Module {
	return n.addr.Module
}

func (n *graphNodeProxyProvider) Name() string {
	return n.addr.String() + " (proxy)"
}

// find the concrete provider instance
func (n *graphNodeProxyProvider) Target() GraphNodeProvider {
	switch t := n.target.(type) {
	case *graphNodeProxyProvider:
		return t.Target()
	default:
		return n.target
	}
}

// ProviderConfigTransformer adds all provider nodes from the configuration and
// attaches the configs.
type ProviderConfigTransformer struct {
	Concrete ConcreteProviderNodeFunc

	// each provider node is stored here so that the proxy nodes can look up
	// their targets by name.
	providers map[string]GraphNodeProvider
	// record providers that can be overridden with a proxy
	proxiable map[string]bool

	// Config is the root node of the configuration tree to add providers from.
	Config *configs.Config
}

func (t *ProviderConfigTransformer) Transform(g *Graph) error {
	// If no configuration is given, we don't do anything
	if t.Config == nil {
		return nil
	}

	t.providers = make(map[string]GraphNodeProvider)
	t.proxiable = make(map[string]bool)

	// Start the transformation process
	if err := t.transform(g, t.Config); err != nil {
		return err
	}

	// finally attach the configs to the new nodes
	return t.attachProviderConfigs(g)
}

func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error {
	// If no config, do nothing
	if c == nil {
		return nil
	}

	// Add our resources
	if err := t.transformSingle(g, c); err != nil {
		return err
	}

	// Transform all the children.
	for _, cc := range c.Children {
		if err := t.transform(g, cc); err != nil {
			return err
		}
	}
	return nil
}

func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error {
	// Get the module associated with this configuration tree node
	mod := c.Module
	path := c.Path

	// If this is the root module, we can add nodes for required providers that
	// have no configuration, equivalent to having an empty configuration
	// block. This will ensure that a provider node exists for modules to
	// access when passing around configuration and inheritance.
	if path.IsRoot() && c.Module.ProviderRequirements != nil {
		for name, p := range c.Module.ProviderRequirements.RequiredProviders {
			if _, configured := mod.ProviderConfigs[name]; configured {
				continue
			}

			addr := addrs.AbsProviderConfig{
				Provider: p.Type,
				Module:   path,
			}

			if _, ok := t.providers[addr.String()]; ok {
				// The config validation warns about this too, but we can't
				// completely prevent it in v1.
				log.Printf("[WARN] ProviderConfigTransformer: duplicate required_providers entry for %s", addr)
				continue
			}

			abstract := &NodeAbstractProvider{
				Addr: addr,
			}

			var v dag.Vertex
			if t.Concrete != nil {
				v = t.Concrete(abstract)
			} else {
				v = abstract
			}

			g.Add(v)
			t.providers[addr.String()] = v.(GraphNodeProvider)
		}
	}

	// add all providers from the configuration
	for _, p := range mod.ProviderConfigs {
		fqn := mod.ProviderForLocalConfig(p.Addr())
		addr := addrs.AbsProviderConfig{
			Provider: fqn,
			Alias:    p.Alias,
			Module:   path,
		}

		if _, ok := t.providers[addr.String()]; ok {
			// The abstract provider node may already have been added from the
			// provider requirements.
			log.Printf("[WARN] ProviderConfigTransformer: provider node %s already added", addr)
			continue
		}

		abstract := &NodeAbstractProvider{
			Addr: addr,
		}
		var v dag.Vertex
		if t.Concrete != nil {
			v = t.Concrete(abstract)
		} else {
			v = abstract
		}

		// Add it to the graph
		g.Add(v)
		key := addr.String()
		t.providers[key] = v.(GraphNodeProvider)

		// While deprecated, we still accept empty configuration blocks within
		// modules as being a possible proxy for passed configuration.
		if !path.IsRoot() {
			// A provider configuration is "proxyable" if its configuration is
			// entirely empty. This means it's standing in for a provider
			// configuration that must be passed in from the parent module.
			// We decide this by evaluating the config with an empty schema;
			// if this succeeds, then we know there's nothing in the body.
			_, diags := p.Config.Content(&hcl.BodySchema{})
			t.proxiable[key] = !diags.HasErrors()
		}
	}

	// Now replace the provider nodes with proxy nodes if a provider was being
	// passed in, and create implicit proxies if there was no config. Any extra
	// proxies will be removed in the prune step.
	return t.addProxyProviders(g, c)
}

func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error {
	path := c.Path

	// can't add proxies at the root
	if path.IsRoot() {
		return nil
	}

	parentPath, callAddr := path.Call()
	parent := c.Parent
	if parent == nil {
		return nil
	}

	callName := callAddr.Name
	var parentCfg *configs.ModuleCall
	for name, mod := range parent.Module.ModuleCalls {
		if name == callName {
			parentCfg = mod
			break
		}
	}

	if parentCfg == nil {
		// this can't really happen during normal execution.
		return fmt.Errorf("parent module config not found for %s", c.Path.String())
	}

	// Go through all the providers the parent is passing in, and add proxies to
	// the parent provider nodes.
	for _, pair := range parentCfg.Providers {
		fqn := c.Module.ProviderForLocalConfig(pair.InChild.Addr())
		fullAddr := addrs.AbsProviderConfig{
			Provider: fqn,
			Module:   path,
			Alias:    pair.InChild.Addr().Alias,
		}

		fullParentAddr := addrs.AbsProviderConfig{
			Provider: fqn,
			Module:   parentPath,
			Alias:    pair.InParent.Addr().Alias,
		}

		fullName := fullAddr.String()
		fullParentName := fullParentAddr.String()

		parentProvider := t.providers[fullParentName]

		if parentProvider == nil {
			return fmt.Errorf("missing provider %s", fullParentName)
		}

		proxy := &graphNodeProxyProvider{
			addr:   fullAddr,
			target: parentProvider,
		}

		concreteProvider := t.providers[fullName]

		// replace the concrete node with the provider passed in only if it is
		// proxyable
		if concreteProvider != nil {
			if t.proxiable[fullName] {
				g.Replace(concreteProvider, proxy)
				t.providers[fullName] = proxy
			}
			continue
		}

		// There was no concrete provider, so add this as an implicit provider.
		// The extra proxy will be pruned later if it's unused.
		g.Add(proxy)
		t.providers[fullName] = proxy
	}

	return nil
}

// ConfigCheckable returns the ConfigCheck address for this absolute reference.
func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error {
	for _, v := range g.Vertices() {
		// Only care about GraphNodeAttachProvider implementations
		apn, ok := v.(GraphNodeAttachProvider)
		if !ok {
			continue
		}

		// Determine what we're looking for
		addr := apn.ProviderAddr()

		// Get the configuration.
+ mc := t.Config.Descendent(addr.Module) + if mc == nil { + log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) + continue + } + + // Find the localName for the provider fqn + localName := mc.Module.LocalNameForProvider(addr.Provider) + + // Go through the provider configs to find the matching config + for _, p := range mc.Module.ProviderConfigs { + if p.Name == localName && p.Alias == addr.Alias { + log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) + apn.AttachProvider(p) + break + } + } + } + + return nil +} diff --git a/pkg/tofu/transform_provider_test.go b/pkg/tofu/transform_provider_test.go new file mode 100644 index 00000000000..a2d85e983c7 --- /dev/null +++ b/pkg/tofu/transform_provider_test.go @@ -0,0 +1,520 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +func testProviderTransformerGraph(t *testing.T, cfg *configs.Config) *Graph { + t.Helper() + + g := &Graph{Path: addrs.RootModuleInstance} + ct := &ConfigTransformer{Config: cfg} + if err := ct.Transform(g); err != nil { + t.Fatal(err) + } + arct := &AttachResourceConfigTransformer{Config: cfg} + if err := arct.Transform(g); err != nil { + t.Fatal(err) + } + + return g +} + +// This variant exists purely for testing and can not currently include the ProviderFunctionTransformer +func testTransformProviders(concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { + return GraphTransformMulti( + // Add providers from the config + &ProviderConfigTransformer{ + Config: config, + Concrete: concrete, + }, + // Add any remaining missing providers + 
		&MissingProviderTransformer{
			Config:   config,
			Concrete: concrete,
		},
		// Connect the providers
		&ProviderTransformer{
			Config: config,
		},
		// After schema transformer, we can add function references
		// &ProviderFunctionTransformer{Config: config},
		// Remove unused providers and proxies
		&PruneProviderTransformer{},
	)
}

// TestProviderTransformer verifies that resources are connected to their
// provider nodes in the basic single-module case.
func TestProviderTransformer(t *testing.T) {
	mod := testModule(t, "transform-provider-basic")

	g := testProviderTransformerGraph(t, mod)
	{
		transform := &MissingProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	transform := &ProviderTransformer{}
	if err := transform.Transform(g); err != nil {
		t.Fatalf("err: %s", err)
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformProviderBasicStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}

// Test providers with FQNs that do not match the typeName
func TestProviderTransformer_fqns(t *testing.T) {
	for _, mod := range []string{"fqns", "fqns-module"} {
		mod := testModule(t, fmt.Sprintf("transform-provider-%s", mod))

		g := testProviderTransformerGraph(t, mod)
		{
			transform := &MissingProviderTransformer{Config: mod}
			if err := transform.Transform(g); err != nil {
				t.Fatalf("err: %s", err)
			}
		}

		transform := &ProviderTransformer{Config: mod}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}

		actual := strings.TrimSpace(g.String())
		expected := strings.TrimSpace(testTransformProviderBasicStr)
		if actual != expected {
			t.Fatalf("bad:\n\n%s", actual)
		}
	}
}

// TestCloseProviderTransformer verifies that a close-provider node is added
// and depends on both the provider node and its consumers.
func TestCloseProviderTransformer(t *testing.T) {
	mod := testModule(t, "transform-provider-basic")
	g := testProviderTransformerGraph(t, mod)

	{
		transform := &MissingProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &ProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &CloseProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformCloseProviderBasicStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}

// TestCloseProviderTransformer_withTargets verifies that targeting a
// nonexistent resource prunes everything, including provider/close nodes.
func TestCloseProviderTransformer_withTargets(t *testing.T) {
	mod := testModule(t, "transform-provider-basic")

	g := testProviderTransformerGraph(t, mod)
	transforms := []GraphTransformer{
		&MissingProviderTransformer{},
		&ProviderTransformer{},
		&CloseProviderTransformer{},
		&TargetsTransformer{
			Targets: []addrs.Targetable{
				addrs.RootModuleInstance.Resource(
					addrs.ManagedResourceMode, "something", "else",
				),
			},
		},
	}

	for _, tr := range transforms {
		if err := tr.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(``)
	if actual != expected {
		t.Fatalf("expected:%s\n\ngot:\n\n%s", expected, actual)
	}
}

// TestMissingProviderTransformer verifies that implied providers with no
// explicit configuration block still get nodes in the graph.
func TestMissingProviderTransformer(t *testing.T) {
	mod := testModule(t, "transform-provider-missing")

	g := testProviderTransformerGraph(t, mod)
	{
		transform := &MissingProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &ProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &CloseProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformMissingProviderBasicStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// TestMissingProviderTransformer_grandchildMissing verifies that a missing
// provider needed only by a grandchild module is added at the root.
func TestMissingProviderTransformer_grandchildMissing(t *testing.T) {
	mod := testModule(t, "transform-provider-missing-grandchild")

	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	{
		transform := testTransformProviders(concrete, mod)
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}
	{
		transform := &TransitiveReductionTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformMissingGrandchildProviderStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// TestPruneProviderTransformer verifies that provider nodes with no consumers
// are removed from the graph.
func TestPruneProviderTransformer(t *testing.T) {
	mod := testModule(t, "transform-provider-prune")

	g := testProviderTransformerGraph(t, mod)
	{
		transform := &MissingProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &ProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &CloseProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	{
		transform := &PruneProviderTransformer{}
		if err := transform.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformPruneProviderBasicStr)
	if actual != expected {
		t.Fatalf("bad:\n\n%s", actual)
	}
}

// the child module resource is attached to the configured parent provider
func TestProviderConfigTransformer_parentProviders(t *testing.T) {
	mod := testModule(t, "transform-provider-inherit")
	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	{
		tf := testTransformProviders(concrete, mod)
		if err := tf.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformModuleProviderConfigStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// the child module resource is attached to the configured grand-parent provider
func TestProviderConfigTransformer_grandparentProviders(t *testing.T) {
	mod := testModule(t, "transform-provider-grandchild-inherit")
	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	{
		tf := testTransformProviders(concrete, mod)
		if err := tf.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testTransformModuleProviderGrandparentStr)
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// TestProviderConfigTransformer_inheritOldSkool verifies legacy implicit
// provider inheritance into a child module that declares no providers.
func TestProviderConfigTransformer_inheritOldSkool(t *testing.T) {
	mod := testModuleInline(t, map[string]string{
		"main.tf": `
provider "test" {
  test_string = "config"
}

module "moda" {
  source = "./moda"
}
`,

		"moda/main.tf": `
resource "test_object" "a" {
}
`,
	})
	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	{
		tf := testTransformProviders(concrete, mod)
		if err := tf.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	expected := `module.moda.test_object.a
  provider["registry.opentofu.org/hashicorp/test"]
provider["registry.opentofu.org/hashicorp/test"]`

	actual := strings.TrimSpace(g.String())
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// Verify that configurations which are not recommended yet supported still work
func TestProviderConfigTransformer_nestedModuleProviders(t *testing.T) {
	mod := testModuleInline(t, map[string]string{
		"main.tf": `
terraform {
  required_providers {
    test = {
      source = "registry.opentofu.org/hashicorp/test"
    }
  }
}

provider "test" {
  alias = "z"
  test_string = "config"
}

module "moda" {
  source = "./moda"
  providers = {
    test.x = test.z
  }
}
`,

		"moda/main.tf": `
terraform {
  required_providers {
    test = {
      source = "registry.opentofu.org/hashicorp/test"
      configuration_aliases = [ test.x ]
    }
  }
}

provider "test" {
  test_string = "config"
}

// this should connect to this module's provider
resource "test_object" "a" {
}

resource "test_object" "x" {
  provider = test.x
}

module "modb" {
  source = "./modb"
}
`,

		"moda/modb/main.tf": `
# this should end up with the provider from the parent module
resource "test_object" "a" {
}
`,
	})
	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	{
		tf := testTransformProviders(concrete, mod)
		if err := tf.Transform(g); err != nil {
			t.Fatalf("err: %s", err)
		}
	}

	expected := `module.moda.module.modb.test_object.a
  module.moda.provider["registry.opentofu.org/hashicorp/test"]
module.moda.provider["registry.opentofu.org/hashicorp/test"]
module.moda.test_object.a
  module.moda.provider["registry.opentofu.org/hashicorp/test"]
module.moda.test_object.x
  provider["registry.opentofu.org/hashicorp/test"].z
provider["registry.opentofu.org/hashicorp/test"].z`

	actual := strings.TrimSpace(g.String())
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// TestProviderConfigTransformer_duplicateLocalName verifies that two local
// names mapping to the same provider FQN are tolerated and produce a single
// provider node.
func TestProviderConfigTransformer_duplicateLocalName(t *testing.T) {
	mod := testModuleInline(t, map[string]string{
		"main.tf": `
terraform {
  required_providers {
    # We have to allow this since it wasn't previously prevented. If the
    # default config is equivalent to the provider config, the user may never
    # see an error.
    dupe = {
      source = "registry.opentofu.org/hashicorp/test"
    }
  }
}

provider "test" {
}
`})
	concrete := func(a *NodeAbstractProvider) dag.Vertex { return a }

	g := testProviderTransformerGraph(t, mod)
	tf := ProviderConfigTransformer{
		Config:   mod,
		Concrete: concrete,
	}
	if err := tf.Transform(g); err != nil {
		t.Fatalf("err: %s", err)
	}

	expected := `provider["registry.opentofu.org/hashicorp/test"]`

	actual := strings.TrimSpace(g.String())
	if actual != expected {
		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
	}
}

// Expected graph strings for the tests above, as produced by Graph.String().

const testTransformProviderBasicStr = `
aws_instance.web
  provider["registry.opentofu.org/hashicorp/aws"]
provider["registry.opentofu.org/hashicorp/aws"]
`

const testTransformCloseProviderBasicStr = `
aws_instance.web
  provider["registry.opentofu.org/hashicorp/aws"]
provider["registry.opentofu.org/hashicorp/aws"]
provider["registry.opentofu.org/hashicorp/aws"] (close)
  aws_instance.web
  provider["registry.opentofu.org/hashicorp/aws"]
`

const testTransformMissingProviderBasicStr = `
aws_instance.web
  provider["registry.opentofu.org/hashicorp/aws"]
foo_instance.web
  provider["registry.opentofu.org/hashicorp/foo"]
provider["registry.opentofu.org/hashicorp/aws"]
provider["registry.opentofu.org/hashicorp/aws"] (close)
  aws_instance.web
  provider["registry.opentofu.org/hashicorp/aws"]
provider["registry.opentofu.org/hashicorp/foo"]
provider["registry.opentofu.org/hashicorp/foo"] (close)
  foo_instance.web
  provider["registry.opentofu.org/hashicorp/foo"]
`

const testTransformMissingGrandchildProviderStr = `
module.sub.module.subsub.bar_instance.two
  provider["registry.opentofu.org/hashicorp/bar"]
module.sub.module.subsub.foo_instance.one
  module.sub.provider["registry.opentofu.org/hashicorp/foo"]
module.sub.provider["registry.opentofu.org/hashicorp/foo"]
provider["registry.opentofu.org/hashicorp/bar"]
`

const testTransformPruneProviderBasicStr = `
foo_instance.web
  provider["registry.opentofu.org/hashicorp/foo"]
provider["registry.opentofu.org/hashicorp/foo"]
provider["registry.opentofu.org/hashicorp/foo"] (close)
  foo_instance.web
  provider["registry.opentofu.org/hashicorp/foo"]
`

const testTransformModuleProviderConfigStr = `
module.child.aws_instance.thing
  provider["registry.opentofu.org/hashicorp/aws"].foo
provider["registry.opentofu.org/hashicorp/aws"].foo
`

const testTransformModuleProviderGrandparentStr = `
module.child.module.grandchild.aws_instance.baz
  provider["registry.opentofu.org/hashicorp/aws"].foo
provider["registry.opentofu.org/hashicorp/aws"].foo
`
diff --git a/pkg/tofu/transform_provisioner.go b/pkg/tofu/transform_provisioner.go
new file mode 100644
index 00000000000..0202c95eaeb
--- /dev/null
+++ b/pkg/tofu/transform_provisioner.go
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

// GraphNodeProvisionerConsumer is an interface that nodes that require
// a provisioner must implement. ProvisionedBy must return the names of the
// provisioners to use.
type GraphNodeProvisionerConsumer interface {
	ProvisionedBy() []string
}
diff --git a/pkg/tofu/transform_reference.go b/pkg/tofu/transform_reference.go
new file mode 100644
index 00000000000..d471782f424
--- /dev/null
+++ b/pkg/tofu/transform_reference.go
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package tofu

import (
	"fmt"
	"log"
	"sort"

	"github.com/hashicorp/hcl/v2"

	"github.com/kubegems/opentofu/pkg/addrs"
	"github.com/kubegems/opentofu/pkg/configs/configschema"
	"github.com/kubegems/opentofu/pkg/dag"
	"github.com/kubegems/opentofu/pkg/lang"
)

// GraphNodeReferenceable must be implemented by any node that represents
// an OpenTofu thing that can be referenced (resource, module, etc.).
//
// Even if the thing has no name, this should return an empty list. By
// implementing this and returning a non-nil result, you say that this CAN
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
	GraphNodeModulePath

	// ReferenceableAddrs returns a list of addresses through which this can be
	// referenced.
	ReferenceableAddrs() []addrs.Referenceable
}

// GraphNodeReferencer must be implemented by nodes that reference other
// OpenTofu items and therefore depend on them.
type GraphNodeReferencer interface {
	GraphNodeModulePath

	// References returns a list of references made by this node, which
	// include both a referenced address and source location information for
	// the reference.
	References() []*addrs.Reference
}

// GraphNodeRootReferencer is implemented by nodes that reference the root
// module, for example module imports
type GraphNodeRootReferencer interface {
	GraphNodeReferencer

	RootReferences() []*addrs.Reference
}

// GraphNodeAttachDependencies is implemented by resource nodes that accept
// the deduplicated set of resource dependencies discovered by
// AttachDependenciesTransformer.
type GraphNodeAttachDependencies interface {
	GraphNodeConfigResource
	AttachDependencies([]addrs.ConfigResource)
}

// graphNodeDependsOn is implemented by resources that need to expose any
// references set via DependsOn in their configuration.
type graphNodeDependsOn interface {
	GraphNodeReferencer
	DependsOn() []*addrs.Reference
}

// graphNodeAttachDataResourceDependsOn records all resources that are transitively
// referenced through depends_on in the configuration. This is used by data
// resources to determine if they can be read during the plan, or if they need
// to be further delayed until apply.
// We can only use an addrs.ConfigResource address here, because modules are
// not yet expanded in the graph. While this will cause some extra data
// resources to show in the plan when their depends_on references may be in
// unrelated module instances, the fact that it only happens when there are any
// resource updates pending means we can still avoid the problem of the
// "perpetual diff"
type graphNodeAttachDataResourceDependsOn interface {
	GraphNodeConfigResource
	graphNodeDependsOn

	// AttachDataResourceDependsOn stores the discovered dependencies in the
	// resource node for evaluation later.
	//
	// The force parameter indicates that even if there are no dependencies,
	// force the data source to act as though there are for refresh purposes.
	// This is needed because yet-to-be-created resources won't be in the
	// initial refresh graph, but may still be referenced through depends_on.
	AttachDataResourceDependsOn(deps []addrs.ConfigResource, force bool)
}

// GraphNodeReferenceOutside is an interface that can optionally be implemented.
// A node that implements it can specify that its own referenceable addresses
// and/or the addresses it references are in a different module than the
// node itself.
//
// Any referenceable addresses returned by ReferenceableAddrs are interpreted
// relative to the returned selfPath.
//
// Any references returned by References are interpreted relative to the
// returned referencePath.
//
// It is valid but not required for either of these paths to match what is
// returned by method Path, though if both match the main Path then there
// is no reason to implement this method.
//
// The primary use-case for this is the nodes representing module input
// variables, since their expressions are resolved in terms of their calling
// module, but they are still referenced from their own module.
type GraphNodeReferenceOutside interface {
	// ReferenceOutside returns a path in which any references from this node
	// are resolved.
	ReferenceOutside() (selfPath, referencePath addrs.Module)
}

// ReferenceTransformer is a GraphTransformer that connects all the
// nodes that reference each other in order to form the proper ordering.
type ReferenceTransformer struct{}

// Transform connects each non-destroy vertex to every vertex it references,
// so that graph walks evaluate referenced objects first.
func (t *ReferenceTransformer) Transform(g *Graph) error {
	// Build a reference map so we can efficiently look up the references
	vs := g.Vertices()
	m := NewReferenceMap(vs)

	// Find the things that reference things and connect them
	for _, v := range vs {
		if _, ok := v.(GraphNodeDestroyer); ok {
			// destroy nodes references are not connected, since they can only
			// use their own state.
			continue
		}

		parents := m.References(v)
		parentsDbg := make([]string, len(parents))
		for i, v := range parents {
			parentsDbg[i] = dag.VertexName(v)
		}
		log.Printf(
			"[DEBUG] ReferenceTransformer: %q references: %v",
			dag.VertexName(v), parentsDbg)

		for _, parent := range parents {
			// A destroy plan relies solely on the state, so we only need to
			// ensure that temporary values are connected to get the evaluation
			// order correct. Any references to destroy nodes will cause
			// cycles, because they are connected in reverse order.
			if _, ok := parent.(GraphNodeDestroyer); ok {
				continue
			}

			if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(v, parent) {
				g.Connect(dag.BasicEdge(v, parent))
			} else {
				log.Printf("[TRACE] ReferenceTransformer: skipping %s => %s inter-module-instance dependency", dag.VertexName(v), dag.VertexName(parent))
			}
		}

		// NOTE(review): this trailing "continue" is a no-op because it is the
		// last statement of the loop body; it appears to be vestigial and
		// could be removed.
		if len(parents) > 0 {
			continue
		}
	}

	return nil
}

// depMap maps a resource's address string to its addrs.ConfigResource
// address, and is used to deduplicate discovered dependencies.
type depMap map[string]addrs.ConfigResource

// add stores the vertex if it represents a resource in the
// graph.
func (m depMap) add(v dag.Vertex) {
	// we're only concerned with resources which may have changes that
	// need to be applied.
	switch v := v.(type) {
	case GraphNodeResourceInstance:
		instAddr := v.ResourceInstanceAddr()
		addr := instAddr.ContainingResource().Config()
		m[addr.String()] = addr
	case GraphNodeConfigResource:
		addr := v.ResourceAddr()
		m[addr.String()] = addr
	}
}

// attachDataResourceDependsOnTransformer records all resources transitively
// referenced through a configuration depends_on.
type attachDataResourceDependsOnTransformer struct {
}

// Transform attaches, to each data resource node, the set of resources it
// transitively depends on through depends_on, so the data source can decide
// whether it may be read during plan or must wait until apply.
func (t attachDataResourceDependsOnTransformer) Transform(g *Graph) error {
	// First we need to make a map of referenceable addresses to their vertices.
	// This is very similar to what's done in ReferenceTransformer, but we keep
	// implementation separate as they may need to change independently.
	vertices := g.Vertices()
	refMap := NewReferenceMap(vertices)

	for _, v := range vertices {
		depender, ok := v.(graphNodeAttachDataResourceDependsOn)
		if !ok {
			continue
		}

		// Only data resources need to attach depends_on, so they can
		// determine if they are eligible to be read during plan.
		if depender.ResourceAddr().Resource.Mode != addrs.DataResourceMode {
			continue
		}

		// depMap will only add resource references then dedupe
		deps := make(depMap)
		dependsOnDeps, fromModule := refMap.dependsOn(g, depender)
		for _, dep := range dependsOnDeps {
			// add the dependency
			deps.add(dep)
		}

		res := make([]addrs.ConfigResource, 0, len(deps))
		for _, d := range deps {
			res = append(res, d)
		}

		log.Printf("[TRACE] attachDataDependenciesTransformer: %s depends on %s", depender.ResourceAddr(), res)
		depender.AttachDataResourceDependsOn(res, fromModule)
	}

	return nil
}

// AttachDependenciesTransformer records all resource dependencies for each
// instance, and attaches the addresses to the node itself. Managed resource
// will record these in the state for proper ordering of destroy operations.
type AttachDependenciesTransformer struct {
}

// Transform collects the ancestor resources of each attachable node,
// deduplicates and sorts them, and attaches the result to the node.
func (t AttachDependenciesTransformer) Transform(g *Graph) error {
	for _, v := range g.Vertices() {
		attacher, ok := v.(GraphNodeAttachDependencies)
		if !ok {
			continue
		}
		selfAddr := attacher.ResourceAddr()

		ans, err := g.Ancestors(v)
		if err != nil {
			return err
		}

		// dedupe addrs when there's multiple instances involved, or
		// multiple paths in the un-reduced graph
		depMap := map[string]addrs.ConfigResource{}
		for _, d := range ans {
			var addr addrs.ConfigResource

			switch d := d.(type) {
			case GraphNodeResourceInstance:
				instAddr := d.ResourceInstanceAddr()
				addr = instAddr.ContainingResource().Config()
			case GraphNodeConfigResource:
				addr = d.ResourceAddr()
			default:
				continue
			}

			if addr.Equal(selfAddr) {
				continue
			}
			depMap[addr.String()] = addr
		}

		deps := make([]addrs.ConfigResource, 0, len(depMap))
		for _, d := range depMap {
			deps = append(deps, d)
		}
		// sort for deterministic ordering in state and logs
		sort.Slice(deps, func(i, j int) bool {
			return deps[i].String() < deps[j].String()
		})

		log.Printf("[TRACE] AttachDependenciesTransformer: %s depends on %s",
			attacher.ResourceAddr(), deps)
		attacher.AttachDependencies(deps)
	}

	return nil
}

// isDependableResource reports whether the vertex represents a resource or
// resource instance that can participate in dependency tracking.
func isDependableResource(v dag.Vertex) bool {
	switch v.(type) {
	case GraphNodeResourceInstance:
		return true
	case GraphNodeConfigResource:
		return true
	}
	return false
}

// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph, mapping internal reference keys (as produced by
// the mapKey method) to one or more vertices that are identified by each key.
type ReferenceMap map[string][]dag.Vertex

// References returns the set of vertices that the given vertex refers to,
// and any referenced addresses that do not have corresponding vertices.
func (m ReferenceMap) References(v dag.Vertex) []dag.Vertex {
	rn, ok := v.(GraphNodeReferencer)
	if !ok {
		return nil
	}

	var matches []dag.Vertex

	if rrn, ok := rn.(GraphNodeRootReferencer); ok {
		for _, ref := range rrn.RootReferences() {
			matches = append(matches, m.addReference(addrs.RootModule, v, ref)...)
		}
	}

	for _, ref := range rn.References() {
		matches = append(matches, m.addReference(vertexReferencePath(v), v, ref)...)
	}

	return matches
}

// addReference returns the set of vertices that the given reference requires
// within a given module. It additionally excludes the current vertex.
func (m ReferenceMap) addReference(path addrs.Module, current dag.Vertex, ref *addrs.Reference) []dag.Vertex {
	var matches []dag.Vertex

	subject := ref.Subject

	key := m.mapKey(path, subject)
	if _, exists := m[key]; !exists {
		// If what we were looking for was a ResourceInstance then we
		// might be in a resource-oriented graph rather than an
		// instance-oriented graph, and so we'll see if we have the
		// resource itself instead.
		switch ri := subject.(type) {
		case addrs.ResourceInstance:
			subject = ri.ContainingResource()
		case addrs.ResourceInstancePhase:
			subject = ri.ContainingResource()
		case addrs.ModuleCallInstanceOutput:
			subject = ri.ModuleCallOutput()
		case addrs.ModuleCallInstance:
			subject = ri.Call
		case addrs.ProviderFunction:
			return nil
		default:
			log.Printf("[INFO] ReferenceTransformer: reference not found: %q", subject)
			return nil
		}
		key = m.mapKey(path, subject)
	}
	vertices := m[key]
	for _, rv := range vertices {
		// don't include self-references
		if rv == current {
			continue
		}
		matches = append(matches, rv)
	}
	return matches
}

// dependsOn returns the set of vertices that the given vertex refers to from
// the configured depends_on. The bool return value indicates if depends_on was
// found in a parent module configuration.
func (m ReferenceMap) dependsOn(g *Graph, depender graphNodeDependsOn) ([]dag.Vertex, bool) {
	var res []dag.Vertex
	fromModule := false

	refs := depender.DependsOn()

	// get any implied dependencies for data sources
	refs = append(refs, m.dataDependsOn(depender)...)

	// This is where we record that a module has depends_on configured.
	if _, ok := depender.(*nodeExpandModule); ok && len(refs) > 0 {
		fromModule = true
	}

	for _, ref := range refs {
		subject := ref.Subject

		key := m.referenceMapKey(depender, subject)
		vertices, ok := m[key]
		if !ok {
			// the ReferenceMap generates all possible keys, so any warning
			// here is probably not useful for this implementation.
			continue
		}
		for _, rv := range vertices {
			// don't include self-references
			if rv == depender {
				continue
			}
			res = append(res, rv)

			// Check any ancestors for transitive dependencies when we're
			// not pointed directly at a resource. We can't be much more
			// precise here, since in order to maintain our guarantee that data
			// sources will wait for explicit dependencies, if those dependencies
			// happen to be a module, output, or variable, we have to find some
			// upstream managed resource in order to check for a planned
			// change.
			if _, ok := rv.(GraphNodeConfigResource); !ok {
				ans, _ := g.Ancestors(rv)
				for _, v := range ans {
					if isDependableResource(v) {
						res = append(res, v)
					}
				}
			}
		}
	}

	parentDeps, fromParentModule := m.parentModuleDependsOn(g, depender)
	res = append(res, parentDeps...)

	return res, fromModule || fromParentModule
}

// Return extra depends_on references if this is a data source.
// For data sources we implicitly treat references to managed resources as
// depends_on entries. If a data source references a managed resource, even if
// that reference is resolvable, it stands to reason that the user intends for
// the data source to require that resource in some way.
func (m ReferenceMap) dataDependsOn(depender graphNodeDependsOn) []*addrs.Reference {
	var refs []*addrs.Reference
	if n, ok := depender.(GraphNodeConfigResource); ok &&
		n.ResourceAddr().Resource.Mode == addrs.DataResourceMode {
		for _, r := range depender.References() {

			var resAddr addrs.Resource
			switch s := r.Subject.(type) {
			case addrs.Resource:
				resAddr = s
			case addrs.ResourceInstance:
				resAddr = s.Resource
				r.Subject = resAddr
			case addrs.ProviderFunction:
				continue
			}

			if resAddr.Mode != addrs.ManagedResourceMode {
				// We only want to wait on directly referenced managed resources.
				// Data sources have no external side effects, so normal
				// references to them in the config will suffice for proper
				// ordering.
				continue
			}

			refs = append(refs, r)
		}
	}
	return refs
}

// parentModuleDependsOn returns the set of vertices that a data source's parent
// module references through the module call's depends_on.
The bool return +// value indicates if depends_on was found in a parent module configuration. +func (m ReferenceMap) parentModuleDependsOn(g *Graph, depender graphNodeDependsOn) ([]dag.Vertex, bool) { + var res []dag.Vertex + fromModule := false + + // Look for containing modules with DependsOn. + // This should be connected directly to the module node, so we only need to + // look one step away. + for _, v := range g.DownEdges(depender) { + // we're only concerned with module expansion nodes here. + mod, ok := v.(*nodeExpandModule) + if !ok { + continue + } + + deps, fromParentModule := m.dependsOn(g, mod) + for _, dep := range deps { + // add the dependency + res = append(res, dep) + + // and check any transitive resource dependencies for more resources + ans, _ := g.Ancestors(dep) + for _, v := range ans { + if isDependableResource(v) { + res = append(res, v) + } + } + } + fromModule = fromModule || fromParentModule + } + + return res, fromModule +} + +func (m *ReferenceMap) mapKey(path addrs.Module, addr addrs.Referenceable) string { + return fmt.Sprintf("%s|%s", path.String(), addr.String()) +} + +// vertexReferenceablePath returns the path in which the given vertex can be +// referenced. This is the path that its results from ReferenceableAddrs +// are considered to be relative to. +// +// Only GraphNodeModulePath implementations can be referenced, so this method will +// panic if the given vertex does not implement that interface. +func vertexReferenceablePath(v dag.Vertex) addrs.Module { + sp, ok := v.(GraphNodeModulePath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeModulePath", sp)) + } + + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex is referenced from a different module than where it was + // declared. 
+ path, _ := outside.ReferenceOutside() + return path + } + + // Vertex is referenced from the same module as where it was declared. + return sp.ModulePath() +} + +// vertexReferencePath returns the path in which references _from_ the given +// vertex must be interpreted. +// +// Only GraphNodeModulePath implementations can have references, so this method +// will panic if the given vertex does not implement that interface. +func vertexReferencePath(v dag.Vertex) addrs.Module { + sp, ok := v.(GraphNodeModulePath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeModulePath", v)) + } + + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex makes references to objects in a different module than where + // it was declared. + _, path := outside.ReferenceOutside() + return path + } + + // Vertex makes references to objects in the same module as where it + // was declared. + return sp.ModulePath() +} + +// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex +// that the reference is from, and "addr" is the address of the object being +// referenced. +// +// The result is an opaque string that includes both the address of the given +// object and the address of the module instance that object belongs to. +// +// Only GraphNodeModulePath implementations can be referrers, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { + path := vertexReferencePath(referrer) + return m.mapKey(path, addr) +} + +// NewReferenceMap is used to create a new reference map for the +// given set of vertices. 
+func NewReferenceMap(vs []dag.Vertex) ReferenceMap { + // Build the lookup table + m := make(ReferenceMap) + for _, v := range vs { + // We're only looking for referenceable nodes + rn, ok := v.(GraphNodeReferenceable) + if !ok { + continue + } + + path := vertexReferenceablePath(v) + + // Go through and cache them + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(path, addr) + m[key] = append(m[key], v) + } + } + + return m +} + +// ReferencesFromConfig returns the references that a configuration has +// based on the interpolated variables in a configuration. +func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { + if body == nil { + return nil + } + refs, _ := lang.ReferencesInBlock(addrs.ParseRef, body, schema) + return refs +} diff --git a/pkg/tofu/transform_reference_test.go b/pkg/tofu/transform_reference_test.go new file mode 100644 index 00000000000..da481382e9a --- /dev/null +++ b/pkg/tofu/transform_reference_test.go @@ -0,0 +1,324 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "sort" + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/dag" +) + +func TestReferenceTransformer_simple(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(&graphNodeRefParentTest{ + NameValue: "A", + Names: []string{"A"}, + }) + g.Add(&graphNodeRefChildTest{ + NameValue: "B", + Refs: []string{"A"}, + }) + + tf := &ReferenceTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformRefBasicStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestReferenceTransformer_self(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(&graphNodeRefParentTest{ + NameValue: "A", + Names: []string{"A"}, + }) + g.Add(&graphNodeRefChildTest{ + NameValue: "B", + Refs: []string{"A", "B"}, + }) + + tf := &ReferenceTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformRefBasicStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestReferenceTransformer_path(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add(&graphNodeRefParentTest{ + NameValue: "A", + Names: []string{"A"}, + }) + g.Add(&graphNodeRefChildTest{ + NameValue: "B", + Refs: []string{"A"}, + }) + g.Add(&graphNodeRefParentTest{ + NameValue: "child.A", + PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, + Names: []string{"A"}, + }) + g.Add(&graphNodeRefChildTest{ + NameValue: "child.B", + PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, + Refs: []string{"A"}, + }) + + tf := &ReferenceTransformer{} + if err := tf.Transform(&g); err != nil 
{ + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformRefPathStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestReferenceTransformer_resourceInstances(t *testing.T) { + // Our reference analyses are all done based on unexpanded addresses + // so that we can use this transformer both in the plan graph (where things + // are not expanded yet) and the apply graph (where resource instances are + // pre-expanded but nothing else is.) + // However, that would make the result too conservative about instances + // of the same resource in different instances of the same module, so we + // make an exception for that situation in particular, keeping references + // between resource instances segregated by their containing module + // instance. + g := Graph{Path: addrs.RootModuleInstance} + moduleInsts := []addrs.ModuleInstance{ + { + { + Name: "foo", InstanceKey: addrs.IntKey(0), + }, + }, + { + { + Name: "foo", InstanceKey: addrs.IntKey(1), + }, + }, + } + resourceAs := make([]addrs.AbsResourceInstance, len(moduleInsts)) + for i, moduleInst := range moduleInsts { + resourceAs[i] = addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "thing", + Name: "a", + }.Instance(addrs.NoKey).Absolute(moduleInst) + } + resourceBs := make([]addrs.AbsResourceInstance, len(moduleInsts)) + for i, moduleInst := range moduleInsts { + resourceBs[i] = addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "thing", + Name: "b", + }.Instance(addrs.NoKey).Absolute(moduleInst) + } + g.Add(&graphNodeFakeResourceInstance{ + Addr: resourceAs[0], + }) + g.Add(&graphNodeFakeResourceInstance{ + Addr: resourceBs[0], + Refs: []*addrs.Reference{ + { + Subject: resourceAs[0].Resource, + }, + }, + }) + g.Add(&graphNodeFakeResourceInstance{ + Addr: resourceAs[1], + }) + g.Add(&graphNodeFakeResourceInstance{ + Addr: resourceBs[1], + Refs: []*addrs.Reference{ + { + 
Subject: resourceAs[1].Resource, + }, + }, + }) + + tf := &ReferenceTransformer{} + if err := tf.Transform(&g); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Resource B should be connected to resource A in each module instance, + // but there should be no connections between the two module instances. + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +module.foo[0].thing.a +module.foo[0].thing.b + module.foo[0].thing.a +module.foo[1].thing.a +module.foo[1].thing.b + module.foo[1].thing.a +`) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +func TestReferenceMapReferences(t *testing.T) { + cases := map[string]struct { + Nodes []dag.Vertex + Check dag.Vertex + Result []string + }{ + "simple": { + Nodes: []dag.Vertex{ + &graphNodeRefParentTest{ + NameValue: "A", + Names: []string{"A"}, + }, + }, + Check: &graphNodeRefChildTest{ + NameValue: "foo", + Refs: []string{"A"}, + }, + Result: []string{"A"}, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + rm := NewReferenceMap(tc.Nodes) + result := rm.References(tc.Check) + + var resultStr []string + for _, v := range result { + resultStr = append(resultStr, dag.VertexName(v)) + } + + sort.Strings(resultStr) + sort.Strings(tc.Result) + if !reflect.DeepEqual(resultStr, tc.Result) { + t.Fatalf("bad: %#v", resultStr) + } + }) + } +} + +type graphNodeRefParentTest struct { + NameValue string + PathValue addrs.ModuleInstance + Names []string +} + +var _ GraphNodeReferenceable = (*graphNodeRefParentTest)(nil) + +func (n *graphNodeRefParentTest) Name() string { + return n.NameValue +} + +func (n *graphNodeRefParentTest) ReferenceableAddrs() []addrs.Referenceable { + ret := make([]addrs.Referenceable, len(n.Names)) + for i, name := range n.Names { + ret[i] = addrs.LocalValue{Name: name} + } + return ret +} + +func (n *graphNodeRefParentTest) Path() addrs.ModuleInstance { + return n.PathValue +} + +func (n 
*graphNodeRefParentTest) ModulePath() addrs.Module { + return n.PathValue.Module() +} + +type graphNodeRefChildTest struct { + NameValue string + PathValue addrs.ModuleInstance + Refs []string +} + +var _ GraphNodeReferencer = (*graphNodeRefChildTest)(nil) + +func (n *graphNodeRefChildTest) Name() string { + return n.NameValue +} + +func (n *graphNodeRefChildTest) References() []*addrs.Reference { + ret := make([]*addrs.Reference, len(n.Refs)) + for i, name := range n.Refs { + ret[i] = &addrs.Reference{ + Subject: addrs.LocalValue{Name: name}, + } + } + return ret +} + +func (n *graphNodeRefChildTest) Path() addrs.ModuleInstance { + return n.PathValue +} + +func (n *graphNodeRefChildTest) ModulePath() addrs.Module { + return n.PathValue.Module() +} + +type graphNodeFakeResourceInstance struct { + Addr addrs.AbsResourceInstance + Refs []*addrs.Reference +} + +var _ GraphNodeResourceInstance = (*graphNodeFakeResourceInstance)(nil) +var _ GraphNodeReferenceable = (*graphNodeFakeResourceInstance)(nil) +var _ GraphNodeReferencer = (*graphNodeFakeResourceInstance)(nil) + +func (n *graphNodeFakeResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { + return n.Addr +} + +func (n *graphNodeFakeResourceInstance) ModulePath() addrs.Module { + return n.Addr.Module.Module() +} + +func (n *graphNodeFakeResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Resource} +} + +func (n *graphNodeFakeResourceInstance) References() []*addrs.Reference { + return n.Refs +} + +func (n *graphNodeFakeResourceInstance) StateDependencies() []addrs.ConfigResource { + return nil +} + +func (n *graphNodeFakeResourceInstance) String() string { + return n.Addr.String() +} + +const testTransformRefBasicStr = ` +A +B + A +` + +const testTransformRefPathStr = ` +A +B + A +child.A +child.B + child.A +` diff --git a/pkg/tofu/transform_removed_modules.go b/pkg/tofu/transform_removed_modules.go new file mode 100644 index 00000000000..df0adb51c6e 
--- /dev/null +++ b/pkg/tofu/transform_removed_modules.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/states" +) + +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Config *configs.Config // root node in the config tree + State *states.State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state! + if t.State == nil { + return nil + } + + removed := map[string]addrs.Module{} + + for _, m := range t.State.Modules { + cc := t.Config.DescendentForInstance(m.Addr) + if cc != nil { + continue + } + removed[m.Addr.Module().String()] = m.Addr.Module() + log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) + } + + // add closers to collect any module instances we're removing + for _, modAddr := range removed { + closer := &nodeCloseModule{ + Addr: modAddr, + } + g.Add(closer) + } + + return nil +} diff --git a/pkg/tofu/transform_resource_count.go b/pkg/tofu/transform_resource_count.go new file mode 100644 index 00000000000..7cca5748190 --- /dev/null +++ b/pkg/tofu/transform_resource_count.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/dag" +) + +// ResourceCountTransformer is a GraphTransformer that expands the count +// out for a specific resource. 
+// +// This assumes that the count is already interpolated. +type ResourceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + Schema *configschema.Block + + Addr addrs.ConfigResource + InstanceAddrs []addrs.AbsResourceInstance +} + +func (t *ResourceCountTransformer) Transform(g *Graph) error { + for _, addr := range t.InstanceAddrs { + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + log.Printf("[TRACE] ResourceCountTransformer: adding %s as %T", addr, node) + g.Add(node) + } + return nil +} diff --git a/pkg/tofu/transform_root.go b/pkg/tofu/transform_root.go new file mode 100644 index 00000000000..db7f318d408 --- /dev/null +++ b/pkg/tofu/transform_root.go @@ -0,0 +1,92 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/dag" +) + +const rootNodeName = "root" + +// RootTransformer is a GraphTransformer that adds a root to the graph. +type RootTransformer struct{} + +func (t *RootTransformer) Transform(g *Graph) error { + addRootNodeToGraph(g) + return nil +} + +// addRootNodeToGraph modifies the given graph in-place so that it has a root +// node if it didn't already have one and so that any other node which doesn't +// already depend on something will depend on that root node. +// +// After this function returns, the graph will have only one node that doesn't +// depend on any other nodes. +func addRootNodeToGraph(g *Graph) { + // We always add the root node. This is a singleton so if it's already + // in the graph this will do nothing and just retain the existing root node. 
+ // + // Note that rootNode is intentionally added by value and not by pointer + // so that all root nodes will be equal to one another and therefore + // coalesce when two valid graphs get merged together into a single graph. + g.Add(rootNode) + + // Everything that doesn't already depend on at least one other node will + // depend on the root node, except the root node itself. + for _, v := range g.Vertices() { + if v == dag.Vertex(rootNode) { + continue + } + + if g.UpEdges(v).Len() == 0 { + g.Connect(dag.BasicEdge(rootNode, v)) + } + } +} + +type graphNodeRoot struct{} + +// rootNode is the singleton value representing all root graph nodes. +// +// The root node for all graphs should be this value directly, and in particular +// _not_ a pointer to this value. Using the value directly here means that +// multiple root nodes will always coalesce together when subsuming one graph +// into another. +var rootNode graphNodeRoot + +func (n graphNodeRoot) Name() string { + return rootNodeName +} + +// CloseRootModuleTransformer is a GraphTransformer that adds a root to the graph. +type CloseRootModuleTransformer struct { + RootConfig *configs.Config +} + +func (t *CloseRootModuleTransformer) Transform(g *Graph) error { + // close the root module + closeRoot := &nodeCloseModule{ + RootConfig: t.RootConfig, + } + g.Add(closeRoot) + + // since this is closing the root module, make it depend on everything in + // the root module. + for _, v := range g.Vertices() { + if v == closeRoot { + continue + } + + // since this is closing the root module, and must be last, we can + // connect to anything that doesn't have any up edges. 
+ if g.UpEdges(v).Len() == 0 { + g.Connect(dag.BasicEdge(closeRoot, v)) + } + } + + return nil +} diff --git a/pkg/tofu/transform_root_test.go b/pkg/tofu/transform_root_test.go new file mode 100644 index 00000000000..17fc8f5494b --- /dev/null +++ b/pkg/tofu/transform_root_test.go @@ -0,0 +1,100 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestRootTransformer(t *testing.T) { + t.Run("many nodes", func(t *testing.T) { + mod := testModule(t, "transform-root-basic") + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &MissingProviderTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &ProviderTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &RootTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformRootBasicStr) + if actual != expected { + t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) + } + + root, err := g.Root() + if err != nil { + t.Fatalf("err: %s", err) + } + if _, ok := root.(graphNodeRoot); !ok { + t.Fatalf("bad: %#v", root) + } + }) + + t.Run("only one initial node", func(t *testing.T) { + g := Graph{Path: addrs.RootModuleInstance} + g.Add("foo") + addRootNodeToGraph(&g) + got := strings.TrimSpace(g.String()) + want := strings.TrimSpace(` +foo +root + foo +`) + if got != want { + t.Errorf("wrong final graph\ngot:\n%s\nwant:\n%s", got, want) + } + }) + + t.Run("graph initially empty", func(t *testing.T) { + g := Graph{Path: 
addrs.RootModuleInstance} + addRootNodeToGraph(&g) + got := strings.TrimSpace(g.String()) + want := `root` + if got != want { + t.Errorf("wrong final graph\ngot:\n%s\nwant:\n%s", got, want) + } + }) + +} + +const testTransformRootBasicStr = ` +aws_instance.foo + provider["registry.opentofu.org/hashicorp/aws"] +do_droplet.bar + provider["registry.opentofu.org/hashicorp/do"] +provider["registry.opentofu.org/hashicorp/aws"] +provider["registry.opentofu.org/hashicorp/do"] +root + aws_instance.foo + do_droplet.bar +` diff --git a/pkg/tofu/transform_state.go b/pkg/tofu/transform_state.go new file mode 100644 index 00000000000..cd422ffeba6 --- /dev/null +++ b/pkg/tofu/transform_state.go @@ -0,0 +1,77 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "log" + + "github.com/kubegems/opentofu/pkg/states" +) + +// StateTransformer is a GraphTransformer that adds the elements of +// the state to the graph. +// +// This transform is used for example by the DestroyPlanGraphBuilder to ensure +// that only resources that are in the state are represented in the graph. +type StateTransformer struct { + // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract + // resource instance nodes that this transformer will create. + // + // If either of these is nil, the objects of that type will be skipped and + // not added to the graph at all. It doesn't make sense to use this + // transformer without setting at least one of these, since that would + // skip everything and thus be a no-op. 
+ ConcreteCurrent ConcreteResourceInstanceNodeFunc + ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc + + State *states.State +} + +func (t *StateTransformer) Transform(g *Graph) error { + if t.State == nil { + log.Printf("[TRACE] StateTransformer: state is nil, so nothing to do") + return nil + } + + switch { + case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") + case t.ConcreteCurrent != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") + case t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") + default: + log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") + } + + for _, ms := range t.State.Modules { + for _, rs := range ms.Resources { + resourceAddr := rs.Addr + + for key, is := range rs.Instances { + addr := resourceAddr.Instance(key) + + if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteCurrent(abstract) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) + } + + if t.ConcreteDeposed != nil { + for dk := range is.Deposed { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteDeposed(abstract, dk) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) + } + } + } + } + } + + return nil +} diff --git a/pkg/tofu/transform_targets.go b/pkg/tofu/transform_targets.go new file mode 100644 index 00000000000..0bc1d118c9d --- /dev/null +++ b/pkg/tofu/transform_targets.go @@ -0,0 +1,164 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package tofu
+
+import (
+	"log"
+
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/kubegems/opentofu/pkg/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list of targets
+// provided will contain every target provided, and each implementing graph
+// node must filter this list to targets considered relevant.
+type GraphNodeTargetable interface {
+	SetTargets([]addrs.Targetable)
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
+type TargetsTransformer struct {
+	// List of targeted resource names specified by the user
+	Targets []addrs.Targetable
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+	if len(t.Targets) > 0 {
+		targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
+		if err != nil {
+			return err
+		}
+
+		for _, v := range g.Vertices() {
+			if !targetedNodes.Include(v) {
+				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+				g.Remove(v)
+			}
+		}
+	}
+
+	return nil
+}
+
+// Returns a set of targeted nodes. A targeted node is either addressed
+// directly, addressed indirectly via its container, or it's a dependency of a
+// targeted node.
+func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (dag.Set, error) {
+	targetedNodes := make(dag.Set)
+
+	vertices := g.Vertices()
+
+	for _, v := range vertices {
+		if t.nodeIsTarget(v, addrs) {
+			targetedNodes.Add(v)
+
+			// We inform nodes that ask about the list of targets - helps for nodes
+			// that need to dynamically expand. Note that this only occurs for nodes
+			// that are already directly targeted. 
+			if tn, ok := v.(GraphNodeTargetable); ok {
+				tn.SetTargets(addrs)
+			}
+
+			deps, _ := g.Ancestors(v)
+			for _, d := range deps {
+				targetedNodes.Add(d)
+			}
+		}
+	}
+
+	// It is expected that outputs which are only derived from targeted
+	// resources are also updated. While we don't include any other possible
+	// side effects from the targeted nodes, these are added because outputs
+	// cannot be targeted on their own.
+	// Start by finding the root module output nodes themselves
+	for _, v := range vertices {
+		// outputs are all temporary value types
+		tv, ok := v.(graphNodeTemporaryValue)
+		if !ok {
+			continue
+		}
+
+		// root module outputs indicate that while they are an output type,
+		// they are not temporary and will return false here.
+		if tv.temporaryValue() {
+			continue
+		}
+
+		// If this output is descended only from targeted resources, then we
+		// will keep it
+		deps, _ := g.Ancestors(v)
+		found := 0
+		for _, d := range deps {
+			switch d.(type) {
+			case GraphNodeResourceInstance:
+			case GraphNodeConfigResource:
+			default:
+				continue
+			}
+
+			if !targetedNodes.Include(d) {
+				// this dependency isn't being targeted, so we can't process this
+				// output
+				found = 0
+				break
+			}
+
+			found++
+		}
+
+		if found > 0 {
+			// we found an output we can keep; add it, and all its dependencies
+			targetedNodes.Add(v)
+			for _, d := range deps {
+				targetedNodes.Add(d)
+			}
+		}
+	}
+
+	return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool {
+	var vertexAddr addrs.Targetable
+	switch r := v.(type) {
+	case GraphNodeResourceInstance:
+		vertexAddr = r.ResourceInstanceAddr()
+	case GraphNodeConfigResource:
+		vertexAddr = r.ResourceAddr()
+
+	default:
+		// Only resource and resource instance nodes can be targeted. 
+ return false + } + + for _, targetAddr := range targets { + switch vertexAddr.(type) { + case addrs.ConfigResource: + // Before expansion happens, we only have nodes that know their + // ConfigResource address. We need to take the more specific + // target addresses and generalize them in order to compare with a + // ConfigResource. + switch target := targetAddr.(type) { + case addrs.AbsResourceInstance: + targetAddr = target.ContainingResource().Config() + case addrs.AbsResource: + targetAddr = target.Config() + case addrs.ModuleInstance: + targetAddr = target.Module() + } + } + + if targetAddr.TargetContains(vertexAddr) { + return true + } + } + + return false +} diff --git a/pkg/tofu/transform_targets_test.go b/pkg/tofu/transform_targets_test.go new file mode 100644 index 00000000000..bd8c292c41b --- /dev/null +++ b/pkg/tofu/transform_targets_test.go @@ -0,0 +1,207 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestTargetsTransformer(t *testing.T) { + mod := testModule(t, "transform-targets-basic") + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &ReferenceTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &TargetsTransformer{ + Targets: []addrs.Targetable{ + addrs.RootModuleInstance.Resource( + addrs.ManagedResourceMode, "aws_instance", "me", + ), + }, + } + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + 
expected := strings.TrimSpace(` +aws_instance.me + aws_subnet.me +aws_subnet.me + aws_vpc.me +aws_vpc.me + `) + if actual != expected { + t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) + } +} + +func TestTargetsTransformer_downstream(t *testing.T) { + mod := testModule(t, "transform-targets-downstream") + + g := Graph{Path: addrs.RootModuleInstance} + { + transform := &ConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &OutputTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &ReferenceTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &TargetsTransformer{ + Targets: []addrs.Targetable{ + addrs.RootModuleInstance. + Child("child", addrs.NoKey). + Child("grandchild", addrs.NoKey). + Resource( + addrs.ManagedResourceMode, "aws_instance", "foo", + ), + }, + } + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + actual := strings.TrimSpace(g.String()) + // Even though we only asked to target the grandchild resource, all of the + // outputs that descend from it are also targeted. 
+ expected := strings.TrimSpace(` +module.child.module.grandchild.aws_instance.foo +module.child.module.grandchild.output.id (expand) + module.child.module.grandchild.aws_instance.foo +module.child.output.grandchild_id (expand) + module.child.module.grandchild.output.id (expand) +output.grandchild_id (expand) + module.child.output.grandchild_id (expand) + `) + if actual != expected { + t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) + } +} + +// This tests the TargetsTransformer targeting a whole module, +// rather than a resource within a module instance. +func TestTargetsTransformer_wholeModule(t *testing.T) { + mod := testModule(t, "transform-targets-downstream") + + g := Graph{Path: addrs.RootModuleInstance} + { + transform := &ConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &OutputTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + { + transform := &ReferenceTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &TargetsTransformer{ + Targets: []addrs.Targetable{ + addrs.RootModule. + Child("child"). + Child("grandchild"), + }, + } + if err := transform.Transform(&g); err != nil { + t.Fatalf("%T failed: %s", transform, err) + } + } + + actual := strings.TrimSpace(g.String()) + // Even though we only asked to target the grandchild module, all of the + // outputs that descend from it are also targeted. 
+ expected := strings.TrimSpace(` +module.child.module.grandchild.aws_instance.foo +module.child.module.grandchild.output.id (expand) + module.child.module.grandchild.aws_instance.foo +module.child.output.grandchild_id (expand) + module.child.module.grandchild.output.id (expand) +output.grandchild_id (expand) + module.child.output.grandchild_id (expand) + `) + if actual != expected { + t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) + } +} diff --git a/pkg/tofu/transform_transitive_reduction.go b/pkg/tofu/transform_transitive_reduction.go new file mode 100644 index 00000000000..dd5ba6752ef --- /dev/null +++ b/pkg/tofu/transform_transitive_reduction.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// TransitiveReductionTransformer is a GraphTransformer that +// finds the transitive reduction of the graph. For a definition of +// transitive reduction, see [Wikipedia](https://en.wikipedia.org/wiki/Transitive_reduction). +type TransitiveReductionTransformer struct{} + +func (t *TransitiveReductionTransformer) Transform(g *Graph) error { + // If the graph isn't valid, skip the transitive reduction. + // We don't error here because OpenTofu itself handles graph + // validation in a better way, or we assume it does. + if err := g.Validate(); err != nil { + return nil + } + + // Do it + g.TransitiveReduction() + + return nil +} diff --git a/pkg/tofu/transform_transitive_reduction_test.go b/pkg/tofu/transform_transitive_reduction_test.go new file mode 100644 index 00000000000..c5bc1184b50 --- /dev/null +++ b/pkg/tofu/transform_transitive_reduction_test.go @@ -0,0 +1,94 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/zclconf/go-cty/cty" +) + +func TestTransitiveReductionTransformer(t *testing.T) { + mod := testModule(t, "transform-trans-reduce-basic") + + g := Graph{Path: addrs.RootModuleInstance} + { + tf := &ConfigTransformer{Config: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + t.Logf("graph after ConfigTransformer:\n%s", g.String()) + } + + { + transform := &AttachResourceConfigTransformer{Config: mod} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &AttachSchemaTransformer{ + Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]providers.ProviderSchema{ + addrs.NewDefaultProvider("aws"): { + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "A": { + Type: cty.String, + Optional: true, + }, + "B": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + }, + }, + }, t), + } + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &ReferenceTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + t.Logf("graph after ReferenceTransformer:\n%s", g.String()) + } + + { + transform := &TransitiveReductionTransformer{} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + t.Logf("graph after TransitiveReductionTransformer:\n%s", g.String()) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTransformTransReduceBasicStr) + if actual != expected { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s", actual, expected) + } +} + +const testTransformTransReduceBasicStr = ` +aws_instance.A 
+aws_instance.B + aws_instance.A +aws_instance.C + aws_instance.B +` diff --git a/pkg/tofu/transform_variable.go b/pkg/tofu/transform_variable.go new file mode 100644 index 00000000000..efe71d0a19a --- /dev/null +++ b/pkg/tofu/transform_variable.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" +) + +// RootVariableTransformer is a GraphTransformer that adds all the root +// variables to the graph. +// +// Root variables are currently no-ops but they must be added to the +// graph since downstream things that depend on them must be able to +// reach them. +type RootVariableTransformer struct { + Config *configs.Config + + RawValues InputValues +} + +func (t *RootVariableTransformer) Transform(g *Graph) error { + // We can have no variables if we have no config. + if t.Config == nil { + return nil + } + + // We're only considering root module variables here, since child + // module variables are handled by ModuleVariableTransformer. + vars := t.Config.Module.Variables + + // Add all variables here + for _, v := range vars { + node := &NodeRootVariable{ + Addr: addrs.InputVariable{ + Name: v.Name, + }, + Config: v, + RawValue: t.RawValues[v.Name], + } + g.Add(node) + } + + return nil +} diff --git a/pkg/tofu/transform_vertex.go b/pkg/tofu/transform_vertex.go new file mode 100644 index 00000000000..1920f5823b8 --- /dev/null +++ b/pkg/tofu/transform_vertex.go @@ -0,0 +1,49 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/kubegems/opentofu/pkg/dag" +) + +// VertexTransformer is a GraphTransformer that transforms vertices +// using the GraphVertexTransformers. 
The Transforms are run in sequential +// order. If a transform replaces a vertex then the next transform will see +// the new vertex. +type VertexTransformer struct { + Transforms []GraphVertexTransformer +} + +func (t *VertexTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + for _, vt := range t.Transforms { + newV, err := vt.Transform(v) + if err != nil { + return err + } + + // If the vertex didn't change, then don't do anything more + if newV == v { + continue + } + + // Vertex changed, replace it within the graph + if ok := g.Replace(v, newV); !ok { + // This should never happen, big problem + return fmt.Errorf( + "failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", + dag.VertexName(v), dag.VertexName(newV), v, newV) + } + + // Replace v so that future transforms use the proper vertex + v = newV + } + } + + return nil +} diff --git a/pkg/tofu/transform_vertex_test.go b/pkg/tofu/transform_vertex_test.go new file mode 100644 index 00000000000..f41277824e4 --- /dev/null +++ b/pkg/tofu/transform_vertex_test.go @@ -0,0 +1,63 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "strings" + "testing" + + "github.com/kubegems/opentofu/pkg/dag" +) + +func TestVertexTransformer_impl(t *testing.T) { + var _ GraphTransformer = new(VertexTransformer) +} + +func TestVertexTransformer(t *testing.T) { + var g Graph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(dag.BasicEdge(1, 2)) + g.Connect(dag.BasicEdge(2, 3)) + + { + tf := &VertexTransformer{ + Transforms: []GraphVertexTransformer{ + &testVertexTransform{Source: 2, Target: 42}, + }, + } + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testVertexTransformerStr) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +type testVertexTransform struct { + Source, Target dag.Vertex +} + +func (t *testVertexTransform) Transform(v dag.Vertex) (dag.Vertex, error) { + if t.Source == v { + v = t.Target + } + + return v, nil +} + +const testVertexTransformerStr = ` +1 + 42 +3 +42 + 3 +` diff --git a/pkg/tofu/ui_input.go b/pkg/tofu/ui_input.go new file mode 100644 index 00000000000..bc1569cd826 --- /dev/null +++ b/pkg/tofu/ui_input.go @@ -0,0 +1,37 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "context" + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. +type UIInput interface { + Input(context.Context, *InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. 
+ Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string + + // Secret should be true if we are asking for sensitive input. + // If attached to a TTY, OpenTofu will disable echo. + Secret bool +} diff --git a/pkg/tofu/ui_input_mock.go b/pkg/tofu/ui_input_mock.go new file mode 100644 index 00000000000..9e2f2891a7e --- /dev/null +++ b/pkg/tofu/ui_input_mock.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "context" + +// MockUIInput is an implementation of UIInput that can be used for tests. +type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/pkg/tofu/ui_input_prefix.go b/pkg/tofu/ui_input_prefix.go new file mode 100644 index 00000000000..a755c3319b4 --- /dev/null +++ b/pkg/tofu/ui_input_prefix.go @@ -0,0 +1,25 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. 
+type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/pkg/tofu/ui_input_prefix_test.go b/pkg/tofu/ui_input_prefix_test.go new file mode 100644 index 00000000000..423f97f8f23 --- /dev/null +++ b/pkg/tofu/ui_input_prefix_test.go @@ -0,0 +1,32 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "context" + "testing" +) + +func TestPrefixUIInput_impl(t *testing.T) { + var _ UIInput = new(PrefixUIInput) +} + +func TestPrefixUIInput(t *testing.T) { + input := new(MockUIInput) + prefix := &PrefixUIInput{ + IdPrefix: "foo", + UIInput: input, + } + + _, err := prefix.Input(context.Background(), &InputOpts{Id: "bar"}) + if err != nil { + t.Fatalf("err: %s", err) + } + + if input.InputOpts.Id != "foo.bar" { + t.Fatalf("bad: %#v", input.InputOpts) + } +} diff --git a/pkg/tofu/ui_output.go b/pkg/tofu/ui_output.go new file mode 100644 index 00000000000..3378a521574 --- /dev/null +++ b/pkg/tofu/ui_output.go @@ -0,0 +1,12 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/pkg/tofu/ui_output_callback.go b/pkg/tofu/ui_output_callback.go new file mode 100644 index 00000000000..02188ba9b8b --- /dev/null +++ b/pkg/tofu/ui_output_callback.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/pkg/tofu/ui_output_callback_test.go b/pkg/tofu/ui_output_callback_test.go new file mode 100644 index 00000000000..1494934d220 --- /dev/null +++ b/pkg/tofu/ui_output_callback_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestCallbackUIOutput_impl(t *testing.T) { + var _ UIOutput = new(CallbackUIOutput) +} diff --git a/pkg/tofu/ui_output_mock.go b/pkg/tofu/ui_output_mock.go new file mode 100644 index 00000000000..3fa45007bf6 --- /dev/null +++ b/pkg/tofu/ui_output_mock.go @@ -0,0 +1,26 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. +type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/pkg/tofu/ui_output_mock_test.go b/pkg/tofu/ui_output_mock_test.go new file mode 100644 index 00000000000..7f0ffa5aa73 --- /dev/null +++ b/pkg/tofu/ui_output_mock_test.go @@ -0,0 +1,14 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" +) + +func TestMockUIOutput(t *testing.T) { + var _ UIOutput = new(MockUIOutput) +} diff --git a/pkg/tofu/ui_output_provisioner.go b/pkg/tofu/ui_output_provisioner.go new file mode 100644 index 00000000000..d8d06cb5279 --- /dev/null +++ b/pkg/tofu/ui_output_provisioner.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/addrs" +) + +// ProvisionerUIOutput is an implementation of UIOutput that calls a hook +// for the output so that the hooks can handle it. +type ProvisionerUIOutput struct { + InstanceAddr addrs.AbsResourceInstance + ProvisionerType string + Hooks []Hook +} + +func (o *ProvisionerUIOutput) Output(msg string) { + for _, h := range o.Hooks { + h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) + } +} diff --git a/pkg/tofu/ui_output_provisioner_test.go b/pkg/tofu/ui_output_provisioner_test.go new file mode 100644 index 00000000000..6ffb25062fc --- /dev/null +++ b/pkg/tofu/ui_output_provisioner_test.go @@ -0,0 +1,41 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/addrs" +) + +func TestProvisionerUIOutput_impl(t *testing.T) { + var _ UIOutput = new(ProvisionerUIOutput) +} + +func TestProvisionerUIOutputOutput(t *testing.T) { + hook := new(MockHook) + output := &ProvisionerUIOutput{ + InstanceAddr: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "test", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + ProvisionerType: "foo", + Hooks: []Hook{hook}, + } + + output.Output("bar") + + if !hook.ProvisionOutputCalled { + t.Fatal("hook.ProvisionOutput was not called, and should've been") + } + if got, want := hook.ProvisionOutputProvisionerType, "foo"; got != want { + t.Fatalf("wrong provisioner type\ngot: %q\nwant: %q", got, want) + } + if got, want := hook.ProvisionOutputMessage, "bar"; got != want { + t.Fatalf("wrong output message\ngot: %q\nwant: %q", got, want) + } +} diff --git a/pkg/tofu/update_state_hook.go b/pkg/tofu/update_state_hook.go new file mode 100644 index 00000000000..332fc2c1980 --- /dev/null +++ b/pkg/tofu/update_state_hook.go @@ -0,0 +1,24 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// updateStateHook calls the PostStateUpdate hook with the current state. +func updateStateHook(ctx EvalContext) error { + // In principle we could grab the lock here just long enough to take a + // deep copy and then pass that to our hooks below, but we'll instead + // hold the hook for the duration to avoid the potential confusing + // situation of us racing to call PostStateUpdate concurrently with + // different state snapshots. 
+ stateSync := ctx.State() + state := stateSync.Lock().DeepCopy() + defer stateSync.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + return err +} diff --git a/pkg/tofu/update_state_hook_test.go b/pkg/tofu/update_state_hook_test.go new file mode 100644 index 00000000000..bc91267eccb --- /dev/null +++ b/pkg/tofu/update_state_hook_test.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestUpdateStateHook(t *testing.T) { + mockHook := new(MockHook) + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetLocalValue("foo", cty.StringVal("hello")) + + ctx := new(MockEvalContext) + ctx.HookHook = mockHook + ctx.StateState = state.SyncWrapper() + + if err := updateStateHook(ctx); err != nil { + t.Fatalf("err: %s", err) + } + + if !mockHook.PostStateUpdateCalled { + t.Fatal("should call PostStateUpdate") + } + if mockHook.PostStateUpdateState.LocalValue(addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) != cty.StringVal("hello") { + t.Fatalf("wrong state passed to hook: %s", spew.Sdump(mockHook.PostStateUpdateState)) + } +} diff --git a/pkg/tofu/upgrade_resource_state.go b/pkg/tofu/upgrade_resource_state.go new file mode 100644 index 00000000000..61c2353d40d --- /dev/null +++ b/pkg/tofu/upgrade_resource_state.go @@ -0,0 +1,211 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// upgradeResourceState will, if necessary, run the provider-defined upgrade +// logic against the given state object to make it compliant with the +// current schema version. This is a no-op if the given state object is +// already at the latest version. +// +// If any errors occur during upgrade, error diagnostics are returned. In that +// case it is not safe to proceed with using the original state object. +func upgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + // We only do state upgrading for managed resources. + // This was a part of the normal workflow in older versions and + // returned early, so we are only going to log the error for now. + log.Printf("[ERROR] data resource %s should not require state upgrade", addr) + return src, nil + } + + // Remove any attributes from state that are not present in the schema. + // This was previously taken care of by the provider, but data sources do + // not go through the UpgradeResourceState process. + // + // Legacy flatmap state is already taken care of during conversion. + // If the schema version is being changed, then allow the provider to handle + // removed attributes. 
+ if len(src.AttrsJSON) > 0 && src.SchemaVersion == currentVersion { + src.AttrsJSON = stripRemovedStateAttributes(src.AttrsJSON, currentSchema.ImpliedType()) + } + + stateIsFlatmap := len(src.AttrsJSON) == 0 + + // TODO: This should eventually use a proper FQN. + providerType := addr.Resource.Resource.ImpliedProvider() + if src.SchemaVersion > currentVersion { + log.Printf("[TRACE] upgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance managed by newer provider version", + // This is not a very good error message, but we don't retain enough + // information in state to give good feedback on what provider + // version might be required here. :( + fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), + )) + return nil, diags + } + + // If we get down here then we need to upgrade the state, with the + // provider's help. + // If this state was originally created by a version of OpenTofu prior to + // v0.12, this also includes translating from legacy flatmap to new-style + // representation, since only the provider has enough information to + // understand a flatmap built against an older schema. 
+ if src.SchemaVersion != currentVersion { + log.Printf("[TRACE] upgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + } else { + log.Printf("[TRACE] upgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) + } + + req := providers.UpgradeResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + + // TODO: The internal schema version representations are all using + // uint64 instead of int64, but unsigned integers aren't friendly + // to all protobuf target languages so in practice we use int64 + // on the wire. In future we will change all of our internal + // representations to int64 too. + Version: int64(src.SchemaVersion), + } + + if stateIsFlatmap { + req.RawStateFlatmap = src.AttrsFlat + } else { + req.RawStateJSON = src.AttrsJSON + } + + resp := provider.UpgradeResourceState(req) + diags := resp.Diagnostics + if diags.HasErrors() { + return nil, diags + } + + // After upgrading, the new value must conform to the current schema. When + // going over RPC this is actually already ensured by the + // marshaling/unmarshaling of the new value, but we'll check it here + // anyway for robustness, e.g. for in-process providers. 
+ newValue := resp.UpgradedState + if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource state upgrade", + fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), + )) + } + return nil, diags + } + + new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) + if err != nil { + // We already checked for type conformance above, so getting into this + // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to encode result of resource state upgrade", + fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), + )) + } + return new, diags +} + +// stripRemovedStateAttributes deletes any attributes no longer present in the +// schema, so that the json can be correctly decoded. +func stripRemovedStateAttributes(state []byte, ty cty.Type) []byte { + jsonMap := map[string]interface{}{} + err := json.Unmarshal(state, &jsonMap) + if err != nil { + // we just log any errors here, and let the normal decode process catch + // invalid JSON. + log.Printf("[ERROR] UpgradeResourceState: stripRemovedStateAttributes: %s", err) + return state + } + + // if no changes were made, we return the original state to ensure nothing + // was altered in the marshaling process. + if !removeRemovedAttrs(jsonMap, ty) { + return state + } + + js, err := json.Marshal(jsonMap) + if err != nil { + // if the json map was somehow mangled enough to not marshal, something + // went horribly wrong + panic(err) + } + + return js +} + +// strip out the actual missing attributes, and return a bool indicating if any +// changes were made. 
+func removeRemovedAttrs(v interface{}, ty cty.Type) bool { + modified := false + // we're only concerned with finding maps that correspond to object + // attributes + switch v := v.(type) { + case []interface{}: + switch { + // If these aren't blocks the next call will be a noop + case ty.IsListType() || ty.IsSetType(): + eTy := ty.ElementType() + for _, eV := range v { + modified = removeRemovedAttrs(eV, eTy) || modified + } + } + return modified + case map[string]interface{}: + switch { + case ty.IsMapType(): + // map blocks aren't yet supported, but handle this just in case + eTy := ty.ElementType() + for _, eV := range v { + modified = removeRemovedAttrs(eV, eTy) || modified + } + return modified + + case ty == cty.DynamicPseudoType: + log.Printf("[DEBUG] UpgradeResourceState: ignoring dynamic block: %#v\n", v) + return false + + case ty.IsObjectType(): + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] UpgradeResourceState: attribute %q no longer present in schema", attr) + delete(v, attr) + modified = true + continue + } + + modified = removeRemovedAttrs(attrV, attrTy) || modified + } + return modified + default: + // This shouldn't happen, and will fail to decode further on, so + // there's no need to handle it here. + log.Printf("[WARN] UpgradeResourceState: unexpected type %#v for map in json state", ty) + return false + } + } + return modified +} diff --git a/pkg/tofu/upgrade_resource_state_test.go b/pkg/tofu/upgrade_resource_state_test.go new file mode 100644 index 00000000000..f0d996cb9a1 --- /dev/null +++ b/pkg/tofu/upgrade_resource_state_test.go @@ -0,0 +1,153 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "reflect" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestStripRemovedStateAttributes(t *testing.T) { + cases := []struct { + name string + state map[string]interface{} + expect map[string]interface{} + ty cty.Type + modified bool + }{ + { + "removed string", + map[string]interface{}{ + "a": "ok", + "b": "gone", + }, + map[string]interface{}{ + "a": "ok", + }, + cty.Object(map[string]cty.Type{ + "a": cty.String, + }), + true, + }, + { + "removed null", + map[string]interface{}{ + "a": "ok", + "b": nil, + }, + map[string]interface{}{ + "a": "ok", + }, + cty.Object(map[string]cty.Type{ + "a": cty.String, + }), + true, + }, + { + "removed nested string", + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + "b": "removed", + }, + }, + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + }, + }, + cty.Object(map[string]cty.Type{ + "a": cty.String, + "b": cty.Object(map[string]cty.Type{ + "a": cty.String, + }), + }), + true, + }, + { + "removed nested list", + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + "b": []interface{}{"removed"}, + }, + }, + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + }, + }, + cty.Object(map[string]cty.Type{ + "a": cty.String, + "b": cty.Object(map[string]cty.Type{ + "a": cty.String, + }), + }), + true, + }, + { + "removed keys in set of objs", + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + "set": []interface{}{ + map[string]interface{}{ + "x": "ok", + "y": "removed", + }, + map[string]interface{}{ + "x": "ok", + "y": "removed", + }, + }, + }, + }, + map[string]interface{}{ + "a": "ok", + "b": map[string]interface{}{ + "a": "ok", + "set": []interface{}{ + map[string]interface{}{ + "x": "ok", + }, + map[string]interface{}{ + "x": "ok", + }, + }, + }, + }, + cty.Object(map[string]cty.Type{ + "a": 
cty.String, + "b": cty.Object(map[string]cty.Type{ + "a": cty.String, + "set": cty.Set(cty.Object(map[string]cty.Type{ + "x": cty.String, + })), + }), + }), + true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + modified := removeRemovedAttrs(tc.state, tc.ty) + if !reflect.DeepEqual(tc.state, tc.expect) { + t.Fatalf("expected: %#v\n got: %#v\n", tc.expect, tc.state) + } + if modified != tc.modified { + t.Fatal("incorrect return value") + } + }) + } +} diff --git a/pkg/tofu/util.go b/pkg/tofu/util.go new file mode 100644 index 00000000000..0a853336d6d --- /dev/null +++ b/pkg/tofu/util.go @@ -0,0 +1,48 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n <= 0 { + panic("semaphore with limit <=0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. +// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. 
+func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} diff --git a/pkg/tofu/util_test.go b/pkg/tofu/util_test.go new file mode 100644 index 00000000000..f27cc0d6330 --- /dev/null +++ b/pkg/tofu/util_test.go @@ -0,0 +1,38 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + "time" +) + +func TestSemaphore(t *testing.T) { + s := NewSemaphore(2) + timer := time.AfterFunc(time.Second, func() { + panic("deadlock") + }) + defer timer.Stop() + + s.Acquire() + if !s.TryAcquire() { + t.Fatalf("should acquire") + } + if s.TryAcquire() { + t.Fatalf("should not acquire") + } + s.Release() + s.Release() + + // This release should panic + defer func() { + r := recover() + if r == nil { + t.Fatalf("should panic") + } + }() + s.Release() +} diff --git a/pkg/tofu/validate_selfref.go b/pkg/tofu/validate_selfref.go new file mode 100644 index 00000000000..63ddbf771fd --- /dev/null +++ b/pkg/tofu/validate_selfref.go @@ -0,0 +1,88 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs/configschema" + "github.com/kubegems/opentofu/pkg/lang" + "github.com/kubegems/opentofu/pkg/providers" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// validateSelfRef checks to ensure that expressions within a particular +// referenceable block do not reference that same block. 
+func validateSelfRef(addr addrs.Referenceable, config hcl.Body, providerSchema providers.ProviderSchema) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + addrStrs := make([]string, 0, 1) + addrStrs = append(addrStrs, addr.String()) + switch tAddr := addr.(type) { + case addrs.ResourceInstance: + // A resource instance may not refer to its containing resource either. + addrStrs = append(addrStrs, tAddr.ContainingResource().String()) + } + + var schema *configschema.Block + switch tAddr := addr.(type) { + case addrs.Resource: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr) + case addrs.ResourceInstance: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) + } + + if schema == nil { + diags = diags.Append(fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in OpenTofu and should be reported", addr)) + return diags + } + + refs, _ := lang.ReferencesInBlock(addrs.ParseRef, config, schema) + for _, ref := range refs { + for _, addrStr := range addrStrs { + if ref.Subject.String() == addrStr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referential block", + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + } + + return diags +} + +// Legacy provisioner configurations may refer to single instances using the +// resource address. We need to filter these out from the reported references +// to prevent cycles. 
+func filterSelfRefs(self addrs.Resource, refs []*addrs.Reference) []*addrs.Reference {
+	for i := 0; i < len(refs); i++ {
+		ref := refs[i]
+
+		var subject addrs.Resource
+		switch subj := ref.Subject.(type) {
+		case addrs.Resource:
+			subject = subj
+		case addrs.ResourceInstance:
+			subject = subj.ContainingResource()
+		default:
+			continue
+		}
+
+		if self.Equal(subject) {
+			tail := len(refs) - 1
+			refs[i], refs[tail] = refs[tail], refs[i]
+			refs = refs[:tail]
+			i-- // re-examine the element swapped in from the tail; otherwise it is skipped and may survive the filter
+		}
+	}
+	return refs
+}
diff --git a/pkg/tofu/validate_selfref_test.go b/pkg/tofu/validate_selfref_test.go
new file mode 100644
index 00000000000..0f85e1a8d1d
--- /dev/null
+++ b/pkg/tofu/validate_selfref_test.go
@@ -0,0 +1,113 @@
+// Copyright (c) The OpenTofu Authors
+// SPDX-License-Identifier: MPL-2.0
+// Copyright (c) 2023 HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package tofu
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/kubegems/opentofu/pkg/configs/configschema"
+	"github.com/kubegems/opentofu/pkg/providers"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hcltest"
+	"github.com/kubegems/opentofu/pkg/addrs"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func TestValidateSelfRef(t *testing.T) {
+	rAddr := addrs.Resource{
+		Mode: addrs.ManagedResourceMode,
+		Type: "aws_instance",
+		Name: "foo",
+	}
+
+	tests := []struct {
+		Name string
+		Addr addrs.Referenceable
+		Expr hcl.Expression
+		Err  bool
+	}{
+		{
+			"no references at all",
+			rAddr,
+			hcltest.MockExprLiteral(cty.StringVal("bar")),
+			false,
+		},
+
+		{
+			"non self reference",
+			rAddr,
+			hcltest.MockExprTraversalSrc("aws_instance.bar.id"),
+			false,
+		},
+
+		{
+			"self reference",
+			rAddr,
+			hcltest.MockExprTraversalSrc("aws_instance.foo.id"),
+			true,
+		},
+
+		{
+			"self reference other index",
+			rAddr,
+			hcltest.MockExprTraversalSrc("aws_instance.foo[4].id"),
+			false,
+		},
+
+		{
+			"self reference same index",
+			rAddr.Instance(addrs.IntKey(4)),
+			hcltest.MockExprTraversalSrc("aws_instance.foo[4].id"),
+			true,
+		},
+
+		{
+			"self 
reference whole", + rAddr.Instance(addrs.IntKey(4)), + hcltest.MockExprTraversalSrc("aws_instance.foo"), + true, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%d-%s", i, test.Name), func(t *testing.T) { + body := hcltest.MockBody(&hcl.BodyContent{ + Attributes: hcl.Attributes{ + "foo": { + Name: "foo", + Expr: test.Expr, + }, + }, + }) + + ps := providers.ProviderSchema{ + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + }, + } + + diags := validateSelfRef(test.Addr, body, ps) + if diags.HasErrors() != test.Err { + if test.Err { + t.Errorf("unexpected success; want error") + } else { + t.Errorf("unexpected error\n\n%s", diags.Err()) + } + } + }) + } +} diff --git a/pkg/tofu/valuesourcetype_string.go b/pkg/tofu/valuesourcetype_string.go new file mode 100644 index 00000000000..45b4c9caad7 --- /dev/null +++ b/pkg/tofu/valuesourcetype_string.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. + +package tofu + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ValueFromUnknown-0] + _ = x[ValueFromConfig-67] + _ = x[ValueFromAutoFile-70] + _ = x[ValueFromNamedFile-78] + _ = x[ValueFromCLIArg-65] + _ = x[ValueFromEnvVar-69] + _ = x[ValueFromInput-73] + _ = x[ValueFromPlan-80] + _ = x[ValueFromCaller-83] +} + +const ( + _ValueSourceType_name_0 = "ValueFromUnknown" + _ValueSourceType_name_1 = "ValueFromCLIArg" + _ValueSourceType_name_2 = "ValueFromConfig" + _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" + _ValueSourceType_name_4 = "ValueFromInput" + _ValueSourceType_name_5 = "ValueFromNamedFile" + _ValueSourceType_name_6 = "ValueFromPlan" + _ValueSourceType_name_7 = "ValueFromCaller" +) + +var ( + _ValueSourceType_index_3 = [...]uint8{0, 15, 32} +) + +func (i ValueSourceType) String() string { + switch { + case i == 0: + return _ValueSourceType_name_0 + case i == 65: + return _ValueSourceType_name_1 + case i == 67: + return _ValueSourceType_name_2 + case 69 <= i && i <= 70: + i -= 69 + return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] + case i == 73: + return _ValueSourceType_name_4 + case i == 78: + return _ValueSourceType_name_5 + case i == 80: + return _ValueSourceType_name_6 + case i == 83: + return _ValueSourceType_name_7 + default: + return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/pkg/tofu/variables.go b/pkg/tofu/variables.go new file mode 100644 index 00000000000..43ac936e0fc --- /dev/null +++ b/pkg/tofu/variables.go @@ -0,0 +1,320 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// InputValue represents a raw value for a root module input variable as +// provided by the external caller into a function like tofu.Context.Plan. 
+//
+// InputValue should represent as directly as possible what the user set the
+// variable to, without any attempt to convert the value to the variable's
+// type constraint or substitute the configured default values for variables
+// that weren't set. Those adjustments will be handled by OpenTofu Core itself
+// as part of performing the requested operation.
+//
+// An OpenTofu Core caller must provide an InputValue object for each of the
+// variables declared in the root module, even if the end user didn't provide
+// an explicit value for some of them. See the Value field documentation for
+// how to handle that situation.
+//
+// OpenTofu Core also internally uses InputValue to represent the raw value
+// provided for a variable in a child module call, following the same
+// conventions. However, that's an implementation detail not visible to
+// outside callers.
+type InputValue struct {
+	// Value is the raw value as provided by the user as part of the plan
+	// options, or a corresponding similar data structure for non-plan
+	// operations.
+	//
+	// If a particular variable declared in the root module is _not_ set by
+	// the user then the caller must still provide an InputValue for it but
+	// must set Value to cty.NilVal to represent the absence of a value.
+	// This requirement is to help detect situations where the caller isn't
+	// correctly detecting and handling all of the declared variables.
+	//
+	// For historical reasons it's important that callers distinguish the
+	// situation of the value not being set at all (cty.NilVal) from the
+	// situation of it being explicitly set to null (a cty.NullVal result):
+	// for "nullable" input variables that distinction unfortunately decides
+	// whether the final value will be the variable's default or will be
+	// explicitly null. 
+ Value cty.Value + + // SourceType is a high-level category for where the value of Value + // came from, which OpenTofu Core uses to tailor some of its error + // messages to be more helpful to the user. + // + // Some SourceType values should be accompanied by a populated SourceRange + // value. See that field's documentation below for more information. + SourceType ValueSourceType + + // SourceRange provides source location information for values whose + // SourceType is either ValueFromConfig, ValueFromNamedFile, or + // ValueForNormalFile. It is not populated for other source types, and so + // should not be used. + SourceRange tfdiags.SourceRange +} + +// ValueSourceType describes what broad category of source location provided +// a particular value. +type ValueSourceType rune + +const ( + // ValueFromUnknown is the zero value of ValueSourceType and is not valid. + ValueFromUnknown ValueSourceType = 0 + + // ValueFromConfig indicates that a value came from a .tf or .tf.json file, + // e.g. the default value defined for a variable. + ValueFromConfig ValueSourceType = 'C' + + // ValueFromAutoFile indicates that a value came from a "values file", like + // a .tfvars file, that was implicitly loaded by naming convention. + ValueFromAutoFile ValueSourceType = 'F' + + // ValueFromNamedFile indicates that a value came from a named "values file", + // like a .tfvars file, that was passed explicitly on the command line (e.g. + // -var-file=foo.tfvars). + ValueFromNamedFile ValueSourceType = 'N' + + // ValueFromCLIArg indicates that the value was provided directly in + // a CLI argument. The name of this argument is not recorded and so it must + // be inferred from context. + ValueFromCLIArg ValueSourceType = 'A' + + // ValueFromEnvVar indicates that the value was provided via an environment + // variable. The name of the variable is not recorded and so it must be + // inferred from context. 
+	ValueFromEnvVar ValueSourceType = 'E'
+
+	// ValueFromInput indicates that the value was provided at an interactive
+	// input prompt.
+	ValueFromInput ValueSourceType = 'I'
+
+	// ValueFromPlan indicates that the value was retrieved from a stored plan.
+	ValueFromPlan ValueSourceType = 'P'
+
+	// ValueFromCaller indicates that the value was explicitly overridden by
+	// a caller to Context.SetVariable after the context was constructed.
+	ValueFromCaller ValueSourceType = 'S'
+)
+
+func (v *InputValue) GoString() string {
+	if (v.SourceRange != tfdiags.SourceRange{}) {
+		return fmt.Sprintf("&tofu.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange)
+	} else {
+		return fmt.Sprintf("&tofu.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType)
+	}
+}
+
+// HasSourceRange returns true if the receiver has a source type for which
+// we expect the SourceRange field to be populated with a valid range.
+func (v *InputValue) HasSourceRange() bool {
+	return v.SourceType.HasSourceRange()
+}
+
+// HasSourceRange returns true if the receiver is one of the source types
+// that is used along with a valid SourceRange field when appearing inside an
+// InputValue object.
+func (v ValueSourceType) HasSourceRange() bool {
+	switch v {
+	case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile:
+		return true
+	default:
+		return false
+	}
+}
+
+func (v ValueSourceType) GoString() string {
+	return fmt.Sprintf("tofu.%s", v)
+}
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType
+
+// InputValues is a map of InputValue instances.
+type InputValues map[string]*InputValue
+
+// InputValuesFromCaller turns the given map of naked values into an
+// InputValues that attributes each value to "a caller", using the source
+// type ValueFromCaller. This is primarily useful for testing purposes. 
+// +// This should not be used as a general way to convert map[string]cty.Value +// into InputValues, since in most real cases we want to set a suitable +// other SourceType and possibly SourceRange value. +func InputValuesFromCaller(vals map[string]cty.Value) InputValues { + ret := make(InputValues, len(vals)) + for k, v := range vals { + ret[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, + } + } + return ret +} + +// Override merges the given value maps with the receiver, overriding any +// conflicting keys so that the latest definition wins. +func (vv InputValues) Override(others ...InputValues) InputValues { + // FIXME: This should check to see if any of the values are maps and + // merge them if so, in order to preserve the behavior from prior to + // Terraform 0.12. + ret := make(InputValues) + for k, v := range vv { + ret[k] = v + } + for _, other := range others { + for k, v := range other { + ret[k] = v + } + } + return ret +} + +// JustValues returns a map that just includes the values, discarding the +// source information. +func (vv InputValues) JustValues() map[string]cty.Value { + ret := make(map[string]cty.Value, len(vv)) + for k, v := range vv { + ret[k] = v.Value + } + return ret +} + +// SameValues returns true if the given InputValues has the same values as +// the receiever, disregarding the source types and source ranges. +// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +func (vv InputValues) SameValues(other InputValues) bool { + if len(vv) != len(other) { + return false + } + + for k, v := range vv { + ov, exists := other[k] + if !exists { + return false + } + if !v.Value.RawEquals(ov.Value) { + return false + } + } + + return true +} + +// HasValues returns true if the reciever has the same values as in the given +// map, disregarding the source types and source ranges. 
+// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +func (vv InputValues) HasValues(vals map[string]cty.Value) bool { + if len(vv) != len(vals) { + return false + } + + for k, v := range vv { + oVal, exists := vals[k] + if !exists { + return false + } + if !v.Value.RawEquals(oVal) { + return false + } + } + + return true +} + +// Identical returns true if the given InputValues has the same values, +// source types, and source ranges as the receiver. +// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +// +// This method is primarily for testing. For most practical purposes, it's +// better to use SameValues or HasValues. +func (vv InputValues) Identical(other InputValues) bool { + if len(vv) != len(other) { + return false + } + + for k, v := range vv { + ov, exists := other[k] + if !exists { + return false + } + if !v.Value.RawEquals(ov.Value) { + return false + } + if v.SourceType != ov.SourceType { + return false + } + if v.SourceRange != ov.SourceRange { + return false + } + } + + return true +} + +// checkInputVariables ensures that the caller provided an InputValue +// definition for each root module variable declared in the configuration. +// The caller must provide an InputVariables with keys exactly matching +// the declared variables, though some of them may be marked explicitly +// unset by their values being cty.NilVal. +// +// This doesn't perform any type checking, default value substitution, or +// validation checks. Those are all handled during a graph walk when we +// visit the graph nodes representing each root variable. +// +// The set of values is considered valid only if the returned diagnostics +// does not contain errors. 
A valid set of values may still produce warnings, +// which should be returned to the user. +func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for name := range vcs { + _, isSet := vs[name] + if !isSet { + // Always an error, since the caller should have produced an + // item with Value: cty.NilVal to be explicit that it offered + // an opportunity to set this variable. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unassigned variable", + fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in OpenTofu; please report it in a GitHub issue.", name), + )) + continue + } + } + + // Check for any variables that are assigned without being configured. + // This is always an implementation error in the caller, because we + // expect undefined variables to be caught during context construction + // where there is better context to report it well. + for name := range vs { + if _, defined := vcs[name]; !defined { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value assigned to undeclared variable", + fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), + )) + } + } + + return diags +} diff --git a/pkg/tofu/variables_test.go b/pkg/tofu/variables_test.go new file mode 100644 index 00000000000..430a8771e43 --- /dev/null +++ b/pkg/tofu/variables_test.go @@ -0,0 +1,153 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "testing" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/zclconf/go-cty/cty" +) + +func TestCheckInputVariables(t *testing.T) { + c := testModule(t, "input-variables") + + t.Run("No variables set", func(t *testing.T) { + // No variables set + diags := checkInputVariables(c.Module.Variables, nil) + if !diags.HasErrors() { + t.Fatal("check succeeded, but want errors") + } + + // Required variables set, optional variables unset + // This is still an error at this layer, since it's the caller's + // responsibility to have already merged in any default values. + diags = checkInputVariables(c.Module.Variables, InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("bar"), + SourceType: ValueFromCLIArg, + }, + }) + if !diags.HasErrors() { + t.Fatal("check succeeded, but want errors") + } + }) + + t.Run("All variables set", func(t *testing.T) { + diags := checkInputVariables(c.Module.Variables, InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("bar"), + SourceType: ValueFromCLIArg, + }, + "bar": &InputValue{ + Value: cty.StringVal("baz"), + SourceType: ValueFromCLIArg, + }, + "map": &InputValue{ + Value: cty.StringVal("baz"), // okay because config has no type constraint + SourceType: ValueFromCLIArg, + }, + "object_map": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "uno": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + "bar": cty.NumberIntVal(2), // type = any + }), + "dos": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bat"), + "bar": cty.NumberIntVal(99), // type = any + }), + }), + SourceType: ValueFromCLIArg, + }, + "object_list": &InputValue{ + Value: cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + "bar": cty.NumberIntVal(2), // type = any + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bang"), + "bar": cty.NumberIntVal(42), // type = any + }), + }), + SourceType: 
ValueFromCLIArg, + }, + }) + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + }) + + t.Run("Invalid Complex Types", func(t *testing.T) { + diags := checkInputVariables(c.Module.Variables, InputValues{ + "foo": &InputValue{ + Value: cty.StringVal("bar"), + SourceType: ValueFromCLIArg, + }, + "bar": &InputValue{ + Value: cty.StringVal("baz"), + SourceType: ValueFromCLIArg, + }, + "map": &InputValue{ + Value: cty.StringVal("baz"), // okay because config has no type constraint + SourceType: ValueFromCLIArg, + }, + "object_map": &InputValue{ + Value: cty.MapVal(map[string]cty.Value{ + "uno": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + "bar": cty.NumberIntVal(2), // type = any + }), + "dos": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bat"), + "bar": cty.NumberIntVal(99), // type = any + }), + }), + SourceType: ValueFromCLIArg, + }, + "object_list": &InputValue{ + Value: cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("baz"), + "bar": cty.NumberIntVal(2), // type = any + }), + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bang"), + "bar": cty.StringVal("42"), // type = any, but mismatch with the first list item + }), + }), + SourceType: ValueFromCLIArg, + }, + }) + + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + }) +} + +// testInputValuesUnset is a helper for constructing InputValues values for +// situations where all of the root module variables are optional and a +// test case intends to just use those default values and not override them +// at all. +// +// In other words, this constructs an InputValues with one entry per given +// input variable declaration where all of them are declared as unset. 
+func testInputValuesUnset(decls map[string]*configs.Variable) InputValues { + if len(decls) == 0 { + return nil + } + + ret := make(InputValues, len(decls)) + for name := range decls { + ret[name] = &InputValue{ + Value: cty.NilVal, + SourceType: ValueFromUnknown, + } + } + return ret +} diff --git a/pkg/tofu/version_required.go b/pkg/tofu/version_required.go new file mode 100644 index 00000000000..e31876e07cd --- /dev/null +++ b/pkg/tofu/version_required.go @@ -0,0 +1,30 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofu + +import ( + "github.com/kubegems/opentofu/pkg/tfdiags" + + "github.com/kubegems/opentofu/pkg/configs" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of OpenTofu Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. +func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { + return nil + } + + var diags tfdiags.Diagnostics + diags = diags.Append(config.CheckCoreVersionRequirements()) + + return diags +} diff --git a/pkg/tofu/walkoperation_string.go b/pkg/tofu/walkoperation_string.go new file mode 100644 index 00000000000..e5d49ffe552 --- /dev/null +++ b/pkg/tofu/walkoperation_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. + +package tofu + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[walkInvalid-0] + _ = x[walkApply-1] + _ = x[walkPlan-2] + _ = x[walkPlanDestroy-3] + _ = x[walkValidate-4] + _ = x[walkDestroy-5] + _ = x[walkImport-6] + _ = x[walkEval-7] +} + +const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkValidatewalkDestroywalkImportwalkEval" + +var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 55, 66, 76, 84} + +func (i walkOperation) String() string { + if i >= walkOperation(len(_walkOperation_index)-1) { + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] +} diff --git a/pkg/tofumigrate/testdata/mention/main.tf b/pkg/tofumigrate/testdata/mention/main.tf new file mode 100644 index 00000000000..b2c87b65ab6 --- /dev/null +++ b/pkg/tofumigrate/testdata/mention/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + aws = { + source = "registry.terraform.io/hashicorp/aws" + } + } +} + +provider "random" {} +provider "aws" {} + +resource "random_id" "example" { + byte_length = 8 +} + +resource "aws_instance" "example" { + ami = "abc" + instance_type = "t2.micro" +} diff --git a/pkg/tofumigrate/testdata/nomention/main.tf b/pkg/tofumigrate/testdata/nomention/main.tf new file mode 100644 index 00000000000..56fd0e1216d --- /dev/null +++ b/pkg/tofumigrate/testdata/nomention/main.tf @@ -0,0 +1,11 @@ +provider "random" {} +provider "aws" {} + +resource "random_id" "example" { + byte_length = 8 +} + +resource "aws_instance" "example" { + ami = "abc" + instance_type = "t2.micro" +} diff --git a/pkg/tofumigrate/tofumigrate.go b/pkg/tofumigrate/tofumigrate.go new file mode 100644 index 00000000000..7f8d150d995 --- /dev/null +++ b/pkg/tofumigrate/tofumigrate.go @@ -0,0 +1,76 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package tofumigrate + +import ( + "os" + + "github.com/hashicorp/hcl/v2" + tfaddr "github.com/opentofu/registry-address" + + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/getproviders" + "github.com/kubegems/opentofu/pkg/states" + "github.com/kubegems/opentofu/pkg/tfdiags" +) + +// MigrateStateProviderAddresses can be used to update the in-memory view of the state to use registry.opentofu.org +// provider addresses. This only applies for providers which are *not* explicitly referenced in the configuration in full form. +// For example, if the configuration contains a provider block like this: +// +// terraform { +// required_providers { +// random = {} +// } +// } +// +// we will migrate the in-memory view of the statefile to use registry.opentofu.org/hashicorp/random. +// However, if the configuration contains a provider block like this: +// +// terraform { +// required_providers { +// random = { +// source = "registry.terraform.io/hashicorp/random" +// } +// } +// } +// +// then we keep the old address. +func MigrateStateProviderAddresses(config *configs.Config, state *states.State) (*states.State, tfdiags.Diagnostics) { + if os.Getenv("OPENTOFU_STATEFILE_PROVIDER_ADDRESS_TRANSLATION") == "0" { + return state, nil + } + + if state == nil { + return nil, nil + } + + var diags tfdiags.Diagnostics + + stateCopy := state.DeepCopy() + + providers := getproviders.Requirements{} + // config could be nil when we're e.g. 
showing a statefile without the configuration present + if config != nil { + var hclDiags hcl.Diagnostics + providers, hclDiags = config.ProviderRequirements() + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return nil, diags + } + } + + for _, module := range stateCopy.Modules { + for _, resource := range module.Resources { + _, referencedInConfig := providers[resource.ProviderConfig.Provider] + if resource.ProviderConfig.Provider.Hostname == "registry.terraform.io" && !referencedInConfig { + resource.ProviderConfig.Provider.Hostname = tfaddr.DefaultProviderRegistryHost + } + } + } + + return stateCopy, diags +} diff --git a/pkg/tofumigrate/tofumigrate_test.go b/pkg/tofumigrate/tofumigrate_test.go new file mode 100644 index 00000000000..12b0d3b92cb --- /dev/null +++ b/pkg/tofumigrate/tofumigrate_test.go @@ -0,0 +1,237 @@ +// Copyright (c) The OpenTofu Authors +// SPDX-License-Identifier: MPL-2.0 +// Copyright (c) 2023 HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tofumigrate + +import ( + "reflect" + "testing" + + "github.com/hashicorp/hcl/v2" + + "github.com/kubegems/opentofu/pkg/addrs" + "github.com/kubegems/opentofu/pkg/configs" + "github.com/kubegems/opentofu/pkg/configs/configload" + "github.com/kubegems/opentofu/pkg/states" +) + +func TestMigrateStateProviderAddresses(t *testing.T) { + loader, close := configload.NewLoaderForTests(t) + defer close() + + mustParseInstAddr := func(s string) addrs.AbsResourceInstance { + addr, err := addrs.ParseAbsResourceInstanceStr(s) + if err != nil { + t.Fatal(err) + } + return addr + } + + makeRootProviderAddr := func(s string) addrs.AbsProviderConfig { + return addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.MustParseProviderSourceString(s), + } + } + + type args struct { + configDir string + state *states.State + } + tests := []struct { + name string + args args + want *states.State + }{ + { + name: "if there are no code references, migrate", + args: args{ + 
configDir: "testdata/nomention", + state: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/aws"), + ) + }), + }, + want: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/aws"), + ) + }), + }, + { + name: "if there are some full-form references in the code, only migrate the ones not referenced", + args: args{ + configDir: "testdata/mention", + state: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/aws"), + ) + }), + }, + want: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + 
&states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/aws"), + ) + }), + }, + { + name: "if the state file contains no legacy references, return statefile unchanged", + args: args{ + configDir: "testdata/nomention", + state: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/aws"), + ) + }), + }, + want: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/aws"), + ) + }), + }, + { + name: "if there is no code, migrate", + args: args{ + configDir: "", + state: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + 
makeRootProviderAddr("registry.terraform.io/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.terraform.io/hashicorp/aws"), + ) + }), + }, + want: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + mustParseInstAddr("random_id.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/random"), + ) + s.SetResourceInstanceCurrent( + mustParseInstAddr("aws_instance.example"), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{}`), + }, + makeRootProviderAddr("registry.opentofu.org/hashicorp/aws"), + ) + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var cfg *configs.Config + if tt.args.configDir != "" { + var hclDiags hcl.Diagnostics + cfg, hclDiags = loader.LoadConfig(tt.args.configDir, configs.RootModuleCallForTesting()) + if hclDiags.HasErrors() { + t.Fatalf("invalid configuration: %s", hclDiags.Error()) + } + } + + got, err := MigrateStateProviderAddresses(cfg, tt.args.state) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("MigrateStateProviderAddresses() got = %v, want %v", got, tt.want) + } + if err != nil { + t.Errorf("MigrateStateProviderAddresses() err = %v, want %v", err, nil) + } + }) + } +}